From deedb917325ea9ce8085df45dd925b8d583fd661 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 8 Sep 2020 14:26:54 +0100 Subject: [PATCH 001/245] Fix `MultiWriterIdGenerator.current_position`. (#8257) It did not correctly handle IDs finishing being persisted out of order, resulting in the `current_position` lagging until new IDs are persisted. --- changelog.d/8257.misc | 1 + synapse/storage/util/id_generators.py | 43 +++++++++++++++++++---- tests/storage/test_id_generators.py | 50 +++++++++++++++++++++++++++ 3 files changed, 88 insertions(+), 6 deletions(-) create mode 100644 changelog.d/8257.misc diff --git a/changelog.d/8257.misc b/changelog.d/8257.misc new file mode 100644 index 000000000000..47ac583eb4f2 --- /dev/null +++ b/changelog.d/8257.misc @@ -0,0 +1 @@ +Fix non-user visible bug in implementation of `MultiWriterIdGenerator.get_current_token_for_writer`. diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index b7eb4f8ac90e..2a66b3ad4e97 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -224,6 +224,10 @@ def __init__( # should be less than the minimum of this set (if not empty). self._unfinished_ids = set() # type: Set[int] + # Set of local IDs that we've processed that are larger than the current + # position, due to there being smaller unpersisted IDs. + self._finished_ids = set() # type: Set[int] + # We track the max position where we know everything before has been # persisted. This is done by a) looking at the min across all instances # and b) noting that if we have seen a run of persisted positions @@ -348,17 +352,44 @@ def get_next_txn(self, txn: LoggingTransaction): def _mark_id_as_finished(self, next_id: int): """The ID has finished being processed so we should advance the - current poistion if possible. + current position if possible. """ with self._lock: self._unfinished_ids.discard(next_id) + self._finished_ids.add(next_id) + + new_cur = None + + if self._unfinished_ids: + # If there are unfinished IDs then the new position will be the + # largest finished ID less than the minimum unfinished ID. + + finished = set() + + min_unfinshed = min(self._unfinished_ids) + for s in self._finished_ids: + if s < min_unfinshed: + if new_cur is None or new_cur < s: + new_cur = s + else: + finished.add(s) + + # We clear these out since they're now all less than the new + # position. + self._finished_ids = finished + else: + # There are no unfinished IDs so the new position is simply the + # largest finished one. + new_cur = max(self._finished_ids) + + # We clear these out since they're now all less than the new + # position. + self._finished_ids.clear() - # Figure out if its safe to advance the position by checking there - # aren't any lower allocated IDs that are yet to finish. - if all(c > next_id for c in self._unfinished_ids): + if new_cur: curr = self._current_positions.get(self._instance_name, 0) - self._current_positions[self._instance_name] = max(curr, next_id) + self._current_positions[self._instance_name] = max(curr, new_cur) self._add_persisted_position(next_id) @@ -428,7 +459,7 @@ def _add_persisted_position(self, new_id: int): # We move the current min position up if the minimum current positions # of all instances is higher (since by definition all positions less # that that have been persisted). 
- min_curr = min(self._current_positions.values()) + min_curr = min(self._current_positions.values(), default=0) self._persisted_upto_position = max(min_curr, self._persisted_upto_position) # We now iterate through the seen positions, discarding those that are diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py index f0a8e32f1eaf..20636fc40027 100644 --- a/tests/storage/test_id_generators.py +++ b/tests/storage/test_id_generators.py @@ -122,6 +122,56 @@ async def _get_next_async(): self.assertEqual(id_gen.get_positions(), {"master": 8}) self.assertEqual(id_gen.get_current_token_for_writer("master"), 8) + def test_out_of_order_finish(self): + """Test that IDs persisted out of order are correctly handled + """ + + # Prefill table with 7 rows written by 'master' + self._insert_rows("master", 7) + + id_gen = self._create_id_generator() + + self.assertEqual(id_gen.get_positions(), {"master": 7}) + self.assertEqual(id_gen.get_current_token_for_writer("master"), 7) + + ctx1 = self.get_success(id_gen.get_next()) + ctx2 = self.get_success(id_gen.get_next()) + ctx3 = self.get_success(id_gen.get_next()) + ctx4 = self.get_success(id_gen.get_next()) + + s1 = ctx1.__enter__() + s2 = ctx2.__enter__() + s3 = ctx3.__enter__() + s4 = ctx4.__enter__() + + self.assertEqual(s1, 8) + self.assertEqual(s2, 9) + self.assertEqual(s3, 10) + self.assertEqual(s4, 11) + + self.assertEqual(id_gen.get_positions(), {"master": 7}) + self.assertEqual(id_gen.get_current_token_for_writer("master"), 7) + + ctx2.__exit__(None, None, None) + + self.assertEqual(id_gen.get_positions(), {"master": 7}) + self.assertEqual(id_gen.get_current_token_for_writer("master"), 7) + + ctx1.__exit__(None, None, None) + + self.assertEqual(id_gen.get_positions(), {"master": 9}) + self.assertEqual(id_gen.get_current_token_for_writer("master"), 9) + + ctx4.__exit__(None, None, None) + + self.assertEqual(id_gen.get_positions(), {"master": 9}) + self.assertEqual(id_gen.get_current_token_for_writer("master"), 9) + + ctx3.__exit__(None, None, None) + + self.assertEqual(id_gen.get_positions(), {"master": 11}) + self.assertEqual(id_gen.get_current_token_for_writer("master"), 11) + def test_multi_instance(self): """Test that reads and writes from multiple processes are handled correctly. From 703e2b8a960a0845a5fbe14c34c45ae802300a4d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 8 Sep 2020 14:52:51 +0100 Subject: [PATCH 002/245] Use the right constructor for log records (#8278) Update `log_function` to use the right factory to create log records, to make sure that they have `request` attributes. Fixes: #8267. --- changelog.d/8278.bugfix | 1 + synapse/logging/utils.py | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/8278.bugfix diff --git a/changelog.d/8278.bugfix b/changelog.d/8278.bugfix new file mode 100644 index 000000000000..50e40ca2a9da --- /dev/null +++ b/changelog.d/8278.bugfix @@ -0,0 +1 @@ +Fix a bug which cause the logging system to report errors, if `DEBUG` was enabled and no `context` filter was applied. 
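The hunk below replaces the hand-built `logging.LogRecord(...)` in `_log_debug_as_f` with `logger.makeRecord(...)`. Records created through the logger go through the installed log record factory, which (per the commit message) is what gives them a `request` attribute; records constructed directly skip the factory. A minimal sketch of that mechanism, separate from Synapse's own logging setup -- the `request` value is a placeholder:

    import logging

    old_factory = logging.getLogRecordFactory()

    def factory_with_request(*args, **kwargs):
        # Hypothetical factory: attach the attribute the log format expects.
        record = old_factory(*args, **kwargs)
        record.request = "placeholder-request-id"
        return record

    logging.setLogRecordFactory(factory_with_request)
    logger = logging.getLogger("demo")

    # Built via the logger: the factory runs, so .request is present.
    via_logger = logger.makeRecord("demo", logging.DEBUG, "file.py", 1, "msg", (), None)
    # Built directly: the factory is bypassed, so .request is missing.
    by_hand = logging.LogRecord("demo", logging.DEBUG, "file.py", 1, "msg", (), None)

    print(hasattr(via_logger, "request"), hasattr(by_hand, "request"))  # True False
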
diff --git a/synapse/logging/utils.py b/synapse/logging/utils.py index fea774e2e524..becf66dd86c8 100644 --- a/synapse/logging/utils.py +++ b/synapse/logging/utils.py @@ -29,11 +29,11 @@ def _log_debug_as_f(f, msg, msg_args): lineno = f.__code__.co_firstlineno pathname = f.__code__.co_filename - record = logging.LogRecord( + record = logger.makeRecord( name=name, level=logging.DEBUG, - pathname=pathname, - lineno=lineno, + fn=pathname, + lno=lineno, msg=msg, args=msg_args, exc_info=None, From 0f545e6b9670fd780579445ff68dba95a8e08545 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 8 Sep 2020 15:00:17 +0100 Subject: [PATCH 003/245] Clean up types for PaginationConfig (#8250) This removes `SourcePaginationConfig` and `get_pagination_rows`. The reasoning behind this is that these generic classes/functions erased the types of the IDs it used (i.e. instead of passing around `StreamToken` it'd pass in e.g. `token.room_key`, which don't have uniform types). --- changelog.d/8250.misc | 1 + synapse/handlers/initial_sync.py | 11 +++--- synapse/handlers/pagination.py | 42 +++++++++++----------- synapse/handlers/presence.py | 3 -- synapse/handlers/receipts.py | 15 -------- synapse/notifier.py | 5 +-- synapse/streams/config.py | 61 +++++++++++--------------------- 7 files changed, 52 insertions(+), 86 deletions(-) create mode 100644 changelog.d/8250.misc diff --git a/changelog.d/8250.misc b/changelog.d/8250.misc new file mode 100644 index 000000000000..b6896a9300d5 --- /dev/null +++ b/changelog.d/8250.misc @@ -0,0 +1 @@ +Clean up type hints for `PaginationConfig`. diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index d5ddc583ad69..ddb8f0712bae 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -116,14 +116,13 @@ async def _snapshot_all_rooms( now_token = self.hs.get_event_sources().get_current_token() presence_stream = self.hs.get_event_sources().sources["presence"] - pagination_config = PaginationConfig(from_token=now_token) - presence, _ = await presence_stream.get_pagination_rows( - user, pagination_config.get_source_config("presence"), None + presence, _ = await presence_stream.get_new_events( + user, from_key=None, include_offline=False ) - receipt_stream = self.hs.get_event_sources().sources["receipt"] - receipt, _ = await receipt_stream.get_pagination_rows( - user, pagination_config.get_source_config("receipt"), None + joined_rooms = [r.room_id for r in room_list if r.membership == Membership.JOIN] + receipt = await self.store.get_linearized_receipts_for_rooms( + joined_rooms, to_key=int(now_token.receipt_key), ) tags_by_room = await self.store.get_tags_for_user(user_id) diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 34ed0e292157..195a1fd77e3e 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -335,20 +335,16 @@ async def get_messages( user_id = requester.user.to_string() if pagin_config.from_token: - room_token = pagin_config.from_token.room_key + from_token = pagin_config.from_token else: - pagin_config.from_token = ( - self.hs.get_event_sources().get_current_token_for_pagination() - ) - room_token = pagin_config.from_token.room_key - - room_token = RoomStreamToken.parse(room_token) + from_token = self.hs.get_event_sources().get_current_token_for_pagination() - pagin_config.from_token = pagin_config.from_token.copy_and_replace( - "room_key", str(room_token) - ) + if pagin_config.limit is None: + # This shouldn't happen as we've set a default limit 
before this + # gets called. + raise Exception("limit not set") - source_config = pagin_config.get_source_config("room") + room_token = RoomStreamToken.parse(from_token.room_key) with await self.pagination_lock.read(room_id): ( @@ -358,7 +354,7 @@ async def get_messages( room_id, user_id, allow_departed_users=True ) - if source_config.direction == "b": + if pagin_config.direction == "b": # if we're going backwards, we might need to backfill. This # requires that we have a topo token. if room_token.topological: @@ -381,22 +377,28 @@ async def get_messages( member_event_id ) if RoomStreamToken.parse(leave_token).topological < max_topo: - source_config.from_key = str(leave_token) + from_token = from_token.copy_and_replace( + "room_key", leave_token + ) await self.hs.get_handlers().federation_handler.maybe_backfill( room_id, max_topo ) + to_room_key = None + if pagin_config.to_token: + to_room_key = pagin_config.to_token.room_key + events, next_key = await self.store.paginate_room_events( room_id=room_id, - from_key=source_config.from_key, - to_key=source_config.to_key, - direction=source_config.direction, - limit=source_config.limit, + from_key=from_token.room_key, + to_key=to_room_key, + direction=pagin_config.direction, + limit=pagin_config.limit, event_filter=event_filter, ) - next_token = pagin_config.from_token.copy_and_replace("room_key", next_key) + next_token = from_token.copy_and_replace("room_key", next_key) if events: if event_filter: @@ -409,7 +411,7 @@ async def get_messages( if not events: return { "chunk": [], - "start": pagin_config.from_token.to_string(), + "start": from_token.to_string(), "end": next_token.to_string(), } @@ -438,7 +440,7 @@ async def get_messages( events, time_now, as_client_event=as_client_event ) ), - "start": pagin_config.from_token.to_string(), + "start": from_token.to_string(), "end": next_token.to_string(), } diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 91a3aec1cc17..1000ac95ff18 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -1108,9 +1108,6 @@ async def get_new_events( def get_current_key(self): return self.store.get_current_presence_token() - async def get_pagination_rows(self, user, pagination_config, key): - return await self.get_new_events(user, from_key=None, include_offline=False) - @cached(num_args=2, cache_context=True) async def _get_interested_in(self, user, explicit_room_id, cache_context): """Returns the set of users that the given user should see presence diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index 2cc6c2eb68aa..bdd8e52edd3b 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -142,18 +142,3 @@ async def get_new_events(self, from_key, room_ids, **kwargs): def get_current_key(self, direction="f"): return self.store.get_max_receipt_stream_id() - - async def get_pagination_rows(self, user, config, key): - to_key = int(config.from_key) - - if config.to_key: - from_key = int(config.to_key) - else: - from_key = None - - room_ids = await self.store.get_rooms_for_user(user.to_string()) - events = await self.store.get_linearized_receipts_for_rooms( - room_ids, from_key=from_key, to_key=to_key - ) - - return (events, to_key) diff --git a/synapse/notifier.py b/synapse/notifier.py index b7f4041306fd..71f2370874b3 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -432,8 +432,9 @@ async def get_events_for( If explicit_room_id is set, that room will be polled for events only if it is world readable or the user has 
joined the room. """ - from_token = pagination_config.from_token - if not from_token: + if pagination_config.from_token: + from_token = pagination_config.from_token + else: from_token = self.event_sources.get_current_token() limit = pagination_config.limit diff --git a/synapse/streams/config.py b/synapse/streams/config.py index d97dc4d10175..0bdf846edf62 100644 --- a/synapse/streams/config.py +++ b/synapse/streams/config.py @@ -14,9 +14,13 @@ # limitations under the License. import logging +from typing import Optional + +import attr from synapse.api.errors import SynapseError from synapse.http.servlet import parse_integer, parse_string +from synapse.http.site import SynapseRequest from synapse.types import StreamToken logger = logging.getLogger(__name__) @@ -25,38 +29,22 @@ MAX_LIMIT = 1000 -class SourcePaginationConfig: - - """A configuration object which stores pagination parameters for a - specific event source.""" - - def __init__(self, from_key=None, to_key=None, direction="f", limit=None): - self.from_key = from_key - self.to_key = to_key - self.direction = "f" if direction == "f" else "b" - self.limit = min(int(limit), MAX_LIMIT) if limit is not None else None - - def __repr__(self): - return "StreamConfig(from_key=%r, to_key=%r, direction=%r, limit=%r)" % ( - self.from_key, - self.to_key, - self.direction, - self.limit, - ) - - +@attr.s(slots=True) class PaginationConfig: - """A configuration object which stores pagination parameters.""" - def __init__(self, from_token=None, to_token=None, direction="f", limit=None): - self.from_token = from_token - self.to_token = to_token - self.direction = "f" if direction == "f" else "b" - self.limit = min(int(limit), MAX_LIMIT) if limit is not None else None + from_token = attr.ib(type=Optional[StreamToken]) + to_token = attr.ib(type=Optional[StreamToken]) + direction = attr.ib(type=str) + limit = attr.ib(type=Optional[int]) @classmethod - def from_request(cls, request, raise_invalid_params=True, default_limit=None): + def from_request( + cls, + request: SynapseRequest, + raise_invalid_params: bool = True, + default_limit: Optional[int] = None, + ) -> "PaginationConfig": direction = parse_string(request, "dir", default="f", allowed_values=["f", "b"]) from_tok = parse_string(request, "from") @@ -78,8 +66,11 @@ def from_request(cls, request, raise_invalid_params=True, default_limit=None): limit = parse_integer(request, "limit", default=default_limit) - if limit and limit < 0: - raise SynapseError(400, "Limit must be 0 or above") + if limit: + if limit < 0: + raise SynapseError(400, "Limit must be 0 or above") + + limit = min(int(limit), MAX_LIMIT) try: return PaginationConfig(from_tok, to_tok, direction, limit) @@ -87,20 +78,10 @@ def from_request(cls, request, raise_invalid_params=True, default_limit=None): logger.exception("Failed to create pagination config") raise SynapseError(400, "Invalid request.") - def __repr__(self): + def __repr__(self) -> str: return ("PaginationConfig(from_tok=%r, to_tok=%r, direction=%r, limit=%r)") % ( self.from_token, self.to_token, self.direction, self.limit, ) - - def get_source_config(self, source_name): - keyname = "%s_key" % source_name - - return SourcePaginationConfig( - from_key=getattr(self.from_token, keyname), - to_key=getattr(self.to_token, keyname) if self.to_token else None, - direction=self.direction, - limit=self.limit, - ) From 094896a69d44a69946df099da59adec0b52da0ac Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 8 Sep 2020 16:03:09 +0100 
Subject: [PATCH 004/245] Add a config option for validating 'next_link' parameters against a domain whitelist (#8275) This is a config option ported over from DINUM's Sydent: https://github.com/matrix-org/sydent/pull/285 They've switched to validating 3PIDs via Synapse rather than Sydent, and would like to retain this functionality. This original purpose for this change is phishing prevention. This solution could also potentially be replaced by a similar one to https://github.com/matrix-org/synapse/pull/8004, but across all `*/submit_token` endpoint. This option may still be useful to enterprise even with that safeguard in place though, if they want to be absolutely sure that their employees don't follow links to other domains. --- changelog.d/8275.feature | 1 + docs/sample_config.yaml | 18 ++++ synapse/config/server.py | 33 ++++++- synapse/rest/client/v2_alpha/account.py | 66 +++++++++++-- tests/rest/client/v2_alpha/test_account.py | 103 +++++++++++++++++++-- 5 files changed, 204 insertions(+), 17 deletions(-) create mode 100644 changelog.d/8275.feature diff --git a/changelog.d/8275.feature b/changelog.d/8275.feature new file mode 100644 index 000000000000..17549c3df39a --- /dev/null +++ b/changelog.d/8275.feature @@ -0,0 +1 @@ +Add a config option to specify a whitelist of domains that a user can be redirected to after validating their email or phone number. \ No newline at end of file diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 3528d9e11f5c..994b0a62c4c4 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -432,6 +432,24 @@ retention: # #request_token_inhibit_3pid_errors: true +# A list of domains that the domain portion of 'next_link' parameters +# must match. +# +# This parameter is optionally provided by clients while requesting +# validation of an email or phone number, and maps to a link that +# users will be automatically redirected to after validation +# succeeds. Clients can make use this parameter to aid the validation +# process. +# +# The whitelist is applied whether the homeserver or an +# identity server is handling validation. +# +# The default value is no whitelist functionality; all domains are +# allowed. Setting this value to an empty list will instead disallow +# all domains. +# +#next_link_domain_whitelist: ["matrix.org"] + ## TLS ## diff --git a/synapse/config/server.py b/synapse/config/server.py index e85c6a0840c6..532b91047024 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -19,7 +19,7 @@ import os.path import re from textwrap import indent -from typing import Any, Dict, Iterable, List, Optional +from typing import Any, Dict, Iterable, List, Optional, Set import attr import yaml @@ -542,6 +542,19 @@ class LimitRemoteRoomsConfig: users_new_default_push_rules ) # type: set + # Whitelist of domain names that given next_link parameters must have + next_link_domain_whitelist = config.get( + "next_link_domain_whitelist" + ) # type: Optional[List[str]] + + self.next_link_domain_whitelist = None # type: Optional[Set[str]] + if next_link_domain_whitelist is not None: + if not isinstance(next_link_domain_whitelist, list): + raise ConfigError("'next_link_domain_whitelist' must be a list") + + # Turn the list into a set to improve lookup speed. 
+ self.next_link_domain_whitelist = set(next_link_domain_whitelist) + def has_tls_listener(self) -> bool: return any(listener.tls for listener in self.listeners) @@ -1014,6 +1027,24 @@ def generate_config_section( # act as if no error happened and return a fake session ID ('sid') to clients. # #request_token_inhibit_3pid_errors: true + + # A list of domains that the domain portion of 'next_link' parameters + # must match. + # + # This parameter is optionally provided by clients while requesting + # validation of an email or phone number, and maps to a link that + # users will be automatically redirected to after validation + # succeeds. Clients can make use this parameter to aid the validation + # process. + # + # The whitelist is applied whether the homeserver or an + # identity server is handling validation. + # + # The default value is no whitelist functionality; all domains are + # allowed. Setting this value to an empty list will instead disallow + # all domains. + # + #next_link_domain_whitelist: ["matrix.org"] """ % locals() ) diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 34814777313e..455051ac4626 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -17,6 +17,11 @@ import logging import random from http import HTTPStatus +from typing import TYPE_CHECKING +from urllib.parse import urlparse + +if TYPE_CHECKING: + from synapse.app.homeserver import HomeServer from synapse.api.constants import LoginType from synapse.api.errors import ( @@ -98,6 +103,9 @@ async def on_POST(self, request): Codes.THREEPID_DENIED, ) + # Raise if the provided next_link value isn't valid + assert_valid_next_link(self.hs, next_link) + # The email will be sent to the stored address. 
# This avoids a potential account hijack by requesting a password reset to # an email address which is controlled by the attacker but which, after @@ -446,6 +454,9 @@ async def on_POST(self, request): Codes.THREEPID_DENIED, ) + # Raise if the provided next_link value isn't valid + assert_valid_next_link(self.hs, next_link) + existing_user_id = await self.store.get_user_id_by_threepid("email", email) if existing_user_id is not None: @@ -517,6 +528,9 @@ async def on_POST(self, request): Codes.THREEPID_DENIED, ) + # Raise if the provided next_link value isn't valid + assert_valid_next_link(self.hs, next_link) + existing_user_id = await self.store.get_user_id_by_threepid("msisdn", msisdn) if existing_user_id is not None: @@ -603,15 +617,10 @@ async def on_GET(self, request): # Perform a 302 redirect if next_link is set if next_link: - if next_link.startswith("file:///"): - logger.warning( - "Not redirecting to next_link as it is a local file: address" - ) - else: - request.setResponseCode(302) - request.setHeader("Location", next_link) - finish_request(request) - return None + request.setResponseCode(302) + request.setHeader("Location", next_link) + finish_request(request) + return None # Otherwise show the success template html = self.config.email_add_threepid_template_success_html_content @@ -875,6 +884,45 @@ async def on_POST(self, request): return 200, {"id_server_unbind_result": id_server_unbind_result} +def assert_valid_next_link(hs: "HomeServer", next_link: str): + """ + Raises a SynapseError if a given next_link value is invalid + + next_link is valid if the scheme is http(s) and the next_link.domain_whitelist config + option is either empty or contains a domain that matches the one in the given next_link + + Args: + hs: The homeserver object + next_link: The next_link value given by the client + + Raises: + SynapseError: If the next_link is invalid + """ + valid = True + + # Parse the contents of the URL + next_link_parsed = urlparse(next_link) + + # Scheme must not point to the local drive + if next_link_parsed.scheme == "file": + valid = False + + # If the domain whitelist is set, the domain must be in it + if ( + valid + and hs.config.next_link_domain_whitelist is not None + and next_link_parsed.hostname not in hs.config.next_link_domain_whitelist + ): + valid = False + + if not valid: + raise SynapseError( + 400, + "'next_link' domain not included in whitelist, or not http(s)", + errcode=Codes.INVALID_PARAM, + ) + + class WhoamiRestServlet(RestServlet): PATTERNS = client_patterns("/account/whoami$") diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py index 152a5182fa39..0a51aeff92ac 100644 --- a/tests/rest/client/v2_alpha/test_account.py +++ b/tests/rest/client/v2_alpha/test_account.py @@ -14,11 +14,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- import json import os import re from email.parser import Parser +from typing import Optional import pkg_resources @@ -29,6 +29,7 @@ from synapse.rest.client.v2_alpha import account, register from tests import unittest +from tests.unittest import override_config class PasswordResetTestCase(unittest.HomeserverTestCase): @@ -668,16 +669,104 @@ def test_no_valid_token(self): self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertFalse(channel.json_body["threepids"]) - def _request_token(self, email, client_secret): + @override_config({"next_link_domain_whitelist": None}) + def test_next_link(self): + """Tests a valid next_link parameter value with no whitelist (good case)""" + self._request_token( + "something@example.com", + "some_secret", + next_link="https://example.com/a/good/site", + expect_code=200, + ) + + @override_config({"next_link_domain_whitelist": None}) + def test_next_link_exotic_protocol(self): + """Tests using a esoteric protocol as a next_link parameter value. + Someone may be hosting a client on IPFS etc. + """ + self._request_token( + "something@example.com", + "some_secret", + next_link="some-protocol://abcdefghijklmopqrstuvwxyz", + expect_code=200, + ) + + @override_config({"next_link_domain_whitelist": None}) + def test_next_link_file_uri(self): + """Tests next_link parameters cannot be file URI""" + # Attempt to use a next_link value that points to the local disk + self._request_token( + "something@example.com", + "some_secret", + next_link="file:///host/path", + expect_code=400, + ) + + @override_config({"next_link_domain_whitelist": ["example.com", "example.org"]}) + def test_next_link_domain_whitelist(self): + """Tests next_link parameters must fit the whitelist if provided""" + self._request_token( + "something@example.com", + "some_secret", + next_link="https://example.com/some/good/page", + expect_code=200, + ) + + self._request_token( + "something@example.com", + "some_secret", + next_link="https://example.org/some/also/good/page", + expect_code=200, + ) + + self._request_token( + "something@example.com", + "some_secret", + next_link="https://bad.example.org/some/bad/page", + expect_code=400, + ) + + @override_config({"next_link_domain_whitelist": []}) + def test_empty_next_link_domain_whitelist(self): + """Tests an empty next_lint_domain_whitelist value, meaning next_link is essentially + disallowed + """ + self._request_token( + "something@example.com", + "some_secret", + next_link="https://example.com/a/page", + expect_code=400, + ) + + def _request_token( + self, + email: str, + client_secret: str, + next_link: Optional[str] = None, + expect_code: int = 200, + ) -> str: + """Request a validation token to add an email address to a user's account + + Args: + email: The email address to validate + client_secret: A secret string + next_link: A link to redirect the user to after validation + expect_code: Expected return code of the call + + Returns: + The ID of the new threepid validation session + """ + body = {"client_secret": client_secret, "email": email, "send_attempt": 1} + if next_link: + body["next_link"] = next_link + request, channel = self.make_request( - "POST", - b"account/3pid/email/requestToken", - {"client_secret": client_secret, "email": email, "send_attempt": 1}, + "POST", b"account/3pid/email/requestToken", body, ) self.render(request) - self.assertEquals(200, channel.code, channel.result) + self.assertEquals(expect_code, channel.code, channel.result) - return channel.json_body["sid"] + return 
channel.json_body.get("sid") def _request_token_invalid_email( self, email, expected_errcode, expected_error, client_secret="foobar", From 63c0e9e1954fc7fc10a2575c54aecc8944de60f3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 8 Sep 2020 16:48:15 +0100 Subject: [PATCH 005/245] Add types to StreamToken and RoomStreamToken (#8279) The intention here is to change `StreamToken.room_key` to be a `RoomStreamToken` in a future PR, but that is a big enough change without this refactoring too. --- changelog.d/8279.misc | 1 + synapse/handlers/sync.py | 5 +- synapse/storage/databases/main/devices.py | 7 +- synapse/storage/databases/main/stream.py | 21 +-- synapse/types.py | 152 +++++++++++----------- 5 files changed, 95 insertions(+), 91 deletions(-) create mode 100644 changelog.d/8279.misc diff --git a/changelog.d/8279.misc b/changelog.d/8279.misc new file mode 100644 index 000000000000..99f669001f18 --- /dev/null +++ b/changelog.d/8279.misc @@ -0,0 +1 @@ +Add type hints to `StreamToken` and `RoomStreamToken` classes. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index e2ddb628ff22..cc47e8b62c8f 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1310,12 +1310,11 @@ async def _generate_sync_entry_for_presence( presence_source = self.event_sources.sources["presence"] since_token = sync_result_builder.since_token + presence_key = None + include_offline = False if since_token and not sync_result_builder.full_state: presence_key = since_token.presence_key include_offline = True - else: - presence_key = None - include_offline = False presence, presence_key = await presence_source.get_new_events( user=user, diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index add4e3ea0ec0..306fc6947c19 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -481,7 +481,7 @@ async def get_cached_devices_for_user(self, user_id: str) -> Dict[str, JsonDict] } async def get_users_whose_devices_changed( - self, from_key: str, user_ids: Iterable[str] + self, from_key: int, user_ids: Iterable[str] ) -> Set[str]: """Get set of users whose devices have changed since `from_key` that are in the given list of user_ids. @@ -493,7 +493,6 @@ async def get_users_whose_devices_changed( Returns: The set of user_ids whose devices have changed since `from_key` """ - from_key = int(from_key) # Get set of users who *may* have changed. Users not in the returned # list have definitely not changed. @@ -527,7 +526,7 @@ def _get_users_whose_devices_changed_txn(txn): ) async def get_users_whose_signatures_changed( - self, user_id: str, from_key: str + self, user_id: str, from_key: int ) -> Set[str]: """Get the users who have new cross-signing signatures made by `user_id` since `from_key`. @@ -539,7 +538,7 @@ async def get_users_whose_signatures_changed( Returns: A set of user IDs with updated signatures. 
""" - from_key = int(from_key) + if self._user_signature_stream_cache.has_entity_changed(user_id, from_key): sql = """ SELECT DISTINCT user_ids FROM user_signature_stream diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index be6df8a6d1d4..08a13a8b47b6 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -79,8 +79,8 @@ def generate_pagination_where_clause( direction: str, column_names: Tuple[str, str], - from_token: Optional[Tuple[int, int]], - to_token: Optional[Tuple[int, int]], + from_token: Optional[Tuple[Optional[int], int]], + to_token: Optional[Tuple[Optional[int], int]], engine: BaseDatabaseEngine, ) -> str: """Creates an SQL expression to bound the columns by the pagination @@ -535,13 +535,13 @@ async def get_recent_event_ids_for_room( if limit == 0: return [], end_token - end_token = RoomStreamToken.parse(end_token) + parsed_end_token = RoomStreamToken.parse(end_token) rows, token = await self.db_pool.runInteraction( "get_recent_event_ids_for_room", self._paginate_room_events_txn, room_id, - from_token=end_token, + from_token=parsed_end_token, limit=limit, ) @@ -989,8 +989,8 @@ def _paginate_room_events_txn( bounds = generate_pagination_where_clause( direction=direction, column_names=("topological_ordering", "stream_ordering"), - from_token=from_token, - to_token=to_token, + from_token=from_token.as_tuple(), + to_token=to_token.as_tuple() if to_token else None, engine=self.database_engine, ) @@ -1083,16 +1083,17 @@ async def paginate_room_events( and `to_key`). """ - from_key = RoomStreamToken.parse(from_key) + parsed_from_key = RoomStreamToken.parse(from_key) + parsed_to_key = None if to_key: - to_key = RoomStreamToken.parse(to_key) + parsed_to_key = RoomStreamToken.parse(to_key) rows, token = await self.db_pool.runInteraction( "paginate_room_events", self._paginate_room_events_txn, room_id, - from_key, - to_key, + parsed_from_key, + parsed_to_key, direction, limit, event_filter, diff --git a/synapse/types.py b/synapse/types.py index f7de48f14843..ba4533503880 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -18,7 +18,7 @@ import string import sys from collections import namedtuple -from typing import Any, Dict, Mapping, MutableMapping, Tuple, Type, TypeVar +from typing import Any, Dict, Mapping, MutableMapping, Optional, Tuple, Type, TypeVar import attr from signedjson.key import decode_verify_key_bytes @@ -362,22 +362,79 @@ def f2(m): return username.decode("ascii") -class StreamToken( - namedtuple( - "Token", - ( - "room_key", - "presence_key", - "typing_key", - "receipt_key", - "account_data_key", - "push_rules_key", - "to_device_key", - "device_list_key", - "groups_key", - ), +@attr.s(frozen=True, slots=True) +class RoomStreamToken: + """Tokens are positions between events. The token "s1" comes after event 1. + + s0 s1 + | | + [0] V [1] V [2] + + Tokens can either be a point in the live event stream or a cursor going + through historic events. + + When traversing the live event stream events are ordered by when they + arrived at the homeserver. + + When traversing historic events the events are ordered by their depth in + the event graph "topological_ordering" and then by when they arrived at the + homeserver "stream_ordering". + + Live tokens start with an "s" followed by the "stream_ordering" id of the + event it comes after. 
Historic tokens start with a "t" followed by the + "topological_ordering" id of the event it comes after, followed by "-", + followed by the "stream_ordering" id of the event it comes after. + """ + + topological = attr.ib( + type=Optional[int], + validator=attr.validators.optional(attr.validators.instance_of(int)), ) -): + stream = attr.ib(type=int, validator=attr.validators.instance_of(int)) + + @classmethod + def parse(cls, string: str) -> "RoomStreamToken": + try: + if string[0] == "s": + return cls(topological=None, stream=int(string[1:])) + if string[0] == "t": + parts = string[1:].split("-", 1) + return cls(topological=int(parts[0]), stream=int(parts[1])) + except Exception: + pass + raise SynapseError(400, "Invalid token %r" % (string,)) + + @classmethod + def parse_stream_token(cls, string: str) -> "RoomStreamToken": + try: + if string[0] == "s": + return cls(topological=None, stream=int(string[1:])) + except Exception: + pass + raise SynapseError(400, "Invalid token %r" % (string,)) + + def as_tuple(self) -> Tuple[Optional[int], int]: + return (self.topological, self.stream) + + def __str__(self) -> str: + if self.topological is not None: + return "t%d-%d" % (self.topological, self.stream) + else: + return "s%d" % (self.stream,) + + +@attr.s(slots=True, frozen=True) +class StreamToken: + room_key = attr.ib(type=str) + presence_key = attr.ib(type=int) + typing_key = attr.ib(type=int) + receipt_key = attr.ib(type=int) + account_data_key = attr.ib(type=int) + push_rules_key = attr.ib(type=int) + to_device_key = attr.ib(type=int) + device_list_key = attr.ib(type=int) + groups_key = attr.ib(type=int) + _SEPARATOR = "_" START = None # type: StreamToken @@ -385,15 +442,15 @@ class StreamToken( def from_string(cls, string): try: keys = string.split(cls._SEPARATOR) - while len(keys) < len(cls._fields): + while len(keys) < len(attr.fields(cls)): # i.e. old token from before receipt_key keys.append("0") - return cls(*keys) + return cls(keys[0], *(int(k) for k in keys[1:])) except Exception: raise SynapseError(400, "Invalid Token") def to_string(self): - return self._SEPARATOR.join([str(k) for k in self]) + return self._SEPARATOR.join([str(k) for k in attr.astuple(self)]) @property def room_stream_id(self): @@ -435,63 +492,10 @@ def copy_and_advance(self, key, new_value): return self def copy_and_replace(self, key, new_value): - return self._replace(**{key: new_value}) - - -StreamToken.START = StreamToken(*(["s0"] + ["0"] * (len(StreamToken._fields) - 1))) - - -class RoomStreamToken(namedtuple("_StreamToken", "topological stream")): - """Tokens are positions between events. The token "s1" comes after event 1. - - s0 s1 - | | - [0] V [1] V [2] - - Tokens can either be a point in the live event stream or a cursor going - through historic events. - - When traversing the live event stream events are ordered by when they - arrived at the homeserver. - - When traversing historic events the events are ordered by their depth in - the event graph "topological_ordering" and then by when they arrived at the - homeserver "stream_ordering". - - Live tokens start with an "s" followed by the "stream_ordering" id of the - event it comes after. Historic tokens start with a "t" followed by the - "topological_ordering" id of the event it comes after, followed by "-", - followed by the "stream_ordering" id of the event it comes after. 
- """ + return attr.evolve(self, **{key: new_value}) - __slots__ = [] # type: list - - @classmethod - def parse(cls, string): - try: - if string[0] == "s": - return cls(topological=None, stream=int(string[1:])) - if string[0] == "t": - parts = string[1:].split("-", 1) - return cls(topological=int(parts[0]), stream=int(parts[1])) - except Exception: - pass - raise SynapseError(400, "Invalid token %r" % (string,)) - @classmethod - def parse_stream_token(cls, string): - try: - if string[0] == "s": - return cls(topological=None, stream=int(string[1:])) - except Exception: - pass - raise SynapseError(400, "Invalid token %r" % (string,)) - - def __str__(self): - if self.topological is not None: - return "t%d-%d" % (self.topological, self.stream) - else: - return "s%d" % (self.stream,) +StreamToken.START = StreamToken.from_string("s0_0") class ThirdPartyInstanceID( From 560f3b8609a3d1d566f33eeab029a4e96fe3ee02 Mon Sep 17 00:00:00 2001 From: "DeepBlueV7.X" Date: Tue, 8 Sep 2020 16:19:50 +0000 Subject: [PATCH 006/245] Include method in thumbnail media name (#7124) This fixes an issue where different methods (crop/scale) overwrite each other. This first tries the new path. If that fails and we are looking for a remote thumbnail, it tries the old path. If that still isn't found, it continues as normal. This should probably be removed in the future, after some of the newer thumbnails were generated with the new path on most deployments. Then the overhead should be minimal if the other thumbnails need to be regenerated. Signed-off-by: Nicolas Werner --- changelog.d/7124.bugfix | 1 + synapse/rest/media/v1/filepath.py | 19 ++++++- synapse/rest/media/v1/media_storage.py | 28 +++++++++ .../databases/main/media_repository.py | 57 +++++++++++++++++++ ...ethod_to_thumbnail_constraint.sql.postgres | 33 +++++++++++ ..._method_to_thumbnail_constraint.sql.sqlite | 44 ++++++++++++++ 6 files changed, 181 insertions(+), 1 deletion(-) create mode 100644 changelog.d/7124.bugfix create mode 100644 synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.postgres create mode 100644 synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.sqlite diff --git a/changelog.d/7124.bugfix b/changelog.d/7124.bugfix new file mode 100644 index 000000000000..8fd177780d7d --- /dev/null +++ b/changelog.d/7124.bugfix @@ -0,0 +1 @@ +Fix a bug in the media repository where remote thumbnails with the same size but different crop methods would overwrite each other. Contributed by @deepbluev7. diff --git a/synapse/rest/media/v1/filepath.py b/synapse/rest/media/v1/filepath.py index d2826374a7ab..7447eeaebeee 100644 --- a/synapse/rest/media/v1/filepath.py +++ b/synapse/rest/media/v1/filepath.py @@ -80,7 +80,7 @@ def remote_media_thumbnail_rel( self, server_name, file_id, width, height, content_type, method ): top_level_type, sub_type = content_type.split("/") - file_name = "%i-%i-%s-%s" % (width, height, top_level_type, sub_type) + file_name = "%i-%i-%s-%s-%s" % (width, height, top_level_type, sub_type, method) return os.path.join( "remote_thumbnail", server_name, @@ -92,6 +92,23 @@ def remote_media_thumbnail_rel( remote_media_thumbnail = _wrap_in_base_path(remote_media_thumbnail_rel) + # Legacy path that was used to store thumbnails previously. + # Should be removed after some time, when most of the thumbnails are stored + # using the new path. 
+ def remote_media_thumbnail_rel_legacy( + self, server_name, file_id, width, height, content_type + ): + top_level_type, sub_type = content_type.split("/") + file_name = "%i-%i-%s-%s" % (width, height, top_level_type, sub_type) + return os.path.join( + "remote_thumbnail", + server_name, + file_id[0:2], + file_id[2:4], + file_id[4:], + file_name, + ) + def remote_media_thumbnail_dir(self, server_name, file_id): return os.path.join( self.base_path, diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py index 3a352b5631aa..5681677fc93d 100644 --- a/synapse/rest/media/v1/media_storage.py +++ b/synapse/rest/media/v1/media_storage.py @@ -147,6 +147,20 @@ async def fetch_media(self, file_info: FileInfo) -> Optional[Responder]: if os.path.exists(local_path): return FileResponder(open(local_path, "rb")) + # Fallback for paths without method names + # Should be removed in the future + if file_info.thumbnail and file_info.server_name: + legacy_path = self.filepaths.remote_media_thumbnail_rel_legacy( + server_name=file_info.server_name, + file_id=file_info.file_id, + width=file_info.thumbnail_width, + height=file_info.thumbnail_height, + content_type=file_info.thumbnail_type, + ) + legacy_local_path = os.path.join(self.local_media_directory, legacy_path) + if os.path.exists(legacy_local_path): + return FileResponder(open(legacy_local_path, "rb")) + for provider in self.storage_providers: res = await provider.fetch(path, file_info) # type: Any if res: @@ -170,6 +184,20 @@ async def ensure_media_is_in_local_cache(self, file_info: FileInfo) -> str: if os.path.exists(local_path): return local_path + # Fallback for paths without method names + # Should be removed in the future + if file_info.thumbnail and file_info.server_name: + legacy_path = self.filepaths.remote_media_thumbnail_rel_legacy( + server_name=file_info.server_name, + file_id=file_info.file_id, + width=file_info.thumbnail_width, + height=file_info.thumbnail_height, + content_type=file_info.thumbnail_type, + ) + legacy_local_path = os.path.join(self.local_media_directory, legacy_path) + if os.path.exists(legacy_local_path): + return legacy_local_path + dirname = os.path.dirname(local_path) if not os.path.exists(dirname): os.makedirs(dirname) diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index 86557d551277..1d76c761a603 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -17,6 +17,10 @@ from synapse.storage._base import SQLBaseStore from synapse.storage.database import DatabasePool +BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD = ( + "media_repository_drop_index_wo_method" +) + class MediaRepositoryBackgroundUpdateStore(SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): @@ -32,6 +36,59 @@ def __init__(self, database: DatabasePool, db_conn, hs): where_clause="url_cache IS NOT NULL", ) + # The following the updates add the method to the unique constraint of + # the thumbnail databases. That fixes an issue, where thumbnails of the + # same resolution, but different methods could overwrite one another. + # This can happen with custom thumbnail configs or with dynamic thumbnailing. 
+ self.db_pool.updates.register_background_index_update( + update_name="local_media_repository_thumbnails_method_idx", + index_name="local_media_repository_thumbn_media_id_width_height_method_key", + table="local_media_repository_thumbnails", + columns=[ + "media_id", + "thumbnail_width", + "thumbnail_height", + "thumbnail_type", + "thumbnail_method", + ], + unique=True, + ) + + self.db_pool.updates.register_background_index_update( + update_name="remote_media_repository_thumbnails_method_idx", + index_name="remote_media_repository_thumbn_media_origin_id_width_height_method_key", + table="remote_media_cache_thumbnails", + columns=[ + "media_origin", + "media_id", + "thumbnail_width", + "thumbnail_height", + "thumbnail_type", + "thumbnail_method", + ], + unique=True, + ) + + self.db_pool.updates.register_background_update_handler( + BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD, + self._drop_media_index_without_method, + ) + + async def _drop_media_index_without_method(self, progress, batch_size): + def f(txn): + txn.execute( + "ALTER TABLE local_media_repository_thumbnails DROP CONSTRAINT IF EXISTS local_media_repository_thumbn_media_id_thumbnail_width_thum_key" + ) + txn.execute( + "ALTER TABLE remote_media_cache_thumbnails DROP CONSTRAINT IF EXISTS remote_media_repository_thumbn_media_id_thumbnail_width_thum_key" + ) + + await self.db_pool.runInteraction("drop_media_indices_without_method", f) + await self.db_pool.updates._end_background_update( + BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD + ) + return 1 + class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): """Persistence for attachments and avatars""" diff --git a/synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.postgres b/synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.postgres new file mode 100644 index 000000000000..b64926e9c9eb --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.postgres @@ -0,0 +1,33 @@ +/* Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * This adds the method to the unique key constraint of the thumbnail databases. + * Otherwise you can't have a scaled and a cropped thumbnail with the same + * resolution, which happens quite often with dynamic thumbnailing. + * This is the postgres specific migration modifying the table with a background + * migration. 
+ */ + +-- add new index that includes method to local media +INSERT INTO background_updates (update_name, progress_json) VALUES + ('local_media_repository_thumbnails_method_idx', '{}'); + +-- add new index that includes method to remote media +INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES + ('remote_media_repository_thumbnails_method_idx', '{}', 'local_media_repository_thumbnails_method_idx'); + +-- drop old index +INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES + ('media_repository_drop_index_wo_method', '{}', 'remote_media_repository_thumbnails_method_idx'); + diff --git a/synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.sqlite b/synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.sqlite new file mode 100644 index 000000000000..1d0c04b53a8c --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.sqlite @@ -0,0 +1,44 @@ +/* Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * This adds the method to the unique key constraint of the thumbnail databases. + * Otherwise you can't have a scaled and a cropped thumbnail with the same + * resolution, which happens quite often with dynamic thumbnailing. + * This is a sqlite specific migration, since sqlite can't modify the unique + * constraint of a table without recreating it. 
+ */ + +CREATE TABLE local_media_repository_thumbnails_new ( media_id TEXT, thumbnail_width INTEGER, thumbnail_height INTEGER, thumbnail_type TEXT, thumbnail_method TEXT, thumbnail_length INTEGER, UNIQUE ( media_id, thumbnail_width, thumbnail_height, thumbnail_type, thumbnail_method ) ); + +INSERT INTO local_media_repository_thumbnails_new + SELECT media_id, thumbnail_width, thumbnail_height, thumbnail_type, thumbnail_method, thumbnail_length + FROM local_media_repository_thumbnails; + +DROP TABLE local_media_repository_thumbnails; + +ALTER TABLE local_media_repository_thumbnails_new RENAME TO local_media_repository_thumbnails; + +CREATE INDEX local_media_repository_thumbnails_media_id ON local_media_repository_thumbnails (media_id); + + + +CREATE TABLE IF NOT EXISTS remote_media_cache_thumbnails_new ( media_origin TEXT, media_id TEXT, thumbnail_width INTEGER, thumbnail_height INTEGER, thumbnail_method TEXT, thumbnail_type TEXT, thumbnail_length INTEGER, filesystem_id TEXT, UNIQUE ( media_origin, media_id, thumbnail_width, thumbnail_height, thumbnail_type, thumbnail_method ) ); + +INSERT INTO remote_media_cache_thumbnails_new + SELECT media_origin, media_id, thumbnail_width, thumbnail_height, thumbnail_method, thumbnail_type, thumbnail_length, filesystem_id + FROM remote_media_cache_thumbnails; + +DROP TABLE remote_media_cache_thumbnails; + +ALTER TABLE remote_media_cache_thumbnails_new RENAME TO remote_media_cache_thumbnails; From 1553adc83122ac245f523524ae1583cd556ed121 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 8 Sep 2020 17:43:31 +0100 Subject: [PATCH 007/245] Fix mypy error on develop (#8282) --- changelog.d/8282.misc | 1 + synapse/handlers/pagination.py | 9 ++++++--- 2 files changed, 7 insertions(+), 3 deletions(-) create mode 100644 changelog.d/8282.misc diff --git a/changelog.d/8282.misc b/changelog.d/8282.misc new file mode 100644 index 000000000000..b6896a9300d5 --- /dev/null +++ b/changelog.d/8282.misc @@ -0,0 +1 @@ +Clean up type hints for `PaginationConfig`. diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 195a1fd77e3e..ec17d3d8884a 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -373,12 +373,15 @@ async def get_messages( # case "JOIN" would have been returned. assert member_event_id - leave_token = await self.store.get_topological_token_for_event( + leave_token_str = await self.store.get_topological_token_for_event( member_event_id ) - if RoomStreamToken.parse(leave_token).topological < max_topo: + leave_token = RoomStreamToken.parse(leave_token_str) + assert leave_token.topological is not None + + if leave_token.topological < max_topo: from_token = from_token.copy_and_replace( - "room_key", leave_token + "room_key", leave_token_str ) await self.hs.get_handlers().federation_handler.maybe_backfill( From e45b834119468272816c6558ebadb5cc286f148b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 8 Sep 2020 16:50:51 -0400 Subject: [PATCH 008/245] Add types to async_helpers (#8260) --- changelog.d/8260.misc | 1 + mypy.ini | 3 +- synapse/util/async_helpers.py | 135 +++++++++++++++++++++------------- 3 files changed, 88 insertions(+), 51 deletions(-) create mode 100644 changelog.d/8260.misc diff --git a/changelog.d/8260.misc b/changelog.d/8260.misc new file mode 100644 index 000000000000..164eea8b59eb --- /dev/null +++ b/changelog.d/8260.misc @@ -0,0 +1 @@ +Add type hints to `synapse.util.async_helpers`. 
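Much of the typing work in this patch consists of replacing loosely-typed containers with small attrs classes; for example, the values of `Linearizer.key_to_defer` change from two-element lists to `_LinearizerEntry` objects. A standalone sketch of that pattern (the name `_Entry` and the snippet itself are illustrative, not Synapse code):

    import collections

    import attr

    @attr.s(slots=True)
    class _Entry:
        count = attr.ib(type=int)  # how many holders are currently executing
        deferreds = attr.ib(type=collections.OrderedDict)  # queued waiters, in order

    # Before: entry = [0, collections.OrderedDict()] -- mypy cannot express a list
    # whose first item is an int and whose second is an OrderedDict.
    # After: named, typed attributes.
    entry = _Entry(0, collections.OrderedDict())
    entry.count += 1
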
diff --git a/mypy.ini b/mypy.ini index 7764f178569d..460392377e01 100644 --- a/mypy.ini +++ b/mypy.ini @@ -34,7 +34,7 @@ files = synapse/http/federation/well_known_resolver.py, synapse/http/server.py, synapse/http/site.py, - synapse/logging/, + synapse/logging, synapse/metrics, synapse/module_api, synapse/notifier.py, @@ -54,6 +54,7 @@ files = synapse/storage/util, synapse/streams, synapse/types.py, + synapse/util/async_helpers.py, synapse/util/caches/descriptors.py, synapse/util/caches/stream_change_cache.py, synapse/util/metrics.py, diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index bb57e27beb4c..67ce9a5f39a1 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -17,13 +17,25 @@ import collections import logging from contextlib import contextmanager -from typing import Dict, Sequence, Set, Union +from typing import ( + Any, + Callable, + Dict, + Hashable, + Iterable, + List, + Optional, + Set, + TypeVar, + Union, +) import attr from typing_extensions import ContextManager from twisted.internet import defer from twisted.internet.defer import CancelledError +from twisted.internet.interfaces import IReactorTime from twisted.python import failure from synapse.logging.context import ( @@ -54,7 +66,7 @@ class ObservableDeferred: __slots__ = ["_deferred", "_observers", "_result"] - def __init__(self, deferred, consumeErrors=False): + def __init__(self, deferred: defer.Deferred, consumeErrors: bool = False): object.__setattr__(self, "_deferred", deferred) object.__setattr__(self, "_result", None) object.__setattr__(self, "_observers", set()) @@ -111,25 +123,25 @@ def remove(r): success, res = self._result return defer.succeed(res) if success else defer.fail(res) - def observers(self): + def observers(self) -> List[defer.Deferred]: return self._observers - def has_called(self): + def has_called(self) -> bool: return self._result is not None - def has_succeeded(self): + def has_succeeded(self) -> bool: return self._result is not None and self._result[0] is True - def get_result(self): + def get_result(self) -> Any: return self._result[1] - def __getattr__(self, name): + def __getattr__(self, name: str) -> Any: return getattr(self._deferred, name) - def __setattr__(self, name, value): + def __setattr__(self, name: str, value: Any) -> None: setattr(self._deferred, name, value) - def __repr__(self): + def __repr__(self) -> str: return "" % ( id(self), self._result, @@ -137,18 +149,20 @@ def __repr__(self): ) -def concurrently_execute(func, args, limit): - """Executes the function with each argument conncurrently while limiting +def concurrently_execute( + func: Callable, args: Iterable[Any], limit: int +) -> defer.Deferred: + """Executes the function with each argument concurrently while limiting the number of concurrent executions. Args: - func (func): Function to execute, should return a deferred or coroutine. - args (Iterable): List of arguments to pass to func, each invocation of func + func: Function to execute, should return a deferred or coroutine. + args: List of arguments to pass to func, each invocation of func gets a single argument. - limit (int): Maximum number of conccurent executions. + limit: Maximum number of conccurent executions. Returns: - deferred: Resolved when all function invocations have finished. + Deferred[list]: Resolved when all function invocations have finished. 
""" it = iter(args) @@ -167,14 +181,17 @@ async def _concurrently_execute_inner(): ).addErrback(unwrapFirstError) -def yieldable_gather_results(func, iter, *args, **kwargs): +def yieldable_gather_results( + func: Callable, iter: Iterable, *args: Any, **kwargs: Any +) -> defer.Deferred: """Executes the function with each argument concurrently. Args: - func (func): Function to execute that returns a Deferred - iter (iter): An iterable that yields items that get passed as the first + func: Function to execute that returns a Deferred + iter: An iterable that yields items that get passed as the first argument to the function *args: Arguments to be passed to each call to func + **kwargs: Keyword arguments to be passed to each call to func Returns Deferred[list]: Resolved when all functions have been invoked, or errors if @@ -188,24 +205,37 @@ def yieldable_gather_results(func, iter, *args, **kwargs): ).addErrback(unwrapFirstError) +@attr.s(slots=True) +class _LinearizerEntry: + # The number of things executing. + count = attr.ib(type=int) + # Deferreds for the things blocked from executing. + deferreds = attr.ib(type=collections.OrderedDict) + + class Linearizer: """Limits concurrent access to resources based on a key. Useful to ensure only a few things happen at a time on a given resource. Example: - with (yield limiter.queue("test_key")): + with await limiter.queue("test_key"): # do some work. """ - def __init__(self, name=None, max_count=1, clock=None): + def __init__( + self, + name: Optional[str] = None, + max_count: int = 1, + clock: Optional[Clock] = None, + ): """ Args: - max_count(int): The maximum number of concurrent accesses + max_count: The maximum number of concurrent accesses """ if name is None: - self.name = id(self) + self.name = id(self) # type: Union[str, int] else: self.name = name @@ -216,15 +246,10 @@ def __init__(self, name=None, max_count=1, clock=None): self._clock = clock self.max_count = max_count - # key_to_defer is a map from the key to a 2 element list where - # the first element is the number of things executing, and - # the second element is an OrderedDict, where the keys are deferreds for the - # things blocked from executing. - self.key_to_defer = ( - {} - ) # type: Dict[str, Sequence[Union[int, Dict[defer.Deferred, int]]]] + # key_to_defer is a map from the key to a _LinearizerEntry. + self.key_to_defer = {} # type: Dict[Hashable, _LinearizerEntry] - def is_queued(self, key) -> bool: + def is_queued(self, key: Hashable) -> bool: """Checks whether there is a process queued up waiting """ entry = self.key_to_defer.get(key) @@ -234,25 +259,27 @@ def is_queued(self, key) -> bool: # There are waiting deferreds only in the OrderedDict of deferreds is # non-empty. - return bool(entry[1]) + return bool(entry.deferreds) - def queue(self, key): + def queue(self, key: Hashable) -> defer.Deferred: # we avoid doing defer.inlineCallbacks here, so that cancellation works correctly. # (https://twistedmatrix.com/trac/ticket/4632 meant that cancellations were not # propagated inside inlineCallbacks until Twisted 18.7) - entry = self.key_to_defer.setdefault(key, [0, collections.OrderedDict()]) + entry = self.key_to_defer.setdefault( + key, _LinearizerEntry(0, collections.OrderedDict()) + ) # If the number of things executing is greater than the maximum # then add a deferred to the list of blocked items # When one of the things currently executing finishes it will callback # this item so that it can continue executing. 
- if entry[0] >= self.max_count: + if entry.count >= self.max_count: res = self._await_lock(key) else: logger.debug( "Acquired uncontended linearizer lock %r for key %r", self.name, key ) - entry[0] += 1 + entry.count += 1 res = defer.succeed(None) # once we successfully get the lock, we need to return a context manager which @@ -267,15 +294,15 @@ def _ctx_manager(_): # We've finished executing so check if there are any things # blocked waiting to execute and start one of them - entry[0] -= 1 + entry.count -= 1 - if entry[1]: - (next_def, _) = entry[1].popitem(last=False) + if entry.deferreds: + (next_def, _) = entry.deferreds.popitem(last=False) # we need to run the next thing in the sentinel context. with PreserveLoggingContext(): next_def.callback(None) - elif entry[0] == 0: + elif entry.count == 0: # We were the last thing for this key: remove it from the # map. del self.key_to_defer[key] @@ -283,7 +310,7 @@ def _ctx_manager(_): res.addCallback(_ctx_manager) return res - def _await_lock(self, key): + def _await_lock(self, key: Hashable) -> defer.Deferred: """Helper for queue: adds a deferred to the queue Assumes that we've already checked that we've reached the limit of the number @@ -298,11 +325,11 @@ def _await_lock(self, key): logger.debug("Waiting to acquire linearizer lock %r for key %r", self.name, key) new_defer = make_deferred_yieldable(defer.Deferred()) - entry[1][new_defer] = 1 + entry.deferreds[new_defer] = 1 def cb(_r): logger.debug("Acquired linearizer lock %r for key %r", self.name, key) - entry[0] += 1 + entry.count += 1 # if the code holding the lock completes synchronously, then it # will recursively run the next claimant on the list. That can @@ -331,7 +358,7 @@ def eb(e): ) # we just have to take ourselves back out of the queue. - del entry[1][new_defer] + del entry.deferreds[new_defer] return e new_defer.addCallbacks(cb, eb) @@ -419,14 +446,22 @@ def _ctx_manager(): return _ctx_manager() -def _cancelled_to_timed_out_error(value, timeout): +R = TypeVar("R") + + +def _cancelled_to_timed_out_error(value: R, timeout: float) -> R: if isinstance(value, failure.Failure): value.trap(CancelledError) raise defer.TimeoutError(timeout, "Deferred") return value -def timeout_deferred(deferred, timeout, reactor, on_timeout_cancel=None): +def timeout_deferred( + deferred: defer.Deferred, + timeout: float, + reactor: IReactorTime, + on_timeout_cancel: Optional[Callable[[Any, float], Any]] = None, +) -> defer.Deferred: """The in built twisted `Deferred.addTimeout` fails to time out deferreds that have a canceller that throws exceptions. This method creates a new deferred that wraps and times out the given deferred, correctly handling @@ -437,10 +472,10 @@ def timeout_deferred(deferred, timeout, reactor, on_timeout_cancel=None): NOTE: Unlike `Deferred.addTimeout`, this function returns a new deferred Args: - deferred (Deferred) - timeout (float): Timeout in seconds - reactor (twisted.interfaces.IReactorTime): The twisted reactor to use - on_timeout_cancel (callable): A callable which is called immediately + deferred: The Deferred to potentially timeout. + timeout: Timeout in seconds + reactor: The twisted reactor to use + on_timeout_cancel: A callable which is called immediately after the deferred times out, and not if this deferred is otherwise cancelled before the timeout. @@ -452,7 +487,7 @@ def timeout_deferred(deferred, timeout, reactor, on_timeout_cancel=None): CancelledError Failure into a defer.TimeoutError. Returns: - Deferred + A new Deferred. 
""" new_d = defer.Deferred() From a5370072b53e7ea3ebbd9404ee4133508c2d55b2 Mon Sep 17 00:00:00 2001 From: reivilibre <38398653+reivilibre@users.noreply.github.com> Date: Wed, 9 Sep 2020 11:39:39 +0100 Subject: [PATCH 009/245] Don't remember `enabled` of deleted push rules and properly return 404 for missing push rules in `.../actions` and `.../enabled` (#7796) Signed-off-by: Olivier Wilkinson (reivilibre) Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/7796.bugfix | 1 + synapse/rest/client/v1/push_rule.py | 15 +- synapse/storage/databases/main/push_rule.py | 131 ++++- .../10_pushrules_enabled_delete_obsolete.sql | 28 ++ tests/rest/client/v1/test_push_rule_attrs.py | 448 ++++++++++++++++++ 5 files changed, 610 insertions(+), 13 deletions(-) create mode 100644 changelog.d/7796.bugfix create mode 100644 synapse/storage/databases/main/schema/delta/58/10_pushrules_enabled_delete_obsolete.sql create mode 100644 tests/rest/client/v1/test_push_rule_attrs.py diff --git a/changelog.d/7796.bugfix b/changelog.d/7796.bugfix new file mode 100644 index 000000000000..65e5eb42a248 --- /dev/null +++ b/changelog.d/7796.bugfix @@ -0,0 +1 @@ +Fix inconsistent handling of non-existent push rules, and stop tracking the `enabled` state of removed push rules. diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index e781a3bcf409..ddf8ed5e9ca1 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -163,6 +163,18 @@ def notify_user(self, user_id): self.notifier.on_new_event("push_rules_key", stream_id, users=[user_id]) async def set_rule_attr(self, user_id, spec, val): + if spec["attr"] not in ("enabled", "actions"): + # for the sake of potential future expansion, shouldn't report + # 404 in the case of an unknown request so check it corresponds to + # a known attribute first. + raise UnrecognizedRequestError() + + namespaced_rule_id = _namespaced_rule_id_from_spec(spec) + rule_id = spec["rule_id"] + is_default_rule = rule_id.startswith(".") + if is_default_rule: + if namespaced_rule_id not in BASE_RULE_IDS: + raise NotFoundError("Unknown rule %s" % (namespaced_rule_id,)) if spec["attr"] == "enabled": if isinstance(val, dict) and "enabled" in val: val = val["enabled"] @@ -171,9 +183,8 @@ async def set_rule_attr(self, user_id, spec, val): # This should *actually* take a dict, but many clients pass # bools directly, so let's not break them. raise SynapseError(400, "Value for 'enabled' must be boolean") - namespaced_rule_id = _namespaced_rule_id_from_spec(spec) return await self.store.set_push_rule_enabled( - user_id, namespaced_rule_id, val + user_id, namespaced_rule_id, val, is_default_rule ) elif spec["attr"] == "actions": actions = val.get("actions") diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index 0de802a86bde..9790a3199847 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -13,11 +13,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- import abc import logging from typing import List, Tuple, Union +from synapse.api.errors import NotFoundError, StoreError from synapse.push.baserules import list_with_base_rules from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.storage._base import SQLBaseStore, db_to_json @@ -27,6 +27,7 @@ from synapse.storage.databases.main.pusher import PusherWorkerStore from synapse.storage.databases.main.receipts import ReceiptsWorkerStore from synapse.storage.databases.main.roommember import RoomMemberWorkerStore +from synapse.storage.engines import PostgresEngine, Sqlite3Engine from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException from synapse.storage.util.id_generators import StreamIdGenerator from synapse.util import json_encoder @@ -540,6 +541,25 @@ def _upsert_push_rule_txn( }, ) + # ensure we have a push_rules_enable row + # enabledness defaults to true + if isinstance(self.database_engine, PostgresEngine): + sql = """ + INSERT INTO push_rules_enable (id, user_name, rule_id, enabled) + VALUES (?, ?, ?, ?) + ON CONFLICT DO NOTHING + """ + elif isinstance(self.database_engine, Sqlite3Engine): + sql = """ + INSERT OR IGNORE INTO push_rules_enable (id, user_name, rule_id, enabled) + VALUES (?, ?, ?, ?) + """ + else: + raise RuntimeError("Unknown database engine") + + new_enable_id = self._push_rules_enable_id_gen.get_next() + txn.execute(sql, (new_enable_id, user_id, rule_id, 1)) + async def delete_push_rule(self, user_id: str, rule_id: str) -> None: """ Delete a push rule. Args specify the row to be deleted and can be @@ -552,6 +572,12 @@ async def delete_push_rule(self, user_id: str, rule_id: str) -> None: """ def delete_push_rule_txn(txn, stream_id, event_stream_ordering): + # we don't use simple_delete_one_txn because that would fail if the + # user did not have a push_rule_enable row. + self.db_pool.simple_delete_txn( + txn, "push_rules_enable", {"user_name": user_id, "rule_id": rule_id} + ) + self.db_pool.simple_delete_one_txn( txn, "push_rules", {"user_name": user_id, "rule_id": rule_id} ) @@ -570,10 +596,29 @@ def delete_push_rule_txn(txn, stream_id, event_stream_ordering): event_stream_ordering, ) - async def set_push_rule_enabled(self, user_id, rule_id, enabled) -> None: + async def set_push_rule_enabled( + self, user_id: str, rule_id: str, enabled: bool, is_default_rule: bool + ) -> None: + """ + Sets the `enabled` state of a push rule. + + Args: + user_id: the user ID of the user who wishes to enable/disable the rule + e.g. '@tina:example.org' + rule_id: the full rule ID of the rule to be enabled/disabled + e.g. 'global/override/.m.rule.roomnotif' + or 'global/override/myCustomRule' + enabled: True if the rule is to be enabled, False if it is to be + disabled + is_default_rule: True if and only if this is a server-default rule. + This skips the check for existence (as only user-created rules + are always stored in the database `push_rules` table). + + Raises: + NotFoundError if the rule does not exist. 
+ """ with await self._push_rules_stream_id_gen.get_next() as stream_id: event_stream_ordering = self._stream_id_gen.get_current_token() - await self.db_pool.runInteraction( "_set_push_rule_enabled_txn", self._set_push_rule_enabled_txn, @@ -582,12 +627,47 @@ async def set_push_rule_enabled(self, user_id, rule_id, enabled) -> None: user_id, rule_id, enabled, + is_default_rule, ) def _set_push_rule_enabled_txn( - self, txn, stream_id, event_stream_ordering, user_id, rule_id, enabled + self, + txn, + stream_id, + event_stream_ordering, + user_id, + rule_id, + enabled, + is_default_rule, ): new_id = self._push_rules_enable_id_gen.get_next() + + if not is_default_rule: + # first check it exists; we need to lock for key share so that a + # transaction that deletes the push rule will conflict with this one. + # We also need a push_rule_enable row to exist for every push_rules + # row, otherwise it is possible to simultaneously delete a push rule + # (that has no _enable row) and enable it, resulting in a dangling + # _enable row. To solve this: we either need to use SERIALISABLE or + # ensure we always have a push_rule_enable row for every push_rule + # row. We chose the latter. + for_key_share = "FOR KEY SHARE" + if not isinstance(self.database_engine, PostgresEngine): + # For key share is not applicable/available on SQLite + for_key_share = "" + sql = ( + """ + SELECT 1 FROM push_rules + WHERE user_name = ? AND rule_id = ? + %s + """ + % for_key_share + ) + txn.execute(sql, (user_id, rule_id)) + if txn.fetchone() is None: + # needed to set NOT_FOUND code. + raise NotFoundError("Push rule does not exist.") + self.db_pool.simple_upsert_txn( txn, "push_rules_enable", @@ -606,8 +686,30 @@ def _set_push_rule_enabled_txn( ) async def set_push_rule_actions( - self, user_id, rule_id, actions, is_default_rule + self, + user_id: str, + rule_id: str, + actions: List[Union[dict, str]], + is_default_rule: bool, ) -> None: + """ + Sets the `actions` state of a push rule. + + Will throw NotFoundError if the rule does not exist; the Code for this + is NOT_FOUND. + + Args: + user_id: the user ID of the user who wishes to enable/disable the rule + e.g. '@tina:example.org' + rule_id: the full rule ID of the rule to be enabled/disabled + e.g. 'global/override/.m.rule.roomnotif' + or 'global/override/myCustomRule' + actions: A list of actions (each action being a dict or string), + e.g. ["notify", {"set_tweak": "highlight", "value": false}] + is_default_rule: True if and only if this is a server-default rule. + This skips the check for existence (as only user-created rules + are always stored in the database `push_rules` table). 
+ """ actions_json = json_encoder.encode(actions) def set_push_rule_actions_txn(txn, stream_id, event_stream_ordering): @@ -629,12 +731,19 @@ def set_push_rule_actions_txn(txn, stream_id, event_stream_ordering): update_stream=False, ) else: - self.db_pool.simple_update_one_txn( - txn, - "push_rules", - {"user_name": user_id, "rule_id": rule_id}, - {"actions": actions_json}, - ) + try: + self.db_pool.simple_update_one_txn( + txn, + "push_rules", + {"user_name": user_id, "rule_id": rule_id}, + {"actions": actions_json}, + ) + except StoreError as serr: + if serr.code == 404: + # this sets the NOT_FOUND error Code + raise NotFoundError("Push rule does not exist") + else: + raise self._insert_push_rules_update_txn( txn, diff --git a/synapse/storage/databases/main/schema/delta/58/10_pushrules_enabled_delete_obsolete.sql b/synapse/storage/databases/main/schema/delta/58/10_pushrules_enabled_delete_obsolete.sql new file mode 100644 index 000000000000..847aebd85eae --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/58/10_pushrules_enabled_delete_obsolete.sql @@ -0,0 +1,28 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + Delete stuck 'enabled' bits that correspond to deleted or non-existent push rules. + We ignore rules that are server-default rules because they are not defined + in the `push_rules` table. +**/ + +DELETE FROM push_rules_enable WHERE + rule_id NOT LIKE 'global/%/.m.rule.%' + AND NOT EXISTS ( + SELECT 1 FROM push_rules + WHERE push_rules.user_name = push_rules_enable.user_name + AND push_rules.rule_id = push_rules_enable.rule_id + ); diff --git a/tests/rest/client/v1/test_push_rule_attrs.py b/tests/rest/client/v1/test_push_rule_attrs.py new file mode 100644 index 000000000000..081052f6a65e --- /dev/null +++ b/tests/rest/client/v1/test_push_rule_attrs.py @@ -0,0 +1,448 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import synapse +from synapse.api.errors import Codes +from synapse.rest.client.v1 import login, push_rule, room + +from tests.unittest import HomeserverTestCase + + +class PushRuleAttributesTestCase(HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets_for_client_rest_resource, + room.register_servlets, + login.register_servlets, + push_rule.register_servlets, + ] + hijack_auth = False + + def test_enabled_on_creation(self): + """ + Tests the GET and PUT of push rules' `enabled` endpoints. 
+ Tests that a rule is enabled upon creation, even though a rule with that + ruleId existed previously and was disabled. + """ + self.register_user("user", "pass") + token = self.login("user", "pass") + + body = { + "conditions": [ + {"kind": "event_match", "key": "sender", "pattern": "@user2:hs"} + ], + "actions": ["notify", {"set_tweak": "highlight"}], + } + + # PUT a new rule + request, channel = self.make_request( + "PUT", "/pushrules/global/override/best.friend", body, access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # GET enabled for that new rule + request, channel = self.make_request( + "GET", "/pushrules/global/override/best.friend/enabled", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["enabled"], True) + + def test_enabled_on_recreation(self): + """ + Tests the GET and PUT of push rules' `enabled` endpoints. + Tests that a rule is enabled upon creation, even if a rule with that + ruleId existed previously and was disabled. + """ + self.register_user("user", "pass") + token = self.login("user", "pass") + + body = { + "conditions": [ + {"kind": "event_match", "key": "sender", "pattern": "@user2:hs"} + ], + "actions": ["notify", {"set_tweak": "highlight"}], + } + + # PUT a new rule + request, channel = self.make_request( + "PUT", "/pushrules/global/override/best.friend", body, access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # disable the rule + request, channel = self.make_request( + "PUT", + "/pushrules/global/override/best.friend/enabled", + {"enabled": False}, + access_token=token, + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # check rule disabled + request, channel = self.make_request( + "GET", "/pushrules/global/override/best.friend/enabled", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["enabled"], False) + + # DELETE the rule + request, channel = self.make_request( + "DELETE", "/pushrules/global/override/best.friend", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # PUT a new rule + request, channel = self.make_request( + "PUT", "/pushrules/global/override/best.friend", body, access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # GET enabled for that new rule + request, channel = self.make_request( + "GET", "/pushrules/global/override/best.friend/enabled", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["enabled"], True) + + def test_enabled_disable(self): + """ + Tests the GET and PUT of push rules' `enabled` endpoints. + Tests that a rule is disabled and enabled when we ask for it. 
+ """ + self.register_user("user", "pass") + token = self.login("user", "pass") + + body = { + "conditions": [ + {"kind": "event_match", "key": "sender", "pattern": "@user2:hs"} + ], + "actions": ["notify", {"set_tweak": "highlight"}], + } + + # PUT a new rule + request, channel = self.make_request( + "PUT", "/pushrules/global/override/best.friend", body, access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # disable the rule + request, channel = self.make_request( + "PUT", + "/pushrules/global/override/best.friend/enabled", + {"enabled": False}, + access_token=token, + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # check rule disabled + request, channel = self.make_request( + "GET", "/pushrules/global/override/best.friend/enabled", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["enabled"], False) + + # re-enable the rule + request, channel = self.make_request( + "PUT", + "/pushrules/global/override/best.friend/enabled", + {"enabled": True}, + access_token=token, + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # check rule enabled + request, channel = self.make_request( + "GET", "/pushrules/global/override/best.friend/enabled", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["enabled"], True) + + def test_enabled_404_when_get_non_existent(self): + """ + Tests that `enabled` gives 404 when the rule doesn't exist. + """ + self.register_user("user", "pass") + token = self.login("user", "pass") + + body = { + "conditions": [ + {"kind": "event_match", "key": "sender", "pattern": "@user2:hs"} + ], + "actions": ["notify", {"set_tweak": "highlight"}], + } + + # check 404 for never-heard-of rule + request, channel = self.make_request( + "GET", "/pushrules/global/override/best.friend/enabled", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 404) + self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) + + # PUT a new rule + request, channel = self.make_request( + "PUT", "/pushrules/global/override/best.friend", body, access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # GET enabled for that new rule + request, channel = self.make_request( + "GET", "/pushrules/global/override/best.friend/enabled", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # DELETE the rule + request, channel = self.make_request( + "DELETE", "/pushrules/global/override/best.friend", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # check 404 for deleted rule + request, channel = self.make_request( + "GET", "/pushrules/global/override/best.friend/enabled", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 404) + self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) + + def test_enabled_404_when_get_non_existent_server_rule(self): + """ + Tests that `enabled` gives 404 when the server-default rule doesn't exist. 
+ """ + self.register_user("user", "pass") + token = self.login("user", "pass") + + # check 404 for never-heard-of rule + request, channel = self.make_request( + "GET", "/pushrules/global/override/.m.muahahaha/enabled", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 404) + self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) + + def test_enabled_404_when_put_non_existent_rule(self): + """ + Tests that `enabled` gives 404 when we put to a rule that doesn't exist. + """ + self.register_user("user", "pass") + token = self.login("user", "pass") + + # enable & check 404 for never-heard-of rule + request, channel = self.make_request( + "PUT", + "/pushrules/global/override/best.friend/enabled", + {"enabled": True}, + access_token=token, + ) + self.render(request) + self.assertEqual(channel.code, 404) + self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) + + def test_enabled_404_when_put_non_existent_server_rule(self): + """ + Tests that `enabled` gives 404 when we put to a server-default rule that doesn't exist. + """ + self.register_user("user", "pass") + token = self.login("user", "pass") + + # enable & check 404 for never-heard-of rule + request, channel = self.make_request( + "PUT", + "/pushrules/global/override/.m.muahahah/enabled", + {"enabled": True}, + access_token=token, + ) + self.render(request) + self.assertEqual(channel.code, 404) + self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) + + def test_actions_get(self): + """ + Tests that `actions` gives you what you expect on a fresh rule. + """ + self.register_user("user", "pass") + token = self.login("user", "pass") + + body = { + "conditions": [ + {"kind": "event_match", "key": "sender", "pattern": "@user2:hs"} + ], + "actions": ["notify", {"set_tweak": "highlight"}], + } + + # PUT a new rule + request, channel = self.make_request( + "PUT", "/pushrules/global/override/best.friend", body, access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # GET actions for that new rule + request, channel = self.make_request( + "GET", "/pushrules/global/override/best.friend/actions", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + self.assertEqual( + channel.json_body["actions"], ["notify", {"set_tweak": "highlight"}] + ) + + def test_actions_put(self): + """ + Tests that PUT on actions updates the value you'd get from GET. + """ + self.register_user("user", "pass") + token = self.login("user", "pass") + + body = { + "conditions": [ + {"kind": "event_match", "key": "sender", "pattern": "@user2:hs"} + ], + "actions": ["notify", {"set_tweak": "highlight"}], + } + + # PUT a new rule + request, channel = self.make_request( + "PUT", "/pushrules/global/override/best.friend", body, access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # change the rule actions + request, channel = self.make_request( + "PUT", + "/pushrules/global/override/best.friend/actions", + {"actions": ["dont_notify"]}, + access_token=token, + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # GET actions for that new rule + request, channel = self.make_request( + "GET", "/pushrules/global/override/best.friend/actions", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["actions"], ["dont_notify"]) + + def test_actions_404_when_get_non_existent(self): + """ + Tests that `actions` gives 404 when the rule doesn't exist. 
+ """ + self.register_user("user", "pass") + token = self.login("user", "pass") + + body = { + "conditions": [ + {"kind": "event_match", "key": "sender", "pattern": "@user2:hs"} + ], + "actions": ["notify", {"set_tweak": "highlight"}], + } + + # check 404 for never-heard-of rule + request, channel = self.make_request( + "GET", "/pushrules/global/override/best.friend/enabled", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 404) + self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) + + # PUT a new rule + request, channel = self.make_request( + "PUT", "/pushrules/global/override/best.friend", body, access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # DELETE the rule + request, channel = self.make_request( + "DELETE", "/pushrules/global/override/best.friend", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # check 404 for deleted rule + request, channel = self.make_request( + "GET", "/pushrules/global/override/best.friend/enabled", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 404) + self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) + + def test_actions_404_when_get_non_existent_server_rule(self): + """ + Tests that `actions` gives 404 when the server-default rule doesn't exist. + """ + self.register_user("user", "pass") + token = self.login("user", "pass") + + # check 404 for never-heard-of rule + request, channel = self.make_request( + "GET", "/pushrules/global/override/.m.muahahaha/actions", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 404) + self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) + + def test_actions_404_when_put_non_existent_rule(self): + """ + Tests that `actions` gives 404 when putting to a rule that doesn't exist. + """ + self.register_user("user", "pass") + token = self.login("user", "pass") + + # enable & check 404 for never-heard-of rule + request, channel = self.make_request( + "PUT", + "/pushrules/global/override/best.friend/actions", + {"actions": ["dont_notify"]}, + access_token=token, + ) + self.render(request) + self.assertEqual(channel.code, 404) + self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) + + def test_actions_404_when_put_non_existent_server_rule(self): + """ + Tests that `actions` gives 404 when putting to a server-default rule that doesn't exist. + """ + self.register_user("user", "pass") + token = self.login("user", "pass") + + # enable & check 404 for never-heard-of rule + request, channel = self.make_request( + "PUT", + "/pushrules/global/override/.m.muahahah/actions", + {"actions": ["dont_notify"]}, + access_token=token, + ) + self.render(request) + self.assertEqual(channel.code, 404) + self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) From 453dfe210b577cb6900cf2560e514133dba9ec64 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 9 Sep 2020 13:25:59 +0100 Subject: [PATCH 010/245] blacklist MSC2753 sytests until it's implemented in synapse (#8285) Dendrite's implementing MSC2753 over at https://github.com/matrix-org/dendrite/pull/1370 to prove the implementation for MSC purposes, and so sytest has sprouted tests for it over at https://github.com/matrix-org/sytest/pull/944. But we don't want them to run on synapse until synapse implements it. 
--- changelog.d/8285.misc | 1 + sytest-blacklist | 8 ++++++++ 2 files changed, 9 insertions(+) create mode 100644 changelog.d/8285.misc diff --git a/changelog.d/8285.misc b/changelog.d/8285.misc new file mode 100644 index 000000000000..4646664ba1c8 --- /dev/null +++ b/changelog.d/8285.misc @@ -0,0 +1 @@ +Blacklist [MSC2753](https://github.com/matrix-org/matrix-doc/pull/2753) SyTests until it is implemented. \ No newline at end of file diff --git a/sytest-blacklist b/sytest-blacklist index 79b2d4402aaa..b563448016f1 100644 --- a/sytest-blacklist +++ b/sytest-blacklist @@ -36,3 +36,11 @@ Inbound federation of state requires event_id as a mandatory paramater # Blacklisted until https://github.com/matrix-org/synapse/pull/6486 lands Can upload self-signing keys + +# Blacklisted until MSC2753 is implemented +Local users can peek into world_readable rooms by room ID +We can't peek into rooms with shared history_visibility +We can't peek into rooms with invited history_visibility +We can't peek into rooms with joined history_visibility +Local users can peek by room alias +Peeked rooms only turn up in the sync for the device who peeked them From e7fd336a53a4ca489cdafc389b494d5477019dc0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 9 Sep 2020 16:17:50 +0100 Subject: [PATCH 011/245] Fixup pusher pool notifications --- synapse/handlers/federation.py | 2 +- synapse/handlers/message.py | 2 +- synapse/push/emailpusher.py | 2 +- synapse/push/httppusher.py | 2 +- synapse/push/pusherpool.py | 19 ++++++++++++++++--- synapse/replication/tcp/client.py | 3 ++- tests/handlers/test_typing.py | 1 + 7 files changed, 23 insertions(+), 8 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 43f2986f8955..74d7ac8a67f2 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2970,7 +2970,7 @@ async def _notify_persisted_event( event, event_stream_id, max_stream_id, extra_users=extra_users ) - await self.pusher_pool.on_new_notifications(event_stream_id, max_stream_id) + await self.pusher_pool.on_new_notifications(max_stream_id) async def _clean_room_for_join(self, room_id: str) -> None: """Called to clean up any data in DB for a given room, ready for the diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 8a7b4916cd6a..d1556659e3e5 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1145,7 +1145,7 @@ def is_inviter_member_event(e): # If there's an expiry timestamp on the event, schedule its expiry. 
self._message_handler.maybe_schedule_expiry(event) - await self.pusher_pool.on_new_notifications(event_stream_id, max_stream_id) + await self.pusher_pool.on_new_notifications(max_stream_id) def _notify(): try: diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index b7ea4438e082..28bd8ab7481e 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -91,7 +91,7 @@ def on_stop(self): pass self.timed_call = None - def on_new_notifications(self, min_stream_ordering, max_stream_ordering): + def on_new_notifications(self, max_stream_ordering): if self.max_stream_ordering: self.max_stream_ordering = max( max_stream_ordering, self.max_stream_ordering diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index f21fa9b65905..26706bf3e1ee 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -114,7 +114,7 @@ def on_started(self, should_check_for_notifs): if should_check_for_notifs: self._start_processing() - def on_new_notifications(self, min_stream_ordering, max_stream_ordering): + def on_new_notifications(self, max_stream_ordering): self.max_stream_ordering = max( max_stream_ordering, self.max_stream_ordering or 0 ) diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 3c3262a88c53..fa8473bf8d00 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -64,6 +64,12 @@ def __init__(self, hs: "HomeServer"): self._pusher_shard_config = hs.config.push.pusher_shard_config self._instance_name = hs.get_instance_name() + # Record the last stream ID that we were poked about so we can get + # changes since then. We set this to the current max stream ID on + # startup as every individual pusher will have checked for changes on + # startup. + self._last_room_stream_id_seen = self.store.get_room_max_stream_ordering() + # map from user id to app_id:pushkey to pusher self.pushers = {} # type: Dict[str, Dict[str, Union[HttpPusher, EmailPusher]]] @@ -178,20 +184,27 @@ async def remove_pushers_by_access_token(self, user_id, access_tokens): ) await self.remove_pusher(p["app_id"], p["pushkey"], p["user_name"]) - async def on_new_notifications(self, min_stream_id, max_stream_id): + async def on_new_notifications(self, max_stream_id): if not self.pushers: # nothing to do here. return + if max_stream_id < self._last_room_stream_id_seen: + # Nothing to do + return + + prev_stream_id = self._last_room_stream_id_seen + self._last_room_stream_id_seen = max_stream_id + try: users_affected = await self.store.get_push_action_users_in_range( - min_stream_id, max_stream_id + prev_stream_id, max_stream_id ) for u in users_affected: if u in self.pushers: for p in self.pushers[u].values(): - p.on_new_notifications(min_stream_id, max_stream_id) + p.on_new_notifications(max_stream_id) except Exception: logger.exception("Exception in pusher on_new_notifications") diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index d6ecf5b32703..ccd3147dfdaf 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -154,7 +154,8 @@ async def on_rdata( max_token = self.store.get_room_max_stream_ordering() self.notifier.on_new_room_event(event, token, max_token, extra_users) - await self.pusher_pool.on_new_notifications(token, token) + max_token = self.store.get_room_max_stream_ordering() + await self.pusher_pool.on_new_notifications(max_token) # Notify any waiting deferreds. 
The list is ordered by position so we # just iterate through the list until we reach a position that is diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index ae6bc24f4cbb..f306a09bfaa7 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -80,6 +80,7 @@ def make_homeserver(self, reactor, clock): "get_user_directory_stream_pos", "get_current_state_deltas", "get_device_updates_by_remote", + "get_room_max_stream_ordering", ] ) From dc9dcdbd59d4f839c7a96780f7464460fae27851 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 9 Sep 2020 16:19:22 +0100 Subject: [PATCH 012/245] Revert "Fixup pusher pool notifications" This reverts commit e7fd336a53a4ca489cdafc389b494d5477019dc0. --- synapse/handlers/federation.py | 2 +- synapse/handlers/message.py | 2 +- synapse/push/emailpusher.py | 2 +- synapse/push/httppusher.py | 2 +- synapse/push/pusherpool.py | 19 +++---------------- synapse/replication/tcp/client.py | 3 +-- tests/handlers/test_typing.py | 1 - 7 files changed, 8 insertions(+), 23 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 74d7ac8a67f2..43f2986f8955 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2970,7 +2970,7 @@ async def _notify_persisted_event( event, event_stream_id, max_stream_id, extra_users=extra_users ) - await self.pusher_pool.on_new_notifications(max_stream_id) + await self.pusher_pool.on_new_notifications(event_stream_id, max_stream_id) async def _clean_room_for_join(self, room_id: str) -> None: """Called to clean up any data in DB for a given room, ready for the diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index d1556659e3e5..8a7b4916cd6a 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1145,7 +1145,7 @@ def is_inviter_member_event(e): # If there's an expiry timestamp on the event, schedule its expiry. 
self._message_handler.maybe_schedule_expiry(event) - await self.pusher_pool.on_new_notifications(max_stream_id) + await self.pusher_pool.on_new_notifications(event_stream_id, max_stream_id) def _notify(): try: diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index 28bd8ab7481e..b7ea4438e082 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -91,7 +91,7 @@ def on_stop(self): pass self.timed_call = None - def on_new_notifications(self, max_stream_ordering): + def on_new_notifications(self, min_stream_ordering, max_stream_ordering): if self.max_stream_ordering: self.max_stream_ordering = max( max_stream_ordering, self.max_stream_ordering diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 26706bf3e1ee..f21fa9b65905 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -114,7 +114,7 @@ def on_started(self, should_check_for_notifs): if should_check_for_notifs: self._start_processing() - def on_new_notifications(self, max_stream_ordering): + def on_new_notifications(self, min_stream_ordering, max_stream_ordering): self.max_stream_ordering = max( max_stream_ordering, self.max_stream_ordering or 0 ) diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index fa8473bf8d00..3c3262a88c53 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -64,12 +64,6 @@ def __init__(self, hs: "HomeServer"): self._pusher_shard_config = hs.config.push.pusher_shard_config self._instance_name = hs.get_instance_name() - # Record the last stream ID that we were poked about so we can get - # changes since then. We set this to the current max stream ID on - # startup as every individual pusher will have checked for changes on - # startup. - self._last_room_stream_id_seen = self.store.get_room_max_stream_ordering() - # map from user id to app_id:pushkey to pusher self.pushers = {} # type: Dict[str, Dict[str, Union[HttpPusher, EmailPusher]]] @@ -184,27 +178,20 @@ async def remove_pushers_by_access_token(self, user_id, access_tokens): ) await self.remove_pusher(p["app_id"], p["pushkey"], p["user_name"]) - async def on_new_notifications(self, max_stream_id): + async def on_new_notifications(self, min_stream_id, max_stream_id): if not self.pushers: # nothing to do here. return - if max_stream_id < self._last_room_stream_id_seen: - # Nothing to do - return - - prev_stream_id = self._last_room_stream_id_seen - self._last_room_stream_id_seen = max_stream_id - try: users_affected = await self.store.get_push_action_users_in_range( - prev_stream_id, max_stream_id + min_stream_id, max_stream_id ) for u in users_affected: if u in self.pushers: for p in self.pushers[u].values(): - p.on_new_notifications(max_stream_id) + p.on_new_notifications(min_stream_id, max_stream_id) except Exception: logger.exception("Exception in pusher on_new_notifications") diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index ccd3147dfdaf..d6ecf5b32703 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -154,8 +154,7 @@ async def on_rdata( max_token = self.store.get_room_max_stream_ordering() self.notifier.on_new_room_event(event, token, max_token, extra_users) - max_token = self.store.get_room_max_stream_ordering() - await self.pusher_pool.on_new_notifications(max_token) + await self.pusher_pool.on_new_notifications(token, token) # Notify any waiting deferreds. 
The list is ordered by position so we # just iterate through the list until we reach a position that is diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index f306a09bfaa7..ae6bc24f4cbb 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -80,7 +80,6 @@ def make_homeserver(self, reactor, clock): "get_user_directory_stream_pos", "get_current_state_deltas", "get_device_updates_by_remote", - "get_room_max_stream_ordering", ] ) From c9dbee50aefc22390f600a0219ca7fa1ae9acd88 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 9 Sep 2020 16:56:08 +0100 Subject: [PATCH 013/245] Fixup pusher pool notifications (#8287) `pusher_pool.on_new_notifications` expected a min and max stream ID, however that was not what we were passing in. Instead, let's just pass it the current max stream ID and have it track the last stream ID it got passed. I believe that it mostly worked as we called the function for every event. However, it would break for events that got persisted out of order, i.e, that were persisted but the max stream ID wasn't incremented as not all preceding events had finished persisting, and push for that event would be delayed until another event got pushed to the effected users. --- changelog.d/8287.bugfix | 1 + synapse/handlers/federation.py | 2 +- synapse/handlers/message.py | 2 +- synapse/push/emailpusher.py | 2 +- synapse/push/httppusher.py | 2 +- synapse/push/pusherpool.py | 19 ++++++++++++++++--- synapse/replication/tcp/client.py | 3 ++- tests/handlers/test_typing.py | 1 + 8 files changed, 24 insertions(+), 8 deletions(-) create mode 100644 changelog.d/8287.bugfix diff --git a/changelog.d/8287.bugfix b/changelog.d/8287.bugfix new file mode 100644 index 000000000000..839781aa0753 --- /dev/null +++ b/changelog.d/8287.bugfix @@ -0,0 +1 @@ +Fix edge case where push could get delayed for a user until a later event was pushed. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 43f2986f8955..74d7ac8a67f2 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2970,7 +2970,7 @@ async def _notify_persisted_event( event, event_stream_id, max_stream_id, extra_users=extra_users ) - await self.pusher_pool.on_new_notifications(event_stream_id, max_stream_id) + await self.pusher_pool.on_new_notifications(max_stream_id) async def _clean_room_for_join(self, room_id: str) -> None: """Called to clean up any data in DB for a given room, ready for the diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 8a7b4916cd6a..d1556659e3e5 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1145,7 +1145,7 @@ def is_inviter_member_event(e): # If there's an expiry timestamp on the event, schedule its expiry. 
self._message_handler.maybe_schedule_expiry(event) - await self.pusher_pool.on_new_notifications(event_stream_id, max_stream_id) + await self.pusher_pool.on_new_notifications(max_stream_id) def _notify(): try: diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index b7ea4438e082..28bd8ab7481e 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -91,7 +91,7 @@ def on_stop(self): pass self.timed_call = None - def on_new_notifications(self, min_stream_ordering, max_stream_ordering): + def on_new_notifications(self, max_stream_ordering): if self.max_stream_ordering: self.max_stream_ordering = max( max_stream_ordering, self.max_stream_ordering diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index f21fa9b65905..26706bf3e1ee 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -114,7 +114,7 @@ def on_started(self, should_check_for_notifs): if should_check_for_notifs: self._start_processing() - def on_new_notifications(self, min_stream_ordering, max_stream_ordering): + def on_new_notifications(self, max_stream_ordering): self.max_stream_ordering = max( max_stream_ordering, self.max_stream_ordering or 0 ) diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 3c3262a88c53..fa8473bf8d00 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -64,6 +64,12 @@ def __init__(self, hs: "HomeServer"): self._pusher_shard_config = hs.config.push.pusher_shard_config self._instance_name = hs.get_instance_name() + # Record the last stream ID that we were poked about so we can get + # changes since then. We set this to the current max stream ID on + # startup as every individual pusher will have checked for changes on + # startup. + self._last_room_stream_id_seen = self.store.get_room_max_stream_ordering() + # map from user id to app_id:pushkey to pusher self.pushers = {} # type: Dict[str, Dict[str, Union[HttpPusher, EmailPusher]]] @@ -178,20 +184,27 @@ async def remove_pushers_by_access_token(self, user_id, access_tokens): ) await self.remove_pusher(p["app_id"], p["pushkey"], p["user_name"]) - async def on_new_notifications(self, min_stream_id, max_stream_id): + async def on_new_notifications(self, max_stream_id): if not self.pushers: # nothing to do here. return + if max_stream_id < self._last_room_stream_id_seen: + # Nothing to do + return + + prev_stream_id = self._last_room_stream_id_seen + self._last_room_stream_id_seen = max_stream_id + try: users_affected = await self.store.get_push_action_users_in_range( - min_stream_id, max_stream_id + prev_stream_id, max_stream_id ) for u in users_affected: if u in self.pushers: for p in self.pushers[u].values(): - p.on_new_notifications(min_stream_id, max_stream_id) + p.on_new_notifications(max_stream_id) except Exception: logger.exception("Exception in pusher on_new_notifications") diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index d6ecf5b32703..ccd3147dfdaf 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -154,7 +154,8 @@ async def on_rdata( max_token = self.store.get_room_max_stream_ordering() self.notifier.on_new_room_event(event, token, max_token, extra_users) - await self.pusher_pool.on_new_notifications(token, token) + max_token = self.store.get_room_max_stream_ordering() + await self.pusher_pool.on_new_notifications(max_token) # Notify any waiting deferreds. 
The list is ordered by position so we # just iterate through the list until we reach a position that is diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index ae6bc24f4cbb..f306a09bfaa7 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -80,6 +80,7 @@ def make_homeserver(self, reactor, clock): "get_user_directory_stream_pos", "get_current_state_deltas", "get_device_updates_by_remote", + "get_room_max_stream_ordering", ] ) From 2ea1c682490a19e0e2df069702c1dbe419389fa4 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 9 Sep 2020 12:22:00 -0400 Subject: [PATCH 014/245] Remove some unused distributor signals (#8216) Removes the `user_joined_room` and stops calling it since there are no observers. Also cleans-up some other unused signals and related code. --- changelog.d/8216.misc | 1 + synapse/handlers/events.py | 4 --- synapse/handlers/federation.py | 43 +--------------------- synapse/handlers/room_member.py | 42 +++------------------- synapse/handlers/room_member_worker.py | 9 ----- synapse/replication/http/membership.py | 10 +++--- synapse/util/distributor.py | 50 +++++--------------------- 7 files changed, 18 insertions(+), 141 deletions(-) create mode 100644 changelog.d/8216.misc diff --git a/changelog.d/8216.misc b/changelog.d/8216.misc new file mode 100644 index 000000000000..b38911b0e582 --- /dev/null +++ b/changelog.d/8216.misc @@ -0,0 +1 @@ +Simplify the distributor code to avoid unnecessary work. diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index b05e32f45771..fdce54c5c30b 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -39,10 +39,6 @@ class EventStreamHandler(BaseHandler): def __init__(self, hs: "HomeServer"): super(EventStreamHandler, self).__init__(hs) - self.distributor = hs.get_distributor() - self.distributor.declare("started_user_eventstream") - self.distributor.declare("stopped_user_eventstream") - self.clock = hs.get_clock() self.notifier = hs.get_notifier() diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 74d7ac8a67f2..be9b0701a062 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -69,7 +69,6 @@ ReplicationFederationSendEventsRestServlet, ReplicationStoreRoomOnInviteRestServlet, ) -from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet from synapse.state import StateResolutionStore, resolve_events_with_store from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.types import ( @@ -80,7 +79,6 @@ get_domain_from_id, ) from synapse.util.async_helpers import Linearizer, concurrently_execute -from synapse.util.distributor import user_joined_room from synapse.util.retryutils import NotRetryingDestination from synapse.util.stringutils import shortstr from synapse.visibility import filter_events_for_server @@ -141,9 +139,6 @@ def __init__(self, hs): self._replication = hs.get_replication_data_handler() self._send_events = ReplicationFederationSendEventsRestServlet.make_client(hs) - self._notify_user_membership_change = ReplicationUserJoinedLeftRoomRestServlet.make_client( - hs - ) self._clean_room_for_join_client = ReplicationCleanRoomRestServlet.make_client( hs ) @@ -704,31 +699,10 @@ async def _process_received_pdu( logger.debug("[%s %s] Processing event: %s", room_id, event_id, event) try: - context = await self._handle_new_event(origin, event, state=state) + await self._handle_new_event(origin, event, state=state) except 
AuthError as e: raise FederationError("ERROR", e.code, e.msg, affected=event.event_id) - if event.type == EventTypes.Member: - if event.membership == Membership.JOIN: - # Only fire user_joined_room if the user has acutally - # joined the room. Don't bother if the user is just - # changing their profile info. - newly_joined = True - - prev_state_ids = await context.get_prev_state_ids() - - prev_state_id = prev_state_ids.get((event.type, event.state_key)) - if prev_state_id: - prev_state = await self.store.get_event( - prev_state_id, allow_none=True - ) - if prev_state and prev_state.membership == Membership.JOIN: - newly_joined = False - - if newly_joined: - user = UserID.from_string(event.state_key) - await self.user_joined_room(user, room_id) - # For encrypted messages we check that we know about the sending device, # if we don't then we mark the device cache for that user as stale. if event.type == EventTypes.Encrypted: @@ -1550,11 +1524,6 @@ async def on_send_join_request(self, origin, pdu): event.signatures, ) - if event.type == EventTypes.Member: - if event.content["membership"] == Membership.JOIN: - user = UserID.from_string(event.state_key) - await self.user_joined_room(user, event.room_id) - prev_state_ids = await context.get_prev_state_ids() state_ids = list(prev_state_ids.values()) @@ -2984,16 +2953,6 @@ async def _clean_room_for_join(self, room_id: str) -> None: else: await self.store.clean_room_for_join(room_id) - async def user_joined_room(self, user: UserID, room_id: str) -> None: - """Called when a new user has joined the room - """ - if self.config.worker_app: - await self._notify_user_membership_change( - room_id=room_id, user_id=user.to_string(), change="joined" - ) - else: - user_joined_room(self.distributor, user, room_id) - async def get_room_complexity( self, remote_room_hosts: List[str], room_id: str ) -> Optional[dict]: diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 32b7e323faa9..100f335b8027 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -40,7 +40,7 @@ from synapse.storage.roommember import RoomsForUser from synapse.types import JsonDict, Requester, RoomAlias, RoomID, StateMap, UserID from synapse.util.async_helpers import Linearizer -from synapse.util.distributor import user_joined_room, user_left_room +from synapse.util.distributor import user_left_room from ._base import BaseHandler @@ -148,17 +148,6 @@ async def remote_reject_invite( """ raise NotImplementedError() - @abc.abstractmethod - async def _user_joined_room(self, target: UserID, room_id: str) -> None: - """Notifies distributor on master process that the user has joined the - room. - - Args: - target - room_id - """ - raise NotImplementedError() - @abc.abstractmethod async def _user_left_room(self, target: UserID, room_id: str) -> None: """Notifies distributor on master process that the user has left the @@ -221,7 +210,6 @@ async def _local_membership_update( prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None) - newly_joined = False if event.membership == Membership.JOIN: newly_joined = True if prev_member_event_id: @@ -246,12 +234,7 @@ async def _local_membership_update( requester, event, context, extra_users=[target], ratelimit=ratelimit, ) - if event.membership == Membership.JOIN and newly_joined: - # Only fire user_joined_room if the user has actually joined the - # room. Don't bother if the user is just changing their profile - # info. 
- await self._user_joined_room(target, room_id) - elif event.membership == Membership.LEAVE: + if event.membership == Membership.LEAVE: if prev_member_event_id: prev_member_event = await self.store.get_event(prev_member_event_id) if prev_member_event.membership == Membership.JOIN: @@ -726,17 +709,7 @@ async def send_membership_event( (EventTypes.Member, event.state_key), None ) - if event.membership == Membership.JOIN: - # Only fire user_joined_room if the user has actually joined the - # room. Don't bother if the user is just changing their profile - # info. - newly_joined = True - if prev_member_event_id: - prev_member_event = await self.store.get_event(prev_member_event_id) - newly_joined = prev_member_event.membership != Membership.JOIN - if newly_joined: - await self._user_joined_room(target_user, room_id) - elif event.membership == Membership.LEAVE: + if event.membership == Membership.LEAVE: if prev_member_event_id: prev_member_event = await self.store.get_event(prev_member_event_id) if prev_member_event.membership == Membership.JOIN: @@ -1002,10 +975,9 @@ async def _is_server_notice_room(self, room_id: str) -> bool: class RoomMemberMasterHandler(RoomMemberHandler): def __init__(self, hs): - super(RoomMemberMasterHandler, self).__init__(hs) + super().__init__(hs) self.distributor = hs.get_distributor() - self.distributor.declare("user_joined_room") self.distributor.declare("user_left_room") async def _is_remote_room_too_complex( @@ -1085,7 +1057,6 @@ async def _remote_join( event_id, stream_id = await self.federation_handler.do_invite_join( remote_room_hosts, room_id, user.to_string(), content ) - await self._user_joined_room(user, room_id) # Check the room we just joined wasn't too large, if we didn't fetch the # complexity of it before. @@ -1228,11 +1199,6 @@ async def _locally_reject_invite( ) return event.event_id, stream_id - async def _user_joined_room(self, target: UserID, room_id: str) -> None: - """Implements RoomMemberHandler._user_joined_room - """ - user_joined_room(self.distributor, target, room_id) - async def _user_left_room(self, target: UserID, room_id: str) -> None: """Implements RoomMemberHandler._user_left_room """ diff --git a/synapse/handlers/room_member_worker.py b/synapse/handlers/room_member_worker.py index 897338fd54e2..e7f34737c684 100644 --- a/synapse/handlers/room_member_worker.py +++ b/synapse/handlers/room_member_worker.py @@ -57,8 +57,6 @@ async def _remote_join( content=content, ) - await self._user_joined_room(user, room_id) - return ret["event_id"], ret["stream_id"] async def remote_reject_invite( @@ -81,13 +79,6 @@ async def remote_reject_invite( ) return ret["event_id"], ret["stream_id"] - async def _user_joined_room(self, target: UserID, room_id: str) -> None: - """Implements RoomMemberHandler._user_joined_room - """ - await self._notify_change_client( - user_id=target.to_string(), room_id=room_id, change="joined" - ) - async def _user_left_room(self, target: UserID, room_id: str) -> None: """Implements RoomMemberHandler._user_left_room """ diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py index 741329ab5fe7..08095fdf7d2c 100644 --- a/synapse/replication/http/membership.py +++ b/synapse/replication/http/membership.py @@ -19,7 +19,7 @@ from synapse.http.servlet import parse_json_object_from_request from synapse.replication.http._base import ReplicationEndpoint from synapse.types import JsonDict, Requester, UserID -from synapse.util.distributor import user_joined_room, user_left_room +from 
synapse.util.distributor import user_left_room if TYPE_CHECKING: from synapse.server import HomeServer @@ -181,9 +181,9 @@ async def _serialize_payload(room_id, user_id, change): Args: room_id (str) user_id (str) - change (str): Either "joined" or "left" + change (str): "left" """ - assert change in ("joined", "left") + assert change == "left" return {} @@ -192,9 +192,7 @@ def _handle_request(self, request, room_id, user_id, change): user = UserID.from_string(user_id) - if change == "joined": - user_joined_room(self.distributor, user, room_id) - elif change == "left": + if change == "left": user_left_room(self.distributor, user, room_id) else: raise Exception("Unrecognized change: %r", change) diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py index a750261e77fd..f73e95393cbe 100644 --- a/synapse/util/distributor.py +++ b/synapse/util/distributor.py @@ -16,8 +16,6 @@ import logging from twisted.internet import defer -from twisted.internet.defer import Deferred, fail, succeed -from twisted.python import failure from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.metrics.background_process_metrics import run_as_background_process @@ -29,11 +27,6 @@ def user_left_room(distributor, user, room_id): distributor.fire("user_left_room", user=user, room_id=room_id) -# XXX: this is no longer used. We should probably kill it. -def user_joined_room(distributor, user, room_id): - distributor.fire("user_joined_room", user=user, room_id=room_id) - - class Distributor: """A central dispatch point for loosely-connected pieces of code to register, observe, and fire signals. @@ -81,28 +74,6 @@ def fire(self, name, *args, **kwargs): run_as_background_process(name, self.signals[name].fire, *args, **kwargs) -def maybeAwaitableDeferred(f, *args, **kw): - """ - Invoke a function that may or may not return a Deferred or an Awaitable. - - This is a modified version of twisted.internet.defer.maybeDeferred. - """ - try: - result = f(*args, **kw) - except Exception: - return fail(failure.Failure(captureVars=Deferred.debug)) - - if isinstance(result, Deferred): - return result - # Handle the additional case of an awaitable being returned. - elif inspect.isawaitable(result): - return defer.ensureDeferred(result) - elif isinstance(result, failure.Failure): - return fail(result) - else: - return succeed(result) - - class Signal: """A Signal is a dispatch point that stores a list of callables as observers of it. @@ -132,22 +103,17 @@ def fire(self, *args, **kwargs): Returns a Deferred that will complete when all the observers have completed.""" - def do(observer): - def eb(failure): + async def do(observer): + try: + result = observer(*args, **kwargs) + if inspect.isawaitable(result): + result = await result + return result + except Exception as e: logger.warning( - "%s signal observer %s failed: %r", - self.name, - observer, - failure, - exc_info=( - failure.type, - failure.value, - failure.getTracebackObject(), - ), + "%s signal observer %s failed: %r", self.name, observer, e, ) - return maybeAwaitableDeferred(observer, *args, **kwargs).addErrback(eb) - deferreds = [run_in_background(do, o) for o in self.observers] return make_deferred_yieldable( From b312769c0ee2c40b1a26a6ed39ea1c8a462d4349 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 9 Sep 2020 12:59:41 -0400 Subject: [PATCH 015/245] Do not error when thumbnailing invalid files (#8236) If a file cannot be thumbnailed for some reason (e.g. 
the file is empty), then catch the exception and convert it to a reasonable error message for the client. --- changelog.d/8236.bugfix | 1 + synapse/rest/media/v1/media_repository.py | 69 ++++++++++++++++++--- synapse/rest/media/v1/thumbnail_resource.py | 5 +- synapse/rest/media/v1/thumbnailer.py | 14 ++++- tests/rest/media/v1/test_media_storage.py | 39 +++++++++--- 5 files changed, 106 insertions(+), 22 deletions(-) create mode 100644 changelog.d/8236.bugfix diff --git a/changelog.d/8236.bugfix b/changelog.d/8236.bugfix new file mode 100644 index 000000000000..6f048710159f --- /dev/null +++ b/changelog.d/8236.bugfix @@ -0,0 +1 @@ +Fix a longstanding bug where files that could not be thumbnailed would result in an Internal Server Error. diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 9a1b7779f7eb..69f353d46f9e 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -53,7 +53,7 @@ from .preview_url_resource import PreviewUrlResource from .storage_provider import StorageProviderWrapper from .thumbnail_resource import ThumbnailResource -from .thumbnailer import Thumbnailer +from .thumbnailer import Thumbnailer, ThumbnailError from .upload_resource import UploadResource logger = logging.getLogger(__name__) @@ -460,13 +460,30 @@ def _generate_thumbnail(self, thumbnailer, t_width, t_height, t_method, t_type): return t_byte_source async def generate_local_exact_thumbnail( - self, media_id, t_width, t_height, t_method, t_type, url_cache - ): + self, + media_id: str, + t_width: int, + t_height: int, + t_method: str, + t_type: str, + url_cache: str, + ) -> Optional[str]: input_path = await self.media_storage.ensure_media_is_in_local_cache( FileInfo(None, media_id, url_cache=url_cache) ) - thumbnailer = Thumbnailer(input_path) + try: + thumbnailer = Thumbnailer(input_path) + except ThumbnailError as e: + logger.warning( + "Unable to generate a thumbnail for local media %s using a method of %s and type of %s: %s", + media_id, + t_method, + t_type, + e, + ) + return None + t_byte_source = await defer_to_thread( self.hs.get_reactor(), self._generate_thumbnail, @@ -506,14 +523,36 @@ async def generate_local_exact_thumbnail( return output_path + # Could not generate thumbnail. + return None + async def generate_remote_exact_thumbnail( - self, server_name, file_id, media_id, t_width, t_height, t_method, t_type - ): + self, + server_name: str, + file_id: str, + media_id: str, + t_width: int, + t_height: int, + t_method: str, + t_type: str, + ) -> Optional[str]: input_path = await self.media_storage.ensure_media_is_in_local_cache( FileInfo(server_name, file_id, url_cache=False) ) - thumbnailer = Thumbnailer(input_path) + try: + thumbnailer = Thumbnailer(input_path) + except ThumbnailError as e: + logger.warning( + "Unable to generate a thumbnail for remote media %s from %s using a method of %s and type of %s: %s", + media_id, + server_name, + t_method, + t_type, + e, + ) + return None + t_byte_source = await defer_to_thread( self.hs.get_reactor(), self._generate_thumbnail, @@ -559,6 +598,9 @@ async def generate_remote_exact_thumbnail( return output_path + # Could not generate thumbnail. 
+ return None + async def _generate_thumbnails( self, server_name: Optional[str], @@ -590,7 +632,18 @@ async def _generate_thumbnails( FileInfo(server_name, file_id, url_cache=url_cache) ) - thumbnailer = Thumbnailer(input_path) + try: + thumbnailer = Thumbnailer(input_path) + except ThumbnailError as e: + logger.warning( + "Unable to generate thumbnails for remote media %s from %s using a method of %s and type of %s: %s", + media_id, + server_name, + media_type, + e, + ) + return None + m_width = thumbnailer.width m_height = thumbnailer.height diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py index a83535b97b5e..30421b663a72 100644 --- a/synapse/rest/media/v1/thumbnail_resource.py +++ b/synapse/rest/media/v1/thumbnail_resource.py @@ -16,6 +16,7 @@ import logging +from synapse.api.errors import SynapseError from synapse.http.server import DirectServeJsonResource, set_cors_headers from synapse.http.servlet import parse_integer, parse_string @@ -173,7 +174,7 @@ async def _select_or_generate_local_thumbnail( await respond_with_file(request, desired_type, file_path) else: logger.warning("Failed to generate thumbnail") - respond_404(request) + raise SynapseError(400, "Failed to generate thumbnail.") async def _select_or_generate_remote_thumbnail( self, @@ -235,7 +236,7 @@ async def _select_or_generate_remote_thumbnail( await respond_with_file(request, desired_type, file_path) else: logger.warning("Failed to generate thumbnail") - respond_404(request) + raise SynapseError(400, "Failed to generate thumbnail.") async def _respond_remote_thumbnail( self, request, server_name, media_id, width, height, method, m_type diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py index d681bf7bf03a..457ad6031ce2 100644 --- a/synapse/rest/media/v1/thumbnailer.py +++ b/synapse/rest/media/v1/thumbnailer.py @@ -15,7 +15,7 @@ import logging from io import BytesIO -from PIL import Image as Image +from PIL import Image logger = logging.getLogger(__name__) @@ -31,12 +31,22 @@ } +class ThumbnailError(Exception): + """An error occurred generating a thumbnail.""" + + class Thumbnailer: FORMATS = {"image/jpeg": "JPEG", "image/png": "PNG"} def __init__(self, input_path): - self.image = Image.open(input_path) + try: + self.image = Image.open(input_path) + except OSError as e: + # If an error occurs opening the image, a thumbnail won't be able to + # be generated. 
+ raise ThumbnailError from e + self.width, self.height = self.image.size self.transpose_method = None try: diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py index f4f3e5677791..5f897d49cf47 100644 --- a/tests/rest/media/v1/test_media_storage.py +++ b/tests/rest/media/v1/test_media_storage.py @@ -120,12 +120,13 @@ class _TestImage: extension = attr.ib(type=bytes) expected_cropped = attr.ib(type=Optional[bytes]) expected_scaled = attr.ib(type=Optional[bytes]) + expected_found = attr.ib(default=True, type=bool) @parameterized_class( ("test_image",), [ - # smol png + # smoll png ( _TestImage( unhexlify( @@ -161,6 +162,8 @@ class _TestImage: None, ), ), + # an empty file + (_TestImage(b"", b"image/gif", b".gif", None, None, False,),), ], ) class MediaRepoTests(unittest.HomeserverTestCase): @@ -303,12 +306,16 @@ def test_disposition_none(self): self.assertEqual(headers.getRawHeaders(b"Content-Disposition"), None) def test_thumbnail_crop(self): - self._test_thumbnail("crop", self.test_image.expected_cropped) + self._test_thumbnail( + "crop", self.test_image.expected_cropped, self.test_image.expected_found + ) def test_thumbnail_scale(self): - self._test_thumbnail("scale", self.test_image.expected_scaled) + self._test_thumbnail( + "scale", self.test_image.expected_scaled, self.test_image.expected_found + ) - def _test_thumbnail(self, method, expected_body): + def _test_thumbnail(self, method, expected_body, expected_found): params = "?width=32&height=32&method=" + method request, channel = self.make_request( "GET", self.media_id + params, shorthand=False @@ -325,11 +332,23 @@ def _test_thumbnail(self, method, expected_body): ) self.pump() - self.assertEqual(channel.code, 200) - if expected_body is not None: + if expected_found: + self.assertEqual(channel.code, 200) + if expected_body is not None: + self.assertEqual( + channel.result["body"], expected_body, channel.result["body"] + ) + else: + # ensure that the result is at least some valid image + Image.open(BytesIO(channel.result["body"])) + else: + # A 404 with a JSON body. + self.assertEqual(channel.code, 404) self.assertEqual( - channel.result["body"], expected_body, channel.result["body"] + channel.json_body, + { + "errcode": "M_NOT_FOUND", + "error": "Not found [b'example.com', b'12345?width=32&height=32&method=%s']" + % method, + }, ) - else: - # ensure that the result is at least some valid image - Image.open(BytesIO(channel.result["body"])) From a3a90ee031d3942c04ab0d985678caf30a94f9e8 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 10 Sep 2020 11:45:12 +0100 Subject: [PATCH 016/245] Show a confirmation page during user password reset (#8004) This PR adds a confirmation step to resetting your user password between clicking the link in your email and your password actually being reset. This is to better align our password reset flow with the industry standard of requiring a confirmation from the user after email validation. 
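To make the new two-step flow concrete, here is a rough sketch of what following a reset link involves after this change. The homeserver URL and the `sid`/`token`/`client_secret` values are placeholders, and the `requests` library merely stands in for the user's browser; this is an illustration of the flow described above, not code from the patch itself.

```python
import requests

# Placeholder values -- in practice these come from the link in the reset email.
url = "https://matrix.example.com/_synapse/client/password_reset/email/submit_token"
params = {"sid": "example-sid", "token": "example-token", "client_secret": "example-secret"}

# Step 1: following the emailed link (a GET) now only renders a confirmation page;
# nothing is validated or changed yet.
confirmation_page = requests.get(url, params=params)
print(confirmation_page.status_code)  # expected: 200, an HTML page containing the confirmation form

# Step 2: the reset session is only validated once the form on that page is
# submitted, i.e. the same parameters are POSTed back as a URL-encoded form.
result = requests.post(url, data=params)
print(result.status_code)  # 200 on success, or an error page if the session is invalid
```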
--- UPGRADE.rst | 24 ++++ changelog.d/8004.feature | 1 + docs/sample_config.yaml | 10 +- synapse/api/urls.py | 1 + synapse/app/homeserver.py | 10 ++ synapse/config/emailconfig.py | 12 +- synapse/push/mailer.py | 2 +- .../password_reset_confirmation.html | 16 +++ synapse/rest/__init__.py | 6 +- synapse/rest/client/v2_alpha/account.py | 76 ----------- synapse/rest/synapse/__init__.py | 14 ++ synapse/rest/synapse/client/__init__.py | 14 ++ synapse/rest/synapse/client/password_reset.py | 127 ++++++++++++++++++ tests/rest/client/v2_alpha/test_account.py | 29 +++- tests/server.py | 15 ++- tests/unittest.py | 4 + 16 files changed, 271 insertions(+), 90 deletions(-) create mode 100644 changelog.d/8004.feature create mode 100644 synapse/res/templates/password_reset_confirmation.html create mode 100644 synapse/rest/synapse/__init__.py create mode 100644 synapse/rest/synapse/client/__init__.py create mode 100644 synapse/rest/synapse/client/password_reset.py diff --git a/UPGRADE.rst b/UPGRADE.rst index 77be1b2952a6..1e4da98afe9a 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -88,6 +88,30 @@ for example: wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb +Upgrading to v1.21.0 +==================== + +New HTML templates +------------------ + +A new HTML template, +`password_reset_confirmation.html `_, +has been added to the ``synapse/res/templates`` directory. If you are using a +custom template directory, you may want to copy the template over and modify it. + +Note that as of v1.20.0, templates do not need to be included in custom template +directories for Synapse to start. The default templates will be used if a custom +template cannot be found. + +This page will appear to the user after clicking a password reset link that has +been emailed to them. + +To complete password reset, the page must include a way to make a `POST` +request to +``/_synapse/client/password_reset/{medium}/submit_token`` +with the query parameters from the original link, presented as a URL-encoded form. See the file +itself for more details. + Upgrading to v1.18.0 ==================== diff --git a/changelog.d/8004.feature b/changelog.d/8004.feature new file mode 100644 index 000000000000..a91b75e0e0fd --- /dev/null +++ b/changelog.d/8004.feature @@ -0,0 +1 @@ +Require the user to confirm that their password should be reset after clicking the email confirmation link. \ No newline at end of file diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 994b0a62c4c4..2a5b2e093537 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -2039,9 +2039,13 @@ email: # * The contents of password reset emails sent by the homeserver: # 'password_reset.html' and 'password_reset.txt' # - # * HTML pages for success and failure that a user will see when they follow - # the link in the password reset email: 'password_reset_success.html' and - # 'password_reset_failure.html' + # * An HTML page that a user will see when they follow the link in the password + # reset email. 
The user will be asked to confirm the action before their + # password is reset: 'password_reset_confirmation.html' + # + # * HTML pages for success and failure that a user will see when they confirm + # the password reset flow using the page above: 'password_reset_success.html' + # and 'password_reset_failure.html' # # * The contents of address verification emails sent during registration: # 'registration.html' and 'registration.txt' diff --git a/synapse/api/urls.py b/synapse/api/urls.py index bbfccf955e29..6379c86ddea0 100644 --- a/synapse/api/urls.py +++ b/synapse/api/urls.py @@ -21,6 +21,7 @@ from synapse.config import ConfigError +SYNAPSE_CLIENT_API_PREFIX = "/_synapse/client" CLIENT_API_PREFIX = "/_matrix/client" FEDERATION_PREFIX = "/_matrix/federation" FEDERATION_V1_PREFIX = FEDERATION_PREFIX + "/v1" diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 6014adc8509c..b08319ca77f6 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -48,6 +48,7 @@ from synapse.app import _base from synapse.app._base import listen_ssl, listen_tcp, quit_with_error from synapse.config._base import ConfigError +from synapse.config.emailconfig import ThreepidBehaviour from synapse.config.homeserver import HomeServerConfig from synapse.config.server import ListenerConfig from synapse.federation.transport.server import TransportLayerServer @@ -209,6 +210,15 @@ def _configure_named_resource(self, name, compress=False): resources["/_matrix/saml2"] = SAML2Resource(self) + if self.get_config().threepid_behaviour_email == ThreepidBehaviour.LOCAL: + from synapse.rest.synapse.client.password_reset import ( + PasswordResetSubmitTokenResource, + ) + + resources[ + "/_synapse/client/password_reset/email/submit_token" + ] = PasswordResetSubmitTokenResource(self) + if name == "consent": from synapse.rest.consent.consent_resource import ConsentResource diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 7a796996c056..72b42bfd6278 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -228,6 +228,7 @@ def read_config(self, config, **kwargs): self.email_registration_template_text, self.email_add_threepid_template_html, self.email_add_threepid_template_text, + self.email_password_reset_template_confirmation_html, self.email_password_reset_template_failure_html, self.email_registration_template_failure_html, self.email_add_threepid_template_failure_html, @@ -242,6 +243,7 @@ def read_config(self, config, **kwargs): registration_template_text, add_threepid_template_html, add_threepid_template_text, + "password_reset_confirmation.html", password_reset_template_failure_html, registration_template_failure_html, add_threepid_template_failure_html, @@ -404,9 +406,13 @@ def generate_config_section(self, config_dir_path, server_name, **kwargs): # * The contents of password reset emails sent by the homeserver: # 'password_reset.html' and 'password_reset.txt' # - # * HTML pages for success and failure that a user will see when they follow - # the link in the password reset email: 'password_reset_success.html' and - # 'password_reset_failure.html' + # * An HTML page that a user will see when they follow the link in the password + # reset email. 
The user will be asked to confirm the action before their + # password is reset: 'password_reset_confirmation.html' + # + # * HTML pages for success and failure that a user will see when they confirm + # the password reset flow using the page above: 'password_reset_success.html' + # and 'password_reset_failure.html' # # * The contents of address verification emails sent during registration: # 'registration.html' and 'registration.txt' diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 6c5785401809..455a1acb46a8 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -123,7 +123,7 @@ async def send_password_reset_mail(self, email_address, token, client_secret, si params = {"token": token, "client_secret": client_secret, "sid": sid} link = ( self.hs.config.public_baseurl - + "_matrix/client/unstable/password_reset/email/submit_token?%s" + + "_synapse/client/password_reset/email/submit_token?%s" % urllib.parse.urlencode(params) ) diff --git a/synapse/res/templates/password_reset_confirmation.html b/synapse/res/templates/password_reset_confirmation.html new file mode 100644 index 000000000000..def4b5162b90 --- /dev/null +++ b/synapse/res/templates/password_reset_confirmation.html @@ -0,0 +1,16 @@ + + + + +
+    You have requested to reset your Matrix account password. Click the link below to confirm this action.
+    If you did not mean to do this, please close this page and your password will not be changed.
+    [markup of the confirmation form is not shown here]
+ + + diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index 87f927890c5e..40f5c32db2df 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -13,8 +13,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import synapse.rest.admin from synapse.http.server import JsonResource +from synapse.rest import admin from synapse.rest.client import versions from synapse.rest.client.v1 import ( directory, @@ -123,9 +123,7 @@ def register_servlets(client_resource, hs): password_policy.register_servlets(hs, client_resource) # moving to /_synapse/admin - synapse.rest.admin.register_servlets_for_client_rest_resource( - hs, client_resource - ) + admin.register_servlets_for_client_rest_resource(hs, client_resource) # unstable shared_rooms.register_servlets(hs, client_resource) diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 455051ac4626..c6cb9deb2bfa 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -152,81 +152,6 @@ async def on_POST(self, request): return 200, ret -class PasswordResetSubmitTokenServlet(RestServlet): - """Handles 3PID validation token submission""" - - PATTERNS = client_patterns( - "/password_reset/(?P[^/]*)/submit_token$", releases=(), unstable=True - ) - - def __init__(self, hs): - """ - Args: - hs (synapse.server.HomeServer): server - """ - super(PasswordResetSubmitTokenServlet, self).__init__() - self.hs = hs - self.auth = hs.get_auth() - self.config = hs.config - self.clock = hs.get_clock() - self.store = hs.get_datastore() - if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL: - self._failure_email_template = ( - self.config.email_password_reset_template_failure_html - ) - - async def on_GET(self, request, medium): - # We currently only handle threepid token submissions for email - if medium != "email": - raise SynapseError( - 400, "This medium is currently not supported for password resets" - ) - if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF: - if self.config.local_threepid_handling_disabled_due_to_email_config: - logger.warning( - "Password reset emails have been disabled due to lack of an email config" - ) - raise SynapseError( - 400, "Email-based password resets are disabled on this server" - ) - - sid = parse_string(request, "sid", required=True) - token = parse_string(request, "token", required=True) - client_secret = parse_string(request, "client_secret", required=True) - assert_valid_client_secret(client_secret) - - # Attempt to validate a 3PID session - try: - # Mark the session as valid - next_link = await self.store.validate_threepid_session( - sid, client_secret, token, self.clock.time_msec() - ) - - # Perform a 302 redirect if next_link is set - if next_link: - if next_link.startswith("file:///"): - logger.warning( - "Not redirecting to next_link as it is a local file: address" - ) - else: - request.setResponseCode(302) - request.setHeader("Location", next_link) - finish_request(request) - return None - - # Otherwise show the success template - html = self.config.email_password_reset_template_success_html_content - status_code = 200 - except ThreepidValidationError as e: - status_code = e.code - - # Show a failure page with a reason - template_vars = {"failure_reason": e.msg} - html = self._failure_email_template.render(**template_vars) - - respond_with_html(request, 
status_code, html) - - class PasswordRestServlet(RestServlet): PATTERNS = client_patterns("/account/password$") @@ -938,7 +863,6 @@ async def on_GET(self, request): def register_servlets(hs, http_server): EmailPasswordRequestTokenRestServlet(hs).register(http_server) - PasswordResetSubmitTokenServlet(hs).register(http_server) PasswordRestServlet(hs).register(http_server) DeactivateAccountRestServlet(hs).register(http_server) EmailThreepidRequestTokenRestServlet(hs).register(http_server) diff --git a/synapse/rest/synapse/__init__.py b/synapse/rest/synapse/__init__.py new file mode 100644 index 000000000000..c0b733488b5f --- /dev/null +++ b/synapse/rest/synapse/__init__.py @@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/synapse/rest/synapse/client/__init__.py b/synapse/rest/synapse/client/__init__.py new file mode 100644 index 000000000000..c0b733488b5f --- /dev/null +++ b/synapse/rest/synapse/client/__init__.py @@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/synapse/rest/synapse/client/password_reset.py b/synapse/rest/synapse/client/password_reset.py new file mode 100644 index 000000000000..9e4fbc0cbd08 --- /dev/null +++ b/synapse/rest/synapse/client/password_reset.py @@ -0,0 +1,127 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import logging +from typing import TYPE_CHECKING, Tuple + +from twisted.web.http import Request + +from synapse.api.errors import ThreepidValidationError +from synapse.config.emailconfig import ThreepidBehaviour +from synapse.http.server import DirectServeHtmlResource +from synapse.http.servlet import parse_string +from synapse.util.stringutils import assert_valid_client_secret + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +class PasswordResetSubmitTokenResource(DirectServeHtmlResource): + """Handles 3PID validation token submission + + This resource gets mounted under /_synapse/client/password_reset/email/submit_token + """ + + isLeaf = 1 + + def __init__(self, hs: "HomeServer"): + """ + Args: + hs: server + """ + super().__init__() + + self.clock = hs.get_clock() + self.store = hs.get_datastore() + + self._local_threepid_handling_disabled_due_to_email_config = ( + hs.config.local_threepid_handling_disabled_due_to_email_config + ) + self._confirmation_email_template = ( + hs.config.email_password_reset_template_confirmation_html + ) + self._email_password_reset_template_success_html = ( + hs.config.email_password_reset_template_success_html_content + ) + self._failure_email_template = ( + hs.config.email_password_reset_template_failure_html + ) + + # This resource should not be mounted if threepid behaviour is not LOCAL + assert hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL + + async def _async_render_GET(self, request: Request) -> Tuple[int, bytes]: + sid = parse_string(request, "sid", required=True) + token = parse_string(request, "token", required=True) + client_secret = parse_string(request, "client_secret", required=True) + assert_valid_client_secret(client_secret) + + # Show a confirmation page, just in case someone accidentally clicked this link when + # they didn't mean to + template_vars = { + "sid": sid, + "token": token, + "client_secret": client_secret, + } + return ( + 200, + self._confirmation_email_template.render(**template_vars).encode("utf-8"), + ) + + async def _async_render_POST(self, request: Request) -> Tuple[int, bytes]: + sid = parse_string(request, "sid", required=True) + token = parse_string(request, "token", required=True) + client_secret = parse_string(request, "client_secret", required=True) + + # Attempt to validate a 3PID session + try: + # Mark the session as valid + next_link = await self.store.validate_threepid_session( + sid, client_secret, token, self.clock.time_msec() + ) + + # Perform a 302 redirect if next_link is set + if next_link: + if next_link.startswith("file:///"): + logger.warning( + "Not redirecting to next_link as it is a local file: address" + ) + else: + next_link_bytes = next_link.encode("utf-8") + request.setHeader("Location", next_link_bytes) + return ( + 302, + ( + b'You are being redirected to %s.' 
+ % (next_link_bytes, next_link_bytes) + ), + ) + + # Otherwise show the success template + html_bytes = self._email_password_reset_template_success_html.encode( + "utf-8" + ) + status_code = 200 + except ThreepidValidationError as e: + status_code = e.code + + # Show a failure page with a reason + template_vars = {"failure_reason": e.msg} + html_bytes = self._failure_email_template.render(**template_vars).encode( + "utf-8" + ) + + return status_code, html_bytes diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py index 0a51aeff92ac..93f899d86133 100644 --- a/tests/rest/client/v2_alpha/test_account.py +++ b/tests/rest/client/v2_alpha/test_account.py @@ -19,6 +19,7 @@ import re from email.parser import Parser from typing import Optional +from urllib.parse import urlencode import pkg_resources @@ -27,6 +28,7 @@ from synapse.api.errors import Codes from synapse.rest.client.v1 import login, room from synapse.rest.client.v2_alpha import account, register +from synapse.rest.synapse.client.password_reset import PasswordResetSubmitTokenResource from tests import unittest from tests.unittest import override_config @@ -70,6 +72,7 @@ async def sendmail(smtphost, from_addr, to_addrs, msg, **kwargs): def prepare(self, reactor, clock, hs): self.store = hs.get_datastore() + self.submit_token_resource = PasswordResetSubmitTokenResource(hs) def test_basic_password_reset(self): """Test basic password reset flow @@ -251,8 +254,32 @@ def _validate_token(self, link): # Remove the host path = link.replace("https://example.com", "") + # Load the password reset confirmation page request, channel = self.make_request("GET", path, shorthand=False) - self.render(request) + request.render(self.submit_token_resource) + self.pump() + self.assertEquals(200, channel.code, channel.result) + + # Now POST to the same endpoint, mimicking the same behaviour as clicking the + # password reset confirm button + + # Send arguments as url-encoded form data, matching the template's behaviour + form_args = [] + for key, value_list in request.args.items(): + for value in value_list: + arg = (key, value) + form_args.append(arg) + + # Confirm the password reset + request, channel = self.make_request( + "POST", + path, + content=urlencode(form_args).encode("utf8"), + shorthand=False, + content_is_form=True, + ) + request.render(self.submit_token_resource) + self.pump() self.assertEquals(200, channel.code, channel.result) def _get_link_from_email(self): diff --git a/tests/server.py b/tests/server.py index 48e45c6c8b89..61ec67015500 100644 --- a/tests/server.py +++ b/tests/server.py @@ -1,6 +1,6 @@ import json import logging -from io import BytesIO +from io import SEEK_END, BytesIO import attr from zope.interface import implementer @@ -135,6 +135,7 @@ def make_request( request=SynapseRequest, shorthand=True, federation_auth_origin=None, + content_is_form=False, ): """ Make a web request using the given method and path, feed it the @@ -150,6 +151,8 @@ def make_request( with the usual REST API path, if it doesn't contain it. federation_auth_origin (bytes|None): if set to not-None, we will add a fake Authorization header pretenting to be the given server name. + content_is_form: Whether the content is URL encoded form data. Adds the + 'Content-Type': 'application/x-www-form-urlencoded' header. 
Returns: Tuple[synapse.http.site.SynapseRequest, channel] @@ -181,6 +184,8 @@ def make_request( req = request(channel) req.process = lambda: b"" req.content = BytesIO(content) + # Twisted expects to be at the end of the content when parsing the request. + req.content.seek(SEEK_END) req.postpath = list(map(unquote, path[1:].split(b"/"))) if access_token: @@ -195,7 +200,13 @@ def make_request( ) if content: - req.requestHeaders.addRawHeader(b"Content-Type", b"application/json") + if content_is_form: + req.requestHeaders.addRawHeader( + b"Content-Type", b"application/x-www-form-urlencoded" + ) + else: + # Assume the body is JSON + req.requestHeaders.addRawHeader(b"Content-Type", b"application/json") req.requestReceived(method, path, b"1.1") diff --git a/tests/unittest.py b/tests/unittest.py index 3cb55a7e96fa..128dd4e19c43 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -353,6 +353,7 @@ def make_request( request: Type[T] = SynapseRequest, shorthand: bool = True, federation_auth_origin: str = None, + content_is_form: bool = False, ) -> Tuple[T, FakeChannel]: """ Create a SynapseRequest at the path using the method and containing the @@ -368,6 +369,8 @@ def make_request( with the usual REST API path, if it doesn't contain it. federation_auth_origin (bytes|None): if set to not-None, we will add a fake Authorization header pretenting to be the given server name. + content_is_form: Whether the content is URL encoded form data. Adds the + 'Content-Type': 'application/x-www-form-urlencoded' header. Returns: Tuple[synapse.http.site.SynapseRequest, channel] @@ -384,6 +387,7 @@ def make_request( request, shorthand, federation_auth_origin, + content_is_form, ) def render(self, request): From 5d3e306d9f6ee48ff76dad8eee0be39bb1df5b08 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 10 Sep 2020 13:24:43 +0100 Subject: [PATCH 017/245] Clean up `Notifier.on_new_room_event` code path (#8288) The idea here is that we pass the `max_stream_id` to everything, and only use the stream ID of the particular event to figure out *when* the max stream position has caught up to the event and we can notify people about it. This is to maintain the distinction between the position of an item in the stream (i.e. event A has stream ID 513) and a token that can be used to partition the stream (i.e. give me all events after stream ID 352). This distinction becomes important when the tokens are more complicated than a single number, which they will be once we start tracking the position of multiple writers in the tokens. The valid operations here are: 1. Is a position before or after a token 2. Fetching all events between two tokens 3. Merging multiple tokens to get the "max", i.e. `C = max(A, B)` means that for all positions P where P is before A *or* before B, then P is before C. Future PR will change the token type to a dedicated type. --- changelog.d/8288.misc | 1 + synapse/handlers/federation.py | 3 -- synapse/handlers/message.py | 4 -- synapse/notifier.py | 62 +++++++++++++++++++------------ synapse/push/pusherpool.py | 2 +- synapse/replication/tcp/client.py | 9 ++--- 6 files changed, 44 insertions(+), 37 deletions(-) create mode 100644 changelog.d/8288.misc diff --git a/changelog.d/8288.misc b/changelog.d/8288.misc new file mode 100644 index 000000000000..c08a53a5ee27 --- /dev/null +++ b/changelog.d/8288.misc @@ -0,0 +1 @@ +Refactor notifier code to correctly use the max event stream position. 
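The three "valid operations" listed in the commit message above are easiest to see with plain integers standing in for tokens. The sketch below is purely illustrative: the helper names and the event tuples are made up, and none of this is Synapse's actual token machinery.

```python
from typing import List, Tuple

# (stream position, event_id) -- a stand-in for an event's place in the stream.
Event = Tuple[int, str]

def is_before(position: int, token: int) -> bool:
    # 1. Is a position before or after a token?
    return position <= token

def events_between(events: List[Event], from_token: int, to_token: int) -> List[Event]:
    # 2. Fetch all events after `from_token` up to and including `to_token`.
    return [ev for ev in events if from_token < ev[0] <= to_token]

def merge_tokens(a: int, b: int) -> int:
    # 3. C = max(A, B): any position before A *or* before B is also before C.
    return max(a, b)

events = [(513, "$event_a"), (514, "$event_b"), (600, "$event_c")]
assert is_before(513, merge_tokens(513, 352))
assert events_between(events, 352, 514) == [(513, "$event_a"), (514, "$event_b")]
```

With a single integer per token these operations are trivial, which is the commit message's point: they only become interesting once the token tracks the positions of multiple writers.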
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index be9b0701a062..c195eba83001 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -128,7 +128,6 @@ def __init__(self, hs): self.keyring = hs.get_keyring() self.action_generator = hs.get_action_generator() self.is_mine_id = hs.is_mine_id - self.pusher_pool = hs.get_pusherpool() self.spam_checker = hs.get_spam_checker() self.event_creation_handler = hs.get_event_creation_handler() self._message_handler = hs.get_message_handler() @@ -2939,8 +2938,6 @@ async def _notify_persisted_event( event, event_stream_id, max_stream_id, extra_users=extra_users ) - await self.pusher_pool.on_new_notifications(max_stream_id) - async def _clean_room_for_join(self, room_id: str) -> None: """Called to clean up any data in DB for a given room, ready for the server to join the room. diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index d1556659e3e5..276de8f8d090 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -387,8 +387,6 @@ def __init__(self, hs: "HomeServer"): # This is only used to get at ratelimit function, and maybe_kick_guest_users self.base_handler = BaseHandler(hs) - self.pusher_pool = hs.get_pusherpool() - # We arbitrarily limit concurrent event creation for a room to 5. # This is to stop us from diverging history *too* much. self.limiter = Linearizer(max_count=5, name="room_event_creation_limit") @@ -1145,8 +1143,6 @@ def is_inviter_member_event(e): # If there's an expiry timestamp on the event, schedule its expiry. self._message_handler.maybe_schedule_expiry(event) - await self.pusher_pool.on_new_notifications(max_stream_id) - def _notify(): try: self.notifier.on_new_room_event( diff --git a/synapse/notifier.py b/synapse/notifier.py index 71f2370874b3..16f19c938eef 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -25,7 +25,6 @@ Set, Tuple, TypeVar, - Union, ) from prometheus_client import Counter @@ -187,7 +186,7 @@ def __init__(self, hs: "synapse.server.HomeServer"): self.store = hs.get_datastore() self.pending_new_room_events = ( [] - ) # type: List[Tuple[int, EventBase, Collection[Union[str, UserID]]]] + ) # type: List[Tuple[int, EventBase, Collection[UserID]]] # Called when there are new things to stream over replication self.replication_callbacks = [] # type: List[Callable[[], None]] @@ -198,6 +197,7 @@ def __init__(self, hs: "synapse.server.HomeServer"): self.clock = hs.get_clock() self.appservice_handler = hs.get_application_service_handler() + self._pusher_pool = hs.get_pusherpool() self.federation_sender = None if hs.should_send_federation(): @@ -247,7 +247,7 @@ def on_new_room_event( event: EventBase, room_stream_id: int, max_room_stream_id: int, - extra_users: Collection[Union[str, UserID]] = [], + extra_users: Collection[UserID] = [], ): """ Used by handlers to inform the notifier something has happened in the room, room event wise. 
@@ -274,47 +274,63 @@ def _notify_pending_new_room_events(self, max_room_stream_id: int): """ pending = self.pending_new_room_events self.pending_new_room_events = [] + + users = set() # type: Set[UserID] + rooms = set() # type: Set[str] + for room_stream_id, event, extra_users in pending: if room_stream_id > max_room_stream_id: self.pending_new_room_events.append( (room_stream_id, event, extra_users) ) else: - self._on_new_room_event(event, room_stream_id, extra_users) + if ( + event.type == EventTypes.Member + and event.membership == Membership.JOIN + ): + self._user_joined_room(event.state_key, event.room_id) + + users.update(extra_users) + rooms.add(event.room_id) + + if users or rooms: + self.on_new_event("room_key", max_room_stream_id, users=users, rooms=rooms) + self._on_updated_room_token(max_room_stream_id) + + def _on_updated_room_token(self, max_room_stream_id: int): + """Poke services that might care that the room position has been + updated. + """ - def _on_new_room_event( - self, - event: EventBase, - room_stream_id: int, - extra_users: Collection[Union[str, UserID]] = [], - ): - """Notify any user streams that are interested in this room event""" # poke any interested application service. run_as_background_process( - "notify_app_services", self._notify_app_services, room_stream_id + "_notify_app_services", self._notify_app_services, max_room_stream_id ) - if self.federation_sender: - self.federation_sender.notify_new_events(room_stream_id) - - if event.type == EventTypes.Member and event.membership == Membership.JOIN: - self._user_joined_room(event.state_key, event.room_id) - - self.on_new_event( - "room_key", room_stream_id, users=extra_users, rooms=[event.room_id] + run_as_background_process( + "_notify_pusher_pool", self._notify_pusher_pool, max_room_stream_id ) - async def _notify_app_services(self, room_stream_id: int): + if self.federation_sender: + self.federation_sender.notify_new_events(max_room_stream_id) + + async def _notify_app_services(self, max_room_stream_id: int): try: - await self.appservice_handler.notify_interested_services(room_stream_id) + await self.appservice_handler.notify_interested_services(max_room_stream_id) except Exception: logger.exception("Error notifying application services of event") + async def _notify_pusher_pool(self, max_room_stream_id: int): + try: + await self._pusher_pool.on_new_notifications(max_room_stream_id) + except Exception: + logger.exception("Error pusher pool of event") + def on_new_event( self, stream_key: str, new_token: int, - users: Collection[Union[str, UserID]] = [], + users: Collection[UserID] = [], rooms: Collection[str] = [], ): """ Used to inform listeners that something has happened event wise. diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index fa8473bf8d00..cc839ffce43d 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -184,7 +184,7 @@ async def remove_pushers_by_access_token(self, user_id, access_tokens): ) await self.remove_pusher(p["app_id"], p["pushkey"], p["user_name"]) - async def on_new_notifications(self, max_stream_id): + async def on_new_notifications(self, max_stream_id: int): if not self.pushers: # nothing to do here. 
return diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index ccd3147dfdaf..e82b9e386f2a 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -29,6 +29,7 @@ EventsStreamEventRow, EventsStreamRow, ) +from synapse.types import UserID from synapse.util.async_helpers import timeout_deferred from synapse.util.metrics import Measure @@ -98,7 +99,6 @@ class ReplicationDataHandler: def __init__(self, hs: "HomeServer"): self.store = hs.get_datastore() - self.pusher_pool = hs.get_pusherpool() self.notifier = hs.get_notifier() self._reactor = hs.get_reactor() self._clock = hs.get_clock() @@ -148,15 +148,12 @@ async def on_rdata( if event.rejected_reason: continue - extra_users = () # type: Tuple[str, ...] + extra_users = () # type: Tuple[UserID, ...] if event.type == EventTypes.Member: - extra_users = (event.state_key,) + extra_users = (UserID.from_string(event.state_key),) max_token = self.store.get_room_max_stream_ordering() self.notifier.on_new_room_event(event, token, max_token, extra_users) - max_token = self.store.get_room_max_stream_ordering() - await self.pusher_pool.on_new_notifications(max_token) - # Notify any waiting deferreds. The list is ordered by position so we # just iterate through the list until we reach a position that is # greater than the received row position. From 95d869c357542b4aa8c1d2344cfb6041d6fd0ea5 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 10 Sep 2020 13:26:34 +0100 Subject: [PATCH 018/245] Add /_synapse/client to the reverse proxy docs (#8227) This PR adds a information about forwarding `/_synapse/client` endpoints through your reverse proxy. The first of these endpoints are introduced in https://github.com/matrix-org/synapse/pull/8004. --- UPGRADE.rst | 17 ++++++++++++++++- changelog.d/8227.doc | 1 + docs/reverse_proxy.md | 23 +++++++++++++++++++++-- docs/workers.md | 1 + 4 files changed, 39 insertions(+), 3 deletions(-) create mode 100644 changelog.d/8227.doc diff --git a/UPGRADE.rst b/UPGRADE.rst index 1e4da98afe9a..7aa8a94528cc 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -7,7 +7,7 @@ Shared rooms endpoint (MSC2666) This release contains a new unstable endpoint `/_matrix/client/unstable/uk.half-shot.msc2666/user/shared_rooms/.*` for fetching rooms one user has in common with another. This feature requires the `update_user_directory` config flag to be `True`. If you are you are using a `synapse.app.user_dir` -worker, requests to this endpoint must be handled by that worker. +worker, requests to this endpoint must be handled by that worker. See `docs/workers.md `_ for more details. @@ -91,6 +91,21 @@ for example: Upgrading to v1.21.0 ==================== +Forwarding ``/_synapse/client`` through your reverse proxy +---------------------------------------------------------- + +The `reverse proxy documentation +`_ has been updated +to include reverse proxy directives for ``/_synapse/client/*`` endpoints. As the user password +reset flow now uses endpoints under this prefix, **you must update your reverse proxy +configurations for user password reset to work**. + +Additionally, note that the `Synapse worker documentation +`_ has been updated to + state that the ``/_synapse/client/password_reset/email/submit_token`` endpoint can be handled +by all workers. If you make use of Synapse's worker feature, please update your reverse proxy +configuration to reflect this change. 
+ New HTML templates ------------------ diff --git a/changelog.d/8227.doc b/changelog.d/8227.doc new file mode 100644 index 000000000000..4a43015a8306 --- /dev/null +++ b/changelog.d/8227.doc @@ -0,0 +1 @@ +Add `/_synapse/client` to the reverse proxy documentation. diff --git a/docs/reverse_proxy.md b/docs/reverse_proxy.md index fd48ba0874c2..edd109fa7b2b 100644 --- a/docs/reverse_proxy.md +++ b/docs/reverse_proxy.md @@ -11,7 +11,7 @@ privileges. **NOTE**: Your reverse proxy must not `canonicalise` or `normalise` the requested URI in any way (for example, by decoding `%xx` escapes). -Beware that Apache *will* canonicalise URIs unless you specifify +Beware that Apache *will* canonicalise URIs unless you specify `nocanon`. When setting up a reverse proxy, remember that Matrix clients and other @@ -23,6 +23,10 @@ specification](https://matrix.org/docs/spec/server_server/latest#resolving-serve for more details of the algorithm used for federation connections, and [delegate.md]() for instructions on setting up delegation. +Endpoints that are part of the standardised Matrix specification are +located under `/_matrix`, whereas endpoints specific to Synapse are +located under `/_synapse/client`. + Let's assume that we expect clients to connect to our server at `https://matrix.example.com`, and other servers to connect at `https://example.com:8448`. The following sections detail the configuration of @@ -45,7 +49,7 @@ server { server_name matrix.example.com; - location /_matrix { + location ~* ^(\/_matrix|\/_synapse\/client) { proxy_pass http://localhost:8008; proxy_set_header X-Forwarded-For $remote_addr; # Nginx by default only allows file uploads up to 1M in size @@ -65,6 +69,10 @@ matrix.example.com { proxy /_matrix http://localhost:8008 { transparent } + + proxy /_synapse/client http://localhost:8008 { + transparent + } } example.com:8448 { @@ -79,6 +87,7 @@ example.com:8448 { ``` matrix.example.com { reverse_proxy /_matrix/* http://localhost:8008 + reverse_proxy /_synapse/client/* http://localhost:8008 } example.com:8448 { @@ -96,6 +105,8 @@ example.com:8448 { AllowEncodedSlashes NoDecode ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix + ProxyPass /_synapse/client http://127.0.0.1:8008/_synapse/client nocanon + ProxyPassReverse /_synapse/client http://127.0.0.1:8008/_synapse/client @@ -119,6 +130,7 @@ frontend https # Matrix client traffic acl matrix-host hdr(host) -i matrix.example.com acl matrix-path path_beg /_matrix + acl matrix-path path_beg /_synapse/client use_backend matrix if matrix-host matrix-path @@ -146,3 +158,10 @@ connecting to Synapse from a client. Synapse exposes a health check endpoint for use by reverse proxies. Each configured HTTP listener has a `/health` endpoint which always returns 200 OK (and doesn't get logged). + +## Synapse administration endpoints + +Endpoints for administering your Synapse instance are placed under +`/_synapse/admin`. These require authentication through an access token of an +admin user. However as access to these endpoints grants the caller a lot of power, +we do not recommend exposing them to the public internet without good reason. 
diff --git a/docs/workers.md b/docs/workers.md index 7a8f5c89fced..41e75e2ea4a6 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -217,6 +217,7 @@ expressions: ^/_matrix/client/(api/v1|r0|unstable)/joined_groups$ ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups$ ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups/ + ^/_synapse/client/password_reset/email/submit_token$ # Registration/login requests ^/_matrix/client/(api/v1|r0|unstable)/login$ From c312ee3cde39d9c97d3552b43533a4384321dc9e Mon Sep 17 00:00:00 2001 From: Dan Callaghan Date: Fri, 11 Sep 2020 04:49:08 +1000 Subject: [PATCH 019/245] Use TLSv1.2 for fake servers in tests (#8208) Some Linux distros have begun disabling TLSv1.0 and TLSv1.1 by default for security reasons, for example in Fedora 33 onwards: https://fedoraproject.org/wiki/Changes/StrongCryptoSettings2 Use TLSv1.2 for the fake TLS servers created in the test suite, to avoid failures due to OpenSSL disallowing TLSv1.0: Signed-off-by: Dan Callaghan --- changelog.d/8208.misc | 1 + tests/http/__init__.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8208.misc diff --git a/changelog.d/8208.misc b/changelog.d/8208.misc new file mode 100644 index 000000000000..e65da88c4643 --- /dev/null +++ b/changelog.d/8208.misc @@ -0,0 +1 @@ +Fix tests on distros which disable TLSv1.0. Contributed by @danc86. diff --git a/tests/http/__init__.py b/tests/http/__init__.py index 5d4144329311..3e5a856584bd 100644 --- a/tests/http/__init__.py +++ b/tests/http/__init__.py @@ -145,7 +145,7 @@ def __init__(self, sanlist): self._cert_file = create_test_cert_file(sanlist) def serverConnectionForTLS(self, tlsProtocol): - ctx = SSL.Context(SSL.TLSv1_METHOD) + ctx = SSL.Context(SSL.SSLv23_METHOD) ctx.use_certificate_file(self._cert_file) ctx.use_privatekey_file(get_test_key_file()) return Connection(ctx, None) From fe8ed1b46f781faa45d1bba8f9308cf47c42010f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 11 Sep 2020 12:22:55 +0100 Subject: [PATCH 020/245] Make `StreamToken.room_key` be a `RoomStreamToken` instance. (#8281) --- changelog.d/8281.misc | 1 + mypy.ini | 2 + synapse/handlers/admin.py | 6 +- synapse/handlers/device.py | 12 ++-- synapse/handlers/initial_sync.py | 4 +- synapse/handlers/message.py | 1 + synapse/handlers/pagination.py | 4 +- synapse/handlers/room.py | 15 +++-- synapse/handlers/sync.py | 11 ++-- synapse/notifier.py | 16 +++-- synapse/storage/__init__.py | 5 +- synapse/storage/databases/main/events.py | 21 +++++-- synapse/storage/databases/main/stream.py | 75 ++++++++++-------------- synapse/storage/persist_events.py | 16 +++-- synapse/types.py | 19 +++--- tests/test_utils/event_injection.py | 5 +- 16 files changed, 114 insertions(+), 99 deletions(-) create mode 100644 changelog.d/8281.misc diff --git a/changelog.d/8281.misc b/changelog.d/8281.misc new file mode 100644 index 000000000000..74357120a726 --- /dev/null +++ b/changelog.d/8281.misc @@ -0,0 +1 @@ +Change `StreamToken.room_key` to be a `RoomStreamToken` instance. 
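For readers unfamiliar with the type that `room_key` now carries, the following is a simplified, self-contained approximation of a room stream token, showing the two serialised forms (`s<stream>` and `t<topological>-<stream>`) that the diffs in this patch parse and produce. The real class lives in `synapse.types` and differs in detail; this sketch is only meant to make the surrounding changes easier to follow.

```python
from typing import Optional

import attr

@attr.s(slots=True, frozen=True)
class RoomStreamToken:
    # A position in the room event stream: an always-present stream ordering, plus
    # an optional topological ordering used for historic (back-pagination) tokens.
    topological = attr.ib(type=Optional[int])
    stream = attr.ib(type=int)

    @classmethod
    def parse(cls, string: str) -> "RoomStreamToken":
        if string.startswith("s"):   # live token, e.g. "s641"
            return cls(None, int(string[1:]))
        if string.startswith("t"):   # historic token, e.g. "t3-641"
            topo, _, stream = string[1:].partition("-")
            return cls(int(topo), int(stream))
        raise ValueError("Invalid room stream token %r" % (string,))

    def __str__(self) -> str:
        if self.topological is not None:
            return "t%d-%d" % (self.topological, self.stream)
        return "s%d" % (self.stream,)

assert str(RoomStreamToken.parse("s641")) == "s641"
assert RoomStreamToken.parse("t3-641").topological == 3
```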
diff --git a/mypy.ini b/mypy.ini index 460392377e01..79867814328b 100644 --- a/mypy.ini +++ b/mypy.ini @@ -46,10 +46,12 @@ files = synapse/server_notices, synapse/spam_checker_api, synapse/state, + synapse/storage/databases/main/events.py, synapse/storage/databases/main/stream.py, synapse/storage/databases/main/ui_auth.py, synapse/storage/database.py, synapse/storage/engines, + synapse/storage/persist_events.py, synapse/storage/state.py, synapse/storage/util, synapse/streams, diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 918d0e037cc5..5e5a64037dfe 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -125,8 +125,8 @@ async def export_user_data(self, user_id, writer): else: stream_ordering = room.stream_ordering - from_key = str(RoomStreamToken(0, 0)) - to_key = str(RoomStreamToken(None, stream_ordering)) + from_key = RoomStreamToken(0, 0) + to_key = RoomStreamToken(None, stream_ordering) written_events = set() # Events that we've processed in this room @@ -153,7 +153,7 @@ async def export_user_data(self, user_id, writer): if not events: break - from_key = events[-1].internal_metadata.after + from_key = RoomStreamToken.parse(events[-1].internal_metadata.after) events = await filter_events_for_client(self.storage, user_id, events) diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 643d71a710b5..4b0a4f96ccb0 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -29,6 +29,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import ( RoomStreamToken, + StreamToken, get_domain_from_id, get_verify_key_from_cross_signing_key, ) @@ -104,18 +105,15 @@ async def get_device(self, user_id: str, device_id: str) -> Dict[str, Any]: @trace @measure_func("device.get_user_ids_changed") - async def get_user_ids_changed(self, user_id, from_token): + async def get_user_ids_changed(self, user_id: str, from_token: StreamToken): """Get list of users that have had the devices updated, or have newly joined a room, that `user_id` may be interested in. 
- - Args: - user_id (str) - from_token (StreamToken) """ set_tag("user_id", user_id) set_tag("from_token", from_token) - now_room_key = await self.store.get_room_events_max_id() + now_room_id = self.store.get_room_max_stream_ordering() + now_room_key = RoomStreamToken(None, now_room_id) room_ids = await self.store.get_rooms_for_user(user_id) @@ -142,7 +140,7 @@ async def get_user_ids_changed(self, user_id, from_token): ) rooms_changed.update(event.room_id for event in member_events) - stream_ordering = RoomStreamToken.parse_stream_token(from_token.room_key).stream + stream_ordering = from_token.room_key.stream possibly_changed = set(changed) possibly_left = set() diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index ddb8f0712bae..ba4828c713db 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -25,7 +25,7 @@ from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.storage.roommember import RoomsForUser from synapse.streams.config import PaginationConfig -from synapse.types import JsonDict, Requester, StreamToken, UserID +from synapse.types import JsonDict, Requester, RoomStreamToken, StreamToken, UserID from synapse.util import unwrapFirstError from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.response_cache import ResponseCache @@ -167,7 +167,7 @@ async def handle_room(event: RoomsForUser): self.state_handler.get_current_state, event.room_id ) elif event.membership == Membership.LEAVE: - room_end_token = "s%d" % (event.stream_ordering,) + room_end_token = RoomStreamToken(None, event.stream_ordering,) deferred_room_state = run_in_background( self.state_store.get_state_for_events, [event.event_id] ) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 276de8f8d090..e54e2b322bae 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -973,6 +973,7 @@ async def persist_and_notify_client_event( This should only be run on the instance in charge of persisting events. """ assert self._is_event_writer + assert self.storage.persistence is not None if ratelimit: # We check if this is a room admin redacting an event so that we diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index ec17d3d8884a..d929a68f7d8c 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -344,7 +344,7 @@ async def get_messages( # gets called. raise Exception("limit not set") - room_token = RoomStreamToken.parse(from_token.room_key) + room_token = from_token.room_key with await self.pagination_lock.read(room_id): ( @@ -381,7 +381,7 @@ async def get_messages( if leave_token.topological < max_topo: from_token = from_token.copy_and_replace( - "room_key", leave_token_str + "room_key", leave_token ) await self.hs.get_handlers().federation_handler.maybe_backfill( diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index a29305f65525..53d85ab97d5f 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1091,20 +1091,19 @@ def __init__(self, hs: "HomeServer"): async def get_new_events( self, user: UserID, - from_key: str, + from_key: RoomStreamToken, limit: int, room_ids: List[str], is_guest: bool, explicit_room_id: Optional[str] = None, - ) -> Tuple[List[EventBase], str]: + ) -> Tuple[List[EventBase], RoomStreamToken]: # We just ignore the key for now. 
to_key = self.get_current_key() - from_token = RoomStreamToken.parse(from_key) - if from_token.topological: + if from_key.topological: logger.warning("Stream has topological part!!!! %r", from_key) - from_key = "s%s" % (from_token.stream,) + from_key = RoomStreamToken(None, from_key.stream) app_service = self.store.get_app_service_by_user_id(user.to_string()) if app_service: @@ -1133,14 +1132,14 @@ async def get_new_events( events[:] = events[:limit] if events: - end_key = events[-1].internal_metadata.after + end_key = RoomStreamToken.parse(events[-1].internal_metadata.after) else: end_key = to_key return (events, end_key) - def get_current_key(self) -> str: - return "s%d" % (self.store.get_room_max_stream_ordering(),) + def get_current_key(self) -> RoomStreamToken: + return RoomStreamToken(None, self.store.get_room_max_stream_ordering()) def get_current_key_for_room(self, room_id: str) -> Awaitable[str]: return self.store.get_room_events_max_id(room_id) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index cc47e8b62c8f..a615c7c2f0f1 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -378,7 +378,7 @@ async def ephemeral_by_room( sync_config = sync_result_builder.sync_config with Measure(self.clock, "ephemeral_by_room"): - typing_key = since_token.typing_key if since_token else "0" + typing_key = since_token.typing_key if since_token else 0 room_ids = sync_result_builder.joined_room_ids @@ -402,7 +402,7 @@ async def ephemeral_by_room( event_copy = {k: v for (k, v) in event.items() if k != "room_id"} ephemeral_by_room.setdefault(room_id, []).append(event_copy) - receipt_key = since_token.receipt_key if since_token else "0" + receipt_key = since_token.receipt_key if since_token else 0 receipt_source = self.event_sources.sources["receipt"] receipts, receipt_key = await receipt_source.get_new_events( @@ -533,7 +533,7 @@ async def _load_filtered_recents( if len(recents) > timeline_limit: limited = True recents = recents[-timeline_limit:] - room_key = recents[0].internal_metadata.before + room_key = RoomStreamToken.parse(recents[0].internal_metadata.before) prev_batch_token = now_token.copy_and_replace("room_key", room_key) @@ -1322,6 +1322,7 @@ async def _generate_sync_entry_for_presence( is_guest=sync_config.is_guest, include_offline=include_offline, ) + assert presence_key sync_result_builder.now_token = now_token.copy_and_replace( "presence_key", presence_key ) @@ -1484,7 +1485,7 @@ async def _have_rooms_changed( if rooms_changed: return True - stream_id = RoomStreamToken.parse_stream_token(since_token.room_key).stream + stream_id = since_token.room_key.stream for room_id in sync_result_builder.joined_room_ids: if self.store.has_room_changed_since(room_id, stream_id): return True @@ -1750,7 +1751,7 @@ async def _get_all_rooms( continue leave_token = now_token.copy_and_replace( - "room_key", "s%d" % (event.stream_ordering,) + "room_key", RoomStreamToken(None, event.stream_ordering) ) room_entries.append( RoomSyncResultBuilder( diff --git a/synapse/notifier.py b/synapse/notifier.py index 16f19c938eef..12cd84b27bfe 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -25,6 +25,7 @@ Set, Tuple, TypeVar, + Union, ) from prometheus_client import Counter @@ -41,7 +42,7 @@ from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import run_as_background_process from synapse.streams.config import PaginationConfig -from synapse.types import Collection, StreamToken, UserID +from synapse.types import Collection, RoomStreamToken, 
StreamToken, UserID from synapse.util.async_helpers import ObservableDeferred, timeout_deferred from synapse.util.metrics import Measure from synapse.visibility import filter_events_for_client @@ -111,7 +112,9 @@ def __init__( with PreserveLoggingContext(): self.notify_deferred = ObservableDeferred(defer.Deferred()) - def notify(self, stream_key: str, stream_id: int, time_now_ms: int): + def notify( + self, stream_key: str, stream_id: Union[int, RoomStreamToken], time_now_ms: int, + ): """Notify any listeners for this user of a new event from an event source. Args: @@ -294,7 +297,12 @@ def _notify_pending_new_room_events(self, max_room_stream_id: int): rooms.add(event.room_id) if users or rooms: - self.on_new_event("room_key", max_room_stream_id, users=users, rooms=rooms) + self.on_new_event( + "room_key", + RoomStreamToken(None, max_room_stream_id), + users=users, + rooms=rooms, + ) self._on_updated_room_token(max_room_stream_id) def _on_updated_room_token(self, max_room_stream_id: int): @@ -329,7 +337,7 @@ async def _notify_pusher_pool(self, max_room_stream_id: int): def on_new_event( self, stream_key: str, - new_token: int, + new_token: Union[int, RoomStreamToken], users: Collection[UserID] = [], rooms: Collection[str] = [], ): diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 8e5d78f6f7e7..bbff3c8d5b4c 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -47,6 +47,9 @@ def __init__(self, hs, stores: Databases): # interfaces. self.main = stores.main - self.persistence = EventsPersistenceStorage(hs, stores) self.purge_events = PurgeEventsStorage(hs, stores) self.state = StateGroupStorage(hs, stores) + + self.persistence = None + if stores.persist_events: + self.persistence = EventsPersistenceStorage(hs, stores) diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index b3d27a2ee70d..9cd1403b38da 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -213,7 +213,7 @@ async def _get_events_which_are_prevs(self, event_ids: Iterable[str]) -> List[st Returns: Filtered event ids """ - results = [] + results = [] # type: List[str] def _get_events_which_are_prevs_txn(txn, batch): sql = """ @@ -631,7 +631,9 @@ def _update_forward_extremities_txn( ) @classmethod - def _filter_events_and_contexts_for_duplicates(cls, events_and_contexts): + def _filter_events_and_contexts_for_duplicates( + cls, events_and_contexts: List[Tuple[EventBase, EventContext]] + ) -> List[Tuple[EventBase, EventContext]]: """Ensure that we don't have the same event twice. Pick the earliest non-outlier if there is one, else the earliest one. 
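The "earliest non-outlier if there is one, else the earliest one" rule in the docstring above is easy to misread, so here is a standalone illustration of just that selection rule (the function name and the bare boolean flag are invented for the example; the real method works on `(EventBase, EventContext)` pairs as typed above):

    from collections import OrderedDict

    def pick_one_per_event_id(seen):
        """seen: iterable of (event_id, is_outlier) pairs in arrival order.

        Keep the earliest non-outlier entry per event_id if any exists,
        otherwise keep the earliest entry of any kind.
        """
        chosen = OrderedDict()
        for event_id, is_outlier in seen:
            prev = chosen.get(event_id)
            if prev is None:
                chosen[event_id] = is_outlier
            elif prev and not is_outlier:
                # The earlier pick was an outlier and this one is not,
                # so the non-outlier wins despite arriving later.
                chosen[event_id] = is_outlier
        return list(chosen.items())

    # "$a" appears twice: the first copy is an outlier, so the later
    # non-outlier copy replaces it; "$b" keeps its only entry.
    assert pick_one_per_event_id(
        [("$a", True), ("$b", False), ("$a", False)]
    ) == [("$a", False), ("$b", False)]

The hunk below only adds a type comment to the `OrderedDict` used by the real implementation; the selection behaviour itself is unchanged by this patch.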
@@ -641,7 +643,9 @@ def _filter_events_and_contexts_for_duplicates(cls, events_and_contexts): Returns: list[(EventBase, EventContext)]: filtered list """ - new_events_and_contexts = OrderedDict() + new_events_and_contexts = ( + OrderedDict() + ) # type: OrderedDict[str, Tuple[EventBase, EventContext]] for event, context in events_and_contexts: prev_event_context = new_events_and_contexts.get(event.event_id) if prev_event_context: @@ -655,7 +659,12 @@ def _filter_events_and_contexts_for_duplicates(cls, events_and_contexts): new_events_and_contexts[event.event_id] = (event, context) return list(new_events_and_contexts.values()) - def _update_room_depths_txn(self, txn, events_and_contexts, backfilled): + def _update_room_depths_txn( + self, + txn, + events_and_contexts: List[Tuple[EventBase, EventContext]], + backfilled: bool, + ): """Update min_depth for each room Args: @@ -664,7 +673,7 @@ def _update_room_depths_txn(self, txn, events_and_contexts, backfilled): we are persisting backfilled (bool): True if the events were backfilled """ - depth_updates = {} + depth_updates = {} # type: Dict[str, int] for event, context in events_and_contexts: # Remove the any existing cache entries for the event_ids txn.call_after(self.store._invalidate_get_event_cache, event.event_id) @@ -1436,7 +1445,7 @@ def _update_backward_extremeties(self, txn, events): Forward extremities are handled when we first start persisting the events. """ - events_by_room = {} + events_by_room = {} # type: Dict[str, List[EventBase]] for ev in events: events_by_room.setdefault(ev.room_id, []).append(ev) diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 08a13a8b47b6..2e955187524e 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -310,11 +310,11 @@ def get_room_min_stream_ordering(self) -> int: async def get_room_events_stream_for_rooms( self, room_ids: Collection[str], - from_key: str, - to_key: str, + from_key: RoomStreamToken, + to_key: RoomStreamToken, limit: int = 0, order: str = "DESC", - ) -> Dict[str, Tuple[List[EventBase], str]]: + ) -> Dict[str, Tuple[List[EventBase], RoomStreamToken]]: """Get new room events in stream ordering since `from_key`. Args: @@ -333,9 +333,9 @@ async def get_room_events_stream_for_rooms( - list of recent events in the room - stream ordering key for the start of the chunk of events returned. """ - from_id = RoomStreamToken.parse_stream_token(from_key).stream - - room_ids = self._events_stream_cache.get_entities_changed(room_ids, from_id) + room_ids = self._events_stream_cache.get_entities_changed( + room_ids, from_key.stream + ) if not room_ids: return {} @@ -364,16 +364,12 @@ async def get_room_events_stream_for_rooms( return results def get_rooms_that_changed( - self, room_ids: Collection[str], from_key: str + self, room_ids: Collection[str], from_key: RoomStreamToken ) -> Set[str]: """Given a list of rooms and a token, return rooms where there may have been changes. 
- - Args: - room_ids - from_key: The room_key portion of a StreamToken """ - from_id = RoomStreamToken.parse_stream_token(from_key).stream + from_id = from_key.stream return { room_id for room_id in room_ids @@ -383,11 +379,11 @@ def get_rooms_that_changed( async def get_room_events_stream_for_room( self, room_id: str, - from_key: str, - to_key: str, + from_key: RoomStreamToken, + to_key: RoomStreamToken, limit: int = 0, order: str = "DESC", - ) -> Tuple[List[EventBase], str]: + ) -> Tuple[List[EventBase], RoomStreamToken]: """Get new room events in stream ordering since `from_key`. Args: @@ -408,8 +404,8 @@ async def get_room_events_stream_for_room( if from_key == to_key: return [], from_key - from_id = RoomStreamToken.parse_stream_token(from_key).stream - to_id = RoomStreamToken.parse_stream_token(to_key).stream + from_id = from_key.stream + to_id = to_key.stream has_changed = self._events_stream_cache.has_entity_changed(room_id, from_id) @@ -441,7 +437,7 @@ def f(txn): ret.reverse() if rows: - key = "s%d" % min(r.stream_ordering for r in rows) + key = RoomStreamToken(None, min(r.stream_ordering for r in rows)) else: # Assume we didn't get anything because there was nothing to # get. @@ -450,10 +446,10 @@ def f(txn): return ret, key async def get_membership_changes_for_user( - self, user_id: str, from_key: str, to_key: str + self, user_id: str, from_key: RoomStreamToken, to_key: RoomStreamToken ) -> List[EventBase]: - from_id = RoomStreamToken.parse_stream_token(from_key).stream - to_id = RoomStreamToken.parse_stream_token(to_key).stream + from_id = from_key.stream + to_id = to_key.stream if from_key == to_key: return [] @@ -491,8 +487,8 @@ def f(txn): return ret async def get_recent_events_for_room( - self, room_id: str, limit: int, end_token: str - ) -> Tuple[List[EventBase], str]: + self, room_id: str, limit: int, end_token: RoomStreamToken + ) -> Tuple[List[EventBase], RoomStreamToken]: """Get the most recent events in the room in topological ordering. Args: @@ -518,8 +514,8 @@ async def get_recent_events_for_room( return (events, token) async def get_recent_event_ids_for_room( - self, room_id: str, limit: int, end_token: str - ) -> Tuple[List[_EventDictReturn], str]: + self, room_id: str, limit: int, end_token: RoomStreamToken + ) -> Tuple[List[_EventDictReturn], RoomStreamToken]: """Get the most recent events in the room in topological ordering. Args: @@ -535,13 +531,11 @@ async def get_recent_event_ids_for_room( if limit == 0: return [], end_token - parsed_end_token = RoomStreamToken.parse(end_token) - rows, token = await self.db_pool.runInteraction( "get_recent_event_ids_for_room", self._paginate_room_events_txn, room_id, - from_token=parsed_end_token, + from_token=end_token, limit=limit, ) @@ -619,17 +613,17 @@ def get_stream_id_for_event_txn( allow_none=allow_none, ) - async def get_stream_token_for_event(self, event_id: str) -> str: + async def get_stream_token_for_event(self, event_id: str) -> RoomStreamToken: """The stream token for an event Args: event_id: The id of the event to look up a stream token for. Raises: StoreError if the event wasn't in the database. Returns: - A "s%d" stream token. + A stream token. 
""" stream_id = await self.get_stream_id_for_event(event_id) - return "s%d" % (stream_id,) + return RoomStreamToken(None, stream_id) async def get_topological_token_for_event(self, event_id: str) -> str: """The stream token for an event @@ -954,7 +948,7 @@ def _paginate_room_events_txn( direction: str = "b", limit: int = -1, event_filter: Optional[Filter] = None, - ) -> Tuple[List[_EventDictReturn], str]: + ) -> Tuple[List[_EventDictReturn], RoomStreamToken]: """Returns list of events before or after a given token. Args: @@ -1054,17 +1048,17 @@ def _paginate_room_events_txn( # TODO (erikj): We should work out what to do here instead. next_token = to_token if to_token else from_token - return rows, str(next_token) + return rows, next_token async def paginate_room_events( self, room_id: str, - from_key: str, - to_key: Optional[str] = None, + from_key: RoomStreamToken, + to_key: Optional[RoomStreamToken] = None, direction: str = "b", limit: int = -1, event_filter: Optional[Filter] = None, - ) -> Tuple[List[EventBase], str]: + ) -> Tuple[List[EventBase], RoomStreamToken]: """Returns list of events before or after a given token. Args: @@ -1083,17 +1077,12 @@ async def paginate_room_events( and `to_key`). """ - parsed_from_key = RoomStreamToken.parse(from_key) - parsed_to_key = None - if to_key: - parsed_to_key = RoomStreamToken.parse(to_key) - rows, token = await self.db_pool.runInteraction( "paginate_room_events", self._paginate_room_events_txn, room_id, - parsed_from_key, - parsed_to_key, + from_key, + to_key, direction, limit, event_filter, diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py index dbaeef91dd5c..d89f6ed12868 100644 --- a/synapse/storage/persist_events.py +++ b/synapse/storage/persist_events.py @@ -18,7 +18,7 @@ import itertools import logging from collections import deque, namedtuple -from typing import Iterable, List, Optional, Set, Tuple +from typing import Dict, Iterable, List, Optional, Set, Tuple from prometheus_client import Counter, Histogram @@ -31,7 +31,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.databases import Databases from synapse.storage.databases.main.events import DeltaState -from synapse.types import StateMap +from synapse.types import Collection, StateMap from synapse.util.async_helpers import ObservableDeferred from synapse.util.metrics import Measure @@ -185,6 +185,8 @@ def __init__(self, hs, stores: Databases): # store for now. self.main_store = stores.main self.state_store = stores.state + + assert stores.persist_events self.persist_events_store = stores.persist_events self._clock = hs.get_clock() @@ -208,7 +210,7 @@ async def persist_events( Returns: the stream ordering of the latest persisted event """ - partitioned = {} + partitioned = {} # type: Dict[str, List[Tuple[EventBase, EventContext]]] for event, ctx in events_and_contexts: partitioned.setdefault(event.room_id, []).append((event, ctx)) @@ -305,7 +307,9 @@ async def _persist_events( # Work out the new "current state" for each room. # We do this by working out what the new extremities are and then # calculating the state from that. 
- events_by_room = {} + events_by_room = ( + {} + ) # type: Dict[str, List[Tuple[EventBase, EventContext]]] for event, context in chunk: events_by_room.setdefault(event.room_id, []).append( (event, context) @@ -436,7 +440,7 @@ async def _calculate_new_extremities( self, room_id: str, event_contexts: List[Tuple[EventBase, EventContext]], - latest_event_ids: List[str], + latest_event_ids: Collection[str], ): """Calculates the new forward extremities for a room given events to persist. @@ -470,7 +474,7 @@ async def _calculate_new_extremities( # Remove any events which are prev_events of any existing events. existing_prevs = await self.persist_events_store._get_events_which_are_prevs( result - ) + ) # type: Collection[str] result.difference_update(existing_prevs) # Finally handle the case where the new events have soft-failed prev diff --git a/synapse/types.py b/synapse/types.py index ba4533503880..dc09448bdc8d 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -425,7 +425,9 @@ def __str__(self) -> str: @attr.s(slots=True, frozen=True) class StreamToken: - room_key = attr.ib(type=str) + room_key = attr.ib( + type=RoomStreamToken, validator=attr.validators.instance_of(RoomStreamToken) + ) presence_key = attr.ib(type=int) typing_key = attr.ib(type=int) receipt_key = attr.ib(type=int) @@ -445,21 +447,16 @@ def from_string(cls, string): while len(keys) < len(attr.fields(cls)): # i.e. old token from before receipt_key keys.append("0") - return cls(keys[0], *(int(k) for k in keys[1:])) + return cls(RoomStreamToken.parse(keys[0]), *(int(k) for k in keys[1:])) except Exception: raise SynapseError(400, "Invalid Token") def to_string(self): - return self._SEPARATOR.join([str(k) for k in attr.astuple(self)]) + return self._SEPARATOR.join([str(k) for k in attr.astuple(self, recurse=False)]) @property def room_stream_id(self): - # TODO(markjh): Awful hack to work around hacks in the presence tests - # which assume that the keys are integers. - if type(self.room_key) is int: - return self.room_key - else: - return int(self.room_key[1:].split("-")[-1]) + return self.room_key.stream def is_after(self, other): """Does this token contain events that the other doesn't?""" @@ -475,7 +472,7 @@ def is_after(self, other): or (int(other.groups_key) < int(self.groups_key)) ) - def copy_and_advance(self, key, new_value): + def copy_and_advance(self, key, new_value) -> "StreamToken": """Advance the given key in the token to a new value if and only if the new value is after the old value. """ @@ -491,7 +488,7 @@ def copy_and_advance(self, key, new_value): else: return self - def copy_and_replace(self, key, new_value): + def copy_and_replace(self, key, new_value) -> "StreamToken": return attr.evolve(self, **{key: new_value}) diff --git a/tests/test_utils/event_injection.py b/tests/test_utils/event_injection.py index fb1ca9033670..e93aa84405f5 100644 --- a/tests/test_utils/event_injection.py +++ b/tests/test_utils/event_injection.py @@ -71,7 +71,10 @@ async def inject_event( """ event, context = await create_event(hs, room_version, prev_event_ids, **kwargs) - await hs.get_storage().persistence.persist_event(event, context) + persistence = hs.get_storage().persistence + assert persistence is not None + + await persistence.persist_event(event, context) return event From 04cc249b43e8716513f788b2a4eeb8ede24d19df Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 14 Sep 2020 10:16:41 +0100 Subject: [PATCH 021/245] Add experimental support for sharding event persister. Again. 
(#8294) This is *not* ready for production yet. Caveats: 1. We should write some tests... 2. The stream token that we use for events can get stalled at the minimum position of all writers. This means that new events may not be processed and e.g. sent down sync streams if a writer isn't writing or is slow. --- changelog.d/8294.feature | 1 + synapse/config/_base.py | 21 +++++- synapse/config/_base.pyi | 1 + synapse/config/workers.py | 37 ++++++++--- synapse/handlers/federation.py | 44 +++++++++---- synapse/handlers/message.py | 14 ++-- synapse/handlers/room.py | 14 ++-- synapse/handlers/room_member.py | 7 -- synapse/replication/http/federation.py | 12 +++- synapse/replication/tcp/handler.py | 2 +- synapse/replication/tcp/streams/events.py | 4 +- synapse/storage/databases/__init__.py | 2 +- .../databases/main/event_federation.py | 2 +- synapse/storage/databases/main/events.py | 12 ++-- .../storage/databases/main/events_worker.py | 66 +++++++++++++------ .../delta/58/14events_instance_name.sql | 16 +++++ .../58/14events_instance_name.sql.postgres | 26 ++++++++ synapse/storage/util/id_generators.py | 10 +-- 18 files changed, 211 insertions(+), 80 deletions(-) create mode 100644 changelog.d/8294.feature create mode 100644 synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql create mode 100644 synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql.postgres diff --git a/changelog.d/8294.feature b/changelog.d/8294.feature new file mode 100644 index 000000000000..b363e929ea8c --- /dev/null +++ b/changelog.d/8294.feature @@ -0,0 +1 @@ +Add experimental support for sharding event persister. diff --git a/synapse/config/_base.py b/synapse/config/_base.py index ad5ab6ad62ac..bb9bf8598dcc 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -832,11 +832,26 @@ class ShardedWorkerHandlingConfig: def should_handle(self, instance_name: str, key: str) -> bool: """Whether this instance is responsible for handling the given key. """ - - # If multiple instances are not defined we always return true. + # If multiple instances are not defined we always return true if not self.instances or len(self.instances) == 1: return True + return self.get_instance(key) == instance_name + + def get_instance(self, key: str) -> str: + """Get the instance responsible for handling the given key. + + Note: For things like federation sending the config for which instance + is sending is known only to the sender instance if there is only one. + Therefore `should_handle` should be used where possible. + """ + + if not self.instances: + return "master" + + if len(self.instances) == 1: + return self.instances[0] + # We shard by taking the hash, modulo it by the number of instances and # then checking whether this instance matches the instance at that # index. @@ -846,7 +861,7 @@ def should_handle(self, instance_name: str, key: str) -> bool: dest_hash = sha256(key.encode("utf8")).digest() dest_int = int.from_bytes(dest_hash, byteorder="little") remainder = dest_int % (len(self.instances)) - return self.instances[remainder] == instance_name + return self.instances[remainder] __all__ = ["Config", "RootConfig", "ShardedWorkerHandlingConfig"] diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi index eb911e8f9f45..b8faafa9bdce 100644 --- a/synapse/config/_base.pyi +++ b/synapse/config/_base.pyi @@ -142,3 +142,4 @@ class ShardedWorkerHandlingConfig: instances: List[str] def __init__(self, instances: List[str]) -> None: ... 
def should_handle(self, instance_name: str, key: str) -> bool: ... + def get_instance(self, key: str) -> str: ... diff --git a/synapse/config/workers.py b/synapse/config/workers.py index c784a7150897..f23e42cdf98c 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -13,12 +13,24 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import List, Union + import attr from ._base import Config, ConfigError, ShardedWorkerHandlingConfig from .server import ListenerConfig, parse_listener_def +def _instance_to_list_converter(obj: Union[str, List[str]]) -> List[str]: + """Helper for allowing parsing a string or list of strings to a config + option expecting a list of strings. + """ + + if isinstance(obj, str): + return [obj] + return obj + + @attr.s class InstanceLocationConfig: """The host and port to talk to an instance via HTTP replication. @@ -33,11 +45,13 @@ class WriterLocations: """Specifies the instances that write various streams. Attributes: - events: The instance that writes to the event and backfill streams. - events: The instance that writes to the typing stream. + events: The instances that write to the event and backfill streams. + typing: The instance that writes to the typing stream. """ - events = attr.ib(default="master", type=str) + events = attr.ib( + default=["master"], type=List[str], converter=_instance_to_list_converter + ) typing = attr.ib(default="master", type=str) @@ -105,15 +119,18 @@ def read_config(self, config, **kwargs): writers = config.get("stream_writers") or {} self.writers = WriterLocations(**writers) - # Check that the configured writer for events and typing also appears in + # Check that the configured writers for events and typing also appears in # `instance_map`. for stream in ("events", "typing"): - instance = getattr(self.writers, stream) - if instance != "master" and instance not in self.instance_map: - raise ConfigError( - "Instance %r is configured to write %s but does not appear in `instance_map` config." - % (instance, stream) - ) + instances = _instance_to_list_converter(getattr(self.writers, stream)) + for instance in instances: + if instance != "master" and instance not in self.instance_map: + raise ConfigError( + "Instance %r is configured to write %s but does not appear in `instance_map` config." 
+ % (instance, stream) + ) + + self.events_shard_config = ShardedWorkerHandlingConfig(self.writers.events) def generate_config_section(self, config_dir_path, server_name, **kwargs): return """\ diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index c195eba83001..a5734bebab3b 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -896,7 +896,8 @@ async def backfill(self, dest, room_id, limit, extremities): ) ) - await self._handle_new_events(dest, ev_infos, backfilled=True) + if ev_infos: + await self._handle_new_events(dest, room_id, ev_infos, backfilled=True) # Step 2: Persist the rest of the events in the chunk one by one events.sort(key=lambda e: e.depth) @@ -1189,7 +1190,7 @@ async def get_event(event_id: str): event_infos.append(_NewEventInfo(event, None, auth)) await self._handle_new_events( - destination, event_infos, + destination, room_id, event_infos, ) def _sanity_check_event(self, ev): @@ -1336,15 +1337,15 @@ async def do_invite_join( ) max_stream_id = await self._persist_auth_tree( - origin, auth_chain, state, event, room_version_obj + origin, room_id, auth_chain, state, event, room_version_obj ) # We wait here until this instance has seen the events come down # replication (if we're using replication) as the below uses caches. - # - # TODO: Currently the events stream is written to from master await self._replication.wait_for_stream_position( - self.config.worker.writers.events, "events", max_stream_id + self.config.worker.events_shard_config.get_instance(room_id), + "events", + max_stream_id, ) # Check whether this room is the result of an upgrade of a room we already know @@ -1593,7 +1594,7 @@ async def on_invite_request( ) context = await self.state_handler.compute_event_context(event) - await self.persist_events_and_notify([(event, context)]) + await self.persist_events_and_notify(event.room_id, [(event, context)]) return event @@ -1620,7 +1621,9 @@ async def do_remotely_reject_invite( await self.federation_client.send_leave(host_list, event) context = await self.state_handler.compute_event_context(event) - stream_id = await self.persist_events_and_notify([(event, context)]) + stream_id = await self.persist_events_and_notify( + event.room_id, [(event, context)] + ) return event, stream_id @@ -1868,7 +1871,7 @@ async def _handle_new_event( ) await self.persist_events_and_notify( - [(event, context)], backfilled=backfilled + event.room_id, [(event, context)], backfilled=backfilled ) except Exception: run_in_background( @@ -1881,6 +1884,7 @@ async def _handle_new_event( async def _handle_new_events( self, origin: str, + room_id: str, event_infos: Iterable[_NewEventInfo], backfilled: bool = False, ) -> None: @@ -1912,6 +1916,7 @@ async def prep(ev_info: _NewEventInfo): ) await self.persist_events_and_notify( + room_id, [ (ev_info.event, context) for ev_info, context in zip(event_infos, contexts) @@ -1922,6 +1927,7 @@ async def prep(ev_info: _NewEventInfo): async def _persist_auth_tree( self, origin: str, + room_id: str, auth_events: List[EventBase], state: List[EventBase], event: EventBase, @@ -1936,6 +1942,7 @@ async def _persist_auth_tree( Args: origin: Where the events came from + room_id, auth_events state event @@ -2010,17 +2017,20 @@ async def _persist_auth_tree( events_to_context[e.event_id].rejected = RejectedReason.AUTH_ERROR await self.persist_events_and_notify( + room_id, [ (e, events_to_context[e.event_id]) for e in itertools.chain(auth_events, state) - ] + ], ) new_event_context = await 
self.state_handler.compute_event_context( event, old_state=state ) - return await self.persist_events_and_notify([(event, new_event_context)]) + return await self.persist_events_and_notify( + room_id, [(event, new_event_context)] + ) async def _prep_event( self, @@ -2871,6 +2881,7 @@ async def _check_key_revocation(self, public_key, url): async def persist_events_and_notify( self, + room_id: str, event_and_contexts: Sequence[Tuple[EventBase, EventContext]], backfilled: bool = False, ) -> int: @@ -2878,14 +2889,19 @@ async def persist_events_and_notify( necessary. Args: - event_and_contexts: + room_id: The room ID of events being persisted. + event_and_contexts: Sequence of events with their associated + context that should be persisted. All events must belong to + the same room. backfilled: Whether these events are a result of backfilling or not """ - if self.config.worker.writers.events != self._instance_name: + instance = self.config.worker.events_shard_config.get_instance(room_id) + if instance != self._instance_name: result = await self._send_events( - instance_name=self.config.worker.writers.events, + instance_name=instance, store=self.store, + room_id=room_id, event_and_contexts=event_and_contexts, backfilled=backfilled, ) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index e54e2b322bae..a8fe5cf4e2eb 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -376,9 +376,8 @@ def __init__(self, hs: "HomeServer"): self.notifier = hs.get_notifier() self.config = hs.config self.require_membership_for_aliases = hs.config.require_membership_for_aliases - self._is_event_writer = ( - self.config.worker.writers.events == hs.get_instance_name() - ) + self._events_shard_config = self.config.worker.events_shard_config + self._instance_name = hs.get_instance_name() self.room_invite_state_types = self.hs.config.room_invite_state_types @@ -902,9 +901,10 @@ async def handle_new_client_event( try: # If we're a worker we need to hit out to the master. - if not self._is_event_writer: + writer_instance = self._events_shard_config.get_instance(event.room_id) + if writer_instance != self._instance_name: result = await self.send_event( - instance_name=self.config.worker.writers.events, + instance_name=writer_instance, event_id=event.event_id, store=self.store, requester=requester, @@ -972,8 +972,10 @@ async def persist_and_notify_client_event( This should only be run on the instance in charge of persisting events. """ - assert self._is_event_writer assert self.storage.persistence is not None + assert self._events_shard_config.should_handle( + self._instance_name, event.room_id + ) if ratelimit: # We check if this is a room admin redacting an event so that we diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 53d85ab97d5f..eeade6ad3f13 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -804,7 +804,9 @@ async def create_room( # Always wait for room creation to progate before returning await self._replication.wait_for_stream_position( - self.hs.config.worker.writers.events, "events", last_stream_id + self.hs.config.worker.events_shard_config.get_instance(room_id), + "events", + last_stream_id, ) return result, last_stream_id @@ -1259,10 +1261,10 @@ async def shutdown_room( # We now wait for the create room to come back in via replication so # that we can assume that all the joins/invites have propogated before # we try and auto join below. 
- # - # TODO: Currently the events stream is written to from master await self._replication.wait_for_stream_position( - self.hs.config.worker.writers.events, "events", stream_id + self.hs.config.worker.events_shard_config.get_instance(new_room_id), + "events", + stream_id, ) else: new_room_id = None @@ -1292,7 +1294,9 @@ async def shutdown_room( # Wait for leave to come in over replication before trying to forget. await self._replication.wait_for_stream_position( - self.hs.config.worker.writers.events, "events", stream_id + self.hs.config.worker.events_shard_config.get_instance(room_id), + "events", + stream_id, ) await self.room_member_handler.forget(target_requester.user, room_id) diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 100f335b8027..01a6e882629f 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -82,13 +82,6 @@ def __init__(self, hs: "HomeServer"): self._enable_lookup = hs.config.enable_3pid_lookup self.allow_per_room_profiles = self.config.allow_per_room_profiles - self._event_stream_writer_instance = hs.config.worker.writers.events - self._is_on_event_persistence_instance = ( - self._event_stream_writer_instance == hs.get_instance_name() - ) - if self._is_on_event_persistence_instance: - self.persist_event_storage = hs.get_storage().persistence - self._join_rate_limiter_local = Ratelimiter( clock=self.clock, rate_hz=hs.config.ratelimiting.rc_joins_local.per_second, diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py index 6b563151487b..5c8be747e140 100644 --- a/synapse/replication/http/federation.py +++ b/synapse/replication/http/federation.py @@ -65,10 +65,11 @@ def __init__(self, hs): self.federation_handler = hs.get_handlers().federation_handler @staticmethod - async def _serialize_payload(store, event_and_contexts, backfilled): + async def _serialize_payload(store, room_id, event_and_contexts, backfilled): """ Args: store + room_id (str) event_and_contexts (list[tuple[FrozenEvent, EventContext]]) backfilled (bool): Whether or not the events are the result of backfilling @@ -88,7 +89,11 @@ async def _serialize_payload(store, event_and_contexts, backfilled): } ) - payload = {"events": event_payloads, "backfilled": backfilled} + payload = { + "events": event_payloads, + "backfilled": backfilled, + "room_id": room_id, + } return payload @@ -96,6 +101,7 @@ async def _handle_request(self, request): with Measure(self.clock, "repl_fed_send_events_parse"): content = parse_json_object_from_request(request) + room_id = content["room_id"] backfilled = content["backfilled"] event_payloads = content["events"] @@ -120,7 +126,7 @@ async def _handle_request(self, request): logger.info("Got %d events from federation", len(event_and_contexts)) max_stream_id = await self.federation_handler.persist_events_and_notify( - event_and_contexts, backfilled + room_id, event_and_contexts, backfilled ) return 200, {"max_stream_id": max_stream_id} diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index 1c303f3a46d8..b323841f73cb 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -109,7 +109,7 @@ def __init__(self, hs): if isinstance(stream, (EventsStream, BackfillStream)): # Only add EventStream and BackfillStream as a source on the # instance in charge of event persistence. 
- if hs.config.worker.writers.events == hs.get_instance_name(): + if hs.get_instance_name() in hs.config.worker.writers.events: self._streams_to_replicate.append(stream) continue diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py index f929fc3954ea..ccc7ca30d8a9 100644 --- a/synapse/replication/tcp/streams/events.py +++ b/synapse/replication/tcp/streams/events.py @@ -19,7 +19,7 @@ import attr -from ._base import Stream, StreamUpdateResult, Token, current_token_without_instance +from ._base import Stream, StreamUpdateResult, Token """Handling of the 'events' replication stream @@ -117,7 +117,7 @@ def __init__(self, hs): self._store = hs.get_datastore() super().__init__( hs.get_instance_name(), - current_token_without_instance(self._store.get_current_events_token), + self._store._stream_id_gen.get_current_token_for_writer, self._update_function, ) diff --git a/synapse/storage/databases/__init__.py b/synapse/storage/databases/__init__.py index 985b12df9132..aa5d490624ce 100644 --- a/synapse/storage/databases/__init__.py +++ b/synapse/storage/databases/__init__.py @@ -75,7 +75,7 @@ def __init__(self, main_store_class, hs): # If we're on a process that can persist events also # instantiate a `PersistEventsStore` - if hs.config.worker.writers.events == hs.get_instance_name(): + if hs.get_instance_name() in hs.config.worker.writers.events: persist_events = PersistEventsStore(hs, database, main) if "state" in database_config.databases: diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index 0b69aa6a940a..4c3c162acf54 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -438,7 +438,7 @@ async def _get_forward_extremeties_for_room(self, room_id, stream_ordering): """ if stream_ordering <= self.stream_ordering_month_ago: - raise StoreError(400, "stream_ordering too old") + raise StoreError(400, "stream_ordering too old %s" % (stream_ordering,)) sql = """ SELECT event_id FROM stream_ordering_to_exterm diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 9cd1403b38da..9a80f419e343 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -32,7 +32,7 @@ from synapse.storage._base import db_to_json, make_in_list_sql_clause from synapse.storage.database import DatabasePool, LoggingTransaction from synapse.storage.databases.main.search import SearchEntry -from synapse.storage.util.id_generators import StreamIdGenerator +from synapse.storage.util.id_generators import MultiWriterIdGenerator from synapse.types import StateMap, get_domain_from_id from synapse.util.frozenutils import frozendict_json_encoder from synapse.util.iterutils import batch_iter @@ -97,18 +97,21 @@ def __init__( self.store = main_data_store self.database_engine = db.engine self._clock = hs.get_clock() + self._instance_name = hs.get_instance_name() self._ephemeral_messages_enabled = hs.config.enable_ephemeral_messages self.is_mine_id = hs.is_mine_id # Ideally we'd move these ID gens here, unfortunately some other ID # generators are chained off them so doing so is a bit of a PITA. 
- self._backfill_id_gen = self.store._backfill_id_gen # type: StreamIdGenerator - self._stream_id_gen = self.store._stream_id_gen # type: StreamIdGenerator + self._backfill_id_gen = ( + self.store._backfill_id_gen + ) # type: MultiWriterIdGenerator + self._stream_id_gen = self.store._stream_id_gen # type: MultiWriterIdGenerator # This should only exist on instances that are configured to write assert ( - hs.config.worker.writers.events == hs.get_instance_name() + hs.get_instance_name() in hs.config.worker.writers.events ), "Can only instantiate EventsStore on master" async def _persist_events_and_state_updates( @@ -809,6 +812,7 @@ def event_dict(event): table="events", values=[ { + "instance_name": self._instance_name, "stream_ordering": event.internal_metadata.stream_ordering, "topological_ordering": event.depth, "depth": event.depth, diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index a7a73cc3d8bb..17f5997b89f0 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -42,7 +42,8 @@ from synapse.replication.tcp.streams.events import EventsStream from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause from synapse.storage.database import DatabasePool -from synapse.storage.util.id_generators import StreamIdGenerator +from synapse.storage.engines import PostgresEngine +from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator from synapse.types import Collection, get_domain_from_id from synapse.util.caches.descriptors import Cache, cached from synapse.util.iterutils import batch_iter @@ -78,27 +79,54 @@ class EventsWorkerStore(SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): super(EventsWorkerStore, self).__init__(database, db_conn, hs) - if hs.config.worker.writers.events == hs.get_instance_name(): - # We are the process in charge of generating stream ids for events, - # so instantiate ID generators based on the database - self._stream_id_gen = StreamIdGenerator( - db_conn, "events", "stream_ordering", + if isinstance(database.engine, PostgresEngine): + # If we're using Postgres than we can use `MultiWriterIdGenerator` + # regardless of whether this process writes to the streams or not. + self._stream_id_gen = MultiWriterIdGenerator( + db_conn=db_conn, + db=database, + instance_name=hs.get_instance_name(), + table="events", + instance_column="instance_name", + id_column="stream_ordering", + sequence_name="events_stream_seq", ) - self._backfill_id_gen = StreamIdGenerator( - db_conn, - "events", - "stream_ordering", - step=-1, - extra_tables=[("ex_outlier_stream", "event_stream_ordering")], + self._backfill_id_gen = MultiWriterIdGenerator( + db_conn=db_conn, + db=database, + instance_name=hs.get_instance_name(), + table="events", + instance_column="instance_name", + id_column="stream_ordering", + sequence_name="events_backfill_stream_seq", + positive=False, ) else: - # Another process is in charge of persisting events and generating - # stream IDs: rely on the replication streams to let us know which - # IDs we can process. - self._stream_id_gen = SlavedIdTracker(db_conn, "events", "stream_ordering") - self._backfill_id_gen = SlavedIdTracker( - db_conn, "events", "stream_ordering", step=-1 - ) + # We shouldn't be running in worker mode with SQLite, but its useful + # to support it for unit tests. 
+ # + # If this process is the writer than we need to use + # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets + # updated over replication. (Multiple writers are not supported for + # SQLite). + if hs.get_instance_name() in hs.config.worker.writers.events: + self._stream_id_gen = StreamIdGenerator( + db_conn, "events", "stream_ordering", + ) + self._backfill_id_gen = StreamIdGenerator( + db_conn, + "events", + "stream_ordering", + step=-1, + extra_tables=[("ex_outlier_stream", "event_stream_ordering")], + ) + else: + self._stream_id_gen = SlavedIdTracker( + db_conn, "events", "stream_ordering" + ) + self._backfill_id_gen = SlavedIdTracker( + db_conn, "events", "stream_ordering", step=-1 + ) self._get_event_cache = Cache( "*getEvent*", diff --git a/synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql b/synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql new file mode 100644 index 000000000000..98ff76d7091c --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql @@ -0,0 +1,16 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +ALTER TABLE events ADD COLUMN instance_name TEXT; diff --git a/synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql.postgres b/synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql.postgres new file mode 100644 index 000000000000..97c1e6a0c5d7 --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql.postgres @@ -0,0 +1,26 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE SEQUENCE IF NOT EXISTS events_stream_seq; + +SELECT setval('events_stream_seq', ( + SELECT COALESCE(MAX(stream_ordering), 1) FROM events +)); + +CREATE SEQUENCE IF NOT EXISTS events_backfill_stream_seq; + +SELECT setval('events_backfill_stream_seq', ( + SELECT COALESCE(-MIN(stream_ordering), 1) FROM events +)); diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 2a66b3ad4e97..1de2b915877a 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -240,8 +240,12 @@ def __init__( # gaps should be relatively rare it's still worth doing the book keeping # that allows us to skip forwards when there are gapless runs of # positions. 
+ # + # We start at 1 here as a) the first generated stream ID will be 2, and + # b) other parts of the code assume that stream IDs are strictly greater + # than 0. self._persisted_upto_position = ( - min(self._current_positions.values()) if self._current_positions else 0 + min(self._current_positions.values()) if self._current_positions else 1 ) self._known_persisted_positions = [] # type: List[int] @@ -398,9 +402,7 @@ def get_current_token(self) -> int: equal to it have been successfully persisted. """ - # Currently we don't support this operation, as it's not obvious how to - # condense the stream positions of multiple writers into a single int. - raise NotImplementedError() + return self.get_persisted_upto_position() def get_current_token_for_writer(self, instance_name: str) -> int: """Returns the position of the given writer. From 6605470bfb8944d369b8fc73195a380b95b6de9d Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 14 Sep 2020 09:05:36 -0400 Subject: [PATCH 022/245] Improve SAML error messages (#8248) --- UPGRADE.rst | 14 ++ changelog.d/8248.feature | 1 + docs/sample_config.yaml | 30 +---- synapse/config/saml2_config.py | 34 +---- synapse/handlers/oidc_handler.py | 4 +- synapse/handlers/saml_handler.py | 169 +++++++++++++++--------- synapse/res/templates/saml_error.html | 52 -------- synapse/res/templates/sso_error.html | 43 +++++- synapse/rest/saml2/response_resource.py | 16 +-- 9 files changed, 178 insertions(+), 185 deletions(-) create mode 100644 changelog.d/8248.feature delete mode 100644 synapse/res/templates/saml_error.html diff --git a/UPGRADE.rst b/UPGRADE.rst index fc8982ddfe64..49e86e628fa4 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -114,6 +114,20 @@ request to with the query parameters from the original link, presented as a URL-encoded form. See the file itself for more details. +Updated Single Sign-on HTML Templates +------------------------------------- + +The ``saml_error.html`` template was removed from Synapse and replaced with the +``sso_error.html`` template. If your Synapse is configured to use SAML and a +custom ``sso_redirect_confirm_template_dir`` configuration then any customisations +of the ``saml_error.html`` template will need to be merged into the ``sso_error.html`` +template. These templates are similar, but the parameters are slightly different: + +* The ``msg`` parameter should be renamed to ``error_description``. +* There is no longer a ``code`` parameter for the response code. +* A string ``error`` parameter is available that includes a short hint of why a + user is seeing the error page. + Upgrading to v1.18.0 ==================== diff --git a/changelog.d/8248.feature b/changelog.d/8248.feature new file mode 100644 index 000000000000..f3c4a74bc79b --- /dev/null +++ b/changelog.d/8248.feature @@ -0,0 +1 @@ +Consolidate the SSO error template across all configuration. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 2a5b2e093537..fb04ff283dee 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1485,11 +1485,14 @@ trusted_key_servers: # At least one of `sp_config` or `config_path` must be set in this section to # enable SAML login. # -# (You will probably also want to set the following options to `false` to +# You will probably also want to set the following options to `false` to # disable the regular login/registration flows: # * enable_registration # * password_config.enabled # +# You will also want to investigate the settings under the "sso" configuration +# section below. 
+# # Once SAML support is enabled, a metadata file will be exposed at # https://:/_matrix/saml2/metadata.xml, which you may be able to # use to configure your SAML IdP with. Alternatively, you can manually configure @@ -1612,31 +1615,6 @@ saml2_config: # - attribute: department # value: "sales" - # Directory in which Synapse will try to find the template files below. - # If not set, default templates from within the Synapse package will be used. - # - # DO NOT UNCOMMENT THIS SETTING unless you want to customise the templates. - # If you *do* uncomment it, you will need to make sure that all the templates - # below are in the directory. - # - # Synapse will look for the following templates in this directory: - # - # * HTML page to display to users if something goes wrong during the - # authentication process: 'saml_error.html'. - # - # When rendering, this template is given the following variables: - # * code: an HTML error code corresponding to the error that is being - # returned (typically 400 or 500) - # - # * msg: a textual message describing the error. - # - # The variables will automatically be HTML-escaped. - # - # You can see the default templates at: - # https://github.com/matrix-org/synapse/tree/master/synapse/res/templates - # - #template_dir: "res/templates" - # OpenID Connect integration. The following settings can be used to make Synapse # use an OpenID Connect Provider for authentication, instead of its internal diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py index cc7401888b24..99aa8b3bf123 100644 --- a/synapse/config/saml2_config.py +++ b/synapse/config/saml2_config.py @@ -169,10 +169,6 @@ def read_config(self, config, **kwargs): saml2_config.get("saml_session_lifetime", "15m") ) - self.saml2_error_html_template = self.read_templates( - ["saml_error.html"], saml2_config.get("template_dir") - )[0] - def _default_saml_config_dict( self, required_attributes: set, optional_attributes: set ): @@ -225,11 +221,14 @@ def generate_config_section(self, config_dir_path, server_name, **kwargs): # At least one of `sp_config` or `config_path` must be set in this section to # enable SAML login. # - # (You will probably also want to set the following options to `false` to + # You will probably also want to set the following options to `false` to # disable the regular login/registration flows: # * enable_registration # * password_config.enabled # + # You will also want to investigate the settings under the "sso" configuration + # section below. + # # Once SAML support is enabled, a metadata file will be exposed at # https://:/_matrix/saml2/metadata.xml, which you may be able to # use to configure your SAML IdP with. Alternatively, you can manually configure @@ -351,31 +350,6 @@ def generate_config_section(self, config_dir_path, server_name, **kwargs): # value: "staff" # - attribute: department # value: "sales" - - # Directory in which Synapse will try to find the template files below. - # If not set, default templates from within the Synapse package will be used. - # - # DO NOT UNCOMMENT THIS SETTING unless you want to customise the templates. - # If you *do* uncomment it, you will need to make sure that all the templates - # below are in the directory. - # - # Synapse will look for the following templates in this directory: - # - # * HTML page to display to users if something goes wrong during the - # authentication process: 'saml_error.html'. 
- # - # When rendering, this template is given the following variables: - # * code: an HTML error code corresponding to the error that is being - # returned (typically 400 or 500) - # - # * msg: a textual message describing the error. - # - # The variables will automatically be HTML-escaped. - # - # You can see the default templates at: - # https://github.com/matrix-org/synapse/tree/master/synapse/res/templates - # - #template_dir: "res/templates" """ % { "config_dir_path": config_dir_path } diff --git a/synapse/handlers/oidc_handler.py b/synapse/handlers/oidc_handler.py index 1b06f3173fa0..4230dbaf998b 100644 --- a/synapse/handlers/oidc_handler.py +++ b/synapse/handlers/oidc_handler.py @@ -131,10 +131,10 @@ def __init__(self, hs: "HomeServer"): def _render_error( self, request, error: str, error_description: Optional[str] = None ) -> None: - """Renders the error template and respond with it. + """Render the error template and respond to the request with it. This is used to show errors to the user. The template of this page can - be found under ``synapse/res/templates/sso_error.html``. + be found under `synapse/res/templates/sso_error.html`. Args: request: The incoming request from the browser. diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py index 66b063f9918f..8715abd4d1c7 100644 --- a/synapse/handlers/saml_handler.py +++ b/synapse/handlers/saml_handler.py @@ -21,9 +21,10 @@ import saml2.response from saml2.client import Saml2Client -from synapse.api.errors import AuthError, SynapseError +from synapse.api.errors import SynapseError from synapse.config import ConfigError from synapse.config.saml2_config import SamlAttributeRequirement +from synapse.http.server import respond_with_html from synapse.http.servlet import parse_string from synapse.http.site import SynapseRequest from synapse.module_api import ModuleApi @@ -41,6 +42,10 @@ logger = logging.getLogger(__name__) +class MappingException(Exception): + """Used to catch errors when mapping the SAML2 response to a user.""" + + @attr.s class Saml2SessionData: """Data we track about SAML2 sessions""" @@ -68,6 +73,7 @@ def __init__(self, hs: "synapse.server.HomeServer"): hs.config.saml2_grandfathered_mxid_source_attribute ) self._saml2_attribute_requirements = hs.config.saml2.attribute_requirements + self._error_template = hs.config.sso_error_template # plugin to do custom mapping from saml response to mxid self._user_mapping_provider = hs.config.saml2_user_mapping_provider_class( @@ -84,6 +90,25 @@ def __init__(self, hs: "synapse.server.HomeServer"): # a lock on the mappings self._mapping_lock = Linearizer(name="saml_mapping", clock=self._clock) + def _render_error( + self, request, error: str, error_description: Optional[str] = None + ) -> None: + """Render the error template and respond to the request with it. + + This is used to show errors to the user. The template of this page can + be found under `synapse/res/templates/sso_error.html`. + + Args: + request: The incoming request from the browser. + We'll respond with an HTML page describing the error. + error: A technical identifier for this error. + error_description: A human-readable description of the error. 
+ """ + html = self._error_template.render( + error=error, error_description=error_description + ) + respond_with_html(request, 400, html) + def handle_redirect_request( self, client_redirect_url: bytes, ui_auth_session_id: Optional[str] = None ) -> bytes: @@ -134,49 +159,6 @@ async def handle_saml_response(self, request: SynapseRequest) -> None: # the dict. self.expire_sessions() - # Pull out the user-agent and IP from the request. - user_agent = request.requestHeaders.getRawHeaders(b"User-Agent", default=[b""])[ - 0 - ].decode("ascii", "surrogateescape") - ip_address = self.hs.get_ip_from_request(request) - - user_id, current_session = await self._map_saml_response_to_user( - resp_bytes, relay_state, user_agent, ip_address - ) - - # Complete the interactive auth session or the login. - if current_session and current_session.ui_auth_session_id: - await self._auth_handler.complete_sso_ui_auth( - user_id, current_session.ui_auth_session_id, request - ) - - else: - await self._auth_handler.complete_sso_login(user_id, request, relay_state) - - async def _map_saml_response_to_user( - self, - resp_bytes: str, - client_redirect_url: str, - user_agent: str, - ip_address: str, - ) -> Tuple[str, Optional[Saml2SessionData]]: - """ - Given a sample response, retrieve the cached session and user for it. - - Args: - resp_bytes: The SAML response. - client_redirect_url: The redirect URL passed in by the client. - user_agent: The user agent of the client making the request. - ip_address: The IP address of the client making the request. - - Returns: - Tuple of the user ID and SAML session associated with this response. - - Raises: - SynapseError if there was a problem with the response. - RedirectException: some mapping providers may raise this if they need - to redirect to an interstitial page. - """ try: saml2_auth = self._saml_client.parse_authn_request_response( resp_bytes, @@ -189,12 +171,23 @@ async def _map_saml_response_to_user( # in the (user-visible) exception message, so let's log the exception here # so we can track down the session IDs later. logger.warning(str(e)) - raise SynapseError(400, "Unexpected SAML2 login.") + self._render_error( + request, "unsolicited_response", "Unexpected SAML2 login." + ) + return except Exception as e: - raise SynapseError(400, "Unable to parse SAML2 response: %s." % (e,)) + self._render_error( + request, + "invalid_response", + "Unable to parse SAML2 response: %s." % (e,), + ) + return if saml2_auth.not_signed: - raise SynapseError(400, "SAML2 response was not signed.") + self._render_error( + request, "unsigned_respond", "SAML2 response was not signed." + ) + return logger.debug("SAML2 response: %s", saml2_auth.origxml) for assertion in saml2_auth.assertions: @@ -213,15 +206,73 @@ async def _map_saml_response_to_user( saml2_auth.in_response_to, None ) + # Ensure that the attributes of the logged in user meet the required + # attributes. for requirement in self._saml2_attribute_requirements: - _check_attribute_requirement(saml2_auth.ava, requirement) + if not _check_attribute_requirement(saml2_auth.ava, requirement): + self._render_error( + request, "unauthorised", "You are not authorised to log in here." + ) + return + + # Pull out the user-agent and IP from the request. 
+ user_agent = request.requestHeaders.getRawHeaders(b"User-Agent", default=[b""])[ + 0 + ].decode("ascii", "surrogateescape") + ip_address = self.hs.get_ip_from_request(request) + + # Call the mapper to register/login the user + try: + user_id = await self._map_saml_response_to_user( + saml2_auth, relay_state, user_agent, ip_address + ) + except MappingException as e: + logger.exception("Could not map user") + self._render_error(request, "mapping_error", str(e)) + return + + # Complete the interactive auth session or the login. + if current_session and current_session.ui_auth_session_id: + await self._auth_handler.complete_sso_ui_auth( + user_id, current_session.ui_auth_session_id, request + ) + + else: + await self._auth_handler.complete_sso_login(user_id, request, relay_state) + + async def _map_saml_response_to_user( + self, + saml2_auth: saml2.response.AuthnResponse, + client_redirect_url: str, + user_agent: str, + ip_address: str, + ) -> str: + """ + Given a SAML response, retrieve the user ID for it and possibly register the user. + + Args: + saml2_auth: The parsed SAML2 response. + client_redirect_url: The redirect URL passed in by the client. + user_agent: The user agent of the client making the request. + ip_address: The IP address of the client making the request. + + Returns: + The user ID associated with this response. + + Raises: + MappingException if there was a problem mapping the response to a user. + RedirectException: some mapping providers may raise this if they need + to redirect to an interstitial page. + """ remote_user_id = self._user_mapping_provider.get_remote_user_id( saml2_auth, client_redirect_url ) if not remote_user_id: - raise Exception("Failed to extract remote user id from SAML response") + raise MappingException( + "Failed to extract remote user id from SAML response" + ) with (await self._mapping_lock.queue(self._auth_provider_id)): # first of all, check if we already have a mapping for this user @@ -235,7 +286,7 @@ async def _map_saml_response_to_user( ) if registered_user_id is not None: logger.info("Found existing mapping %s", registered_user_id) - return registered_user_id, current_session + return registered_user_id # backwards-compatibility hack: see if there is an existing user with a # suitable mapping from the uid @@ -260,7 +311,7 @@ async def _map_saml_response_to_user( await self._datastore.record_user_external_id( self._auth_provider_id, remote_user_id, registered_user_id ) - return registered_user_id, current_session + return registered_user_id # Map saml response to user attributes using the configured mapping provider for i in range(1000): @@ -277,7 +328,7 @@ async def _map_saml_response_to_user( localpart = attribute_dict.get("mxid_localpart") if not localpart: - raise Exception( + raise MappingException( "Error parsing SAML2 response: SAML mapping provider plugin " "did not return a mxid_localpart value" ) @@ -294,8 +345,8 @@ async def _map_saml_response_to_user( else: # Unable to generate a username in 1000 iterations # Break and return error to the user - raise SynapseError( - 500, "Unable to generate a Matrix ID from the SAML response" + raise MappingException( + "Unable to generate a Matrix ID from the SAML response" ) logger.info("Mapped SAML user to local part %s", localpart) @@ -310,7 +361,7 @@ async def _map_saml_response_to_user( await self._datastore.record_user_external_id( self._auth_provider_id, remote_user_id, registered_user_id ) - return registered_user_id, current_session + return registered_user_id def 
expire_sessions(self): expire_before = self._clock.time_msec() - self._saml2_session_lifetime @@ -323,11 +374,11 @@ def expire_sessions(self): del self._outstanding_requests_dict[reqid] -def _check_attribute_requirement(ava: dict, req: SamlAttributeRequirement): +def _check_attribute_requirement(ava: dict, req: SamlAttributeRequirement) -> bool: values = ava.get(req.attribute, []) for v in values: if v == req.value: - return + return True logger.info( "SAML2 attribute %s did not match required value '%s' (was '%s')", @@ -335,7 +386,7 @@ def _check_attribute_requirement(ava: dict, req: SamlAttributeRequirement): req.value, values, ) - raise AuthError(403, "You are not authorized to log in here.") + return False DOT_REPLACE_PATTERN = re.compile( @@ -390,7 +441,7 @@ def get_remote_user_id( return saml_response.ava["uid"][0] except KeyError: logger.warning("SAML2 response lacks a 'uid' attestation") - raise SynapseError(400, "'uid' not in SAML2 response") + raise MappingException("'uid' not in SAML2 response") def saml_response_to_user_attributes( self, diff --git a/synapse/res/templates/saml_error.html b/synapse/res/templates/saml_error.html deleted file mode 100644 index 01cd9bdaf3c5..000000000000 --- a/synapse/res/templates/saml_error.html +++ /dev/null @@ -1,52 +0,0 @@ - - - - - SSO login error - - -{# a 403 means we have actively rejected their login #} -{% if code == 403 %} -

-    You are not allowed to log in here.
-{% else %}
-    There was an error during authentication:
-    {{ msg }}
-    If you are seeing this page after clicking a link sent to you via email, make
-    sure you only click the confirmation link once, and that you open the
-    validation link in the same client you're logging in from.
-    Try logging in again from your Matrix client and if the problem persists
-    please contact the server's administrator.
-{% endif %}
diff --git a/synapse/res/templates/sso_error.html b/synapse/res/templates/sso_error.html
index 43a211386bec..af8459719ae4 100644
--- a/synapse/res/templates/sso_error.html
+++ b/synapse/res/templates/sso_error.html
@@ -5,14 +5,49 @@
     SSO error
-    Oops! Something went wrong during authentication.
+{# If an error of unauthorised is returned it means we have actively rejected their login #}
+{% if error == "unauthorised" %}
+    You are not allowed to log in here.
+{% else %}
+    There was an error during authentication:
+    {{ error_description }}
+    If you are seeing this page after clicking a link sent to you via email, make
+    sure you only click the confirmation link once, and that you open the
+    validation link in the same client you're logging in from.
     Try logging in again from your Matrix client and if the problem persists
     please contact the server's administrator.
     Error: {{ error }}
-    {% if error_description %}
-    {{ error_description }}
- {% endif %} + + +{% endif %} diff --git a/synapse/rest/saml2/response_resource.py b/synapse/rest/saml2/response_resource.py index c10188a5d72d..f6668fb5e3bf 100644 --- a/synapse/rest/saml2/response_resource.py +++ b/synapse/rest/saml2/response_resource.py @@ -13,10 +13,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from twisted.python import failure -from synapse.api.errors import SynapseError -from synapse.http.server import DirectServeHtmlResource, return_html_error +from synapse.http.server import DirectServeHtmlResource class SAML2ResponseResource(DirectServeHtmlResource): @@ -27,21 +25,15 @@ class SAML2ResponseResource(DirectServeHtmlResource): def __init__(self, hs): super().__init__() self._saml_handler = hs.get_saml_handler() - self._error_html_template = hs.config.saml2.saml2_error_html_template async def _async_render_GET(self, request): # We're not expecting any GET request on that resource if everything goes right, # but some IdPs sometimes end up responding with a 302 redirect on this endpoint. # In this case, just tell the user that something went wrong and they should # try to authenticate again. - f = failure.Failure( - SynapseError(400, "Unexpected GET request on /saml2/authn_response") + self._saml_handler._render_error( + request, "unexpected_get", "Unexpected GET request on /saml2/authn_response" ) - return_html_error(f, request, self._error_html_template) async def _async_render_POST(self, request): - try: - await self._saml_handler.handle_saml_response(request) - except Exception: - f = failure.Failure() - return_html_error(f, request, self._error_html_template) + await self._saml_handler.handle_saml_response(request) From b82d68c0bd952131836d00994c3c2a79b3d3a267 Mon Sep 17 00:00:00 2001 From: Tulir Asokan Date: Mon, 14 Sep 2020 17:07:04 +0300 Subject: [PATCH 023/245] Add the topic and avatar to the room details admin API (#8305) --- changelog.d/8305.feature | 1 + docs/admin_api/rooms.md | 4 ++++ synapse/storage/databases/main/room.py | 3 ++- tests/rest/admin/test_room.py | 2 ++ 4 files changed, 9 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8305.feature diff --git a/changelog.d/8305.feature b/changelog.d/8305.feature new file mode 100644 index 000000000000..862dfdf95986 --- /dev/null +++ b/changelog.d/8305.feature @@ -0,0 +1 @@ +Add the room topic and avatar to the room details admin API. diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md index 0f267d2b7bbe..fa9b914fa7c8 100644 --- a/docs/admin_api/rooms.md +++ b/docs/admin_api/rooms.md @@ -275,6 +275,8 @@ The following fields are possible in the JSON response body: * `room_id` - The ID of the room. * `name` - The name of the room. +* `topic` - The topic of the room. +* `avatar` - The `mxc` URI to the avatar of the room. * `canonical_alias` - The canonical (main) alias address of the room. * `joined_members` - How many users are currently in the room. * `joined_local_members` - How many local users are currently in the room. 
@@ -304,6 +306,8 @@ Response: { "room_id": "!mscvqgqpHYjBGDxNym:matrix.org", "name": "Music Theory", + "avatar": "mxc://matrix.org/AQDaVFlbkQoErdOgqWRgiGSV", + "topic": "Theory, Composition, Notation, Analysis", "canonical_alias": "#musictheory:matrix.org", "joined_members": 127 "joined_local_members": 2, diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 717df97301bb..127588ce4c4d 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -104,7 +104,8 @@ def get_room_with_stats_txn(txn, room_id): curr.local_users_in_room AS joined_local_members, rooms.room_version AS version, rooms.creator, state.encryption, state.is_federatable AS federatable, rooms.is_public AS public, state.join_rules, state.guest_access, - state.history_visibility, curr.current_state_events AS state_events + state.history_visibility, curr.current_state_events AS state_events, + state.avatar, state.topic FROM rooms LEFT JOIN room_stats_state state USING (room_id) LEFT JOIN room_stats_current curr USING (room_id) diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 408c568a277c..6dfc709dc511 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -1174,6 +1174,8 @@ def test_single_room(self): self.assertIn("room_id", channel.json_body) self.assertIn("name", channel.json_body) + self.assertIn("topic", channel.json_body) + self.assertIn("avatar", channel.json_body) self.assertIn("canonical_alias", channel.json_body) self.assertIn("joined_members", channel.json_body) self.assertIn("joined_local_members", channel.json_body) From d2a3eb04a49d83485c53e677e4934f7f5491330e Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 14 Sep 2020 11:46:58 -0400 Subject: [PATCH 024/245] Fix typos in comments. --- synapse/logging/opentracing.py | 2 +- synapse/replication/tcp/resource.py | 2 +- synapse/rest/client/v2_alpha/account.py | 2 +- synapse/rest/key/v2/remote_key_resource.py | 2 +- synapse/rest/media/v1/thumbnailer.py | 4 ++-- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 7df0aa197dc5..e58850faff86 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -509,7 +509,7 @@ def start_active_span_from_edu( ] # For some reason jaeger decided not to support the visualization of multiple parent - # spans or explicitely show references. I include the span context as a tag here as + # spans or explicitly show references. I include the span context as a tag here as # an aid to people debugging but it's really not an ideal solution. references += _references diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 04d894fb3d3e..687984e7a8c8 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -93,7 +93,7 @@ def on_notifier_poke(self): """ if not self.command_handler.connected(): # Don't bother if nothing is listening. 
We still need to advance - # the stream tokens otherwise they'll fall beihind forever + # the stream tokens otherwise they'll fall behind forever for stream in self.streams: stream.discard_updates_and_advance() return diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index c6cb9deb2bfa..ade97a6708c7 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -301,7 +301,7 @@ async def on_POST(self, request): requester = await self.auth.get_user_by_req(request) - # allow ASes to dectivate their own users + # allow ASes to deactivate their own users if requester.app_service: await self._deactivate_account_handler.deactivate_account( requester.user.to_string(), erase diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index 5db7f81c2dde..f843f0245461 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -35,7 +35,7 @@ class RemoteKey(DirectServeJsonResource): Supports individual GET APIs and a bulk query POST API. - Requsts: + Requests: GET /_matrix/key/v2/query/remote.server.example.com HTTP/1.1 diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py index 457ad6031ce2..32a8e4f9605a 100644 --- a/synapse/rest/media/v1/thumbnailer.py +++ b/synapse/rest/media/v1/thumbnailer.py @@ -83,7 +83,7 @@ def aspect(self, max_width, max_height): Args: max_width: The largest possible width. - max_height: The larget possible height. + max_height: The largest possible height. """ if max_width * self.height < max_height * self.width: @@ -117,7 +117,7 @@ def crop(self, width, height, output_type): Args: max_width: The largest possible width. - max_height: The larget possible height. + max_height: The largest possible height. Returns: BytesIO: the bytes of the encoded image ready to be written to disk From aec294ee0d0f2fa4ccef57085d670b8939de3669 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 14 Sep 2020 12:50:06 -0400 Subject: [PATCH 025/245] Use slots in attrs classes where possible (#8296) slots use less memory (and attribute access is faster) while slightly limiting the flexibility of the class attributes. This focuses on objects which are instantiated "often" and for short periods of time. --- changelog.d/8296.misc | 1 + synapse/handlers/acme_issuing_service.py | 2 +- synapse/handlers/auth.py | 2 +- synapse/handlers/e2e_keys.py | 2 +- synapse/handlers/federation.py | 2 +- synapse/handlers/saml_handler.py | 2 +- synapse/handlers/sync.py | 34 ++++++------------- .../http/federation/well_known_resolver.py | 2 +- synapse/http/matrixfederationclient.py | 2 +- synapse/logging/context.py | 4 +-- synapse/metrics/__init__.py | 4 +-- synapse/notifier.py | 4 +-- synapse/replication/tcp/streams/_base.py | 4 +-- synapse/rest/media/v1/preview_url_resource.py | 2 +- synapse/state/__init__.py | 2 +- .../storage/databases/main/end_to_end_keys.py | 2 +- .../databases/main/event_push_actions.py | 2 +- synapse/storage/databases/main/ui_auth.py | 2 +- synapse/storage/prepare_database.py | 2 +- synapse/storage/relations.py | 2 +- synapse/util/__init__.py | 2 +- synapse/util/caches/__init__.py | 2 +- 22 files changed, 33 insertions(+), 50 deletions(-) create mode 100644 changelog.d/8296.misc diff --git a/changelog.d/8296.misc b/changelog.d/8296.misc new file mode 100644 index 000000000000..f593a5b34778 --- /dev/null +++ b/changelog.d/8296.misc @@ -0,0 +1 @@ +Use slotted classes where possible. 
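[Editor's note: the rationale in this commit message — slotted classes use less memory and have faster attribute access, at the cost of not being able to attach undeclared attributes — can be seen in a small standalone sketch. The SlottedPoint/DictPoint names below are invented for illustration and are not part of the patch.]

    import attr

    @attr.s(slots=True)
    class SlottedPoint:
        x = attr.ib(type=int)
        y = attr.ib(type=int)

    @attr.s
    class DictPoint:
        x = attr.ib(type=int)
        y = attr.ib(type=int)

    p = SlottedPoint(1, 2)
    q = DictPoint(1, 2)

    # A slotted instance has no per-instance __dict__, which is where the
    # memory saving and slightly faster attribute lookup come from.
    assert not hasattr(p, "__dict__")
    assert hasattr(q, "__dict__")

    # The trade-off: attributes that were not declared cannot be bolted on.
    try:
        p.z = 3
    except AttributeError:
        pass  # forbidden on the slots=True class
    q.z = 3  # still allowed on the ordinary attrs class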
diff --git a/synapse/handlers/acme_issuing_service.py b/synapse/handlers/acme_issuing_service.py index 69650ff221a9..7294649d717a 100644 --- a/synapse/handlers/acme_issuing_service.py +++ b/synapse/handlers/acme_issuing_service.py @@ -76,7 +76,7 @@ def create_issuing_service(reactor, acme_url, account_key_file, well_known_resou ) -@attr.s +@attr.s(slots=True) @implementer(ICertificateStore) class ErsatzStore: """ diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 90189869cc4a..4e658d9a4879 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -1235,7 +1235,7 @@ def add_query_param_to_url(url: str, param_name: str, param: Any): return urllib.parse.urlunparse(url_parts) -@attr.s +@attr.s(slots=True) class MacaroonGenerator: hs = attr.ib() diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index d629c7c16c0d..dd40fd129936 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -1201,7 +1201,7 @@ def _one_time_keys_match(old_key_json, new_key): return old_key == new_key_copy -@attr.s +@attr.s(slots=True) class SignatureListItem: """An item in the signature list as used by upload_signatures_for_device_keys. """ diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index a5734bebab3b..262901363f63 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -86,7 +86,7 @@ logger = logging.getLogger(__name__) -@attr.s +@attr.s(slots=True) class _NewEventInfo: """Holds information about a received event, ready for passing to _handle_new_events diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py index 8715abd4d1c7..285c481a9604 100644 --- a/synapse/handlers/saml_handler.py +++ b/synapse/handlers/saml_handler.py @@ -46,7 +46,7 @@ class MappingException(Exception): """Used to catch errors when mapping the SAML2 response to a user.""" -@attr.s +@attr.s(slots=True) class Saml2SessionData: """Data we track about SAML2 sessions""" diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index a615c7c2f0f1..9b3a4f638b13 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -89,14 +89,12 @@ class TimelineBatch: events = attr.ib(type=List[EventBase]) limited = attr.ib(bool) - def __nonzero__(self) -> bool: + def __bool__(self) -> bool: """Make the result appear empty if there are no updates. This is used to tell if room needs to be part of the sync result. """ return bool(self.events) - __bool__ = __nonzero__ # python3 - # We can't freeze this class, because we need to update it after it's instantiated to # update its unread count. This is because we calculate the unread count for a room only @@ -114,7 +112,7 @@ class JoinedSyncResult: summary = attr.ib(type=Optional[JsonDict]) unread_count = attr.ib(type=int) - def __nonzero__(self) -> bool: + def __bool__(self) -> bool: """Make the result appear empty if there are no updates. This is used to tell if room needs to be part of the sync result. """ @@ -127,8 +125,6 @@ def __nonzero__(self) -> bool: # else in the result, we don't need to send it. ) - __bool__ = __nonzero__ # python3 - @attr.s(slots=True, frozen=True) class ArchivedSyncResult: @@ -137,26 +133,22 @@ class ArchivedSyncResult: state = attr.ib(type=StateMap[EventBase]) account_data = attr.ib(type=List[JsonDict]) - def __nonzero__(self) -> bool: + def __bool__(self) -> bool: """Make the result appear empty if there are no updates. This is used to tell if room needs to be part of the sync result. 
""" return bool(self.timeline or self.state or self.account_data) - __bool__ = __nonzero__ # python3 - @attr.s(slots=True, frozen=True) class InvitedSyncResult: room_id = attr.ib(type=str) invite = attr.ib(type=EventBase) - def __nonzero__(self) -> bool: + def __bool__(self) -> bool: """Invited rooms should always be reported to the client""" return True - __bool__ = __nonzero__ # python3 - @attr.s(slots=True, frozen=True) class GroupsSyncResult: @@ -164,11 +156,9 @@ class GroupsSyncResult: invite = attr.ib(type=JsonDict) leave = attr.ib(type=JsonDict) - def __nonzero__(self) -> bool: + def __bool__(self) -> bool: return bool(self.join or self.invite or self.leave) - __bool__ = __nonzero__ # python3 - @attr.s(slots=True, frozen=True) class DeviceLists: @@ -181,13 +171,11 @@ class DeviceLists: changed = attr.ib(type=Collection[str]) left = attr.ib(type=Collection[str]) - def __nonzero__(self) -> bool: + def __bool__(self) -> bool: return bool(self.changed or self.left) - __bool__ = __nonzero__ # python3 - -@attr.s +@attr.s(slots=True) class _RoomChanges: """The set of room entries to include in the sync, plus the set of joined and left room IDs since last sync. @@ -227,7 +215,7 @@ class SyncResult: device_one_time_keys_count = attr.ib(type=JsonDict) groups = attr.ib(type=Optional[GroupsSyncResult]) - def __nonzero__(self) -> bool: + def __bool__(self) -> bool: """Make the result appear empty if there are no updates. This is used to tell if the notifier needs to wait for more events when polling for events. @@ -243,8 +231,6 @@ def __nonzero__(self) -> bool: or self.groups ) - __bool__ = __nonzero__ # python3 - class SyncHandler: def __init__(self, hs: "HomeServer"): @@ -2038,7 +2024,7 @@ def _calculate_state( return {event_id_to_key[e]: e for e in state_ids} -@attr.s +@attr.s(slots=True) class SyncResultBuilder: """Used to help build up a new SyncResult for a user @@ -2074,7 +2060,7 @@ class SyncResultBuilder: to_device = attr.ib(type=List[JsonDict], default=attr.Factory(list)) -@attr.s +@attr.s(slots=True) class RoomSyncResultBuilder: """Stores information needed to create either a `JoinedSyncResult` or `ArchivedSyncResult`. diff --git a/synapse/http/federation/well_known_resolver.py b/synapse/http/federation/well_known_resolver.py index e6f067ca29f0..a306faa267c1 100644 --- a/synapse/http/federation/well_known_resolver.py +++ b/synapse/http/federation/well_known_resolver.py @@ -311,7 +311,7 @@ def _parse_cache_control(headers: Headers) -> Dict[bytes, Optional[bytes]]: return cache_controls -@attr.s() +@attr.s(slots=True) class _FetchWellKnownFailure(Exception): # True if we didn't get a non-5xx HTTP response, i.e. this may or may not be # a temporary failure. 
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 5eaf3151ce24..3c86cbc546db 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -76,7 +76,7 @@ _next_id = 1 -@attr.s(frozen=True) +@attr.s(slots=True, frozen=True) class MatrixFederationRequest: method = attr.ib() """HTTP method diff --git a/synapse/logging/context.py b/synapse/logging/context.py index 22598e02d22a..2e282d9d670e 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -217,11 +217,9 @@ def add_database_scheduled(self, sched_sec): def record_event_fetch(self, event_count): pass - def __nonzero__(self): + def __bool__(self): return False - __bool__ = __nonzero__ # python3 - SENTINEL_CONTEXT = _Sentinel() diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 2643380d9e96..a1f7ca344924 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -59,7 +59,7 @@ def collect(): yield metric -@attr.s(hash=True) +@attr.s(slots=True, hash=True) class LaterGauge: name = attr.ib(type=str) @@ -205,7 +205,7 @@ def _register_with_collector(self): all_gauges[self.name] = self -@attr.s(hash=True) +@attr.s(slots=True, hash=True) class BucketCollector: """ Like a Histogram, but allows buckets to be point-in-time instead of diff --git a/synapse/notifier.py b/synapse/notifier.py index 12cd84b27bfe..a8fd3ef886ce 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -164,11 +164,9 @@ def new_listener(self, token: StreamToken) -> _NotificationListener: class EventStreamResult(namedtuple("EventStreamResult", ("events", "tokens"))): - def __nonzero__(self): + def __bool__(self): return bool(self.events) - __bool__ = __nonzero__ # python3 - class Notifier: """ This class is responsible for notifying any listeners when there are diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index 682d47f402be..1f609f158c14 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -383,7 +383,7 @@ class CachesStream(Stream): the cache on the workers """ - @attr.s + @attr.s(slots=True) class CachesStreamRow: """Stream to inform workers they should invalidate their cache. @@ -441,7 +441,7 @@ class DeviceListsStream(Stream): told about a device update. """ - @attr.s + @attr.s(slots=True) class DeviceListsStreamRow: entity = attr.ib(type=str) diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index cd8c246594cf..987765e8770f 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -102,7 +102,7 @@ _oembed_patterns[re.compile(pattern)] = endpoint -@attr.s +@attr.s(slots=True) class OEmbedResult: # Either HTML content or URL must be provided. html = attr.ib(type=Optional[str]) diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index c7e3015b5dbe..56d6afb86353 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -678,7 +678,7 @@ def resolve_events_with_store( ) -@attr.s +@attr.s(slots=True) class StateResolutionStore: """Interface that allows state resolution algorithms to access the database in well defined way. 
diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index fba3098ea2ab..c8df0bcb3fe5 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -35,7 +35,7 @@ from synapse.handlers.e2e_keys import SignatureListItem -@attr.s +@attr.s(slots=True) class DeviceKeyLookupResult: """The type returned by get_e2e_device_keys_and_signatures""" diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 5233ed83e29b..7805fb814e30 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -969,7 +969,7 @@ def _action_has_highlight(actions): return False -@attr.s +@attr.s(slots=True) class _EventPushSummary: """Summary of pending event push actions for a given user in a given room. Used in _rotate_notifs_before_txn to manipulate results from event_push_actions. diff --git a/synapse/storage/databases/main/ui_auth.py b/synapse/storage/databases/main/ui_auth.py index b89668d561b1..3b9211a6d235 100644 --- a/synapse/storage/databases/main/ui_auth.py +++ b/synapse/storage/databases/main/ui_auth.py @@ -23,7 +23,7 @@ from synapse.util import json_encoder, stringutils -@attr.s +@attr.s(slots=True) class UIAuthSessionData: session_id = attr.ib(type=str) # The dictionary from the client root level, not the 'auth' key. diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index a7f2dfb85061..4957e77f4c27 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -638,7 +638,7 @@ def _get_or_create_schema_state(txn, database_engine): return None -@attr.s() +@attr.s(slots=True) class _DirectoryListing: """Helper class to store schema file name and the absolute path to it. diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py index d30e3f11e7aa..cec96ad6a72e 100644 --- a/synapse/storage/relations.py +++ b/synapse/storage/relations.py @@ -22,7 +22,7 @@ logger = logging.getLogger(__name__) -@attr.s +@attr.s(slots=True) class PaginationChunk: """Returned by relation pagination APIs. diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 60ecc498ab78..d55b93d76385 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -45,7 +45,7 @@ def unwrapFirstError(failure): return failure.value.subFailure -@attr.s +@attr.s(slots=True) class Clock: """ A Clock wraps a Twisted reactor and provides utilities on top of it. 
diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 237f5886582b..8fc05be278fa 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -42,7 +42,7 @@ response_cache_total = Gauge("synapse_util_caches_response_cache:total", "", ["name"]) -@attr.s +@attr.s(slots=True) class CacheMetric: _cache = attr.ib() From 576bc37d318f866f11f71e34ce7190aa45b74780 Mon Sep 17 00:00:00 2001 From: reivilibre <38398653+reivilibre@users.noreply.github.com> Date: Tue, 15 Sep 2020 09:07:19 +0100 Subject: [PATCH 026/245] Catch-up after Federation Outage (split, 4): catch-up loop (#8272) --- changelog.d/8272.bugfix | 1 + .../sender/per_destination_queue.py | 129 +++++++++++++- .../storage/databases/main/transactions.py | 43 ++++- tests/federation/test_federation_catch_up.py | 165 ++++++++++++++++++ tests/handlers/test_typing.py | 5 + 5 files changed, 338 insertions(+), 5 deletions(-) create mode 100644 changelog.d/8272.bugfix diff --git a/changelog.d/8272.bugfix b/changelog.d/8272.bugfix new file mode 100644 index 000000000000..532d0e22fefb --- /dev/null +++ b/changelog.d/8272.bugfix @@ -0,0 +1 @@ +Fix messages over federation being lost until an event is sent into the same room. diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index 9f0852b4a2e2..2657767fd153 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -15,7 +15,7 @@ # limitations under the License. import datetime import logging -from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Tuple +from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Tuple, cast from prometheus_client import Counter @@ -92,6 +92,21 @@ def __init__( self._destination = destination self.transmission_loop_running = False + # True whilst we are sending events that the remote homeserver missed + # because it was unreachable. We start in this state so we can perform + # catch-up at startup. + # New events will only be sent once this is finished, at which point + # _catching_up is flipped to False. + self._catching_up = True # type: bool + + # The stream_ordering of the most recent PDU that was discarded due to + # being in catch-up mode. + self._catchup_last_skipped = 0 # type: int + + # Cache of the last successfully-transmitted stream ordering for this + # destination (we are the only updater so this is safe) + self._last_successful_stream_ordering = None # type: Optional[int] + # a list of pending PDUs self._pending_pdus = [] # type: List[EventBase] @@ -138,7 +153,13 @@ def send_pdu(self, pdu: EventBase) -> None: Args: pdu: pdu to send """ - self._pending_pdus.append(pdu) + if not self._catching_up or self._last_successful_stream_ordering is None: + # only enqueue the PDU if we are not catching up (False) or do not + # yet know if we have anything to catch up (None) + self._pending_pdus.append(pdu) + else: + self._catchup_last_skipped = pdu.internal_metadata.stream_ordering + self.attempt_new_transaction() def send_presence(self, states: Iterable[UserPresenceState]) -> None: @@ -218,6 +239,13 @@ async def _transaction_transmission_loop(self) -> None: # hence why we throw the result away. 
await get_retry_limiter(self._destination, self._clock, self._store) + if self._catching_up: + # we potentially need to catch-up first + await self._catch_up_transmission_loop() + if self._catching_up: + # not caught up yet + return + pending_pdus = [] while True: # We have to keep 2 free slots for presence and rr_edus @@ -351,8 +379,9 @@ async def _transaction_transmission_loop(self) -> None: if e.retry_interval > 60 * 60 * 1000: # we won't retry for another hour! # (this suggests a significant outage) - # We drop pending PDUs and EDUs because otherwise they will + # We drop pending EDUs because otherwise they will # rack up indefinitely. + # (Dropping PDUs is already performed by `_start_catching_up`.) # Note that: # - the EDUs that are being dropped here are those that we can # afford to drop (specifically, only typing notifications, @@ -364,11 +393,12 @@ async def _transaction_transmission_loop(self) -> None: # dropping read receipts is a bit sad but should be solved # through another mechanism, because this is all volatile! - self._pending_pdus = [] self._pending_edus = [] self._pending_edus_keyed = {} self._pending_presence = {} self._pending_rrs = {} + + self._start_catching_up() except FederationDeniedError as e: logger.info(e) except HttpResponseException as e: @@ -378,6 +408,8 @@ async def _transaction_transmission_loop(self) -> None: e.code, e, ) + + self._start_catching_up() except RequestSendFailed as e: logger.warning( "TX [%s] Failed to send transaction: %s", self._destination, e @@ -387,16 +419,96 @@ async def _transaction_transmission_loop(self) -> None: logger.info( "Failed to send event %s to %s", p.event_id, self._destination ) + + self._start_catching_up() except Exception: logger.exception("TX [%s] Failed to send transaction", self._destination) for p in pending_pdus: logger.info( "Failed to send event %s to %s", p.event_id, self._destination ) + + self._start_catching_up() finally: # We want to be *very* sure we clear this after we stop processing self.transmission_loop_running = False + async def _catch_up_transmission_loop(self) -> None: + first_catch_up_check = self._last_successful_stream_ordering is None + + if first_catch_up_check: + # first catchup so get last_successful_stream_ordering from database + self._last_successful_stream_ordering = await self._store.get_destination_last_successful_stream_ordering( + self._destination + ) + + if self._last_successful_stream_ordering is None: + # if it's still None, then this means we don't have the information + # in our database ­ we haven't successfully sent a PDU to this server + # (at least since the introduction of the feature tracking + # last_successful_stream_ordering). + # Sadly, this means we can't do anything here as we don't know what + # needs catching up — so catching up is futile; let's stop. + self._catching_up = False + return + + # get at most 50 catchup room/PDUs + while True: + event_ids = await self._store.get_catch_up_room_event_ids( + self._destination, self._last_successful_stream_ordering, + ) + + if not event_ids: + # No more events to catch up on, but we can't ignore the chance + # of a race condition, so we check that no new events have been + # skipped due to us being in catch-up mode + + if self._catchup_last_skipped > self._last_successful_stream_ordering: + # another event has been skipped because we were in catch-up mode + continue + + # we are done catching up! 
+ self._catching_up = False + break + + if first_catch_up_check: + # as this is our check for needing catch-up, we may have PDUs in + # the queue from before we *knew* we had to do catch-up, so + # clear those out now. + self._start_catching_up() + + # fetch the relevant events from the event store + # - redacted behaviour of REDACT is fine, since we only send metadata + # of redacted events to the destination. + # - don't need to worry about rejected events as we do not actively + # forward received events over federation. + catchup_pdus = await self._store.get_events_as_list(event_ids) + if not catchup_pdus: + raise AssertionError( + "No events retrieved when we asked for %r. " + "This should not happen." % event_ids + ) + + if logger.isEnabledFor(logging.INFO): + rooms = (p.room_id for p in catchup_pdus) + logger.info("Catching up rooms to %s: %r", self._destination, rooms) + + success = await self._transaction_manager.send_new_transaction( + self._destination, catchup_pdus, [] + ) + + if not success: + return + + sent_transactions_counter.inc() + final_pdu = catchup_pdus[-1] + self._last_successful_stream_ordering = cast( + int, final_pdu.internal_metadata.stream_ordering + ) + await self._store.set_destination_last_successful_stream_ordering( + self._destination, self._last_successful_stream_ordering + ) + def _get_rr_edus(self, force_flush: bool) -> Iterable[Edu]: if not self._pending_rrs: return @@ -457,3 +569,12 @@ async def _get_to_device_message_edus(self, limit: int) -> Tuple[List[Edu], int] ] return (edus, stream_id) + + def _start_catching_up(self) -> None: + """ + Marks this destination as being in catch-up mode. + + This throws away the PDU queue. + """ + self._catching_up = True + self._pending_pdus = [] diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index c0a958252e5e..091367006e17 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -15,7 +15,7 @@ import logging from collections import namedtuple -from typing import Iterable, Optional, Tuple +from typing import Iterable, List, Optional, Tuple from canonicaljson import encode_canonical_json @@ -371,3 +371,44 @@ async def set_destination_last_successful_stream_ordering( values={"last_successful_stream_ordering": last_successful_stream_ordering}, desc="set_last_successful_stream_ordering", ) + + async def get_catch_up_room_event_ids( + self, destination: str, last_successful_stream_ordering: int, + ) -> List[str]: + """ + Returns at most 50 event IDs and their corresponding stream_orderings + that correspond to the oldest events that have not yet been sent to + the destination. + + Args: + destination: the destination in question + last_successful_stream_ordering: the stream_ordering of the + most-recently successfully-transmitted event to the destination + + Returns: + list of event_ids + """ + return await self.db_pool.runInteraction( + "get_catch_up_room_event_ids", + self._get_catch_up_room_event_ids_txn, + destination, + last_successful_stream_ordering, + ) + + @staticmethod + def _get_catch_up_room_event_ids_txn( + txn, destination: str, last_successful_stream_ordering: int, + ) -> List[str]: + q = """ + SELECT event_id FROM destination_rooms + JOIN events USING (stream_ordering) + WHERE destination = ? + AND stream_ordering > ? 
+ ORDER BY stream_ordering + LIMIT 50 + """ + txn.execute( + q, (destination, last_successful_stream_ordering), + ) + event_ids = [row[0] for row in txn] + return event_ids diff --git a/tests/federation/test_federation_catch_up.py b/tests/federation/test_federation_catch_up.py index 6cdcc378f098..cc52c3dfac0a 100644 --- a/tests/federation/test_federation_catch_up.py +++ b/tests/federation/test_federation_catch_up.py @@ -1,5 +1,10 @@ +from typing import List, Tuple + from mock import Mock +from synapse.events import EventBase +from synapse.federation.sender import PerDestinationQueue, TransactionManager +from synapse.federation.units import Edu from synapse.rest import admin from synapse.rest.client.v1 import login, room @@ -156,3 +161,163 @@ def test_catch_up_last_successful_stream_ordering_tracking(self): row_2["stream_ordering"], "Send succeeded but not marked as last_successful_stream_ordering", ) + + @override_config({"send_federation": True}) # critical to federate + def test_catch_up_from_blank_state(self): + """ + Runs an overall test of federation catch-up from scratch. + Further tests will focus on more narrow aspects and edge-cases, but I + hope to provide an overall view with this test. + """ + # bring the other server online + self.is_online = True + + # let's make some events for the other server to receive + self.register_user("u1", "you the one") + u1_token = self.login("u1", "you the one") + room_1 = self.helper.create_room_as("u1", tok=u1_token) + room_2 = self.helper.create_room_as("u1", tok=u1_token) + + # also critical to federate + self.get_success( + event_injection.inject_member_event(self.hs, room_1, "@user:host2", "join") + ) + self.get_success( + event_injection.inject_member_event(self.hs, room_2, "@user:host2", "join") + ) + + self.helper.send_state( + room_1, event_type="m.room.topic", body={"topic": "wombat"}, tok=u1_token + ) + + # check: PDU received for topic event + self.assertEqual(len(self.pdus), 1) + self.assertEqual(self.pdus[0]["type"], "m.room.topic") + + # take the remote offline + self.is_online = False + + # send another event + self.helper.send(room_1, "hi user!", tok=u1_token) + + # check: things didn't go well since the remote is down + self.assertEqual(len(self.failed_pdus), 1) + self.assertEqual(self.failed_pdus[0]["content"]["body"], "hi user!") + + # let's delete the federation transmission queue + # (this pretends we are starting up fresh.) 
+ self.assertFalse( + self.hs.get_federation_sender() + ._per_destination_queues["host2"] + .transmission_loop_running + ) + del self.hs.get_federation_sender()._per_destination_queues["host2"] + + # let's also clear any backoffs + self.get_success( + self.hs.get_datastore().set_destination_retry_timings("host2", None, 0, 0) + ) + + # bring the remote online and clear the received pdu list + self.is_online = True + self.pdus = [] + + # now we need to initiate a federation transaction somehow… + # to do that, let's send another event (because it's simple to do) + # (do it to another room otherwise the catch-up logic decides it doesn't + # need to catch up room_1 — something I overlooked when first writing + # this test) + self.helper.send(room_2, "wombats!", tok=u1_token) + + # we should now have received both PDUs + self.assertEqual(len(self.pdus), 2) + self.assertEqual(self.pdus[0]["content"]["body"], "hi user!") + self.assertEqual(self.pdus[1]["content"]["body"], "wombats!") + + def make_fake_destination_queue( + self, destination: str = "host2" + ) -> Tuple[PerDestinationQueue, List[EventBase]]: + """ + Makes a fake per-destination queue. + """ + transaction_manager = TransactionManager(self.hs) + per_dest_queue = PerDestinationQueue(self.hs, transaction_manager, destination) + results_list = [] + + async def fake_send( + destination_tm: str, + pending_pdus: List[EventBase], + _pending_edus: List[Edu], + ) -> bool: + assert destination == destination_tm + results_list.extend(pending_pdus) + return True # success! + + transaction_manager.send_new_transaction = fake_send + + return per_dest_queue, results_list + + @override_config({"send_federation": True}) + def test_catch_up_loop(self): + """ + Tests the behaviour of _catch_up_transmission_loop. 
+ """ + + # ARRANGE: + # - a local user (u1) + # - 3 rooms which u1 is joined to (and remote user @user:host2 is + # joined to) + # - some events (1 to 5) in those rooms + # we have 'already sent' events 1 and 2 to host2 + per_dest_queue, sent_pdus = self.make_fake_destination_queue() + + self.register_user("u1", "you the one") + u1_token = self.login("u1", "you the one") + room_1 = self.helper.create_room_as("u1", tok=u1_token) + room_2 = self.helper.create_room_as("u1", tok=u1_token) + room_3 = self.helper.create_room_as("u1", tok=u1_token) + self.get_success( + event_injection.inject_member_event(self.hs, room_1, "@user:host2", "join") + ) + self.get_success( + event_injection.inject_member_event(self.hs, room_2, "@user:host2", "join") + ) + self.get_success( + event_injection.inject_member_event(self.hs, room_3, "@user:host2", "join") + ) + + # create some events + self.helper.send(room_1, "you hear me!!", tok=u1_token) + event_id_2 = self.helper.send(room_2, "wombats!", tok=u1_token)["event_id"] + self.helper.send(room_3, "Matrix!", tok=u1_token) + event_id_4 = self.helper.send(room_2, "rabbits!", tok=u1_token)["event_id"] + event_id_5 = self.helper.send(room_3, "Synapse!", tok=u1_token)["event_id"] + + # destination_rooms should already be populated, but let us pretend that we already + # sent (successfully) up to and including event id 2 + event_2 = self.get_success(self.hs.get_datastore().get_event(event_id_2)) + + # also fetch event 5 so we know its last_successful_stream_ordering later + event_5 = self.get_success(self.hs.get_datastore().get_event(event_id_5)) + + self.get_success( + self.hs.get_datastore().set_destination_last_successful_stream_ordering( + "host2", event_2.internal_metadata.stream_ordering + ) + ) + + # ACT + self.get_success(per_dest_queue._catch_up_transmission_loop()) + + # ASSERT, noticing in particular: + # - event 3 not sent out, because event 5 replaces it + # - order is least recent first, so event 5 comes after event 4 + # - catch-up is completed + self.assertEqual(len(sent_pdus), 2) + self.assertEqual(sent_pdus[0].event_id, event_id_4) + self.assertEqual(sent_pdus[1].event_id, event_id_5) + self.assertFalse(per_dest_queue._catching_up) + self.assertEqual( + per_dest_queue._last_successful_stream_ordering, + event_5.internal_metadata.stream_ordering, + ) diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index f306a09bfaa7..3fec09ea8a91 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -73,6 +73,7 @@ def make_homeserver(self, reactor, clock): "delivered_txn", "get_received_txn_response", "set_received_txn_response", + "get_destination_last_successful_stream_ordering", "get_destination_retry_timings", "get_devices_by_remote", "maybe_store_room_on_invite", @@ -121,6 +122,10 @@ def prepare(self, reactor, clock, hs): (0, []) ) + self.datastore.get_destination_last_successful_stream_ordering.return_value = make_awaitable( + None + ) + def get_received_txn_response(*args): return defer.succeed(None) From a3f124b821f0faf53af9e6c890870ec8cbb47ce5 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Wed, 16 Sep 2020 21:15:55 +0200 Subject: [PATCH 027/245] Switch metaclass initialization to python 3-compatible syntax (#8326) --- changelog.d/8326.misc | 1 + synapse/handlers/room_member.py | 4 +--- synapse/replication/http/_base.py | 4 +--- synapse/storage/databases/main/account_data.py | 8 +++----- synapse/storage/databases/main/push_rule.py | 7 +++---- synapse/storage/databases/main/receipts.py | 8 +++----- 
synapse/storage/databases/main/stream.py | 4 +--- synapse/types.py | 6 +++--- 8 files changed, 16 insertions(+), 26 deletions(-) create mode 100644 changelog.d/8326.misc diff --git a/changelog.d/8326.misc b/changelog.d/8326.misc new file mode 100644 index 000000000000..985d2c027aa9 --- /dev/null +++ b/changelog.d/8326.misc @@ -0,0 +1 @@ +Update outdated usages of `metaclass` to python 3 syntax. \ No newline at end of file diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 01a6e882629f..8feba8c90a39 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -51,14 +51,12 @@ logger = logging.getLogger(__name__) -class RoomMemberHandler: +class RoomMemberHandler(metaclass=abc.ABCMeta): # TODO(paul): This handler currently contains a messy conflation of # low-level API that works on UserID objects and so on, and REST-level # API that takes ID strings and returns pagination chunks. These concerns # ought to be separated out a lot better. - __metaclass__ = abc.ABCMeta - def __init__(self, hs: "HomeServer"): self.hs = hs self.store = hs.get_datastore() diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index ba16f22c91c1..b448da671038 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -33,7 +33,7 @@ logger = logging.getLogger(__name__) -class ReplicationEndpoint: +class ReplicationEndpoint(metaclass=abc.ABCMeta): """Helper base class for defining new replication HTTP endpoints. This creates an endpoint under `/_synapse/replication/:NAME/:PATH_ARGS..` @@ -72,8 +72,6 @@ class ReplicationEndpoint: is received. """ - __metaclass__ = abc.ABCMeta - NAME = abc.abstractproperty() # type: str # type: ignore PATH_ARGS = abc.abstractproperty() # type: Tuple[str, ...] # type: ignore METHOD = "POST" diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 4436b1a83d97..5f1a2b9aa6cc 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -29,15 +29,13 @@ logger = logging.getLogger(__name__) -class AccountDataWorkerStore(SQLBaseStore): +# The ABCMeta metaclass ensures that it cannot be instantiated without +# the abstract methods being implemented. +class AccountDataWorkerStore(SQLBaseStore, metaclass=abc.ABCMeta): """This is an abstract base class where subclasses must implement `get_max_account_data_stream_id` which can be called in the initializer. """ - # This ABCMeta metaclass ensures that we cannot be instantiated without - # the abstract methods being implemented. - __metaclass__ = abc.ABCMeta - def __init__(self, database: DatabasePool, db_conn, hs): account_max = self.get_max_account_data_stream_id() self._account_data_stream_cache = StreamChangeCache( diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index 9790a3199847..b7a8d34ce129 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -61,6 +61,8 @@ def _load_rules(rawrules, enabled_map, use_new_defaults=False): return rules +# The ABCMeta metaclass ensures that it cannot be instantiated without +# the abstract methods being implemented. 
class PushRulesWorkerStore( ApplicationServiceWorkerStore, ReceiptsWorkerStore, @@ -68,15 +70,12 @@ class PushRulesWorkerStore( RoomMemberWorkerStore, EventsWorkerStore, SQLBaseStore, + metaclass=abc.ABCMeta, ): """This is an abstract base class where subclasses must implement `get_max_push_rules_stream_id` which can be called in the initializer. """ - # This ABCMeta metaclass ensures that we cannot be instantiated without - # the abstract methods being implemented. - __metaclass__ = abc.ABCMeta - def __init__(self, database: DatabasePool, db_conn, hs): super(PushRulesWorkerStore, self).__init__(database, db_conn, hs) diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 4a0d5a320efb..6568bddd81e6 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -31,15 +31,13 @@ logger = logging.getLogger(__name__) -class ReceiptsWorkerStore(SQLBaseStore): +# The ABCMeta metaclass ensures that it cannot be instantiated without +# the abstract methods being implemented. +class ReceiptsWorkerStore(SQLBaseStore, metaclass=abc.ABCMeta): """This is an abstract base class where subclasses must implement `get_max_receipt_stream_id` which can be called in the initializer. """ - # This ABCMeta metaclass ensures that we cannot be instantiated without - # the abstract methods being implemented. - __metaclass__ = abc.ABCMeta - def __init__(self, database: DatabasePool, db_conn, hs): super(ReceiptsWorkerStore, self).__init__(database, db_conn, hs) diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 2e955187524e..7dbe11513b3c 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -259,14 +259,12 @@ def filter_to_clause(event_filter: Optional[Filter]) -> Tuple[str, List[str]]: return " AND ".join(clauses), args -class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): +class StreamWorkerStore(EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta): """This is an abstract base class where subclasses must implement `get_room_max_stream_ordering` and `get_room_min_stream_ordering` which can be called in the initializer. """ - __metaclass__ = abc.ABCMeta - def __init__(self, database: DatabasePool, db_conn, hs: "HomeServer"): super(StreamWorkerStore, self).__init__(database, db_conn, hs) diff --git a/synapse/types.py b/synapse/types.py index dc09448bdc8d..a6fc7df22c30 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -165,7 +165,9 @@ def get_localpart_from_id(string): DS = TypeVar("DS", bound="DomainSpecificString") -class DomainSpecificString(namedtuple("DomainSpecificString", ("localpart", "domain"))): +class DomainSpecificString( + namedtuple("DomainSpecificString", ("localpart", "domain")), metaclass=abc.ABCMeta +): """Common base class among ID/name strings that have a local part and a domain name, prefixed with a sigil. 
@@ -175,8 +177,6 @@ class DomainSpecificString(namedtuple("DomainSpecificString", ("localpart", "dom 'domain' : The domain part of the name """ - __metaclass__ = abc.ABCMeta - SIGIL = abc.abstractproperty() # type: str # type: ignore # Deny iteration because it will bite you if you try to create a singleton From 53284c425e219fbd9ae445bbe4a8628883a3631d Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Thu, 17 Sep 2020 12:54:56 +0200 Subject: [PATCH 028/245] Fix a potential bug of UnboundLocalError (#8329) Replaced with less buggier control flow --- changelog.d/8329.bugfix | 1 + synapse/rest/client/v2_alpha/register.py | 13 ++++++++----- 2 files changed, 9 insertions(+), 5 deletions(-) create mode 100644 changelog.d/8329.bugfix diff --git a/changelog.d/8329.bugfix b/changelog.d/8329.bugfix new file mode 100644 index 000000000000..2f71f1f4b956 --- /dev/null +++ b/changelog.d/8329.bugfix @@ -0,0 +1 @@ +Fix UnboundLocalError from occuring when appservices send malformed register request. \ No newline at end of file diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index b6b90a8b300a..0705718d007d 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -431,11 +431,14 @@ async def on_POST(self, request): access_token = self.auth.get_access_token_from_request(request) - if isinstance(desired_username, str): - result = await self._do_appservice_registration( - desired_username, access_token, body - ) - return 200, result # we throw for non 200 responses + if not isinstance(desired_username, str): + raise SynapseError(400, "Desired Username is missing or not a string") + + result = await self._do_appservice_registration( + desired_username, access_token, body + ) + + return 200, result # == Normal User Registration == (everyone else) if not self._registration_enabled: From c3c9732c5363ef007dd838dea016719d3ab07a89 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 17 Sep 2020 07:04:15 -0400 Subject: [PATCH 029/245] Use admin_patterns for all admin APIs. (#8331) This reduces duplication of the admin prefix in regular expressions. --- changelog.d/8331.misc | 1 + synapse/rest/admin/__init__.py | 4 ++-- synapse/rest/admin/_base.py | 4 ++-- synapse/rest/admin/devices.py | 15 +++++---------- synapse/rest/admin/purge_room_servlet.py | 5 ++--- synapse/rest/admin/server_notice_servlet.py | 9 ++++----- synapse/rest/admin/users.py | 8 ++++---- 7 files changed, 20 insertions(+), 26 deletions(-) create mode 100644 changelog.d/8331.misc diff --git a/changelog.d/8331.misc b/changelog.d/8331.misc new file mode 100644 index 000000000000..0e1bae20efa3 --- /dev/null +++ b/changelog.d/8331.misc @@ -0,0 +1 @@ +Use the `admin_patterns` helper in additional locations. 
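[Editor's note: a minimal sketch of the deduplication this commit describes, not part of the patch itself; the OLD_PATTERNS/NEW_PATTERNS names are invented for illustration, and the helper body mirrors the admin_patterns definition shown in the diff below.]

    import re

    def admin_patterns(path_regex: str, version: str = "v1"):
        # Prepend the shared admin prefix and API version to the
        # endpoint-specific part of the regex.
        return [re.compile("^/_synapse/admin/" + version + path_regex)]

    # Before: each servlet repeated the "^/_synapse/admin/v2" prefix by hand.
    OLD_PATTERNS = (re.compile("^/_synapse/admin/v2/users/(?P<user_id>[^/]+)$"),)

    # After: only the trailing path and the version are spelled out.
    NEW_PATTERNS = admin_patterns("/users/(?P<user_id>[^/]+)$", "v2")

    assert OLD_PATTERNS[0].pattern == NEW_PATTERNS[0].pattern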
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 1c88c93f3836..abf362c7b74e 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -16,13 +16,13 @@ import logging import platform -import re import synapse from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.server import JsonResource from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.rest.admin._base import ( + admin_patterns, assert_requester_is_admin, historical_admin_path_patterns, ) @@ -61,7 +61,7 @@ class VersionServlet(RestServlet): - PATTERNS = (re.compile("^/_synapse/admin/v1/server_version$"),) + PATTERNS = admin_patterns("/server_version$") def __init__(self, hs): self.res = { diff --git a/synapse/rest/admin/_base.py b/synapse/rest/admin/_base.py index d82eaf5e38aa..db9fea263a62 100644 --- a/synapse/rest/admin/_base.py +++ b/synapse/rest/admin/_base.py @@ -44,7 +44,7 @@ def historical_admin_path_patterns(path_regex): ] -def admin_patterns(path_regex: str): +def admin_patterns(path_regex: str, version: str = "v1"): """Returns the list of patterns for an admin endpoint Args: @@ -54,7 +54,7 @@ def admin_patterns(path_regex: str): Returns: A list of regex patterns. """ - admin_prefix = "^/_synapse/admin/v1" + admin_prefix = "^/_synapse/admin/" + version patterns = [re.compile(admin_prefix + path_regex)] return patterns diff --git a/synapse/rest/admin/devices.py b/synapse/rest/admin/devices.py index 8d3267733938..4670d7160dd3 100644 --- a/synapse/rest/admin/devices.py +++ b/synapse/rest/admin/devices.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -import re from synapse.api.errors import NotFoundError, SynapseError from synapse.http.servlet import ( @@ -21,7 +20,7 @@ assert_params_in_dict, parse_json_object_from_request, ) -from synapse.rest.admin._base import assert_requester_is_admin +from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin from synapse.types import UserID logger = logging.getLogger(__name__) @@ -32,10 +31,8 @@ class DeviceRestServlet(RestServlet): Get, update or delete the given user's device """ - PATTERNS = ( - re.compile( - "^/_synapse/admin/v2/users/(?P[^/]*)/devices/(?P[^/]*)$" - ), + PATTERNS = admin_patterns( + "/users/(?P[^/]*)/devices/(?P[^/]*)$", "v2" ) def __init__(self, hs): @@ -98,7 +95,7 @@ class DevicesRestServlet(RestServlet): Retrieve the given user's devices """ - PATTERNS = (re.compile("^/_synapse/admin/v2/users/(?P[^/]*)/devices$"),) + PATTERNS = admin_patterns("/users/(?P[^/]*)/devices$", "v2") def __init__(self, hs): """ @@ -131,9 +128,7 @@ class DeleteDevicesRestServlet(RestServlet): key which lists the device_ids to delete. """ - PATTERNS = ( - re.compile("^/_synapse/admin/v2/users/(?P[^/]*)/delete_devices$"), - ) + PATTERNS = admin_patterns("/users/(?P[^/]*)/delete_devices$", "v2") def __init__(self, hs): self.hs = hs diff --git a/synapse/rest/admin/purge_room_servlet.py b/synapse/rest/admin/purge_room_servlet.py index f47406654206..8b7bb6d44ebe 100644 --- a/synapse/rest/admin/purge_room_servlet.py +++ b/synapse/rest/admin/purge_room_servlet.py @@ -12,14 +12,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import re - from synapse.http.servlet import ( RestServlet, assert_params_in_dict, parse_json_object_from_request, ) from synapse.rest.admin import assert_requester_is_admin +from synapse.rest.admin._base import admin_patterns class PurgeRoomServlet(RestServlet): @@ -35,7 +34,7 @@ class PurgeRoomServlet(RestServlet): {} """ - PATTERNS = (re.compile("^/_synapse/admin/v1/purge_room$"),) + PATTERNS = admin_patterns("/purge_room$") def __init__(self, hs): """ diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py index 6e9a8741218b..375d0554455b 100644 --- a/synapse/rest/admin/server_notice_servlet.py +++ b/synapse/rest/admin/server_notice_servlet.py @@ -12,8 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import re - from synapse.api.constants import EventTypes from synapse.api.errors import SynapseError from synapse.http.servlet import ( @@ -22,6 +20,7 @@ parse_json_object_from_request, ) from synapse.rest.admin import assert_requester_is_admin +from synapse.rest.admin._base import admin_patterns from synapse.rest.client.transactions import HttpTransactionCache from synapse.types import UserID @@ -56,13 +55,13 @@ def __init__(self, hs): self.snm = hs.get_server_notices_manager() def register(self, json_resource): - PATTERN = "^/_synapse/admin/v1/send_server_notice" + PATTERN = "/send_server_notice" json_resource.register_paths( - "POST", (re.compile(PATTERN + "$"),), self.on_POST, self.__class__.__name__ + "POST", admin_patterns(PATTERN + "$"), self.on_POST, self.__class__.__name__ ) json_resource.register_paths( "PUT", - (re.compile(PATTERN + "/(?P[^/]*)$"),), + admin_patterns(PATTERN + "/(?P[^/]*)$"), self.on_PUT, self.__class__.__name__, ) diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index f3e77da850c8..0f537031c4d5 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -15,7 +15,6 @@ import hashlib import hmac import logging -import re from http import HTTPStatus from synapse.api.constants import UserTypes @@ -29,6 +28,7 @@ parse_string, ) from synapse.rest.admin._base import ( + admin_patterns, assert_requester_is_admin, assert_user_is_admin, historical_admin_path_patterns, @@ -60,7 +60,7 @@ async def on_GET(self, request, user_id): class UsersRestServletV2(RestServlet): - PATTERNS = (re.compile("^/_synapse/admin/v2/users$"),) + PATTERNS = admin_patterns("/users$", "v2") """Get request to list all local users. This needs user to have administrator access in Synapse. @@ -105,7 +105,7 @@ async def on_GET(self, request): class UserRestServletV2(RestServlet): - PATTERNS = (re.compile("^/_synapse/admin/v2/users/(?P[^/]+)$"),) + PATTERNS = admin_patterns("/users/(?P[^/]+)$", "v2") """Get request to list user details. This needs user to have administrator access in Synapse. 
@@ -642,7 +642,7 @@ class UserAdminServlet(RestServlet): {} """ - PATTERNS = (re.compile("^/_synapse/admin/v1/users/(?P[^/]*)/admin$"),) + PATTERNS = admin_patterns("/users/(?P[^/]*)/admin$") def __init__(self, hs): self.hs = hs From 837293c314b47e988fe9532115476a6536cd6406 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Thu, 17 Sep 2020 14:37:01 +0200 Subject: [PATCH 030/245] Remove obsolete __future__ imports (#8337) --- changelog.d/8337.misc | 1 + contrib/cmdclient/console.py | 2 -- contrib/cmdclient/http.py | 2 -- contrib/graph/graph.py | 2 -- contrib/graph/graph3.py | 2 -- contrib/jitsimeetbridge/jitsimeetbridge.py | 2 -- contrib/scripts/kick_users.py | 8 +------- scripts-dev/definitions.py | 2 -- scripts-dev/dump_macaroon.py | 2 -- scripts-dev/federation_client.py | 2 -- scripts-dev/hash_history.py | 2 -- scripts/move_remote_media_to_new_store.py | 2 -- scripts/register_new_matrix_user | 2 -- synapse/_scripts/register_new_matrix_user.py | 2 -- synapse/app/homeserver.py | 2 -- synapse/config/emailconfig.py | 1 - synapse/config/stats.py | 2 -- synapse/storage/databases/main/events_worker.py | 2 -- synapse/util/patch_inline_callbacks.py | 2 -- 19 files changed, 2 insertions(+), 40 deletions(-) create mode 100644 changelog.d/8337.misc diff --git a/changelog.d/8337.misc b/changelog.d/8337.misc new file mode 100644 index 000000000000..4daf27220443 --- /dev/null +++ b/changelog.d/8337.misc @@ -0,0 +1 @@ +Remove `__future__` imports related to Python 2 compatibility. \ No newline at end of file diff --git a/contrib/cmdclient/console.py b/contrib/cmdclient/console.py index dfc1d294dcfb..ab1e1f1f4c95 100755 --- a/contrib/cmdclient/console.py +++ b/contrib/cmdclient/console.py @@ -15,8 +15,6 @@ # limitations under the License. """ Starts a synapse client console. """ -from __future__ import print_function - import argparse import cmd import getpass diff --git a/contrib/cmdclient/http.py b/contrib/cmdclient/http.py index cd3260b27de8..345120b61267 100644 --- a/contrib/cmdclient/http.py +++ b/contrib/cmdclient/http.py @@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from __future__ import print_function - import json import urllib from pprint import pformat diff --git a/contrib/graph/graph.py b/contrib/graph/graph.py index de33fac1c70f..fdbac087bdab 100644 --- a/contrib/graph/graph.py +++ b/contrib/graph/graph.py @@ -1,5 +1,3 @@ -from __future__ import print_function - import argparse import cgi import datetime diff --git a/contrib/graph/graph3.py b/contrib/graph/graph3.py index 91db98e7efcb..dd0c19368b9e 100644 --- a/contrib/graph/graph3.py +++ b/contrib/graph/graph3.py @@ -1,5 +1,3 @@ -from __future__ import print_function - import argparse import cgi import datetime diff --git a/contrib/jitsimeetbridge/jitsimeetbridge.py b/contrib/jitsimeetbridge/jitsimeetbridge.py index 69aa74bd34d0..b3de468687a6 100644 --- a/contrib/jitsimeetbridge/jitsimeetbridge.py +++ b/contrib/jitsimeetbridge/jitsimeetbridge.py @@ -10,8 +10,6 @@ Requires: npm install jquery jsdom """ -from __future__ import print_function - import json import subprocess import time diff --git a/contrib/scripts/kick_users.py b/contrib/scripts/kick_users.py index 372dbd9e4f32..f8e0c732fb0a 100755 --- a/contrib/scripts/kick_users.py +++ b/contrib/scripts/kick_users.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -from __future__ import print_function import json import sys @@ -8,11 +7,6 @@ import requests -try: - raw_input -except NameError: # Python 3 - raw_input = input - def _mkurl(template, kws): for key in kws: @@ -58,7 +52,7 @@ def main(hs, room_id, access_token, user_id_prefix, why): print("The following user IDs will be kicked from %s" % room_name) for uid in kick_list: print(uid) - doit = raw_input("Continue? [Y]es\n") + doit = input("Continue? [Y]es\n") if len(doit) > 0 and doit.lower() == "y": print("Kicking members...") # encode them all diff --git a/scripts-dev/definitions.py b/scripts-dev/definitions.py index 9eddb6d515d7..15e6ce6e16d2 100755 --- a/scripts-dev/definitions.py +++ b/scripts-dev/definitions.py @@ -1,7 +1,5 @@ #! /usr/bin/python -from __future__ import print_function - import argparse import ast import os diff --git a/scripts-dev/dump_macaroon.py b/scripts-dev/dump_macaroon.py index 22b30fa78e43..980b5e709f96 100755 --- a/scripts-dev/dump_macaroon.py +++ b/scripts-dev/dump_macaroon.py @@ -1,7 +1,5 @@ #!/usr/bin/env python2 -from __future__ import print_function - import sys import pymacaroons diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py index ad12523c4d62..848a826f1742 100755 --- a/scripts-dev/federation_client.py +++ b/scripts-dev/federation_client.py @@ -15,8 +15,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import argparse import base64 import json diff --git a/scripts-dev/hash_history.py b/scripts-dev/hash_history.py index 89acb52e6a58..8d6c3d24dbbf 100644 --- a/scripts-dev/hash_history.py +++ b/scripts-dev/hash_history.py @@ -1,5 +1,3 @@ -from __future__ import print_function - import sqlite3 import sys diff --git a/scripts/move_remote_media_to_new_store.py b/scripts/move_remote_media_to_new_store.py index b5b63933ab19..ab2e76338675 100755 --- a/scripts/move_remote_media_to_new_store.py +++ b/scripts/move_remote_media_to_new_store.py @@ -32,8 +32,6 @@ PYTHON_PATH=. 
./scripts/move_remote_media_to_new_store.py """ -from __future__ import print_function - import argparse import logging import os diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user index b450712ab7ee..8b9d30877de9 100755 --- a/scripts/register_new_matrix_user +++ b/scripts/register_new_matrix_user @@ -14,8 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - from synapse._scripts.register_new_matrix_user import main if __name__ == "__main__": diff --git a/synapse/_scripts/register_new_matrix_user.py b/synapse/_scripts/register_new_matrix_user.py index 55cce2db22cc..da0996edbc1d 100644 --- a/synapse/_scripts/register_new_matrix_user.py +++ b/synapse/_scripts/register_new_matrix_user.py @@ -14,8 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import argparse import getpass import hashlib diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index b08319ca77f6..dff739e1062d 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -15,8 +15,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import gc import logging import math diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 72b42bfd6278..cceffbfee25e 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -14,7 +14,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function # This file can't be called email.py because if it is, we cannot: import email.utils diff --git a/synapse/config/stats.py b/synapse/config/stats.py index 62485189eaa7..b559bfa4113c 100644 --- a/synapse/config/stats.py +++ b/synapse/config/stats.py @@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import division - import sys from ._base import Config diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 17f5997b89f0..cd3739c16c37 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import division - import itertools import logging import threading diff --git a/synapse/util/patch_inline_callbacks.py b/synapse/util/patch_inline_callbacks.py index 54c046b6e1cd..72574d3af257 100644 --- a/synapse/util/patch_inline_callbacks.py +++ b/synapse/util/patch_inline_callbacks.py @@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import functools import sys from typing import Any, Callable, List From efb6b6629c78409251f61857f2bfe6c2f8f8fb8d Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Thu, 17 Sep 2020 22:45:22 +0200 Subject: [PATCH 031/245] Move lint dependencies to extras_require (#8330) Lint dependencies can now be installed with pip install -e ".[lint]" This should help keep the version in sync between tox and documentation. 
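Condensed from the diff below (file names as in the patch), a single Python mapping now feeds every consumer of the lint toolchain:

    # synapse/python_dependencies.py holds the one pinned list:
    CONDITIONAL_REQUIREMENTS = {
        "lint": ["isort==5.0.3", "black==19.10b0", "flake8-comprehensions", "flake8"],
    }

    # CONTRIBUTING.md then points developers at exactly that set:
    #     pip install -e ".[lint]"
    # and the tox check_codestyle / check_isort environments swap their own `deps`
    # pins for `extras = lint`, so the docs and CI resolve the same versions.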
--- CONTRIBUTING.md | 6 +++--- changelog.d/8330.misc | 1 + synapse/python_dependencies.py | 5 ++++- tox.ini | 10 ++-------- 4 files changed, 10 insertions(+), 12 deletions(-) create mode 100644 changelog.d/8330.misc diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 062413e92531..524f82433dba 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -17,9 +17,9 @@ https://help.github.com/articles/using-pull-requests/) to ask us to pull your changes into our repo. Some other points to follow: - + * Please base your changes on the `develop` branch. - + * Please follow the [code style requirements](#code-style). * Please include a [changelog entry](#changelog) with each PR. @@ -46,7 +46,7 @@ locally. You'll need python 3.6 or later, and to install a number of tools: ``` # Install the dependencies -pip install -U black flake8 flake8-comprehensions isort +pip install -e ".[lint]" # Run the linter script ./scripts-dev/lint.sh diff --git a/changelog.d/8330.misc b/changelog.d/8330.misc new file mode 100644 index 000000000000..c51370f215d4 --- /dev/null +++ b/changelog.d/8330.misc @@ -0,0 +1 @@ +Move lint-related dependencies to package-extra field, update CONTRIBUTING.md to utilise this. \ No newline at end of file diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index ff0c67228bea..67f019fd2237 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -104,13 +104,16 @@ # hiredis is not a *strict* dependency, but it makes things much faster. # (if it is not installed, we fall back to slow code.) "redis": ["txredisapi>=1.4.7", "hiredis"], + # We pin black so that our tests don't start failing on new releases. + "lint": ["isort==5.0.3", "black==19.10b0", "flake8-comprehensions", "flake8"], } ALL_OPTIONAL_REQUIREMENTS = set() # type: Set[str] for name, optional_deps in CONDITIONAL_REQUIREMENTS.items(): # Exclude systemd as it's a system-based requirement. - if name not in ["systemd"]: + # Exclude lint as it's a dev-based requirement. + if name not in ["systemd", "lint"]: ALL_OPTIONAL_REQUIREMENTS = set(optional_deps) | ALL_OPTIONAL_REQUIREMENTS diff --git a/tox.ini b/tox.ini index df473bd234a6..ddcab0198fc2 100644 --- a/tox.ini +++ b/tox.ini @@ -118,20 +118,14 @@ commands = check-manifest [testenv:check_codestyle] -skip_install = True -deps = - flake8 - flake8-comprehensions - # We pin so that our tests don't start failing on new releases of black. - black==19.10b0 +extras = lint commands = python -m black --check --diff . /bin/sh -c "flake8 synapse tests scripts scripts-dev contrib synctl {env:PEP8SUFFIX:}" {toxinidir}/scripts-dev/config-lint.sh [testenv:check_isort] -skip_install = True -deps = isort==5.0.3 +extras = lint commands = /bin/sh -c "isort -c --df --sp setup.cfg synapse tests scripts-dev scripts" [testenv:check-newsfragment] From 7c407efdc80abf2a991844d107a896d629e3965a Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Fri, 18 Sep 2020 13:56:40 +0200 Subject: [PATCH 032/245] Update test logging to be able to accept braces (#8335) --- changelog.d/8335.misc | 1 + tests/test_utils/logging_setup.py | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) create mode 100644 changelog.d/8335.misc diff --git a/changelog.d/8335.misc b/changelog.d/8335.misc new file mode 100644 index 000000000000..7e0a4c7d839f --- /dev/null +++ b/changelog.d/8335.misc @@ -0,0 +1 @@ +Fix test logging to allow braces in log output. 
\ No newline at end of file diff --git a/tests/test_utils/logging_setup.py b/tests/test_utils/logging_setup.py index 2d96b0fa8da4..fdfb840b6222 100644 --- a/tests/test_utils/logging_setup.py +++ b/tests/test_utils/logging_setup.py @@ -29,8 +29,7 @@ def emit(self, record): log_entry = self.format(record) log_level = record.levelname.lower().replace("warning", "warn") self.tx_log.emit( - twisted.logger.LogLevel.levelWithName(log_level), - log_entry.replace("{", r"(").replace("}", r")"), + twisted.logger.LogLevel.levelWithName(log_level), "{entry}", entry=log_entry ) From 68c7a6936f8921744d083e6dc8a2a085cce30b2a Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Fri, 18 Sep 2020 14:55:13 +0100 Subject: [PATCH 033/245] Allow appservice users to /login (#8320) Add ability for ASes to /login using the `uk.half-shot.msc2778.login.application_service` login `type`. Co-authored-by: Patrick Cloke --- changelog.d/8320.feature | 1 + synapse/rest/client/v1/login.py | 49 ++++++++--- tests/rest/client/v1/test_login.py | 134 ++++++++++++++++++++++++++++- 3 files changed, 173 insertions(+), 11 deletions(-) create mode 100644 changelog.d/8320.feature diff --git a/changelog.d/8320.feature b/changelog.d/8320.feature new file mode 100644 index 000000000000..475a5fe62d97 --- /dev/null +++ b/changelog.d/8320.feature @@ -0,0 +1 @@ +Add `uk.half-shot.msc2778.login.application_service` login type to allow appservices to login. diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index a14618ac84fb..dd8cdc0d9f7e 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -18,6 +18,7 @@ from synapse.api.errors import Codes, LoginError, SynapseError from synapse.api.ratelimiting import Ratelimiter +from synapse.appservice import ApplicationService from synapse.handlers.auth import ( convert_client_dict_legacy_fields_to_identifier, login_id_phone_to_thirdparty, @@ -44,6 +45,7 @@ class LoginRestServlet(RestServlet): TOKEN_TYPE = "m.login.token" JWT_TYPE = "org.matrix.login.jwt" JWT_TYPE_DEPRECATED = "m.login.jwt" + APPSERVICE_TYPE = "uk.half-shot.msc2778.login.application_service" def __init__(self, hs): super(LoginRestServlet, self).__init__() @@ -61,6 +63,8 @@ def __init__(self, hs): self.cas_enabled = hs.config.cas_enabled self.oidc_enabled = hs.config.oidc_enabled + self.auth = hs.get_auth() + self.auth_handler = self.hs.get_auth_handler() self.registration_handler = hs.get_registration_handler() self.handlers = hs.get_handlers() @@ -107,6 +111,8 @@ def on_GET(self, request: SynapseRequest): ({"type": t} for t in self.auth_handler.get_supported_login_types()) ) + flows.append({"type": LoginRestServlet.APPSERVICE_TYPE}) + return 200, {"flows": flows} def on_OPTIONS(self, request: SynapseRequest): @@ -116,8 +122,12 @@ async def on_POST(self, request: SynapseRequest): self._address_ratelimiter.ratelimit(request.getClientIP()) login_submission = parse_json_object_from_request(request) + try: - if self.jwt_enabled and ( + if login_submission["type"] == LoginRestServlet.APPSERVICE_TYPE: + appservice = self.auth.get_appservice_by_req(request) + result = await self._do_appservice_login(login_submission, appservice) + elif self.jwt_enabled and ( login_submission["type"] == LoginRestServlet.JWT_TYPE or login_submission["type"] == LoginRestServlet.JWT_TYPE_DEPRECATED ): @@ -134,6 +144,33 @@ async def on_POST(self, request: SynapseRequest): result["well_known"] = well_known_data return 200, result + def _get_qualified_user_id(self, identifier): + if identifier["type"] != 
"m.id.user": + raise SynapseError(400, "Unknown login identifier type") + if "user" not in identifier: + raise SynapseError(400, "User identifier is missing 'user' key") + + if identifier["user"].startswith("@"): + return identifier["user"] + else: + return UserID(identifier["user"], self.hs.hostname).to_string() + + async def _do_appservice_login( + self, login_submission: JsonDict, appservice: ApplicationService + ): + logger.info( + "Got appservice login request with identifier: %r", + login_submission.get("identifier"), + ) + + identifier = convert_client_dict_legacy_fields_to_identifier(login_submission) + qualified_user_id = self._get_qualified_user_id(identifier) + + if not appservice.is_interested_in_user(qualified_user_id): + raise LoginError(403, "Invalid access_token", errcode=Codes.FORBIDDEN) + + return await self._complete_login(qualified_user_id, login_submission) + async def _do_other_login(self, login_submission: JsonDict) -> Dict[str, str]: """Handle non-token/saml/jwt logins @@ -219,15 +256,7 @@ async def _do_other_login(self, login_submission: JsonDict) -> Dict[str, str]: # by this point, the identifier should be an m.id.user: if it's anything # else, we haven't understood it. - if identifier["type"] != "m.id.user": - raise SynapseError(400, "Unknown login identifier type") - if "user" not in identifier: - raise SynapseError(400, "User identifier is missing 'user' key") - - if identifier["user"].startswith("@"): - qualified_user_id = identifier["user"] - else: - qualified_user_id = UserID(identifier["user"], self.hs.hostname).to_string() + qualified_user_id = self._get_qualified_user_id(identifier) # Check if we've hit the failed ratelimit (but don't update it) self._failed_attempts_ratelimiter.ratelimit( diff --git a/tests/rest/client/v1/test_login.py b/tests/rest/client/v1/test_login.py index 2668662c9e51..5d987a30c7e9 100644 --- a/tests/rest/client/v1/test_login.py +++ b/tests/rest/client/v1/test_login.py @@ -7,8 +7,9 @@ import jwt import synapse.rest.admin +from synapse.appservice import ApplicationService from synapse.rest.client.v1 import login, logout -from synapse.rest.client.v2_alpha import devices +from synapse.rest.client.v2_alpha import devices, register from synapse.rest.client.v2_alpha.account import WhoamiRestServlet from tests import unittest @@ -748,3 +749,134 @@ def test_login_jwt_invalid_signature(self): channel.json_body["error"], "JWT validation failed: Signature verification failed", ) + + +AS_USER = "as_user_alice" + + +class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase): + servlets = [ + login.register_servlets, + register.register_servlets, + ] + + def register_as_user(self, username): + request, channel = self.make_request( + b"POST", + "/_matrix/client/r0/register?access_token=%s" % (self.service.token,), + {"username": username}, + ) + self.render(request) + + def make_homeserver(self, reactor, clock): + self.hs = self.setup_test_homeserver() + + self.service = ApplicationService( + id="unique_identifier", + token="some_token", + hostname="example.com", + sender="@asbot:example.com", + namespaces={ + ApplicationService.NS_USERS: [ + {"regex": r"@as_user.*", "exclusive": False} + ], + ApplicationService.NS_ROOMS: [], + ApplicationService.NS_ALIASES: [], + }, + ) + self.another_service = ApplicationService( + id="another__identifier", + token="another_token", + hostname="example.com", + sender="@as2bot:example.com", + namespaces={ + ApplicationService.NS_USERS: [ + {"regex": r"@as2_user.*", "exclusive": False} + ], + 
ApplicationService.NS_ROOMS: [], + ApplicationService.NS_ALIASES: [], + }, + ) + + self.hs.get_datastore().services_cache.append(self.service) + self.hs.get_datastore().services_cache.append(self.another_service) + return self.hs + + def test_login_appservice_user(self): + """Test that an appservice user can use /login + """ + self.register_as_user(AS_USER) + + params = { + "type": login.LoginRestServlet.APPSERVICE_TYPE, + "identifier": {"type": "m.id.user", "user": AS_USER}, + } + request, channel = self.make_request( + b"POST", LOGIN_URL, params, access_token=self.service.token + ) + + self.render(request) + self.assertEquals(channel.result["code"], b"200", channel.result) + + def test_login_appservice_user_bot(self): + """Test that the appservice bot can use /login + """ + self.register_as_user(AS_USER) + + params = { + "type": login.LoginRestServlet.APPSERVICE_TYPE, + "identifier": {"type": "m.id.user", "user": self.service.sender}, + } + request, channel = self.make_request( + b"POST", LOGIN_URL, params, access_token=self.service.token + ) + + self.render(request) + self.assertEquals(channel.result["code"], b"200", channel.result) + + def test_login_appservice_wrong_user(self): + """Test that non-as users cannot login with the as token + """ + self.register_as_user(AS_USER) + + params = { + "type": login.LoginRestServlet.APPSERVICE_TYPE, + "identifier": {"type": "m.id.user", "user": "fibble_wibble"}, + } + request, channel = self.make_request( + b"POST", LOGIN_URL, params, access_token=self.service.token + ) + + self.render(request) + self.assertEquals(channel.result["code"], b"403", channel.result) + + def test_login_appservice_wrong_as(self): + """Test that as users cannot login with wrong as token + """ + self.register_as_user(AS_USER) + + params = { + "type": login.LoginRestServlet.APPSERVICE_TYPE, + "identifier": {"type": "m.id.user", "user": AS_USER}, + } + request, channel = self.make_request( + b"POST", LOGIN_URL, params, access_token=self.another_service.token + ) + + self.render(request) + self.assertEquals(channel.result["code"], b"403", channel.result) + + def test_login_appservice_no_token(self): + """Test that users must provide a token when using the appservice + login method + """ + self.register_as_user(AS_USER) + + params = { + "type": login.LoginRestServlet.APPSERVICE_TYPE, + "identifier": {"type": "m.id.user", "user": AS_USER}, + } + request, channel = self.make_request(b"POST", LOGIN_URL, params) + + self.render(request) + self.assertEquals(channel.result["code"], b"401", channel.result) From 8a4a4186ded34bab1ffb4ee1cebcb476890da207 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 18 Sep 2020 09:56:44 -0400 Subject: [PATCH 034/245] Simplify super() calls to Python 3 syntax. (#8344) This converts calls like super(Foo, self) -> super(). 
Generated with: sed -i "" -Ee 's/super\([^\(]+\)/super()/g' **/*.py --- changelog.d/8344.misc | 1 + scripts-dev/definitions.py | 2 +- scripts-dev/federation_client.py | 2 +- synapse/api/errors.py | 50 +++++++++---------- synapse/api/filtering.py | 2 +- synapse/app/generic_worker.py | 6 +-- synapse/appservice/api.py | 2 +- synapse/config/consent_config.py | 2 +- synapse/config/registration.py | 2 +- synapse/config/server_notices_config.py | 2 +- synapse/crypto/keyring.py | 4 +- synapse/federation/federation_client.py | 2 +- synapse/federation/federation_server.py | 2 +- synapse/federation/transport/server.py | 10 ++-- synapse/groups/groups_server.py | 2 +- synapse/handlers/admin.py | 2 +- synapse/handlers/auth.py | 2 +- synapse/handlers/deactivate_account.py | 2 +- synapse/handlers/device.py | 4 +- synapse/handlers/directory.py | 2 +- synapse/handlers/events.py | 4 +- synapse/handlers/federation.py | 2 +- synapse/handlers/groups_local.py | 2 +- synapse/handlers/identity.py | 2 +- synapse/handlers/initial_sync.py | 2 +- synapse/handlers/profile.py | 4 +- synapse/handlers/read_marker.py | 2 +- synapse/handlers/receipts.py | 2 +- synapse/handlers/register.py | 2 +- synapse/handlers/room.py | 2 +- synapse/handlers/room_list.py | 2 +- synapse/handlers/room_member_worker.py | 2 +- synapse/handlers/search.py | 2 +- synapse/handlers/set_password.py | 2 +- synapse/handlers/user_directory.py | 2 +- synapse/http/__init__.py | 2 +- synapse/logging/formatter.py | 2 +- synapse/logging/scopecontextmanager.py | 6 +-- synapse/push/__init__.py | 2 +- synapse/replication/http/devices.py | 2 +- synapse/replication/http/federation.py | 8 +-- synapse/replication/http/login.py | 2 +- synapse/replication/http/membership.py | 6 +-- synapse/replication/http/register.py | 4 +- synapse/replication/http/send_event.py | 2 +- synapse/replication/slave/storage/_base.py | 2 +- .../replication/slave/storage/account_data.py | 2 +- .../replication/slave/storage/client_ips.py | 2 +- .../replication/slave/storage/deviceinbox.py | 2 +- synapse/replication/slave/storage/devices.py | 2 +- synapse/replication/slave/storage/events.py | 2 +- .../replication/slave/storage/filtering.py | 2 +- synapse/replication/slave/storage/groups.py | 2 +- synapse/replication/slave/storage/presence.py | 2 +- synapse/replication/slave/storage/pushers.py | 2 +- synapse/replication/slave/storage/receipts.py | 2 +- synapse/replication/slave/storage/room.py | 2 +- synapse/replication/tcp/streams/_base.py | 2 +- synapse/rest/admin/devices.py | 2 +- synapse/rest/client/v1/directory.py | 6 +-- synapse/rest/client/v1/events.py | 4 +- synapse/rest/client/v1/initial_sync.py | 2 +- synapse/rest/client/v1/login.py | 4 +- synapse/rest/client/v1/logout.py | 4 +- synapse/rest/client/v1/presence.py | 2 +- synapse/rest/client/v1/profile.py | 6 +-- synapse/rest/client/v1/push_rule.py | 2 +- synapse/rest/client/v1/pusher.py | 6 +-- synapse/rest/client/v1/room.py | 38 +++++++------- synapse/rest/client/v1/voip.py | 2 +- synapse/rest/client/v2_alpha/account.py | 22 ++++---- synapse/rest/client/v2_alpha/account_data.py | 4 +- .../rest/client/v2_alpha/account_validity.py | 4 +- synapse/rest/client/v2_alpha/auth.py | 2 +- synapse/rest/client/v2_alpha/capabilities.py | 2 +- synapse/rest/client/v2_alpha/devices.py | 6 +-- synapse/rest/client/v2_alpha/filter.py | 4 +- synapse/rest/client/v2_alpha/groups.py | 48 +++++++++--------- synapse/rest/client/v2_alpha/keys.py | 12 ++--- synapse/rest/client/v2_alpha/notifications.py | 2 +- synapse/rest/client/v2_alpha/openid.py | 2 +- 
.../rest/client/v2_alpha/password_policy.py | 2 +- synapse/rest/client/v2_alpha/read_marker.py | 2 +- synapse/rest/client/v2_alpha/receipts.py | 2 +- synapse/rest/client/v2_alpha/register.py | 10 ++-- synapse/rest/client/v2_alpha/relations.py | 8 +-- synapse/rest/client/v2_alpha/report_event.py | 2 +- synapse/rest/client/v2_alpha/room_keys.py | 6 +-- .../v2_alpha/room_upgrade_rest_servlet.py | 2 +- synapse/rest/client/v2_alpha/sendtodevice.py | 2 +- synapse/rest/client/v2_alpha/shared_rooms.py | 2 +- synapse/rest/client/v2_alpha/sync.py | 2 +- synapse/rest/client/v2_alpha/tags.py | 4 +- synapse/rest/client/v2_alpha/thirdparty.py | 8 +-- synapse/rest/client/v2_alpha/tokenrefresh.py | 2 +- .../rest/client/v2_alpha/user_directory.py | 2 +- synapse/rest/client/versions.py | 2 +- synapse/storage/databases/main/__init__.py | 2 +- .../storage/databases/main/account_data.py | 4 +- synapse/storage/databases/main/appservice.py | 2 +- synapse/storage/databases/main/client_ips.py | 4 +- synapse/storage/databases/main/deviceinbox.py | 4 +- synapse/storage/databases/main/devices.py | 4 +- .../databases/main/event_federation.py | 2 +- .../databases/main/event_push_actions.py | 4 +- .../databases/main/events_bg_updates.py | 2 +- .../storage/databases/main/events_worker.py | 2 +- .../databases/main/media_repository.py | 6 +-- .../databases/main/monthly_active_users.py | 4 +- synapse/storage/databases/main/push_rule.py | 2 +- synapse/storage/databases/main/receipts.py | 4 +- .../storage/databases/main/registration.py | 6 +-- synapse/storage/databases/main/room.py | 6 +-- synapse/storage/databases/main/roommember.py | 6 +-- synapse/storage/databases/main/search.py | 4 +- synapse/storage/databases/main/state.py | 6 +-- synapse/storage/databases/main/stats.py | 2 +- synapse/storage/databases/main/stream.py | 2 +- .../storage/databases/main/transactions.py | 2 +- .../storage/databases/main/user_directory.py | 4 +- synapse/storage/databases/state/bg_updates.py | 2 +- synapse/storage/databases/state/store.py | 2 +- synapse/util/manhole.py | 2 +- synapse/util/retryutils.py | 2 +- tests/handlers/test_e2e_keys.py | 2 +- tests/handlers/test_e2e_room_keys.py | 2 +- .../replication/slave/storage/test_events.py | 2 +- tests/rest/test_well_known.py | 2 +- tests/server.py | 2 +- tests/storage/test_appservice.py | 2 +- tests/storage/test_devices.py | 2 +- tests/test_state.py | 2 +- tests/unittest.py | 2 +- 133 files changed, 272 insertions(+), 281 deletions(-) create mode 100644 changelog.d/8344.misc diff --git a/changelog.d/8344.misc b/changelog.d/8344.misc new file mode 100644 index 000000000000..0b342d513727 --- /dev/null +++ b/changelog.d/8344.misc @@ -0,0 +1 @@ +Simplify `super()` calls to Python 3 syntax. 
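Inside a method body the two spellings resolve to the same call on Python 3, which is what makes the mechanical rewrite above safe. A minimal sketch with hypothetical class names:

    class Base:
        def __init__(self):
            self.ready = True

    class Derived(Base):
        def __init__(self):
            # Old, Python 2 compatible spelling (what this patch removes):
            #     super(Derived, self).__init__()
            # Zero-argument form: the compiler supplies the enclosing class and
            # the instance via the __class__ cell, so behaviour is unchanged.
            super().__init__()

    assert Derived().ready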
diff --git a/scripts-dev/definitions.py b/scripts-dev/definitions.py index 15e6ce6e16d2..313860df139a 100755 --- a/scripts-dev/definitions.py +++ b/scripts-dev/definitions.py @@ -11,7 +11,7 @@ class DefinitionVisitor(ast.NodeVisitor): def __init__(self): - super(DefinitionVisitor, self).__init__() + super().__init__() self.functions = {} self.classes = {} self.names = {} diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py index 848a826f1742..abcec48c4f3b 100755 --- a/scripts-dev/federation_client.py +++ b/scripts-dev/federation_client.py @@ -321,7 +321,7 @@ def get_connection(self, url, proxies=None): url = urlparse.urlunparse( ("https", netloc, parsed.path, parsed.params, parsed.query, parsed.fragment) ) - return super(MatrixConnectionAdapter, self).get_connection(url, proxies) + return super().get_connection(url, proxies) if __name__ == "__main__": diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 94a9e58eae26..cd6670d0a266 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -87,7 +87,7 @@ class CodeMessageException(RuntimeError): """ def __init__(self, code: Union[int, HTTPStatus], msg: str): - super(CodeMessageException, self).__init__("%d: %s" % (code, msg)) + super().__init__("%d: %s" % (code, msg)) # Some calls to this method pass instances of http.HTTPStatus for `code`. # While HTTPStatus is a subclass of int, it has magic __str__ methods @@ -138,7 +138,7 @@ def __init__(self, code: int, msg: str, errcode: str = Codes.UNKNOWN): msg: The human-readable error message. errcode: The matrix error code e.g 'M_FORBIDDEN' """ - super(SynapseError, self).__init__(code, msg) + super().__init__(code, msg) self.errcode = errcode def error_dict(self): @@ -159,7 +159,7 @@ def __init__( errcode: str = Codes.UNKNOWN, additional_fields: Optional[Dict] = None, ): - super(ProxiedRequestError, self).__init__(code, msg, errcode) + super().__init__(code, msg, errcode) if additional_fields is None: self._additional_fields = {} # type: Dict else: @@ -181,7 +181,7 @@ def __init__(self, msg: str, consent_uri: str): msg: The human-readable error message consent_url: The URL where the user can give their consent """ - super(ConsentNotGivenError, self).__init__( + super().__init__( code=HTTPStatus.FORBIDDEN, msg=msg, errcode=Codes.CONSENT_NOT_GIVEN ) self._consent_uri = consent_uri @@ -201,7 +201,7 @@ def __init__(self, msg: str): Args: msg: The human-readable error message """ - super(UserDeactivatedError, self).__init__( + super().__init__( code=HTTPStatus.FORBIDDEN, msg=msg, errcode=Codes.USER_DEACTIVATED ) @@ -225,7 +225,7 @@ def __init__(self, destination: Optional[str]): self.destination = destination - super(FederationDeniedError, self).__init__( + super().__init__( code=403, msg="Federation denied with %s." 
% (self.destination,), errcode=Codes.FORBIDDEN, @@ -244,9 +244,7 @@ class InteractiveAuthIncompleteError(Exception): """ def __init__(self, session_id: str, result: "JsonDict"): - super(InteractiveAuthIncompleteError, self).__init__( - "Interactive auth not yet complete" - ) + super().__init__("Interactive auth not yet complete") self.session_id = session_id self.result = result @@ -261,14 +259,14 @@ def __init__(self, *args, **kwargs): message = "Unrecognized request" else: message = args[0] - super(UnrecognizedRequestError, self).__init__(400, message, **kwargs) + super().__init__(400, message, **kwargs) class NotFoundError(SynapseError): """An error indicating we can't find the thing you asked for""" def __init__(self, msg: str = "Not found", errcode: str = Codes.NOT_FOUND): - super(NotFoundError, self).__init__(404, msg, errcode=errcode) + super().__init__(404, msg, errcode=errcode) class AuthError(SynapseError): @@ -279,7 +277,7 @@ class AuthError(SynapseError): def __init__(self, *args, **kwargs): if "errcode" not in kwargs: kwargs["errcode"] = Codes.FORBIDDEN - super(AuthError, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) class InvalidClientCredentialsError(SynapseError): @@ -335,7 +333,7 @@ def __init__( ): self.admin_contact = admin_contact self.limit_type = limit_type - super(ResourceLimitError, self).__init__(code, msg, errcode=errcode) + super().__init__(code, msg, errcode=errcode) def error_dict(self): return cs_error( @@ -352,7 +350,7 @@ class EventSizeError(SynapseError): def __init__(self, *args, **kwargs): if "errcode" not in kwargs: kwargs["errcode"] = Codes.TOO_LARGE - super(EventSizeError, self).__init__(413, *args, **kwargs) + super().__init__(413, *args, **kwargs) class EventStreamError(SynapseError): @@ -361,7 +359,7 @@ class EventStreamError(SynapseError): def __init__(self, *args, **kwargs): if "errcode" not in kwargs: kwargs["errcode"] = Codes.BAD_PAGINATION - super(EventStreamError, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) class LoginError(SynapseError): @@ -384,7 +382,7 @@ def __init__( error_url: Optional[str] = None, errcode: str = Codes.CAPTCHA_INVALID, ): - super(InvalidCaptchaError, self).__init__(code, msg, errcode) + super().__init__(code, msg, errcode) self.error_url = error_url def error_dict(self): @@ -402,7 +400,7 @@ def __init__( retry_after_ms: Optional[int] = None, errcode: str = Codes.LIMIT_EXCEEDED, ): - super(LimitExceededError, self).__init__(code, msg, errcode) + super().__init__(code, msg, errcode) self.retry_after_ms = retry_after_ms def error_dict(self): @@ -418,9 +416,7 @@ def __init__(self, current_version: str): Args: current_version: the current version of the store they should have used """ - super(RoomKeysVersionError, self).__init__( - 403, "Wrong room_keys version", Codes.WRONG_ROOM_KEYS_VERSION - ) + super().__init__(403, "Wrong room_keys version", Codes.WRONG_ROOM_KEYS_VERSION) self.current_version = current_version @@ -429,7 +425,7 @@ class UnsupportedRoomVersionError(SynapseError): not support.""" def __init__(self, msg: str = "Homeserver does not support this room version"): - super(UnsupportedRoomVersionError, self).__init__( + super().__init__( code=400, msg=msg, errcode=Codes.UNSUPPORTED_ROOM_VERSION, ) @@ -440,7 +436,7 @@ class ThreepidValidationError(SynapseError): def __init__(self, *args, **kwargs): if "errcode" not in kwargs: kwargs["errcode"] = Codes.FORBIDDEN - super(ThreepidValidationError, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) class 
IncompatibleRoomVersionError(SynapseError): @@ -451,7 +447,7 @@ class IncompatibleRoomVersionError(SynapseError): """ def __init__(self, room_version: str): - super(IncompatibleRoomVersionError, self).__init__( + super().__init__( code=400, msg="Your homeserver does not support the features required to " "join this room", @@ -473,7 +469,7 @@ def __init__( msg: str = "This password doesn't comply with the server's policy", errcode: str = Codes.WEAK_PASSWORD, ): - super(PasswordRefusedError, self).__init__( + super().__init__( code=400, msg=msg, errcode=errcode, ) @@ -488,7 +484,7 @@ class RequestSendFailed(RuntimeError): """ def __init__(self, inner_exception, can_retry): - super(RequestSendFailed, self).__init__( + super().__init__( "Failed to send request: %s: %s" % (type(inner_exception).__name__, inner_exception) ) @@ -542,7 +538,7 @@ def __init__( self.source = source msg = "%s %s: %s" % (level, code, reason) - super(FederationError, self).__init__(msg) + super().__init__(msg) def get_dict(self): return { @@ -570,7 +566,7 @@ def __init__(self, code: int, msg: str, response: bytes): msg: reason phrase from HTTP response status line response: body of response """ - super(HttpResponseException, self).__init__(code, msg) + super().__init__(code, msg) self.response = response def to_synapse_error(self): diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index bb33345be60e..5caf336fd0cb 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -132,7 +132,7 @@ def matrix_user_id_validator(user_id_str): class Filtering: def __init__(self, hs): - super(Filtering, self).__init__() + super().__init__() self.store = hs.get_datastore() async def get_user_filter(self, user_localpart, filter_id): diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index f985810e88d3..c38413c8937b 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -152,7 +152,7 @@ class PresenceStatusStubServlet(RestServlet): PATTERNS = client_patterns("/presence/(?P[^/]*)/status") def __init__(self, hs): - super(PresenceStatusStubServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() async def on_GET(self, request, user_id): @@ -176,7 +176,7 @@ def __init__(self, hs): Args: hs (synapse.server.HomeServer): server """ - super(KeyUploadServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastore() self.http_client = hs.get_simple_http_client() @@ -646,7 +646,7 @@ def get_presence_handler(self): class GenericWorkerReplicationHandler(ReplicationDataHandler): def __init__(self, hs): - super(GenericWorkerReplicationHandler, self).__init__(hs) + super().__init__(hs) self.store = hs.get_datastore() self.presence_handler = hs.get_presence_handler() # type: GenericWorkerPresence diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index bb6fa8299a8d..1514c0f69142 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -88,7 +88,7 @@ class ApplicationServiceApi(SimpleHttpClient): """ def __init__(self, hs): - super(ApplicationServiceApi, self).__init__(hs) + super().__init__(hs) self.clock = hs.get_clock() self.protocol_meta_cache = ResponseCache( diff --git a/synapse/config/consent_config.py b/synapse/config/consent_config.py index aec9c4bbce8d..fbddebeeab2a 100644 --- a/synapse/config/consent_config.py +++ b/synapse/config/consent_config.py @@ -77,7 +77,7 @@ class ConsentConfig(Config): section = "consent" def __init__(self, *args): - super(ConsentConfig, 
self).__init__(*args) + super().__init__(*args) self.user_consent_version = None self.user_consent_template_dir = None diff --git a/synapse/config/registration.py b/synapse/config/registration.py index a1856557745c..5ffbb934fe2d 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -30,7 +30,7 @@ class AccountValidityConfig(Config): def __init__(self, config, synapse_config): if config is None: return - super(AccountValidityConfig, self).__init__() + super().__init__() self.enabled = config.get("enabled", False) self.renew_by_email_enabled = "renew_at" in config diff --git a/synapse/config/server_notices_config.py b/synapse/config/server_notices_config.py index 6c427b6f927e..57f69dc8e27d 100644 --- a/synapse/config/server_notices_config.py +++ b/synapse/config/server_notices_config.py @@ -62,7 +62,7 @@ class ServerNoticesConfig(Config): section = "servernotices" def __init__(self, *args): - super(ServerNoticesConfig, self).__init__(*args) + super().__init__(*args) self.server_notices_mxid = None self.server_notices_mxid_display_name = None self.server_notices_mxid_avatar_url = None diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 32c31b1cd14b..42e4087a926e 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -558,7 +558,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): """KeyFetcher impl which fetches keys from the "perspectives" servers""" def __init__(self, hs): - super(PerspectivesKeyFetcher, self).__init__(hs) + super().__init__(hs) self.clock = hs.get_clock() self.client = hs.get_http_client() self.key_servers = self.config.key_servers @@ -728,7 +728,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher): """KeyFetcher impl which fetches keys from the origin servers""" def __init__(self, hs): - super(ServerKeyFetcher, self).__init__(hs) + super().__init__(hs) self.clock = hs.get_clock() self.client = hs.get_http_client() diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index a2e8d96ea27c..639d19f696bb 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -79,7 +79,7 @@ class InvalidResponseError(RuntimeError): class FederationClient(FederationBase): def __init__(self, hs): - super(FederationClient, self).__init__(hs) + super().__init__(hs) self.pdu_destination_tried = {} self._clock.looping_call(self._clear_tried_cache, 60 * 1000) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index ff00f0b3022e..2dcd081cbc24 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -90,7 +90,7 @@ class FederationServer(FederationBase): def __init__(self, hs): - super(FederationServer, self).__init__(hs) + super().__init__(hs) self.auth = hs.get_auth() self.handler = hs.get_handlers().federation_handler diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index cc7e9a973ba2..3a6b95631eae 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -68,7 +68,7 @@ def __init__(self, hs, servlet_groups=None): self.clock = hs.get_clock() self.servlet_groups = servlet_groups - super(TransportLayerServer, self).__init__(hs, canonical_json=False) + super().__init__(hs, canonical_json=False) self.authenticator = Authenticator(hs) self.ratelimiter = hs.get_federation_ratelimiter() @@ -376,9 +376,7 @@ class FederationSendServlet(BaseFederationServlet): RATELIMIT = False def 
__init__(self, handler, server_name, **kwargs): - super(FederationSendServlet, self).__init__( - handler, server_name=server_name, **kwargs - ) + super().__init__(handler, server_name=server_name, **kwargs) self.server_name = server_name # This is when someone is trying to send us a bunch of data. @@ -773,9 +771,7 @@ class PublicRoomList(BaseFederationServlet): PATH = "/publicRooms" def __init__(self, handler, authenticator, ratelimiter, server_name, allow_access): - super(PublicRoomList, self).__init__( - handler, authenticator, ratelimiter, server_name - ) + super().__init__(handler, authenticator, ratelimiter, server_name) self.allow_access = allow_access async def on_GET(self, origin, content, query): diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 1dd20ee4e1d0..e5f85b472dd2 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -336,7 +336,7 @@ async def get_rooms_in_group(self, group_id, requester_user_id): class GroupsServerHandler(GroupsServerWorkerHandler): def __init__(self, hs): - super(GroupsServerHandler, self).__init__(hs) + super().__init__(hs) # Ensure attestations get renewed hs.get_groups_attestation_renewer() diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 5e5a64037dfe..dd981c597eff 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -28,7 +28,7 @@ class AdminHandler(BaseHandler): def __init__(self, hs): - super(AdminHandler, self).__init__(hs) + super().__init__(hs) self.storage = hs.get_storage() self.state_store = self.storage.state diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 4e658d9a4879..0322b60cfc63 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -145,7 +145,7 @@ def __init__(self, hs): Args: hs (synapse.server.HomeServer): """ - super(AuthHandler, self).__init__(hs) + super().__init__(hs) self.checkers = {} # type: Dict[str, UserInteractiveAuthChecker] for auth_checker_class in INTERACTIVE_AUTH_CHECKERS: diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index 25169157c1ca..0635ad570866 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -29,7 +29,7 @@ class DeactivateAccountHandler(BaseHandler): """Handler which deals with deactivating user accounts.""" def __init__(self, hs): - super(DeactivateAccountHandler, self).__init__(hs) + super().__init__(hs) self.hs = hs self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 4b0a4f96ccb0..55a978743988 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -48,7 +48,7 @@ class DeviceWorkerHandler(BaseHandler): def __init__(self, hs): - super(DeviceWorkerHandler, self).__init__(hs) + super().__init__(hs) self.hs = hs self.state = hs.get_state_handler() @@ -251,7 +251,7 @@ async def on_federation_query_user_devices(self, user_id): class DeviceHandler(DeviceWorkerHandler): def __init__(self, hs): - super(DeviceHandler, self).__init__(hs) + super().__init__(hs) self.federation_sender = hs.get_federation_sender() diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 46826eb7848f..62aa9a2da8f2 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -37,7 +37,7 @@ class DirectoryHandler(BaseHandler): def __init__(self, hs): - super(DirectoryHandler, self).__init__(hs) + super().__init__(hs) 
self.state = hs.get_state_handler() self.appservice_handler = hs.get_application_service_handler() diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index fdce54c5c30b..0875b74ea89c 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -37,7 +37,7 @@ class EventStreamHandler(BaseHandler): def __init__(self, hs: "HomeServer"): - super(EventStreamHandler, self).__init__(hs) + super().__init__(hs) self.clock = hs.get_clock() @@ -142,7 +142,7 @@ async def get_stream( class EventHandler(BaseHandler): def __init__(self, hs: "HomeServer"): - super(EventHandler, self).__init__(hs) + super().__init__(hs) self.storage = hs.get_storage() async def get_event( diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 262901363f63..96eeff7b1ba5 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -115,7 +115,7 @@ class FederationHandler(BaseHandler): """ def __init__(self, hs): - super(FederationHandler, self).__init__(hs) + super().__init__(hs) self.hs = hs diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 44df567983f6..9684e60fc8b6 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -240,7 +240,7 @@ async def bulk_get_publicised_groups(self, user_ids, proxy=True): class GroupsLocalHandler(GroupsLocalWorkerHandler): def __init__(self, hs): - super(GroupsLocalHandler, self).__init__(hs) + super().__init__(hs) # Ensure attestations get renewed hs.get_groups_attestation_renewer() diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 0ce6ddfbe4e6..ab15570f7a97 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -45,7 +45,7 @@ class IdentityHandler(BaseHandler): def __init__(self, hs): - super(IdentityHandler, self).__init__(hs) + super().__init__(hs) self.http_client = SimpleHttpClient(hs) # We create a blacklisting instance of SimpleHttpClient for contacting identity diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index ba4828c713db..8cd7eb22a303 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -42,7 +42,7 @@ class InitialSyncHandler(BaseHandler): def __init__(self, hs: "HomeServer"): - super(InitialSyncHandler, self).__init__(hs) + super().__init__(hs) self.hs = hs self.state = hs.get_state_handler() self.clock = hs.get_clock() diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 0cb8fad89a2c..5453e6dfc87a 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -44,7 +44,7 @@ class BaseProfileHandler(BaseHandler): """ def __init__(self, hs): - super(BaseProfileHandler, self).__init__(hs) + super().__init__(hs) self.federation = hs.get_federation_client() hs.get_federation_registry().register_query_handler( @@ -369,7 +369,7 @@ class MasterProfileHandler(BaseProfileHandler): PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000 def __init__(self, hs): - super(MasterProfileHandler, self).__init__(hs) + super().__init__(hs) assert hs.config.worker_app is None diff --git a/synapse/handlers/read_marker.py b/synapse/handlers/read_marker.py index e3b528d27146..c32f314a1c0e 100644 --- a/synapse/handlers/read_marker.py +++ b/synapse/handlers/read_marker.py @@ -24,7 +24,7 @@ class ReadMarkerHandler(BaseHandler): def __init__(self, hs): - super(ReadMarkerHandler, self).__init__(hs) + super().__init__(hs) self.server_name = hs.config.server_name self.store = hs.get_datastore() 
self.read_marker_linearizer = Linearizer(name="read_marker") diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index bdd8e52edd3b..722592375796 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -23,7 +23,7 @@ class ReceiptsHandler(BaseHandler): def __init__(self, hs): - super(ReceiptsHandler, self).__init__(hs) + super().__init__(hs) self.server_name = hs.config.server_name self.store = hs.get_datastore() diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index cde2dbca92bb..538f4b2a61de 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -42,7 +42,7 @@ def __init__(self, hs): Args: hs (synapse.server.HomeServer): """ - super(RegistrationHandler, self).__init__(hs) + super().__init__(hs) self.hs = hs self.auth = hs.get_auth() self._auth_handler = hs.get_auth_handler() diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index eeade6ad3f13..11bf146bedcd 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -70,7 +70,7 @@ class RoomCreationHandler(BaseHandler): def __init__(self, hs: "HomeServer"): - super(RoomCreationHandler, self).__init__(hs) + super().__init__(hs) self.spam_checker = hs.get_spam_checker() self.event_creation_handler = hs.get_event_creation_handler() diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index 5dd7b2839194..4a13c8e91291 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -38,7 +38,7 @@ class RoomListHandler(BaseHandler): def __init__(self, hs): - super(RoomListHandler, self).__init__(hs) + super().__init__(hs) self.enable_room_list_search = hs.config.enable_room_list_search self.response_cache = ResponseCache(hs, "room_list") self.remote_response_cache = ResponseCache( diff --git a/synapse/handlers/room_member_worker.py b/synapse/handlers/room_member_worker.py index e7f34737c684..f2e88f6a5b5d 100644 --- a/synapse/handlers/room_member_worker.py +++ b/synapse/handlers/room_member_worker.py @@ -30,7 +30,7 @@ class RoomMemberWorkerHandler(RoomMemberHandler): def __init__(self, hs): - super(RoomMemberWorkerHandler, self).__init__(hs) + super().__init__(hs) self._remote_join_client = ReplRemoteJoin.make_client(hs) self._remote_reject_client = ReplRejectInvite.make_client(hs) diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index d58f9788c5b9..6a76c20d7913 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -32,7 +32,7 @@ class SearchHandler(BaseHandler): def __init__(self, hs): - super(SearchHandler, self).__init__(hs) + super().__init__(hs) self._event_serializer = hs.get_event_client_serializer() self.storage = hs.get_storage() self.state_store = self.storage.state diff --git a/synapse/handlers/set_password.py b/synapse/handlers/set_password.py index 4d245b618b17..a5d67f828f6a 100644 --- a/synapse/handlers/set_password.py +++ b/synapse/handlers/set_password.py @@ -27,7 +27,7 @@ class SetPasswordHandler(BaseHandler): """Handler which deals with changing user account passwords""" def __init__(self, hs): - super(SetPasswordHandler, self).__init__(hs) + super().__init__(hs) self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() self._password_policy_handler = hs.get_password_policy_handler() diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index e21f8dbc58e3..79393c8829fc 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -37,7 
+37,7 @@ class UserDirectoryHandler(StateDeltasHandler): """ def __init__(self, hs): - super(UserDirectoryHandler, self).__init__(hs) + super().__init__(hs) self.store = hs.get_datastore() self.state = hs.get_state_handler() diff --git a/synapse/http/__init__.py b/synapse/http/__init__.py index 3880ce0d944a..8eb363859146 100644 --- a/synapse/http/__init__.py +++ b/synapse/http/__init__.py @@ -27,7 +27,7 @@ class RequestTimedOutError(SynapseError): """Exception representing timeout of an outbound request""" def __init__(self): - super(RequestTimedOutError, self).__init__(504, "Timed out") + super().__init__(504, "Timed out") def cancelled_to_request_timed_out_error(value, timeout): diff --git a/synapse/logging/formatter.py b/synapse/logging/formatter.py index d736ad5b9bc8..11f60a77f795 100644 --- a/synapse/logging/formatter.py +++ b/synapse/logging/formatter.py @@ -30,7 +30,7 @@ class LogFormatter(logging.Formatter): """ def __init__(self, *args, **kwargs): - super(LogFormatter, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def formatException(self, ei): sio = StringIO() diff --git a/synapse/logging/scopecontextmanager.py b/synapse/logging/scopecontextmanager.py index 026854b4c715..7b9c65745627 100644 --- a/synapse/logging/scopecontextmanager.py +++ b/synapse/logging/scopecontextmanager.py @@ -107,7 +107,7 @@ def __init__(self, manager, span, logcontext, enter_logcontext, finish_on_close) finish_on_close (Boolean): if True finish the span when the scope is closed """ - super(_LogContextScope, self).__init__(manager, span) + super().__init__(manager, span) self.logcontext = logcontext self._finish_on_close = finish_on_close self._enter_logcontext = enter_logcontext @@ -120,9 +120,9 @@ def __enter__(self): def __exit__(self, type, value, traceback): if type == twisted.internet.defer._DefGen_Return: - super(_LogContextScope, self).__exit__(None, None, None) + super().__exit__(None, None, None) else: - super(_LogContextScope, self).__exit__(type, value, traceback) + super().__exit__(type, value, traceback) if self._enter_logcontext: self.logcontext.__exit__(type, value, traceback) else: # the logcontext existed before the creation of the scope diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index edf45dc5990f..5a437f981072 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -16,4 +16,4 @@ class PusherConfigException(Exception): def __init__(self, msg): - super(PusherConfigException, self).__init__(msg) + super().__init__(msg) diff --git a/synapse/replication/http/devices.py b/synapse/replication/http/devices.py index 20f3ba76c09d..807b85d2e124 100644 --- a/synapse/replication/http/devices.py +++ b/synapse/replication/http/devices.py @@ -53,7 +53,7 @@ class ReplicationUserDevicesResyncRestServlet(ReplicationEndpoint): CACHE = False def __init__(self, hs): - super(ReplicationUserDevicesResyncRestServlet, self).__init__(hs) + super().__init__(hs) self.device_list_updater = hs.get_device_handler().device_list_updater self.store = hs.get_datastore() diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py index 5c8be747e140..5393b9a9e7dd 100644 --- a/synapse/replication/http/federation.py +++ b/synapse/replication/http/federation.py @@ -57,7 +57,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint): PATH_ARGS = () def __init__(self, hs): - super(ReplicationFederationSendEventsRestServlet, self).__init__(hs) + super().__init__(hs) self.store = hs.get_datastore() self.storage = 
hs.get_storage() @@ -150,7 +150,7 @@ class ReplicationFederationSendEduRestServlet(ReplicationEndpoint): PATH_ARGS = ("edu_type",) def __init__(self, hs): - super(ReplicationFederationSendEduRestServlet, self).__init__(hs) + super().__init__(hs) self.store = hs.get_datastore() self.clock = hs.get_clock() @@ -193,7 +193,7 @@ class ReplicationGetQueryRestServlet(ReplicationEndpoint): CACHE = False def __init__(self, hs): - super(ReplicationGetQueryRestServlet, self).__init__(hs) + super().__init__(hs) self.store = hs.get_datastore() self.clock = hs.get_clock() @@ -236,7 +236,7 @@ class ReplicationCleanRoomRestServlet(ReplicationEndpoint): PATH_ARGS = ("room_id",) def __init__(self, hs): - super(ReplicationCleanRoomRestServlet, self).__init__(hs) + super().__init__(hs) self.store = hs.get_datastore() diff --git a/synapse/replication/http/login.py b/synapse/replication/http/login.py index fb326bb86911..4c81e2d784bd 100644 --- a/synapse/replication/http/login.py +++ b/synapse/replication/http/login.py @@ -32,7 +32,7 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint): PATH_ARGS = ("user_id",) def __init__(self, hs): - super(RegisterDeviceReplicationServlet, self).__init__(hs) + super().__init__(hs) self.registration_handler = hs.get_registration_handler() @staticmethod diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py index 08095fdf7d2c..30680baee813 100644 --- a/synapse/replication/http/membership.py +++ b/synapse/replication/http/membership.py @@ -45,7 +45,7 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint): PATH_ARGS = ("room_id", "user_id") def __init__(self, hs): - super(ReplicationRemoteJoinRestServlet, self).__init__(hs) + super().__init__(hs) self.federation_handler = hs.get_handlers().federation_handler self.store = hs.get_datastore() @@ -107,7 +107,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint): PATH_ARGS = ("invite_event_id",) def __init__(self, hs: "HomeServer"): - super(ReplicationRemoteRejectInviteRestServlet, self).__init__(hs) + super().__init__(hs) self.store = hs.get_datastore() self.clock = hs.get_clock() @@ -168,7 +168,7 @@ class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint): CACHE = False # No point caching as should return instantly. 
def __init__(self, hs): - super(ReplicationUserJoinedLeftRoomRestServlet, self).__init__(hs) + super().__init__(hs) self.registeration_handler = hs.get_registration_handler() self.store = hs.get_datastore() diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py index a02b27474d9a..7b12ec906025 100644 --- a/synapse/replication/http/register.py +++ b/synapse/replication/http/register.py @@ -29,7 +29,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint): PATH_ARGS = ("user_id",) def __init__(self, hs): - super(ReplicationRegisterServlet, self).__init__(hs) + super().__init__(hs) self.store = hs.get_datastore() self.registration_handler = hs.get_registration_handler() @@ -104,7 +104,7 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint): PATH_ARGS = ("user_id",) def __init__(self, hs): - super(ReplicationPostRegisterActionsServlet, self).__init__(hs) + super().__init__(hs) self.store = hs.get_datastore() self.registration_handler = hs.get_registration_handler() diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py index f13d4524264b..9a3a694d5dfa 100644 --- a/synapse/replication/http/send_event.py +++ b/synapse/replication/http/send_event.py @@ -52,7 +52,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint): PATH_ARGS = ("event_id",) def __init__(self, hs): - super(ReplicationSendEventRestServlet, self).__init__(hs) + super().__init__(hs) self.event_creation_handler = hs.get_event_creation_handler() self.store = hs.get_datastore() diff --git a/synapse/replication/slave/storage/_base.py b/synapse/replication/slave/storage/_base.py index 60f2e1245f99..d25fa49e1a9e 100644 --- a/synapse/replication/slave/storage/_base.py +++ b/synapse/replication/slave/storage/_base.py @@ -26,7 +26,7 @@ class BaseSlavedStore(CacheInvalidationWorkerStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(BaseSlavedStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) if isinstance(self.database_engine, PostgresEngine): self._cache_id_gen = MultiWriterIdGenerator( db_conn, diff --git a/synapse/replication/slave/storage/account_data.py b/synapse/replication/slave/storage/account_data.py index bb66ba9b80f8..4268565fc82d 100644 --- a/synapse/replication/slave/storage/account_data.py +++ b/synapse/replication/slave/storage/account_data.py @@ -34,7 +34,7 @@ def __init__(self, database: DatabasePool, db_conn, hs): ], ) - super(SlavedAccountDataStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) def get_max_account_data_stream_id(self): return self._account_data_id_gen.get_current_token() diff --git a/synapse/replication/slave/storage/client_ips.py b/synapse/replication/slave/storage/client_ips.py index a6fdedde6357..1f8dafe7ea40 100644 --- a/synapse/replication/slave/storage/client_ips.py +++ b/synapse/replication/slave/storage/client_ips.py @@ -22,7 +22,7 @@ class SlavedClientIpStore(BaseSlavedStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(SlavedClientIpStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.client_ip_last_seen = Cache( name="client_ip_last_seen", keylen=4, max_entries=50000 diff --git a/synapse/replication/slave/storage/deviceinbox.py b/synapse/replication/slave/storage/deviceinbox.py index 533d927701d3..5b045bed02f9 100644 --- a/synapse/replication/slave/storage/deviceinbox.py +++ b/synapse/replication/slave/storage/deviceinbox.py @@ -24,7 +24,7 @@ class 
SlavedDeviceInboxStore(DeviceInboxWorkerStore, BaseSlavedStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(SlavedDeviceInboxStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self._device_inbox_id_gen = SlavedIdTracker( db_conn, "device_inbox", "stream_id" ) diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py index 3b788c96250d..e0d86240dd19 100644 --- a/synapse/replication/slave/storage/devices.py +++ b/synapse/replication/slave/storage/devices.py @@ -24,7 +24,7 @@ class SlavedDeviceStore(EndToEndKeyWorkerStore, DeviceWorkerStore, BaseSlavedStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(SlavedDeviceStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.hs = hs diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py index da1cc836cf70..fbffe6d85c28 100644 --- a/synapse/replication/slave/storage/events.py +++ b/synapse/replication/slave/storage/events.py @@ -56,7 +56,7 @@ class SlavedEventStore( BaseSlavedStore, ): def __init__(self, database: DatabasePool, db_conn, hs): - super(SlavedEventStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) events_max = self._stream_id_gen.get_current_token() curr_state_delta_prefill, min_curr_state_delta_id = self.db_pool.get_cache_dict( diff --git a/synapse/replication/slave/storage/filtering.py b/synapse/replication/slave/storage/filtering.py index 2562b6fc383f..6a232528610b 100644 --- a/synapse/replication/slave/storage/filtering.py +++ b/synapse/replication/slave/storage/filtering.py @@ -21,7 +21,7 @@ class SlavedFilteringStore(BaseSlavedStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(SlavedFilteringStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) # Filters are immutable so this cache doesn't need to be expired get_user_filter = FilteringStore.__dict__["get_user_filter"] diff --git a/synapse/replication/slave/storage/groups.py b/synapse/replication/slave/storage/groups.py index 567b4a5cc1cc..30955bcbfe0f 100644 --- a/synapse/replication/slave/storage/groups.py +++ b/synapse/replication/slave/storage/groups.py @@ -23,7 +23,7 @@ class SlavedGroupServerStore(GroupServerWorkerStore, BaseSlavedStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(SlavedGroupServerStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.hs = hs diff --git a/synapse/replication/slave/storage/presence.py b/synapse/replication/slave/storage/presence.py index 025f6f6be8e6..55620c03d8c3 100644 --- a/synapse/replication/slave/storage/presence.py +++ b/synapse/replication/slave/storage/presence.py @@ -25,7 +25,7 @@ class SlavedPresenceStore(BaseSlavedStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(SlavedPresenceStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self._presence_id_gen = SlavedIdTracker(db_conn, "presence_stream", "stream_id") self._presence_on_startup = self._get_active_presence(db_conn) # type: ignore diff --git a/synapse/replication/slave/storage/pushers.py b/synapse/replication/slave/storage/pushers.py index 9da218bfe855..c418730ba86a 100644 --- a/synapse/replication/slave/storage/pushers.py +++ b/synapse/replication/slave/storage/pushers.py @@ -24,7 +24,7 @@ class SlavedPusherStore(PusherWorkerStore, BaseSlavedStore): def __init__(self, 
database: DatabasePool, db_conn, hs): - super(SlavedPusherStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self._pushers_id_gen = SlavedIdTracker( db_conn, "pushers", "id", extra_tables=[("deleted_pushers", "stream_id")] ) diff --git a/synapse/replication/slave/storage/receipts.py b/synapse/replication/slave/storage/receipts.py index 5c2986e05017..619591737652 100644 --- a/synapse/replication/slave/storage/receipts.py +++ b/synapse/replication/slave/storage/receipts.py @@ -30,7 +30,7 @@ def __init__(self, database: DatabasePool, db_conn, hs): db_conn, "receipts_linearized", "stream_id" ) - super(SlavedReceiptsStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) def get_max_receipt_stream_id(self): return self._receipts_id_gen.get_current_token() diff --git a/synapse/replication/slave/storage/room.py b/synapse/replication/slave/storage/room.py index 80ae803ad9ab..109ac6bea141 100644 --- a/synapse/replication/slave/storage/room.py +++ b/synapse/replication/slave/storage/room.py @@ -23,7 +23,7 @@ class RoomStore(RoomWorkerStore, BaseSlavedStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(RoomStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self._public_room_id_gen = SlavedIdTracker( db_conn, "public_room_list_stream", "stream_id" ) diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index 1f609f158c14..54dccd15a627 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -345,7 +345,7 @@ class PushRulesStream(Stream): def __init__(self, hs): self.store = hs.get_datastore() - super(PushRulesStream, self).__init__( + super().__init__( hs.get_instance_name(), self._current_token, self.store.get_all_push_rule_updates, diff --git a/synapse/rest/admin/devices.py b/synapse/rest/admin/devices.py index 4670d7160dd3..a16386332205 100644 --- a/synapse/rest/admin/devices.py +++ b/synapse/rest/admin/devices.py @@ -36,7 +36,7 @@ class DeviceRestServlet(RestServlet): ) def __init__(self, hs): - super(DeviceRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.device_handler = hs.get_device_handler() diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py index b210015173b7..faabeeb91c82 100644 --- a/synapse/rest/client/v1/directory.py +++ b/synapse/rest/client/v1/directory.py @@ -40,7 +40,7 @@ class ClientDirectoryServer(RestServlet): PATTERNS = client_patterns("/directory/room/(?P[^/]*)$", v1=True) def __init__(self, hs): - super(ClientDirectoryServer, self).__init__() + super().__init__() self.store = hs.get_datastore() self.handlers = hs.get_handlers() self.auth = hs.get_auth() @@ -120,7 +120,7 @@ class ClientDirectoryListServer(RestServlet): PATTERNS = client_patterns("/directory/list/room/(?P[^/]*)$", v1=True) def __init__(self, hs): - super(ClientDirectoryListServer, self).__init__() + super().__init__() self.store = hs.get_datastore() self.handlers = hs.get_handlers() self.auth = hs.get_auth() @@ -160,7 +160,7 @@ class ClientAppserviceDirectoryListServer(RestServlet): ) def __init__(self, hs): - super(ClientAppserviceDirectoryListServer, self).__init__() + super().__init__() self.store = hs.get_datastore() self.handlers = hs.get_handlers() self.auth = hs.get_auth() diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py index 25effd026108..985d994f6bb5 100644 --- 
a/synapse/rest/client/v1/events.py +++ b/synapse/rest/client/v1/events.py @@ -30,7 +30,7 @@ class EventStreamRestServlet(RestServlet): DEFAULT_LONGPOLL_TIME_MS = 30000 def __init__(self, hs): - super(EventStreamRestServlet, self).__init__() + super().__init__() self.event_stream_handler = hs.get_event_stream_handler() self.auth = hs.get_auth() @@ -74,7 +74,7 @@ class EventRestServlet(RestServlet): PATTERNS = client_patterns("/events/(?P[^/]*)$", v1=True) def __init__(self, hs): - super(EventRestServlet, self).__init__() + super().__init__() self.clock = hs.get_clock() self.event_handler = hs.get_event_handler() self.auth = hs.get_auth() diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/v1/initial_sync.py index 910b3b4eeb94..d7042786ce0c 100644 --- a/synapse/rest/client/v1/initial_sync.py +++ b/synapse/rest/client/v1/initial_sync.py @@ -24,7 +24,7 @@ class InitialSyncRestServlet(RestServlet): PATTERNS = client_patterns("/initialSync$", v1=True) def __init__(self, hs): - super(InitialSyncRestServlet, self).__init__() + super().__init__() self.initial_sync_handler = hs.get_initial_sync_handler() self.auth = hs.get_auth() diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index dd8cdc0d9f7e..250b03a02536 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -48,7 +48,7 @@ class LoginRestServlet(RestServlet): APPSERVICE_TYPE = "uk.half-shot.msc2778.login.application_service" def __init__(self, hs): - super(LoginRestServlet, self).__init__() + super().__init__() self.hs = hs # JWT configuration variables. @@ -429,7 +429,7 @@ class CasTicketServlet(RestServlet): PATTERNS = client_patterns("/login/cas/ticket", v1=True) def __init__(self, hs): - super(CasTicketServlet, self).__init__() + super().__init__() self._cas_handler = hs.get_cas_handler() async def on_GET(self, request: SynapseRequest) -> None: diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py index b0c30b65beac..f792b50cdc02 100644 --- a/synapse/rest/client/v1/logout.py +++ b/synapse/rest/client/v1/logout.py @@ -25,7 +25,7 @@ class LogoutRestServlet(RestServlet): PATTERNS = client_patterns("/logout$", v1=True) def __init__(self, hs): - super(LogoutRestServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() @@ -53,7 +53,7 @@ class LogoutAllRestServlet(RestServlet): PATTERNS = client_patterns("/logout/all$", v1=True) def __init__(self, hs): - super(LogoutAllRestServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py index 970fdd5834ba..79d8e3057fec 100644 --- a/synapse/rest/client/v1/presence.py +++ b/synapse/rest/client/v1/presence.py @@ -30,7 +30,7 @@ class PresenceStatusRestServlet(RestServlet): PATTERNS = client_patterns("/presence/(?P[^/]*)/status", v1=True) def __init__(self, hs): - super(PresenceStatusRestServlet, self).__init__() + super().__init__() self.hs = hs self.presence_handler = hs.get_presence_handler() self.clock = hs.get_clock() diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index e7fe50ed72c4..b686cd671ffd 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -25,7 +25,7 @@ class ProfileDisplaynameRestServlet(RestServlet): PATTERNS = 
client_patterns("/profile/(?P[^/]*)/displayname", v1=True) def __init__(self, hs): - super(ProfileDisplaynameRestServlet, self).__init__() + super().__init__() self.hs = hs self.profile_handler = hs.get_profile_handler() self.auth = hs.get_auth() @@ -73,7 +73,7 @@ class ProfileAvatarURLRestServlet(RestServlet): PATTERNS = client_patterns("/profile/(?P[^/]*)/avatar_url", v1=True) def __init__(self, hs): - super(ProfileAvatarURLRestServlet, self).__init__() + super().__init__() self.hs = hs self.profile_handler = hs.get_profile_handler() self.auth = hs.get_auth() @@ -124,7 +124,7 @@ class ProfileRestServlet(RestServlet): PATTERNS = client_patterns("/profile/(?P[^/]*)", v1=True) def __init__(self, hs): - super(ProfileRestServlet, self).__init__() + super().__init__() self.hs = hs self.profile_handler = hs.get_profile_handler() self.auth = hs.get_auth() diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index ddf8ed5e9ca1..f9eecb7cf5cd 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -38,7 +38,7 @@ class PushRuleRestServlet(RestServlet): ) def __init__(self, hs): - super(PushRuleRestServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastore() self.notifier = hs.get_notifier() diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index 5f65cb7d83d5..28dabf1c7ab6 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -44,7 +44,7 @@ class PushersRestServlet(RestServlet): PATTERNS = client_patterns("/pushers$", v1=True) def __init__(self, hs): - super(PushersRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() @@ -68,7 +68,7 @@ class PushersSetRestServlet(RestServlet): PATTERNS = client_patterns("/pushers/set$", v1=True) def __init__(self, hs): - super(PushersSetRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.notifier = hs.get_notifier() @@ -153,7 +153,7 @@ class PushersRemoveRestServlet(RestServlet): SUCCESS_HTML = b"You have been unsubscribed" def __init__(self, hs): - super(PushersRemoveRestServlet, self).__init__() + super().__init__() self.hs = hs self.notifier = hs.get_notifier() self.auth = hs.get_auth() diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 84baf3d59bca..7e64a2e0fe36 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -57,7 +57,7 @@ class TransactionRestServlet(RestServlet): def __init__(self, hs): - super(TransactionRestServlet, self).__init__() + super().__init__() self.txns = HttpTransactionCache(hs) @@ -65,7 +65,7 @@ class RoomCreateRestServlet(TransactionRestServlet): # No PATTERN; we have custom dispatch rules here def __init__(self, hs): - super(RoomCreateRestServlet, self).__init__(hs) + super().__init__(hs) self._room_creation_handler = hs.get_room_creation_handler() self.auth = hs.get_auth() @@ -111,7 +111,7 @@ def on_OPTIONS(self, request): # TODO: Needs unit testing for generic events class RoomStateEventRestServlet(TransactionRestServlet): def __init__(self, hs): - super(RoomStateEventRestServlet, self).__init__(hs) + super().__init__(hs) self.handlers = hs.get_handlers() self.event_creation_handler = hs.get_event_creation_handler() self.room_member_handler = hs.get_room_member_handler() @@ -229,7 +229,7 @@ async def on_PUT(self, request, room_id, event_type, state_key, txn_id=None): # TODO: Needs unit testing for generic events + 
feedback class RoomSendEventRestServlet(TransactionRestServlet): def __init__(self, hs): - super(RoomSendEventRestServlet, self).__init__(hs) + super().__init__(hs) self.event_creation_handler = hs.get_event_creation_handler() self.auth = hs.get_auth() @@ -280,7 +280,7 @@ def on_PUT(self, request, room_id, event_type, txn_id): # TODO: Needs unit testing for room ID + alias joins class JoinRoomAliasServlet(TransactionRestServlet): def __init__(self, hs): - super(JoinRoomAliasServlet, self).__init__(hs) + super().__init__(hs) self.room_member_handler = hs.get_room_member_handler() self.auth = hs.get_auth() @@ -343,7 +343,7 @@ class PublicRoomListRestServlet(TransactionRestServlet): PATTERNS = client_patterns("/publicRooms$", v1=True) def __init__(self, hs): - super(PublicRoomListRestServlet, self).__init__(hs) + super().__init__(hs) self.hs = hs self.auth = hs.get_auth() @@ -448,7 +448,7 @@ class RoomMemberListRestServlet(RestServlet): PATTERNS = client_patterns("/rooms/(?P[^/]*)/members$", v1=True) def __init__(self, hs): - super(RoomMemberListRestServlet, self).__init__() + super().__init__() self.message_handler = hs.get_message_handler() self.auth = hs.get_auth() @@ -499,7 +499,7 @@ class JoinedRoomMemberListRestServlet(RestServlet): PATTERNS = client_patterns("/rooms/(?P[^/]*)/joined_members$", v1=True) def __init__(self, hs): - super(JoinedRoomMemberListRestServlet, self).__init__() + super().__init__() self.message_handler = hs.get_message_handler() self.auth = hs.get_auth() @@ -518,7 +518,7 @@ class RoomMessageListRestServlet(RestServlet): PATTERNS = client_patterns("/rooms/(?P[^/]*)/messages$", v1=True) def __init__(self, hs): - super(RoomMessageListRestServlet, self).__init__() + super().__init__() self.pagination_handler = hs.get_pagination_handler() self.auth = hs.get_auth() @@ -557,7 +557,7 @@ class RoomStateRestServlet(RestServlet): PATTERNS = client_patterns("/rooms/(?P[^/]*)/state$", v1=True) def __init__(self, hs): - super(RoomStateRestServlet, self).__init__() + super().__init__() self.message_handler = hs.get_message_handler() self.auth = hs.get_auth() @@ -577,7 +577,7 @@ class RoomInitialSyncRestServlet(RestServlet): PATTERNS = client_patterns("/rooms/(?P[^/]*)/initialSync$", v1=True) def __init__(self, hs): - super(RoomInitialSyncRestServlet, self).__init__() + super().__init__() self.initial_sync_handler = hs.get_initial_sync_handler() self.auth = hs.get_auth() @@ -596,7 +596,7 @@ class RoomEventServlet(RestServlet): ) def __init__(self, hs): - super(RoomEventServlet, self).__init__() + super().__init__() self.clock = hs.get_clock() self.event_handler = hs.get_event_handler() self._event_serializer = hs.get_event_client_serializer() @@ -628,7 +628,7 @@ class RoomEventContextServlet(RestServlet): ) def __init__(self, hs): - super(RoomEventContextServlet, self).__init__() + super().__init__() self.clock = hs.get_clock() self.room_context_handler = hs.get_room_context_handler() self._event_serializer = hs.get_event_client_serializer() @@ -675,7 +675,7 @@ async def on_GET(self, request, room_id, event_id): class RoomForgetRestServlet(TransactionRestServlet): def __init__(self, hs): - super(RoomForgetRestServlet, self).__init__(hs) + super().__init__(hs) self.room_member_handler = hs.get_room_member_handler() self.auth = hs.get_auth() @@ -701,7 +701,7 @@ def on_PUT(self, request, room_id, txn_id): # TODO: Needs unit testing class RoomMembershipRestServlet(TransactionRestServlet): def __init__(self, hs): - super(RoomMembershipRestServlet, self).__init__(hs) + 
super().__init__(hs) self.room_member_handler = hs.get_room_member_handler() self.auth = hs.get_auth() @@ -792,7 +792,7 @@ def on_PUT(self, request, room_id, membership_action, txn_id): class RoomRedactEventRestServlet(TransactionRestServlet): def __init__(self, hs): - super(RoomRedactEventRestServlet, self).__init__(hs) + super().__init__(hs) self.handlers = hs.get_handlers() self.event_creation_handler = hs.get_event_creation_handler() self.auth = hs.get_auth() @@ -841,7 +841,7 @@ class RoomTypingRestServlet(RestServlet): ) def __init__(self, hs): - super(RoomTypingRestServlet, self).__init__() + super().__init__() self.presence_handler = hs.get_presence_handler() self.typing_handler = hs.get_typing_handler() self.auth = hs.get_auth() @@ -914,7 +914,7 @@ class SearchRestServlet(RestServlet): PATTERNS = client_patterns("/search$", v1=True) def __init__(self, hs): - super(SearchRestServlet, self).__init__() + super().__init__() self.handlers = hs.get_handlers() self.auth = hs.get_auth() @@ -935,7 +935,7 @@ class JoinedRoomsRestServlet(RestServlet): PATTERNS = client_patterns("/joined_rooms$", v1=True) def __init__(self, hs): - super(JoinedRoomsRestServlet, self).__init__() + super().__init__() self.store = hs.get_datastore() self.auth = hs.get_auth() diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py index 50277c6cf6c3..b8d491ca5c95 100644 --- a/synapse/rest/client/v1/voip.py +++ b/synapse/rest/client/v1/voip.py @@ -25,7 +25,7 @@ class VoipRestServlet(RestServlet): PATTERNS = client_patterns("/voip/turnServer$", v1=True) def __init__(self, hs): - super(VoipRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index ade97a6708c7..c3ce0f62592a 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -52,7 +52,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): PATTERNS = client_patterns("/account/password/email/requestToken$") def __init__(self, hs): - super(EmailPasswordRequestTokenRestServlet, self).__init__() + super().__init__() self.hs = hs self.datastore = hs.get_datastore() self.config = hs.config @@ -156,7 +156,7 @@ class PasswordRestServlet(RestServlet): PATTERNS = client_patterns("/account/password$") def __init__(self, hs): - super(PasswordRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() @@ -282,7 +282,7 @@ class DeactivateAccountRestServlet(RestServlet): PATTERNS = client_patterns("/account/deactivate$") def __init__(self, hs): - super(DeactivateAccountRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() @@ -330,7 +330,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): PATTERNS = client_patterns("/account/3pid/email/requestToken$") def __init__(self, hs): - super(EmailThreepidRequestTokenRestServlet, self).__init__() + super().__init__() self.hs = hs self.config = hs.config self.identity_handler = hs.get_handlers().identity_handler @@ -427,7 +427,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): def __init__(self, hs): self.hs = hs - super(MsisdnThreepidRequestTokenRestServlet, self).__init__() + super().__init__() self.store = self.hs.get_datastore() self.identity_handler = hs.get_handlers().identity_handler @@ -606,7 +606,7 @@ class ThreepidRestServlet(RestServlet): PATTERNS = 
client_patterns("/account/3pid$") def __init__(self, hs): - super(ThreepidRestServlet, self).__init__() + super().__init__() self.hs = hs self.identity_handler = hs.get_handlers().identity_handler self.auth = hs.get_auth() @@ -662,7 +662,7 @@ class ThreepidAddRestServlet(RestServlet): PATTERNS = client_patterns("/account/3pid/add$") def __init__(self, hs): - super(ThreepidAddRestServlet, self).__init__() + super().__init__() self.hs = hs self.identity_handler = hs.get_handlers().identity_handler self.auth = hs.get_auth() @@ -713,7 +713,7 @@ class ThreepidBindRestServlet(RestServlet): PATTERNS = client_patterns("/account/3pid/bind$") def __init__(self, hs): - super(ThreepidBindRestServlet, self).__init__() + super().__init__() self.hs = hs self.identity_handler = hs.get_handlers().identity_handler self.auth = hs.get_auth() @@ -742,7 +742,7 @@ class ThreepidUnbindRestServlet(RestServlet): PATTERNS = client_patterns("/account/3pid/unbind$") def __init__(self, hs): - super(ThreepidUnbindRestServlet, self).__init__() + super().__init__() self.hs = hs self.identity_handler = hs.get_handlers().identity_handler self.auth = hs.get_auth() @@ -773,7 +773,7 @@ class ThreepidDeleteRestServlet(RestServlet): PATTERNS = client_patterns("/account/3pid/delete$") def __init__(self, hs): - super(ThreepidDeleteRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() @@ -852,7 +852,7 @@ class WhoamiRestServlet(RestServlet): PATTERNS = client_patterns("/account/whoami$") def __init__(self, hs): - super(WhoamiRestServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() async def on_GET(self, request): diff --git a/synapse/rest/client/v2_alpha/account_data.py b/synapse/rest/client/v2_alpha/account_data.py index c1d4cd0caf99..87a5b1b86bd8 100644 --- a/synapse/rest/client/v2_alpha/account_data.py +++ b/synapse/rest/client/v2_alpha/account_data.py @@ -34,7 +34,7 @@ class AccountDataServlet(RestServlet): ) def __init__(self, hs): - super(AccountDataServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastore() self.notifier = hs.get_notifier() @@ -86,7 +86,7 @@ class RoomAccountDataServlet(RestServlet): ) def __init__(self, hs): - super(RoomAccountDataServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastore() self.notifier = hs.get_notifier() diff --git a/synapse/rest/client/v2_alpha/account_validity.py b/synapse/rest/client/v2_alpha/account_validity.py index d06336ceea9a..bd7f9ae2039b 100644 --- a/synapse/rest/client/v2_alpha/account_validity.py +++ b/synapse/rest/client/v2_alpha/account_validity.py @@ -32,7 +32,7 @@ def __init__(self, hs): Args: hs (synapse.server.HomeServer): server """ - super(AccountValidityRenewServlet, self).__init__() + super().__init__() self.hs = hs self.account_activity_handler = hs.get_account_validity_handler() @@ -67,7 +67,7 @@ def __init__(self, hs): Args: hs (synapse.server.HomeServer): server """ - super(AccountValiditySendMailServlet, self).__init__() + super().__init__() self.hs = hs self.account_activity_handler = hs.get_account_validity_handler() diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index 8e585e915305..097538f96864 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -124,7 +124,7 @@ class AuthRestServlet(RestServlet): PATTERNS = client_patterns(r"/auth/(?P[\w\.]*)/fallback/web") def __init__(self, hs): - 
super(AuthRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() diff --git a/synapse/rest/client/v2_alpha/capabilities.py b/synapse/rest/client/v2_alpha/capabilities.py index fe9d019c442b..76879ac55905 100644 --- a/synapse/rest/client/v2_alpha/capabilities.py +++ b/synapse/rest/client/v2_alpha/capabilities.py @@ -32,7 +32,7 @@ def __init__(self, hs): Args: hs (synapse.server.HomeServer): server """ - super(CapabilitiesRestServlet, self).__init__() + super().__init__() self.hs = hs self.config = hs.config self.auth = hs.get_auth() diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py index c0714fcfb105..7e174de692f6 100644 --- a/synapse/rest/client/v2_alpha/devices.py +++ b/synapse/rest/client/v2_alpha/devices.py @@ -35,7 +35,7 @@ def __init__(self, hs): Args: hs (synapse.server.HomeServer): server """ - super(DevicesRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.device_handler = hs.get_device_handler() @@ -57,7 +57,7 @@ class DeleteDevicesRestServlet(RestServlet): PATTERNS = client_patterns("/delete_devices") def __init__(self, hs): - super(DeleteDevicesRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.device_handler = hs.get_device_handler() @@ -102,7 +102,7 @@ def __init__(self, hs): Args: hs (synapse.server.HomeServer): server """ - super(DeviceRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.device_handler = hs.get_device_handler() diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/v2_alpha/filter.py index b28da017cd51..7cc692643b1d 100644 --- a/synapse/rest/client/v2_alpha/filter.py +++ b/synapse/rest/client/v2_alpha/filter.py @@ -28,7 +28,7 @@ class GetFilterRestServlet(RestServlet): PATTERNS = client_patterns("/user/(?P[^/]*)/filter/(?P[^/]*)") def __init__(self, hs): - super(GetFilterRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.filtering = hs.get_filtering() @@ -64,7 +64,7 @@ class CreateFilterRestServlet(RestServlet): PATTERNS = client_patterns("/user/(?P[^/]*)/filter") def __init__(self, hs): - super(CreateFilterRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.filtering = hs.get_filtering() diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index 13ecf7005d13..a3bb095c2d59 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -32,7 +32,7 @@ class GroupServlet(RestServlet): PATTERNS = client_patterns("/groups/(?P[^/]*)/profile$") def __init__(self, hs): - super(GroupServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -66,7 +66,7 @@ class GroupSummaryServlet(RestServlet): PATTERNS = client_patterns("/groups/(?P[^/]*)/summary$") def __init__(self, hs): - super(GroupSummaryServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -97,7 +97,7 @@ class GroupSummaryRoomsCatServlet(RestServlet): ) def __init__(self, hs): - super(GroupSummaryRoomsCatServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -137,7 +137,7 @@ class 
GroupCategoryServlet(RestServlet): ) def __init__(self, hs): - super(GroupCategoryServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -181,7 +181,7 @@ class GroupCategoriesServlet(RestServlet): PATTERNS = client_patterns("/groups/(?P[^/]*)/categories/$") def __init__(self, hs): - super(GroupCategoriesServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -204,7 +204,7 @@ class GroupRoleServlet(RestServlet): PATTERNS = client_patterns("/groups/(?P[^/]*)/roles/(?P[^/]+)$") def __init__(self, hs): - super(GroupRoleServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -248,7 +248,7 @@ class GroupRolesServlet(RestServlet): PATTERNS = client_patterns("/groups/(?P[^/]*)/roles/$") def __init__(self, hs): - super(GroupRolesServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -279,7 +279,7 @@ class GroupSummaryUsersRoleServlet(RestServlet): ) def __init__(self, hs): - super(GroupSummaryUsersRoleServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -317,7 +317,7 @@ class GroupRoomServlet(RestServlet): PATTERNS = client_patterns("/groups/(?P[^/]*)/rooms$") def __init__(self, hs): - super(GroupRoomServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -343,7 +343,7 @@ class GroupUsersServlet(RestServlet): PATTERNS = client_patterns("/groups/(?P[^/]*)/users$") def __init__(self, hs): - super(GroupUsersServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -366,7 +366,7 @@ class GroupInvitedUsersServlet(RestServlet): PATTERNS = client_patterns("/groups/(?P[^/]*)/invited_users$") def __init__(self, hs): - super(GroupInvitedUsersServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -389,7 +389,7 @@ class GroupSettingJoinPolicyServlet(RestServlet): PATTERNS = client_patterns("/groups/(?P[^/]*)/settings/m.join_policy$") def __init__(self, hs): - super(GroupSettingJoinPolicyServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.groups_handler = hs.get_groups_local_handler() @@ -413,7 +413,7 @@ class GroupCreateServlet(RestServlet): PATTERNS = client_patterns("/create_group$") def __init__(self, hs): - super(GroupCreateServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -444,7 +444,7 @@ class GroupAdminRoomsServlet(RestServlet): ) def __init__(self, hs): - super(GroupAdminRoomsServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -481,7 +481,7 @@ class GroupAdminRoomsConfigServlet(RestServlet): ) def __init__(self, hs): - super(GroupAdminRoomsConfigServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = 
hs.get_groups_local_handler() @@ -507,7 +507,7 @@ class GroupAdminUsersInviteServlet(RestServlet): ) def __init__(self, hs): - super(GroupAdminUsersInviteServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -536,7 +536,7 @@ class GroupAdminUsersKickServlet(RestServlet): ) def __init__(self, hs): - super(GroupAdminUsersKickServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -560,7 +560,7 @@ class GroupSelfLeaveServlet(RestServlet): PATTERNS = client_patterns("/groups/(?P[^/]*)/self/leave$") def __init__(self, hs): - super(GroupSelfLeaveServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -584,7 +584,7 @@ class GroupSelfJoinServlet(RestServlet): PATTERNS = client_patterns("/groups/(?P[^/]*)/self/join$") def __init__(self, hs): - super(GroupSelfJoinServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -608,7 +608,7 @@ class GroupSelfAcceptInviteServlet(RestServlet): PATTERNS = client_patterns("/groups/(?P[^/]*)/self/accept_invite$") def __init__(self, hs): - super(GroupSelfAcceptInviteServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -632,7 +632,7 @@ class GroupSelfUpdatePublicityServlet(RestServlet): PATTERNS = client_patterns("/groups/(?P[^/]*)/self/update_publicity$") def __init__(self, hs): - super(GroupSelfUpdatePublicityServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.store = hs.get_datastore() @@ -655,7 +655,7 @@ class PublicisedGroupsForUserServlet(RestServlet): PATTERNS = client_patterns("/publicised_groups/(?P[^/]*)$") def __init__(self, hs): - super(PublicisedGroupsForUserServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.store = hs.get_datastore() @@ -676,7 +676,7 @@ class PublicisedGroupsForUsersServlet(RestServlet): PATTERNS = client_patterns("/publicised_groups$") def __init__(self, hs): - super(PublicisedGroupsForUsersServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.store = hs.get_datastore() @@ -700,7 +700,7 @@ class GroupsForUserServlet(RestServlet): PATTERNS = client_patterns("/joined_groups$") def __init__(self, hs): - super(GroupsForUserServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index 24bb090822a7..7abd6ff333b2 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -64,7 +64,7 @@ def __init__(self, hs): Args: hs (synapse.server.HomeServer): server """ - super(KeyUploadServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.e2e_keys_handler = hs.get_e2e_keys_handler() @@ -147,7 +147,7 @@ def __init__(self, hs): Args: hs (synapse.server.HomeServer): """ - super(KeyQueryServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.e2e_keys_handler = hs.get_e2e_keys_handler() @@ -177,7 +177,7 @@ def __init__(self, hs): Args: hs 
(synapse.server.HomeServer): """ - super(KeyChangesServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.device_handler = hs.get_device_handler() @@ -222,7 +222,7 @@ class OneTimeKeyServlet(RestServlet): PATTERNS = client_patterns("/keys/claim$") def __init__(self, hs): - super(OneTimeKeyServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.e2e_keys_handler = hs.get_e2e_keys_handler() @@ -250,7 +250,7 @@ def __init__(self, hs): Args: hs (synapse.server.HomeServer): server """ - super(SigningKeyUploadServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.e2e_keys_handler = hs.get_e2e_keys_handler() @@ -308,7 +308,7 @@ def __init__(self, hs): Args: hs (synapse.server.HomeServer): server """ - super(SignaturesUploadServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.e2e_keys_handler = hs.get_e2e_keys_handler() diff --git a/synapse/rest/client/v2_alpha/notifications.py b/synapse/rest/client/v2_alpha/notifications.py index aa911d75ee89..87063ec8b1e1 100644 --- a/synapse/rest/client/v2_alpha/notifications.py +++ b/synapse/rest/client/v2_alpha/notifications.py @@ -27,7 +27,7 @@ class NotificationsServlet(RestServlet): PATTERNS = client_patterns("/notifications$") def __init__(self, hs): - super(NotificationsServlet, self).__init__() + super().__init__() self.store = hs.get_datastore() self.auth = hs.get_auth() self.clock = hs.get_clock() diff --git a/synapse/rest/client/v2_alpha/openid.py b/synapse/rest/client/v2_alpha/openid.py index 6ae9a5a8e9fe..5b996e2d6318 100644 --- a/synapse/rest/client/v2_alpha/openid.py +++ b/synapse/rest/client/v2_alpha/openid.py @@ -60,7 +60,7 @@ class IdTokenServlet(RestServlet): EXPIRES_MS = 3600 * 1000 def __init__(self, hs): - super(IdTokenServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastore() self.clock = hs.get_clock() diff --git a/synapse/rest/client/v2_alpha/password_policy.py b/synapse/rest/client/v2_alpha/password_policy.py index 968403cca455..68b27ff23a46 100644 --- a/synapse/rest/client/v2_alpha/password_policy.py +++ b/synapse/rest/client/v2_alpha/password_policy.py @@ -30,7 +30,7 @@ def __init__(self, hs): Args: hs (synapse.server.HomeServer): server """ - super(PasswordPolicyServlet, self).__init__() + super().__init__() self.policy = hs.config.password_policy self.enabled = hs.config.password_policy_enabled diff --git a/synapse/rest/client/v2_alpha/read_marker.py b/synapse/rest/client/v2_alpha/read_marker.py index 67cbc37312ac..55c6688f529f 100644 --- a/synapse/rest/client/v2_alpha/read_marker.py +++ b/synapse/rest/client/v2_alpha/read_marker.py @@ -26,7 +26,7 @@ class ReadMarkerRestServlet(RestServlet): PATTERNS = client_patterns("/rooms/(?P[^/]*)/read_markers$") def __init__(self, hs): - super(ReadMarkerRestServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.receipts_handler = hs.get_receipts_handler() self.read_marker_handler = hs.get_read_marker_handler() diff --git a/synapse/rest/client/v2_alpha/receipts.py b/synapse/rest/client/v2_alpha/receipts.py index 92555bd4a957..6f7246a39429 100644 --- a/synapse/rest/client/v2_alpha/receipts.py +++ b/synapse/rest/client/v2_alpha/receipts.py @@ -31,7 +31,7 @@ class ReceiptRestServlet(RestServlet): ) def __init__(self, hs): - super(ReceiptRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.receipts_handler = hs.get_receipts_handler() diff --git 
a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 0705718d007d..ffa2dfce42d0 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -76,7 +76,7 @@ def __init__(self, hs): Args: hs (synapse.server.HomeServer): server """ - super(EmailRegisterRequestTokenRestServlet, self).__init__() + super().__init__() self.hs = hs self.identity_handler = hs.get_handlers().identity_handler self.config = hs.config @@ -174,7 +174,7 @@ def __init__(self, hs): Args: hs (synapse.server.HomeServer): server """ - super(MsisdnRegisterRequestTokenRestServlet, self).__init__() + super().__init__() self.hs = hs self.identity_handler = hs.get_handlers().identity_handler @@ -249,7 +249,7 @@ def __init__(self, hs): Args: hs (synapse.server.HomeServer): server """ - super(RegistrationSubmitTokenServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.config = hs.config @@ -319,7 +319,7 @@ def __init__(self, hs): Args: hs (synapse.server.HomeServer): server """ - super(UsernameAvailabilityRestServlet, self).__init__() + super().__init__() self.hs = hs self.registration_handler = hs.get_registration_handler() self.ratelimiter = FederationRateLimiter( @@ -363,7 +363,7 @@ def __init__(self, hs): Args: hs (synapse.server.HomeServer): server """ - super(RegisterRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py index e29f49f7f57d..18c75738f87d 100644 --- a/synapse/rest/client/v2_alpha/relations.py +++ b/synapse/rest/client/v2_alpha/relations.py @@ -61,7 +61,7 @@ class RelationSendServlet(RestServlet): ) def __init__(self, hs): - super(RelationSendServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.event_creation_handler = hs.get_event_creation_handler() self.txns = HttpTransactionCache(hs) @@ -138,7 +138,7 @@ class RelationPaginationServlet(RestServlet): ) def __init__(self, hs): - super(RelationPaginationServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastore() self.clock = hs.get_clock() @@ -233,7 +233,7 @@ class RelationAggregationPaginationServlet(RestServlet): ) def __init__(self, hs): - super(RelationAggregationPaginationServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastore() self.event_handler = hs.get_event_handler() @@ -311,7 +311,7 @@ class RelationAggregationGroupPaginationServlet(RestServlet): ) def __init__(self, hs): - super(RelationAggregationGroupPaginationServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastore() self.clock = hs.get_clock() diff --git a/synapse/rest/client/v2_alpha/report_event.py b/synapse/rest/client/v2_alpha/report_event.py index e15927c4ea66..215d619ca102 100644 --- a/synapse/rest/client/v2_alpha/report_event.py +++ b/synapse/rest/client/v2_alpha/report_event.py @@ -32,7 +32,7 @@ class ReportEventRestServlet(RestServlet): PATTERNS = client_patterns("/rooms/(?P[^/]*)/report/(?P[^/]*)$") def __init__(self, hs): - super(ReportEventRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.clock = hs.get_clock() diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 59529707dfa2..53de97923fa1 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ 
b/synapse/rest/client/v2_alpha/room_keys.py @@ -37,7 +37,7 @@ def __init__(self, hs): Args: hs (synapse.server.HomeServer): server """ - super(RoomKeysServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler() @@ -248,7 +248,7 @@ def __init__(self, hs): Args: hs (synapse.server.HomeServer): server """ - super(RoomKeysNewVersionServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler() @@ -301,7 +301,7 @@ def __init__(self, hs): Args: hs (synapse.server.HomeServer): server """ - super(RoomKeysVersionServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler() diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py index 39a551861400..bf030e0ff49d 100644 --- a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py +++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py @@ -53,7 +53,7 @@ class RoomUpgradeRestServlet(RestServlet): ) def __init__(self, hs): - super(RoomUpgradeRestServlet, self).__init__() + super().__init__() self._hs = hs self._room_creation_handler = hs.get_room_creation_handler() self._auth = hs.get_auth() diff --git a/synapse/rest/client/v2_alpha/sendtodevice.py b/synapse/rest/client/v2_alpha/sendtodevice.py index db829f309888..bc4f43639a7e 100644 --- a/synapse/rest/client/v2_alpha/sendtodevice.py +++ b/synapse/rest/client/v2_alpha/sendtodevice.py @@ -36,7 +36,7 @@ def __init__(self, hs): Args: hs (synapse.server.HomeServer): server """ - super(SendToDeviceRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.txns = HttpTransactionCache(hs) diff --git a/synapse/rest/client/v2_alpha/shared_rooms.py b/synapse/rest/client/v2_alpha/shared_rooms.py index 2492634dace1..c866d5151c99 100644 --- a/synapse/rest/client/v2_alpha/shared_rooms.py +++ b/synapse/rest/client/v2_alpha/shared_rooms.py @@ -34,7 +34,7 @@ class UserSharedRoomsServlet(RestServlet): ) def __init__(self, hs): - super(UserSharedRoomsServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastore() self.user_directory_active = hs.config.update_user_directory diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index a0b00135e1cb..51e395cc6424 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -74,7 +74,7 @@ class SyncRestServlet(RestServlet): ALLOWED_PRESENCE = {"online", "offline", "unavailable"} def __init__(self, hs): - super(SyncRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.sync_handler = hs.get_sync_handler() diff --git a/synapse/rest/client/v2_alpha/tags.py b/synapse/rest/client/v2_alpha/tags.py index a3f12e8a774d..bf3a79db44d4 100644 --- a/synapse/rest/client/v2_alpha/tags.py +++ b/synapse/rest/client/v2_alpha/tags.py @@ -31,7 +31,7 @@ class TagListServlet(RestServlet): PATTERNS = client_patterns("/user/(?P[^/]*)/rooms/(?P[^/]*)/tags") def __init__(self, hs): - super(TagListServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastore() @@ -56,7 +56,7 @@ class TagServlet(RestServlet): ) def __init__(self, hs): - super(TagServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastore() self.notifier = hs.get_notifier() diff 
--git a/synapse/rest/client/v2_alpha/thirdparty.py b/synapse/rest/client/v2_alpha/thirdparty.py index 23709960ad85..0c127a1b5fd8 100644 --- a/synapse/rest/client/v2_alpha/thirdparty.py +++ b/synapse/rest/client/v2_alpha/thirdparty.py @@ -28,7 +28,7 @@ class ThirdPartyProtocolsServlet(RestServlet): PATTERNS = client_patterns("/thirdparty/protocols") def __init__(self, hs): - super(ThirdPartyProtocolsServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.appservice_handler = hs.get_application_service_handler() @@ -44,7 +44,7 @@ class ThirdPartyProtocolServlet(RestServlet): PATTERNS = client_patterns("/thirdparty/protocol/(?P[^/]+)$") def __init__(self, hs): - super(ThirdPartyProtocolServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.appservice_handler = hs.get_application_service_handler() @@ -65,7 +65,7 @@ class ThirdPartyUserServlet(RestServlet): PATTERNS = client_patterns("/thirdparty/user(/(?P[^/]+))?$") def __init__(self, hs): - super(ThirdPartyUserServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.appservice_handler = hs.get_application_service_handler() @@ -87,7 +87,7 @@ class ThirdPartyLocationServlet(RestServlet): PATTERNS = client_patterns("/thirdparty/location(/(?P[^/]+))?$") def __init__(self, hs): - super(ThirdPartyLocationServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.appservice_handler = hs.get_application_service_handler() diff --git a/synapse/rest/client/v2_alpha/tokenrefresh.py b/synapse/rest/client/v2_alpha/tokenrefresh.py index 83f3b6b70ad7..79317c74bae1 100644 --- a/synapse/rest/client/v2_alpha/tokenrefresh.py +++ b/synapse/rest/client/v2_alpha/tokenrefresh.py @@ -28,7 +28,7 @@ class TokenRefreshRestServlet(RestServlet): PATTERNS = client_patterns("/tokenrefresh") def __init__(self, hs): - super(TokenRefreshRestServlet, self).__init__() + super().__init__() async def on_POST(self, request): raise AuthError(403, "tokenrefresh is no longer supported.") diff --git a/synapse/rest/client/v2_alpha/user_directory.py b/synapse/rest/client/v2_alpha/user_directory.py index bef91a2d3ed1..ad598cefe00e 100644 --- a/synapse/rest/client/v2_alpha/user_directory.py +++ b/synapse/rest/client/v2_alpha/user_directory.py @@ -31,7 +31,7 @@ def __init__(self, hs): Args: hs (synapse.server.HomeServer): server """ - super(UserDirectorySearchRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.user_directory_handler = hs.get_user_directory_handler() diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 24ac57f35d8a..d5018afbdacc 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -28,7 +28,7 @@ class VersionsRestServlet(RestServlet): PATTERNS = [re.compile("^/_matrix/client/versions$")] def __init__(self, hs): - super(VersionsRestServlet, self).__init__() + super().__init__() self.config = hs.config def on_GET(self, request): diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 2ae2fbd5d75f..ccb3384db9d2 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -172,7 +172,7 @@ def __init__(self, database: DatabasePool, db_conn, hs): else: self._cache_id_gen = None - super(DataStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self._presence_on_startup = self._get_active_presence(db_conn) diff --git a/synapse/storage/databases/main/account_data.py 
b/synapse/storage/databases/main/account_data.py index 5f1a2b9aa6cc..c5a36990e417 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -42,7 +42,7 @@ def __init__(self, database: DatabasePool, db_conn, hs): "AccountDataAndTagsChangeCache", account_max ) - super(AccountDataWorkerStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) @abc.abstractmethod def get_max_account_data_stream_id(self): @@ -313,7 +313,7 @@ def __init__(self, database: DatabasePool, db_conn, hs): ], ) - super(AccountDataStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) def get_max_account_data_stream_id(self) -> int: """Get the current max stream id for the private user data stream diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py index 454c0bc50cb7..85f6b1e3fdf7 100644 --- a/synapse/storage/databases/main/appservice.py +++ b/synapse/storage/databases/main/appservice.py @@ -52,7 +52,7 @@ def __init__(self, database: DatabasePool, db_conn, hs): ) self.exclusive_user_regex = _make_exclusive_regex(self.services_cache) - super(ApplicationServiceWorkerStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) def get_app_services(self): return self.services_cache diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index c2fc847fbc78..239c7a949cba 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -31,7 +31,7 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(ClientIpBackgroundUpdateStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.db_pool.updates.register_background_index_update( "user_ips_device_index", @@ -358,7 +358,7 @@ def __init__(self, database: DatabasePool, db_conn, hs): name="client_ip_last_seen", keylen=4, max_entries=50000 ) - super(ClientIpStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.user_ips_max_age = hs.config.user_ips_max_age diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 00444331102e..e71217a41f3c 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -283,7 +283,7 @@ class DeviceInboxBackgroundUpdateStore(SQLBaseStore): DEVICE_INBOX_STREAM_ID = "device_inbox_stream_drop" def __init__(self, database: DatabasePool, db_conn, hs): - super(DeviceInboxBackgroundUpdateStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.db_pool.updates.register_background_index_update( "device_inbox_stream_index", @@ -313,7 +313,7 @@ class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore) DEVICE_INBOX_STREAM_ID = "device_inbox_stream_drop" def __init__(self, database: DatabasePool, db_conn, hs): - super(DeviceInboxStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) # Map of (user_id, device_id) to the last stream_id that has been # deleted up to. This is so that we can no op deletions. 
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 306fc6947c19..c04374e43d11 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -701,7 +701,7 @@ def _mark_remote_user_device_list_as_unsubscribed_txn(txn): class DeviceBackgroundUpdateStore(SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(DeviceBackgroundUpdateStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.db_pool.updates.register_background_index_update( "device_lists_stream_idx", @@ -826,7 +826,7 @@ def _txn(txn): class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(DeviceStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) # Map of (user_id, device_id) -> bool. If there is an entry that implies # the device exists. diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index 4c3c162acf54..6d3689c09e59 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -600,7 +600,7 @@ class EventFederationStore(EventFederationWorkerStore): EVENT_AUTH_STATE_ONLY = "event_auth_state_only" def __init__(self, database: DatabasePool, db_conn, hs): - super(EventFederationStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.db_pool.updates.register_background_update_handler( self.EVENT_AUTH_STATE_ONLY, self._background_delete_non_state_event_auth diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 7805fb814e30..62f1738732f3 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -68,7 +68,7 @@ def _deserialize_action(actions, is_highlight): class EventPushActionsWorkerStore(SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(EventPushActionsWorkerStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) # These get correctly set by _find_stream_orderings_for_times_txn self.stream_ordering_month_ago = None @@ -661,7 +661,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore): EPA_HIGHLIGHT_INDEX = "epa_highlight_index" def __init__(self, database: DatabasePool, db_conn, hs): - super(EventPushActionsStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.db_pool.updates.register_background_index_update( self.EPA_HIGHLIGHT_INDEX, diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index e53c6373a89e..5e4af2eb5102 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -29,7 +29,7 @@ class EventsBackgroundUpdatesStore(SQLBaseStore): DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" def __init__(self, database: DatabasePool, db_conn, hs): - super(EventsBackgroundUpdatesStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.db_pool.updates.register_background_update_handler( self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 
cd3739c16c37..de9e8d1dc6db 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -75,7 +75,7 @@ class EventRedactBehaviour(Names): class EventsWorkerStore(SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(EventsWorkerStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) if isinstance(database.engine, PostgresEngine): # If we're using Postgres than we can use `MultiWriterIdGenerator` diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index 1d76c761a603..cc538c5c104f 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -24,9 +24,7 @@ class MediaRepositoryBackgroundUpdateStore(SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(MediaRepositoryBackgroundUpdateStore, self).__init__( - database, db_conn, hs - ) + super().__init__(database, db_conn, hs) self.db_pool.updates.register_background_index_update( update_name="local_media_repository_url_idx", @@ -94,7 +92,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): """Persistence for attachments and avatars""" def __init__(self, database: DatabasePool, db_conn, hs): - super(MediaRepositoryStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) async def get_local_media(self, media_id: str) -> Optional[Dict[str, Any]]: """Get the metadata for a local piece of media diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index 1d793d3debdf..e0cedd1aacc9 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -28,7 +28,7 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(MonthlyActiveUsersWorkerStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self._clock = hs.get_clock() self.hs = hs @@ -120,7 +120,7 @@ async def user_last_seen_monthly_active(self, user_id: str) -> int: class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(MonthlyActiveUsersStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self._limit_usage_by_mau = hs.config.limit_usage_by_mau self._mau_stats_only = hs.config.mau_stats_only diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index b7a8d34ce129..e20a16f90758 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -77,7 +77,7 @@ class PushRulesWorkerStore( """ def __init__(self, database: DatabasePool, db_conn, hs): - super(PushRulesWorkerStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) if hs.config.worker.worker_app is None: self._push_rules_stream_id_gen = StreamIdGenerator( diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 6568bddd81e6..f880b5e562cc 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -39,7 +39,7 @@ class ReceiptsWorkerStore(SQLBaseStore, metaclass=abc.ABCMeta): """ def __init__(self, database: DatabasePool, db_conn, hs): - super(ReceiptsWorkerStore, 
self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self._receipts_stream_cache = StreamChangeCache( "ReceiptsRoomChangeCache", self.get_max_receipt_stream_id() @@ -386,7 +386,7 @@ def __init__(self, database: DatabasePool, db_conn, hs): db_conn, "receipts_linearized", "stream_id" ) - super(ReceiptsStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) def get_max_receipt_stream_id(self): return self._receipts_id_gen.get_current_token() diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 01f20c03c213..675e81fe3436 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -36,7 +36,7 @@ class RegistrationWorkerStore(SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(RegistrationWorkerStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.config = hs.config self.clock = hs.get_clock() @@ -764,7 +764,7 @@ def delete_threepid_session_txn(txn): class RegistrationBackgroundUpdateStore(RegistrationWorkerStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(RegistrationBackgroundUpdateStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.clock = hs.get_clock() self.config = hs.config @@ -892,7 +892,7 @@ def _bg_user_threepids_grandfather_txn(txn): class RegistrationStore(RegistrationBackgroundUpdateStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(RegistrationStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self._account_validity = hs.config.account_validity self._ignore_unknown_session_error = hs.config.request_token_inhibit_3pid_errors diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 127588ce4c4d..bd6f9553c60c 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -69,7 +69,7 @@ class RoomSortOrder(Enum): class RoomWorkerStore(SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(RoomWorkerStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.config = hs.config @@ -863,7 +863,7 @@ class RoomBackgroundUpdateStore(SQLBaseStore): ADD_ROOMS_ROOM_VERSION_COLUMN = "add_rooms_room_version_column" def __init__(self, database: DatabasePool, db_conn, hs): - super(RoomBackgroundUpdateStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.config = hs.config @@ -1074,7 +1074,7 @@ def set_room_is_public(self, room_id, is_public): class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(RoomStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.config = hs.config diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 91a8b43da3e5..4fa8767b012e 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -55,7 +55,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(RoomMemberWorkerStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) # Is the current_state_events.membership up to date? Or is the # background update still running? 
@@ -819,7 +819,7 @@ def _is_local_host_in_room_ignoring_users_txn(txn): class RoomMemberBackgroundUpdateStore(SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(RoomMemberBackgroundUpdateStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.db_pool.updates.register_background_update_handler( _MEMBERSHIP_PROFILE_UPDATE_NAME, self._background_add_membership_profile ) @@ -973,7 +973,7 @@ def _background_current_state_membership_txn(txn, last_processed_room): class RoomMemberStore(RoomMemberWorkerStore, RoomMemberBackgroundUpdateStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(RoomMemberStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) async def forget(self, user_id: str, room_id: str) -> None: """Indicate that user_id wishes to discard history for room_id.""" diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index f01cf2fd02e9..e34fce6281b0 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -89,7 +89,7 @@ class SearchBackgroundUpdateStore(SearchWorkerStore): EVENT_SEARCH_USE_GIN_POSTGRES_NAME = "event_search_postgres_gin" def __init__(self, database: DatabasePool, db_conn, hs): - super(SearchBackgroundUpdateStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) if not hs.config.enable_search: return @@ -342,7 +342,7 @@ def reindex_search_txn(txn): class SearchStore(SearchBackgroundUpdateStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(SearchStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) async def search_msgs(self, room_ids, search_term, keys): """Performs a full text search over events with given keys. 
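Several of the stores in these diffs inherit from more than one of the converted mixins (RoomStore and RoomMemberStore above, for instance), and every constructor forwards the same (database, db_conn, hs) arguments. The zero-argument super() used throughout resolves to the next class in the method resolution order, so each base __init__ runs exactly once even in the diamond-shaped hierarchies. A minimal sketch of that cooperative pattern, using made-up class names rather than the real Synapse ones:

    class BaseStore:
        def __init__(self, database, db_conn, hs):
            self.db_pool = database

    class FooWorkerStore(BaseStore):
        def __init__(self, database, db_conn, hs):
            # delegates to the next class in the MRO, not necessarily BaseStore
            super().__init__(database, db_conn, hs)

    class FooBackgroundUpdateStore(BaseStore):
        def __init__(self, database, db_conn, hs):
            super().__init__(database, db_conn, hs)

    class FooStore(FooBackgroundUpdateStore, FooWorkerStore):
        def __init__(self, database, db_conn, hs):
            # MRO: FooStore -> FooBackgroundUpdateStore -> FooWorkerStore -> BaseStore
            super().__init__(database, db_conn, hs)

Written this way, super(FooBackgroundUpdateStore, self).__init__(...) and super().__init__(...) behave identically; the shorter spelling simply avoids repeating the class name.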
diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index 5c6168e30171..3c1e33819b88 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -56,7 +56,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): """ def __init__(self, database: DatabasePool, db_conn, hs): - super(StateGroupWorkerStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) async def get_room_version(self, room_id: str) -> RoomVersion: """Get the room_version of a given room @@ -320,7 +320,7 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore): DELETE_CURRENT_STATE_UPDATE_NAME = "delete_old_current_state_events" def __init__(self, database: DatabasePool, db_conn, hs): - super(MainStateBackgroundUpdateStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.server_name = hs.hostname @@ -506,4 +506,4 @@ class StateStore(StateGroupWorkerStore, MainStateBackgroundUpdateStore): """ def __init__(self, database: DatabasePool, db_conn, hs): - super(StateStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index 30840dbbaa31..d7816a8606ab 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -61,7 +61,7 @@ class StatsStore(StateDeltasStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(StatsStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.server_name = hs.hostname self.clock = self.hs.get_clock() diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 7dbe11513b3c..5dac78e574b8 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -266,7 +266,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta): """ def __init__(self, database: DatabasePool, db_conn, hs: "HomeServer"): - super(StreamWorkerStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self._instance_name = hs.get_instance_name() self._send_federation = hs.should_send_federation() diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index 091367006e17..99cffff50cc2 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -48,7 +48,7 @@ class TransactionStore(SQLBaseStore): """ def __init__(self, database: DatabasePool, db_conn, hs): - super(TransactionStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self._clock.looping_call(self._start_cleanup_transactions, 30 * 60 * 1000) diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index f2f9a5799ac5..5a390ff2f612 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -38,7 +38,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): SHARE_PRIVATE_WORKING_SET = 500 def __init__(self, database: DatabasePool, db_conn, hs): - super(UserDirectoryBackgroundUpdateStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.server_name = hs.hostname @@ -564,7 +564,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): SHARE_PRIVATE_WORKING_SET = 
500 def __init__(self, database: DatabasePool, db_conn, hs): - super(UserDirectoryStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) async def remove_from_user_dir(self, user_id: str) -> None: def _remove_from_user_dir_txn(txn): diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index 139085b67292..acb24e33af59 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -181,7 +181,7 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore): STATE_GROUPS_ROOM_INDEX_UPDATE_NAME = "state_groups_room_id_idx" def __init__(self, database: DatabasePool, db_conn, hs): - super(StateBackgroundUpdateStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.db_pool.updates.register_background_update_handler( self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, self._background_deduplicate_state, diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index e924f1ca3b31..bec3780a32b1 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -52,7 +52,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): """ def __init__(self, database: DatabasePool, db_conn, hs): - super(StateGroupDataStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) # Originally the state store used a single DictionaryCache to cache the # event IDs for the state types in a given state group to avoid hammering diff --git a/synapse/util/manhole.py b/synapse/util/manhole.py index 631654f2974e..da24ba0470b6 100644 --- a/synapse/util/manhole.py +++ b/synapse/util/manhole.py @@ -94,7 +94,7 @@ class SynapseManhole(ColoredManhole): """Overrides connectionMade to create our own ManholeInterpreter""" def connectionMade(self): - super(SynapseManhole, self).connectionMade() + super().connectionMade() # replace the manhole interpreter with our own impl self.interpreter = SynapseManholeInterpreter(self, self.namespace) diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index 79869aaa4493..a5cc9d055186 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -45,7 +45,7 @@ def __init__(self, retry_last_ts, retry_interval, destination): """ msg = "Not retrying server %s." 
% (destination,) - super(NotRetryingDestination, self).__init__(msg) + super().__init__(msg) self.retry_last_ts = retry_last_ts self.retry_interval = retry_interval diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 210ddcbb882f..366dcfb6701a 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -30,7 +30,7 @@ class E2eKeysHandlerTestCase(unittest.TestCase): def __init__(self, *args, **kwargs): - super(E2eKeysHandlerTestCase, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.hs = None # type: synapse.server.HomeServer self.handler = None # type: synapse.handlers.e2e_keys.E2eKeysHandler diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py index 3362050ce001..7adde9b9de8d 100644 --- a/tests/handlers/test_e2e_room_keys.py +++ b/tests/handlers/test_e2e_room_keys.py @@ -47,7 +47,7 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): def __init__(self, *args, **kwargs): - super(E2eRoomKeysHandlerTestCase, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.hs = None # type: synapse.server.HomeServer self.handler = None # type: synapse.handlers.e2e_keys.E2eRoomKeysHandler diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index 561258a3562a..bc578411d69e 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -58,7 +58,7 @@ def setUp(self): # Patch up the equality operator for events so that we can check # whether lists of events match using assertEquals self.unpatches = [patch__eq__(_EventInternalMetadata), patch__eq__(FrozenEvent)] - return super(SlavedEventStoreTestCase, self).setUp() + return super().setUp() def prepare(self, *args, **kwargs): super().prepare(*args, **kwargs) diff --git a/tests/rest/test_well_known.py b/tests/rest/test_well_known.py index b090bb974cf9..dcd65c2a503f 100644 --- a/tests/rest/test_well_known.py +++ b/tests/rest/test_well_known.py @@ -21,7 +21,7 @@ class WellKnownTests(unittest.HomeserverTestCase): def setUp(self): - super(WellKnownTests, self).setUp() + super().setUp() # replace the JsonResource with a WellKnownResource self.resource = WellKnownResource(self.hs) diff --git a/tests/server.py b/tests/server.py index 61ec67015500..b404ad4e2a49 100644 --- a/tests/server.py +++ b/tests/server.py @@ -260,7 +260,7 @@ def getHostByName(self, name, timeout=None): return succeed(lookups[name]) self.nameResolver = SimpleResolverComplexifier(FakeResolver()) - super(ThreadedMemoryReactorClock, self).__init__() + super().__init__() def listenUDP(self, port, protocol, interface="", maxPacketSize=8196): p = udp.Port(port, protocol, interface, maxPacketSize, self) diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index cb808d4de4d7..46f94914ffb4 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -413,7 +413,7 @@ def test_get_appservices_by_state_multiple(self): # required for ApplicationServiceTransactionStoreTestCase tests class TestTransactionStore(ApplicationServiceTransactionStore, ApplicationServiceStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(TestTransactionStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) class ApplicationServiceStoreConfigTestCase(unittest.TestCase): diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py index 34ae8c9da7fc..ecb00f4e0293 100644 
--- a/tests/storage/test_devices.py +++ b/tests/storage/test_devices.py @@ -23,7 +23,7 @@ class DeviceStoreTestCase(tests.unittest.TestCase): def __init__(self, *args, **kwargs): - super(DeviceStoreTestCase, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.store = None # type: synapse.storage.DataStore @defer.inlineCallbacks diff --git a/tests/test_state.py b/tests/test_state.py index 2d58467932ed..80b0ccbc405b 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -125,7 +125,7 @@ async def get_room_version_id(self, room_id): class DictObj(dict): def __init__(self, **kwargs): - super(DictObj, self).__init__(kwargs) + super().__init__(kwargs) self.__dict__ = self diff --git a/tests/unittest.py b/tests/unittest.py index 128dd4e19c43..dabf69cff405 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -92,7 +92,7 @@ class TestCase(unittest.TestCase): root logger's logging level while that test (case|method) runs.""" def __init__(self, methodName, *args, **kwargs): - super(TestCase, self).__init__(methodName, *args, **kwargs) + super().__init__(methodName, *args, **kwargs) method = getattr(self, methodName) From 36efbcaf511790d6f1dd7df2260900f07489bda6 Mon Sep 17 00:00:00 2001 From: reivilibre <38398653+reivilibre@users.noreply.github.com> Date: Fri, 18 Sep 2020 14:59:13 +0100 Subject: [PATCH 035/245] Catch-up after Federation Outage (bonus): Catch-up on Synapse Startup (#8322) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Olivier Wilkinson (reivilibre) Co-authored-by: Patrick Cloke * Fix _set_destination_retry_timings This came about because the code assumed that retry_interval could not be NULL — which has been challenged by catch-up. --- changelog.d/8230.bugfix | 1 + changelog.d/8230.misc | 1 - changelog.d/8247.bugfix | 1 + changelog.d/8247.misc | 1 - changelog.d/8258.bugfix | 1 + changelog.d/8258.misc | 1 - changelog.d/8322.bugfix | 1 + synapse/federation/sender/__init__.py | 51 ++++++++++ .../storage/databases/main/transactions.py | 66 ++++++++++++- tests/federation/test_federation_catch_up.py | 99 +++++++++++++++++++ 10 files changed, 218 insertions(+), 5 deletions(-) create mode 100644 changelog.d/8230.bugfix delete mode 100644 changelog.d/8230.misc create mode 100644 changelog.d/8247.bugfix delete mode 100644 changelog.d/8247.misc create mode 100644 changelog.d/8258.bugfix delete mode 100644 changelog.d/8258.misc create mode 100644 changelog.d/8322.bugfix diff --git a/changelog.d/8230.bugfix b/changelog.d/8230.bugfix new file mode 100644 index 000000000000..532d0e22fefb --- /dev/null +++ b/changelog.d/8230.bugfix @@ -0,0 +1 @@ +Fix messages over federation being lost until an event is sent into the same room. diff --git a/changelog.d/8230.misc b/changelog.d/8230.misc deleted file mode 100644 index bf0ba767307d..000000000000 --- a/changelog.d/8230.misc +++ /dev/null @@ -1 +0,0 @@ -Track the latest event for every destination and room for catch-up after federation outage. diff --git a/changelog.d/8247.bugfix b/changelog.d/8247.bugfix new file mode 100644 index 000000000000..532d0e22fefb --- /dev/null +++ b/changelog.d/8247.bugfix @@ -0,0 +1 @@ +Fix messages over federation being lost until an event is sent into the same room. 
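The startup catch-up added by this patch deliberately staggers its work: after an initial delay it fetches destinations with outstanding catch-up in small, lexicographically ordered batches and wakes them one at a time, sleeping between wake-ups, so a restart does not produce a spike of outbound transactions. A rough sketch of that loop, with get_batch and wake standing in as placeholders for the real storage and sender methods (they are not the actual Synapse API):

    import asyncio

    STARTUP_DELAY_SEC = 15   # wait after startup before beginning catch-up
    WAKE_INTERVAL_SEC = 5    # pause between waking successive destinations

    async def wake_destinations_needing_catchup(get_batch, wake):
        await asyncio.sleep(STARTUP_DELAY_SEC)
        last = None
        while True:
            # get_batch(after) returns the next batch of destinations, in
            # lexicographic order, that still have unsent PDUs outstanding
            batch = await get_batch(last)
            if not batch:
                break
            for last in batch:
                wake(last)
                await asyncio.sleep(WAKE_INTERVAL_SEC)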
diff --git a/changelog.d/8247.misc b/changelog.d/8247.misc deleted file mode 100644 index 3c27803be45f..000000000000 --- a/changelog.d/8247.misc +++ /dev/null @@ -1 +0,0 @@ -Track the `stream_ordering` of the last successfully-sent event to every destination, so we can use this information to 'catch up' a remote server after an outage. diff --git a/changelog.d/8258.bugfix b/changelog.d/8258.bugfix new file mode 100644 index 000000000000..532d0e22fefb --- /dev/null +++ b/changelog.d/8258.bugfix @@ -0,0 +1 @@ +Fix messages over federation being lost until an event is sent into the same room. diff --git a/changelog.d/8258.misc b/changelog.d/8258.misc deleted file mode 100644 index 3c27803be45f..000000000000 --- a/changelog.d/8258.misc +++ /dev/null @@ -1 +0,0 @@ -Track the `stream_ordering` of the last successfully-sent event to every destination, so we can use this information to 'catch up' a remote server after an outage. diff --git a/changelog.d/8322.bugfix b/changelog.d/8322.bugfix new file mode 100644 index 000000000000..532d0e22fefb --- /dev/null +++ b/changelog.d/8322.bugfix @@ -0,0 +1 @@ +Fix messages over federation being lost until an event is sent into the same room. diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 41a726878dfa..8bb17b3a05d3 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -55,6 +55,15 @@ "Total number of PDUs queued for sending across all destinations", ) +# Time (in s) after Synapse's startup that we will begin to wake up destinations +# that have catch-up outstanding. +CATCH_UP_STARTUP_DELAY_SEC = 15 + +# Time (in s) to wait in between waking up each destination, i.e. one destination +# will be woken up every seconds after Synapse's startup until we have woken +# every destination has outstanding catch-up. +CATCH_UP_STARTUP_INTERVAL_SEC = 5 + class FederationSender: def __init__(self, hs: "synapse.server.HomeServer"): @@ -125,6 +134,14 @@ def __init__(self, hs: "synapse.server.HomeServer"): 1000.0 / hs.config.federation_rr_transactions_per_room_per_second ) + # wake up destinations that have outstanding PDUs to be caught up + self._catchup_after_startup_timer = self.clock.call_later( + CATCH_UP_STARTUP_DELAY_SEC, + run_as_background_process, + "wake_destinations_needing_catchup", + self._wake_destinations_needing_catchup, + ) + def _get_per_destination_queue(self, destination: str) -> PerDestinationQueue: """Get or create a PerDestinationQueue for the given destination @@ -560,3 +577,37 @@ async def get_replication_rows( # Dummy implementation for case where federation sender isn't offloaded # to a worker. return [], 0, False + + async def _wake_destinations_needing_catchup(self): + """ + Wakes up destinations that need catch-up and are not currently being + backed off from. + + In order to reduce load spikes, adds a delay between each destination. + """ + + last_processed = None # type: Optional[str] + + while True: + destinations_to_wake = await self.store.get_catch_up_outstanding_destinations( + last_processed + ) + + if not destinations_to_wake: + # finished waking all destinations! 
+ self._catchup_after_startup_timer = None + break + + destinations_to_wake = [ + d + for d in destinations_to_wake + if self._federation_shard_config.should_handle(self._instance_name, d) + ] + + for last_processed in destinations_to_wake: + logger.info( + "Destination %s has outstanding catch-up, waking up.", + last_processed, + ) + self.wake_destination(last_processed) + await self.clock.sleep(CATCH_UP_STARTUP_INTERVAL_SEC) diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index 99cffff50cc2..97aed1500e3e 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -218,6 +218,7 @@ def _set_destination_retry_timings( retry_interval = EXCLUDED.retry_interval WHERE EXCLUDED.retry_interval = 0 + OR destinations.retry_interval IS NULL OR destinations.retry_interval < EXCLUDED.retry_interval """ @@ -249,7 +250,11 @@ def _set_destination_retry_timings( "retry_interval": retry_interval, }, ) - elif retry_interval == 0 or prev_row["retry_interval"] < retry_interval: + elif ( + retry_interval == 0 + or prev_row["retry_interval"] is None + or prev_row["retry_interval"] < retry_interval + ): self.db_pool.simple_update_one_txn( txn, "destinations", @@ -397,7 +402,7 @@ async def get_catch_up_room_event_ids( @staticmethod def _get_catch_up_room_event_ids_txn( - txn, destination: str, last_successful_stream_ordering: int, + txn: LoggingTransaction, destination: str, last_successful_stream_ordering: int, ) -> List[str]: q = """ SELECT event_id FROM destination_rooms @@ -412,3 +417,60 @@ def _get_catch_up_room_event_ids_txn( ) event_ids = [row[0] for row in txn] return event_ids + + async def get_catch_up_outstanding_destinations( + self, after_destination: Optional[str] + ) -> List[str]: + """ + Gets at most 25 destinations which have outstanding PDUs to be caught up, + and are not being backed off from + Args: + after_destination: + If provided, all destinations must be lexicographically greater + than this one. + + Returns: + list of up to 25 destinations with outstanding catch-up. + These are the lexicographically first destinations which are + lexicographically greater than after_destination (if provided). + """ + time = self.hs.get_clock().time_msec() + + return await self.db_pool.runInteraction( + "get_catch_up_outstanding_destinations", + self._get_catch_up_outstanding_destinations_txn, + time, + after_destination, + ) + + @staticmethod + def _get_catch_up_outstanding_destinations_txn( + txn: LoggingTransaction, now_time_ms: int, after_destination: Optional[str] + ) -> List[str]: + q = """ + SELECT destination FROM destinations + WHERE destination IN ( + SELECT destination FROM destination_rooms + WHERE destination_rooms.stream_ordering > + destinations.last_successful_stream_ordering + ) + AND destination > ? + AND ( + retry_last_ts IS NULL OR + retry_last_ts + retry_interval < ? + ) + ORDER BY destination + LIMIT 25 + """ + txn.execute( + q, + ( + # everything is lexicographically greater than "" so this gives + # us the first batch of up to 25. 
+ after_destination or "", + now_time_ms, + ), + ) + + destinations = [row[0] for row in txn] + return destinations diff --git a/tests/federation/test_federation_catch_up.py b/tests/federation/test_federation_catch_up.py index cc52c3dfac0a..1a3ccb263dae 100644 --- a/tests/federation/test_federation_catch_up.py +++ b/tests/federation/test_federation_catch_up.py @@ -321,3 +321,102 @@ def test_catch_up_loop(self): per_dest_queue._last_successful_stream_ordering, event_5.internal_metadata.stream_ordering, ) + + @override_config({"send_federation": True}) + def test_catch_up_on_synapse_startup(self): + """ + Tests the behaviour of get_catch_up_outstanding_destinations and + _wake_destinations_needing_catchup. + """ + + # list of sorted server names (note that there are more servers than the batch + # size used in get_catch_up_outstanding_destinations). + server_names = ["server%02d" % number for number in range(42)] + ["zzzerver"] + + # ARRANGE: + # - a local user (u1) + # - a room which u1 is joined to (and remote users @user:serverXX are + # joined to) + + # mark the remotes as online + self.is_online = True + + self.register_user("u1", "you the one") + u1_token = self.login("u1", "you the one") + room_id = self.helper.create_room_as("u1", tok=u1_token) + + for server_name in server_names: + self.get_success( + event_injection.inject_member_event( + self.hs, room_id, "@user:%s" % server_name, "join" + ) + ) + + # create an event + self.helper.send(room_id, "deary me!", tok=u1_token) + + # ASSERT: + # - All servers are up to date so none should have outstanding catch-up + outstanding_when_successful = self.get_success( + self.hs.get_datastore().get_catch_up_outstanding_destinations(None) + ) + self.assertEqual(outstanding_when_successful, []) + + # ACT: + # - Make the remote servers unreachable + self.is_online = False + + # - Mark zzzerver as being backed-off from + now = self.clock.time_msec() + self.get_success( + self.hs.get_datastore().set_destination_retry_timings( + "zzzerver", now, now, 24 * 60 * 60 * 1000 # retry in 1 day + ) + ) + + # - Send an event + self.helper.send(room_id, "can anyone hear me?", tok=u1_token) + + # ASSERT (get_catch_up_outstanding_destinations): + # - all remotes are outstanding + # - they are returned in batches of 25, in order + outstanding_1 = self.get_success( + self.hs.get_datastore().get_catch_up_outstanding_destinations(None) + ) + + self.assertEqual(len(outstanding_1), 25) + self.assertEqual(outstanding_1, server_names[0:25]) + + outstanding_2 = self.get_success( + self.hs.get_datastore().get_catch_up_outstanding_destinations( + outstanding_1[-1] + ) + ) + self.assertNotIn("zzzerver", outstanding_2) + self.assertEqual(len(outstanding_2), 17) + self.assertEqual(outstanding_2, server_names[25:-1]) + + # ACT: call _wake_destinations_needing_catchup + + # patch wake_destination to just count the destinations instead + woken = [] + + def wake_destination_track(destination): + woken.append(destination) + + self.hs.get_federation_sender().wake_destination = wake_destination_track + + # cancel the pre-existing timer for _wake_destinations_needing_catchup + # this is because we are calling it manually rather than waiting for it + # to be called automatically + self.hs.get_federation_sender()._catchup_after_startup_timer.cancel() + + self.get_success( + self.hs.get_federation_sender()._wake_destinations_needing_catchup(), by=5.0 + ) + + # ASSERT (_wake_destinations_needing_catchup): + # - all remotes are woken up, save for zzzerver + self.assertNotIn("zzzerver", 
woken) + # - all destinations are woken exactly once; they appear once in woken. + self.assertCountEqual(woken, server_names[:-1]) From d688b4bafca58dfff1be35615d6ff1e202d47cc6 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Fri, 18 Sep 2020 16:26:36 +0200 Subject: [PATCH 036/245] Admin API for querying rooms where a user is a member (#8306) Add a new admin API `GET /_synapse/admin/v1/users//joined_rooms` to list all rooms where a user is a member. --- changelog.d/8306.feature | 1 + docs/admin_api/user_admin_api.rst | 37 ++++++++++++ synapse/rest/admin/__init__.py | 2 + synapse/rest/admin/users.py | 26 +++++++++ tests/rest/admin/test_user.py | 96 ++++++++++++++++++++++++++++++- 5 files changed, 160 insertions(+), 2 deletions(-) create mode 100644 changelog.d/8306.feature diff --git a/changelog.d/8306.feature b/changelog.d/8306.feature new file mode 100644 index 000000000000..5c23da4030ff --- /dev/null +++ b/changelog.d/8306.feature @@ -0,0 +1 @@ +Add an admin API for querying rooms where a user is a member. Contributed by @dklimpel. \ No newline at end of file diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst index e21c78a9c62b..7ca902faba25 100644 --- a/docs/admin_api/user_admin_api.rst +++ b/docs/admin_api/user_admin_api.rst @@ -304,6 +304,43 @@ To use it, you will need to authenticate by providing an ``access_token`` for a server admin: see `README.rst `_. +List room memberships of an user +================================ +Gets a list of all ``room_id`` that a specific ``user_id`` is member. + +The API is:: + + GET /_synapse/admin/v1/users//joined_rooms + +To use it, you will need to authenticate by providing an ``access_token`` for a +server admin: see `README.rst `_. + +A response body like the following is returned: + +.. code:: json + + { + "joined_rooms": [ + "!DuGcnbhHGaSZQoNQR:matrix.org", + "!ZtSaPCawyWtxfWiIy:matrix.org" + ], + "total": 2 + } + +**Parameters** + +The following parameters should be set in the URL: + +- ``user_id`` - fully qualified: for example, ``@user:server.com``. + +**Response** + +The following fields are returned in the JSON response body: + +- ``joined_rooms`` - An array of ``room_id``. +- ``total`` - Number of rooms. + + User devices ============ diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index abf362c7b74e..4a75c0648024 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -49,6 +49,7 @@ ResetPasswordRestServlet, SearchUsersRestServlet, UserAdminServlet, + UserMembershipRestServlet, UserRegisterServlet, UserRestServletV2, UsersRestServlet, @@ -209,6 +210,7 @@ def register_servlets(hs, http_server): SendServerNoticeServlet(hs).register(http_server) VersionServlet(hs).register(http_server) UserAdminServlet(hs).register(http_server) + UserMembershipRestServlet(hs).register(http_server) UserRestServletV2(hs).register(http_server) UsersRestServletV2(hs).register(http_server) DeviceRestServlet(hs).register(http_server) diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 0f537031c4d5..20dc1d0e057c 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -683,3 +683,29 @@ async def on_PUT(self, request, user_id): await self.store.set_server_admin(target_user, set_admin_to) return 200, {} + + +class UserMembershipRestServlet(RestServlet): + """ + Get room list of an user. 
+ """ + + PATTERNS = admin_patterns("/users/(?P[^/]+)/joined_rooms$") + + def __init__(self, hs): + self.is_mine = hs.is_mine + self.auth = hs.get_auth() + self.store = hs.get_datastore() + + async def on_GET(self, request, user_id): + await assert_requester_is_admin(self.auth, request) + + if not self.is_mine(UserID.from_string(user_id)): + raise SynapseError(400, "Can only lookup local users") + + room_ids = await self.store.get_rooms_for_user(user_id) + if not room_ids: + raise NotFoundError("User not found") + + ret = {"joined_rooms": list(room_ids), "total": len(room_ids)} + return 200, ret diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index b8b7758d2447..f96011fc1c8f 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -22,8 +22,8 @@ import synapse.rest.admin from synapse.api.constants import UserTypes -from synapse.api.errors import HttpResponseException, ResourceLimitError -from synapse.rest.client.v1 import login +from synapse.api.errors import Codes, HttpResponseException, ResourceLimitError +from synapse.rest.client.v1 import login, room from synapse.rest.client.v2_alpha import sync from tests import unittest @@ -995,3 +995,95 @@ def test_accidental_deactivation_prevention(self): # Ensure they're still alive self.assertEqual(0, channel.json_body["deactivated"]) + + +class UserMembershipRestTestCase(unittest.HomeserverTestCase): + + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + sync.register_servlets, + room.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.store = hs.get_datastore() + + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + self.other_user = self.register_user("user", "pass") + self.url = "/_synapse/admin/v1/users/%s/joined_rooms" % urllib.parse.quote( + self.other_user + ) + + def test_no_auth(self): + """ + Try to list rooms of an user without authentication. + """ + request, channel = self.make_request("GET", self.url, b"{}") + self.render(request) + + self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) + + def test_requester_is_no_admin(self): + """ + If the user is not a server admin, an error is returned. 
+ """ + other_user_token = self.login("user", "pass") + + request, channel = self.make_request( + "GET", self.url, access_token=other_user_token, + ) + self.render(request) + + self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + def test_user_does_not_exist(self): + """ + Tests that a lookup for a user that does not exist returns a 404 + """ + url = "/_synapse/admin/v1/users/@unknown_person:test/joined_rooms" + request, channel = self.make_request( + "GET", url, access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(404, channel.code, msg=channel.json_body) + self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) + + def test_user_is_not_local(self): + """ + Tests that a lookup for a user that is not a local returns a 400 + """ + url = "/_synapse/admin/v1/users/@unknown_person:unknown_domain/joined_rooms" + + request, channel = self.make_request( + "GET", url, access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, channel.code, msg=channel.json_body) + self.assertEqual("Can only lookup local users", channel.json_body["error"]) + + def test_get_rooms(self): + """ + Tests that a normal lookup for rooms is successfully + """ + # Create rooms and join + other_user_tok = self.login("user", "pass") + number_rooms = 5 + for n in range(number_rooms): + self.helper.create_room_as(self.other_user, tok=other_user_tok) + + # Get rooms + request, channel = self.make_request( + "GET", self.url, access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual(number_rooms, channel.json_body["total"]) + self.assertEqual(number_rooms, len(channel.json_body["joined_rooms"])) From babc0275431c68e64050db11959d74a636afbd3e Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 18 Sep 2020 12:54:04 -0400 Subject: [PATCH 037/245] Fix a bad merge from release-v1.20.0. (#8354) --- changelog.d/8354.misc | 1 + synapse/handlers/pagination.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8354.misc diff --git a/changelog.d/8354.misc b/changelog.d/8354.misc new file mode 100644 index 000000000000..1d33cde2da6c --- /dev/null +++ b/changelog.d/8354.misc @@ -0,0 +1 @@ +Fix bad merge from `release-v1.20.0` branch to `develop`. diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index f132ed3368df..a0b3bdb5e0c3 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -385,7 +385,7 @@ async def get_messages( ) await self.hs.get_handlers().federation_handler.maybe_backfill( - room_id, curr_topo, limit=source_config.limit, + room_id, curr_topo, limit=pagin_config.limit, ) to_room_key = None From 4f3096d866a9810b1c982669d9567fe47b2db73f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 21 Sep 2020 12:34:06 +0100 Subject: [PATCH 038/245] Add a comment re #1691 --- synapse/crypto/context_factory.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py index 2b03f5ac76ef..79668a402e14 100644 --- a/synapse/crypto/context_factory.py +++ b/synapse/crypto/context_factory.py @@ -45,7 +45,11 @@ class ServerContextFactory(ContextFactory): """Factory for PyOpenSSL SSL contexts that are used to handle incoming - connections.""" + connections. 
+ + TODO: replace this with an implementation of IOpenSSLServerConnectionCreator, + per https://github.com/matrix-org/synapse/issues/1691 + """ def __init__(self, config): # TODO: once pyOpenSSL exposes TLS_METHOD and SSL_CTX_set_min_proto_version, From 37ca5924bddccc37521798236339b539677d101f Mon Sep 17 00:00:00 2001 From: Dionysis Grigoropoulos Date: Tue, 22 Sep 2020 13:42:55 +0300 Subject: [PATCH 039/245] Create function to check for long names in devices (#8364) * Create a new function to verify that the length of a device name is under a certain threshold. * Refactor old code and tests to use said function. * Verify device name length during registration of device * Add a test for the above Signed-off-by: Dionysis Grigoropoulos --- changelog.d/8364.bugfix | 2 ++ synapse/handlers/device.py | 30 ++++++++++++++++++++++++------ tests/handlers/test_device.py | 11 +++++++++++ tests/rest/admin/test_device.py | 2 +- 4 files changed, 38 insertions(+), 7 deletions(-) create mode 100644 changelog.d/8364.bugfix diff --git a/changelog.d/8364.bugfix b/changelog.d/8364.bugfix new file mode 100644 index 000000000000..7b82cbc3881c --- /dev/null +++ b/changelog.d/8364.bugfix @@ -0,0 +1,2 @@ +Fix a bug where during device registration the length of the device name wasn't +limited. diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 55a978743988..4149520d6c56 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -20,6 +20,7 @@ from synapse.api import errors from synapse.api.constants import EventTypes from synapse.api.errors import ( + Codes, FederationDeniedError, HttpResponseException, RequestSendFailed, @@ -265,6 +266,24 @@ def __init__(self, hs): hs.get_distributor().observe("user_left_room", self.user_left_room) + def _check_device_name_length(self, name: str): + """ + Checks whether a device name is longer than the maximum allowed length. + + Args: + name: The name of the device. + + Raises: + SynapseError: if the device name is too long. + """ + if name and len(name) > MAX_DEVICE_DISPLAY_NAME_LEN: + raise SynapseError( + 400, + "Device display name is too long (max %i)" + % (MAX_DEVICE_DISPLAY_NAME_LEN,), + errcode=Codes.TOO_LARGE, + ) + async def check_device_registered( self, user_id, device_id, initial_device_display_name=None ): @@ -282,6 +301,9 @@ async def check_device_registered( Returns: str: device id (generated if none was supplied) """ + + self._check_device_name_length(initial_device_display_name) + if device_id is not None: new_device = await self.store.store_device( user_id=user_id, @@ -397,12 +419,8 @@ async def update_device(self, user_id: str, device_id: str, content: dict) -> No # Reject a new displayname which is too long. new_display_name = content.get("display_name") - if new_display_name and len(new_display_name) > MAX_DEVICE_DISPLAY_NAME_LEN: - raise SynapseError( - 400, - "Device display name is too long (max %i)" - % (MAX_DEVICE_DISPLAY_NAME_LEN,), - ) + + self._check_device_name_length(new_display_name) try: await self.store.update_device( diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index 6aa322bf3ac8..969d44c78711 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -35,6 +35,17 @@ def prepare(self, reactor, clock, hs): # These tests assume that it starts 1000 seconds in. 
self.reactor.advance(1000) + def test_device_is_created_with_invalid_name(self): + self.get_failure( + self.handler.check_device_registered( + user_id="@boris:foo", + device_id="foo", + initial_device_display_name="a" + * (synapse.handlers.device.MAX_DEVICE_DISPLAY_NAME_LEN + 1), + ), + synapse.api.errors.SynapseError, + ) + def test_device_is_created_if_doesnt_exist(self): res = self.get_success( self.handler.check_device_registered( diff --git a/tests/rest/admin/test_device.py b/tests/rest/admin/test_device.py index faa7f381a96b..92c9058887e2 100644 --- a/tests/rest/admin/test_device.py +++ b/tests/rest/admin/test_device.py @@ -221,7 +221,7 @@ def test_update_device_too_long_display_name(self): self.render(request) self.assertEqual(400, channel.code, msg=channel.json_body) - self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"]) + self.assertEqual(Codes.TOO_LARGE, channel.json_body["errcode"]) # Ensure the display name was not updated. request, channel = self.make_request( From 4da01f9c614f36a293235d6a1fd3602d550f2001 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Tue, 22 Sep 2020 19:15:04 +0200 Subject: [PATCH 040/245] Admin API for reported events (#8217) Add an admin API to read entries of table `event_reports`. API: `GET /_synapse/admin/v1/event_reports` --- changelog.d/8217.feature | 1 + docs/admin_api/event_reports.rst | 129 +++++++++ synapse/rest/admin/__init__.py | 2 + synapse/rest/admin/event_reports.py | 88 ++++++ synapse/storage/databases/main/room.py | 95 ++++++ tests/rest/admin/test_event_reports.py | 382 +++++++++++++++++++++++++ 6 files changed, 697 insertions(+) create mode 100644 changelog.d/8217.feature create mode 100644 docs/admin_api/event_reports.rst create mode 100644 synapse/rest/admin/event_reports.py create mode 100644 tests/rest/admin/test_event_reports.py diff --git a/changelog.d/8217.feature b/changelog.d/8217.feature new file mode 100644 index 000000000000..899cbf14ef56 --- /dev/null +++ b/changelog.d/8217.feature @@ -0,0 +1 @@ +Add an admin API `GET /_synapse/admin/v1/event_reports` to read entries of table `event_reports`. Contributed by @dklimpel. \ No newline at end of file diff --git a/docs/admin_api/event_reports.rst b/docs/admin_api/event_reports.rst new file mode 100644 index 000000000000..461be012300e --- /dev/null +++ b/docs/admin_api/event_reports.rst @@ -0,0 +1,129 @@ +Show reported events +==================== + +This API returns information about reported events. + +The api is:: + + GET /_synapse/admin/v1/event_reports?from=0&limit=10 + +To use it, you will need to authenticate by providing an ``access_token`` for a +server admin: see `README.rst `_. + +It returns a JSON body like the following: + +.. code:: jsonc + + { + "event_reports": [ + { + "content": { + "reason": "foo", + "score": -100 + }, + "event_id": "$bNUFCwGzWca1meCGkjp-zwslF-GfVcXukvRLI1_FaVY", + "event_json": { + "auth_events": [ + "$YK4arsKKcc0LRoe700pS8DSjOvUT4NDv0HfInlMFw2M", + "$oggsNXxzPFRE3y53SUNd7nsj69-QzKv03a1RucHu-ws" + ], + "content": { + "body": "matrix.org: This Week in Matrix", + "format": "org.matrix.custom.html", + "formatted_body": "matrix.org:
This Week in Matrix", + "msgtype": "m.notice" + }, + "depth": 546, + "hashes": { + "sha256": "xK1//xnmvHJIOvbgXlkI8eEqdvoMmihVDJ9J4SNlsAw" + }, + "origin": "matrix.org", + "origin_server_ts": 1592291711430, + "prev_events": [ + "$YK4arsKKcc0LRoe700pS8DSjOvUT4NDv0HfInlMFw2M" + ], + "prev_state": [], + "room_id": "!ERAgBpSOcCCuTJqQPk:matrix.org", + "sender": "@foobar:matrix.org", + "signatures": { + "matrix.org": { + "ed25519:a_JaEG": "cs+OUKW/iHx5pEidbWxh0UiNNHwe46Ai9LwNz+Ah16aWDNszVIe2gaAcVZfvNsBhakQTew51tlKmL2kspXk/Dg" + } + }, + "type": "m.room.message", + "unsigned": { + "age_ts": 1592291711430, + } + }, + "id": 2, + "reason": "foo", + "received_ts": 1570897107409, + "room_alias": "#alias1:matrix.org", + "room_id": "!ERAgBpSOcCCuTJqQPk:matrix.org", + "sender": "@foobar:matrix.org", + "user_id": "@foo:matrix.org" + }, + { + "content": { + "reason": "bar", + "score": -100 + }, + "event_id": "$3IcdZsDaN_En-S1DF4EMCy3v4gNRKeOJs8W5qTOKj4I", + "event_json": { + // hidden items + // see above + }, + "id": 3, + "reason": "bar", + "received_ts": 1598889612059, + "room_alias": "#alias2:matrix.org", + "room_id": "!eGvUQuTCkHGVwNMOjv:matrix.org", + "sender": "@foobar:matrix.org", + "user_id": "@bar:matrix.org" + } + ], + "next_token": 2, + "total": 4 + } + +To paginate, check for ``next_token`` and if present, call the endpoint again +with ``from`` set to the value of ``next_token``. This will return a new page. + +If the endpoint does not return a ``next_token`` then there are no more +reports to paginate through. + +**URL parameters:** + +- ``limit``: integer - Is optional but is used for pagination, + denoting the maximum number of items to return in this call. Defaults to ``100``. +- ``from``: integer - Is optional but used for pagination, + denoting the offset in the returned results. This should be treated as an opaque value and + not explicitly set to anything other than the return value of ``next_token`` from a previous call. + Defaults to ``0``. +- ``dir``: string - Direction of event report order. Whether to fetch the most recent first (``b``) or the + oldest first (``f``). Defaults to ``b``. +- ``user_id``: string - Is optional and filters to only return users with user IDs that contain this value. + This is the user who reported the event and wrote the reason. +- ``room_id``: string - Is optional and filters to only return rooms with room IDs that contain this value. + +**Response** + +The following fields are returned in the JSON response body: + +- ``id``: integer - ID of event report. +- ``received_ts``: integer - The timestamp (in milliseconds since the unix epoch) when this report was sent. +- ``room_id``: string - The ID of the room in which the event being reported is located. +- ``event_id``: string - The ID of the reported event. +- ``user_id``: string - This is the user who reported the event and wrote the reason. +- ``reason``: string - Comment made by the ``user_id`` in this report. May be blank. +- ``content``: object - Content of reported event. + + - ``reason``: string - Comment made by the ``user_id`` in this report. May be blank. + - ``score``: integer - Content is reported based upon a negative score, where -100 is "most offensive" and 0 is "inoffensive". + +- ``sender``: string - This is the ID of the user who sent the original message/event that was reported. +- ``room_alias``: string - The alias of the room. ``null`` if the room does not have a canonical alias set. +- ``event_json``: object - Details of the original event that was reported. 
+- ``next_token``: integer - Indication for pagination. See above. +- ``total``: integer - Total number of event reports related to the query (``user_id`` and ``room_id``). + diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 4a75c0648024..5c5f00b21376 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -31,6 +31,7 @@ DeviceRestServlet, DevicesRestServlet, ) +from synapse.rest.admin.event_reports import EventReportsRestServlet from synapse.rest.admin.groups import DeleteGroupAdminRestServlet from synapse.rest.admin.media import ListMediaInRoom, register_servlets_for_media_repo from synapse.rest.admin.purge_room_servlet import PurgeRoomServlet @@ -216,6 +217,7 @@ def register_servlets(hs, http_server): DeviceRestServlet(hs).register(http_server) DevicesRestServlet(hs).register(http_server) DeleteDevicesRestServlet(hs).register(http_server) + EventReportsRestServlet(hs).register(http_server) def register_servlets_for_client_rest_resource(hs, http_server): diff --git a/synapse/rest/admin/event_reports.py b/synapse/rest/admin/event_reports.py new file mode 100644 index 000000000000..5b8d0594cddc --- /dev/null +++ b/synapse/rest/admin/event_reports.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Dirk Klimpel +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from synapse.api.errors import Codes, SynapseError +from synapse.http.servlet import RestServlet, parse_integer, parse_string +from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin + +logger = logging.getLogger(__name__) + + +class EventReportsRestServlet(RestServlet): + """ + List all reported events that are known to the homeserver. Results are returned + in a dictionary containing report information. Supports pagination. + The requester must have administrator access in Synapse. + + GET /_synapse/admin/v1/event_reports + returns: + 200 OK with list of reports if success otherwise an error. + + Args: + The parameters `from` and `limit` are required only for pagination. + By default, a `limit` of 100 is used. + The parameter `dir` can be used to define the order of results. + The parameter `user_id` can be used to filter by user id. + The parameter `room_id` can be used to filter by room id. 
+ Returns: + A list of reported events and an integer representing the total number of + reported events that exist given this query + """ + + PATTERNS = admin_patterns("/event_reports$") + + def __init__(self, hs): + self.hs = hs + self.auth = hs.get_auth() + self.store = hs.get_datastore() + + async def on_GET(self, request): + await assert_requester_is_admin(self.auth, request) + + start = parse_integer(request, "from", default=0) + limit = parse_integer(request, "limit", default=100) + direction = parse_string(request, "dir", default="b") + user_id = parse_string(request, "user_id") + room_id = parse_string(request, "room_id") + + if start < 0: + raise SynapseError( + 400, + "The start parameter must be a positive integer.", + errcode=Codes.INVALID_PARAM, + ) + + if limit < 0: + raise SynapseError( + 400, + "The limit parameter must be a positive integer.", + errcode=Codes.INVALID_PARAM, + ) + + if direction not in ("f", "b"): + raise SynapseError( + 400, "Unknown direction: %s" % (direction,), errcode=Codes.INVALID_PARAM + ) + + event_reports, total = await self.store.get_event_reports_paginate( + start, limit, direction, user_id, room_id + ) + ret = {"event_reports": event_reports, "total": total} + if (start + limit) < total: + ret["next_token"] = start + len(event_reports) + + return 200, ret diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index bd6f9553c60c..3ee097abf7b3 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -1328,6 +1328,101 @@ async def add_event_report( desc="add_event_report", ) + async def get_event_reports_paginate( + self, + start: int, + limit: int, + direction: str = "b", + user_id: Optional[str] = None, + room_id: Optional[str] = None, + ) -> Tuple[List[Dict[str, Any]], int]: + """Retrieve a paginated list of event reports + + Args: + start: event offset to begin the query from + limit: number of rows to retrieve + direction: Whether to fetch the most recent first (`"b"`) or the + oldest first (`"f"`) + user_id: search for user_id. Ignored if user_id is None + room_id: search for room_id. Ignored if room_id is None + Returns: + event_reports: json list of event reports + count: total number of event reports matching the filter criteria + """ + + def _get_event_reports_paginate_txn(txn): + filters = [] + args = [] + + if user_id: + filters.append("er.user_id LIKE ?") + args.extend(["%" + user_id + "%"]) + if room_id: + filters.append("er.room_id LIKE ?") + args.extend(["%" + room_id + "%"]) + + if direction == "b": + order = "DESC" + else: + order = "ASC" + + where_clause = "WHERE " + " AND ".join(filters) if len(filters) > 0 else "" + + sql = """ + SELECT COUNT(*) as total_event_reports + FROM event_reports AS er + {} + """.format( + where_clause + ) + txn.execute(sql, args) + count = txn.fetchone()[0] + + sql = """ + SELECT + er.id, + er.received_ts, + er.room_id, + er.event_id, + er.user_id, + er.reason, + er.content, + events.sender, + room_aliases.room_alias, + event_json.json AS event_json + FROM event_reports AS er + LEFT JOIN room_aliases + ON room_aliases.room_id = er.room_id + JOIN events + ON events.event_id = er.event_id + JOIN event_json + ON event_json.event_id = er.event_id + {where_clause} + ORDER BY er.received_ts {order} + LIMIT ? + OFFSET ? 
+ """.format( + where_clause=where_clause, order=order, + ) + + args += [limit, start] + txn.execute(sql, args) + event_reports = self.db_pool.cursor_to_dict(txn) + + if count > 0: + for row in event_reports: + try: + row["content"] = db_to_json(row["content"]) + row["event_json"] = db_to_json(row["event_json"]) + except Exception: + continue + + return event_reports, count + + return await self.db_pool.runInteraction( + "get_event_reports_paginate", _get_event_reports_paginate_txn + ) + def get_current_public_room_stream_id(self): return self._public_room_id_gen.get_current_token() diff --git a/tests/rest/admin/test_event_reports.py b/tests/rest/admin/test_event_reports.py new file mode 100644 index 000000000000..bf79086f7813 --- /dev/null +++ b/tests/rest/admin/test_event_reports.py @@ -0,0 +1,382 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Dirk Klimpel +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json + +import synapse.rest.admin +from synapse.api.errors import Codes +from synapse.rest.client.v1 import login, room +from synapse.rest.client.v2_alpha import report_event + +from tests import unittest + + +class EventReportsTestCase(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + room.register_servlets, + report_event.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.store = hs.get_datastore() + + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + self.other_user = self.register_user("user", "pass") + self.other_user_tok = self.login("user", "pass") + + self.room_id1 = self.helper.create_room_as( + self.other_user, tok=self.other_user_tok, is_public=True + ) + self.helper.join(self.room_id1, user=self.admin_user, tok=self.admin_user_tok) + + self.room_id2 = self.helper.create_room_as( + self.other_user, tok=self.other_user_tok, is_public=True + ) + self.helper.join(self.room_id2, user=self.admin_user, tok=self.admin_user_tok) + + # Two rooms and two users. Every user sends and reports every room event + for i in range(5): + self._create_event_and_report( + room_id=self.room_id1, user_tok=self.other_user_tok, + ) + for i in range(5): + self._create_event_and_report( + room_id=self.room_id2, user_tok=self.other_user_tok, + ) + for i in range(5): + self._create_event_and_report( + room_id=self.room_id1, user_tok=self.admin_user_tok, + ) + for i in range(5): + self._create_event_and_report( + room_id=self.room_id2, user_tok=self.admin_user_tok, + ) + + self.url = "/_synapse/admin/v1/event_reports" + + def test_requester_is_no_admin(self): + """ + If the user is not a server admin, an error 403 is returned. 
+ """ + + request, channel = self.make_request( + "GET", self.url, access_token=self.other_user_tok, + ) + self.render(request) + + self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + def test_default_success(self): + """ + Testing list of reported events + """ + + request, channel = self.make_request( + "GET", self.url, access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], 20) + self.assertEqual(len(channel.json_body["event_reports"]), 20) + self.assertNotIn("next_token", channel.json_body) + self._check_fields(channel.json_body["event_reports"]) + + def test_limit(self): + """ + Testing list of reported events with limit + """ + + request, channel = self.make_request( + "GET", self.url + "?limit=5", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], 20) + self.assertEqual(len(channel.json_body["event_reports"]), 5) + self.assertEqual(channel.json_body["next_token"], 5) + self._check_fields(channel.json_body["event_reports"]) + + def test_from(self): + """ + Testing list of reported events with a defined starting point (from) + """ + + request, channel = self.make_request( + "GET", self.url + "?from=5", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], 20) + self.assertEqual(len(channel.json_body["event_reports"]), 15) + self.assertNotIn("next_token", channel.json_body) + self._check_fields(channel.json_body["event_reports"]) + + def test_limit_and_from(self): + """ + Testing list of reported events with a defined starting point and limit + """ + + request, channel = self.make_request( + "GET", self.url + "?from=5&limit=10", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], 20) + self.assertEqual(channel.json_body["next_token"], 15) + self.assertEqual(len(channel.json_body["event_reports"]), 10) + self._check_fields(channel.json_body["event_reports"]) + + def test_filter_room(self): + """ + Testing list of reported events with a filter of room + """ + + request, channel = self.make_request( + "GET", + self.url + "?room_id=%s" % self.room_id1, + access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], 10) + self.assertEqual(len(channel.json_body["event_reports"]), 10) + self.assertNotIn("next_token", channel.json_body) + self._check_fields(channel.json_body["event_reports"]) + + for report in channel.json_body["event_reports"]: + self.assertEqual(report["room_id"], self.room_id1) + + def test_filter_user(self): + """ + Testing list of reported events with a filter of user + """ + + request, channel = self.make_request( + "GET", + self.url + "?user_id=%s" % self.other_user, + access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], 10) + 
self.assertEqual(len(channel.json_body["event_reports"]), 10) + self.assertNotIn("next_token", channel.json_body) + self._check_fields(channel.json_body["event_reports"]) + + for report in channel.json_body["event_reports"]: + self.assertEqual(report["user_id"], self.other_user) + + def test_filter_user_and_room(self): + """ + Testing list of reported events with a filter of user and room + """ + + request, channel = self.make_request( + "GET", + self.url + "?user_id=%s&room_id=%s" % (self.other_user, self.room_id1), + access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], 5) + self.assertEqual(len(channel.json_body["event_reports"]), 5) + self.assertNotIn("next_token", channel.json_body) + self._check_fields(channel.json_body["event_reports"]) + + for report in channel.json_body["event_reports"]: + self.assertEqual(report["user_id"], self.other_user) + self.assertEqual(report["room_id"], self.room_id1) + + def test_valid_search_order(self): + """ + Testing search order. Order by timestamps. + """ + + # fetch the most recent first, largest timestamp + request, channel = self.make_request( + "GET", self.url + "?dir=b", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], 20) + self.assertEqual(len(channel.json_body["event_reports"]), 20) + report = 1 + while report < len(channel.json_body["event_reports"]): + self.assertGreaterEqual( + channel.json_body["event_reports"][report - 1]["received_ts"], + channel.json_body["event_reports"][report]["received_ts"], + ) + report += 1 + + # fetch the oldest first, smallest timestamp + request, channel = self.make_request( + "GET", self.url + "?dir=f", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], 20) + self.assertEqual(len(channel.json_body["event_reports"]), 20) + report = 1 + while report < len(channel.json_body["event_reports"]): + self.assertLessEqual( + channel.json_body["event_reports"][report - 1]["received_ts"], + channel.json_body["event_reports"][report]["received_ts"], + ) + report += 1 + + def test_invalid_search_order(self): + """ + Testing that a invalid search order returns a 400 + """ + + request, channel = self.make_request( + "GET", self.url + "?dir=bar", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + self.assertEqual("Unknown direction: bar", channel.json_body["error"]) + + def test_limit_is_negative(self): + """ + Testing that a negative list parameter returns a 400 + """ + + request, channel = self.make_request( + "GET", self.url + "?limit=-5", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + + def test_from_is_negative(self): + """ + Testing that a negative from parameter returns a 400 + """ + + request, channel = self.make_request( + "GET", self.url + "?from=-5", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, int(channel.result["code"]), 
msg=channel.result["body"]) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + + def test_next_token(self): + """ + Testing that `next_token` appears at the right place + """ + + # `next_token` does not appear + # Number of results is the number of entries + request, channel = self.make_request( + "GET", self.url + "?limit=20", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], 20) + self.assertEqual(len(channel.json_body["event_reports"]), 20) + self.assertNotIn("next_token", channel.json_body) + + # `next_token` does not appear + # Number of max results is larger than the number of entries + request, channel = self.make_request( + "GET", self.url + "?limit=21", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], 20) + self.assertEqual(len(channel.json_body["event_reports"]), 20) + self.assertNotIn("next_token", channel.json_body) + + # `next_token` does appear + # Number of max results is smaller than the number of entries + request, channel = self.make_request( + "GET", self.url + "?limit=19", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], 20) + self.assertEqual(len(channel.json_body["event_reports"]), 19) + self.assertEqual(channel.json_body["next_token"], 19) + + # Check + # Set `from` to value of `next_token` for request remaining entries + # `next_token` does not appear + request, channel = self.make_request( + "GET", self.url + "?from=19", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], 20) + self.assertEqual(len(channel.json_body["event_reports"]), 1) + self.assertNotIn("next_token", channel.json_body) + + def _create_event_and_report(self, room_id, user_tok): + """Create and report events + """ + resp = self.helper.send(room_id, tok=user_tok) + event_id = resp["event_id"] + + request, channel = self.make_request( + "POST", + "rooms/%s/report/%s" % (room_id, event_id), + json.dumps({"score": -100, "reason": "this makes me sad"}), + access_token=user_tok, + ) + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + + def _check_fields(self, content): + """Checks that all attributes are present in a event report + """ + for c in content: + self.assertIn("id", c) + self.assertIn("received_ts", c) + self.assertIn("room_id", c) + self.assertIn("event_id", c) + self.assertIn("user_id", c) + self.assertIn("reason", c) + self.assertIn("content", c) + self.assertIn("sender", c) + self.assertIn("room_alias", c) + self.assertIn("event_json", c) + self.assertIn("score", c["content"]) + self.assertIn("reason", c["content"]) + self.assertIn("auth_events", c["event_json"]) + self.assertIn("type", c["event_json"]) + self.assertIn("room_id", c["event_json"]) + self.assertIn("sender", c["event_json"]) + self.assertIn("content", c["event_json"]) From 8998217540bc41975e64e44c507632361ca95698 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Tue, 22 Sep 2020 19:19:01 +0200 Subject: [PATCH 041/245] Fixed a bug with reactivating users 
with the admin API (#8362) Fixes: #8359 Trying to reactivate a user with the admin API (`PUT /_synapse/admin/v2/users/`) causes an internal server error. Seems to be a regression in #8033. --- changelog.d/8362.bugfix | 1 + .../storage/databases/main/user_erasure_store.py | 2 +- tests/rest/admin/test_user.py | 14 ++++++++++++++ 3 files changed, 16 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8362.bugfix diff --git a/changelog.d/8362.bugfix b/changelog.d/8362.bugfix new file mode 100644 index 000000000000..4e50067c87b5 --- /dev/null +++ b/changelog.d/8362.bugfix @@ -0,0 +1 @@ +Fixed a regression in v1.19.0 with reactivating users through the admin API. diff --git a/synapse/storage/databases/main/user_erasure_store.py b/synapse/storage/databases/main/user_erasure_store.py index 2f7c95fc7431..f9575b1f1fd8 100644 --- a/synapse/storage/databases/main/user_erasure_store.py +++ b/synapse/storage/databases/main/user_erasure_store.py @@ -100,7 +100,7 @@ def f(txn): return # They are there, delete them. - self.simple_delete_one_txn( + self.db_pool.simple_delete_one_txn( txn, "erased_users", keyvalues={"user_id": user_id} ) diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index f96011fc1c8f..98d062373497 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -874,6 +874,10 @@ def test_reactivate_user(self): ) self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self._is_erased("@user:test", False) + d = self.store.mark_user_erased("@user:test") + self.assertIsNone(self.get_success(d)) + self._is_erased("@user:test", True) # Attempt to reactivate the user (without a password). request, channel = self.make_request( @@ -906,6 +910,7 @@ def test_reactivate_user(self): self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@user:test", channel.json_body["name"]) self.assertEqual(False, channel.json_body["deactivated"]) + self._is_erased("@user:test", False) def test_set_user_as_admin(self): """ @@ -996,6 +1001,15 @@ def test_accidental_deactivation_prevention(self): # Ensure they're still alive self.assertEqual(0, channel.json_body["deactivated"]) + def _is_erased(self, user_id, expect): + """Assert that the user is erased or not + """ + d = self.store.is_user_erased(user_id) + if expect: + self.assertTrue(self.get_success(d)) + else: + self.assertFalse(self.get_success(d)) + class UserMembershipRestTestCase(unittest.HomeserverTestCase): From 4325be1a52b9054a2c1096dcdb29ee79d9ad4ead Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 22 Sep 2020 19:39:29 +0100 Subject: [PATCH 042/245] Fix missing null character check on guest_access room state When updating room_stats_state, we try to check for null bytes slipping in to the content for state events. It turns out we had added guest_access as a field to room_stats_state without including it in the null byte check. Lo and behold, a null byte in a m.room.guest_access event then breaks room_stats_state updates. This PR adds the check for guest_access. A further PR will improve this function so that this hopefully does not happen again in future. 
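To make the sanitisation described above concrete, here is a minimal, self-contained sketch of a null-byte check over the affected fields. The helper name, the abbreviated field list, and the choice to replace a bad value with `None` are illustrative assumptions for this example only; the patch itself (below) only shows `guest_access` being added to the existing checked-fields tuple in `update_room_state`.

```python
from typing import Any, Dict

# Abbreviated, illustrative list of room_stats_state fields that are copied
# out of event content and therefore may contain arbitrary strings.
CHECKED_FIELDS = ("topic", "avatar", "canonical_alias", "guest_access")


def sanitise_room_stats_fields(fields: Dict[str, Any]) -> Dict[str, Any]:
    """Drop any checked field that is not a string or that contains a null
    byte, so it can never reach the room_stats_state update (sketch only)."""
    for col in CHECKED_FIELDS:
        value = fields.get(col)
        if value is not None and (not isinstance(value, str) or "\0" in value):
            # e.g. a null byte smuggled into m.room.guest_access content
            fields[col] = None
    return fields


# A malformed guest_access value is discarded instead of breaking the update.
print(sanitise_room_stats_fields({"guest_access": "can_join\0", "topic": "ok"}))
# -> {'guest_access': None, 'topic': 'ok'}
```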
--- synapse/storage/databases/main/stats.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index d7816a8606ab..5beb302be343 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -210,6 +210,7 @@ async def update_room_state(self, room_id: str, fields: Dict[str, Any]) -> None: * topic * avatar * canonical_alias + * guest_access A is_federatable key can also be included with a boolean value. @@ -234,6 +235,7 @@ async def update_room_state(self, room_id: str, fields: Dict[str, Any]) -> None: "topic", "avatar", "canonical_alias", + "guest_access", ): field = fields.get(col, sentinel) if field is not sentinel and (not isinstance(field, str) or "\0" in field): From 48336eeb85457e356a7a23619776dc598ebd2189 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 22 Sep 2020 14:54:23 +0100 Subject: [PATCH 043/245] Changelog --- changelog.d/8373.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/8373.bugfix diff --git a/changelog.d/8373.bugfix b/changelog.d/8373.bugfix new file mode 100644 index 000000000000..e9d66a2088c4 --- /dev/null +++ b/changelog.d/8373.bugfix @@ -0,0 +1 @@ +Include `guest_access` in the fields that are checked for null bytes when updating `room_stats_state`. Broke in v1.7.2. \ No newline at end of file From a4e63e5a47a855884ae3aea41dfbfa464bddb744 Mon Sep 17 00:00:00 2001 From: Julian Fietkau <1278511+jfietkau@users.noreply.github.com> Date: Wed, 23 Sep 2020 12:14:08 +0200 Subject: [PATCH 044/245] Add note to reverse_proxy.md about disabling Apache's mod_security2 (#8375) This change adds a note and a few lines of configuration settings for Apache users to disable ModSecurity for Synapse's virtual hosts. With ModSecurity enabled and running with its default settings, Matrix clients are unable to send chat messages through the Synapse installation. With this change, ModSecurity can be disabled only for the Synapse virtual hosts. --- changelog.d/8375.doc | 1 + docs/reverse_proxy.md | 8 ++++++++ 2 files changed, 9 insertions(+) create mode 100644 changelog.d/8375.doc diff --git a/changelog.d/8375.doc b/changelog.d/8375.doc new file mode 100644 index 000000000000..d291fb92fa18 --- /dev/null +++ b/changelog.d/8375.doc @@ -0,0 +1 @@ +Add note to the reverse proxy settings documentation about disabling Apache's mod_security2. Contributed by Julian Fietkau (@jfietkau). diff --git a/docs/reverse_proxy.md b/docs/reverse_proxy.md index edd109fa7b2b..46d8f3577122 100644 --- a/docs/reverse_proxy.md +++ b/docs/reverse_proxy.md @@ -121,6 +121,14 @@ example.com:8448 { **NOTE**: ensure the `nocanon` options are included. +**NOTE 2**: It appears that Synapse is currently incompatible with the ModSecurity module for Apache (`mod_security2`). If you need it enabled for other services on your web server, you can disable it for Synapse's two VirtualHosts by including the following lines before each of the two `
` above: + +``` + + SecRuleEngine off + +``` + ### HAProxy ``` From bbde4038dff379fdf48b914782a73a6889135a56 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 23 Sep 2020 06:45:37 -0400 Subject: [PATCH 045/245] Do not check lint/test dependencies at runtime. (#8377) moves non-runtime dependencies out of synapse.python_dependencies (test and lint) --- changelog.d/8330.misc | 2 +- changelog.d/8377.misc | 1 + setup.py | 16 ++++++++++++++++ synapse/python_dependencies.py | 13 ++++--------- tox.ini | 8 +++----- 5 files changed, 25 insertions(+), 15 deletions(-) create mode 100644 changelog.d/8377.misc diff --git a/changelog.d/8330.misc b/changelog.d/8330.misc index c51370f215d4..fbfdd524730a 100644 --- a/changelog.d/8330.misc +++ b/changelog.d/8330.misc @@ -1 +1 @@ -Move lint-related dependencies to package-extra field, update CONTRIBUTING.md to utilise this. \ No newline at end of file +Move lint-related dependencies to package-extra field, update CONTRIBUTING.md to utilise this. diff --git a/changelog.d/8377.misc b/changelog.d/8377.misc new file mode 100644 index 000000000000..fbfdd524730a --- /dev/null +++ b/changelog.d/8377.misc @@ -0,0 +1 @@ +Move lint-related dependencies to package-extra field, update CONTRIBUTING.md to utilise this. diff --git a/setup.py b/setup.py index 54ddec8f9f59..926b1bc86fa8 100755 --- a/setup.py +++ b/setup.py @@ -94,6 +94,22 @@ def exec_file(path_segments): # Make `pip install matrix-synapse[all]` install all the optional dependencies. CONDITIONAL_REQUIREMENTS["all"] = list(ALL_OPTIONAL_REQUIREMENTS) +# Developer dependencies should not get included in "all". +# +# We pin black so that our tests don't start failing on new releases. +CONDITIONAL_REQUIREMENTS["lint"] = [ + "isort==5.0.3", + "black==19.10b0", + "flake8-comprehensions", + "flake8", +] + +# Dependencies which are exclusively required by unit test code. This is +# NOT a list of all modules that are necessary to run the unit tests. +# Tests assume that all optional dependencies are installed. +# +# parameterized_class decorator was introduced in parameterized 0.7.0 +CONDITIONAL_REQUIREMENTS["test"] = ["mock>=2.0", "parameterized>=0.7.0"] setup( name="matrix-synapse", diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 67f019fd2237..288631477eaf 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -37,6 +37,9 @@ # installed when that optional dependency requirement is specified. It is passed # to setup() as extras_require in setup.py # +# Note that these both represent runtime dependencies (and the versions +# installed are checked at runtime). +# # [1] https://pip.pypa.io/en/stable/reference/pip_install/#requirement-specifiers. REQUIREMENTS = [ @@ -92,20 +95,12 @@ "oidc": ["authlib>=0.14.0"], "systemd": ["systemd-python>=231"], "url_preview": ["lxml>=3.5.0"], - # Dependencies which are exclusively required by unit test code. This is - # NOT a list of all modules that are necessary to run the unit tests. - # Tests assume that all optional dependencies are installed. - # - # parameterized_class decorator was introduced in parameterized 0.7.0 - "test": ["mock>=2.0", "parameterized>=0.7.0"], "sentry": ["sentry-sdk>=0.7.2"], "opentracing": ["jaeger-client>=4.0.0", "opentracing>=2.2.0"], "jwt": ["pyjwt>=1.6.4"], # hiredis is not a *strict* dependency, but it makes things much faster. # (if it is not installed, we fall back to slow code.) 
"redis": ["txredisapi>=1.4.7", "hiredis"], - # We pin black so that our tests don't start failing on new releases. - "lint": ["isort==5.0.3", "black==19.10b0", "flake8-comprehensions", "flake8"], } ALL_OPTIONAL_REQUIREMENTS = set() # type: Set[str] @@ -113,7 +108,7 @@ for name, optional_deps in CONDITIONAL_REQUIREMENTS.items(): # Exclude systemd as it's a system-based requirement. # Exclude lint as it's a dev-based requirement. - if name not in ["systemd", "lint"]: + if name not in ["systemd"]: ALL_OPTIONAL_REQUIREMENTS = set(optional_deps) | ALL_OPTIONAL_REQUIREMENTS diff --git a/tox.ini b/tox.ini index ddcab0198fc2..4d132eff4cab 100644 --- a/tox.ini +++ b/tox.ini @@ -2,13 +2,12 @@ envlist = packaging, py35, py36, py37, py38, check_codestyle, check_isort [base] +extras = test deps = - mock python-subunit junitxml coverage coverage-enable-subprocess - parameterized # cyptography 2.2 requires setuptools >= 18.5 # @@ -36,7 +35,7 @@ setenv = [testenv] deps = {[base]deps} -extras = all +extras = all, test whitelist_externals = sh @@ -84,7 +83,6 @@ deps = # Old automat version for Twisted Automat == 0.3.0 - mock lxml coverage coverage-enable-subprocess @@ -97,7 +95,7 @@ commands = /bin/sh -c 'python -m synapse.python_dependencies | sed -e "s/>=/==/g" -e "s/psycopg2==2.6//" -e "s/pyopenssl==16.0.0/pyopenssl==17.0.0/" | xargs -d"\n" pip install' # Install Synapse itself. This won't update any libraries. - pip install -e . + pip install -e ".[test]" {envbindir}/coverage run "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:} From 916bb9d0d15cf941e73b2e808c553a1edd1c2eb9 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Wed, 23 Sep 2020 17:06:28 +0200 Subject: [PATCH 046/245] Don't push if an user account has expired (#8353) --- changelog.d/8353.bugfix | 1 + synapse/api/auth.py | 6 +----- synapse/push/pusherpool.py | 18 ++++++++++++++++++ synapse/storage/databases/main/registration.py | 14 ++++++++++++++ 4 files changed, 34 insertions(+), 5 deletions(-) create mode 100644 changelog.d/8353.bugfix diff --git a/changelog.d/8353.bugfix b/changelog.d/8353.bugfix new file mode 100644 index 000000000000..45fc0adb8dd5 --- /dev/null +++ b/changelog.d/8353.bugfix @@ -0,0 +1 @@ +Don't send push notifications to expired user accounts. diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 75388643ee6f..1071a0576e5e 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -218,11 +218,7 @@ async def get_user_by_req( # Deny the request if the user account has expired. if self._account_validity.enabled and not allow_expired: user_id = user.to_string() - expiration_ts = await self.store.get_expiration_ts_for_user(user_id) - if ( - expiration_ts is not None - and self.clock.time_msec() >= expiration_ts - ): + if await self.store.is_account_expired(user_id, self.clock.time_msec()): raise AuthError( 403, "User account has expired", errcode=Codes.EXPIRED_ACCOUNT ) diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index cc839ffce43d..76150e117b2b 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -60,6 +60,8 @@ def __init__(self, hs: "HomeServer"): self.store = self.hs.get_datastore() self.clock = self.hs.get_clock() + self._account_validity = hs.config.account_validity + # We shard the handling of push notifications by user ID. 
self._pusher_shard_config = hs.config.push.pusher_shard_config self._instance_name = hs.get_instance_name() @@ -202,6 +204,14 @@ async def on_new_notifications(self, max_stream_id: int): ) for u in users_affected: + # Don't push if the user account has expired + if self._account_validity.enabled: + expired = await self.store.is_account_expired( + u, self.clock.time_msec() + ) + if expired: + continue + if u in self.pushers: for p in self.pushers[u].values(): p.on_new_notifications(max_stream_id) @@ -222,6 +232,14 @@ async def on_new_receipts(self, min_stream_id, max_stream_id, affected_room_ids) ) for u in users_affected: + # Don't push if the user account has expired + if self._account_validity.enabled: + expired = await self.store.is_account_expired( + u, self.clock.time_msec() + ) + if expired: + continue + if u in self.pushers: for p in self.pushers[u].values(): p.on_new_receipts(min_stream_id, max_stream_id) diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 675e81fe3436..33825e894936 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -116,6 +116,20 @@ async def get_expiration_ts_for_user(self, user_id: str) -> Optional[int]: desc="get_expiration_ts_for_user", ) + async def is_account_expired(self, user_id: str, current_ts: int) -> bool: + """ + Returns whether an user account is expired. + + Args: + user_id: The user's ID + current_ts: The current timestamp + + Returns: + Whether the user account has expired + """ + expiration_ts = await self.get_expiration_ts_for_user(user_id) + return expiration_ts is not None and current_ts >= expiration_ts + async def set_account_validity_for_user( self, user_id: str, From cbabb312e0b59090e5a8cf9e7e016a8618e62867 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 23 Sep 2020 16:11:18 +0100 Subject: [PATCH 047/245] Use `async with` for ID gens (#8383) This will allow us to hit the DB after we've finished using the generated stream ID. --- changelog.d/8383.misc | 1 + .../storage/databases/main/account_data.py | 4 +- synapse/storage/databases/main/deviceinbox.py | 4 +- synapse/storage/databases/main/devices.py | 6 +- .../storage/databases/main/end_to_end_keys.py | 2 +- synapse/storage/databases/main/events.py | 6 +- .../storage/databases/main/group_server.py | 2 +- synapse/storage/databases/main/presence.py | 4 +- synapse/storage/databases/main/push_rule.py | 8 +- synapse/storage/databases/main/pusher.py | 4 +- synapse/storage/databases/main/receipts.py | 2 +- synapse/storage/databases/main/room.py | 6 +- synapse/storage/databases/main/tags.py | 4 +- synapse/storage/util/id_generators.py | 130 ++++++++++-------- tests/storage/test_id_generators.py | 66 +++++---- 15 files changed, 144 insertions(+), 105 deletions(-) create mode 100644 changelog.d/8383.misc diff --git a/changelog.d/8383.misc b/changelog.d/8383.misc new file mode 100644 index 000000000000..cb8318bf5704 --- /dev/null +++ b/changelog.d/8383.misc @@ -0,0 +1 @@ +Refactor ID generators to use `async with` syntax. 
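The calling convention this refactor moves to is easier to see in isolation than in the store-by-store diffs that follow. The sketch below uses a toy generator rather than the real `StreamIdGenerator`/`MultiWriterIdGenerator`, purely to contrast the old `with await gen.get_next()` pattern with the new `async with gen.get_next()` pattern, under which the generator can await database work both when the context is entered and when it is left.

```python
import asyncio
from contextlib import asynccontextmanager


class ToyIdGenerator:
    """Stand-in for Synapse's stream ID generators, used only to show the
    `async with` calling convention; it is not the real implementation."""

    def __init__(self) -> None:
        self._current = 0

    def get_next(self):
        @asynccontextmanager
        async def manager():
            # With an async context manager the generator may await DB calls
            # here, before handing out the ID...
            self._current += 1
            try:
                yield self._current
            finally:
                # ...and again here, after the caller has finished with it.
                pass

        return manager()


async def main() -> None:
    id_gen = ToyIdGenerator()
    # Old style (pre-patch):  with await id_gen.get_next() as stream_id: ...
    # New style (this patch):
    async with id_gen.get_next() as stream_id:
        print("persisting with stream id", stream_id)


asyncio.run(main())
```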
diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index c5a36990e417..ef81d73573f0 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -339,7 +339,7 @@ async def add_account_data_to_room( """ content_json = json_encoder.encode(content) - with await self._account_data_id_gen.get_next() as next_id: + async with self._account_data_id_gen.get_next() as next_id: # no need to lock here as room_account_data has a unique constraint # on (user_id, room_id, account_data_type) so simple_upsert will # retry if there is a conflict. @@ -387,7 +387,7 @@ async def add_account_data_for_user( """ content_json = json_encoder.encode(content) - with await self._account_data_id_gen.get_next() as next_id: + async with self._account_data_id_gen.get_next() as next_id: # no need to lock here as account_data has a unique constraint on # (user_id, account_data_type) so simple_upsert will retry if # there is a conflict. diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index e71217a41f3c..d42faa3f1f69 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -362,7 +362,7 @@ def add_messages_txn(txn, now_ms, stream_id): rows.append((destination, stream_id, now_ms, edu_json)) txn.executemany(sql, rows) - with await self._device_inbox_id_gen.get_next() as stream_id: + async with self._device_inbox_id_gen.get_next() as stream_id: now_ms = self.clock.time_msec() await self.db_pool.runInteraction( "add_messages_to_device_inbox", add_messages_txn, now_ms, stream_id @@ -411,7 +411,7 @@ def add_messages_txn(txn, now_ms, stream_id): txn, stream_id, local_messages_by_user_then_device ) - with await self._device_inbox_id_gen.get_next() as stream_id: + async with self._device_inbox_id_gen.get_next() as stream_id: now_ms = self.clock.time_msec() await self.db_pool.runInteraction( "add_messages_from_remote_to_device_inbox", diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index c04374e43d11..fdf394c61205 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -377,7 +377,7 @@ async def add_user_signature_change_to_streams( THe new stream ID. 
""" - with await self._device_list_id_gen.get_next() as stream_id: + async with self._device_list_id_gen.get_next() as stream_id: await self.db_pool.runInteraction( "add_user_sig_change_to_streams", self._add_user_signature_change_txn, @@ -1093,7 +1093,7 @@ async def add_device_change_to_streams( if not device_ids: return - with await self._device_list_id_gen.get_next_mult( + async with self._device_list_id_gen.get_next_mult( len(device_ids) ) as stream_ids: await self.db_pool.runInteraction( @@ -1108,7 +1108,7 @@ async def add_device_change_to_streams( return stream_ids[-1] context = get_active_span_text_map() - with await self._device_list_id_gen.get_next_mult( + async with self._device_list_id_gen.get_next_mult( len(hosts) * len(device_ids) ) as stream_ids: await self.db_pool.runInteraction( diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index c8df0bcb3fe5..22e1ed15d056 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -831,7 +831,7 @@ async def set_e2e_cross_signing_key(self, user_id, key_type, key): key (dict): the key data """ - with await self._cross_signing_id_gen.get_next() as stream_id: + async with self._cross_signing_id_gen.get_next() as stream_id: return await self.db_pool.runInteraction( "add_e2e_cross_signing_key", self._set_e2e_cross_signing_key_txn, diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 9a80f419e343..7723d82496c2 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -156,15 +156,15 @@ async def _persist_events_and_state_updates( # Note: Multiple instances of this function cannot be in flight at # the same time for the same room. 
if backfilled: - stream_ordering_manager = await self._backfill_id_gen.get_next_mult( + stream_ordering_manager = self._backfill_id_gen.get_next_mult( len(events_and_contexts) ) else: - stream_ordering_manager = await self._stream_id_gen.get_next_mult( + stream_ordering_manager = self._stream_id_gen.get_next_mult( len(events_and_contexts) ) - with stream_ordering_manager as stream_orderings: + async with stream_ordering_manager as stream_orderings: for (event, context), stream in zip(events_and_contexts, stream_orderings): event.internal_metadata.stream_ordering = stream diff --git a/synapse/storage/databases/main/group_server.py b/synapse/storage/databases/main/group_server.py index ccfbb2135eba..721819196530 100644 --- a/synapse/storage/databases/main/group_server.py +++ b/synapse/storage/databases/main/group_server.py @@ -1265,7 +1265,7 @@ def _register_user_group_membership_txn(txn, next_id): return next_id - with await self._group_updates_id_gen.get_next() as next_id: + async with self._group_updates_id_gen.get_next() as next_id: res = await self.db_pool.runInteraction( "register_user_group_membership", _register_user_group_membership_txn, diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py index c9f655dfb707..dbbb99cb95fb 100644 --- a/synapse/storage/databases/main/presence.py +++ b/synapse/storage/databases/main/presence.py @@ -23,11 +23,11 @@ class PresenceStore(SQLBaseStore): async def update_presence(self, presence_states): - stream_ordering_manager = await self._presence_id_gen.get_next_mult( + stream_ordering_manager = self._presence_id_gen.get_next_mult( len(presence_states) ) - with stream_ordering_manager as stream_orderings: + async with stream_ordering_manager as stream_orderings: await self.db_pool.runInteraction( "update_presence", self._update_presence_txn, diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index e20a16f90758..711d5aa23d6a 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -338,7 +338,7 @@ async def add_push_rule( ) -> None: conditions_json = json_encoder.encode(conditions) actions_json = json_encoder.encode(actions) - with await self._push_rules_stream_id_gen.get_next() as stream_id: + async with self._push_rules_stream_id_gen.get_next() as stream_id: event_stream_ordering = self._stream_id_gen.get_current_token() if before or after: @@ -585,7 +585,7 @@ def delete_push_rule_txn(txn, stream_id, event_stream_ordering): txn, stream_id, event_stream_ordering, user_id, rule_id, op="DELETE" ) - with await self._push_rules_stream_id_gen.get_next() as stream_id: + async with self._push_rules_stream_id_gen.get_next() as stream_id: event_stream_ordering = self._stream_id_gen.get_current_token() await self.db_pool.runInteraction( @@ -616,7 +616,7 @@ async def set_push_rule_enabled( Raises: NotFoundError if the rule does not exist. 
""" - with await self._push_rules_stream_id_gen.get_next() as stream_id: + async with self._push_rules_stream_id_gen.get_next() as stream_id: event_stream_ordering = self._stream_id_gen.get_current_token() await self.db_pool.runInteraction( "_set_push_rule_enabled_txn", @@ -754,7 +754,7 @@ def set_push_rule_actions_txn(txn, stream_id, event_stream_ordering): data={"actions": actions_json}, ) - with await self._push_rules_stream_id_gen.get_next() as stream_id: + async with self._push_rules_stream_id_gen.get_next() as stream_id: event_stream_ordering = self._stream_id_gen.get_current_token() await self.db_pool.runInteraction( diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index c38846827338..df8609b97bea 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -281,7 +281,7 @@ async def add_pusher( last_stream_ordering, profile_tag="", ) -> None: - with await self._pushers_id_gen.get_next() as stream_id: + async with self._pushers_id_gen.get_next() as stream_id: # no need to lock because `pushers` has a unique key on # (app_id, pushkey, user_name) so simple_upsert will retry await self.db_pool.simple_upsert( @@ -344,7 +344,7 @@ def delete_pusher_txn(txn, stream_id): }, ) - with await self._pushers_id_gen.get_next() as stream_id: + async with self._pushers_id_gen.get_next() as stream_id: await self.db_pool.runInteraction( "delete_pusher", delete_pusher_txn, stream_id ) diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index f880b5e562cc..c79ddff6806f 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -524,7 +524,7 @@ def graph_to_linear(txn): "insert_receipt_conv", graph_to_linear ) - with await self._receipts_id_gen.get_next() as stream_id: + async with self._receipts_id_gen.get_next() as stream_id: event_ts = await self.db_pool.runInteraction( "insert_linearized_receipt", self.insert_linearized_receipt_txn, diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 3ee097abf7b3..3c7630857f5d 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -1137,7 +1137,7 @@ def store_room_txn(txn, next_id): }, ) - with await self._public_room_id_gen.get_next() as next_id: + async with self._public_room_id_gen.get_next() as next_id: await self.db_pool.runInteraction( "store_room_txn", store_room_txn, next_id ) @@ -1204,7 +1204,7 @@ def set_room_is_public_txn(txn, next_id): }, ) - with await self._public_room_id_gen.get_next() as next_id: + async with self._public_room_id_gen.get_next() as next_id: await self.db_pool.runInteraction( "set_room_is_public", set_room_is_public_txn, next_id ) @@ -1284,7 +1284,7 @@ def set_room_is_public_appservice_txn(txn, next_id): }, ) - with await self._public_room_id_gen.get_next() as next_id: + async with self._public_room_id_gen.get_next() as next_id: await self.db_pool.runInteraction( "set_room_is_public_appservice", set_room_is_public_appservice_txn, diff --git a/synapse/storage/databases/main/tags.py b/synapse/storage/databases/main/tags.py index 96ffe26cc9da..9f120d3cb66c 100644 --- a/synapse/storage/databases/main/tags.py +++ b/synapse/storage/databases/main/tags.py @@ -210,7 +210,7 @@ def add_tag_txn(txn, next_id): ) self._update_revision_txn(txn, user_id, room_id, next_id) - with await self._account_data_id_gen.get_next() as next_id: + async with 
self._account_data_id_gen.get_next() as next_id: await self.db_pool.runInteraction("add_tag", add_tag_txn, next_id) self.get_tags_for_user.invalidate((user_id,)) @@ -232,7 +232,7 @@ def remove_tag_txn(txn, next_id): txn.execute(sql, (user_id, room_id, tag)) self._update_revision_txn(txn, user_id, room_id, next_id) - with await self._account_data_id_gen.get_next() as next_id: + async with self._account_data_id_gen.get_next() as next_id: await self.db_pool.runInteraction("remove_tag", remove_tag_txn, next_id) self.get_tags_for_user.invalidate((user_id,)) diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 1de2b915877a..b0353ac2dcca 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -12,14 +12,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -import contextlib import heapq import logging import threading from collections import deque -from typing import Dict, List, Set +from contextlib import contextmanager +from typing import Dict, List, Optional, Set, Union +import attr from typing_extensions import Deque from synapse.storage.database import DatabasePool, LoggingTransaction @@ -86,7 +86,7 @@ class StreamIdGenerator: upwards, -1 to grow downwards. Usage: - with await stream_id_gen.get_next() as stream_id: + async with stream_id_gen.get_next() as stream_id: # ... persist event ... """ @@ -101,10 +101,10 @@ def __init__(self, db_conn, table, column, extra_tables=[], step=1): ) self._unfinished_ids = deque() # type: Deque[int] - async def get_next(self): + def get_next(self): """ Usage: - with await stream_id_gen.get_next() as stream_id: + async with stream_id_gen.get_next() as stream_id: # ... persist event ... """ with self._lock: @@ -113,7 +113,7 @@ async def get_next(self): self._unfinished_ids.append(next_id) - @contextlib.contextmanager + @contextmanager def manager(): try: yield next_id @@ -121,12 +121,12 @@ def manager(): with self._lock: self._unfinished_ids.remove(next_id) - return manager() + return _AsyncCtxManagerWrapper(manager()) - async def get_next_mult(self, n): + def get_next_mult(self, n): """ Usage: - with await stream_id_gen.get_next(n) as stream_ids: + async with stream_id_gen.get_next(n) as stream_ids: # ... persist events ... """ with self._lock: @@ -140,7 +140,7 @@ async def get_next_mult(self, n): for next_id in next_ids: self._unfinished_ids.append(next_id) - @contextlib.contextmanager + @contextmanager def manager(): try: yield next_ids @@ -149,7 +149,7 @@ def manager(): for next_id in next_ids: self._unfinished_ids.remove(next_id) - return manager() + return _AsyncCtxManagerWrapper(manager()) def get_current_token(self): """Returns the maximum stream id such that all stream ids less than or @@ -282,59 +282,23 @@ def _load_next_id_txn(self, txn) -> int: def _load_next_mult_id_txn(self, txn, n: int) -> List[int]: return self._sequence_gen.get_next_mult_txn(txn, n) - async def get_next(self): + def get_next(self): """ Usage: - with await stream_id_gen.get_next() as stream_id: + async with stream_id_gen.get_next() as stream_id: # ... persist event ... """ - next_id = await self._db.runInteraction("_load_next_id", self._load_next_id_txn) - - # Assert the fetched ID is actually greater than what we currently - # believe the ID to be. If not, then the sequence and table have got - # out of sync somehow. 
- with self._lock: - assert self._current_positions.get(self._instance_name, 0) < next_id - - self._unfinished_ids.add(next_id) - - @contextlib.contextmanager - def manager(): - try: - # Multiply by the return factor so that the ID has correct sign. - yield self._return_factor * next_id - finally: - self._mark_id_as_finished(next_id) - return manager() + return _MultiWriterCtxManager(self) - async def get_next_mult(self, n: int): + def get_next_mult(self, n: int): """ Usage: - with await stream_id_gen.get_next_mult(5) as stream_ids: + async with stream_id_gen.get_next_mult(5) as stream_ids: # ... persist events ... """ - next_ids = await self._db.runInteraction( - "_load_next_mult_id", self._load_next_mult_id_txn, n - ) - # Assert the fetched ID is actually greater than any ID we've already - # seen. If not, then the sequence and table have got out of sync - # somehow. - with self._lock: - assert max(self._current_positions.values(), default=0) < min(next_ids) - - self._unfinished_ids.update(next_ids) - - @contextlib.contextmanager - def manager(): - try: - yield [self._return_factor * i for i in next_ids] - finally: - for i in next_ids: - self._mark_id_as_finished(i) - - return manager() + return _MultiWriterCtxManager(self, n) def get_next_txn(self, txn: LoggingTransaction): """ @@ -482,3 +446,61 @@ def _add_persisted_position(self, new_id: int): # There was a gap in seen positions, so there is nothing more to # do. break + + +@attr.s(slots=True) +class _AsyncCtxManagerWrapper: + """Helper class to convert a plain context manager to an async one. + + This is mainly useful if you have a plain context manager but the interface + requires an async one. + """ + + inner = attr.ib() + + async def __aenter__(self): + return self.inner.__enter__() + + async def __aexit__(self, exc_type, exc, tb): + return self.inner.__exit__(exc_type, exc, tb) + + +@attr.s(slots=True) +class _MultiWriterCtxManager: + """Async context manager returned by MultiWriterIdGenerator + """ + + id_gen = attr.ib(type=MultiWriterIdGenerator) + multiple_ids = attr.ib(type=Optional[int], default=None) + stream_ids = attr.ib(type=List[int], factory=list) + + async def __aenter__(self) -> Union[int, List[int]]: + self.stream_ids = await self.id_gen._db.runInteraction( + "_load_next_mult_id", + self.id_gen._load_next_mult_id_txn, + self.multiple_ids or 1, + ) + + # Assert the fetched ID is actually greater than any ID we've already + # seen. If not, then the sequence and table have got out of sync + # somehow. + with self.id_gen._lock: + assert max(self.id_gen._current_positions.values(), default=0) < min( + self.stream_ids + ) + + self.id_gen._unfinished_ids.update(self.stream_ids) + + if self.multiple_ids is None: + return self.stream_ids[0] * self.id_gen._return_factor + else: + return [i * self.id_gen._return_factor for i in self.stream_ids] + + async def __aexit__(self, exc_type, exc, tb): + for i in self.stream_ids: + self.id_gen._mark_id_as_finished(i) + + if exc_type is not None: + return False + + return False diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py index 20636fc40027..fb8f5bc255f7 100644 --- a/tests/storage/test_id_generators.py +++ b/tests/storage/test_id_generators.py @@ -111,7 +111,7 @@ def test_single_instance(self): # advanced after we leave the context manager. 
async def _get_next_async(): - with await id_gen.get_next() as stream_id: + async with id_gen.get_next() as stream_id: self.assertEqual(stream_id, 8) self.assertEqual(id_gen.get_positions(), {"master": 7}) @@ -139,10 +139,10 @@ def test_out_of_order_finish(self): ctx3 = self.get_success(id_gen.get_next()) ctx4 = self.get_success(id_gen.get_next()) - s1 = ctx1.__enter__() - s2 = ctx2.__enter__() - s3 = ctx3.__enter__() - s4 = ctx4.__enter__() + s1 = self.get_success(ctx1.__aenter__()) + s2 = self.get_success(ctx2.__aenter__()) + s3 = self.get_success(ctx3.__aenter__()) + s4 = self.get_success(ctx4.__aenter__()) self.assertEqual(s1, 8) self.assertEqual(s2, 9) @@ -152,22 +152,22 @@ def test_out_of_order_finish(self): self.assertEqual(id_gen.get_positions(), {"master": 7}) self.assertEqual(id_gen.get_current_token_for_writer("master"), 7) - ctx2.__exit__(None, None, None) + self.get_success(ctx2.__aexit__(None, None, None)) self.assertEqual(id_gen.get_positions(), {"master": 7}) self.assertEqual(id_gen.get_current_token_for_writer("master"), 7) - ctx1.__exit__(None, None, None) + self.get_success(ctx1.__aexit__(None, None, None)) self.assertEqual(id_gen.get_positions(), {"master": 9}) self.assertEqual(id_gen.get_current_token_for_writer("master"), 9) - ctx4.__exit__(None, None, None) + self.get_success(ctx4.__aexit__(None, None, None)) self.assertEqual(id_gen.get_positions(), {"master": 9}) self.assertEqual(id_gen.get_current_token_for_writer("master"), 9) - ctx3.__exit__(None, None, None) + self.get_success(ctx3.__aexit__(None, None, None)) self.assertEqual(id_gen.get_positions(), {"master": 11}) self.assertEqual(id_gen.get_current_token_for_writer("master"), 11) @@ -190,7 +190,7 @@ def test_multi_instance(self): # advanced after we leave the context manager. 
async def _get_next_async(): - with await first_id_gen.get_next() as stream_id: + async with first_id_gen.get_next() as stream_id: self.assertEqual(stream_id, 8) self.assertEqual( @@ -208,7 +208,7 @@ async def _get_next_async(): # stream ID async def _get_next_async(): - with await second_id_gen.get_next() as stream_id: + async with second_id_gen.get_next() as stream_id: self.assertEqual(stream_id, 9) self.assertEqual( @@ -305,9 +305,13 @@ def test_get_persisted_upto_position_get_next(self): self.assertEqual(id_gen.get_positions(), {"first": 3, "second": 5}) self.assertEqual(id_gen.get_persisted_upto_position(), 3) - with self.get_success(id_gen.get_next()) as stream_id: - self.assertEqual(stream_id, 6) - self.assertEqual(id_gen.get_persisted_upto_position(), 3) + + async def _get_next_async(): + async with id_gen.get_next() as stream_id: + self.assertEqual(stream_id, 6) + self.assertEqual(id_gen.get_persisted_upto_position(), 3) + + self.get_success(_get_next_async()) self.assertEqual(id_gen.get_persisted_upto_position(), 6) @@ -373,16 +377,22 @@ def test_single_instance(self): """ id_gen = self._create_id_generator() - with self.get_success(id_gen.get_next()) as stream_id: - self._insert_row("master", stream_id) + async def _get_next_async(): + async with id_gen.get_next() as stream_id: + self._insert_row("master", stream_id) + + self.get_success(_get_next_async()) self.assertEqual(id_gen.get_positions(), {"master": -1}) self.assertEqual(id_gen.get_current_token_for_writer("master"), -1) self.assertEqual(id_gen.get_persisted_upto_position(), -1) - with self.get_success(id_gen.get_next_mult(3)) as stream_ids: - for stream_id in stream_ids: - self._insert_row("master", stream_id) + async def _get_next_async2(): + async with id_gen.get_next_mult(3) as stream_ids: + for stream_id in stream_ids: + self._insert_row("master", stream_id) + + self.get_success(_get_next_async2()) self.assertEqual(id_gen.get_positions(), {"master": -4}) self.assertEqual(id_gen.get_current_token_for_writer("master"), -4) @@ -402,18 +412,24 @@ def test_multiple_instance(self): id_gen_1 = self._create_id_generator("first") id_gen_2 = self._create_id_generator("second") - with self.get_success(id_gen_1.get_next()) as stream_id: - self._insert_row("first", stream_id) - id_gen_2.advance("first", stream_id) + async def _get_next_async(): + async with id_gen_1.get_next() as stream_id: + self._insert_row("first", stream_id) + id_gen_2.advance("first", stream_id) + + self.get_success(_get_next_async()) self.assertEqual(id_gen_1.get_positions(), {"first": -1}) self.assertEqual(id_gen_2.get_positions(), {"first": -1}) self.assertEqual(id_gen_1.get_persisted_upto_position(), -1) self.assertEqual(id_gen_2.get_persisted_upto_position(), -1) - with self.get_success(id_gen_2.get_next()) as stream_id: - self._insert_row("second", stream_id) - id_gen_1.advance("second", stream_id) + async def _get_next_async2(): + async with id_gen_2.get_next() as stream_id: + self._insert_row("second", stream_id) + id_gen_1.advance("second", stream_id) + + self.get_success(_get_next_async2()) self.assertEqual(id_gen_1.get_positions(), {"first": -1, "second": -2}) self.assertEqual(id_gen_2.get_positions(), {"first": -1, "second": -2}) From 302dc89f6a16f69e076943cb0a9b94f1e41741f9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 23 Sep 2020 16:42:14 +0100 Subject: [PATCH 048/245] Fix bug which caused failure on join with malformed membership events (#8385) --- changelog.d/8385.bugfix | 1 + 
synapse/storage/databases/main/events.py | 10 +++++++--- 2 files changed, 8 insertions(+), 3 deletions(-) create mode 100644 changelog.d/8385.bugfix
diff --git a/changelog.d/8385.bugfix b/changelog.d/8385.bugfix new file mode 100644 index 000000000000..c42502a8e020 --- /dev/null +++ b/changelog.d/8385.bugfix @@ -0,0 +1 @@ +Fix a bug which could cause errors in rooms with malformed membership events, on servers using sqlite.
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 7723d82496c2..18def01f5041 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -17,7 +17,7 @@ import itertools import logging from collections import OrderedDict, namedtuple -from typing import TYPE_CHECKING, Dict, Iterable, List, Set, Tuple +from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple import attr from prometheus_client import Counter
@@ -1108,6 +1108,10 @@ def _store_event_reference_hashes_txn(self, txn, events): def _store_room_members_txn(self, txn, events, backfilled): """Store a room member in the database. """ + + def str_or_none(val: Any) -> Optional[str]: + return val if isinstance(val, str) else None + self.db_pool.simple_insert_many_txn( txn, table="room_memberships",
@@ -1118,8 +1122,8 @@ def _store_room_members_txn(self, txn, events, backfilled): "sender": event.user_id, "room_id": event.room_id, "membership": event.membership, - "display_name": event.content.get("displayname", None), - "avatar_url": event.content.get("avatar_url", None), + "display_name": str_or_none(event.content.get("displayname")), + "avatar_url": str_or_none(event.content.get("avatar_url")), } for event in events ],
From 91c60f304256c08e8aff53ed13d5b282057277d6 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 23 Sep 2020 16:42:44 +0100
Subject: [PATCH 049/245] Improve logging of state resolution (#8371)

I'd like to get a better insight into what we are doing with respect to state res. The list of state groups we are resolving across should be short (if it isn't, that's a massive problem in itself), so it should be fine to log it in its entirety.

I've done some grepping and found approximately zero cases in which the "shortcut" code delivered the result, so I've ripped that out too.
--- changelog.d/8371.misc | 1 + synapse/state/__init__.py | 64 ++++++++++----------------------------- 2 files changed, 17 insertions(+), 48 deletions(-) create mode 100644 changelog.d/8371.misc
diff --git a/changelog.d/8371.misc b/changelog.d/8371.misc new file mode 100644 index 000000000000..6a54a9496afb --- /dev/null +++ b/changelog.d/8371.misc @@ -0,0 +1 @@ +Improve logging of state resolution.
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 56d6afb86353..5a5ea39e0103 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -25,7 +25,6 @@ Sequence, Set, Union, - cast, overload, ) @@ -42,7 +41,7 @@ from synapse.state import v1, v2 from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.roommember import ProfileInfo -from synapse.types import Collection, MutableStateMap, StateMap +from synapse.types import Collection, StateMap from synapse.util import Clock from synapse.util.async_helpers import Linearizer from synapse.util.caches.expiringcache import ExpiringCache @@ -472,10 +471,9 @@ class StateResolutionHandler: def __init__(self, hs): self.clock = hs.get_clock() - # dict of set of event_ids -> _StateCacheEntry. - self._state_cache = None self.resolve_linearizer = Linearizer(name="state_resolve_lock") + # dict of set of event_ids -> _StateCacheEntry. self._state_cache = ExpiringCache( cache_name="state_cache", clock=self.clock, @@ -519,57 +517,28 @@ async def resolve_state_groups( Returns: The resolved state """ - logger.debug("resolve_state_groups state_groups %s", state_groups_ids.keys()) - group_names = frozenset(state_groups_ids.keys()) with (await self.resolve_linearizer.queue(group_names)): - if self._state_cache is not None: - cache = self._state_cache.get(group_names, None) - if cache: - return cache + cache = self._state_cache.get(group_names, None) + if cache: + return cache logger.info( - "Resolving state for %s with %d groups", room_id, len(state_groups_ids) + "Resolving state for %s with groups %s", room_id, list(group_names), ) state_groups_histogram.observe(len(state_groups_ids)) - # start by assuming we won't have any conflicted state, and build up the new - # state map by iterating through the state groups. If we discover a conflict, - # we give up and instead use `resolve_events_with_store`. - # - # XXX: is this actually worthwhile, or should we just let - # resolve_events_with_store do it? - new_state = {} # type: MutableStateMap[str] - conflicted_state = False - for st in state_groups_ids.values(): - for key, e_id in st.items(): - if key in new_state: - conflicted_state = True - break - new_state[key] = e_id - if conflicted_state: - break - - if conflicted_state: - logger.info("Resolving conflicted state for %r", room_id) - with Measure(self.clock, "state._resolve_events"): - # resolve_events_with_store returns a StateMap, but we can - # treat it as a MutableStateMap as it is above. It isn't - # actually mutated anymore (and is frozen in - # _make_state_cache_entry below). - new_state = cast( - MutableStateMap, - await resolve_events_with_store( - self.clock, - room_id, - room_version, - list(state_groups_ids.values()), - event_map=event_map, - state_res_store=state_res_store, - ), - ) + with Measure(self.clock, "state._resolve_events"): + new_state = await resolve_events_with_store( + self.clock, + room_id, + room_version, + list(state_groups_ids.values()), + event_map=event_map, + state_res_store=state_res_store, + ) # if the new state matches any of the input state groups, we can # use that state group again. 
Otherwise we will generate a state_id @@ -579,8 +548,7 @@ async def resolve_state_groups( with Measure(self.clock, "state.create_group_ids"): cache = _make_state_cache_entry(new_state, state_groups_ids) - if self._state_cache is not None: - self._state_cache[group_names] = cache + self._state_cache[group_names] = cache return cache From 2983049a77557512519f3856fc88e3bc5f1915ed Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 23 Sep 2020 18:18:43 +0100 Subject: [PATCH 050/245] Factor out `_send_dummy_event_for_room` (#8370) this makes it possible to use from the manhole, and seems cleaner anyway. --- changelog.d/8370.misc | 1 + synapse/handlers/message.py | 102 +++++++++++++++++++----------------- 2 files changed, 55 insertions(+), 48 deletions(-) create mode 100644 changelog.d/8370.misc diff --git a/changelog.d/8370.misc b/changelog.d/8370.misc new file mode 100644 index 000000000000..1aaac1e0bf90 --- /dev/null +++ b/changelog.d/8370.misc @@ -0,0 +1 @@ +Factor out a `_send_dummy_event_for_room` method. diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index a8fe5cf4e2eb..6ee559fd1ded 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1182,54 +1182,7 @@ async def _send_dummy_events_to_fill_extremities(self): ) for room_id in room_ids: - # For each room we need to find a joined member we can use to send - # the dummy event with. - - latest_event_ids = await self.store.get_prev_events_for_room(room_id) - - members = await self.state.get_current_users_in_room( - room_id, latest_event_ids=latest_event_ids - ) - dummy_event_sent = False - for user_id in members: - if not self.hs.is_mine_id(user_id): - continue - requester = create_requester(user_id) - try: - event, context = await self.create_event( - requester, - { - "type": "org.matrix.dummy_event", - "content": {}, - "room_id": room_id, - "sender": user_id, - }, - prev_event_ids=latest_event_ids, - ) - - event.internal_metadata.proactively_send = False - - # Since this is a dummy-event it is OK if it is sent by a - # shadow-banned user. - await self.send_nonmember_event( - requester, - event, - context, - ratelimit=False, - ignore_shadow_ban=True, - ) - dummy_event_sent = True - break - except ConsentNotGivenError: - logger.info( - "Failed to send dummy event into room %s for user %s due to " - "lack of consent. Will try another user" % (room_id, user_id) - ) - except AuthError: - logger.info( - "Failed to send dummy event into room %s for user %s due to " - "lack of power. Will try another user" % (room_id, user_id) - ) + dummy_event_sent = await self._send_dummy_event_for_room(room_id) if not dummy_event_sent: # Did not find a valid user in the room, so remove from future attempts @@ -1242,6 +1195,59 @@ async def _send_dummy_events_to_fill_extremities(self): now = self.clock.time_msec() self._rooms_to_exclude_from_dummy_event_insertion[room_id] = now + async def _send_dummy_event_for_room(self, room_id: str) -> bool: + """Attempt to send a dummy event for the given room. + + Args: + room_id: room to try to send an event from + + Returns: + True if a dummy event was successfully sent. False if no user was able + to send an event. + """ + + # For each room we need to find a joined member we can use to send + # the dummy event with. 
+ latest_event_ids = await self.store.get_prev_events_for_room(room_id) + members = await self.state.get_current_users_in_room( + room_id, latest_event_ids=latest_event_ids + ) + for user_id in members: + if not self.hs.is_mine_id(user_id): + continue + requester = create_requester(user_id) + try: + event, context = await self.create_event( + requester, + { + "type": "org.matrix.dummy_event", + "content": {}, + "room_id": room_id, + "sender": user_id, + }, + prev_event_ids=latest_event_ids, + ) + + event.internal_metadata.proactively_send = False + + # Since this is a dummy-event it is OK if it is sent by a + # shadow-banned user. + await self.send_nonmember_event( + requester, event, context, ratelimit=False, ignore_shadow_ban=True, + ) + return True + except ConsentNotGivenError: + logger.info( + "Failed to send dummy event into room %s for user %s due to " + "lack of consent. Will try another user" % (room_id, user_id) + ) + except AuthError: + logger.info( + "Failed to send dummy event into room %s for user %s due to " + "lack of power. Will try another user" % (room_id, user_id) + ) + return False + def _expire_rooms_to_exclude_from_dummy_event_insertion(self): expire_before = self.clock.time_msec() - _DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY to_expire = set() From 13099ae4311436b82ae47ca252cac1fa7fa58cc6 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 24 Sep 2020 08:13:55 -0400 Subject: [PATCH 051/245] Mark the shadow_banned column as boolean in synapse_port_db. (#8386) --- .buildkite/test_db.db | Bin 18825216 -> 19279872 bytes changelog.d/8386.bugfix | 1 + scripts/synapse_port_db | 1 + 3 files changed, 2 insertions(+) create mode 100644 changelog.d/8386.bugfix diff --git a/.buildkite/test_db.db b/.buildkite/test_db.db index f20567ba73e97bf2568a9577efb0f132d66c429c..361369a581771bed36692a848aa396df96ad59d9 100644 GIT binary patch delta 168113 zcmb4s2V7J~*Y~a4?-mg0urx(_6BHZRQ54n|JD{MVND+HO5Fu(*Fv=LCf|{6^7zLx= z6bq(lnnJ2+1`=bM1ryVw`OeJTU0CzH&-;GvZ+DpgIdjgmJ9lQznQ`5^dzy7^yRR&9h0#z57JR z_FhuBC@&{BYj)9+!o^XgB?Vy#JUfMu;uqyDELxhEh0guXb}z#;_jsO-C$zXlMMVp< zN*CuWE+vFGJ0W&SY2Ko&{M@Y4yppKm6}@74_92oYW=>vi-lCkv`9+0U#YLrA`Gt_W zaCTl+Va~!lETdP9gEe0{7t<$eVP0;2PF88ooV=_#iy*fpOL7+F6fVv$B;-cd5%QAa z+?>UESw)L-^U4 zx^VjU;MpPnlT6GP5>m#q8H9uOiPF5n#s8rgfx$eRhS_N2&0ex-5oTPNw{RBZb6$Qi z?h--J&X^}RZ)rZXe*vy@TsctE**UZ4<>78sK-wrQ5c(oz)fD96nkdLc>kOP5>5cvY zkn29Z6QglA%*(=cR-8rpC2kqWb%d2?XAp+`xrI=DSqt)3lx7tc6)Fu_3UJQiC5vbi zc>2RLfw_r@MW7D)N)hQ*9)3LA+d%>;Wm$QPb4YW<`ts~tLZTJ3xM<<5(#1uEdATGj zrL^ni<}ZR?T(oEf&X$i4^cb2XHwX7^XuZ6`(xOEPnA*!5ss~dGmn_K3%3D~xcm*`> z!iA9M#rb)Pyt3R`m^avqXUAaPdHK1yd4*ZDMu8`vlpnJ|7w_e%NX%P`8>1j+RvvWX ze|0m=?d}236&Bc)l@Q#W2-EVG7w6N`1a^le$4Qjtqz@?`0pJPGTSUf`a5tWvMtMjx zl6D}^WNsb?a!GEM(C6*>$pemX1-&&TjH%7^g6UCHfoYJ*D7hQIH~eH&OYMd$hO>t4 z#*5PHh6RQ}(m~@hQl-JFzoBn6Zq%QY=Ig8UxyAx%m~o7;hdxQK*In10)$P%h=!V0{ z__Ou{?Mdx+ZMJr})=P6s^MU3i&6bndU$L4fZ-Gr>@gw-}F|l`qz)qpGe-DZYo&p};a)|2i~A1q$pi zn&N*L8_NnJ{RDOZ<@ry8V?-zUKaGyzp#p28ssH=%7}iT*$5Y0x<6~Ku0n$D`_6if& zi7Xka{?`HWzq587A$tZ0@TPuHYMFi9#g9mjo&uXeS^n!784@h8X@u{;43fd5(f_wm z(u)q0L8N<9=6?^9fh6nr>HD9?$$%(@mV0=D*4BB3%6VEdfnZFoQyXh6vK~tqE&os;wViJwt7+*I&Y20AUGY&WQG-{;p zrMIMKrAMTN(m=^%Xfre!Dh%TdA^KnS7uYHK$JmGU^ZA+j{(Q9F$TPYxxmH~xcR*Lc zmFmWGLvio!=~t?E zaTK5p)9X_UIMyNd4iOSiJHTFo4g#G2I@rN-n;^D~&RV&J+EzJ)-X!E!t<_O!H;`$y zgZv{Qm%H5!sxSVHXVoZwWqL2AwtgkF+Tfq;%6}n1`|wITaF+n)Y2$)*v<-fSc0>Uw z(u?Toiu6f}oZksoiJwD9a;J{p2(2mV#~_p$0PIqT1jvGiRR??#M7%>v+mqjIGeFKRyRIw)gWy*Qy zNiRqJe1Qh6b>#hXn!lHQ?SdX#>!_d42yOpiyAn}NfY`Xsj`FDkq4^M>@)Kf3#P;)b 
z5$acy35Y6Idz@ji+S5DVe1>@+nC2S2YQ?;+xsl7~`C!nHYG>`)-7?4#*D;A{6OjmN z^fV^HCly*rWk5s+PVApyT7(#kVpiak9;w-%Mt7FwS$y=f``g+3(NNV}Weh(i2J3K^ z&ZX8iDB0C1s7Z2ph1*NU)899@VJaj;+v;FFEdmG+*-2>{lsRA-*|#40K4oCPH8&VA zd?-WK7qnUarWqhL;2?MV*6Y$l&ZXXsRo7qm8`aKz*1g^RygSp~(zUN}H>f#D_UUMFBmO7_9 z2Rd88)$pnCSHtte$A%|{TO3y%?>JUDraAgJn%aM{e{6rn{=EG@dwaN@ykvXJwwz2n z^lw5R^C|OobAfrBxue-?x{3k0()6gQx2dsmQ#r0|Ql3+MN*hI%zkkFnKB~T3tORE zqesT6Qu{c?hkMz61H9(hK@jf%_8v1bj?A;=s-*vbQ68ua7VI4uFa1YW3>XzhjfJY_ zCdR=*#;Lsn<8YE3PJD5u`{ETJ$$qFqEQvnwxP|;)(#}J2%riJd{q>b+B*#JF`1FhI z;>d=N23kmim%%d61{MabXuQ}$wyp#2^81K@LNcD9Iy4YDFW=irB@OOP_G)*IMBt`~ zfe|C{d?1+uEMCsax)*F47cWxDntMliwHUNCw1-O(+wP2Hq>z#9fj%ihZ?tm)v;0Gr z!=kks4p5c{lEUsN#g+NjRpRfCXzm`z!@_YX2$BELNDGH99p7&uGY_k2+R7mpv=ZnWudUGW{ z&yqV}CM!nGfMG`(Dylm<1Tv|)hEgBV=XV{GU%H=MD_SiFJ6gN{V}-E~8%rjzR8Tk8{XWNc(J3r{=L{`QAoK5|PrHKk8UW-_2K>(BCno!_Z*)dB(egZRQ4gm-avUi$cv7oXPwi%k{iW4=zNhK zRuksl<6h*>cBi_dTtB*wxc=vQ+BMYG()pkBytB-?kfg7Cw@rSAy{YXN+X>rd+icrN zTU(oK{Spq3U$8!G?P0BN`Pp*JvfeV&GR)FS{ZBoo?pBM{2i0WNX})IOZ(d{0F%K|D z;}zwTxFDTp8e>W@sW>n0!{g(RDH%93{vv-YZ;|K7_sJb(Rl1D#m6uCXrF5xbSdIGv zav4Sj6nqI?)tn5KyxLukAvr0pqskx4Kn0(Hrgui1mcRzXwVmh!lbV*RWM)nVy6nB^ zcge5$yijB7_?j=?T+ODA>WKbcdWkh@R2)edkIwMcq)}j=3TWmNbUONCEPBqvH1?6u zv>zFV#{klOqvBAPryoUHT1pfK-u2=%^iMmPK1W6JX>=4nJxBFG?<;aq8b)bE>Eg6F zQ&ZY-8591ECDT)onZ3scdZpJ$^Xd?+H_OL3=mBcY@3BRg$K?{TP0*ZvzSG5e0f(quEzqmV352O0z5 z^vC-V!F~*Yo}}b{FxSw`9lHi z=UzbCfy1=^DM;>48)PPLo*EcSPR+WF8t#kwU%nIbE`_vHJ!y|wGZ-<>{Rp!ngIuX7 z(eHgndC3(k;$~2;9ev{A)u^WD;U9%g9CQ{tHw07X8ggZ>8iy?)`*a{lN0HO=5vaRv zM^W`X!y5ni&uy6Praf8nhS?=!c2-o+nL$0;1T+%eNV{@gc;}?=ukk%l-hrNOA zXWKE`2HUf?5w>{iUF+x8a_d6tBx^UT+w#4o((;le&oap3!R1jE46+K<@oJK4Ggq76 zHNR+n+}zjP%=D}26VofE=S}yS+MCSEC1tO&Oql|Q)s5s@`9ru6oF)6@Ht;?5g;XIe zma?T3DJtwnXoA4~!7)6eUpg0qupi$3P~%9}f;6=Cre$7C^?NE8#!I(}w0Rl4@@+@U zjM=;lK8^MUpsYoUEM(he@Gd8tHZO~Z;=cz&8=ZuG9wixD5VEX%y@fPfu?TkqJJAiM zwpi?i58&(-i{fBj%s%r-)ho;5iK`hJE^g~G)O#6;DMQdZ&qiZq4A1rA0nXg;+&F0( zo?JW=M`|~tkG-`G2~URi2LwI!;99t;dUct{Ed7NEi)62z9Sct!nXl6Hw-%uLY)8)1 zpF?yEIxrlapzA|)Xv`srJHU12JuJsvc7Q9Q6a)9z#4HbPijhqJ?aN@@G@zrId^;4AonAh86*Fe}Ub>zy0Mo%TR5ybTl+g~g9n?L2uqooud_Y_M*HQfA zWTXE&DrDHN$mVt*nw`$SZ>3^yfoFdzwqvbukHE;E{Ni9#?%Rk}-Qn|^?x+2xaL)c% zGGYhCl@7-CWnn4K3?!QF{YHZcD^=zWgl{ns-EHPe2>;**$of`FYD#WWQh5tZ9>wcH zI(aWL&>zbyws0M7x$(ACJ~ezVk(iZ(lk07AJm>H^+BhWp7xvffFW4Wp_arM<&T23v zFa>;PIcOo4>6U?(7V4ksDfLw~A9u)!aJhTMywAMC{0My2H!|HcePr5fnr#|sYHL!I zuaq~G7nFyU9(Xw8C;6znUY;orlUqsuN#~_|}@HxQut&t~4mel6L}~=i3J59w~_w7A=Y+ zz4~^Xr;x#v0 z?z?VVPbaUWZFo|88Y$eiJ`N6p*ymyXnN0SW$D;%d%CRh)k&~M23>|W!Ikkp3d!U?J zB)*v2XGg0XMXqAO(80UOuVWc&TJDuzBzu~cV+NwzkG;qGBDO+|&Aq1%d2t{*bh0lt z%Y!i496Je;D;3bW9Fv!f_y|?I_tZi!DH}Ob)%Ko>K$3m6J^13eFS-DoJJJ{O*)0-A zE(OW-4JUNy%G$*it^X%9M7&mxGgCaC7i@71)5nT?F$!AUIgi>pM`G@r#|Go>arEWI zi%_-q5os~nNuy0^!ZrxPqoc^g7)BgNe3Te{`;3Le#y$jEcMOqjps7+C-va*G*K2+% z1kMP85K7)eVB9|}KNcay;Yr?-wTsbEc<89{V$_WFE1Z|sF2?SP&YjmAFZO7|kiM0C z*LZP^j3+6QURoR{%_SY1fcrorI(}YSjOTo|2B5O-mvlN&Ow@|J%OSgH*fA!EM8&sm=^A}n5$|_}=(no2EQ^yH;vpicKCAX6m>7uk(S|&}=PaPzq;LBE|qw_0O`w+_jrrid$ zs`dduuQXAM#1b@&Y;a&pS$wLY7Y~0YzE&FtEkO2}N;2#xu;^@EX3>;mcB~#@i%7+( z{Xji264S-}bsijJvG-$;n=ZUt3)i7}6G-7*C1e&&Oi1LvV6=Bo56CI(Y!w=QWl4y_&lPJu)zG!-sUPnwlniv=Qw5 zvQ@@W!3PZ_~i(NKl?lm$C%>GrVjd&8_r`)*eLMSq^9QCw!38fswF&vm0^(!n% z3lQE#*S&M;GW6U(r6&gpjX1v(%iIIG;Qx_Sx=tXbmL%prEY-sgW1})kN72IXVVg0ND|9k=At()roNkOYF!2@a56?9&$34_~RA;MC z&<7Ti7+gM*7U_>;CZ>pm@InT})%!6u&jf;8eR-6sJwt~`LfBn9Hse`%+KSyM@bUkH ztJ3w7E6+8^<#GPa#{UB61UmjnLT3Bs`TOiE?T^}f+Z)?%+K$^c+McudY;A0k^$Tl- zb+I+unqrN#{9ri@sq<5oA(mM6AN7p-nz}%JK(qUriy-02}d*RQpb0T*}1$YC0} 
zv+i0Pw17NkcVZmVJ=-&9ccOJagb#$BanSm_ijR!*GX0VfbmOm9?eu8T*cot;oZP)0 z=iCj~yxPf1db+&+8PZ6k$a5C})zIZvJ&#E7^j@}x)R6n>rg&2_y=d?>Ss5#b?I5Qj zYq9U78OS}&A26Tdr-X0&1L8Z{j*4hkF?aNl%Ugf6%b1Lo;QV46XuH$tdM#kkXB zh+HzFNo~BA!w$JGb_4U}hcI3!B!|6ADtka(rU~&4SrJF_n`0enRC_#zlr=54Xvwco znfc9>i>SfosJAkD@?nr$yJpdfK}Kcbaz;DHIPa<_=ymT3kCqeN92GIO_Ba{>?>9EU ztQyNdUQ+lAfLXyAtdq^*OAH$%_|6-|asGh(~ zNvBr^{Tx+}IrHrsUg={T6fm*KQKXX0!ilL~E&SPTLk5-~$2G*L1DJ+fYG|rPvk+G2JjwdO#}yYB=PZw{}RFPX_AW0p!qgx3P8Tigjo2?AzD~HRD^J zXM1|hSwRqwCB@9U9VdtRNtSj3_mlK^r0zhG4n$!CvsXKT+;=3t9kuc1cUY*B`Eg$D zH10RziSygz7VSJh92aQcpK1FF^}I#%V0yr1F=>ZGjRX{WbxCnrYX}BS-7SN{NM`Tk zM$PE-@R@U`bH4L_XD6p4{G0Ii!&iqt5#B!>=A0edw`$zl7FK6k;CBinshv82icyxT zT0vtMYHkCa;MJ6isF>$41Q>Lh?f@AyJ%}rJquVjuL5kak5ZGLAO1X%!L$^mCbOS;n zw}6ly)e=8ygt+gn8H{S^^&`e#TjKiRI6BbUYw=PA`;e^Z=t4!7yI#{(HXQpMi6yR` z1r~BEu1NJCc`b$f=A|=!OrHL!dypF$pJ{c(_z?-@wl{S^@n@PQQY^PccAjq~AhU zOly3f;^$DpS1jYx)&QOV5ud@el(R#We+n4Fe+P@nDXek$U zsD%&5`d6+U$K3KdM&_}Xui;!>hQ+HDUBfTqCQ>ugX_;(z6}zbhUsCWYt^#y$!;oJC zVB%X?v>L3t=9wl%Q3d!UDJjgo+r7&DjQf7K#dX7V%Jsaf4eqoTI6H=43SSc5&GD_H z#F1{lVP9+SkIQY%Hq?6Cy3IPu8fMvTx!+<@-&C_zm-#*Oqvi%sa++?sNBKy3Mv0YA z%X8(n(&yOERBeb!Bg&g+ms>Lz^fu%9WH+gN?qw9AZ@orLW@+n^xu6#(_`2hgffBgv zl2Xz+CA-fZox)_8l+u$^q~pyQr7@FSQbG?s$X`6N#@u+1QqnjjW7J$8M9bWUOK3_- z<&???Ai6{b8iyz;oKiC8UvKG|JFO{mcaDne$C(+f8^>kGedpD#LfND-vyY?PT{)%d z%$>pXXR`jY-A0{}8j-U+bm5ee1%6&sJfwIhk_PF_37h1{IYDD)Yn0N7Q!4uNI%q_# z(pZorJ(U-53&(1jnx4@R9rd`Xi@e&No-hp}Stlb|@`;=hS%G_WM$wpY84Z-6$5#1# zfW&OXD5(P{-HPlLh%aX3yDYx;oRTrHd4RHK5HDyuPLaOt!ZSzhy;Om@TTgF0{9_-h6-Np}ir)bFuo(tPKf!P?-2x0>vTI&Qno50HK<%C;R+o8N4 zZ=9-w2=bw-Hh1kbh} z&|mK+qw!#@8(~YzT+mz(Qqnd+$?bNHQSRZCq{Fz3=V4&J%P7tCFfmVY3d?L$jw}qZ zuZ%`*Ac-bAdv`eyQ5H;lX?`1Xitmw;-gQ@7Ay+rIUt)wtoKRgbloPa#ISQ@XCJP#x zJ6oMl>3aY9dbXq{3mWjBn`T7gXXb+XJX~JV)9_RC=H)p}3^k9?!t=BTC z@q$Qm5+e1aq&_iya`qFCPJim@)GnR6Bqt-r2p;3Dz4z)d;+dZaDe3i0Q_L1qX3RcQ zrb~{nJHy;Z-3#4=-A-zv@Sn@v!_@9UiEJA2M;z%m$rY)Z1Vq8VHIb?!An+_qq$;N& zh>Zuy9sX041M{rx{vym>oJpB|$$aZI=u;F26!k}>a4(8$q!rMFUI8K>7@@(Z7d5CgEzDHRt{j5xp4wzG= z6=sK&WxA+zHSJa`rhNH=Dbsqzx(Awc^Q9!~C`q=qv^`<@!`9RCp-FVIT>pbn;)hyi*SRM9>$Y0NuL`b$7o@u+k5zhl|DzYfc+I#aGn1{hPkz4eSqZyhVc9lVmZbz89Re4M;W3dggC+wQu|*5 zksM}-su%Ku=~Oa|CuMpNQ^hbDq$oC!_Y-8puwAV!JntWoPw_*~_i-K?nbLCqRIn8v zKYZ7Ik}+)8VRH7yG#JWIr1?$8Max2No@=3+S;n^vRM7sobN$?YM#ZqOP`G{4G7ZP2a+j<0I=Z=ihDjIl8K-yq{vl8m-`IL2;*^CP;J^`dc@RDsZn0XjxJTcBOjLi@*vqM*-uHUq%l(Cu!~{D6Ju-S zPAQrd?lYSto6Ty)f5emUOKcOCdvn}`Fg2ySEZiftMUj|sWxsUV%k?-@Nqh~J_pDMd33@n>(C zEyQn5Ey@dqr}1y96Msrkt|55F8~1vf^1e=sZ%HYdZlK>n_)zVbQ;KppS+BfXo@N5s zno{(nA#|nEQ;Z$KpU~+xR`uQR3`^1MSTo0+&X8d1vEp3fnMo&#*jivQEvl%(~LUo?4X7=z(V1bRo^yOld_ugAB9?zOT~i6d|pshk>K;T!7CD@HqiKE5K(2_%i{n(&2o1Y&tCvP6_Zy0sd5gKM~-M1^9#jeKv*Hb%LRCu0528b7X)~T052BcMFPAq0Ow=)`2#-y7YlHa051^W z`2t)hzy$&<&MWf-{9FNk9xyzs@)>ZBK$tDS&k67>0e)71pAq1h0{pZ9KPA93LU6vE z7y2RR3h;CR&Jo}z1^5X8eq4a33GibA{HOpw!eD*=pDGZh2=K!KJXwGr65wnB&Jy5B z0z6TG9~9sTVx$iUg!=_}ya10A;IRTcMu0~P@O@!n%D#+__1ei)8ka7SjX$k2>xCXA z@QoB;p8#hH@CX4OF2KVCc&Gpm5#YfBJV@-E_X>o80z5!~`wMVC0q!fneFQi|fO`vY zFCAu!Ub;Z&DZo7hI8A_41vo{3y9;nP0q!cm$pYL(T=Y5%giZpSB)}a7I8lHT1h|6$ zw-?}c0^BwTW6^69{1GO%7T|aRjuYTk0^Cx7V+GhNz#ajP5#SaS4lH`n0-?D8-y^`y z1h}aHHxb~*0^CS|8wzj(0nV>4{HQ0uQ34z(z!3uM7GReEI|VpgfF1nuY$d(v_v5BN z+;uwNwaRs$tD*Bt=l`4&o!;>4;oBipPjK9F?1jc|n*FZ*u-$JTY9tnro`mU1ZFB&&a^@2d0E-m1xb+`QC00uMf%Gp#jEFhwg@l~YK$5i_^KBc8N;U)w>21o;_2FTZ$2oTR#jo&DQ+5W}sZ6Y9i}S{xnZV71t8p-_t!)ME1>o#w$eCrzkDHWcqM zWFfMo&QNP+AZI*fI9y{WH8s%lx_@izM@iCL8EJAK_0>WjVMEzrh@j-7K(>)XbVP9t2g`MI`WKS zYmm-{iP>%O@oTW0;Cly 
zhWPWIRt+6cQPPTK0|(Wb5oSX+(j`L@z9xnXVU!`v5JL&C`c-ir|qo>0KD}p`t`SwxvmbTw*AJ~ZPFi$83?fiU2vc<7^vJRem-gA^mw=K=7%Alci;66UT z+*w_$uUHw?^Tmu{Zs3S|2L#IuU-D8`5Z{ip-+4Bf#BV<9;Syh^Es5EDHjzw6Y&xb5 z{gJG7ehpf_Tk-{9uk}WLDX&KI1TZ!WW zZK*5=56qrxVJ6o?!CNur$}Kqofp$lRIGs8=AP-ks^59jeqjh#t40 z%8DkpHUZOl*IBO~xH;*tE6^P(e+_kqd&q=cfd;5-7f|R>n(@-M`=yO4Euyd{!Gt8( z6PmKVlD+?i-dDyk_PligB?YAkWANM>r_9j&*_soiJW=rvQ1oZfx5D;fA| zu=b)zZ*(kTE(_LPB)Pjg*bZ<%Ru=r>CJ&Yc>NT=uu>M>u3qZ6XwVTdb$>-%~z5EVB zaq`mkejR7Ec<8)3QF znjVf?>!+jrQlZpevW0z$7by9^lwv+dVkzXS{RnZf!KN}WKXP(X(L`~I#b#iZVG8y= zIj#=fsGN`utx@A!Pl21AsZ=k0ZbLE7P6&jrjv8wSpHj@V z69V*G<6=ZQ%4bwgsQk;Y8Nosr(oUeefs96D>n=jYM*W2BrJoC{H4-qYC;0v`B!I*H573XSz5FBCVL~Cj{tuC-w{LFxxB{H58;5Z-^Cu z0)|u+Xad#0b`~OFyB%FcLC;`zkMVUP;3^7%<8{TDJh6t@W`ip!pbgl5)YtA;p@yt! z#au%nP=s5f=hn@EuA@LBKo&g<775`|MO;TA5I*T_g_r@lSt$-_DbO( zErqHpBkPorYboeSXc?WMXJ&{Yt=OoikS9+PGr?*hpr^oh8dMKi7zyYoMm>cps5XU; z&}gTSngT6@XWXko4iJB8u~APUW6OGR-%W=K*HZ`#g^G29#0+34-)|Vog$>Ka45So~ zH;}9U98)LwI0Kz3pb7a;DdtKFfz*9Zt}p~oE#@i;w2EmXZ@PKA&Oi^SC~*3%wi^xM zQ;Ll$3ROdPiOHuHhcpyu^wr(GV(nN_I{^g+-aOfHhWK%X!8H{0E)GpR8{aad)Zjfkfs8y>B9EE z7#cFI*r=ya1+90X6s#t>ozV#efZ3Un@^2}mu>Li!j-0WAgIkhiW)6FUl~ z3!|1o#UBO6I!Z0(N(!_FX#5!q@{KK(Qf$;x*!0jWF$LtHj+O$HB*l@QR?M{&0##i2 z+Ik@esDQNMkeUK*r4n2agk~F7LtITEP(z;n;-ZbY!l6CM$Gtb$_SugzC z@V()FJnPolaocgivCi=XI}}~DR~QaOkJ?h$p{UH7Z_UJui@#WoSXPJqM~_8Hmc^nZ zsb46vx>N3cMx7(Sq7Id3s?FsA<{G(?`JnW@d8PD@`5|eExr_9GS#?*rH@ly5_j5OJ zU3b0hTI{+%?4*lqDi{YtqxM=7tdu&FGv5{kG^mw+WaAF(_WC5j_KQmfl}x&OlgX`1 za&V750*;`ACy(LeGJfdW_hD?SS9NG<@bEH}IJXALLm0W@gQdaE^I(R_J|4mhVwk*- zLYR9Qrt(AxGmv7mysbFC=m`&Cm`aM#&oBM?w$ry^7T*e6!fsL?nN$vJ96aYGv8{Ph zFFagARiMrM@RhwGE0{rkvY{CLoHv{0rfOA)C6PsvVLhR5$2A-{_5E}Lc{0C#0?c1= znk>9$tIkmH%GuTA?4hN)BDQpBAL{6&q!*5s0YC@nW8 za3he+<4nudlZ6dlWGFxv`XiZqvu;pu+;paemGB@)&(@?qdIVRmRyu`+!NB9@^f)Sz zeN9@GYs$|E+YmbcRJqo;#=4r}{If|v|CEP6g-473a=h;-z$1_O&h4%OHkHsTSSMYwzR8w1~TK%-rP5wvTFBi)F zWUKUvv{F*B{xwEe8G)|k`$DQun$%;Xi@ zYU@nY{(xkOFGkg0H5B7lQ~bJQNl#yTwN{89WTRv$?+v4NuHPtFlG2Qat5iQ1ECp6F zA7U3(w^RH9!4hBT(0D@XC><9p1xi=)*oC^K(P z=W?Y$@T8rF=1uj7q)IgZFqo=Se@2;7RcBG!fW)~-DG+;J@l!(L=2U-3qC{ga!HjBb z9fLrL(5oq7L!IUi$&+Z^Ww$>p)Ezyz`Hk`H^t8dN=TmNu%c0&gOxuZPU3yI%9kysU`_EG#Yss&b`j~AM7TUD z5I?;M6Y3cBfH;Y#zvP57b=B}6|+^>c|5<)AU- z{hCy#rw1fTdI~=pbCBi_iImth0sGH7M+U!9rsSzRQF4(|pbDgy>P&Q8pacsWHVx6M zaUp)Uif+PXN`VsGs;q10;OA1MKnj)nGwW0`mnsG5xBfgX(lHo~GNnp+dmTC#D(T_1 z!Z)pki7?I2WlDiY*tD~!Fw*Fe!EY2QNy{%A5=iqKrAmdz#*1~p#wwR81&UB26^b!n zKHxH?Ko&CoI$k#mVTPgN`{xZq1gXU#nGzBq6`zb3$09wWbD2^g2cF&Y3=yOibBR(Q z2bD2~vnxCJaG6pd2Hy)tGlb$fh8$KuenB|*nC;fFVd+wmr2L{BQC2HcOqcPTb$gt7 zPU7}9*O}pr!kZC+Gtb|SkFBkhu9n-1#d1QvU|9#d&?jUVgULfIZuuVds`Rs3AstW` zNGsIQQkEKLDl`9S%6ETe%5-lrdE8H%kGuPuUvhVEXrAW!&Yb3Y+Z^s%A|=62^*_8- zwb7A>?NL4ZxAwizo*8RzWBc3o30@G$we_*pvwmyci(%HUd7MdE9X64`F!p|3JT%{$ z=?QG|a@i&?V{7p0ekwy$HoYC(M^DkJepjS`*yUku*}wRuZ>uLW?oI1^2F@T4Q3Om6 zzgZkSJ7sGVaq0?$$YLQ5J@;}j{z)4I@1fRg^(JO;n-%&vf-}*|D z)(eKRf&7kSzqlk!^WDBDB*mD<0(x%$5jasj#t>4QBf+>IWteJU0%yZVtZYZ0`JAfn z=y`d3iJ?H#S)i;wcLMR|FhrFn#KKMrIAER##`^^2!Ab4we`58jeVm1&xzV@u{JcRk z!#40*BHCe!b^&dZpuGrqnKlI#nLrLd_cA<~Q4?9|16sAZ6p+@bzVm;ZRcWFERY{Vu z^36eXW8PX}NDKLP*2^)0(mud<^uC^6E;upk(wp=}!793+WwQ_%J)3J7g1!M1=wstq zl}R)OuF*?gK9!m}hrZXmp(OKA`nfRuMSVl7z ziqTnGFhq5iMZuQ1Pd~+2GkKY{ng1wwM00YVJ8)XY1cygi7}}nCBwSWXo0C6l;NY%ZY)-u?72K2|A7&1!j={BB$>q@Rn1LN4?f&PC(l$mObFa>~y|@8OZU zzIdYUWBYP@rfzad&qd$CsVpSfK88ao~#(MQ=Evgn$0%MqmiXX zbEy=+QL0q(v%#Vb@f)Q|d8>@(U}=6XRSHb>zRx!cOAWO-^&7=XB{PPLd*>8CmnsF8 zfylR>G~_?c&!tL%_;1C0D#XuLQ=?cZQobw3pXTRMrC@mTHgPefm#BU&R|@2y!Wcn{ z-zZcnM28d_02N$EsOk2@Yoh}sZyYZBHwymsA0Be42YF@0@bgc5jO)6XZV9s 
zC6)tEsZpGf<~NF!N{;`Z&46E*E9qs3bb7@+V$&BAEYUK0*5-&=z<$Kffe zMu1i_h?TTTOw`6^Fo>1d!YS-K=}oXdiw&eeHEkMFPi!c*D+`E~^oGFWKgOP-i7%21UwTMUneo)F9d4KEcM&0|yi4;Y3zT`t7*Q~mcF!uuR6#7d;? zXp}2u_b>=ZQHe&mQpNf|>mIy8!w zGLo8!bW{uFIA5aZu8|SjZkEQ~o+qCA{J!bW?P~ zRKG4((qr(It*hGxT&fhvLH54|Vil+QxkxEcLAMHD5W~at)X#-Vf$))j!&OR}-zZbc z=plix1?2F=x!Y0IkSc#9LyeNZGC2n&v%-YR zYJdxs0-dbtv+u+RaE)M;DJA{bNu;Bb^)O_>bLSavJuWn%LLoC%mD^#h&kaux_PZG_ zHI(R!JnyQ46^^nZWGkR77vNu7sC|6(o889tZ_0xZ~;DS~a8 zGDzSNOv@C(v`o=W%b4k@BG{HG{e-}RZJ8q2mMMa5nIhPhDZPZSf@zr|n3gGmX_=BH zn4T(vZJ8q2mMMa5nIhPhDS~a8BG{HGuq_J?Gr_b>=_KHTre!caRRr5IB~f5X5a131 z++KhM+cG6&TNb7Wre%s?TBZo5WePPd3z(iNf^C`7Qb<6sEmH*BGDWa0Qv};GrG*ey zFfCIA(=tUcEz4J$2>}G#GDWa0Qv};GMX)VX1lux2uq{&r(=tUcEmNX|w76-Rt^q07 zmMLz5$tA!}0S*^nhXC6J*e1YM0k#ORD$d(xfnX9~MSx`imIOE~1j}~?_>KVoC&2&e zFtft^M3B)6H(t_`TR?F;fsQLCHHPb#?_1NRhzu5jVAF-VPqF2gyV`o2 zEw)J01?$(=H%vRMMMytPTV3FiqUmMVS$^3C_icl|3yQB-@JpMdFCPvnODtzMJ$b{w z3*HW~TPK-Rl!j#d{H{vs^keWM>ID|8;%<{5W(mW1>U|c(EM}Oh#vu$-w1#uR&^6&g z&NCo{@$;CsT?ocg%rJQwA)X?J@$|SDxPM&05V!iB3I>|bFok`025(slwPo}cR#)OE z(ALu{R9&wmpK{AC1;b{Bm>UUkV&1b~*cB;ORbo&q|wuGaLj%N3Og=tDfJgNlUa^;2+?4 zTDcsr_TiU$6|QjoBJ%;3zbaI%ejgXb5EZCd9lKtz{ z4qB}o0S~0KR+F_YsLeK9C+60;cl8o%y^Z@@^7K>314SIpf+r2#sWWu^Tam`;E%Ka} zg$MyA7^wvY=EgD1G5uL}nt zyQ-!vf^im(DC{3%LYQ&%fqD4-pDLPuUB(43Tos0&MDgq|Om@p;3rPd%TcCYwAKSL<~ew zacQj>0WKmIaw%dUkJUq_iD%TbgzU;bM2Pu9oM$`+TRUK|r=RU_EMWj=kP)RuFav@@% z4k}-GDt2tpBFs!(>3ZyA@Hc9*;7a!|*C(!xa9epV?(AnpMYUlgYX~1_lK8+ zPYF*6cRH#aZ#kAYCOSGntLaO78MGjYugPbl=$iDlO^$kqRN{?t)pi?EXOq*zPQ|I# z_JFhCSnqYfcK}Z!zVkcd%_{&-B>9oGajGK<@Pl}PPNwuIF5NlQEiog$C83^xyV5o;4$Rt>yLA%zoScED~MCW z`v7|%souICA*lo4QRMWIzHzGhD&UdaRgpOhu#d#Nycii94mgwgv4fM~KLL**65PkB zwo`zIm-@2e)UcZXnOifkX9FHeDpF-c-xKf<=2cU*VFwX4m^!qKHx~jNL@LXCajMA# z_+E18zM}}b2k=0WIVc%ba24U|pu zud~SP+~}EY`!q2}FsW4-DT!HC zza-N|vZ&9uQh0uat6d4cb;-nD2jRS{E4;7{FI)#F0@ML)SrE4&h?}=Sr^?}7m{V9e z+@*h=`ZrwvI`pqy|Jq3I@@RNu8V3&p@EYsIVoMQP?Z?#J2g6u6OMZ~W%n)#l#e&X@ zgewNdC^LVvc)p*yorUvb_IZFiq0Jm}3f@DgSKE&B;e3|5qa_o6^uvh-Aldp8iG%|W z*?NP0N!E{T;JXGlcVzibIuCqWMf8y4$>jio+@VZs(os9?E&tMV>Y;ZSDM?4oF^4}C zLx!txL&IrzycbEQsP5a4ht?8wo-PZH^=X#Ad{e;+1?^62l$WFveW+wF05 zp2&uma}sm<#wxOWWnv5RUglAka>0FpRONQ9N2cthZl%9rQw%Bi;1JwsFibVPFSSC4 zzrwyG{U`M0ey_Lk%GOI9vpj$yW%-HlPeI*NTfgKqR|s=~VYEBnQ-4~kX$0^px)M>7 zL+Nm9u?E4&2I`uMLe$w6qcLYyjK&;UF^D;s;#|ML?{&O1pZ%S~{?2B9pJRV#vA@r< zzcWciGKSlK87p)b+2L<6j2!+tr!1epEkM~rooA!(jbdMI;s_^R^nT`lK6E}GN0kYv z5v6aTe!}PTPa*#l&<{!Pfoak77Rnkvk4~ELPdqw^f3QC#6Ma5Gm(ELun(Cu8eU9UnEN@RJb)ll&*XSn*~XSqICiEhs`Lu`kQRfe zo0j=VX){Mm4T&$)9k4PBs*xV^MeWvdK+u(be+6a?CXx0werLzyP) z--q;XHv5wFCw{XM(hcs}O_MlgImKLm*aQ;j_RVL%0sA5I_ho+ z)lrKXPRp9U0A*+e5F3k^6IHDxKT*&@v+nW-F-tjSVhFQ@ zW6DC9MI3X7V&YP0DOVCt3gSs<)C%r`QfNidb(7M_ydZA)2LasWh5^2^UhrAW<5)~` z4`6<~b3go{%{+kl+A)G-mXFjiB;iaV9HJd)g-uToH!p}=J|VzYGbR|vK$3@?$!Y=n zB3u@xU7D}x@j4`{b2%+1gqfpoFyC{Gt5-18b&i=xFNGNc|h3f8C@b z@-5p3=->YOx1av)tAG2Dj_XlD)5}mF$?H)*_ascg4BAK+GX_lx;#xE*=QwzQWdT== z4X~eT4AQaHsPy;Aq1El;rT6&fUH*B8f8OSwee6RHzJoP_h1p9Y)*ws~`)H?zInk;O z)*!O^oKVO=1?)o-zwvW2(W(KBE=5(Dx01|K6xqCieaQw1or_cc$1yb_%u5_I+>a*g zPf+ZY)Y0bv(#|6_AiW7{gt_Ttq=Vz(Jeooh9&eze+CgwW#t;!4G3kDtgS$i@LyuI^ z(Y5fuPL~p85b?2tS9w7IGmvA-LYV#>liwBlD;}yZ$3%~ZQ@#e%e)DK2f2NgEf7)+e zE#YUnGov86ybwP0D4m?_jxoY<$1D$FW^&Bs z5aub|Z_wH7sQx{|z9e0y^V10APOa1_hxG43atkTTYCQXr1NUKO{DYQIZN)LJ(Lqcs z*+XNOSFkVnbQRpV?M19wIlLfpP?nc6qIOq6Ez!d*CiCVbqDKzJX?MY#M0mscAC7zs zS`ox04ne16xGOH(wR<1UCi5JD!b)j4IyX;L*8<-0peHIwsm6Ft~@0}%(Gp6t_}f7-!qBe?SaI)k-v z9%`Z26gX&ZIWIAmoC@IJhMY0AxE)|xv6Fh*1G`2YIC4g1J5Xp+ifOI1KwvhGVBx55?tE+~kKkc+8d?lrj6E 
z4zXs-j{%TuOhXku^iT({+46k=X|{Y90FS@$+8>b1;k455-ii?(Ns#C4r;001T8gl3N%x5 zEYK;P$r`XRYibI$Kdi-jt^M52vu|r(o~=nnmJ)av{2v|$iwq^w z={zD~!o63^_oX!JJr4Zf7jH#waqm$m=#8K{h4L;zyA-OGXyXXHDP_y?QfwHJogn;e zv~~nusKyBSRgvTsw$Fd4clOF8<5?-FhClJ%KYL|@*@Q-)QjJn+GgD-R!bYP+k-3VD z<)WvRdXMdokJsPIoyYzX{i0CqCFo~`0%*osUjLSe0$gTQ5?Y|dU-Tg52*?TiS$r&N}iquEQLy`#hgIdm{qPzH3=AXs)aqq%|Y2z;p zD~-Q|tF%f>oBP6I&FZ*;A+6>7F;&(o7bCVOk^H`D2`*1Kzo2=1QNhKi{S-MViE#c^ zS<*{v%V*khoYrQp#HT<{Y1t$t1@P5c+L(z4C%Y zm6AedN>u$u!AMParS`c(<{IyW-^_|Mj@ZQ&F@Ac5%r)0+o=c|g;zAy$o zG3m2Ok-#dOyo`5gFFtU6S8oJta`xgyw6(m#pZMk68&9>i_C@wA#f11$rEETnTaaF{ zAspk=NjOHx#JhWR(q8mXU%{J*`9G>G{)0q*%-T#ds0S~|ZP<@hyH zP|VviaTUz3R$wZF9XH`EboZyNH{mS|ZAUaok-m}$_w`!#sYE3}g-ezrLd|8qr@aLA z5#VPaJU+?G(2!<7d?+p1aJ6fcZQ4%qE9$gAjPRlZD{wGSWY*up z)qz6o6bi~pWBQw!Xp_G_6D>$+!qbj&-^@gXw#_2$Rz;e(+T<5SYVS?csoXzP*%Jej zsKX$9Xne&J0}_m|0^e@SOcLVH@*nha*~!?f#3m(Pp}fZiAb{1l*-d|XYygV%H3+xN zdTc-fWgNCxq0LO2)!VH8I=r3t)?_A#DD>EC5NC~gTuLhY97mX5gZqklOp(S2nXP>v zk;5WufpbsIs(ED^WR0RuhZLQEG*5M4lBZMitANF@lC? zq0k$*sRaXmvYr@7%TSjHXKyX>xq112vXqp{11{`oIQfg&Ny2%J%rL@_K05|Jsc<1O z?Y{A-I?`98oY^}b*OmIYrPWVDMwZqxn+3V_ku02#dy*;gY!>8R5@!{kBT8k-FJ+b- z>NPN6{krct-*n1dy5g4qof~6~e;bF5&Bg-b4x_K(Z}+9ljhCz3*tv8Fs=LzF82^K2 z6w0Ao<7p+Hs$4t-(VIzG81h^^B;J_G{~Ldle;?q#j{CV~op$FhJ)HjamWtX47dqn3 zU*etqbv7(qgFC@*t!Bc%inbg;XDO;eO2mtC z^z8>Y%DO^ZmMcq`&#Np;70Kfe+1c%Q=iS%P>Fqq!@dquh*77PXlPGf`Znm~)H5wxe zhoJD`5nR?p^tH-Xn4N2po{E%l;7<7@NFt)+wT#mejT{_TLB@5oG`W*);}CVY1&uhgA3 znXE`zgyhg+c|AE4zcVut*?@)mHhbbu9C-MnE*D8HwEB$J$}EP_k7)~rB;f<@EGVC2 zJu?;zNiZ|yST(Db%TVQcqZFnYvEQH!gXv+e5K=$qV(L-6^MRXBgr@<%N% zYWcmC6jM6HLlbkd!SMSDM6)I46d5Z>5DaR+JFzfg5qz3k+x<}Ejt6(kV zp0qh+x(o0|-;E0Px>A1P8BAqQ#PG+ai$a~4|9{7mw9q5~?m zI-AwmEXHN|ZMi91`)!%8HREG-uo`x{$OoHxIUbwTQdZ)jORdIcb1i1{qw{6Ed+(wdIvC}#9lPPr~T$D{Y-2K@`^(EYgOGHygEGG7wmpQmM!mUFc%q|?PH6+?eO zQ|3%Ds*2hWM(fTNqkIh8EHKq#tu_lWJ63`e4X^idNzDk*;Sw$-YBivSU6#qOOf@x# z6UkKNStv@R*8Dp>6Io3)EkZRl`++nJbMU+)jPi4v6r=npj$0h@Q9nl=iTX1ltf8m@ zQ2}ul%cm=UyEVGw6_jy}(de#ke+7kSZP3z^+gn;77TVn_#v};eLljyuCXpIeqOfH` zWl4B2Qj5hPo%VV_dF4mQePnDxsoHoS8bhv4IAiktmR4wK!u6B%a=Ly@M5Q|K$z46@ z#)>iLMD$-OE61QDI;g~lEU0Zd8VHU`TvYT25@gH(*ucPzQ4UVv#wdrB80Fw3VIL~P zjg@0i8DgA+No#}zsNjV$NyZ>4DBpo0pxG~sNig~=P-la_3Zz6?9(^cx6^<2Cf&0IA z6+fjCMW#l`LPg5NWVCx%jftlk%SMehi{&BRjjM5e{$Uu0!7&3Z=%8#^jbr+UDpL3m zo}2O+qDXy&3|1tVXZv7q9mf&=0b2H_qBZ#6bCr~o+#c6cyas3VBq~xEAw3nT=dO|R zNzf$X6Gt6PKK>J0&e8HwDJicRX*-#GN3-ZW{}%pbQkrF-a5Z<4e++-QL{!DKks?d9 z^K;4)d>)0hvOJ?mUWAk?QX3&9illNbuO5U+T~*JXRhn?Dq;-QZsmno9Qhm%SE%Z#EGHLzSY#5P_o4q*!ST?dd7a@fa6?mRtE&L_8q)GEE$`Iw4lSo>IhhQ1 zL893#dnhh<0gB>w5(#Ikl$6Ik6}C{Vtp)ySu@Gys7oWwQ{s&XOq!yTqA;(7+pGAAf zW+9uU{sO;^^I$F7i^!VPS~xnv8X~o8=bcTW8lL+n!R-pP*$=I&Gud>5DEI4#vh zN;B|ol_K@9mo0!-i$S>!dSadhz}(m1Q86DU)S$rz7*S)7*s%7Xj1O^{>`P5!5C@T3 z+k{-5zPobIn5Y4@?zh}CGA*ZW%E`0CP4o*=USWt4yEtQ9Xfix=-bDq3(BujFni zDZp(MWqDg$c53+sb$kV-X!Asj>2<~{DIB8}NzJjzNJYvlLV*dkElt@%5rT52_GWIpBHUCyi(zBDPRusurZ9XIfNOsCd_FIDCipFj7vvLU+9x$#sNM#x%f;ad>e zmRfCijn=9LY{N60!6R-jQO6lP7Pn5zwOX25R%y9LOEh<-|7tB)NlAqd;dfIMN4%DCTE=P_)UrFB!p|@KPiy&PVyvpU%Qsy%xv7A3IydP=#7qi3V-Eo?#4{yeNFQ}O6 z*{ql_zO|Hd+H1*yUFeF+1EtV7OvE?SWo^ia(uW|kD&w`%NL3N?2Su7AA_1S#Ji70F{V;BM~kR#Kn4vv`aFEdeH!Jjv2d zZEeOl*WDP2h+b~S1-lzb#zrM+7>16`UO)*H&dRdX*xH#|HP&gz-7i~?hN;o1DcM$n zQ>$#X=8v~J&7L@753}|??ltZUtl5DQc!jfg-I>ykcEF3;& zD*7#F7~G=3R2z&&gfSP*oUt789+j1g01cJ|0MW1)agLElBz#yDz)octgSDJc!T<@M zS2;Ici*iFbV;g7uG9vm;DJgv>irHfMYfMq3$|96LhX?fd*K4*}mBmE#cp5h`HwjT= z7F1Wf9DUV^xQT!QW+fT}5=2wMcwCHoxs+6Y071~%<8d$UrHTai+N4a8ya;(tk@Ih$ zN9D-GGi|>g@4c*>${xoBN^O^?O>2!dE2z!ZEcpreahCIW>whzAo?naXQ+{V}6uvJl 
zL80hE>*j}-z{2Jkt>tfAqBJ2e8wJZ@DEJ1#W$YycW}`64i;y0Q)LH~FhW`3UjXdLj z^m;A~8Yyi((uFRfnaPA=S0tfQSib~wKTb-U@cxxLmX{)%PESJtTV9GM_-{o*5%Pl~ z;RyL&65+m}<-cfJ>|&mOjUr-3>|&fdQISvvVjqh1R3scBXq_OH@S=4hWgN}hfa`5r zhi6f;f$zLpk*R|)qe_ttk_fS0SwbAxSC)0!vQ}A!UfzPPzaLeG^HDnNM{(}_layxa zl{~9OnI2ZC>=Kl(P~#=&A&TF)1P#ssQd0Mua8rpJae$U8oWp6E0C@zd! zi+$i)7xfI|B?ySHER&#|Aac!tidykkVA^3-f8>PD^kW}I;V0cV(@GY?xdHN zJ+7tciMbOR^69BgwA zVfW)~Gixs)MNDhPzNHv#)`bn|zqqkOq|6sRg6<>p1VH<0oWIxY5;GhkP60 z%3YBg6)AiZMf@oI%+#r7Fi>SaJ;DHYAsjEJcf8H+FI3IWd(yZS{ud{TF+IB6MX*BDhHl0{`YF96EP>K z(X3BOU|o%GJ&jij-Z6s=cD?Vmy~QbXuPC5iMs*NrCO?g`HiAG|uUYl<}ww z6Dk~vYD{YNwiZlfhI3HrZTLM{6Rxl6G|n$v|B#Zx{OT&pSw&hSRB`Xg!o1^;!QSqSOTL!39Nu)bHf+#ub2=2rxD2kXyM7*5#)sRbTWpuq;S5tOPe$Lz&;1Z`(<2do)4DO1K- z3k;{F&++-Lmy&9E6-32z)u^<4YfFl<6mn;TEr<(2>$GPPvT|x2Dv~{m5YNxUHAt=g z%Mxu3@YuR!!LKZqx(Hd+`79rQW975e=(epMqW~rI$e%L3#HOg|b?wx8>bX-@?rUln zb0TJ2%(F4~#te<=9B2uA5HJIe1u_Cj?c3TPZ(q}XQTwUw2eyxi{(E#?G)3n}-y9u_ zFOfF*U&dy!IsO!E7u)39=_~i$=Nsnh;%)JMfSsZr^JaLHJZ+xio*K_0&s5JqPmKHT z?m8?m%6H%Fj&=R$YKV5d?3(M!aizH2&L(#SKT7_IpESOO-Smi>Gzi9WvL-TEEN;#!X(H+Gbg!WlSfuT~r+_pfMF^lUwb0x6(<+AE*h4MpflEXED{ zG4iq)ZSmHlIH7ZKX#&;rG+C*obI^#rD4l~wl2w73j=lT<1I36eNL9EFm~HZLgf3zG z6%(#~l!;TAWmj^$aw4BKIz}Q9jZbgz*{hb}_!Ic-p;h?F*oT^J7O+{~r&B|?-X|o!i<(FZz7MI0t zw^^IhVqok5tQqL+B8#{>e*y& z;9o$9`%DuGrVgR4WU=X*RbdUdV)X0pTP$^0YJW>@#7Ka4D7UE)Q?2y3+MaG4floC&>GQv#8H&I-w7WPj2Z|!BdIP2`jHF(kdCsCWR7IfHJz|xwQ zqs@9|;~KP7Uy%DfP|6CB{?%wVCqIOuO=>|d0;N`HF%f+m^{GayIC`v<)KokK=jl>| zF!P(4T1y#?w#At)HE0prtb9GXh3tm~gWQ3WNt$f<5P~rZ3h_I9v#th1jXd*_-Gt~s zTbz>--v3fcX#tDbq2!${$d#q zYGZmy#CSlns!P~@&6+P^do^pEzZy-uk47X^-qG7hBR?9^!}vW7|7b)KuUlf#Q3}k2 zwahUJH07ZA$q5Z?>;Oj!bZt?9*0S##`$k|)b|h`thr&3>jm+))P#C*!QY0@zQ0Kij z0)d_rT7D)a)z3yBpyEBG@f}kn_^3@jRV0tezIymXUrD>`ag^vQq%@na-~vaPx+#<@ z%OHii(4jm>B26vDt8g?AvE3R=6V7jyEqyC~OlhmKG~qm>Y=sxlc^t*p=KhX`^VPf0 zYuTb@vzCaaOZ&H#d+;FYdAPv22MJx35(Ig+iUQ9|(0H8dUFor0g5=L^uvCIV2C8Oz zP1bC}^=DPXHS<0k%CcG4g8cI;@`}tgK3~;u=Eg7979mtL3GI5vx*|;8BNVsJbzrSF zE3;YpWcc-ajr}-m)&Dj4)pBr1YONf?VNJBdx{KF}us}a6OWUfZX&aSwLK|QDH!~|) zSQ>8@9^r;1tHST5c?(fu`dv~|Z7oWDwostX(n7YD$CEf`hjBr53o()@wT1v&+uBg!^!hRwVQ$d_G-Y)Wdj)4y`Xj z^zec2(c4;Ilz>Ux62$WIND0u3V~;HhhF~K3W_V!5;^v|xV}TTOs!NUHw01(4_9&uL z?#XBz%P^{Ya)L2ZgC~AMLB-%E1=<)~^iM{m$*Z7Q6aMaW-ai>bQFoy0{k?xOCK6mB ziP3W+0+prBl(6 zuJ5({mzL+X{HK=RP>~0BmhYXMLM0wtckMom5*2%JOY5b{Y(;ZI5win|Jf%|dwr{jH z>{~RKWr8XiE+!|MffiobI8xwhJT(f1E2 ze6=Xv7oGN-`SHv)bo^^#4&sMrt?qGt1z0QJgC{37b1WKuXSbn;-g^g5D%qh8D3GOA z#H`n?7ceX7e!Qeo+tQ!cu`!F~7guWL708E)w_b?HO@NI$xEq6QJY|kG;n+gM5AH^# zghvcX^((c7YI(iC^7*r}1$fPeA{d$+dm#zEJt<~MK~s4G@`SM$642X|JYVs0Ui-tG zsxHITORZ&|i`>ES5BvJS)jPgmZhFkdI+;$-z zbKa$?>0O=7*rY(pE(^S@z+47LyP_tKeu55lt-<^Kn3j)fIa|v|q@=QUn^7KgMGK;E z7qp%q?ZPYzLKz{oyblpE==ISqlpiZ8?xS5OVZq@1Ts3Ym>Pso9ih~u(a$1q*2x(Fz^|9`jYwOP5ryUeR-$D@;kDR=-kBmN6lrv0rhp>H6-o9?cAJ&A zpfO0n`Vm?c-ePkZ7~*$RlU!y6RP6k4&@xj9l=i-b$ek0v+gpe+I(KV{-%9>ptK}{! 
zv1tM7lYMU?D(QU3JS`6$Tv+Wk&+?gt2=ewX;pgh99)2wVcy7tMUSRRZC#mRw!*| zO@urpiSZ;ha6%yWN+qsP0;>=hc2lCO5?!RAw&H4(TkoQHA9mXXrD1K;sdsVaatlz^ z87|!5WGq&u*if3 z#T73{h?*Be_A16Pg5z-v)`WAtvgJj{Iz>_!oz>p0#n3vHEJqd0-5{xD@L(}(SfGRc^qcHuSAn;KF*t(Wof}2yo2A*$1$5%qhgiTJe#Gy zXt7#mZ#)CP@MV}sEVWu*(YpN^9Iw@8$?H*)NES-5{0eOruvxv$@=%!2sONB$aVv3- zko{DgvE~u}G2alsf_E4Dq%U(LZmK{~=djWm3-C*tm)(XLYYwJL}3W zVvZOmt`hBx&x|cbsgY|8GCH*Thg@+Lt5#fnQM4MR#P4{IPqRwS1#!+}(t!?@uEtPz zsg%^r9gU-h$&4YqgCiE_4B;Id zvGfMgTzfSSU@FrPZNlOk=`vW$K~mDkX$`2P#4OD9S%6=PHQ_i(YsD?_FRqM$I-0+pKnSU*#wKNiwDI zC(_})Xw-)I1wXbI&0Oa;NrZE&mRq!}(K4*%Yjoa?b2svY>x*uF8mkmp%9E_VchAD> zyHb$=PsQ3%_YGRRH_+C)Z;&l{U{aAmk_h)eWeIR7u2T%(-CxmL!x=&xD^}z)MZyts zjN-~rWY@GE!Q(7Lxi@txn#euNmiI7_yJ0eRD^Vwfd0uumCU6OUeghXNTyED^;rC)D zyzkSv`O8sbn*1B%J@@&3uX4|q2Je=WGxI0UC!SY53q5yv24IBaAMQQw7u*lKZ*|AJ zesUdlZF0>+thtZN>pbmz%eluk5!1~boLQ`(kzJPsKqK2Gn)@1GGoCN8m98helsblGdD^H<4G4cbdEP`<(@(d z8#-ePEE@(|c%uyoYi$^^Arf(Kp6zB>()TV*>fwl$q4W!ru!9P(ta<;!B=jT@%}^?} z-GUO4q2mh6(d-l7*+>$;e^HtbMR?UrMA}CCP^kOPE7BYx&5EQB+@^fcrs#{@;X8;C zcZ&(%Ipx(DA^%V$nYYZ~V>^Ak6^m)?^BFxIpW7<6A~_8~khV7LN;IBO;&CPBDDjvS)K-e1(vF5*@wBC37i!Gg@f`SHbhu^OS#)atYK)rh z;AY-qD3CuM%}NQb zqj~S`LX8-Z62q5hRF%BP#X+))N=Kw+riu5$%%)anYeAxbpmd+fdgn z!ReMMP-la31!8y)7wNG?fvF5Sm80s2;msVRmdkrlDvkFNk=l9Qtc*3IggTVtT*KD` zRPa+mP?{fG25(p?L8=7CP{kYW!ZT16SQoC+5THM+a(a}nQ#+NzuIQn%{0SZ>-5Rqv zW*T;F?ilzwuqW^Wbsg7x7-cU_zJuP#y*7@<8YlhKHRd38?)X%0k4>RsZqhyR6Q12C zH15ftcoN7;v}d0D2~nI{l>e+%ZbCDj6>Nblead;6g!I|Mi~ps`kfzGP>i>9Qv@uAe znpZIT0S}O071{N>7DXsrgDLZDL!E~1cCSL5wT-8cvSxgMeU5NJbs;Vz3xDcCHI@kz zK{>Qw9z@9TLCQ4`B6Ro;N`vVp@}2*Wn%sf;h>F~=NL7T)z-q7-)bQUQ!T95*miWqk z*WgKSYC-X8A>loxoB}-2aDs6X_259ftE?F}&^Y5HwvirWfk6t)WiSy36rLw3lTT5| zJ6S67xFo_eM@qA>JIDHz=~0F1FF}vcTGz=WbFQ2=nD)636 z!u%c<)X*L6!`WUug{cbUy^Hrlg1Z!$n_b{C6A^tx)Al|y9!jDa4DhI;hSL=ku6r%z z9!t5~Ql?o7wqxaI-LL=wx{dtsuadyPdR6A;3^Y!dQw#^Gr53wT`%(+6=A(Qm)Qml> zl`5PL^I^U!O8qgygkr*JXk$8t33S?8EPK;w9j)dbPAlB)l|rq~lOFQ}FwFAvYnJYc zFt28TOZ?m__j-G&+*voDb)Mg5&w_uC|{Fav7-O|<$v9$A?L$YcyhtE5N(reM=t(Af4>{_&jy@M4B$URRL8mJLZ z$}x8g;$I*T)5B$Mhl=_RF)2;c*P-^84#InfxfNzqqnEJBnl;;uPW8v8sT0sTN)WDE zGZPBBd%G;YZ`JWb==2eBtAvb|)T5iRf2}-*?{&>; zE@4|WYqJ@p{_Sd)`4y62%!1dTx^BRP(5V~_l3EORGNhKDit1Eqjl7&)YIPPvfXlc+ zpy`}}nn#+3545y8?_|xIIbJ2LF+8zCYI&D0)>kdLEo3h{C{MX>oN zh;ZysmNFiXQ1#+5nTdS9F)FA2x4q9Z%JCAI*oLtj(_Of0Fdc*f94xsvqa0mK-Vjr= zkj(;I1J7tVfL~<*O=~%T7rDQb*mmjPmZh&Ec}y;TQ<8vvrp|v;l4MrN0U;(d?BH&U z(6P3gD+j5KJGNPCExhwe$%YoZ!n{ticmry>V93(yKSoSKTJtO>yo;#Axe|1FIb74} zTnVN#?Lb*@*|`#oS7NVIwuN}t8fi=CZ7$hHVVhN1Ot_zS_4jN|KCS zO1!JYJ5o?{H#BTU{|)Jya?pqx^*8)bZz@yDBn#|NV6F|`P@usAw40+(GVxYvGPY2` zS0$+Ec%R4FU*Q?YJjO%GtPSI_IjQyLZG)v2Uc=QfTWf5;4(1un+L-nK2W~0)57he~ zxZQe=iz#xIB0=6ZLy<&DM079eh$@BBufenG#I;43_aNx(LZO&G4~G21d2@&iK-RbT`SF7uoE9@$9 zgnf+tcO#ySt`|cuW_?E-YI^ypi}%}snl`@W*q&g z3vzqwZsg-fx*$)ur%-+uv`jnH;mX)1+>;ck+GG1nRHQjV##2T&&SMj#q+||?jPJ&I zEL)NKQv5iIWGWKmpNk3A@D4iJ-EbXwHfv+Hd?2Reg}Dq|F%VNNLOfZYHD}ZbzKLZ6 zOM6frnx`5bx1clXSVTQ)sUqrA+Jg_P822eLLkX;?W{3sV5ERF2HYbiPk2epE<#B({ z*zy9(9E&o0E`O?pEmY`5XH#lrT&_xO%N#_yq*k{Ixv8@(vnNtVl-Xt;c8kHQ4n)bk zYD*cjrBS%DlvAyAs<;iu4?l%lVoi9yr=_>y+`AhQ7JL4-a!fAxmm=v;+dk(N3GfGG zRH|kw>fVB;29E+S?2hN!$Z=TKL^S3!v#pWGilr@i1>Vo%?)a^uu>&C63M1r?X3+)% z4Fw-51sjE;S+Woe1!9F}4VSPLR)Xc)7>gAfR)VDzmtKyN9v@SIr1V9INA^m`eYzGZ zQWYWd6=`O|nOb;Wk{@>R`8%l0&6_^|UptWe|L*ZD+rtx&ZALN3-E*lmbVCzBYHiFC zKi!Oz9;5hAH={D+kBl(E0-2RcGpoMlAW^SRH{)0NM7gC~M&UW6Nb3~sE^M$1Y)mq0^u6Nip*al8SzZ*3Owv_=#0( z%SL7CeVw*YGf#D!eFC?zk_t}X7Gij!h6LCd0{}yjQar1~GfLod0c;VHsyoYnA;AagFh3P-LZAn(()mHsSYc>6JMW6CL@bkde;Db8xkS5*j8W6vV)Ejpm$%VqC0TWa%=I!mpxsfb=+ 
zc0iw)!}jP6Uj{Y7tnzbkq!V;hff3w%2@XeaYP9Vw+^?#mnPn1=!nSS+#wv3-BtmBh zgP00cC}b?xa+#J(wJg`NOv@$a+GDs#;qPZDeU*ZJchI5R03(8LeKf1J8P!kfYnzkJ z^YZaDVjib?5h}`7Oyo4bmrp7gEpSbnBeald1>c5Q^#O;dq)xf`sF+fXx6Gw{cV-RG z@X>6iVpM}DvrdtJ4;IF&1I6BkdS2rO%p=lj&FxtJt`0wy`<1hUs3E~FRmw9LkBGAWiC@z)Y*u3h1sN@Zu{ zN1x|cm2dwho6dK+=Xs`h`g)>i;i0RO1Nw6c$D(SYN~5OH=HA!D8M0m6O@5Y9c>W-z z({nW!EPY0AMiW-9Nnq6s%1!k9j^2zmu3VL%9B#3EF`ExoQavo!xIJN;33VG@YYxo8Tn~Vuk zIL7n2(x$xK!|D8+rBXc?WFw}$jo#)pXbImmHVTUvUj zrRCWyop)A}eznYwq&1+O?xKb?TvH=|{NdBI2DIfJiXzQiWrv*#``L?Hes+h^5?MDT6I#kWwhH z>KX)1`fo$mjFdj&kS}f1sP2@K_*oq&THR1n~;_>!VUWA-hq?U=QDBEQI^LeiE z#eCK-w;czLPwiUL?>)zA$0NwRiBWA)pG9p!=FN>76x9J^w(p76;t_GX=w@@?{`4CsqSv~jh*8O#_?Y}< zZ>L)pa!NgIQK~TQ?Fte-_Qf z`ZXx8r8bUdC9F7$Fh`JQB}le*yv1?}+R*_h;0jlu;VQK%UgsyZR^A+nHR1n;4iCVq zU2 S3MQlCRb|32$}2BIe*Pap#{%O3 delta 64648 zcmZ5o2V4}#_rKZO-Rq4m9NZp7=^QEuHo%Tj?H!0>MFCsva#&-cf<&Ggbup%@QB0zj zVnI!ei4BaYCJH7oT`|cozf}FdnLW%efB4*+neV*mv%53zy_u@k?YgR}zqK{NK1+-$ zmF*YR!#|Q*8{d_piM{R#h?*^pNXd-a3cI|u(MiHQQma;L;mL^ViLV^9WLrtv%Mx!@ z`c0HmRG`$MMR{T-f@YRc(E`*jnF2_@g}e`)*&XhF!HoWieXL zn#0VeO`T0G#%N=U!DRSAzgTxvKUlj{H-q%jmUE!(uK8WlLp&y~71jzpRXYTthROA5srEFO_eIhbF~F}oXuPp6U`^iQ~5bpH#}HpxrkkCTWN%MHuNmaCSF zbWj^DT`hN3XX1qDy&9*C@#*SuDwV#jI$EXD*0~>5sWhB>ph_k5aQmrL>f9+va=ISN zRFj%jgs4(fyHu9b7O>2=^tPDHH_V64Rpvr-xanupSyP>9im97PZMv*#H*$vS zhF1+w7)o{bNt*s+)h_)gZZFqccTiVOKGNRPKCX?@+|g{%gp1e3g`!3HSlBDf5_onwK1J{#WBfGKSr&K#3yy{@EXwXjy)sB-}t~!WaZv8QYSmA!>K)rnQ697tsA2~=VKSk=u4w~`; zt;{cuq3PbFjW&7$f_gdKx3aZHAf0Oq%tEhleq1P2Mg<1a1_a5W zSa+S!%lW@(pgajty}$k`l|N37fB(t*v17&v1Hx!|w3^<2{imM1p^vH$?X+~c73*CJ zwQGAti&35UUQQKM-gE>)QGB3Y>>R}RickPK_qu}^#Hhf3Sl&D!>BXpke}I}Rw9EWy z+biG1&Xtxw7A$o4eBK5F2&v3Y|M zeE4Bbl`L(vlNd3<#sa$^r-4K(pNlcJS)oLNmA?n*#Y783$mb3cD<+z00IFsMN^$}w zz-SXgY4+&0y>M)9G`{dHp8VC_xqqgspRc}<4+kytKj1X<|8U*V^#AE-f{#J$56k^hn{q>Jj5Y7Blbh{q}E zb`!bjphTR|P&VHOvlI)^zL4%1|hAO{w9yT}SjFO0^*Xw-Zb z^&X9qrCAaQfsA($zK(E|>_Qp=O?#GDVf>%8pm(sK44QTZ)|<7Ek{ z6Pi!@S|M@&XoDP&g~iJoC@MS3uzK>73~6V4HTy@0$bT_lEK*E3uQ)`Wx`w7{VnEHx zM{V+TOsJPHGB_Hk6SmANv%;_gSU~r+l(~kX1>a)VIIQ+PLd1IKSVx{e(cfV&_V$ zW%p2tIH3KNg+AcAi22@LPrEHsPR7I%G+e~o@-|Q?!wVHt=op1$6H+HgmkKBz+<&_xjA?G5+wmnjTV{-+o{VRr}_DB-pU*QY>a=cuOZ&4!gJvjXm zPfv*lEoa*1c|V$ej##P#N3nIn>TI@7fFnL)0Qe;Gw>l}S3MqO{>C~Ae_|3;JeD+8& za8eJjh4VpBZstUfCys9=gg{~D^5Gu*7UvO9S`sfM!ZpXFSlC4Nh3iR)N@ZFW&4H$0`c>fY7GYn!w)G+%0B#je5|LVrF& zeUKw$AW^CMVxhz9Eq5Cw7weA9Rq>!l1025d&rNl7sZ|%s8TNCh+oO1CD4C-AQ6#RtPv( z1_a4T$Z6$N$2`HW>Tf?z7r)&DEGw}WY`?H|*&4Pod)4v+Tb(`4)@Qpc7cKRcbCyGv z8cUO9qvZ|DO1fejVp(PTX=ikQK=01tEZ||x|Zst zDwQ~_+Nx4niq}-CRQ#*%S;&8H?FyvrYtxbcg*7=i3zOD}xaJF6orN@XwF7IFL05sA z&DAMb%1L)W`mjs7)6>!{+`3 z&TX?pq2b5JLxg>}Y~2M%o*o?vjgJTZrJ`oQ2Xw6 zJ4mlPgZYWrN~rvymmMrYU3}o~6+bt8wpQ8nR7ebkc#4Or9Yo<8e-H zwd??m)#!xb*5!lg;}vTGX^)+cRSdhQM~`7AN%uc-XVD3$rDIl}lEn43%iM(!D-Xc(sd)v}8G?r}Al7H|f4p?i`h zu&|J;HpqYPb&zaMbsJg-8iVCT4Ee*oEMpj~JgNzVySchfoD}0(a@I6j!ImI}xGh;0a5iioLi>NeS;!FLCULUO5kbKliravf^y#{fbeYpZq8q3ERY5mh=LDw2AT zSNg@*6@M-2>+$t4ECHhlRPHSfufOgP1$=Gm-RsH2Tu;?G^06gNRcYR0`o>gb%rt2= zKWd&6u86hx=ENVi72J(tcc_&7XTT3J{tuuk#($iSzoXL8Se57dSpQc!-f&RWMt6Uo zdlrZLFIRiy1pg8atER#SkgJ7Zo&C?NM^=x6wwLvG@a^oM-c6_;qY3Qe*AkJ;I(M~^T%BNe7k|SbW&N~v23!8ut4qew}a^T{$z#`NdZX)y|d(zDN~cXcI(c= zCImdyNdc=eyNj1_2H#Z~dl+8Sf2H4pGx%HW1kG^qL*bOTLFmSBQ{Pe-a+}FFs=JnM z=GCU3Op+0tuE0)uOu3i#iFgSLtjpQV&GRf<9w6h)ofnY7>x^qUdhLrv0q4~ayfR?0 z=M%|aCMHZqf65;mSQ=0RNvi@{JUy2M^dtK0>T*qB?m&D;!>SblfpR;(IrTiYdNH&t z53tK2`*A}L=LZ{2@a~ELA0-}ct_TQLyz*>hh&xslAhr(NsIpXBI++{I{Y=+P(~Q3v zHyZ7RLxxO!i~bSa&$=}_f9>F35B2eSk zC4*ZK0aYb|kx*17TA`*}UyWyAN#L7i%W`6x?G|{gN=w_LRW_nD9A;&80dF?-|>o@AhlIt*83JZd=R(lu> 
zuet4~S05z_s#f(uoL{Z4hPXU8TJ0ZkkQ_as2lPE_kAuK3?cJdw%%043N`_U7>s_8h zU)qZZ*v^C`6C04X>@H9FZTsUKSdVscabj-$d3&;F(zo{i5NJQr$tCi=lGUXgw7lFe z-t+4B_P6k@bJ3oRFYLm7om}fb{n3uUQ2d2?KK;=V>qFAE2~vNvR+dJBc!Oi(!>G6JM-<9szlKS?O;GC@I# z-Eomnd^O&xA(sCSR~$-2*xFMlOj6RN5W0A8Av;-h!^Bp5YUUpCdv_&xg{U8i&;?-@SuVT}Vl%D3kfz zby#hXxV%^|cfX6GG5Jcf1NZ6qw;V)*%yB4uGx(t-?;eI@<)aLxt2lYo7Y^JXh9H$t zM%T>>C*HlTm)~Tlo1*ofW7_iFUs~lZUph!<`D+TwbI?CX&PD2gI~h6~?0Hqv%Rgc` zUVa6s1df12yFBVET3jcjxc7~E!zv#}Z>qeC0qx~B6bVjgvC2`mY0h6MZ2hrTYY~W- zhar{Z!zL#QmcK`ToRBj7VS87NIuyc9EtFfZoni3ckkbaKZ+NWo)~{(^!;wnR`13Io zK)>k`<=+_k8d3)&`Aas~;(1mtk3~6WoEVeK_eZhNX1~Y~kWNv_S`ID6{ruq|sE9g* zbsXl@3%!T&69U1Oe=P{APT{*L;#&t}VN!aKFJ_nF+D^@Rmj^Q|Yst*V)Eh zI3Mz-CFx<6Ia-uoLa?)3#-bV%aOlkbl=poGG~JA|LZA0=W-MfAPoyYV+KV|P#p7(e zf#FzrFGHn7Ox!r1!4VGw04#eg}>G9EEug|HAz1@(xFVQAjSvRae zj?S_UJ3|6Vn_!h?^rp(?45&E~YlSTdSgfrRE%pP3R_w!3n~gxK+zTnD`!+UME_lOE zMCiA2K$P6T&_judp>qFVnofgMf||bAFVCYlUYlUaATvA$$E$0UUe2)7!~#-eOTx$JdGyB0vnf!Y7-Lm7i?Sh< zvb8Wa=}aU(R43^icogBzau1|f8M}#CA?%2xmrw2v#GS?GEOIwe2RPEBLgYSSG!r#a z6xkk+w8Cb}T*a7QM6|O!lf~7H!soHS0hv=AR=Iqg8KrLJ4w^>_Qc14aK%deN z&>tu7M2h401V!$(;*(5Dykk|I8flYr+vqzW6sZFa_y03QK7rnNc{Nh({=wl^INa-3 zs~m$bsm}7Z3@x~TLeEtQq-gZ`iX5SOQ`WTO8)!P`Kp`eXX7`nmd{`e;3`YbArp zM%@d#3SF8`r9H2$(T>$RG`BT}HH$QTG`x60+$xR}CE;s)`93Q26$JhwU&oKP*_ujU>U)9Fx>f9&*n?`jK>KJ|jf|d1162 zGNK}Vpgh9)6lB(Q#XWOMj2YL<(IF5S>2yF--@aD3d{#0*RZ}mkb$6J!Yb0pf?Rg_zQcaR8j3JNbs5#%_Q;B;zFM({~dTxJhAeNnQL zH{`j0P@dP|Ch};GDUpzTSqjy>ss!Y{BdKgs^)iq%$fEi?l24A}WEs&gQS~PcLGqSC zzR-M8GLhfqIe%g^h5s_VqyrcJLoz+Ul4bXJY_h2OP|22~oYx@#vJ|Pi>SgK5a@M`=4JIoE zy#*(cNoIV*TfrBgI&M+vLlNk(|5l$47T$$(tj!(wp*tJ7esxyt; zfu>iV4S|MKe3qU~buJ}8C}UdCdx#A_zbYBXcM8xNKFS6cZqTIPFr481b5Rq?_V+XQwo4wr9lEJ~8>WVj1(N|-I8?WYLc9*?S>y*J( z`20g`=p`xuv&$~j@&&_|Ue0uK(c8kOXwxq!f!2wMw)$P;^n0}QwNu)=AEFQusP4 zVx1JZPKsJ5bzUcRStmuWlVXlZv3w>;1)(^0oF8|ZTVpwad!-2TTjurV0<+0<)ibX+ zR>zr9E3W)0E9b&1NfGcrOKgX-#AdLv#15W}i|$EK`TRb{7y)@V4*Pk=Oo@G62N`>^ zrofdqf75uD&WSY<@4kQ~R`vyUB{4%9{kug%5gfgDa?4xm}j_cSZN5*AJPxf-Ow%4 znYABi=b)X%MNOS1Q~Xg}i}L&T@RMTUkMkpWqTZn%#NFT?=OV~ea)>-eoT}GJ0sceQ zCLvT^>Q0y4s6-2cqFWBs6@4m+i1qVgRZ)X|Amu)e#f9fFe9Z4UutTRt(cB`Cq9W7#@Z&&ZI8J_? 
zq4?-K`IHKh5m^sZZ^BegvfgkNRX!>MXloZ*qf*xu*$|5TE+#2(r>9e z7lAnJ`C!_W%t#?KD$+(*QMBtjd3&+IlIa6|a6Kja#wz?l5BZxToFT_r<4`w=>InX* z5I>)U88t>?_|xzo!(GF-hA#}C8$L3$7%mvzHk>lNZg|DBn8YvRJT^Z5Y0Y>+ZQ_W7 z(J)3s8MQMS%xD0k{*3xD>dUB&Q4^y^Mh%SW8P$4B#`s^dw9S2pTvGN5ue3qCZidQ_GPpWqrDmJ#b{4PQyEQRwEOzB zka$m8NW#UbJ?*Oha{9;gPTdi-UO1*5ZmF<{<~`;_(_5O;nn~gou?xzjbNN{QxcIxe zNj;YPhFeJ{kRMh5g}zk@ud7R{gTXO9@e!S=I!G)<=}REodn+*>h7C#z2PT85I38=Q zUz{KqRBlssCofO!@4@l#S5^m))tqFNU1uyr&`qxnK%Q?I@kjoQGJg3(uq^M67zT|_pPS`}dH(mSjRq=4+0d@oZP+^4VjkwnxGrT+uGwk>( zn${<6PQYKHCuwui)ixV`KJDr}m6#>82-Eow`2L`+a3y*&D_m;STbrxHF{dt*0--S4 zl?UvjN55{^4WiCm?es=AYLnuj;2@U&@UsX?Cpf$nA%7!BP-$Wh{QMM@V#`CJsWh>4 z{fmoSo);G-k6jXh73rpWMuoE6gO)KC&is;jtXXY3Y#L7nm_*c3Ofu>~YEK^KDQ{2S zX%I%Ba-P7$@h5SfNq;3jNANpXS_m!g*b|_6!_(m!QbYj`O5VMcxBkHWWY2;7T}KnD zrHJBj)s@$LKVVx>x9&%WciSrnh4E@kOj{}x#!>IX?1jQu8h$v~>wgftd43SP`@cvP zgI!e{K_k5$E38Es_17RZbn9YzgPX+#llRC{607nSpI%2MiVSTnfY4?&wcqe&X9L9IOixf|wLU%tLJmW^v?Y>}rO#f9kM`d8h18`%B?v1jJFA;AYxt|MP)MFNQR2Ju^5;Dw%5C7i@7n(fA3oq!Bq73 z4FD?a5p$y%>TZwqqK!t1c46p|vKR%GYo4A(C|I`5QgSwors1hd2fwmEkW_y<55;2l!3PM z5ey8h-xOsN>QCAxD6Tc4nCFoT^ZX~%YA{;b{{K82tB zCy>cT#oQo<7WN7EqKz|gBy$59YBOOe1-L}S`doKfg#f-#HvzKR!W3QG~#^z`d?l&&4^-bKvyQHrU@#I9F&1VQVJ z0mx=!Xx2SMr8n)!W>wN1qph$of8p;m^2J)L*xC?GVy0+a*7qz$AQYpZSj;t1aNFr% z%-py;2r?fnMouG3@2u{GvvKT+7Q(8?s9Gc!nv^Zn1 zRnW0Mxc52s;y<94akliP=@%pfLgT7HEMKGKTK{iURM1w;^?!H;1^wdRD9_N-ga3`H z8Ct0OcfK4$-QNcOD__E@+xBYr?0U({^U1146-Z6%x}C%MXI4{Wv4Q}v3%*aSj@{;a;W?c zjS*kz9$0@m9UGU&z>;@pC3Bg-%{nj~s^)(eR3DD7mAo7!)ldByh#ftQ0(HqhD@;Ha z(_-FGCFz3i(%G_E%*$rLy{L{V-~t(|FodDDZa8YJV$A`x5rY|e5G!Z!=iK}rtq z(wYrq{>r7_p?`qlztz!|RZ*G9-CsfHc~KMvBG`|CWF<|O#r$oLb};lzM21WTo?eEN zhSeuC#))VKLkquBJ|Kb6KAR4BUxvCb()#qF{<_m&c$2j@^hAca}?6 znO+Rda;AIH#{0C}dor}Py0aIp0ose{483Thb0j^07d2 zOT%9}{(9oC7yf$VuaDKcXH_Har z^JNB-tN_+?b5TNw0Fb|q*C0Yr*=e8>zv8EIQ2dpiVIZ+e;mgs+KR+~4U*9t$oy4G|4b8)>W2fLw zdCe?6N1_#^re{V7kj1A_nmw)9&Y^XX!dP2^_P38wMwFLhQdvr+CchlgFpzHd3PB|m z<1E-TE11J&4+|vi8*i1bBaZ<$k_>6TP!@u_Oxhh#UQT5e8d!4zkHfX*8P!!0rFk1G-@Y@ws{vyZE^#`26-ORFsS(sn{A;SSk#p=>^B@QXb2pN*1Wn>>?)HI zmpKW0IlgwBffyB_W=|>FhnE#23yw5RUSKN4UPJjO?H5TG6fCFpmPk69KPXLlYbqK# z_Ze)ED-jNZ;y!~z;MSd9p?mt!hKovn{n*P+1SLUp=Poud%q`YKWl!wD_R|w>P|LH*moU1R(M8~s zo0%HP{4*Gx&ge8oOBtQYXbF_%W`-3r|5!%HFglvik&F&!w2;vPM)MfWVRRUyLm3^y z=paT1GCF|Ke(S%>&BR|8{;0n{{swry%N-b4&1-*C4FREfV20jMzz@|m@j>d_FtmB# zP|x<}fq7~$efUN)3e+>u@nl~cI8TJi$-ne~fYNcXV81@FJ7gpdaB;f*kgq=JTL1F( zf%qHbdHMR_`~mcBI1B`7$XGn2GEo{53Wd@VKg+jj4|hVnRy{)PL{a8lYOQe^3f}A_ zfGasK&a=fi#HoggY56Wu9KlROYVAM8gHP0u_uz7QK9-dZ6)Ou}p1z%jbm8Q-SA7U# z@o;)(zH9wE(L+4%L}&NxL3f*DNTZ4yWVvG5jVGiu<`2yK%=65dmUOel^ttJvX&Fex z+1os?7iXU_@-B6$vf1S0&B~u;gq7LX$SOWeRiN6h zvb0#9w#>H-u=tyAn?2?Y=5gj&)1RjIOi!80Onppd<4t3uvC3FrlKt3JaTsvvqz zwu|H5gz?OM1)f-woyc(q;W%@jgCCgN0QR+%Zztq2cPmt_%}(OD^>75kMsAsFu&$60 zomPvs24_PfEP9UPDw76LP^;DIpym7LBFs9Do8}tm#R#-48y+Db#!ZC9>u9ON;e~ay zlpOe!h5LbiJ;i%M&-E0Kfif1x_g`#7BJ5&;P`JokU-*kL86j~4W#*uG112WFK@D?n zLlfdg((3B*um;ZPvk?|8_R5E@)Q4c@=oJch(UtNKxadBsD>(xj9>eTWRr3aO55N!1 zl_6{+Ev^poHq!WwP=&6MtV&G8Ot53H6G%)$gh?Pv5)&wdm=2t;4~Z~o$%4fAf1odW zP^c^^4k7GNtrnQFq5SdeCygXKu?x#jhly=PQ4wfw)F-j?L#Xh^ClpFajA9Ti;>e@1 z5hjr&xFQjvqk%*8?V<=lgYE7D?>s?k>ww>x>j!b0Xe;zEfx4>u@Hh(-c$0G|_AT&F{+yT{Pevd$Df~f<5 zdt={-&p;NWb3=qt z*Ed1?5V5UD!jVl$&~zX;yQ?r&63W*)M!z=hI@Tv9L3oHbJrg60VnhP}5J}FcK*XM) zrU;uLErZrZ7&ZP0+(RDsvJ=Q-P9P5v=l+i)(llCXV3x1?ScPXy8Z2`yeJlp^b@Ku9 zF!M6ganlcGJDR~xG{qYKFurHpZk%CEGip)3{T!s$WWUb|X<(?u5ftK}H**tUPHpxh zoM49*44VXBm%9@dsLY?rMZF@dBF723M0XIY2Az)1FAhbhMS+89TQMj84*=%=1m~Ii z9sIsEJDKCZfcQGyog-^R1~%6s59mi7?D 
zJxR;h2NRjQ8#XcbNjS;ejc}K_)gW!7e9NH_T@$}3(fx3Uz#XRGNCUa!n z5WMtMcC?ODk4l=xf;TXz=G6J1-9<~yhBW35fEmoqfbF|zsp;?@3%lSC=5~SD-L!Nm zX|lH<{KWkEVXPX-Q-Sdt{E!D3 z8Vy7At@_1yJnOh_Bw7(|*ACEphUcv%@uE0YvBl#cr?YI#6OuaxYaj$U&P&`xqv#V9m2?;eTTtXW=dR(|gxKGYyYFAEc>0(GVq6}e1-+7)U= zRG>~oIF`)lP$4cZ^3mLErV1S#^yZy`f{nE6Q$GFcvss^<}8k~iv#X{--3Ly;u=jv(W_@zRrnd|=oPtj!I?#$MtIrjnE|+P zd8A&p4xlv~F84)ugj|J6WXC_YG4e$kE7u}*Lgx5HD>ISlA^*+*Y9Ax-8b~=xkfI4@ zhlz~L52EP14DGOz(aU8FO-3p~^OC_K@-+tRK#D3^6gYz8^?beDdoWE*kfJL0sd0As zIrPTKGa1lfHDi>`Lnzv!peOQ!cbgd;6+pR-O;DwUI}n)k54J6kzb{`OIM+J~QX%JaE!v8EEN@>;7-d?{f9#o#xr*K4!D2)pW$P+BAww<8-Dj zXp;Y)@hM}Ou`l`5Xdwp-t%jEks|}+Jo%R3KpV#l;-qz2;<+f3GgDl5m+f}+kUAXpV z?b}+=&d{c74Vuq2hcv4OXbLn=@n`WZaho_@OcxE@bm22J1*{T=qr&4C{%yXVpU$WA zdVI8BQm;}Msw22xNFLVO`{&@h3DDLU>R0EuqY-woU}e#8|2pZ8DC}UttZpeRmOKUd z6U!#nXB`ybecLZidL~9Bm)RMb7M&EKgfFqMtu2y;IdZnH@w>@zWm!&Ao#n@9cFsn4 zp7GR1{uCpU=a|3pTmt3S2-Wn=4P=cRu4Gq0g9P@TPf(8FOl8qWo-Ab1o{A+oMV#mU zvYg#&dTIxHEzF6b9#UJk;`V4(Se^Fk7{$YXMdK{+)u1&|ubllFO;^@j4waNzt609I zR|?Cw?%cQE)tSHlMo>ymMzs!Qs~i4*akhiK~N{1`<0s&4;}W6*W5ek*7FR$Cdy z`mJv2-89B>`fkQ5y~2cAparI{l}Y4Gt8+qfhA8vYiVZJLE& zxtrWbVW#>wew6xM^-eB|{EAa-fn}i3(-L6*+Wfltaq~p-jyXZtZn|%37Dt+%HqAE; zB-_Q8O@YR5jc*tqN5y86QHACLPaEbN28gRwiJuw#`OaLm{c+yiJM`r5& z(p}Q+(Ut2mbvEr6+E-D8I7S;SfX!-3VgvpQEKP!aBZi|u8c#|IW;SXibwib5BJ)@{ z`LllB@N5>o*SMO6IoyPk)j7)O!{pUD8koKoW%}&URSDc#o0Etq&7=)E-C;T`je*9E zIVmt|LynUT>VXY8I{0c|p+rOCab-;Iz0fCG7|%*MeKrA4sBF$LgYSw|Ix97>WSue( zgm>Av(j_gpmPKoz29~D4>bRe}h~zPr*hbP>6=BuJ94(A^*ud^Awgq=RY+CNb$0de} z3!+$W{9G`?+Y!nUGWQ+EC&JzJn9E07yloSXDrJPziQdTkZC$-Hn>@q9g&(APyJ%m1 z9d>c(cC4W99B)4p8E$O!_L~sN+S2ljGWTKo<2lM~6pEC(8dys-@bVK% zJ)5v|SMMof{X@>97{gB)4l&o;q0W#9>|g;8q??{v8cq9Jc#M^38#_|yqzEhC=D${oe#|rir3;iJUgYMt6SF9J}OTcx0xNH6WF?pW*WAeorBnc(?`Qd0j z*|{mduV-3Q{zTkvwMeO8K9?_g?w`(oQ;q8^YA?{X;h8*+l&!}IkNI4|ukVrxAk-9g z?$^)&4&9H1s+Kxin;w;DqYzZxzZo-?d6j5W9nTA18WI2?{O z6n;eW_t-M6cx|Yyl`%o6T*af8b~=%xC2G8*9CZcDq_70 zM`+<(+3>;ggYl7Atq*2oIpr&Te24~aukGbrzx80DXY0Y?<*!hCh-AFC=b?sDW4kNv z5p#vVgag8ORK32&Plw^fBeFGlZa=0k$vTTlp@x@Y1A)r_5vD5UNkICdo{Pb){mLm! z70r|2G&L$c&VqF>PJCtUDdS^t*i~+PxRNp)alE3S!ut3wesg#E*oUxT^Z+?)uVH$<0 z#(5I7Ou!cr%@CSbPo^+cIb)^zpngng{H$Wyc&c;8;?GR+!c=yTFjYFE!D@I1TwFVZ z!c^;wnbc={xf{D>P#D$D2=857ifV4I@?Rb#6Ze?4k9na;hrD>VyQ$;vcV~*~HL9 zL@OCu(i>IV3~fD6RhAVDE&Q_sZ68UMljRI`m(fbj^`;xwk?!l!$T5eZZT}tZMT_Q) zK(tKBZO&K)t-m}L!PyLK={d^Fnz@(i;%6~*?^HCT!Rp{(%w$0AdDQ(g{cMFe2K#;n zL)&^{*DC0s(TGlGsI8pVZyNKTmO9ckXI0g z$Y=ti@r=f<9~(asf1~g>8h>N(H`X&Ye%!&;r}$!t9OS_HEb5o?$5f(fFa=eEy`l2Z z=rpK3i?>1)Z9|3O+{=>ReK~2^+ zyus+u$w~v6uEe>f4#3kl6qu#}1t->{Y77@t=7o0n=kTN~Orr|2sot2544=7^5wXtqA4aSK;tr0q)m`BXGr8(-0{}Vq^A9u2Fa@28bx@l zv}5iHJbZySMRB<2RX85r5XIs4mqKYUZ$I{O!6%FGyHBrxdU+le*LdJHo9voJyKba6 z<-;qVWP&)JO6s`hiNkHJl6ljg3$Vuu74==k9y``$WpRH}z%+d9k|#UKX91}AYXKgy zKR7x}wyvg{#9RfIUY&&s*D_jej*_aCM#}`DFm{y_8LHroYY>O6j~3hE@b^=3e@}02 zI>J%4VnqbrqD1d(;>cj7F3oo=RyclOG}=hd!NG%Dhen68>zeF1pYR+=;}a+>X}o|l zXn<0W=CfsX)DBb4q`ZaR(FD#VSW?IROsI%O5GT>a$sw})!(fa*^(!8OX}pM8WGY!? 
z-m0`gI|A|%k=9BcU5NJjmyn?^tyC&WJ=QdiH1ehas-dWD8efp`2?~f7324J5_>ef$ zB4ak+s`{Njgy(#R@-hA7N?5_+a@Nt1pSHEzo%0sTkjxlTRo6 zp`uJ24W}R5>4Nz2^-u>UO^D&N$&e|v`awTwLh-g|26pjWkS0viz-9SKRLMP$ z>bfh*>z|CCfWL|On}ol~o+qP==6JlS!PcT-keB(6FHEVJ?FS26i=sVpYtbe>oSD88 z5Ai%%>P#Ixc1DN9}*O9tRs*UwZaar=x(+%B4?nykhb zj3W&{7`7W+`p@(W^pfs1-Bj&g+84B$nvXRV8m-tMrU}i$3_-;tZs*m-+;3bBnrM7R zn#fo(*0LRS^T#<7!r`E|w9K9+w=bh7UQwZLmti?dy8JBLRrGF)fBnf4<&tYq5g#I- zLw~%y5vh|Y<>f?7mkW)SX((esO}zX!h7;s7NKvrk)y5m;=_{y6B^D_r>rlzdmoS_l zZ)GS|%gZS%DewmZcx77e!A^M}0(fT|QoN*n% z8zpZ?iY;S60TsUROz46~dpgwha-Tw47>5+k81%)I2hiI^p2dI$3iuAEz*Pp|Izp0{ zA<$V)M~cnBbp#YWi6Wny7><*lMv5oHy-IznJZuC_tVfE64x+o@$=eJ($0WamKmu7I zUq^QYS+3No{$qTfTkrNV$g}XUVZ3&kS9}C}t4H~6Yl^nWO^C;mr5%m}AhPafZlk2qln5Wy@$<@FH(i!!|rpM~wvJ-H0cU zh4KX!zo4U}HzwKS{4ppg)XncG1g!H0hsiG?m_X)v`3i>OK{9&DlI%Ygabsl%b7t2x zR--ux$B+syd*eJ5(zQ@dQZ7G0cLbU1jjD=4dy1yEUV3@cI2u1keiPjhq>ROffpz$J zA30+@)`-lOA4Rv2DyZpnUe`EvU1RC7#%Uyz+K1<<&a3cZmR*)vmNbh9*7j-LAf~2Y zIuuJwd|+eyG=F%uH5<=Up_1NnseM|mnrDi6YQQg$AoK$nFg3xm{FiA<1Rxt)@#)7a zWnMez@{IXo+F>4?y;ddTD>1+OD%biM52oR7I{s#OW;~d=Ba^orP$hbV%vmQm@7~P) z!+-KbE{IxygZ26O3GjodTlo4}{b%8Cwr5uVGT6nNHmQ2xMHXd+62FK~SO2PhQ$2}$ zz@6cibGMiBghpby= zu~4=6do)-|kFt8!ww3kiI3ZSfa&^be5x(9lA}GR6EzsLy#GwC=HjYKKllR^T z{Ji%@(6sE{2!HQ&5ESSz-^Y6+0=)M|P?#BX5u4|wFUwlB^sV_cZO$HOJ+xBeNxf6% zPXr~`(=7|JM$8~=kdmGu>`v*L>cGj_P-to#1Qm({V z|FZyJpL3NZQUxD^hF*{22Y-%oN0@Zy@euN~;xDHD2JhWr)W4Vd(@^fht`n2)BD6=z zr+o{a6hF0%T0`vyqQ^y)R6m=F2AIECy&Waa7oRuCTs^H$Yy3{Udr-g7Kz1k$?ek=` zaT>oBzy3QFq(;9m1S)6zh#%?9Iv>b|*#@#*iE5g#6;~`zdb2*Z!wRQp>^22x-rv~@ zH!u4c@WLXx3Y4Yo^x3OZ8eg-0ww=@}{)*s!Hra3v*DZ1_(3M!z$sIP3jaaQ3g`?rh z^L8M+W{1|BFeB$LcqtoL?EP>(yaNl|#F*+w*5T`?`!=ll6H2O9dq3>_owKpEJF&I( zsVl6oKMPlZkJAd3lgAXc%p!lhO2dq$ZBT%M|8BLRu@=o~ofjFt5nrm7g$Ce6?9$@8 z8a(y+)NBK^m0`!ohxI;CRkRh&w)V_MySnFUaW%LLE4ErmT{UQ{O@90$whxZP(>2j9 ziHrnig42#S?BaEBM2mU4j-gdy(EzO7Dj<6hm1g3i)$v#jGlOHd%5C{$9V)RX%L;|1 z`PeMH)PcupIqNV*Rr(67_of=G_cDc{{m&=xE{Ib&21^y7W)Dt=C0>7bR9D6ArOU;| z%H)wAvj@u|GF{|3XTFbXK^b$MM<-Eha4%esy@7MSkzH7~u-Hx(9FxjPGkxT8L1-@@ z!D+j~*!J=v@Jwb_Xl;Sk^G@lnS!gTfzbBqjAYNY-^$AZ6Lx+q=1eq4rN;AgZ-G(|H1^d&Ny}Q4k#?ZStQh;AetbS- zjz&bW^!-tI4-N8vUyX+SxXD0RG5Rf>j*JYZTMdL2v){r`7J6aV+YDChezO*?@xtvR z7g27-@Rxjvx0JC=yfCu?MiU9i3rjneP;SKv_{c96|ArMa;L|72ww+}{w=c-8*a5fn zLP_8M!iphqEpdC{hI6cficN6gn`kx6xV^B~DtHlEF(XX7Loo?%Nk&EBKK=bvFHEn~MOb1m`3|KhOn64`(?!_9;7T7HCO(;SeS7>Pj!%Fq#P~gBk2>`+smJ1|Jz+8D2lO z66H&WeQ33V6y^iH6}0I$_E-iowDxj81*MnaB5#1g`yElx%pY09{TZ749gevoHGHSc zR_~gM;u)#p&ntKXqpGLsu*!1w;ni33%qggIdec;8avHxhzJzx~B^jCxb$E!bVgz)p z;k$b94y-Tnnjv=mm2<~a9k%8LUR!gDFkAEHAJ(I3`3SGAIl_vqd7;5mWo0~MzAz7uFa^xAtj~GB(v$Q4yrUO+t)Lo+BJ}wKYJ)~;8DLYzd+6IF!O9YFuzr;Y;si}!jXmZlRcLX&+j1xg{y}1cmZs@`HXoyUOcza z6l~mY>}hxb{F;#`w|V{?4LJYvV}j@FkLI6N<3Y*|{k>-;TQ(%e!^zXw4qVek0-a3s zP$~5MY<`7q&W0>x%fRM7n6Mxo_CJkmv}+;w^Z6g^N;mYy*d&Z~8z;uYp?&kh=}5r% z8}qN~rf=wlk?g?h*LW!Q=2`l#$J3zd=^J{2xpdz2W3BT^<2I5Tye%}_#&KitKF1je zFwBJuiP<#^93m4AL_tH%j4t$M$ugo?-8Q?X$HAzr3*M8RoDWIn^vO^(XP*m2U0Y&W zU7lC!7W}CNNB`<%g64+ktheo6;IZvq_-j`*rq(tuT%~?d{U~f~UO3I9?Uv}CgJw5q z^F^(`zWE}`7@p`ZLj=tYskP#%4}E==Y$Kfp`O zJq)ernl$qg-H$xXvGCpHXhx)&?sAuSjeAhNb3HQBz;T+%N$z3_!m9Td;{7+8yd-xK z3m?I-n$rw|>+jP7`hxiyEjSf2t}S$-rlbNLlO`z9J)V_A4I}psSwqX=licIH<>(;U z@BjFE54b9h=wJNq-pVa-!4d;1T(C>I*t=kCv4Tl7ny8?$H?YNMl%^1sC}_eY8Xbx~ zh7y&V*Z_$dyTmS0F}4_^g2tFw|KBsakpFwX_nv<4&hB^4)LnLG&iT%n(T(6;`bBWU z`!#au(-?SdhKH=(Ib?0P%O;&e)`lBwjR=2b$3zIGl?V^9pz{=8%tnbYkF6Bp7FI06 zY*uw1Z8ofRCJa=|m3Y=FeDc&j>lR`%sjjXaQfzu<%()$QWxmPS$|+0c{Cy+j#yXPZUX zja?LBFmt<1xRZ4fp@U5kVF3F^gtgfT5!Pj|MCiss%1Ithi7C{>4=_TT{z-5G@o0(r z=EhwXpYLBGhEK$Kd+G~|xH 
zT8Vl}Kiio=tXh?oSZ|oPIr^#WqDVW2xm}|?K4hJ);b!TFvT5Ra09!A@p6rweyCql5 zNv@ddZ8mn)^wAtLSZ1ItYdxujHi^}(`1n&-M#abXWfoxp5e>^aLg$?f zb=|)4@jF`Cc9~tbH^r)Knv_qy^YMHgi*bM3sWn7|@@qqDNg=Mje}268Mnq$2@c&^$ z0=N_WriPKW1x|A>nqshADbv`&aN01@pwWM&cj|uDMQQ)h7HNmc59JiuS8^e%J1i=u zfiJeYU>oBnp45&@RJ?BZlHQ#UN`*em=0hal9k0%)c6(9Ucq60h)#8w zPb-t^qe8)|dV~y=%IYLmR7$%VtV99OV-XF_mxX;h0?Lz*uo&f4DYhb*MUlJ}j-WxU z0Y%Ar1O}YGz&hWHWyfLn@aRYHm^EqI~$sSU}t#M?bvN$HFMl zsk?(zuF=ZBI(E>+o$Iy<{jwf**$X5{}mO$}#vk7=609psd;@cr%4p68Iada@TAnG|z z+w-FWzlSK7#T-s z3MXPQ*sJy^>{~+J>Q^hecnQ3o`SNE#15sn;e*CwbigoYMN6#mGj5Kiewi4kxlH#_* zm;JZIDvpE#=23CKK?HXVmRdGr5^m`J4k&ZEi0X%Z0NnvD7{BD7SXi2_aQ)*b#I32ix_Wd8sP2Z;-DyY1b(Llp-! zh!4RLTGpc|IJgtt)V~1)^4$VHZG#Fp_z&nXM?pK>La^Cfx{7%(JsegaO=Imjjc%sL z`yM(tj{VTxWZP+rvRSP^SU<8Hw#>rdZl2RMq;j6aVu(x_Ne*u5&WN=svpsry)U(yX?ozu=Bh9JiJnJOhLs)*XY_QC=^s&6Dea^B@c?JoR&!f#}L)fMbXGgQ29`_J* z7Z#2yNy{pGZEwTwY(J}#Se|E}ZO2Mybry^`)?@H?DD#LU)s?s^Ea8DdRWaOB2rbv^ zE)|3pV{ffWNLkcUA-DAs@{W`<&1En`rhj)>g(9wLL^siDym1cV3Weny}9i ztz@#0Rlc=-Ia?1O*NT`}%|K#$50PW|vd<`$Q7UFZ8_%w00c!`~(cN@*D*NKsIf8v7 z6|j|?&wj*u-V=NwqrOD6B-%^q0z)@9kx}fn&lL~c>~i1jZP{CakD8K#Xdzw9T2!fU zy0QjuS1Cc!CsLH!4q?6|@^q_uC!|r#Z`)bXI_=k%H(B`Eb}eu#)%Lv48uo_t_|0dV z3)YJxruJk>-06VkpNo5EkWP#Gq>r2`3Xr8!EOhhPwyd=DcOf84hgsR+?ICOsmAm7M zr78<-uSh1J@!-l7P`F0 zF1pjjSEJFilZrHQcdW!I@;K=6sYe6%3iqw<7GE8rN!^Mpm}x__@IJ=GzVqV&)Qq=XXW=y8OBM*_RsSTqiKOzStj%HK$v1y*9%**N!mQfPC9MO7dPwfu-G{-Y@^`l3))6+HHP7Q)X_^FAAxJ0wu~TxZ z4_Ilz?=6}4lx%;RJ4TMwy~n17E_qLJJd-&AI=$w;RsIIqAUx6d6%7eu!AmRsU6Wfa z`O~UMRc-q;NK64`&3V_i-IomY(|H&)ZQ<>1$#5^N9nL^k$*rZor6TE5DO~fLD<*15 za<#D`^4hiP8~2fV;|LKqF3l$gH*T_e;{Yo+ZoGQqNKUzN6V)3>h%!%)R_5ttHy5k( z^u)1C;!;DcZ)$pJ4r$!2w#&v7rck%XprhV*?`FBPn#%^cblH5jMHO~*G38uQngT~p$0K+ea1cOt5O}|3l6-zwU>iTK_ z)o#%agHzaD@<(zl*s5YBKg}unL;m0Y4N6cLQ|YTVss3Kl{MV2QNg+>3ub{~iZeZf? z2riq#o(jDTotoku=GGS^t?;L!FU})%rb3&_YWI1y%zIfRqw=PnK<@?@GVsY_aS*H! 
zr%Egi6|U@7hhT*{6@3-u3+amc%L!JfQ&mm2QbE%dh1Tv*RTDn!x>e@eP0s;cc3#T*Eg^b zk&wcgs=5yKnt;hz1Xv+ZMNh@MUBF}_0<18oBL7Au1su~TgB!l#|0$Y=pq-I}bz@sEXclp=km5gONWD=D`}3&as9 zdUKSL>Kvj`2AmARt^=8o0#0Z>RD~m8%K`kMfU~}ZrdUXmO~=?I;RC=nR*>4$A2K79 z9IuC~GGy5SJY2wKjhCq~*>{lieUVgZh^2^r6GWLF?xLH3>tslh`996Ji#BVaP} z0De!vS#K{^;rNts1ivfb$Z0DDObvcV5xj|KP^5=FhXjWxg5OTBhGE(P94TPW+pPej zM>3LyqBugJ1#eV$NZ1dPsa1moEM01+2n&-B;6VhhF8D`9^XCYr=0G7_wTsLT)YoJL zh426Yhu*-5L%?MI1l(W1Wyin~6mb4M0rwMd{od$0E8iP|2;F7c7g1h@ zx0KI~Kr1i(;brKid~O6FJ~z5><#QttF;396adm07teH_Aj0jV-G8pNplpzrDO^bMA zou_Q*lk8kp=v=OgD$?Z(Yjima-GMEsPH#wqnef{@GxHmn+mLl1@ja&1S2tS7-7rXi zyEDMK*g-bFi(p}atrhdh4)`U8Jof|a%kt+e^^wl2aoh9YUxKAy@Pj*xxl0>J=M+4z zV>(!53*|(k`^5r7cAy)GJ#`-+p zg()+4;0TW>KOkr6Czjqdy@OOr0Zgx7qiRJt=u=1AbU1A}yB&7qZs{=hl8fR~rJEZ_ z#oX~D@&BYQu;R{GxXp`|LqOUuGT`}$^Iwgl|Aa>ERgJ8lE{%l4;q;Eu z9>qL-O^^CrX@%533Ls-4l3~lR`fPf1DpmrvM$8Y2sNIHL6pG#<^Snzz@>{3ZmkO2h z;?dX>!guL5JPNaxg5Bb?20Vp#NqDC!;IrA2O>IN{xtDH1<^I|}5?quMN3(J;Tt~kM zz>>xri0jB!;hwX&dO6@g7H>or`Dzx2VTq6Jf^K-&V6zw8;Gki-N>s&w6mo+DazT2$ z5qE2470lK9^>a|#MB?y8WNh?eCn!GFN_W%mj}&nF3>3eZF^^rCvXD(09Oij-GJUMeka^^~J zv+W`VFY`3=C>b^nY|Xn#num>rH+altwKLO}xOiroQDVO>nBv1|+$b9+Gsi6TA|pF( z6kD|jS2P#Eu%;|-icg034_>YxveW!5uzE|iT_Bp+F>Kp101nFA<$AGtc}OI($hSBs zQS7!WJ}LY2((p^iZxw#4@%sY5FY(J@`}0_Dd!S~Tq=Cy$ID8x~O?B5XuY5MhH71`K zbh5`#RwlO^?ke5M!rhdybNH?GK`bJCoxedEMGPQbVRMs$QkLy!_?+D z{%_`oTOIOO8trNAM6E{tikmY1q)u##u}#2&wOuFxFC5W8mAh@{0b3}~`2wvbaN{pdpuY)}Sls+RfFRxvM?W_B z1h_IqpF~;s(5+O)_W})@fI9#lx1E;)g?bQ2JQ^yQ8#ikkp@{;u|1;a0e-0ps{~-Vn zyX*6(09x`PI68RP1bTVK{e%Z+EFPq$d^f?Yn&k~eDk+_hA*yk*eOQk`~ z+3-_dyp`-no{2(2tHs7wqym;%y$?WEZwH14gpTK zO3y<)kU7t03R|VODK!$fuu}dQ2cNoIzpMow_kgL{6Oj8LHTO_nllwq5_x!W<0)^97 zWim_p5Vf2EYB}jY?ts-ZHM_c;{%Se)3z=#;{g`z|_5^KoYG3wa*t#)p?x}s$+`}`% zVGIdhS6%nsYMxS$ia_>j__{8p)Lv?Kj$AaeD3FRl-Hkl~lUddAan>NkzixDD56Tr- zWUIH}uyPHa?rl^t%F$6fE~%lK5%!_0&)b&J)3&OZ9$@NIDC##`IS1O3KNBbov6wRhOfka& z%6|;v@WiQXIV+z56smq4@#kO7nd9A1i$H!<$kq_&_&Jg#R@5+t@54LXd5zv;6%UsC z@iQn4%70OL;?;3?)O#Q!9v$y>kSduC0J}E53F4aNLlDY}zmGNW9f$(`7e~~;nk(0y z&jjFL5d**W&e#E|q(dZiS%8^y@xmqo9ls4UkoUk5H3y514?avy*(QLf-3fL!02Df2 z2sDs;;RucYU+}y^#5&&aXF}%-l;&W9UdQ5hqO|3Rf?LUMVfR9S zs|EN6fh?lymoS`AF3$xz1V_9qeS3i1NUXd#N-*{q<+58u#f)ABn?xZJ922NNj`-db z%m#f9L_PMJ60|-}36_bduw)n}h_cs?R|(+sTiA8`HGmd;IF62tbyMo|@h7Nx2k08# z8)OFlo|T^h3W7S0=!I$$A8(6K93Db}<_=wkJvm601I0rq&>A)$H{d&mhfsVJBR)^_ z=FiH_i9$ zT=V)fC|&BV!uFr=AWR0-Tni#Ui?n~*m-r5YBzattx|w$VBJ5yGV_yFCFKD!(T~aqP zBP#oIE!>8!HM*t4U3^kkB}-+*%iYhS;@H?gDgAV>6gv%lt^8x)E%`4f(U)=kw_ZGR zHx29J?kv~y8;EPlJK+dMeV-IB-uWCE+Oh%Qk=0`)PGe(MM6x>&g}($G@x6;i+h}mi zy8n(SpYs$oQ$$scMHCH_*`nW<<1a%*T^3Q9eN*5qa1WJUM9J=nxE?s-gXIl2tQbGP zqGaz}pd7b}sHk`G0Cl?ybdNy2aCBr;t+I>ZG>f~7tO72Q{CtWLBWqSf=yHMn1{B)} zU|kV^03eX}!x5W6)yu)u1mkP|-X(fGz6Xi{4m4PN2Y~k68%J1sal4vcrY=mRYZg_N zOom=`>`HYWFyd~>mkG3H2;QDQ0^rB|W-qPJUqJ%)R~)h9-2LSaKIICv`~U#d?LNKe1keIU z+(H66UnPlE0>JLaemqA2&jbMd7FJ{6HHlFgU<{WCm?eOt0)VOuqci}Gs|ot6qA-`Q z$a*=NN%nCKnXlijhMu3oGT#jD+yeFSx{Gj7;B*ZN%U-54L?(VhX29sLHUu*3%#C2% zC4Q`BRdY53yGHfOZioe=iBq*qTC^dU*_LjA(yw>+79@%miKLuCIFF+9>W@EVFp4tB z3RcG9k%=QIzT)!gK-PI1_jmo=H~Xdm$wm`g^Hnb_Uc6IE*%_Jb+8LSCZLrPqhBL8q z4N)TDRbN4ru-IACtejBUVrC;mh#f&GG{3?oZaJYg^FJYp)gIT}SbFO;INO%Z4}%jG zrU>zhqIY}MR`q9cPfa0ZogY#mZs%cwLm_U**;kuUh}!{nOoTtMSDR6YTP_y-QT%2Wy#<}; zhJLDDld>5wZ_r4GL~d;8w@74q#+HikKGyir^)>boq19C8OeDu?l!$r0+SZ6%i?B9{E{sc;YyY(!lmq*2$PsS zm*i%!aD?urNx=zoQHWqYVJB7S>fH^dvCf1!H6@o=W5nNzX#_hjlD^A~c~tp#z}Qr@IE4G`jKu)$L<#)In~pGVZdqnQP$@n#_`r1 zl_YsQ2mH#^iE5&<37-VAE=|=$tn-PS3EFX~VE?FKb*9~Q{j~0ZaOn92(n(%bPny<(UpTA}nv9X%DXE0OR#_mMZ 
zZQ`1hwlTn+dSd}B@ixb_Xt3&4GF{>YYN&NVe~_NLLhbzmK|PDaHM6QhXnu=N%3eGl0$A7mC!8mrHXZk3G4JP^c>+LR=EsWc z{JEzC+tkS(v{7C=cJKY%_V8f?kH_T3+U3Pg?LUlI8w(#n+T17mX;rrih*d#iiVl0r zvRS$C>r;qk6uGd>Wl>237Wc_fsDcYoO_;1l0s0LaEvCk{TtYLU5 z5cm|QTx)iRWnDka?Et-=BAC{{mM8Puu-A|A4~a z{#XJvp65QeK;DQO@j?;EADLTUdd{NvkedA^`sV85ebBA$LvB%7 zd0;NQi%wg)cCQzkIx5#9{llyNpu6>yQnZ()zeP$Oe~;7|>3yDKvHqBzUKx8gTKE`X z4Kv#IxAPAgIH0?J%-#R=lO{`#Sm-;*vl21v3Iv%0ZsOSB`UTLmZb8K=mH3Kwh#$NU z85JNIROxqrw6js~pbig2Mp%=*Z(|+4;yx*Iv&l-3mV>rVzl0onyq^nIYr`MC*o@72 z=!`#DK^v&Nqx3s5R^N}Cb58&~?E#VHX5!c|qiCa#^gF>Fc)Av z1S3?UvLr(~nWDng>!va*e^6XB6#j1>roy}glI7n5jq(w<(o-O1KB5EdN5|q-1io8n(9--C`7QmWip-#S_VFooEp7^?k}?f*oS! zZ=-tamAt#9sr8Qd6`Z3%i8=+YvqpvUcjgnU6>$2A5EagJeMhh?VCg{v6^_5KonTC9 z$TiM`ekxp1cQe5n0T+aRrf!&H*9!(ioPbLl!iv5~Tx8uqj$%_CnlQtlx=_H8_vWfF z&6NRvDqx2XY71%H@`MEn@;g`rl%D3uK<5jzMBbt%x1Vw&_!9w3-S99H8zUobsiz@f zo&Y_Y_^9-Y%DAnyQ*_R)_-(`QTi9^7zx0U1w|tOPDjbEIT6oMd4TrOEaya*c`%?E2 zV1d0bKDV8)t+&P4!fjsGn{tG8mo>%uzO^}asXH!}TGm;nTf!_}<{Rc+=45cMn#*w* z6rOP7*u^+pcDU7o?}dxuZbcG6npD{?MbhQp?o=2V)@U*n<;A0JxXv-+KaoLF+S+TmY;a$Pk}be}=W6B)<$Yp4)O`O;Tvk%qNO z$40h%EPBKcs$5sSw@_STBi6Dw)fXL)?HZsq0y}`e#zn9~faKw=-PI1w#?5QXle)P> zdjl$JQAXPUX`#e7;~{W}j+oRwK>9=FO~zuY8j|999Rk#TTGS>$`hsVmWkOs!$oJ!1 zd1UzN)&bH!{t9_1&Lj1mL^ER#rZl3w^}mZtN5Gio>{Q;_(87rAQbkMmvzq>z!*Ge? zwnBPLt0=U0w7axP+99Sx#ACDegA3liQjix%lPbmycfoe&6G_!*!|a&X(DO0_69^&aUoH z-B1@gK;T`@W7221NNiF5~1y~Nwc2Hkt0S)9Y1)Bfu-Ui(Kg!EhE07Fgp z?cVx)rT|U|ASwrZf|CH+^R_rbfvXP2?Y#Hjq@Y?S(1>+~-h2yyAg;#|+TZa|9byn~ z zN7M!1J~r(;&`g3}P=%HOg+twa7^GL`p_>@cmi&=G>4PYkAgt)jMg!Nrq-;h&z`^r( z(cn4$%tk$*BH#lA*Dyt(YzGQuJC0~w9n#-|9>UwcqB2(rw1z~&!pc=sNgn8k9K2+`ig7{GZ5T}Hn1AuK)lX};5Pz|vJz0^z9dFOwNMHU0P;+8jp z20jpudPx^P0gieVop|xI7BH?*{9nLHuim3NKOP26yH9fE&W2{ULQSA%j_fu@Gsn#a zCB-~T-F7YSI}f)a2(9v6DE#=~+uWPFM%drm&4#-V4B zwx3#lTED}v*~}|}hp@j3!7BQ++=u7GaTWHp!1hN{A4QCZ=^dE84=#ek~PS9TId7X5uLJ#eV5Sf&kE5* z+7t=S-Jm1TUGm})ZZxTEt4O6~pTetYES20wO*iVGoy7s+4hZ_P_#8+S2?=;ZUsy5& zZmy(7B_KDwpi(Z-41Cf#x~0mlR`C}~y;$ZV+=E#pP|@QsIA#_qGDxlYTMlpL6NgF% za)*r`o6{Z6K#|Zwy{g!I1b3w48F0{$40nC+4bLyv(QR(7O5{JmjJ9aUVKbMlR7JYx zzv9#)HC9F9|3W5Lb!W^O^GeWtZ=ZwHjTeO$xR+9jjM_=Mi$K zUcgRSyXY}=C>NF3A&JZGs7yv6di|LD88b}9=-H^trJfmGsA>%r$y0uqN&DiNXTMSW z;QdE%`Ccydj?{r@O%LxK#J2J8(gTla|I5O5Y|VSQ9g_2ZD9ry+%6wC!(R8G}&iBI& zcT@L!?mOLM-P_qN*w!$6#O^5OAJ(zEvIBc@#O}*hl^z&6U|TiieV#%O6o{Y)lJ3iB zSfFqa305fYpC}weA*WE@=PMjUfhd&sxt+zrciJ^XX!lZ@jNa{PGI~#WzD~bQ(}U^l zd#Ap6#Kg3}OMiicou(}`N!rEQA=n?s&*P+PnSJliGM*KW+v}Zh1C}}dY^ZBjt-Vhq z*|L}w=<2s|Rn*yg7oKFlAQpTiE3WG0OlkS%Ui|jCTE4med|jQXy9Rs9Irj%b`Q>!( zZw;E+80Y>ju59OicZ0I$kOMpGN`bEH?e>q7pm20Szxs{JR+m0W@#wZ6zap1Mx8g?$ zq~D8#ey?~;gYQktOk+%)OkT!6!0cROoN4T5^fx>M)qI;Fku54NZoiAGyLOgB!^XmrRy6_rot^F`H*uXlRNMoj@4?z8tl_Ayj69=n zLk#Mpe=d&XVW0X#uT{r4&!2Ux^U%QCy-iVzj_fn0n2r?VSK=}qIq>g!n|T?QnjO+& zsP#UE&(_S+7PQS)yUfyN9ppwProXJ;V%{y^CueqWW%tPZr@WgS*Xb$@F^2BO$;J-G zllUIAHXX6%T0d2WZEQ_u=_svOy>Cemk`3>(ycb_$q3gS$%<tb1 zeuim-lO<=Dwh{8NzibH7O7(RtefSBdt3gg_fsWY@aR?5{*iw3gMV6DJx=>cUbqj9Y z)>2o?tv{XLME#6?t0E4@usNsm>a))y4%%GnBMu%guptwldb7M*H@%=03~fFT-|o%% zUTpWUgHD#+7w`7&DHkZ}djG>dENt5iuauteAH?qveurH>-~V~vX3G5cUHT)7BzP-K zl#WEotFcG)iZx^CULBKIe6KK_%cMKfL&w&qw(=uKTEz%msiq^BWVY(UI&bW+8|=l0 zu9flE7}2G<7dEzYl~|61&8Dp3m@ocFA6b5%4TwK7l*5pY^rDZvUY z-0?%43OhF*CRm|`U++Izg`@hNC0L<_ODnFbFntdJE3|OOcZ*awfAs}|6>fOs^Bw;y ztZ>6ae`>12_)e0v$_>u|Wn5ISqTwZi6>@lVlTsDVJbaa4g&dwx27Q%~jz|?n4ic@5 z^jY!0!UF|d(0?1$4-?Cwx$1uyv;9Pc9WLz$S6jd_9~>lDVTWfG-XMLM!U?}l3m70n zIwXY;F73o336Uyno5)PzgJ0i#S%vd=9ihy?2d93of}TQ1+rJgk;DZD9Jil6nqo$XU 
[remaining base85-encoded binary delta omitted — not human-readable]

diff --git a/changelog.d/8386.bugfix b/changelog.d/8386.bugfix
new file mode 100644
index 000000000000..24983a1e950f
--- /dev/null
+++ b/changelog.d/8386.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in v1.20.0 which caused the `synapse_port_db` script to fail.
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index a34bdf18302c..ecca8b6e8f45 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -89,6 +89,7 @@ BOOLEAN_COLUMNS = {
     "redactions": ["have_censored"],
     "room_stats_state": ["is_federatable"],
     "local_media_repository": ["safe_from_quarantine"],
+    "users": ["shadow_banned"],
 }

From ac11fcbbb8ccfeb4c72b5aae9faef28469109277 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 24 Sep 2020 13:24:17 +0100
Subject: [PATCH 052/245] Add EventStreamPosition type (#8388)

The idea is to remove some of the places we pass around `int`, where it can
represent one of two things:

1. the position of an event in the stream; or
2. a token that partitions the stream, used as part of the stream tokens.

The valid operations are then:

1. did a position happen before or after a token;
2. get all events that happened before or after a token; and
3. get all events between two tokens.

(Note that we don't want to allow other operations as we want to change the
tokens to be vector clocks rather than simple ints)
---
 changelog.d/8388.misc                         |  1 +
 synapse/handlers/federation.py                | 16 ++++--
 synapse/handlers/message.py                   |  6 +-
 synapse/handlers/sync.py                      | 10 ++--
 synapse/notifier.py                           | 55 ++++++++++---------
 synapse/replication/tcp/client.py             | 12 +++-
 synapse/storage/databases/main/roommember.py  | 14 +++--
 synapse/storage/persist_events.py             | 14 +++--
 synapse/storage/roommember.py                 |  2 +-
 synapse/types.py                              | 15 +++++
 .../replication/slave/storage/test_events.py  | 12 +++-
 11 files changed, 100 insertions(+), 57 deletions(-)
 create mode 100644 changelog.d/8388.misc

diff --git a/changelog.d/8388.misc b/changelog.d/8388.misc
new file mode 100644
index 000000000000..aaaef88b661e
--- /dev/null
+++ b/changelog.d/8388.misc
@@ -0,0 +1 @@
+Add `EventStreamPosition` type.
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index ea9264e75181..9f773aefa7fa 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -74,6 +74,8 @@ from synapse.types import ( JsonDict, MutableStateMap, + PersistedEventPosition, + RoomStreamToken, StateMap, UserID, get_domain_from_id, @@ -2956,7 +2958,7 @@ async def persist_events_and_notify( ) return result["max_stream_id"] else: - max_stream_id = await self.storage.persistence.persist_events( + max_stream_token = await self.storage.persistence.persist_events( event_and_contexts, backfilled=backfilled ) @@ -2967,12 +2969,12 @@ async def persist_events_and_notify( if not backfilled: # Never notify for backfilled events for event, _ in event_and_contexts: - await self._notify_persisted_event(event, max_stream_id) + await self._notify_persisted_event(event, max_stream_token) - return max_stream_id + return max_stream_token.stream async def _notify_persisted_event( - self, event: EventBase, max_stream_id: int + self, event: EventBase, max_stream_token: RoomStreamToken ) -> None: """Checks to see if notifier/pushers should be notified about the event or not. @@ -2998,9 +3000,11 @@ async def _notify_persisted_event( elif event.internal_metadata.is_outlier(): return - event_stream_id = event.internal_metadata.stream_ordering + event_pos = PersistedEventPosition( + self._instance_name, event.internal_metadata.stream_ordering + ) self.notifier.on_new_room_event( - event, event_stream_id, max_stream_id, extra_users=extra_users + event, event_pos, max_stream_token, extra_users=extra_users ) async def _clean_room_for_join(self, room_id: str) -> None: diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 6ee559fd1ded..ee271e85e551 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1138,7 +1138,7 @@ def is_inviter_member_event(e): if prev_state_ids: raise AuthError(403, "Changing the room create event is forbidden") - event_stream_id, max_stream_id = await self.storage.persistence.persist_event( + event_pos, max_stream_token = await self.storage.persistence.persist_event( event, context=context ) @@ -1149,7 +1149,7 @@ def is_inviter_member_event(e): def _notify(): try: self.notifier.on_new_room_event( - event, event_stream_id, max_stream_id, extra_users=extra_users + event, event_pos, max_stream_token, extra_users=extra_users ) except Exception: logger.exception("Error notifying about new room event") @@ -1161,7 +1161,7 @@ def _notify(): # matters as sometimes presence code can take a while. run_in_background(self._bump_active_time, requester.user) - return event_stream_id + return event_pos.stream async def _bump_active_time(self, user: UserID) -> None: try: diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 9b3a4f638b13..e948efef2e0a 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -967,7 +967,7 @@ async def generate_sync_result( raise NotImplementedError() else: joined_room_ids = await self.get_rooms_for_user_at( - user_id, now_token.room_stream_id + user_id, now_token.room_key ) sync_result_builder = SyncResultBuilder( sync_config, @@ -1916,7 +1916,7 @@ async def _generate_room_entry( raise Exception("Unrecognized rtype: %r", room_builder.rtype) async def get_rooms_for_user_at( - self, user_id: str, stream_ordering: int + self, user_id: str, room_key: RoomStreamToken ) -> FrozenSet[str]: """Get set of joined rooms for a user at the given stream ordering. 
@@ -1942,15 +1942,15 @@ async def get_rooms_for_user_at( # If the membership's stream ordering is after the given stream # ordering, we need to go and work out if the user was in the room # before. - for room_id, membership_stream_ordering in joined_rooms: - if membership_stream_ordering <= stream_ordering: + for room_id, event_pos in joined_rooms: + if not event_pos.persisted_after(room_key): joined_room_ids.add(room_id) continue logger.info("User joined room after current token: %s", room_id) extrems = await self.store.get_forward_extremeties_for_room( - room_id, stream_ordering + room_id, event_pos.stream ) users_in_room = await self.state.get_current_users_in_room(room_id, extrems) if user_id in users_in_room: diff --git a/synapse/notifier.py b/synapse/notifier.py index a8fd3ef886ce..441b3d15e2d5 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -42,7 +42,13 @@ from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import run_as_background_process from synapse.streams.config import PaginationConfig -from synapse.types import Collection, RoomStreamToken, StreamToken, UserID +from synapse.types import ( + Collection, + PersistedEventPosition, + RoomStreamToken, + StreamToken, + UserID, +) from synapse.util.async_helpers import ObservableDeferred, timeout_deferred from synapse.util.metrics import Measure from synapse.visibility import filter_events_for_client @@ -187,7 +193,7 @@ def __init__(self, hs: "synapse.server.HomeServer"): self.store = hs.get_datastore() self.pending_new_room_events = ( [] - ) # type: List[Tuple[int, EventBase, Collection[UserID]]] + ) # type: List[Tuple[PersistedEventPosition, EventBase, Collection[UserID]]] # Called when there are new things to stream over replication self.replication_callbacks = [] # type: List[Callable[[], None]] @@ -246,8 +252,8 @@ def add_replication_callback(self, cb: Callable[[], None]): def on_new_room_event( self, event: EventBase, - room_stream_id: int, - max_room_stream_id: int, + event_pos: PersistedEventPosition, + max_room_stream_token: RoomStreamToken, extra_users: Collection[UserID] = [], ): """ Used by handlers to inform the notifier something has happened @@ -261,16 +267,16 @@ def on_new_room_event( until all previous events have been persisted before notifying the client streams. """ - self.pending_new_room_events.append((room_stream_id, event, extra_users)) - self._notify_pending_new_room_events(max_room_stream_id) + self.pending_new_room_events.append((event_pos, event, extra_users)) + self._notify_pending_new_room_events(max_room_stream_token) self.notify_replication() - def _notify_pending_new_room_events(self, max_room_stream_id: int): + def _notify_pending_new_room_events(self, max_room_stream_token: RoomStreamToken): """Notify for the room events that were queued waiting for a previous event to be persisted. Args: - max_room_stream_id: The highest stream_id below which all + max_room_stream_token: The highest stream_id below which all events have been persisted. 
""" pending = self.pending_new_room_events @@ -279,11 +285,9 @@ def _notify_pending_new_room_events(self, max_room_stream_id: int): users = set() # type: Set[UserID] rooms = set() # type: Set[str] - for room_stream_id, event, extra_users in pending: - if room_stream_id > max_room_stream_id: - self.pending_new_room_events.append( - (room_stream_id, event, extra_users) - ) + for event_pos, event, extra_users in pending: + if event_pos.persisted_after(max_room_stream_token): + self.pending_new_room_events.append((event_pos, event, extra_users)) else: if ( event.type == EventTypes.Member @@ -296,39 +300,38 @@ def _notify_pending_new_room_events(self, max_room_stream_id: int): if users or rooms: self.on_new_event( - "room_key", - RoomStreamToken(None, max_room_stream_id), - users=users, - rooms=rooms, + "room_key", max_room_stream_token, users=users, rooms=rooms, ) - self._on_updated_room_token(max_room_stream_id) + self._on_updated_room_token(max_room_stream_token) - def _on_updated_room_token(self, max_room_stream_id: int): + def _on_updated_room_token(self, max_room_stream_token: RoomStreamToken): """Poke services that might care that the room position has been updated. """ # poke any interested application service. run_as_background_process( - "_notify_app_services", self._notify_app_services, max_room_stream_id + "_notify_app_services", self._notify_app_services, max_room_stream_token ) run_as_background_process( - "_notify_pusher_pool", self._notify_pusher_pool, max_room_stream_id + "_notify_pusher_pool", self._notify_pusher_pool, max_room_stream_token ) if self.federation_sender: - self.federation_sender.notify_new_events(max_room_stream_id) + self.federation_sender.notify_new_events(max_room_stream_token.stream) - async def _notify_app_services(self, max_room_stream_id: int): + async def _notify_app_services(self, max_room_stream_token: RoomStreamToken): try: - await self.appservice_handler.notify_interested_services(max_room_stream_id) + await self.appservice_handler.notify_interested_services( + max_room_stream_token.stream + ) except Exception: logger.exception("Error notifying application services of event") - async def _notify_pusher_pool(self, max_room_stream_id: int): + async def _notify_pusher_pool(self, max_room_stream_token: RoomStreamToken): try: - await self._pusher_pool.on_new_notifications(max_room_stream_id) + await self._pusher_pool.on_new_notifications(max_room_stream_token.stream) except Exception: logger.exception("Error pusher pool of event") diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index e82b9e386f2a..55af3d41ea77 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -29,7 +29,7 @@ EventsStreamEventRow, EventsStreamRow, ) -from synapse.types import UserID +from synapse.types import PersistedEventPosition, RoomStreamToken, UserID from synapse.util.async_helpers import timeout_deferred from synapse.util.metrics import Measure @@ -151,8 +151,14 @@ async def on_rdata( extra_users = () # type: Tuple[UserID, ...] if event.type == EventTypes.Member: extra_users = (UserID.from_string(event.state_key),) - max_token = self.store.get_room_max_stream_ordering() - self.notifier.on_new_room_event(event, token, max_token, extra_users) + + max_token = RoomStreamToken( + None, self.store.get_room_max_stream_ordering() + ) + event_pos = PersistedEventPosition(instance_name, token) + self.notifier.on_new_room_event( + event, event_pos, max_token, extra_users + ) # Notify any waiting deferreds. 
The list is ordered by position so we # just iterate through the list until we reach a position that is diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 4fa8767b012e..86ffe2479ed7 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -13,7 +13,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import logging from typing import TYPE_CHECKING, Dict, FrozenSet, Iterable, List, Optional, Set @@ -37,7 +36,7 @@ ProfileInfo, RoomsForUser, ) -from synapse.types import Collection, get_domain_from_id +from synapse.types import Collection, PersistedEventPosition, get_domain_from_id from synapse.util.async_helpers import Linearizer from synapse.util.caches import intern_string from synapse.util.caches.descriptors import _CacheContext, cached, cachedList @@ -387,7 +386,7 @@ def _get_rooms_for_user_with_stream_ordering_txn( # for rooms the server is participating in. if self._current_state_events_membership_up_to_date: sql = """ - SELECT room_id, e.stream_ordering + SELECT room_id, e.instance_name, e.stream_ordering FROM current_state_events AS c INNER JOIN events AS e USING (room_id, event_id) WHERE @@ -397,7 +396,7 @@ def _get_rooms_for_user_with_stream_ordering_txn( """ else: sql = """ - SELECT room_id, e.stream_ordering + SELECT room_id, e.instance_name, e.stream_ordering FROM current_state_events AS c INNER JOIN room_memberships AS m USING (room_id, event_id) INNER JOIN events AS e USING (room_id, event_id) @@ -408,7 +407,12 @@ def _get_rooms_for_user_with_stream_ordering_txn( """ txn.execute(sql, (user_id, Membership.JOIN)) - return frozenset(GetRoomsForUserWithStreamOrdering(*row) for row in txn) + return frozenset( + GetRoomsForUserWithStreamOrdering( + room_id, PersistedEventPosition(instance, stream_id) + ) + for room_id, instance, stream_id in txn + ) async def get_users_server_still_shares_room_with( self, user_ids: Collection[str] diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py index d89f6ed12868..603cd7d825e0 100644 --- a/synapse/storage/persist_events.py +++ b/synapse/storage/persist_events.py @@ -31,7 +31,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.databases import Databases from synapse.storage.databases.main.events import DeltaState -from synapse.types import Collection, StateMap +from synapse.types import Collection, PersistedEventPosition, RoomStreamToken, StateMap from synapse.util.async_helpers import ObservableDeferred from synapse.util.metrics import Measure @@ -190,6 +190,7 @@ def __init__(self, hs, stores: Databases): self.persist_events_store = stores.persist_events self._clock = hs.get_clock() + self._instance_name = hs.get_instance_name() self.is_mine_id = hs.is_mine_id self._event_persist_queue = _EventPeristenceQueue() self._state_resolution_handler = hs.get_state_resolution_handler() @@ -198,7 +199,7 @@ async def persist_events( self, events_and_contexts: List[Tuple[EventBase, EventContext]], backfilled: bool = False, - ) -> int: + ) -> RoomStreamToken: """ Write events to the database Args: @@ -228,11 +229,11 @@ async def persist_events( defer.gatherResults(deferreds, consumeErrors=True) ) - return self.main_store.get_current_events_token() + return RoomStreamToken(None, self.main_store.get_current_events_token()) async def 
persist_event( self, event: EventBase, context: EventContext, backfilled: bool = False - ) -> Tuple[int, int]: + ) -> Tuple[PersistedEventPosition, RoomStreamToken]: """ Returns: The stream ordering of `event`, and the stream ordering of the @@ -247,7 +248,10 @@ async def persist_event( await make_deferred_yieldable(deferred) max_persisted_id = self.main_store.get_current_events_token() - return (event.internal_metadata.stream_ordering, max_persisted_id) + event_stream_id = event.internal_metadata.stream_ordering + + pos = PersistedEventPosition(self._instance_name, event_stream_id) + return pos, RoomStreamToken(None, max_persisted_id) def _maybe_start_persisting(self, room_id: str): async def persisting_queue(item): diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 8c4a83a840a2..f152f63321f4 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -25,7 +25,7 @@ ) GetRoomsForUserWithStreamOrdering = namedtuple( - "_GetRoomsForUserWithStreamOrdering", ("room_id", "stream_ordering") + "_GetRoomsForUserWithStreamOrdering", ("room_id", "event_pos") ) diff --git a/synapse/types.py b/synapse/types.py index a6fc7df22c30..ec39f9e1e885 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -495,6 +495,21 @@ def copy_and_replace(self, key, new_value) -> "StreamToken": StreamToken.START = StreamToken.from_string("s0_0") +@attr.s(slots=True, frozen=True) +class PersistedEventPosition: + """Position of a newly persisted event with instance that persisted it. + + This can be used to test whether the event is persisted before or after a + RoomStreamToken. + """ + + instance_name = attr.ib(type=str) + stream = attr.ib(type=int) + + def persisted_after(self, token: RoomStreamToken) -> bool: + return token.stream < self.stream + + class ThirdPartyInstanceID( namedtuple("ThirdPartyInstanceID", ("appservice_id", "network_id")) ): diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index bc578411d69e..c0ee1cfbd6f2 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -20,6 +20,7 @@ from synapse.handlers.room import RoomEventSource from synapse.replication.slave.storage.events import SlavedEventStore from synapse.storage.roommember import RoomsForUser +from synapse.types import PersistedEventPosition from tests.server import FakeTransport @@ -204,10 +205,14 @@ def test_get_rooms_for_user_with_stream_ordering(self): type="m.room.member", sender=USER_ID_2, key=USER_ID_2, membership="join" ) self.replicate() + + expected_pos = PersistedEventPosition( + "master", j2.internal_metadata.stream_ordering + ) self.check( "get_rooms_for_user_with_stream_ordering", (USER_ID_2,), - {(ROOM_ID, j2.internal_metadata.stream_ordering)}, + {(ROOM_ID, expected_pos)}, ) def test_get_rooms_for_user_with_stream_ordering_with_multi_event_persist(self): @@ -293,9 +298,10 @@ def test_get_rooms_for_user_with_stream_ordering_with_multi_event_persist(self): # the membership change is only any use to us if the room is in the # joined_rooms list. 
if membership_changes: - self.assertEqual( - joined_rooms, {(ROOM_ID, j2.internal_metadata.stream_ordering)} + expected_pos = PersistedEventPosition( + "master", j2.internal_metadata.stream_ordering ) + self.assertEqual(joined_rooms, {(ROOM_ID, expected_pos)}) event_id = 0 From 6fdf5775939100121ad9e6e3a8cb21192a5444d6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 24 Sep 2020 13:43:49 +0100 Subject: [PATCH 053/245] Add new sequences to port DB script (#8387) --- changelog.d/8387.feature | 1 + scripts/synapse_port_db | 24 ++++++++++++++++++++++++ 2 files changed, 25 insertions(+) create mode 100644 changelog.d/8387.feature diff --git a/changelog.d/8387.feature b/changelog.d/8387.feature new file mode 100644 index 000000000000..b363e929ea8c --- /dev/null +++ b/changelog.d/8387.feature @@ -0,0 +1 @@ +Add experimental support for sharding event persister. diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index ecca8b6e8f45..684a518b8e5f 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -628,6 +628,7 @@ class Porter(object): self.progress.set_state("Setting up sequence generators") await self._setup_state_group_id_seq() await self._setup_user_id_seq() + await self._setup_events_stream_seqs() self.progress.done() except Exception as e: @@ -804,6 +805,29 @@ class Porter(object): return self.postgres_store.db_pool.runInteraction("setup_user_id_seq", r) + def _setup_events_stream_seqs(self): + def r(txn): + txn.execute("SELECT MAX(stream_ordering) FROM events") + curr_id = txn.fetchone()[0] + if curr_id: + next_id = curr_id + 1 + txn.execute( + "ALTER SEQUENCE events_stream_seq RESTART WITH %s", (next_id,) + ) + + txn.execute("SELECT -MIN(stream_ordering) FROM events") + curr_id = txn.fetchone()[0] + if curr_id: + next_id = curr_id + 1 + txn.execute( + "ALTER SEQUENCE events_backfill_stream_seq RESTART WITH %s", + (next_id,), + ) + + return self.postgres_store.db_pool.runInteraction( + "_setup_events_stream_seqs", r + ) + ############################################## # The following is simply UI stuff From 11c9e17738277958f66d18015bf0e68f2c03bb8b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 24 Sep 2020 15:47:20 +0100 Subject: [PATCH 054/245] Add type annotations to SimpleHttpClient (#8372) --- changelog.d/8372.misc | 1 + synapse/appservice/api.py | 2 +- synapse/http/client.py | 187 ++++++++++++------ synapse/rest/media/v1/preview_url_resource.py | 14 +- 4 files changed, 143 insertions(+), 61 deletions(-) create mode 100644 changelog.d/8372.misc diff --git a/changelog.d/8372.misc b/changelog.d/8372.misc new file mode 100644 index 000000000000..a56e36de4be1 --- /dev/null +++ b/changelog.d/8372.misc @@ -0,0 +1 @@ +Add type annotations to `SimpleHttpClient`. 
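Before the annotated methods themselves, a short sketch (not part of the patch) of the calling convention the new aliases describe. It assumes an existing `SimpleHttpClient` instance — for example one obtained from the homeserver via `hs.get_simple_http_client()` — and the URL, query parameter and header values are invented.

    # Sketch of a call against the newly annotated SimpleHttpClient API.
    from typing import Any

    async def fetch_versions(client) -> Any:
        # `args` matches the QueryParams alias (a mapping used to build the
        # query string); `headers` matches RawHeaders (name -> list of values).
        return await client.get_json(
            "https://example.com/_matrix/client/versions",
            args={"format": "json"},
            headers={b"Accept": [b"application/json"]},
        )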
diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 1514c0f69142..c526c28b9307 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -178,7 +178,7 @@ async def _get() -> Optional[JsonDict]: urllib.parse.quote(protocol), ) try: - info = await self.get_json(uri, {}) + info = await self.get_json(uri) if not _is_valid_3pe_metadata(info): logger.warning( diff --git a/synapse/http/client.py b/synapse/http/client.py index 13fcab337846..4694adc400b8 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -17,6 +17,18 @@ import logging import urllib from io import BytesIO +from typing import ( + Any, + BinaryIO, + Dict, + Iterable, + List, + Mapping, + Optional, + Sequence, + Tuple, + Union, +) import treq from canonicaljson import encode_canonical_json @@ -37,6 +49,7 @@ from twisted.web.client import Agent, HTTPConnectionPool, readBody from twisted.web.http import PotentialDataLoss from twisted.web.http_headers import Headers +from twisted.web.iweb import IResponse from synapse.api.errors import Codes, HttpResponseException, SynapseError from synapse.http import ( @@ -57,6 +70,19 @@ "synapse_http_client_responses", "", ["method", "code"] ) +# the type of the headers list, to be passed to the t.w.h.Headers. +# Actually we can mix str and bytes keys, but Mapping treats 'key' as invariant so +# we simplify. +RawHeaders = Union[Mapping[str, "RawHeaderValue"], Mapping[bytes, "RawHeaderValue"]] + +# the value actually has to be a List, but List is invariant so we can't specify that +# the entries can either be Lists or bytes. +RawHeaderValue = Sequence[Union[str, bytes]] + +# the type of the query params, to be passed into `urlencode` +QueryParamValue = Union[str, bytes, Iterable[Union[str, bytes]]] +QueryParams = Union[Mapping[str, QueryParamValue], Mapping[bytes, QueryParamValue]] + def check_against_blacklist(ip_address, ip_whitelist, ip_blacklist): """ @@ -285,13 +311,26 @@ def __getattr__(_self, attr): ip_blacklist=self._ip_blacklist, ) - async def request(self, method, uri, data=None, headers=None): + async def request( + self, + method: str, + uri: str, + data: Optional[bytes] = None, + headers: Optional[Headers] = None, + ) -> IResponse: """ Args: - method (str): HTTP method to use. - uri (str): URI to query. - data (bytes): Data to send in the request body, if applicable. - headers (t.w.http_headers.Headers): Request headers. + method: HTTP method to use. + uri: URI to query. + data: Data to send in the request body, if applicable. + headers: Request headers. + + Returns: + Response object, once the headers have been read. + + Raises: + RequestTimedOutError if the request times out before the headers are read + """ # A small wrapper around self.agent.request() so we can easily attach # counters to it @@ -324,6 +363,8 @@ async def request(self, method, uri, data=None, headers=None): headers=headers, **self._extra_treq_args ) + # we use our own timeout mechanism rather than treq's as a workaround + # for https://twistedmatrix.com/trac/ticket/9534. 
request_deferred = timeout_deferred( request_deferred, 60, @@ -353,18 +394,26 @@ async def request(self, method, uri, data=None, headers=None): set_tag("error_reason", e.args[0]) raise - async def post_urlencoded_get_json(self, uri, args={}, headers=None): + async def post_urlencoded_get_json( + self, + uri: str, + args: Mapping[str, Union[str, List[str]]] = {}, + headers: Optional[RawHeaders] = None, + ) -> Any: """ Args: - uri (str): - args (dict[str, str|List[str]]): query params - headers (dict[str|bytes, List[str|bytes]]|None): If not None, a map from - header name to a list of values for that header + uri: uri to query + args: parameters to be url-encoded in the body + headers: a map from header name to a list of values for that header Returns: - object: parsed json + parsed json Raises: + RequestTimedOutException: if there is a timeout before the response headers + are received. Note there is currently no timeout on reading the response + body. + HttpResponseException: On a non-2xx HTTP response. ValueError: if the response was not JSON @@ -398,19 +447,24 @@ async def post_urlencoded_get_json(self, uri, args={}, headers=None): response.code, response.phrase.decode("ascii", errors="replace"), body ) - async def post_json_get_json(self, uri, post_json, headers=None): + async def post_json_get_json( + self, uri: str, post_json: Any, headers: Optional[RawHeaders] = None + ) -> Any: """ Args: - uri (str): - post_json (object): - headers (dict[str|bytes, List[str|bytes]]|None): If not None, a map from - header name to a list of values for that header + uri: URI to query. + post_json: request body, to be encoded as json + headers: a map from header name to a list of values for that header Returns: - object: parsed json + parsed json Raises: + RequestTimedOutException: if there is a timeout before the response headers + are received. Note there is currently no timeout on reading the response + body. + HttpResponseException: On a non-2xx HTTP response. ValueError: if the response was not JSON @@ -440,21 +494,22 @@ async def post_json_get_json(self, uri, post_json, headers=None): response.code, response.phrase.decode("ascii", errors="replace"), body ) - async def get_json(self, uri, args={}, headers=None): - """ Gets some json from the given URI. + async def get_json( + self, uri: str, args: QueryParams = {}, headers: Optional[RawHeaders] = None, + ) -> Any: + """Gets some json from the given URI. Args: - uri (str): The URI to request, not including query parameters - args (dict): A dictionary used to create query strings, defaults to - None. - **Note**: The value of each key is assumed to be an iterable - and *not* a string. - headers (dict[str|bytes, List[str|bytes]]|None): If not None, a map from - header name to a list of values for that header + uri: The URI to request, not including query parameters + args: A dictionary used to create query string + headers: a map from header name to a list of values for that header Returns: - Succeeds when we get *any* 2xx HTTP response, with the - HTTP body as JSON. + Succeeds when we get a 2xx HTTP response, with the HTTP body as JSON. Raises: + RequestTimedOutException: if there is a timeout before the response headers + are received. Note there is currently no timeout on reading the response + body. + HttpResponseException On a non-2xx HTTP response. 
ValueError: if the response was not JSON @@ -466,22 +521,27 @@ async def get_json(self, uri, args={}, headers=None): body = await self.get_raw(uri, args, headers=headers) return json_decoder.decode(body.decode("utf-8")) - async def put_json(self, uri, json_body, args={}, headers=None): - """ Puts some json to the given URI. + async def put_json( + self, + uri: str, + json_body: Any, + args: QueryParams = {}, + headers: RawHeaders = None, + ) -> Any: + """Puts some json to the given URI. Args: - uri (str): The URI to request, not including query parameters - json_body (dict): The JSON to put in the HTTP body, - args (dict): A dictionary used to create query strings, defaults to - None. - **Note**: The value of each key is assumed to be an iterable - and *not* a string. - headers (dict[str|bytes, List[str|bytes]]|None): If not None, a map from - header name to a list of values for that header + uri: The URI to request, not including query parameters + json_body: The JSON to put in the HTTP body, + args: A dictionary used to create query strings + headers: a map from header name to a list of values for that header Returns: - Succeeds when we get *any* 2xx HTTP response, with the - HTTP body as JSON. + Succeeds when we get a 2xx HTTP response, with the HTTP body as JSON. Raises: + RequestTimedOutException: if there is a timeout before the response headers + are received. Note there is currently no timeout on reading the response + body. + HttpResponseException On a non-2xx HTTP response. ValueError: if the response was not JSON @@ -513,21 +573,23 @@ async def put_json(self, uri, json_body, args={}, headers=None): response.code, response.phrase.decode("ascii", errors="replace"), body ) - async def get_raw(self, uri, args={}, headers=None): - """ Gets raw text from the given URI. + async def get_raw( + self, uri: str, args: QueryParams = {}, headers: Optional[RawHeaders] = None + ) -> bytes: + """Gets raw text from the given URI. Args: - uri (str): The URI to request, not including query parameters - args (dict): A dictionary used to create query strings, defaults to - None. - **Note**: The value of each key is assumed to be an iterable - and *not* a string. - headers (dict[str|bytes, List[str|bytes]]|None): If not None, a map from - header name to a list of values for that header + uri: The URI to request, not including query parameters + args: A dictionary used to create query strings + headers: a map from header name to a list of values for that header Returns: - Succeeds when we get *any* 2xx HTTP response, with the + Succeeds when we get a 2xx HTTP response, with the HTTP body as bytes. Raises: + RequestTimedOutException: if there is a timeout before the response headers + are received. Note there is currently no timeout on reading the response + body. + HttpResponseException on a non-2xx HTTP response. """ if len(args): @@ -552,16 +614,29 @@ async def get_raw(self, uri, args={}, headers=None): # XXX: FIXME: This is horribly copy-pasted from matrixfederationclient. # The two should be factored out. - async def get_file(self, url, output_stream, max_size=None, headers=None): + async def get_file( + self, + url: str, + output_stream: BinaryIO, + max_size: Optional[int] = None, + headers: Optional[RawHeaders] = None, + ) -> Tuple[int, Dict[bytes, List[bytes]], str, int]: """GETs a file from a given URL Args: - url (str): The URL to GET - output_stream (file): File to write the response body to. 
- headers (dict[str|bytes, List[str|bytes]]|None): If not None, a map from - header name to a list of values for that header + url: The URL to GET + output_stream: File to write the response body to. + headers: A map from header name to a list of values for that header Returns: - A (int,dict,string,int) tuple of the file length, dict of the response + A tuple of the file length, dict of the response headers, absolute URI of the response and HTTP response code. + + Raises: + RequestTimedOutException: if there is a timeout before the response headers + are received. Note there is currently no timeout on reading the response + body. + + SynapseError: if the response is not a 2xx, the remote file is too large, or + another exception happens during the download. """ actual_headers = {b"User-Agent": [self.user_agent]} diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 987765e8770f..dce6c4d168e4 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -450,7 +450,7 @@ async def _get_oembed_content(self, endpoint: str, url: str) -> OEmbedResult: logger.warning("Error downloading oEmbed metadata from %s: %r", url, e) raise OEmbedError() from e - async def _download_url(self, url, user): + async def _download_url(self, url: str, user): # TODO: we should probably honour robots.txt... except in practice # we're most likely being explicitly triggered by a human rather than a # bot, so are we really a robot? @@ -460,7 +460,7 @@ async def _download_url(self, url, user): file_info = FileInfo(server_name=None, file_id=file_id, url_cache=True) # If this URL can be accessed via oEmbed, use that instead. - url_to_download = url + url_to_download = url # type: Optional[str] oembed_url = self._get_oembed_url(url) if oembed_url: # The result might be a new URL to download, or it might be HTML content. @@ -520,9 +520,15 @@ async def _download_url(self, url, user): # FIXME: we should calculate a proper expiration based on the # Cache-Control and Expire headers. But for now, assume 1 hour. expires = ONE_HOUR - etag = headers["ETag"][0] if "ETag" in headers else None + etag = ( + headers[b"ETag"][0].decode("ascii") if b"ETag" in headers else None + ) else: - html_bytes = oembed_result.html.encode("utf-8") # type: ignore + # we can only get here if we did an oembed request and have an oembed_result.html + assert oembed_result.html is not None + assert oembed_url is not None + + html_bytes = oembed_result.html.encode("utf-8") with self.media_storage.store_into_file(file_info) as (f, fname, finish): f.write(html_bytes) await finish() From 3f4a2a7064f79e77deaed8be96668020abef3c9d Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 24 Sep 2020 16:24:08 +0100 Subject: [PATCH 055/245] Hotfix: disable autoescape by default when rendering Jinja2 templates (#8394) #8037 changed the default `autoescape` option when rendering Jinja2 templates from `False` to `True`. This caused some bugs, noticeably around redirect URLs being escaped in SAML2 auth confirmation templates, causing those URLs to break for users. This change returns the previous behaviour as it stood. We may want to look at each template individually and see whether autoescaping is a good idea at some point, but for now lets just fix the breakage. 
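As a standalone illustration of that breakage (plain Jinja2, not Synapse code, and not part of this patch): with autoescaping enabled, an ampersand in a redirect URL is rendered as `&amp;`, which is enough to break the URL for the user. The template string and URL below are invented for the example.

    # Effect of Jinja2's autoescape flag on a URL substituted into a template.
    import jinja2

    template_src = "Continue to {{ redirect_url }}"
    url = "https://example.com/complete?session=abc&next=/home"

    print(jinja2.Environment(autoescape=True).from_string(template_src).render(redirect_url=url))
    # Continue to https://example.com/complete?session=abc&amp;next=/home

    print(jinja2.Environment(autoescape=False).from_string(template_src).render(redirect_url=url))
    # Continue to https://example.com/complete?session=abc&next=/home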
--- changelog.d/8394.bugfix | 1 + synapse/config/_base.py | 10 ++++++++-- synapse/config/saml2_config.py | 4 +++- 3 files changed, 12 insertions(+), 3 deletions(-) create mode 100644 changelog.d/8394.bugfix diff --git a/changelog.d/8394.bugfix b/changelog.d/8394.bugfix new file mode 100644 index 000000000000..0ac1eeca0a2f --- /dev/null +++ b/changelog.d/8394.bugfix @@ -0,0 +1 @@ +Fix URLs being accidentally escaped in Jinja2 templates. Broke in v1.20.0. \ No newline at end of file diff --git a/synapse/config/_base.py b/synapse/config/_base.py index ad5ab6ad62ac..f8ab8e38df8a 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -194,7 +194,10 @@ def read_file(cls, file_path, config_name): return file_stream.read() def read_templates( - self, filenames: List[str], custom_template_directory: Optional[str] = None, + self, + filenames: List[str], + custom_template_directory: Optional[str] = None, + autoescape: bool = False, ) -> List[jinja2.Template]: """Load a list of template files from disk using the given variables. @@ -210,6 +213,9 @@ def read_templates( custom_template_directory: A directory to try to look for the templates before using the default Synapse template directory instead. + autoescape: Whether to autoescape variables before inserting them into the + template. + Raises: ConfigError: if the file's path is incorrect or otherwise cannot be read. @@ -233,7 +239,7 @@ def read_templates( search_directories.insert(0, custom_template_directory) loader = jinja2.FileSystemLoader(search_directories) - env = jinja2.Environment(loader=loader, autoescape=True) + env = jinja2.Environment(loader=loader, autoescape=autoescape) # Update the environment with our custom filters env.filters.update( diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py index cc7401888b24..755478e2ffb9 100644 --- a/synapse/config/saml2_config.py +++ b/synapse/config/saml2_config.py @@ -169,8 +169,10 @@ def read_config(self, config, **kwargs): saml2_config.get("saml_session_lifetime", "15m") ) + # We enable autoescape here as the message may potentially come from a + # remote resource self.saml2_error_html_template = self.read_templates( - ["saml_error.html"], saml2_config.get("template_dir") + ["saml_error.html"], saml2_config.get("template_dir"), autoescape=True )[0] def _default_saml_config_dict( From f3e5c2e702fb2bb5c59d354b92dec3a46f4dc962 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 24 Sep 2020 08:13:55 -0400 Subject: [PATCH 056/245] Mark the shadow_banned column as boolean in synapse_port_db. 
 (#8386)

---
 .buildkite/test_db.db   | Bin 18825216 -> 19279872 bytes
 changelog.d/8386.bugfix |   1 +
 scripts/synapse_port_db |   1 +
 3 files changed, 2 insertions(+)
 create mode 100644 changelog.d/8386.bugfix

diff --git a/.buildkite/test_db.db b/.buildkite/test_db.db
index f20567ba73e97bf2568a9577efb0f132d66c429c..361369a581771bed36692a848aa396df96ad59d9 100644
GIT binary patch
delta 168113
z*W9q;t0q0iavHq;u}D5U)rJ5$CDV119@F-*x-g51vq9frw~?nu9w`}4bG*g09jVQR z0M-NrIFmyPU_#b?I)P`mS|A39xiE>P{w3rigmzfW7-L=XqUX>=PIfF`u4%VB+gIImQX^u^O3TlSc zmrpFWkyfu5kG_3JdLbaf+SMXy9h^1-x_qzl@bXNx`S(UB-)4!_5SXE=HsxhCcv8amUKV^g-aIL>FFecODjU~FvG1E(Win-yD`FwO+@zm%fGWOS2+2M=yF+Iz1-*`FE_eA!Wwv! z#F&novQ3ilB^he#IU_oC+BFkN{;q#LuW9+)BnvUF_p$+Rsw8Ql+7P%tVp=e^A z>sfgca-GDZ)V;1pfUQ=+ln#2X8YeT^#9yQbaJ`%;61 z0@?y+;pVtuP`UIc+a)EcL&|Q8b`_*cCSYyS!}cvN-UetNbouZHp(e7t;3Hv39!=A} zULA}&KJ-_K-@$9>byWE1N`;B5gLlz0gsJuJ%pfyer*lONBU~ito$=#>bP5_^%15oY z*=QL0gg4RsE$Mb4X;2aq&5-Y1$Cl9_sD1UNSYS4%pnXq$fRni-pwzS@p&lfI+%*o; z@un7B4+BjdVD^%m)_wObhYNn!5ap7n=S5D@eI4GSm%7At5Pz4%7jFHbhGBIFGOec? zyQwoh)i4t6HZ3*IlkQ1dBv!g;{0O?5#(3zY)vZoKR!yW?#e!z@Mbz56rw@uz4Y8m< zB)4Z(revs$3#dHgs294u&EFzG3z^XV(vQ7S{g@~hS!Htj%iJGw>&|?_nC2Pp8EcJ+ zQa$YLjL}!?e$;-VU8i|LGh6+V+Eq1LyerfR8QgL%oZZWOqN;^tjD*S?RSBrX>NZdQ zW@Yz!R=ynKwwmWBR7^qb$8|oaXQW$Slp5i7LH;h%4Yu}xb0X>!?RHT!sbZYUJ%0#? zQh#CHQH|BD8;TBaQzMr^H-GsJtD6feCkMKn;N;Q0+;X^6F>XSKo;QtWOczXBOygm^ ze(JghuGe3TY%*$^DI_A#=MYgQ;)kkzVw&(Q z|2sdIdqP@lSfzhkpKOZKJ!^d4hznK3q5!kIhro)=Dm>sV<8{vh)XdFQ_o-N}avvRn zTEGjVOfkF0b~O}O{wVKLu}tkA62eu)k};+$wd>!O--nDFmO5dWbwcXk^kLSnBx|S= z&Cey{c!ZUxQvN1WKcJvzPwGtQ3wW=hqeHcNRJO9aepD!xlb@@kf%(kRils2-g>dDT z{KDM4<&=TiF0zzhF*tAJI#DdYq4qtlu2KJq&zdn-$seVLI;6;37IjY%Hhwyb=_5M!#6U_MiVkc8Jmsc zq-)YrNn_Y;h|r(aXXx6%rp##{)rM$f&0zJr>W5T!RGU<8VuKhioDx#`clb$M8#kZ5 z%dSNhTj&7O8rHa=Tog16Oax#8jB1M9i@C)?s9~*pu-usK{+_X_*uARQqpH}ms@SWl z*!xtmk5J5}A+gP;CyHwG@kL|We7sOTM1#f8#KYn;*bj9T+J$4n8s#%u7|ow#Bbhe- z2+RZ9PI$-2SKE9(<~U(AN-at1jRwBz6OP=k`6O^Ei6SknJ`wWKYd%>pfB6$bF^GN5 zCq^#1?z5TW^y4U(coDc}eDCuM%Sp>A^xpBo;qr+eeO`lQ$*JW@XwpH?@aj)*`oPy0 zzJBtjH~qt0*w~$-b;!<1)inEsYW-X-NLQF_{M1ZG$2yAD@{1a)zX;CZosqBRfDgb_}oCm*m zDo4TZvz1}++ffN)q~+)J3t$k-UOxuL>=Wyw8Adf4uS?WhD@uTRw_-fQXj?0K!)w8} zqC5Qhl`mr$eMb2d7_Fkq;caGQ%Dv(DX?VSHXJ^2$`Nlx_%f%mrGqFrHlVh3*Z}GFn zg+?FgZE2;{3;YzS4T<`1^gHxJbU*11>c;60*$x&E%gd@nCcVN!>T0l zy0}vuD*P-QfQ@aAZ-Q-Y6L%IiwSCyP*_CWB<_c2{V^>>0Kjc?2(;H>E?e2|UA936l zbNf-)%vDtAQ%xsCCz7C8|k-XoLKm5}kshDPhmW zqi676*7{nf#|}wkewP~2^4x9$ha^D^k@G7{DnB`F&3ihZawXS&f;i3j^6ft z$^{w6L-Zva{W87U1r>d>BM2EXu&z5*N^}N}7I)k0f{tS5=Q4mfOMh62PTvMaSvR&C zO6GG2s5shKx{*dXHk;-%Tkmg1KltqqM9XeMKCO#^Vc!IhcDe%i{cl3kk*KsDqPGXS zSu1=ag*LMaAL#HF5&_WuCM+iq?)69g@A*&TzTC+!VD8QpgKqT+83xS;xO z`vcK^EU3o&pao^PK{Vr9t}mK-3k3Q92y%D0?e;;(e%#R;6?~QJgQDj_GG9;NyXFQ` zF5iHf5O3}XVkh9-k45djtOMD1t1L+G158!jfhofsDrDC!5OxJeGe19&Fl?;^&x=1( z#hBKa7~?vao2)V1GpsRa^&7#7aidPF-3Xhn8#P+>Mlb|FtP;g4Q6*FfD!z(WaaEj( ztzuOu_3Vd()ocAl+q674l-lkGCc86Vd!UYW{w8#}&>EqiUzA_C){oNJT3?_?9tL`r z)`UXb{1v({r8i}$15u0jArI7;rS3M;-0 zHgBj2?e(=w^`i9DJ3w!Ye9WUR%GZP<_xLGFJr#1ScOa?>-ROZbUrsQgsK$( zFcjqFEX`Z%LR3;(rf)G6X}-#Y+ISF5+QHI=Zkx-NgXXAYE5`8nx~*%tEK{)+8_f&K<~Tuyr~otm@+BpYNB1ONJ-~M z*h!!Ffd<0$U(N`a=80@t(3>5V0S$25WI~(2g(A`BUCRRp%K5`Hn2L?=XBW#+vE|>p zD;l$I*eimiq|{IBq@rlg2mx!XvIbOH1FNh-Ro37tYe0rCmSrqUb}$*!Xd9*7$%XO1r*yRljVC9Ti0+|Rjn!xlU2px_p^2io-DpD9%$Qya0_}aDVcWXvdIt9*S!#7Mp;wr@*kq~ z)PTN9`jqehGm@s->60F$7r&ceM*V;9qvUTQrPo&UP}0|+I5S%Nr=325(y>X0=C4ui zL&uJ?@e1ilc&kI!L(z}-?Wx8&lRQwvMR@<;9+IeNwNZNeh(smn&9nBTna(67>5&ok zq#2Z6J85V#>e!uNLD^}EN?nb0kVc`~r+O>8#!$3mT!Ny@XN+A|x-&`9wRb{-l615} zudtW!=2^ScQO+bK>F~;UC21O^TOKngT3?=Ompam!q$K_Ff<5U7N-uAZRgzwwX-_)b znWQA$+8(259Y*QxlZTE%Yrl(C(4iD9afu-UJy1$+tkQ-9XNfKr(OOaO1~2TFFhi^`A7`B z)Z}P7UYu$Rhi!iS^XxAt#fVZ?u45xsao)p(|M4Rmg@`BB^|9~j*r{mUFjz%e3=ti? 
[GIT binary patch: base85-encoded binary data (literal/delta blocks, delta 64648) omitted]
zuet4~S05z_s#f(uoL{Z4hPXU8TJ0ZkkQ_as2lPE_kAuK3?cJdw%%043N`_U7>s_8h zU)qZZ*v^C`6C04X>@H9FZTsUKSdVscabj-$d3&;F(zo{i5NJQr$tCi=lGUXgw7lFe z-t+4B_P6k@bJ3oRFYLm7om}fb{n3uUQ2d2?KK;=V>qFAE2~vNvR+dJBc!Oi(!>G6JM-<9szlKS?O;GC@I# z-Eomnd^O&xA(sCSR~$-2*xFMlOj6RN5W0A8Av;-h!^Bp5YUUpCdv_&xg{U8i&;?-@SuVT}Vl%D3kfz zby#hXxV%^|cfX6GG5Jcf1NZ6qw;V)*%yB4uGx(t-?;eI@<)aLxt2lYo7Y^JXh9H$t zM%T>>C*HlTm)~Tlo1*ofW7_iFUs~lZUph!<`D+TwbI?CX&PD2gI~h6~?0Hqv%Rgc` zUVa6s1df12yFBVET3jcjxc7~E!zv#}Z>qeC0qx~B6bVjgvC2`mY0h6MZ2hrTYY~W- zhar{Z!zL#QmcK`ToRBj7VS87NIuyc9EtFfZoni3ckkbaKZ+NWo)~{(^!;wnR`13Io zK)>k`<=+_k8d3)&`Aas~;(1mtk3~6WoEVeK_eZhNX1~Y~kWNv_S`ID6{ruq|sE9g* zbsXl@3%!T&69U1Oe=P{APT{*L;#&t}VN!aKFJ_nF+D^@Rmj^Q|Yst*V)Eh zI3Mz-CFx<6Ia-uoLa?)3#-bV%aOlkbl=poGG~JA|LZA0=W-MfAPoyYV+KV|P#p7(e zf#FzrFGHn7Ox!r1!4VGw04#eg}>G9EEug|HAz1@(xFVQAjSvRae zj?S_UJ3|6Vn_!h?^rp(?45&E~YlSTdSgfrRE%pP3R_w!3n~gxK+zTnD`!+UME_lOE zMCiA2K$P6T&_judp>qFVnofgMf||bAFVCYlUYlUaATvA$$E$0UUe2)7!~#-eOTx$JdGyB0vnf!Y7-Lm7i?Sh< zvb8Wa=}aU(R43^icogBzau1|f8M}#CA?%2xmrw2v#GS?GEOIwe2RPEBLgYSSG!r#a z6xkk+w8Cb}T*a7QM6|O!lf~7H!soHS0hv=AR=Iqg8KrLJ4w^>_Qc14aK%deN z&>tu7M2h401V!$(;*(5Dykk|I8flYr+vqzW6sZFa_y03QK7rnNc{Nh({=wl^INa-3 zs~m$bsm}7Z3@x~TLeEtQq-gZ`iX5SOQ`WTO8)!P`Kp`eXX7`nmd{`e;3`YbArp zM%@d#3SF8`r9H2$(T>$RG`BT}HH$QTG`x60+$xR}CE;s)`93Q26$JhwU&oKP*_ujU>U)9Fx>f9&*n?`jK>KJ|jf|d1162 zGNK}Vpgh9)6lB(Q#XWOMj2YL<(IF5S>2yF--@aD3d{#0*RZ}mkb$6J!Yb0pf?Rg_zQcaR8j3JNbs5#%_Q;B;zFM({~dTxJhAeNnQL zH{`j0P@dP|Ch};GDUpzTSqjy>ss!Y{BdKgs^)iq%$fEi?l24A}WEs&gQS~PcLGqSC zzR-M8GLhfqIe%g^h5s_VqyrcJLoz+Ul4bXJY_h2OP|22~oYx@#vJ|Pi>SgK5a@M`=4JIoE zy#*(cNoIV*TfrBgI&M+vLlNk(|5l$47T$$(tj!(wp*tJ7esxyt; zfu>iV4S|MKe3qU~buJ}8C}UdCdx#A_zbYBXcM8xNKFS6cZqTIPFr481b5Rq?_V+XQwo4wr9lEJ~8>WVj1(N|-I8?WYLc9*?S>y*J( z`20g`=p`xuv&$~j@&&_|Ue0uK(c8kOXwxq!f!2wMw)$P;^n0}QwNu)=AEFQusP4 zVx1JZPKsJ5bzUcRStmuWlVXlZv3w>;1)(^0oF8|ZTVpwad!-2TTjurV0<+0<)ibX+ zR>zr9E3W)0E9b&1NfGcrOKgX-#AdLv#15W}i|$EK`TRb{7y)@V4*Pk=Oo@G62N`>^ zrofdqf75uD&WSY<@4kQ~R`vyUB{4%9{kug%5gfgDa?4xm}j_cSZN5*AJPxf-Ow%4 znYABi=b)X%MNOS1Q~Xg}i}L&T@RMTUkMkpWqTZn%#NFT?=OV~ea)>-eoT}GJ0sceQ zCLvT^>Q0y4s6-2cqFWBs6@4m+i1qVgRZ)X|Amu)e#f9fFe9Z4UutTRt(cB`Cq9W7#@Z&&ZI8J_? 
zq4?-K`IHKh5m^sZZ^BegvfgkNRX!>MXloZ*qf*xu*$|5TE+#2(r>9e z7lAnJ`C!_W%t#?KD$+(*QMBtjd3&+IlIa6|a6Kja#wz?l5BZxToFT_r<4`w=>InX* z5I>)U88t>?_|xzo!(GF-hA#}C8$L3$7%mvzHk>lNZg|DBn8YvRJT^Z5Y0Y>+ZQ_W7 z(J)3s8MQMS%xD0k{*3xD>dUB&Q4^y^Mh%SW8P$4B#`s^dw9S2pTvGN5ue3qCZidQ_GPpWqrDmJ#b{4PQyEQRwEOzB zka$m8NW#UbJ?*Oha{9;gPTdi-UO1*5ZmF<{<~`;_(_5O;nn~gou?xzjbNN{QxcIxe zNj;YPhFeJ{kRMh5g}zk@ud7R{gTXO9@e!S=I!G)<=}REodn+*>h7C#z2PT85I38=Q zUz{KqRBlssCofO!@4@l#S5^m))tqFNU1uyr&`qxnK%Q?I@kjoQGJg3(uq^M67zT|_pPS`}dH(mSjRq=4+0d@oZP+^4VjkwnxGrT+uGwk>( zn${<6PQYKHCuwui)ixV`KJDr}m6#>82-Eow`2L`+a3y*&D_m;STbrxHF{dt*0--S4 zl?UvjN55{^4WiCm?es=AYLnuj;2@U&@UsX?Cpf$nA%7!BP-$Wh{QMM@V#`CJsWh>4 z{fmoSo);G-k6jXh73rpWMuoE6gO)KC&is;jtXXY3Y#L7nm_*c3Ofu>~YEK^KDQ{2S zX%I%Ba-P7$@h5SfNq;3jNANpXS_m!g*b|_6!_(m!QbYj`O5VMcxBkHWWY2;7T}KnD zrHJBj)s@$LKVVx>x9&%WciSrnh4E@kOj{}x#!>IX?1jQu8h$v~>wgftd43SP`@cvP zgI!e{K_k5$E38Es_17RZbn9YzgPX+#llRC{607nSpI%2MiVSTnfY4?&wcqe&X9L9IOixf|wLU%tLJmW^v?Y>}rO#f9kM`d8h18`%B?v1jJFA;AYxt|MP)MFNQR2Ju^5;Dw%5C7i@7n(fA3oq!Bq73 z4FD?a5p$y%>TZwqqK!t1c46p|vKR%GYo4A(C|I`5QgSwors1hd2fwmEkW_y<55;2l!3PM z5ey8h-xOsN>QCAxD6Tc4nCFoT^ZX~%YA{;b{{K82tB zCy>cT#oQo<7WN7EqKz|gBy$59YBOOe1-L}S`doKfg#f-#HvzKR!W3QG~#^z`d?l&&4^-bKvyQHrU@#I9F&1VQVJ z0mx=!Xx2SMr8n)!W>wN1qph$of8p;m^2J)L*xC?GVy0+a*7qz$AQYpZSj;t1aNFr% z%-py;2r?fnMouG3@2u{GvvKT+7Q(8?s9Gc!nv^Zn1 zRnW0Mxc52s;y<94akliP=@%pfLgT7HEMKGKTK{iURM1w;^?!H;1^wdRD9_N-ga3`H z8Ct0OcfK4$-QNcOD__E@+xBYr?0U({^U1146-Z6%x}C%MXI4{Wv4Q}v3%*aSj@{;a;W?c zjS*kz9$0@m9UGU&z>;@pC3Bg-%{nj~s^)(eR3DD7mAo7!)ldByh#ftQ0(HqhD@;Ha z(_-FGCFz3i(%G_E%*$rLy{L{V-~t(|FodDDZa8YJV$A`x5rY|e5G!Z!=iK}rtq z(wYrq{>r7_p?`qlztz!|RZ*G9-CsfHc~KMvBG`|CWF<|O#r$oLb};lzM21WTo?eEN zhSeuC#))VKLkquBJ|Kb6KAR4BUxvCb()#qF{<_m&c$2j@^hAca}?6 znO+Rda;AIH#{0C}dor}Py0aIp0ose{483Thb0j^07d2 zOT%9}{(9oC7yf$VuaDKcXH_Har z^JNB-tN_+?b5TNw0Fb|q*C0Yr*=e8>zv8EIQ2dpiVIZ+e;mgs+KR+~4U*9t$oy4G|4b8)>W2fLw zdCe?6N1_#^re{V7kj1A_nmw)9&Y^XX!dP2^_P38wMwFLhQdvr+CchlgFpzHd3PB|m z<1E-TE11J&4+|vi8*i1bBaZ<$k_>6TP!@u_Oxhh#UQT5e8d!4zkHfX*8P!!0rFk1G-@Y@ws{vyZE^#`26-ORFsS(sn{A;SSk#p=>^B@QXb2pN*1Wn>>?)HI zmpKW0IlgwBffyB_W=|>FhnE#23yw5RUSKN4UPJjO?H5TG6fCFpmPk69KPXLlYbqK# z_Ze)ED-jNZ;y!~z;MSd9p?mt!hKovn{n*P+1SLUp=Poud%q`YKWl!wD_R|w>P|LH*moU1R(M8~s zo0%HP{4*Gx&ge8oOBtQYXbF_%W`-3r|5!%HFglvik&F&!w2;vPM)MfWVRRUyLm3^y z=paT1GCF|Ke(S%>&BR|8{;0n{{swry%N-b4&1-*C4FREfV20jMzz@|m@j>d_FtmB# zP|x<}fq7~$efUN)3e+>u@nl~cI8TJi$-ne~fYNcXV81@FJ7gpdaB;f*kgq=JTL1F( zf%qHbdHMR_`~mcBI1B`7$XGn2GEo{53Wd@VKg+jj4|hVnRy{)PL{a8lYOQe^3f}A_ zfGasK&a=fi#HoggY56Wu9KlROYVAM8gHP0u_uz7QK9-dZ6)Ou}p1z%jbm8Q-SA7U# z@o;)(zH9wE(L+4%L}&NxL3f*DNTZ4yWVvG5jVGiu<`2yK%=65dmUOel^ttJvX&Fex z+1os?7iXU_@-B6$vf1S0&B~u;gq7LX$SOWeRiN6h zvb0#9w#>H-u=tyAn?2?Y=5gj&)1RjIOi!80Onppd<4t3uvC3FrlKt3JaTsvvqz zwu|H5gz?OM1)f-woyc(q;W%@jgCCgN0QR+%Zztq2cPmt_%}(OD^>75kMsAsFu&$60 zomPvs24_PfEP9UPDw76LP^;DIpym7LBFs9Do8}tm#R#-48y+Db#!ZC9>u9ON;e~ay zlpOe!h5LbiJ;i%M&-E0Kfif1x_g`#7BJ5&;P`JokU-*kL86j~4W#*uG112WFK@D?n zLlfdg((3B*um;ZPvk?|8_R5E@)Q4c@=oJch(UtNKxadBsD>(xj9>eTWRr3aO55N!1 zl_6{+Ev^poHq!WwP=&6MtV&G8Ot53H6G%)$gh?Pv5)&wdm=2t;4~Z~o$%4fAf1odW zP^c^^4k7GNtrnQFq5SdeCygXKu?x#jhly=PQ4wfw)F-j?L#Xh^ClpFajA9Ti;>e@1 z5hjr&xFQjvqk%*8?V<=lgYE7D?>s?k>ww>x>j!b0Xe;zEfx4>u@Hh(-c$0G|_AT&F{+yT{Pevd$Df~f<5 zdt={-&p;NWb3=qt z*Ed1?5V5UD!jVl$&~zX;yQ?r&63W*)M!z=hI@Tv9L3oHbJrg60VnhP}5J}FcK*XM) zrU;uLErZrZ7&ZP0+(RDsvJ=Q-P9P5v=l+i)(llCXV3x1?ScPXy8Z2`yeJlp^b@Ku9 zF!M6ganlcGJDR~xG{qYKFurHpZk%CEGip)3{T!s$WWUb|X<(?u5ftK}H**tUPHpxh zoM49*44VXBm%9@dsLY?rMZF@dBF723M0XIY2Az)1FAhbhMS+89TQMj84*=%=1m~Ii z9sIsEJDKCZfcQGyog-^R1~%6s59mi7?D 
zJxR;h2NRjQ8#XcbNjS;ejc}K_)gW!7e9NH_T@$}3(fx3Uz#XRGNCUa!n z5WMtMcC?ODk4l=xf;TXz=G6J1-9<~yhBW35fEmoqfbF|zsp;?@3%lSC=5~SD-L!Nm zX|lH<{KWkEVXPX-Q-Sdt{E!D3 z8Vy7At@_1yJnOh_Bw7(|*ACEphUcv%@uE0YvBl#cr?YI#6OuaxYaj$U&P&`xqv#V9m2?;eTTtXW=dR(|gxKGYyYFAEc>0(GVq6}e1-+7)U= zRG>~oIF`)lP$4cZ^3mLErV1S#^yZy`f{nE6Q$GFcvss^<}8k~iv#X{--3Ly;u=jv(W_@zRrnd|=oPtj!I?#$MtIrjnE|+P zd8A&p4xlv~F84)ugj|J6WXC_YG4e$kE7u}*Lgx5HD>ISlA^*+*Y9Ax-8b~=xkfI4@ zhlz~L52EP14DGOz(aU8FO-3p~^OC_K@-+tRK#D3^6gYz8^?beDdoWE*kfJL0sd0As zIrPTKGa1lfHDi>`Lnzv!peOQ!cbgd;6+pR-O;DwUI}n)k54J6kzb{`OIM+J~QX%JaE!v8EEN@>;7-d?{f9#o#xr*K4!D2)pW$P+BAww<8-Dj zXp;Y)@hM}Ou`l`5Xdwp-t%jEks|}+Jo%R3KpV#l;-qz2;<+f3GgDl5m+f}+kUAXpV z?b}+=&d{c74Vuq2hcv4OXbLn=@n`WZaho_@OcxE@bm22J1*{T=qr&4C{%yXVpU$WA zdVI8BQm;}Msw22xNFLVO`{&@h3DDLU>R0EuqY-woU}e#8|2pZ8DC}UttZpeRmOKUd z6U!#nXB`ybecLZidL~9Bm)RMb7M&EKgfFqMtu2y;IdZnH@w>@zWm!&Ao#n@9cFsn4 zp7GR1{uCpU=a|3pTmt3S2-Wn=4P=cRu4Gq0g9P@TPf(8FOl8qWo-Ab1o{A+oMV#mU zvYg#&dTIxHEzF6b9#UJk;`V4(Se^Fk7{$YXMdK{+)u1&|ubllFO;^@j4waNzt609I zR|?Cw?%cQE)tSHlMo>ymMzs!Qs~i4*akhiK~N{1`<0s&4;}W6*W5ek*7FR$Cdy z`mJv2-89B>`fkQ5y~2cAparI{l}Y4Gt8+qfhA8vYiVZJLE& zxtrWbVW#>wew6xM^-eB|{EAa-fn}i3(-L6*+Wfltaq~p-jyXZtZn|%37Dt+%HqAE; zB-_Q8O@YR5jc*tqN5y86QHACLPaEbN28gRwiJuw#`OaLm{c+yiJM`r5& z(p}Q+(Ut2mbvEr6+E-D8I7S;SfX!-3VgvpQEKP!aBZi|u8c#|IW;SXibwib5BJ)@{ z`LllB@N5>o*SMO6IoyPk)j7)O!{pUD8koKoW%}&URSDc#o0Etq&7=)E-C;T`je*9E zIVmt|LynUT>VXY8I{0c|p+rOCab-;Iz0fCG7|%*MeKrA4sBF$LgYSw|Ix97>WSue( zgm>Av(j_gpmPKoz29~D4>bRe}h~zPr*hbP>6=BuJ94(A^*ud^Awgq=RY+CNb$0de} z3!+$W{9G`?+Y!nUGWQ+EC&JzJn9E07yloSXDrJPziQdTkZC$-Hn>@q9g&(APyJ%m1 z9d>c(cC4W99B)4p8E$O!_L~sN+S2ljGWTKo<2lM~6pEC(8dys-@bVK% zJ)5v|SMMof{X@>97{gB)4l&o;q0W#9>|g;8q??{v8cq9Jc#M^38#_|yqzEhC=D${oe#|rir3;iJUgYMt6SF9J}OTcx0xNH6WF?pW*WAeorBnc(?`Qd0j z*|{mduV-3Q{zTkvwMeO8K9?_g?w`(oQ;q8^YA?{X;h8*+l&!}IkNI4|ukVrxAk-9g z?$^)&4&9H1s+Kxin;w;DqYzZxzZo-?d6j5W9nTA18WI2?{O z6n;eW_t-M6cx|Yyl`%o6T*af8b~=%xC2G8*9CZcDq_70 zM`+<(+3>;ggYl7Atq*2oIpr&Te24~aukGbrzx80DXY0Y?<*!hCh-AFC=b?sDW4kNv z5p#vVgag8ORK32&Plw^fBeFGlZa=0k$vTTlp@x@Y1A)r_5vD5UNkICdo{Pb){mLm! z70r|2G&L$c&VqF>PJCtUDdS^t*i~+PxRNp)alE3S!ut3wesg#E*oUxT^Z+?)uVH$<0 z#(5I7Ou!cr%@CSbPo^+cIb)^zpngng{H$Wyc&c;8;?GR+!c=yTFjYFE!D@I1TwFVZ z!c^;wnbc={xf{D>P#D$D2=857ifV4I@?Rb#6Ze?4k9na;hrD>VyQ$;vcV~*~HL9 zL@OCu(i>IV3~fD6RhAVDE&Q_sZ68UMljRI`m(fbj^`;xwk?!l!$T5eZZT}tZMT_Q) zK(tKBZO&K)t-m}L!PyLK={d^Fnz@(i;%6~*?^HCT!Rp{(%w$0AdDQ(g{cMFe2K#;n zL)&^{*DC0s(TGlGsI8pVZyNKTmO9ckXI0g z$Y=ti@r=f<9~(asf1~g>8h>N(H`X&Ye%!&;r}$!t9OS_HEb5o?$5f(fFa=eEy`l2Z z=rpK3i?>1)Z9|3O+{=>ReK~2^+ zyus+u$w~v6uEe>f4#3kl6qu#}1t->{Y77@t=7o0n=kTN~Orr|2sot2544=7^5wXtqA4aSK;tr0q)m`BXGr8(-0{}Vq^A9u2Fa@28bx@l zv}5iHJbZySMRB<2RX85r5XIs4mqKYUZ$I{O!6%FGyHBrxdU+le*LdJHo9voJyKba6 z<-;qVWP&)JO6s`hiNkHJl6ljg3$Vuu74==k9y``$WpRH}z%+d9k|#UKX91}AYXKgy zKR7x}wyvg{#9RfIUY&&s*D_jej*_aCM#}`DFm{y_8LHroYY>O6j~3hE@b^=3e@}02 zI>J%4VnqbrqD1d(;>cj7F3oo=RyclOG}=hd!NG%Dhen68>zeF1pYR+=;}a+>X}o|l zXn<0W=CfsX)DBb4q`ZaR(FD#VSW?IROsI%O5GT>a$sw})!(fa*^(!8OX}pM8WGY!? 
z-m0`gI|A|%k=9BcU5NJjmyn?^tyC&WJ=QdiH1ehas-dWD8efp`2?~f7324J5_>ef$ zB4ak+s`{Njgy(#R@-hA7N?5_+a@Nt1pSHEzo%0sTkjxlTRo6 zp`uJ24W}R5>4Nz2^-u>UO^D&N$&e|v`awTwLh-g|26pjWkS0viz-9SKRLMP$ z>bfh*>z|CCfWL|On}ol~o+qP==6JlS!PcT-keB(6FHEVJ?FS26i=sVpYtbe>oSD88 z5Ai%%>P#Ixc1DN9}*O9tRs*UwZaar=x(+%B4?nykhb zj3W&{7`7W+`p@(W^pfs1-Bj&g+84B$nvXRV8m-tMrU}i$3_-;tZs*m-+;3bBnrM7R zn#fo(*0LRS^T#<7!r`E|w9K9+w=bh7UQwZLmti?dy8JBLRrGF)fBnf4<&tYq5g#I- zLw~%y5vh|Y<>f?7mkW)SX((esO}zX!h7;s7NKvrk)y5m;=_{y6B^D_r>rlzdmoS_l zZ)GS|%gZS%DewmZcx77e!A^M}0(fT|QoN*n% z8zpZ?iY;S60TsUROz46~dpgwha-Tw47>5+k81%)I2hiI^p2dI$3iuAEz*Pp|Izp0{ zA<$V)M~cnBbp#YWi6Wny7><*lMv5oHy-IznJZuC_tVfE64x+o@$=eJ($0WamKmu7I zUq^QYS+3No{$qTfTkrNV$g}XUVZ3&kS9}C}t4H~6Yl^nWO^C;mr5%m}AhPafZlk2qln5Wy@$<@FH(i!!|rpM~wvJ-H0cU zh4KX!zo4U}HzwKS{4ppg)XncG1g!H0hsiG?m_X)v`3i>OK{9&DlI%Ygabsl%b7t2x zR--ux$B+syd*eJ5(zQ@dQZ7G0cLbU1jjD=4dy1yEUV3@cI2u1keiPjhq>ROffpz$J zA30+@)`-lOA4Rv2DyZpnUe`EvU1RC7#%Uyz+K1<<&a3cZmR*)vmNbh9*7j-LAf~2Y zIuuJwd|+eyG=F%uH5<=Up_1NnseM|mnrDi6YQQg$AoK$nFg3xm{FiA<1Rxt)@#)7a zWnMez@{IXo+F>4?y;ddTD>1+OD%biM52oR7I{s#OW;~d=Ba^orP$hbV%vmQm@7~P) z!+-KbE{IxygZ26O3GjodTlo4}{b%8Cwr5uVGT6nNHmQ2xMHXd+62FK~SO2PhQ$2}$ zz@6cibGMiBghpby= zu~4=6do)-|kFt8!ww3kiI3ZSfa&^be5x(9lA}GR6EzsLy#GwC=HjYKKllR^T z{Ji%@(6sE{2!HQ&5ESSz-^Y6+0=)M|P?#BX5u4|wFUwlB^sV_cZO$HOJ+xBeNxf6% zPXr~`(=7|JM$8~=kdmGu>`v*L>cGj_P-to#1Qm({V z|FZyJpL3NZQUxD^hF*{22Y-%oN0@Zy@euN~;xDHD2JhWr)W4Vd(@^fht`n2)BD6=z zr+o{a6hF0%T0`vyqQ^y)R6m=F2AIECy&Waa7oRuCTs^H$Yy3{Udr-g7Kz1k$?ek=` zaT>oBzy3QFq(;9m1S)6zh#%?9Iv>b|*#@#*iE5g#6;~`zdb2*Z!wRQp>^22x-rv~@ zH!u4c@WLXx3Y4Yo^x3OZ8eg-0ww=@}{)*s!Hra3v*DZ1_(3M!z$sIP3jaaQ3g`?rh z^L8M+W{1|BFeB$LcqtoL?EP>(yaNl|#F*+w*5T`?`!=ll6H2O9dq3>_owKpEJF&I( zsVl6oKMPlZkJAd3lgAXc%p!lhO2dq$ZBT%M|8BLRu@=o~ofjFt5nrm7g$Ce6?9$@8 z8a(y+)NBK^m0`!ohxI;CRkRh&w)V_MySnFUaW%LLE4ErmT{UQ{O@90$whxZP(>2j9 ziHrnig42#S?BaEBM2mU4j-gdy(EzO7Dj<6hm1g3i)$v#jGlOHd%5C{$9V)RX%L;|1 z`PeMH)PcupIqNV*Rr(67_of=G_cDc{{m&=xE{Ib&21^y7W)Dt=C0>7bR9D6ArOU;| z%H)wAvj@u|GF{|3XTFbXK^b$MM<-Eha4%esy@7MSkzH7~u-Hx(9FxjPGkxT8L1-@@ z!D+j~*!J=v@Jwb_Xl;Sk^G@lnS!gTfzbBqjAYNY-^$AZ6Lx+q=1eq4rN;AgZ-G(|H1^d&Ny}Q4k#?ZStQh;AetbS- zjz&bW^!-tI4-N8vUyX+SxXD0RG5Rf>j*JYZTMdL2v){r`7J6aV+YDChezO*?@xtvR z7g27-@Rxjvx0JC=yfCu?MiU9i3rjneP;SKv_{c96|ArMa;L|72ww+}{w=c-8*a5fn zLP_8M!iphqEpdC{hI6cficN6gn`kx6xV^B~DtHlEF(XX7Loo?%Nk&EBKK=bvFHEn~MOb1m`3|KhOn64`(?!_9;7T7HCO(;SeS7>Pj!%Fq#P~gBk2>`+smJ1|Jz+8D2lO z66H&WeQ33V6y^iH6}0I$_E-iowDxj81*MnaB5#1g`yElx%pY09{TZ749gevoHGHSc zR_~gM;u)#p&ntKXqpGLsu*!1w;ni33%qggIdec;8avHxhzJzx~B^jCxb$E!bVgz)p z;k$b94y-Tnnjv=mm2<~a9k%8LUR!gDFkAEHAJ(I3`3SGAIl_vqd7;5mWo0~MzAz7uFa^xAtj~GB(v$Q4yrUO+t)Lo+BJ}wKYJ)~;8DLYzd+6IF!O9YFuzr;Y;si}!jXmZlRcLX&+j1xg{y}1cmZs@`HXoyUOcza z6l~mY>}hxb{F;#`w|V{?4LJYvV}j@FkLI6N<3Y*|{k>-;TQ(%e!^zXw4qVek0-a3s zP$~5MY<`7q&W0>x%fRM7n6Mxo_CJkmv}+;w^Z6g^N;mYy*d&Z~8z;uYp?&kh=}5r% z8}qN~rf=wlk?g?h*LW!Q=2`l#$J3zd=^J{2xpdz2W3BT^<2I5Tye%}_#&KitKF1je zFwBJuiP<#^93m4AL_tH%j4t$M$ugo?-8Q?X$HAzr3*M8RoDWIn^vO^(XP*m2U0Y&W zU7lC!7W}CNNB`<%g64+ktheo6;IZvq_-j`*rq(tuT%~?d{U~f~UO3I9?Uv}CgJw5q z^F^(`zWE}`7@p`ZLj=tYskP#%4}E==Y$Kfp`O zJq)ernl$qg-H$xXvGCpHXhx)&?sAuSjeAhNb3HQBz;T+%N$z3_!m9Td;{7+8yd-xK z3m?I-n$rw|>+jP7`hxiyEjSf2t}S$-rlbNLlO`z9J)V_A4I}psSwqX=licIH<>(;U z@BjFE54b9h=wJNq-pVa-!4d;1T(C>I*t=kCv4Tl7ny8?$H?YNMl%^1sC}_eY8Xbx~ zh7y&V*Z_$dyTmS0F}4_^g2tFw|KBsakpFwX_nv<4&hB^4)LnLG&iT%n(T(6;`bBWU z`!#au(-?SdhKH=(Ib?0P%O;&e)`lBwjR=2b$3zIGl?V^9pz{=8%tnbYkF6Bp7FI06 zY*uw1Z8ofRCJa=|m3Y=FeDc&j>lR`%sjjXaQfzu<%()$QWxmPS$|+0c{Cy+j#yXPZUX zja?LBFmt<1xRZ4fp@U5kVF3F^gtgfT5!Pj|MCiss%1Ithi7C{>4=_TT{z-5G@o0(r z=EhwXpYLBGhEK$Kd+G~|xH 
zT8Vl}Kiio=tXh?oSZ|oPIr^#WqDVW2xm}|?K4hJ);b!TFvT5Ra09!A@p6rweyCql5 zNv@ddZ8mn)^wAtLSZ1ItYdxujHi^}(`1n&-M#abXWfoxp5e>^aLg$?f zb=|)4@jF`Cc9~tbH^r)Knv_qy^YMHgi*bM3sWn7|@@qqDNg=Mje}268Mnq$2@c&^$ z0=N_WriPKW1x|A>nqshADbv`&aN01@pwWM&cj|uDMQQ)h7HNmc59JiuS8^e%J1i=u zfiJeYU>oBnp45&@RJ?BZlHQ#UN`*em=0hal9k0%)c6(9Ucq60h)#8w zPb-t^qe8)|dV~y=%IYLmR7$%VtV99OV-XF_mxX;h0?Lz*uo&f4DYhb*MUlJ}j-WxU z0Y%Ar1O}YGz&hWHWyfLn@aRYHm^EqI~$sSU}t#M?bvN$HFMl zsk?(zuF=ZBI(E>+o$Iy<{jwf**$X5{}mO$}#vk7=609psd;@cr%4p68Iada@TAnG|z z+w-FWzlSK7#T-s z3MXPQ*sJy^>{~+J>Q^hecnQ3o`SNE#15sn;e*CwbigoYMN6#mGj5Kiewi4kxlH#_* zm;JZIDvpE#=23CKK?HXVmRdGr5^m`J4k&ZEi0X%Z0NnvD7{BD7SXi2_aQ)*b#I32ix_Wd8sP2Z;-DyY1b(Llp-! zh!4RLTGpc|IJgtt)V~1)^4$VHZG#Fp_z&nXM?pK>La^Cfx{7%(JsegaO=Imjjc%sL z`yM(tj{VTxWZP+rvRSP^SU<8Hw#>rdZl2RMq;j6aVu(x_Ne*u5&WN=svpsry)U(yX?ozu=Bh9JiJnJOhLs)*XY_QC=^s&6Dea^B@c?JoR&!f#}L)fMbXGgQ29`_J* z7Z#2yNy{pGZEwTwY(J}#Se|E}ZO2Mybry^`)?@H?DD#LU)s?s^Ea8DdRWaOB2rbv^ zE)|3pV{ffWNLkcUA-DAs@{W`<&1En`rhj)>g(9wLL^siDym1cV3Weny}9i ztz@#0Rlc=-Ia?1O*NT`}%|K#$50PW|vd<`$Q7UFZ8_%w00c!`~(cN@*D*NKsIf8v7 z6|j|?&wj*u-V=NwqrOD6B-%^q0z)@9kx}fn&lL~c>~i1jZP{CakD8K#Xdzw9T2!fU zy0QjuS1Cc!CsLH!4q?6|@^q_uC!|r#Z`)bXI_=k%H(B`Eb}eu#)%Lv48uo_t_|0dV z3)YJxruJk>-06VkpNo5EkWP#Gq>r2`3Xr8!EOhhPwyd=DcOf84hgsR+?ICOsmAm7M zr78<-uSh1J@!-l7P`F0 zF1pjjSEJFilZrHQcdW!I@;K=6sYe6%3iqw<7GE8rN!^Mpm}x__@IJ=GzVqV&)Qq=XXW=y8OBM*_RsSTqiKOzStj%HK$v1y*9%**N!mQfPC9MO7dPwfu-G{-Y@^`l3))6+HHP7Q)X_^FAAxJ0wu~TxZ z4_Ilz?=6}4lx%;RJ4TMwy~n17E_qLJJd-&AI=$w;RsIIqAUx6d6%7eu!AmRsU6Wfa z`O~UMRc-q;NK64`&3V_i-IomY(|H&)ZQ<>1$#5^N9nL^k$*rZor6TE5DO~fLD<*15 za<#D`^4hiP8~2fV;|LKqF3l$gH*T_e;{Yo+ZoGQqNKUzN6V)3>h%!%)R_5ttHy5k( z^u)1C;!;DcZ)$pJ4r$!2w#&v7rck%XprhV*?`FBPn#%^cblH5jMHO~*G38uQngT~p$0K+ea1cOt5O}|3l6-zwU>iTK_ z)o#%agHzaD@<(zl*s5YBKg}unL;m0Y4N6cLQ|YTVss3Kl{MV2QNg+>3ub{~iZeZf? z2riq#o(jDTotoku=GGS^t?;L!FU})%rb3&_YWI1y%zIfRqw=PnK<@?@GVsY_aS*H! 
zr%Egi6|U@7hhT*{6@3-u3+amc%L!JfQ&mm2QbE%dh1Tv*RTDn!x>e@eP0s;cc3#T*Eg^b zk&wcgs=5yKnt;hz1Xv+ZMNh@MUBF}_0<18oBL7Au1su~TgB!l#|0$Y=pq-I}bz@sEXclp=km5gONWD=D`}3&as9 zdUKSL>Kvj`2AmARt^=8o0#0Z>RD~m8%K`kMfU~}ZrdUXmO~=?I;RC=nR*>4$A2K79 z9IuC~GGy5SJY2wKjhCq~*>{lieUVgZh^2^r6GWLF?xLH3>tslh`996Ji#BVaP} z0De!vS#K{^;rNts1ivfb$Z0DDObvcV5xj|KP^5=FhXjWxg5OTBhGE(P94TPW+pPej zM>3LyqBugJ1#eV$NZ1dPsa1moEM01+2n&-B;6VhhF8D`9^XCYr=0G7_wTsLT)YoJL zh426Yhu*-5L%?MI1l(W1Wyin~6mb4M0rwMd{od$0E8iP|2;F7c7g1h@ zx0KI~Kr1i(;brKid~O6FJ~z5><#QttF;396adm07teH_Aj0jV-G8pNplpzrDO^bMA zou_Q*lk8kp=v=OgD$?Z(Yjima-GMEsPH#wqnef{@GxHmn+mLl1@ja&1S2tS7-7rXi zyEDMK*g-bFi(p}atrhdh4)`U8Jof|a%kt+e^^wl2aoh9YUxKAy@Pj*xxl0>J=M+4z zV>(!53*|(k`^5r7cAy)GJ#`-+p zg()+4;0TW>KOkr6Czjqdy@OOr0Zgx7qiRJt=u=1AbU1A}yB&7qZs{=hl8fR~rJEZ_ z#oX~D@&BYQu;R{GxXp`|LqOUuGT`}$^Iwgl|Aa>ERgJ8lE{%l4;q;Eu z9>qL-O^^CrX@%533Ls-4l3~lR`fPf1DpmrvM$8Y2sNIHL6pG#<^Snzz@>{3ZmkO2h z;?dX>!guL5JPNaxg5Bb?20Vp#NqDC!;IrA2O>IN{xtDH1<^I|}5?quMN3(J;Tt~kM zz>>xri0jB!;hwX&dO6@g7H>or`Dzx2VTq6Jf^K-&V6zw8;Gki-N>s&w6mo+DazT2$ z5qE2470lK9^>a|#MB?y8WNh?eCn!GFN_W%mj}&nF3>3eZF^^rCvXD(09Oij-GJUMeka^^~J zv+W`VFY`3=C>b^nY|Xn#num>rH+altwKLO}xOiroQDVO>nBv1|+$b9+Gsi6TA|pF( z6kD|jS2P#Eu%;|-icg034_>YxveW!5uzE|iT_Bp+F>Kp101nFA<$AGtc}OI($hSBs zQS7!WJ}LY2((p^iZxw#4@%sY5FY(J@`}0_Dd!S~Tq=Cy$ID8x~O?B5XuY5MhH71`K zbh5`#RwlO^?ke5M!rhdybNH?GK`bJCoxedEMGPQbVRMs$QkLy!_?+D z{%_`oTOIOO8trNAM6E{tikmY1q)u##u}#2&wOuFxFC5W8mAh@{0b3}~`2wvbaN{pdpuY)}Sls+RfFRxvM?W_B z1h_IqpF~;s(5+O)_W})@fI9#lx1E;)g?bQ2JQ^yQ8#ikkp@{;u|1;a0e-0ps{~-Vn zyX*6(09x`PI68RP1bTVK{e%Z+EFPq$d^f?Yn&k~eDk+_hA*yk*eOQk`~ z+3-_dyp`-no{2(2tHs7wqym;%y$?WEZwH14gpTK zO3y<)kU7t03R|VODK!$fuu}dQ2cNoIzpMow_kgL{6Oj8LHTO_nllwq5_x!W<0)^97 zWim_p5Vf2EYB}jY?ts-ZHM_c;{%Se)3z=#;{g`z|_5^KoYG3wa*t#)p?x}s$+`}`% zVGIdhS6%nsYMxS$ia_>j__{8p)Lv?Kj$AaeD3FRl-Hkl~lUddAan>NkzixDD56Tr- zWUIH}uyPHa?rl^t%F$6fE~%lK5%!_0&)b&J)3&OZ9$@NIDC##`IS1O3KNBbov6wRhOfka& z%6|;v@WiQXIV+z56smq4@#kO7nd9A1i$H!<$kq_&_&Jg#R@5+t@54LXd5zv;6%UsC z@iQn4%70OL;?;3?)O#Q!9v$y>kSduC0J}E53F4aNLlDY}zmGNW9f$(`7e~~;nk(0y z&jjFL5d**W&e#E|q(dZiS%8^y@xmqo9ls4UkoUk5H3y514?avy*(QLf-3fL!02Df2 z2sDs;;RucYU+}y^#5&&aXF}%-l;&W9UdQ5hqO|3Rf?LUMVfR9S zs|EN6fh?lymoS`AF3$xz1V_9qeS3i1NUXd#N-*{q<+58u#f)ABn?xZJ922NNj`-db z%m#f9L_PMJ60|-}36_bduw)n}h_cs?R|(+sTiA8`HGmd;IF62tbyMo|@h7Nx2k08# z8)OFlo|T^h3W7S0=!I$$A8(6K93Db}<_=wkJvm601I0rq&>A)$H{d&mhfsVJBR)^_ z=FiH_i9$ zT=V)fC|&BV!uFr=AWR0-Tni#Ui?n~*m-r5YBzattx|w$VBJ5yGV_yFCFKD!(T~aqP zBP#oIE!>8!HM*t4U3^kkB}-+*%iYhS;@H?gDgAV>6gv%lt^8x)E%`4f(U)=kw_ZGR zHx29J?kv~y8;EPlJK+dMeV-IB-uWCE+Oh%Qk=0`)PGe(MM6x>&g}($G@x6;i+h}mi zy8n(SpYs$oQ$$scMHCH_*`nW<<1a%*T^3Q9eN*5qa1WJUM9J=nxE?s-gXIl2tQbGP zqGaz}pd7b}sHk`G0Cl?ybdNy2aCBr;t+I>ZG>f~7tO72Q{CtWLBWqSf=yHMn1{B)} zU|kV^03eX}!x5W6)yu)u1mkP|-X(fGz6Xi{4m4PN2Y~k68%J1sal4vcrY=mRYZg_N zOom=`>`HYWFyd~>mkG3H2;QDQ0^rB|W-qPJUqJ%)R~)h9-2LSaKIICv`~U#d?LNKe1keIU z+(H66UnPlE0>JLaemqA2&jbMd7FJ{6HHlFgU<{WCm?eOt0)VOuqci}Gs|ot6qA-`Q z$a*=NN%nCKnXlijhMu3oGT#jD+yeFSx{Gj7;B*ZN%U-54L?(VhX29sLHUu*3%#C2% zC4Q`BRdY53yGHfOZioe=iBq*qTC^dU*_LjA(yw>+79@%miKLuCIFF+9>W@EVFp4tB z3RcG9k%=QIzT)!gK-PI1_jmo=H~Xdm$wm`g^Hnb_Uc6IE*%_Jb+8LSCZLrPqhBL8q z4N)TDRbN4ru-IACtejBUVrC;mh#f&GG{3?oZaJYg^FJYp)gIT}SbFO;INO%Z4}%jG zrU>zhqIY}MR`q9cPfa0ZogY#mZs%cwLm_U**;kuUh}!{nOoTtMSDR6YTP_y-QT%2Wy#<}; zhJLDDld>5wZ_r4GL~d;8w@74q#+HikKGyir^)>boq19C8OeDu?l!$r0+SZ6%i?B9{E{sc;YyY(!lmq*2$PsS zm*i%!aD?urNx=zoQHWqYVJB7S>fH^dvCf1!H6@o=W5nNzX#_hjlD^A~c~tp#z}Qr@IE4G`jKu)$L<#)In~pGVZdqnQP$@n#_`r1 zl_YsQ2mH#^iE5&<37-VAE=|=$tn-PS3EFX~VE?FKb*9~Q{j~0ZaOn92(n(%bPny<(UpTA}nv9X%DXE0OR#_mMZ 
zZQ`1hwlTn+dSd}B@ixb_Xt3&4GF{>YYN&NVe~_NLLhbzmK|PDaHM6QhXnu=N%3eGl0$A7mC!8mrHXZk3G4JP^c>+LR=EsWc z{JEzC+tkS(v{7C=cJKY%_V8f?kH_T3+U3Pg?LUlI8w(#n+T17mX;rrih*d#iiVl0r zvRS$C>r;qk6uGd>Wl>237Wc_fsDcYoO_;1l0s0LaEvCk{TtYLU5 z5cm|QTx)iRWnDka?Et-=BAC{{mM8Puu-A|A4~a z{#XJvp65QeK;DQO@j?;EADLTUdd{NvkedA^`sV85ebBA$LvB%7 zd0;NQi%wg)cCQzkIx5#9{llyNpu6>yQnZ()zeP$Oe~;7|>3yDKvHqBzUKx8gTKE`X z4Kv#IxAPAgIH0?J%-#R=lO{`#Sm-;*vl21v3Iv%0ZsOSB`UTLmZb8K=mH3Kwh#$NU z85JNIROxqrw6js~pbig2Mp%=*Z(|+4;yx*Iv&l-3mV>rVzl0onyq^nIYr`MC*o@72 z=!`#DK^v&Nqx3s5R^N}Cb58&~?E#VHX5!c|qiCa#^gF>Fc)Av z1S3?UvLr(~nWDng>!va*e^6XB6#j1>roy}glI7n5jq(w<(o-O1KB5EdN5|q-1io8n(9--C`7QmWip-#S_VFooEp7^?k}?f*oS! zZ=-tamAt#9sr8Qd6`Z3%i8=+YvqpvUcjgnU6>$2A5EagJeMhh?VCg{v6^_5KonTC9 z$TiM`ekxp1cQe5n0T+aRrf!&H*9!(ioPbLl!iv5~Tx8uqj$%_CnlQtlx=_H8_vWfF z&6NRvDqx2XY71%H@`MEn@;g`rl%D3uK<5jzMBbt%x1Vw&_!9w3-S99H8zUobsiz@f zo&Y_Y_^9-Y%DAnyQ*_R)_-(`QTi9^7zx0U1w|tOPDjbEIT6oMd4TrOEaya*c`%?E2 zV1d0bKDV8)t+&P4!fjsGn{tG8mo>%uzO^}asXH!}TGm;nTf!_}<{Rc+=45cMn#*w* z6rOP7*u^+pcDU7o?}dxuZbcG6npD{?MbhQp?o=2V)@U*n<;A0JxXv-+KaoLF+S+TmY;a$Pk}be}=W6B)<$Yp4)O`O;Tvk%qNO z$40h%EPBKcs$5sSw@_STBi6Dw)fXL)?HZsq0y}`e#zn9~faKw=-PI1w#?5QXle)P> zdjl$JQAXPUX`#e7;~{W}j+oRwK>9=FO~zuY8j|999Rk#TTGS>$`hsVmWkOs!$oJ!1 zd1UzN)&bH!{t9_1&Lj1mL^ER#rZl3w^}mZtN5Gio>{Q;_(87rAQbkMmvzq>z!*Ge? zwnBPLt0=U0w7axP+99Sx#ACDegA3liQjix%lPbmycfoe&6G_!*!|a&X(DO0_69^&aUoH z-B1@gK;T`@W7221NNiF5~1y~Nwc2Hkt0S)9Y1)Bfu-Ui(Kg!EhE07Fgp z?cVx)rT|U|ASwrZf|CH+^R_rbfvXP2?Y#Hjq@Y?S(1>+~-h2yyAg;#|+TZa|9byn~ z zN7M!1J~r(;&`g3}P=%HOg+twa7^GL`p_>@cmi&=G>4PYkAgt)jMg!Nrq-;h&z`^r( z(cn4$%tk$*BH#lA*Dyt(YzGQuJC0~w9n#-|9>UwcqB2(rw1z~&!pc=sNgn8k9K2+`ig7{GZ5T}Hn1AuK)lX};5Pz|vJz0^z9dFOwNMHU0P;+8jp z20jpudPx^P0gieVop|xI7BH?*{9nLHuim3NKOP26yH9fE&W2{ULQSA%j_fu@Gsn#a zCB-~T-F7YSI}f)a2(9v6DE#=~+uWPFM%drm&4#-V4B zwx3#lTED}v*~}|}hp@j3!7BQ++=u7GaTWHp!1hN{A4QCZ=^dE84=#ek~PS9TId7X5uLJ#eV5Sf&kE5* z+7t=S-Jm1TUGm})ZZxTEt4O6~pTetYES20wO*iVGoy7s+4hZ_P_#8+S2?=;ZUsy5& zZmy(7B_KDwpi(Z-41Cf#x~0mlR`C}~y;$ZV+=E#pP|@QsIA#_qGDxlYTMlpL6NgF% za)*r`o6{Z6K#|Zwy{g!I1b3w48F0{$40nC+4bLyv(QR(7O5{JmjJ9aUVKbMlR7JYx zzv9#)HC9F9|3W5Lb!W^O^GeWtZ=ZwHjTeO$xR+9jjM_=Mi$K zUcgRSyXY}=C>NF3A&JZGs7yv6di|LD88b}9=-H^trJfmGsA>%r$y0uqN&DiNXTMSW z;QdE%`Ccydj?{r@O%LxK#J2J8(gTla|I5O5Y|VSQ9g_2ZD9ry+%6wC!(R8G}&iBI& zcT@L!?mOLM-P_qN*w!$6#O^5OAJ(zEvIBc@#O}*hl^z&6U|TiieV#%O6o{Y)lJ3iB zSfFqa305fYpC}weA*WE@=PMjUfhd&sxt+zrciJ^XX!lZ@jNa{PGI~#WzD~bQ(}U^l zd#Ap6#Kg3}OMiicou(}`N!rEQA=n?s&*P+PnSJliGM*KW+v}Zh1C}}dY^ZBjt-Vhq z*|L}w=<2s|Rn*yg7oKFlAQpTiE3WG0OlkS%Ui|jCTE4med|jQXy9Rs9Irj%b`Q>!( zZw;E+80Y>ju59OicZ0I$kOMpGN`bEH?e>q7pm20Szxs{JR+m0W@#wZ6zap1Mx8g?$ zq~D8#ey?~;gYQktOk+%)OkT!6!0cROoN4T5^fx>M)qI;Fku54NZoiAGyLOgB!^XmrRy6_rot^F`H*uXlRNMoj@4?z8tl_Ayj69=n zLk#Mpe=d&XVW0X#uT{r4&!2Ux^U%QCy-iVzj_fn0n2r?VSK=}qIq>g!n|T?QnjO+& zsP#UE&(_S+7PQS)yUfyN9ppwProXJ;V%{y^CueqWW%tPZr@WgS*Xb$@F^2BO$;J-G zllUIAHXX6%T0d2WZEQ_u=_svOy>Cemk`3>(ycb_$q3gS$%<tb1 zeuim-lO<=Dwh{8NzibH7O7(RtefSBdt3gg_fsWY@aR?5{*iw3gMV6DJx=>cUbqj9Y z)>2o?tv{XLME#6?t0E4@usNsm>a))y4%%GnBMu%guptwldb7M*H@%=03~fFT-|o%% zUTpWUgHD#+7w`7&DHkZ}djG>dENt5iuauteAH?qveurH>-~V~vX3G5cUHT)7BzP-K zl#WEotFcG)iZx^CULBKIe6KK_%cMKfL&w&qw(=uKTEz%msiq^BWVY(UI&bW+8|=l0 zu9flE7}2G<7dEzYl~|61&8Dp3m@ocFA6b5%4TwK7l*5pY^rDZvUY z-0?%43OhF*CRm|`U++Izg`@hNC0L<_ODnFbFntdJE3|OOcZ*awfAs}|6>fOs^Bw;y ztZ>6ae`>12_)e0v$_>u|Wn5ISqTwZi6>@lVlTsDVJbaa4g&dwx27Q%~jz|?n4ic@5 z^jY!0!UF|d(0?1$4-?Cwx$1uyv;9Pc9WLz$S6jd_9~>lDVTWfG-XMLM!U?}l3m70n zIwXY;F73o336Uyno5)PzgJ0i#S%vd=9ihy?2d93of}TQ1+rJgk;DZD9Jil6nqo$XU 
zbT0wN&b(Be_dp@tQ^}+jnw8n(A{I!vMBZ-+_#`0)>$BBdqlT(5*G!b9mI`-QzU$WA=6R73E!?$;EX^~L#R&okB=**Zztd?J=WeR5C1)(JZ&rB^bSj)?WuZ#(zdxsx{ZJfwqVHx zV9K_&Kq8MpcP9k%&k03ZD*=c9vPXsE(~Bs5sDK?`;gJ>6@!LhVP63x$x~i2(Yw|Tc zvLQ+)In@t1Sx*3CZj2`(w#QLb+HdYrf-&6(96Btz8qO2S#Xtc^H_5Eddx`jaZ7JX^ z?MhYioZm~_vK9iaN?0ynYG8AL6dWh-$fUWh3^@v2mfunQj=6Mx$6s@o_VlW;GL||P z#M-2D^p5*X=20KJOJ>)}gU8#L4WDW5$q^d3Fyx#CoE#p%L|GO1X7Rc8JY2RCgxAKeWpNLOH+i}havrBdum_)y+w8taa2 zekyi1WrOy-37@8!zmzIo4;>%h4wT(mu>3FboUX@Nzqohv&|idM?}=7FO|RFj)3xO$ zI0tCdP&z1$H1!33LOV`=DSs&k87qwQu))L@Lzw;|^X%NLvy4#q7p-{1ffu#?hr`~i z=qzT5kLx)wkNX}+Og2@^jg=R@>A-Z*8%NB^Lt$U>@pgd2b*YX6ALGh?wd`bF0&li1 zCGLSEX86%PF&Kq~i4Q~(fqbh#Pv<~Y-~!Nsd*TREj%r`Ba$l6$lFt`t#K=-_7Dx42 zF6#4n6(*2mFX3N5q6y^_x`yp0&wD*I5J7w-P)7zVBzzRE+w+5T&B%Ph%K)?h^;+YK zKUW{V{W5D)oymB?)9wg&pJ+r7H{%GW5HMb_h!0@9s*CD^E({b+`4>hiGN|!|0(~2G z!rux^4#LiraSrp}570M#SiD?}DL0w3iq4_(tIz&JaQT9LkV_^D8JUQM)@v9UUBD<>X?Y~#}N%*43pti7{}nh1*uxsr&P-*N->LN zTmZUOppQfXSR*hlAXE#(7_s~L860@|26 zarDn<0q2Ci|GLclpGvx=YXE}wME0`>m!Jh1k6Ydp%1N5nf> z8Kdu&Coqv-sie7X!ckZnqXMeDSjf-ioN~ zT`*;jqs8#pIsQxs;inUR8IPAYAU~ZX5;oxsJYZTu-OC$+`MfMoQ}BrXXk=PP>SVhO zot(>WQb!7B?ZpkT332dAWII_@hflldiwS7VNz`Q$)fOXD8&>NS#g=3H;6s@IJMZ6v zgN##1F8wsU`EKgA4^WmXU8mpYA^}-aq+bEd&ITj?LLX{oQ z+1aTRwUIbl5wXuIN7^yO1Mp$@*E;One%TkBRem@5OndfzDe5(v$9AK+dS-7tK}DtI zSf?Y_?eKLLH2Om|wqhv=YTX0rW{*@M`@C}W`v>Bx1M?qC9bI|Zlf8fxIz)I=Uy0`! zCIa#FR`fvuHf}){!xSXubBi~>j?!_9S6V#T06hBAdrDlTZynHjKcRkjw=n9g6c*>v zTQfjfjr~GB-Y~9r|6QBmzS}+3qf9#BKGHqd_SzU`J1sUD>S?QEy$!!0%d`irA6kQ? zD$8rQf6KDWw7g}hgS~}*pq+(`Gh{z=pxZ0AQ*N1VGqr!Y^~9F3cZ{B!3IVkzvSiUt2Av?@NXXp0L(mQan=VdkP@*KOjs1WdsP*#%*!! z_@+7qJ`tx-k)aeRRq_X`(SJ%?!c9k&ww(V$?I-;@O3Y$cBvORYaDdv33E%BR#pDyN zT$laSMp$E{q5J{0NOW=kxz)`#G*QH)-08~QF??JT4Wgbzz@$r~6s%+$`3Tymn^;uHQ z+4br(iSZc=)gIHsX9@}~vm9;2^B2+m6!*F|o==gJAzN+h*Ik=ospM!pRPZvgFG6@t z(eD$9(~BZRcj}*G=(3z1{S@)&UqB|xqc0sx&Ock2e@^Guq>I#2Bdx@sPcoi@rDuw< zqtR$MXUH+c>hcWT4K{tbZlQiFYjXPBP*?ovbFlH4#>u-p?E!G`Mf+cWU9Y?)`|j+y z7cA726^i^L7R(N1*7N7aK@AnRCB`X*vypCDp{}#%&slfdY-56xh%N~xA!w$tbjl1(KNLPgQ)Gh(^g-VaHleQZ@y=DoQAI)DS%$LZ%oWZOWt1))}J3ul*=lMc+f z>bz671+s3dNTLawA+8-P3)eQAPe>ANL#t#Ldwz|`t}fH9K7Yz)Gle8p?^KF?boVq6 zkrFe0K{?5+!xzZe`Zt^W#d$y3dYh$+>ucDZmGBF!5~0m{DkQO*4vAKD`kY!}{W&CY zc1^J*wbCeOYY|KS@_e*x&0}}JL@RXGt;{DwoMf|s8C1qvHY0=bUd^%)VY4m|P9%?n zO0Ri_wm`30lYSmB{-3GG*uFoE%+JRyjnkI84L}x&SZxf z6*7(e%c#%^%;zh@qu2luj$jK#7|C+KIv*-q`?Bi@ZPqTqiR87A_{Sy|diG@i>+$vZ zRrCm%)-9oH)1!linQG z*t#J&aZn8sJL~7h5@W7!sN^*Ey9k%DH`meiV%ATDacqGI=d)RoTmIOX1weZ$YwP8TNx+%P%wd|~DVWnzd;#G)>Cmn}xtGwPzB8GrcrCyYo^ zs|7)v!zYQqt0?MXU$b5@K*bgX2eJ>h2m4DcQa+e+5x-0LU3PsirF>KEQ1e4gU(FGX zdB5c@9BGX(H*@>j?I(G-+iJIHw-D1y(=pT6a2BGKo*2uF+&E22XTkC1L%gt8<*&AF zwt2SRw%XR;tOeFMmJwh6T(~$jxP2!a@MjLa5$JlHP;Le#v0xY!d;M6#p8lb(sHNp; zl9c1BT2|gb%Y4@k4aFEfa_vxOO3aFK{C;)CthkcySqJUvC-2qhhU)^guds$T8(Yuz z#_HiK$*ylQuWXZ9VsxrEhSyg}saZ8972C4l+?APg<)gjYM!M!MW7JiDtUySLp_KsX zSMCI|wD0fur!4&Fs%zm#*A_4L&_2*OqqOJ>Q-WL z;sbe_+(vp175q{uLV81U0{@kN@-;C#c4eKeNRZaoqA46cjov*#vq+pq+5Ue9g5HE735QY+1d|!q9GUQ!-F`x;nnW1_vJY5>^@Q zxW!GXm+Jj>1m+HyVYsy!%ET;5*~M1K{@U@3mCpr^Jy4rc0c4cnzmg#Dh$E)%uM9A9 zy(l#Ep3Mo(0*X0kOPCQuW|vs_cR&L%V>;;&<80V$cXMy95Hq6e5O7h-I9Ra0yyo59_ii%RXZ$De;kq3e%O;>$+7U@K$=o7 z6X@yhU>hTA4*wEp5Pt*|_4~aH6kaj{#qD;%VjU#rst|x}9u`pk0RT)^0XP`xw1fea zy(ZZ+Lbe5tm~}$0#ajlG>?#F-MoY1V#_fS@4x(VIMiflvi6`OQeIzQP521>TK^$hN z&~-8X7rJknK#u~&&E1KnOxp)Nz&W5Txf4fNcrKtKpbuO4Njo)g4NwQq1XmclrGO?3 zgE?^AqxW-t_<8_viYR~@B5`{@1~6t9UZDO~c1H)!@S^;W3bck$%+CQ0;;nE*ZYrtR z&fmg=heg!_t>zNj`L{qD3lD!wbQMc<*O%yi+o-#duWPMYBAf=j(RixW$8yy2iKUtO z0*G2|-EO)abDIzD{zX%Ysh#nb@eAWyhDW@c-5&$gNtN;SjDwOC@?Yd4Xqw}vynrGu 
zN#XrvCAJUYD)si%wNuc~;)kWF%E~;S6-RC5zQ2wfJK}m}o zK|!tej9`WGS76_$!fC$>SfTtCtb3}$_BOXkTA}=9oqq*QcalQ%OTP?la>S)4ps8N2 z3q@p-LiCFzKPL*@`T@ZT(a-ToOBD`z@h8Cw(Jwp+_5>k)+EqocLiDS;+fRjyi%B|B zA^J%VCabVB^d7+q<*&@WRW&?Em@pK|U+mli)tQXFNzw}CFFfFofZ@t#SUK93sPg`j z(ZQ$*I{1))72@A@Cuo>}NhPXq{vvslDj3$`k=?Iu;yhib$>wO+Y3huz(NibRm^Hgo zn^tYwwjGrCu~4B#c81nmWPG|0-MPfM3Y@#PI-9u{2%aP0@W(v_{02Mydq1l5Yyp>* zQ_m;PQeb$qXfv z?bUr*Y`^P?)=d|!Yx_T2H%-WwUHsFZN91~A3)IS=Z%{j?3f%E(ZZ!@q3E@)&e!chJ zYP`7CCBi2Qyr8^C4L(ow$0UJEtMAp|*&_dm0*__q6`XF&1VwbzkZRFt+(GSl&zLHf~igFj`d<=9xH%NCxucRm{E4 z&8gBVwxG_ddGXz2_-3E2G_(AVpfvQ;XL|B|V~IPmR1rAU@qwKU>P@P{ z&lH4SDm>&Pl1WjJFuYILz{AkTB`eyuG{n}I_9+(?2esJhcXXR)$YQU6GFrdzcEdQTmpi|cKX|!$Kn*~UDB_9ui}}&Ww}7P%9wKB zN6N2G(fMy^AG_SbBAoWmpa;xViuUW{Z)bU(|2DIqrl5S`w`YAT3A)R{9iExjPlQ5l z{Up>u_1N=Z18zcYm%q*H;jjma!f8*vm`6w4gsn=0V>-ewkXLKyjpJWDzPzEs8PoY0I+!bQ^xbB;h=T?)ox!_k(a>jJ8C~dxty3iE zTg`YERR0?VpszGr1QBdNjjoNbJwtD??LQ`6IOD3k7htM#C`! zN4D^2`v4?6R?v(d{IAgGoqIk&Crn4JG8|vq*{?t0u}o7c7I6&TaN>R=Rc^zB<=CB( z)b^R(=_aqph`BvBWo+dg{C;4WjtyeV|b=pW>}^3H?+|1 z)*sLg(T|WX=^lBc$gv*2vYWe0%5onpb+p~F#c9fHwKbE>4=@C4=vL@9$aLGf-P+Tf zWu9mb!F+9>+hVu=ZeFHKrc4x}(eQ|VVluMMgpugNax*{JpT>m#IQrFa?Cc!Acrhj% zfg@IZPbT#Di7~JMG6|S9n1G{z{aD5J!+6YQl-c<~KcKMm(L7-u zABQ7Mb}+m1YyLF6?u{eH+c3EEyZ{2~;)uce_bvxt*@!Z{4FE|Nm3Z^vjj5_@P!5LO z<#jRa-gn8&Gm&RYOnek~?od=$Gwrb64!H+Otx_I*SKqm3VMK_kf21@!*UB3@ac3jzdyy3Enk6*vRM%Q}a z#a90IOz%pSAI@2)v)&+rQs%>%QW=|@`4IHUY)NY3I-2>=sAHA=AA@0bE7`#wB-}Q# z$8n-{(=9`|F#xhe(Rd?6mn5x<)j1yJ|QFXD-HfutgK=S5GK7$Ny(&|99 z8CCv+;7jPiR)3`#4}4)aq-MygTe1@SgX!|>7A(~B!8mF<`6!~MM;A;FO!;hnmFu(l zk3P{85i-a)RO4~VW4*^zk4_#Y_w()>+-JInvwHE5`ntx%Ke~o>sqPz{`W8Z!Q(>PK zn3BEXkt=(}QajJSH1rp+sisn{Q)|mZk&ecGPKu z>qO?`7eBqsY4^KZH=Vpjp zY#^;obszWx&=XK|4^t*xevIE=u1S}ld{xH?PsNQjv2f14h!Q1UOb$hQDI+f{Q>@7 z;H3py0_7XUXs$g3V|ms4Bo8bsuk7~2Da|zc?d76S=x&sR-M`jB84^TfI@u5TN%cD>i{C(EgEyGsdH1JD^ zYRZ46C}E)N4UN%w(ioNf*b#fTsa8$6idj64he8&A_A|@9c?E2lLF;jV3TP@yBjaUz zX{;&|G4hr-&j%DFjZvY~2au}XXsW*r5m4)>CQdm20@_rH;<*Ti@NyA5I>k~yV_X^Z zEn!r@UWoE#c`a`@KQjI|3Fl^h&n2w@+&Xn^%;2E%DcZsxwJwYX{&B|vtp9{7ASNf zK>aeN!MxmU0>D~536v3qUiO9cH~w=t0a-**%FUQ8qN**V@SsOgH$1W`7iO_ojy)4ig<^k(A2~qFh9S@ET1n@PfCLypz6tzc0Z9^13Z6HR17KNVj z#YX_X|CJ3`7yrHUDU{8YPMSU-^gbP>pRVnywXlzhp9ZrHQG;u_wiZ7f1GdBmawzkA z6&5Ou;#rf$0{#B5@bz$}oGyKe-#_^Mi{CTX>C)%fE3Ll4N&o5P$$`GR!J}Sq(tn!y zGS9!D==bmnPNEHlR7-}NA1oOjHfDJDg%g(B_7rH@#CKnW$nLM$*CPCv9TnkU?70XZ zvcUH!{tq@%gtyoV5mvAw5tg$`5yJKA&=(;#_cP9hf}JF>}?Uouunxeg5@E! 
zxxeE~s`eoR$5Pl!z>bZX+y^+5$cc=SS8O=kmQdr~Wqn6by}PkbMA(sS6JZ-xF2WFI z{ea49&bog z@cv~J=|pF%WNSWr0Sn$;c3gy4*(*fIwu>ySMP@J?Ig%nTu$3cUIBd4#en~!Vt<{3U zTeq$4p_6T;AxYjf1(lW06lT1=%sA?WzicaHEl0g@$hKTIXw(Zo*|r(qp%)=ovy&N; zRHr6YLN~HLYHeSIB$4kJr6I9{8X+s4Eu9%gi>j_<9Y#}a64_)C&Sl?-a0WXm!imXI zFOs8P7Dm02Vy5G(+D4P9@yPQSz;$vuru5ny6bFz*uJ*)wLZ0Ovc8ACjdobZ zSUkp{c?g-_I=+`bNQQ<=5x>kbm(ch0q$=A;zEE8`PKTapw+q37RCh5F?Uh zbdOin5ox=PB0?p#{!ELG->L<|aF5p{uaH`!|AFR-VD2}xju2ON!1RPl&SvcMIV zYsm+-{x@8qxuy?&0Yd8zf1&0T99Le3E2^w?5nk7$B#WVHpLR{gw7W7h@{l@yw zjRID9u4T(VQ*(*=N#vsNT(hn(_UHLsEl?Fko%Ez$Qi!ge1(7PwV>T$=0sB0WKIxljtW{^$}6!*}Xa^zahIRCxz`A-lst|p8vK`ek*iW>1G>$p7y371Scs}*YJ`) z3RI6*_Erj27_W{;JiRE4Z_!d(i=i5Yvw-xwP{W8rEg=)xtpO$QQWA`PQOmDxlBZAvcl;lU;3T`qscKhWwd}OX z4pDOtfk#@vo))$gzb@ZWL)`_OKCqGh+KA@jkJKg*$M4|^LW)+Gq2kr6(NJ#c!(Ok@ z#u|FMGQwUr)yt+kVjmshjG0&5caywi@W?W^UBPOU_d~p-H^n)(ziE@auuU9AWOQxr ziDy<3%C`pLLRKy+MuvE$Jnj8@`P1G~5q^@bd66#OA>EXP({OBMy%`0t{R5v{$aDBO zQbmo2whivX#rdG;KGFg8e1(^tU!4uLAookc7?fxBlO?E?ig{(ZH>U6vNM>jGhy1Lp z;e`7`cxC;%k{9>ATO0bEv_8)p^8812Bu^glnGqDs$RTy52J83YE#WqFQ2*iK1AFy( zk4b%>`|>ozgN+GqF&qqGmQ;RUa{SRg7+^)!P0$JaMllwILG>W@)^nCrmtUSBp=~#KX1T`D@ZQp zr&?6rJuV@p0Z+rxNBTp#TJK{=8-MNj90GS$fiS#D;fOJXG_Jp^Sj0dti58a%A2WPW zo&_}u`HX7x+bF7$c+0ZlUg&lBnn8v5O2qo`*UPCg>HVKKSih8Xv^S6+)TBJ-06sgM zr&0@kQ!rLp=#6dU4vETMS26p**LJ=aNi9-^3WfcI9`gMfEtTbK0wXU>4r*^@Z8x;0 zs1Is!MM2U~nh%`ckhZQbBxd5eq4E~y$JUEH%2i1yczw8>OkEUplvK#S_C-E$%}+U> z>}jR;v#fo!QOx(a&1}%yh{g~R(d_j2r_fQSQZ?b1pAfvBeWbHWSA^|{JkLb5SsFvn zUj4PsQsm(5ha7$p!1~Wgl}mp78?5;W!chaQxRZEt8uEvDdqZP*T1j}CV)rFcznu~Y zR;NStIaYUK*L3&)g{egmG@myRmWPgJ``o-F8lhna2v zNT|C>nQae(mV8R+Goh$m0Jl(GgI;;D0psq&54?Dm2JdsD%nz-Im!uW(va~;5D@ieN z@^wv^=2wljyQz(BgJG}kHn@s^+vZA>Y<;zM+nWZB{H|e?^$+V_>*xA1>u77JRo3^k zoYf8g|LVFD@TjWmKXons?gfs6lR@>w;^Sw%JwgDsuI9S*%Yxk2^OxpFjq- z=j<0upKG5gw>3-iIY-&rBTL_R<_yPf&9TCA!!cFUdu1ag7*q`&aeGU?kgmeC_+JRz zAkK8QZQ>=j+pQ0grj+GMy=VqGfvW4oSa%gnY^aeERg#97m>9%W`K zIi6v}!LX{CmcO_1(JOd{u4Zd(E28)L!6Kjyiv$DLT>P%3S!>~X_?)2edpJ=1w>VvM!?%+&Jpu`QQbkL+MwSTO2bt8jhJ)g9i=Y)t<)h z1&%_)^RyO^oumKo#eTz8^BpGZz5L(Fb3y&Rox58+Q}0HHK1AnmT}CfpI!U|{+uK}# zzHFIZwiKyAT{aq`=Fllr&;zGc%=&}tE3!9Z{+pLs)iRq}W>;8nmpN1>`-OGW5Onbn zqOnhR*gOM0THJBR!;nczv~_X0@Zb6(_O;k2t8Q_sLA$0nRdTFe+`j}i(FgQ~hfaj) zB=Qh;f5hi&RXKHa)@#c#ToAB%IU$gwz=>aoBY`|h%E7`vF7pEciS?yPW<8RwZRXl+e`%H%Yy#lJpb*P9D-v*<_E zwAyHceFU5#^TUzvm%G$5Gv&=p=B!<>MQUDfsW%b75REe1e}$Zyx#7s()h=~wvekOl z)P-y{E*0`xVk}3juUp1hR$1>fZ?k%-GvHwDiuyx#1>g8$Ei!e7ON~;t=wWVlYZOCl z;REoRaP}H*gPl~o9obB(Im6Mz+{`D3xt%?u=e-%JnYk#z-XqsGc3`aGAz4;FAW3s} zHxinO&!nW5ad!Kk+fnf>$TRkV(SzY!7at6_IkQYA%bO+P^%fn;?6mUow_Qk5_O@Hi z)$hGUmA7QeEsyIG8;Kns^Bv+Ub#m9b3tZ=1rLJV>8_p4q(~id-9&NLhYyXFRmR+-v zEysG;`Ws8LMYr@s(CW$RKX3%@Vv3l?<9w~Eg&^5aQOy%4MD#Zl&xr$s{l^@J+qDj3ATbg*Sz!|fE?r+ z1<+zoL#@o(=MT)920Eg!-+tMzH!=E>pwAj;ZX=F_RDNY8%O9EddqIN**P+GAZAAW8 zCQTR8{U`?iFZjo>sT%6KO4M~sNFN9ZMr-KN7#%#H>&gNR`p{PM^$?^xp$$S>Atd&d z5mPo)bxd=YaX5N~4ANsuyVA4utXi<#R+hYGo` zdxHk67rbWlW*Eq+|5!%X2pWW+j2suDYC%_`1b7+6nIRY1$BEAv&@?$Xw5sZoi)IQq z1qE;f@}UNtN*RM{KNWPppx#@cY|Xoi)uep@hx7slUKQXa0pvJQ+}+#~3*ZnA75V5f z`8^%K5$-4P1kTF9Lx#Qr+J%k@+H%y$OLyGE=wQ&Ge*W|u3>EXfCFlmAP;|chRgePr zGMgJPCJF<85@3!1IE56XivpY!pn-wA$D=Qy@}A2o@BF3bJLs+FlA!W_ltNIxBbRd0 zx7dkQ;hz5me-}!`HAs1&{Zh>q9wh&Wo=x)7N5C_xgFyx7&!QNGODyvH6~(EWZ)jLk zv$dB@dPi>nXNH2;t(4{UsF3q}XT9@z=R~bJGG=X}8lh2foCgVL4%9r66*-XXQC;v+ za$wDJZbA0Q=faV@eI8Y_$P#cmkfp(+2G%}RWB03CtmE6Xa>o{ria54-)lZw$AhH$4 zt9wJM9lv(;L$Kk$Z~zEP(0tkae#?TY;ph!7sNhSZYFsB$1(wYmedcdo%chBA z{4fpy37+hw@7BKzN1hoMuR<29>wfib4v}_M8See5??04HhsVbwm(!QG7xz-_^}5D! 
z;Yj0s$ffgmWw4KxtTm9rU{>V)E!8jUfPLzzS2{qrs(Fj|+!dc;J)_`0Z116<6_9Z?be)@DCP?s_wyEai@|W5S66qS$=?y(IWL=_JI`YgJ&yV3Q{!e z$YqZ=fli|r+>kK%=BZRRjH}5kK##5@(dHO<(~2j9CTp5CbA#uwb1q&6C;cY2q2iodxnn3Vj*(6$WKg@& zm+x}x^PW#euWyeSMMG|WuE=i#I5=p@in0`b;pfJ7js9#X#4u0;N_V7&J9wVVtd>#e z5@o13#Nc)YY3zHX6gX}RTQir!(exvo7I3ipxoHx z1~P*Y$g#`k?8ZsT+Y-=4BaKE$VBS2nC4r7eRpusIzKDh4!aI|3JaYMPC_D<2`5g{J zMn)iBgIpcq2HuGBiZ&TyJp^mqB(w`}V=)*~=}a9zP2PDZiRsi=qW}0YTBbA$LOa#1 zlVHf#pKpj)bqxv18V(FL?K7EY!T0{6=NuB!Jng#I)aZKGbI9E>CWShB6IF&fNJH`e z0g?H*erz_opQ_C}wY{g(liFX*j=bJ35i6PA5V(l_;81->?_s?n;gm#mmqq|>x-PH>L~+V%#5;rn5+;L=&Nejj5*=e@{5R0=whxGk z%?ONfHES&tr6`g@WjhRAy%8ECf5|!!Uh)g??Q)a+=RIwZ5fY=Ibt`N} zk^FthYMog>oPW48avL7)v$IQi_&~CXgb$>sqkm<$i3cD;bbx!>&AeuJoa31Dapz>D zaPQ*O9OwA!^rpsIMLX3^Nl=|58&-F~8H{#`magVDSLFc%!eM8)TwBvYMYJ_Ob<<`^ zGlwPr5p*nvRi&z&wut|ub@@0IPLuaT_XFuk_PtdKY1VjqR5+$No|eAOd(&qD;F$PR z(AQHBD2-fHde(_o*s_hc58oS9;6927K3(wpQ5GlbSFtK!K;bwYH%!37(I2pyD}gu_gFu$hXoE;4sUh4BU@WL?sEul#}KQ z)xAPm!c;Ldu#O6?6xtl2O+k5V!aqI%-5IKQ z!`Q)EXdFXf(n2B4783N)2$aF-1wlVE&@pf|ZD#Z?ovRrN8pgg=N3G}f2kkwfZDpFF zuGUK#PG8MM5exMltpqQi4Qvt=I~vDWz1Pm#K^fSRZwzfk;n zUhfOHr7EA@GI#*?-c-NYj}W#8wx{xEYY-h?5>WYSfN&xY=vjOW^OhvzV|eNX*$(uN zEcmAnYOlyuWRXn7M!U?9JvQpUS-`pF1CFI8&mPY*&qz;P+#B#p8XWs$?5^0~#kyk- zQM)4n737s+v?H?gpK!RdM#Ho%U-fA;Oq+9yA!-Y9W$x7s{m48RrkvZx+29MM3oH&` z>^Y@5UpJ(Afkm3%Y6$!S7y|!mhLA1@3+Z;}W|r@%;&eb|>qoAo#6-583LwZV4g^NJ zH3ZZ>sy4qU@m0I#_{6yBgDXd+w|SjK{%}5^Mw;p9=K-}rvmZ8Ez*%S6wcBkUZZg>( zGQg6{k7VgbCUkI>pSciFxTLDcnF}3N&}#n>-lWllR6|ls52)X(MDuEM7+IxC)Bsa6 zGF0JT3cke48x6c3qL*HVY-3etk<1(&IFbr6#xQd}e92FH!38Hk01)VI$LrJ`yxCUx zK1F%UFh7tF@?*TM1;g;R{#$0vw=!#j|M>~u_h}pfVG`;Yqc;q^9wbW@PucGQV3rR8 zFkOKAQ86Su2962vfdH_>!ELabi(~%*1*;37 z7N_F?u?hRs)y{i4xuY{j?4FLG(ymB4>%C}xe7E7R5@xY9i z0me6WvBs&d+05H0=#d$4jP}gHD>6gSDJa3~A3A2{ZK&ZSP`JnnTL05esF%@Ua~SOn z8lLQiy(4V_(3RFOKx~`CvB5|ACT8HsP4(Z42uLv)x1KBL?*y&6?I%Qg9fIo40>x=U zLG#!%`!b`W=Q277H1s&mSh0IHl35A5p3%x#mweRzF}{xrD13vWj@rm^2^1;31ib=Z zZaN1bn?4gjytQ+0MK=~a&gf9k;KUYn*^Z%FLAMIZp4*iA1m7b8FrEcxtMr3m=wKp! zu#znW37NVK63$nN3%B0Pq-#PtBP2d!g}9i!^75)Y;Yltv3Jf8==SYkIJiL+d5@>5y zDsOCDktq#oEkn!YSxsK}0(R*2|LVh72gbTEOS30zip1?h@$n|}-;hwUtQ~?PEt*Gi zsF5nL9S%_41^ztEIn3>tatQ4paiE|BK>AVUL7s1I+h-RI5&jL}#C!rZ#2aZJ%@^1$ z)qJ~!AE#&@c7*+ltPkE5mqBM+840_`8hj!LCiPAyr`;PR&B> zgB=Fafc>2tW^Cv^2iswrA@|=e?KL*%m)G(hr#Bi9r24h7*zWII!k^|M?9cH&01r{y zjy*@sjrPLNjH47@&%7BZ5i?&>)6PQ zH-)iidLkq;7+Ssb7+@%h7CjO@O`Ag7xAAV~5bzi^-FU)k*h2%{-cQmK$H2>G{BvS)81YK;ixuFYKQP|$gdU~i>n z!QT^ntKem@ykfTm+bV8%o^O}_Dwj$C54KXnvEeHfc&Whi00+g@MGyZ1%W_0$*MxS4 zX>67CQ|^~s(*(>nyhI=dN!(z>K8s-!>K>u)1QiQntEHAALd?djX0z3VOSv*VZ!k+~tfj=1YI@P++V1*=^OW;3XPje`V~F;tHZ!gW-}s8y z%$WL^``lN|>nu)n)I1QMvWMMId%|)1l+x^`7wy%K4UXSAdTBpu5pA({r`FDX(!L&X znZpPMQ)gRbyTg`f{mi<~I@ubqoQ21432xZ-&&ogM`KV|&cmG-(^1yyX)K|wp;7QXP z*G|+hw*zodkI=?F_?8*E4rB;r9v7|{y32_eB39O5+5C|EW&K~dOGTqY?SXVhDp2P1 zLd)f&JO9SWY#>2mrH-Gz22Zxdon)yU8#s`q#DZg)w%8Y)GqWCn#)4)@v-V4K8!qxK zS&`$S+t4iR*FNIvywEYk)1_71l$HCrDNhN$b|X)3)-&Ks4IKG?frsd4`URSFfbnsl zhC~bDqrE`7(K|rWs7=3svjA9CI#UQ#Y>C^H5wr?;H+lwmsB%a`yrF5Jn?P|^o6(=? 
zB)l{oiy)4hfEuhm3YRTI4M7J1XH#zX7&RMR&@S2qZ8|4>?9jV-ZO~^>+d`x0r$0-N zZUurq9>t1~8PWym`RDo2FW=2EJcuC+h0z=E_qkGUj*ee6Z z_(Ip{qg{gR1QMiEP&h)_xs$s;L%MSf1_uHP1_U2&v=1L{3=8Bxmf#Kb8O;@Z5U!A( zM)1wIL5GG9GG>CIVz#t%cu2!nDou-irFdt1TC+@BkZg2>9v+Brs@t;=uTCXJq0<*T zAp=mquJCHf=CEd8gBsMmoqMF>+&uD%+WRkP#|z!=2%NxcE_I665A6xU34B3fw1Tpk zLlX4UnIXi7@5U8;k5V`S^w*#d2kjZSA6Ebe2YRXC2b^xAn*x6za9&PVA5H7dd|9|c zDBBXk_c#abIVk7~L9a~hkKC7gg9r%UgLxYTePdiUe6zda)>VQ|!4+Th4}(6v!j%c5 zN>7duzhBT`<}KK4X7<2Icc9q3;R@&MOlUq2;Lm=v8rO6x^#;`l7{-kV1ee`#JJaI1 zmJdO|lG%i(m<%9?df&jE>^(?o`Q$UYOd~M*|ZWXc8beCljrV$@J1@pc$iM z9TQAx=3yr7nrF4P)Dzay!6V(WZ|5MC$bb_%*X_%0NB_sScm%YFg8j5R>ir!7d%nd^g3(MO-7@vnL zzS_{P2I<;A85xBug7>v3U9qh?!LSe4AjLt!>Zj*E<0<|W5KQG(9V;@N0moJaSG+c? zSf#xuL+Z|-sYvnOf5AK-e9pBMLr)(sdU`L?k}|#mRDP|Kfs2NoY^V8NQ$P(xRr1?b z^J{YwIKlwpnogpLM{a56Tv5+LajOa*yLG&q*|E;0rf6`KX^U^AeDvout^eXdcri@0 z8}e$VA+KtAiw9aufR*0vSG}q9DyF+5ckux0tdJ>>761%W-LKK@`qe5AGL0_oAMNp< zNwjoDMr5IkNZ%;^v`D;H-shgbOL~4Q=pib?SOk%;mLEPr#$v4?O9eS2NP{2~WEge= zM=2SvZ0YWAWnk)|*7nmRbT{NS830yg`=~oQ8fX20V|{8>XyddWaKwKUyiKQ#!(1{v z_k)Vhev1;@i!8Fto`ow${7qiwsxTsWjRmRsnek+XjMsi1uWHytd~^^4f}JE_v;YI^ NrAsn^8!%iM{|6KvL}CB{ diff --git a/changelog.d/8386.bugfix b/changelog.d/8386.bugfix new file mode 100644 index 000000000000..24983a1e950f --- /dev/null +++ b/changelog.d/8386.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in v1.20.0 which caused the `synapse_port_db` script to fail. diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index a34bdf18302c..ecca8b6e8f45 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -89,6 +89,7 @@ BOOLEAN_COLUMNS = { "redactions": ["have_censored"], "room_stats_state": ["is_federatable"], "local_media_repository": ["safe_from_quarantine"], + "users": ["shadow_banned"], } From 920dd1083efb7e38b8b85b4b32f090277d5b69db Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Thu, 24 Sep 2020 16:25:33 +0100 Subject: [PATCH 057/245] 1.20.1 --- CHANGES.md | 10 ++++++++++ changelog.d/8386.bugfix | 1 - changelog.d/8394.bugfix | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 5 files changed, 17 insertions(+), 3 deletions(-) delete mode 100644 changelog.d/8386.bugfix delete mode 100644 changelog.d/8394.bugfix diff --git a/CHANGES.md b/CHANGES.md index 84711de44892..650dc8487d51 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,13 @@ +Synapse 1.20.1 (2020-09-24) +=========================== + +Bugfixes +-------- + +- Fix a bug introduced in v1.20.0 which caused the `synapse_port_db` script to fail. ([\#8386](https://github.com/matrix-org/synapse/issues/8386)) +- Fix URLs being accidentally escaped in Jinja2 templates. Broke in v1.20.0. ([\#8394](https://github.com/matrix-org/synapse/issues/8394)) + + Synapse 1.20.0 (2020-09-22) =========================== diff --git a/changelog.d/8386.bugfix b/changelog.d/8386.bugfix deleted file mode 100644 index 24983a1e950f..000000000000 --- a/changelog.d/8386.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in v1.20.0 which caused the `synapse_port_db` script to fail. diff --git a/changelog.d/8394.bugfix b/changelog.d/8394.bugfix deleted file mode 100644 index 0ac1eeca0a2f..000000000000 --- a/changelog.d/8394.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix URLs being accidentally escaped in Jinja2 templates. Broke in v1.20.0. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index ae548f9f33d2..264ef9ce7cc3 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.20.1) stable; urgency=medium + + * New synapse release 1.20.1. 
+ + -- Synapse Packaging team Thu, 24 Sep 2020 16:25:22 +0100 + matrix-synapse-py3 (1.20.0) stable; urgency=medium [ Synapse Packaging team ] diff --git a/synapse/__init__.py b/synapse/__init__.py index 8242d05f600e..e40b582bd585 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -48,7 +48,7 @@ except ImportError: pass -__version__ = "1.20.0" +__version__ = "1.20.1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 5ce5a9f1447088bc29cac49f4a0ebfab6c0198d6 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Thu, 24 Sep 2020 16:26:57 +0100 Subject: [PATCH 058/245] Update changelog wording --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 650dc8487d51..16e83f6f100c 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,7 +5,7 @@ Bugfixes -------- - Fix a bug introduced in v1.20.0 which caused the `synapse_port_db` script to fail. ([\#8386](https://github.com/matrix-org/synapse/issues/8386)) -- Fix URLs being accidentally escaped in Jinja2 templates. Broke in v1.20.0. ([\#8394](https://github.com/matrix-org/synapse/issues/8394)) +- Fix a bug introduced in v1.20.0 which caused URLs to be accidentally escaped in Jinja2 templates. ([\#8394](https://github.com/matrix-org/synapse/issues/8394)) Synapse 1.20.0 (2020-09-22) From 271086ebda55f9ef0a0bdee69c96d79c5005e21d Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Thu, 24 Sep 2020 16:33:49 +0100 Subject: [PATCH 059/245] s/accidentally/incorrectly in changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 16e83f6f100c..7ea08fa117e8 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,7 +5,7 @@ Bugfixes -------- - Fix a bug introduced in v1.20.0 which caused the `synapse_port_db` script to fail. ([\#8386](https://github.com/matrix-org/synapse/issues/8386)) -- Fix a bug introduced in v1.20.0 which caused URLs to be accidentally escaped in Jinja2 templates. ([\#8394](https://github.com/matrix-org/synapse/issues/8394)) +- Fix a bug introduced in v1.20.0 which caused URLs to be incorrectly escaped in Jinja2 templates. ([\#8394](https://github.com/matrix-org/synapse/issues/8394)) Synapse 1.20.0 (2020-09-22) From ab903e7337f6c2c7cfcdac69b13dedf67e56d801 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Thu, 24 Sep 2020 16:35:31 +0100 Subject: [PATCH 060/245] s/URLs/variables in changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 7ea08fa117e8..5de819ea1e4d 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,7 +5,7 @@ Bugfixes -------- - Fix a bug introduced in v1.20.0 which caused the `synapse_port_db` script to fail. ([\#8386](https://github.com/matrix-org/synapse/issues/8386)) -- Fix a bug introduced in v1.20.0 which caused URLs to be incorrectly escaped in Jinja2 templates. ([\#8394](https://github.com/matrix-org/synapse/issues/8394)) +- Fix a bug introduced in v1.20.0 which caused variables to be incorrectly escaped in Jinja2 templates. ([\#8394](https://github.com/matrix-org/synapse/issues/8394)) Synapse 1.20.0 (2020-09-22) From f112cfe5bb2c918c9e942941686a05664d8bd7da Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 24 Sep 2020 16:53:51 +0100 Subject: [PATCH 061/245] Fix MultiWriteIdGenerator's handling of restarts. 
(#8374) On startup `MultiWriteIdGenerator` fetches the maximum stream ID for each instance from the table and uses that as its initial "current position" for each writer. This is problematic as a) it involves either a scan of events table or an index (neither of which is ideal), and b) if rows are being persisted out of order elsewhere while the process restarts then using the maximum stream ID is not correct. This could theoretically lead to race conditions where e.g. events that are persisted out of order are not sent down sync streams. We fix this by creating a new table that tracks the current positions of each writer to the stream, and update it each time we finish persisting a new entry. This is a relatively small overhead when persisting events. However for the cache invalidation stream this is a much bigger relative overhead, so instead we note that for invalidation we don't actually care about reliability over restarts (as there's no caches to invalidate) and simply don't bother reading and writing to the new table in that particular case. --- changelog.d/8374.bugfix | 1 + synapse/replication/slave/storage/_base.py | 2 + synapse/storage/databases/main/__init__.py | 8 +- .../storage/databases/main/events_worker.py | 4 + .../schema/delta/58/18stream_positions.sql | 22 +++ synapse/storage/util/id_generators.py | 148 +++++++++++++++--- tests/storage/test_id_generators.py | 119 +++++++++++++- 7 files changed, 274 insertions(+), 30 deletions(-) create mode 100644 changelog.d/8374.bugfix create mode 100644 synapse/storage/databases/main/schema/delta/58/18stream_positions.sql diff --git a/changelog.d/8374.bugfix b/changelog.d/8374.bugfix new file mode 100644 index 000000000000..155bc3404f52 --- /dev/null +++ b/changelog.d/8374.bugfix @@ -0,0 +1 @@ +Fix theoretical race condition where events are not sent down `/sync` if the synchrotron worker is restarted without restarting other workers. diff --git a/synapse/replication/slave/storage/_base.py b/synapse/replication/slave/storage/_base.py index d25fa49e1a9e..d0089fe06cef 100644 --- a/synapse/replication/slave/storage/_base.py +++ b/synapse/replication/slave/storage/_base.py @@ -31,11 +31,13 @@ def __init__(self, database: DatabasePool, db_conn, hs): self._cache_id_gen = MultiWriterIdGenerator( db_conn, database, + stream_name="caches", instance_name=hs.get_instance_name(), table="cache_invalidation_stream_by_instance", instance_column="instance_name", id_column="stream_id", sequence_name="cache_invalidation_stream_seq", + writers=[], ) # type: Optional[MultiWriterIdGenerator] else: self._cache_id_gen = None diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index ccb3384db9d2..0cb12f4c61be 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -160,14 +160,20 @@ def __init__(self, database: DatabasePool, db_conn, hs): ) if isinstance(self.database_engine, PostgresEngine): + # We set the `writers` to an empty list here as we don't care about + # missing updates over restarts, as we'll not have anything in our + # caches to invalidate. (This reduces the amount of writes to the DB + # that happen). 
self._cache_id_gen = MultiWriterIdGenerator( db_conn, database, - instance_name="master", + stream_name="caches", + instance_name=hs.get_instance_name(), table="cache_invalidation_stream_by_instance", instance_column="instance_name", id_column="stream_id", sequence_name="cache_invalidation_stream_seq", + writers=[], ) else: self._cache_id_gen = None diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index de9e8d1dc6db..f95679ebc440 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -83,21 +83,25 @@ def __init__(self, database: DatabasePool, db_conn, hs): self._stream_id_gen = MultiWriterIdGenerator( db_conn=db_conn, db=database, + stream_name="events", instance_name=hs.get_instance_name(), table="events", instance_column="instance_name", id_column="stream_ordering", sequence_name="events_stream_seq", + writers=hs.config.worker.writers.events, ) self._backfill_id_gen = MultiWriterIdGenerator( db_conn=db_conn, db=database, + stream_name="backfill", instance_name=hs.get_instance_name(), table="events", instance_column="instance_name", id_column="stream_ordering", sequence_name="events_backfill_stream_seq", positive=False, + writers=hs.config.worker.writers.events, ) else: # We shouldn't be running in worker mode with SQLite, but its useful diff --git a/synapse/storage/databases/main/schema/delta/58/18stream_positions.sql b/synapse/storage/databases/main/schema/delta/58/18stream_positions.sql new file mode 100644 index 000000000000..985fd949a245 --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/58/18stream_positions.sql @@ -0,0 +1,22 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE TABLE stream_positions ( + stream_name TEXT NOT NULL, + instance_name TEXT NOT NULL, + stream_id BIGINT NOT NULL +); + +CREATE UNIQUE INDEX stream_positions_idx ON stream_positions(stream_name, instance_name); diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index b0353ac2dcca..727fcc521c8b 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -22,6 +22,7 @@ import attr from typing_extensions import Deque +from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.database import DatabasePool, LoggingTransaction from synapse.storage.util.sequence import PostgresSequenceGenerator @@ -184,12 +185,16 @@ class MultiWriterIdGenerator: Args: db_conn db + stream_name: A name for the stream. instance_name: The name of this instance. table: Database table associated with stream. instance_column: Column that stores the row's writer's instance name id_column: Column that stores the stream ID. sequence_name: The name of the postgres sequence used to generate new IDs. + writers: A list of known writers to use to populate current positions + on startup. 
Can be empty if nothing uses `get_current_token` or + `get_positions` (e.g. caches stream). positive: Whether the IDs are positive (true) or negative (false). When using negative IDs we go backwards from -1 to -2, -3, etc. """ @@ -198,16 +203,20 @@ def __init__( self, db_conn, db: DatabasePool, + stream_name: str, instance_name: str, table: str, instance_column: str, id_column: str, sequence_name: str, + writers: List[str], positive: bool = True, ): self._db = db + self._stream_name = stream_name self._instance_name = instance_name self._positive = positive + self._writers = writers self._return_factor = 1 if positive else -1 # We lock as some functions may be called from DB threads. @@ -216,9 +225,7 @@ def __init__( # Note: If we are a negative stream then we still store all the IDs as # positive to make life easier for us, and simply negate the IDs when we # return them. - self._current_positions = self._load_current_ids( - db_conn, table, instance_column, id_column - ) + self._current_positions = {} # type: Dict[str, int] # Set of local IDs that we're still processing. The current position # should be less than the minimum of this set (if not empty). @@ -251,30 +258,80 @@ def __init__( self._sequence_gen = PostgresSequenceGenerator(sequence_name) + # This goes and fills out the above state from the database. + self._load_current_ids(db_conn, table, instance_column, id_column) + def _load_current_ids( self, db_conn, table: str, instance_column: str, id_column: str - ) -> Dict[str, int]: - # If positive stream aggregate via MAX. For negative stream use MIN - # *and* negate the result to get a positive number. - sql = """ - SELECT %(instance)s, %(agg)s(%(id)s) FROM %(table)s - GROUP BY %(instance)s - """ % { - "instance": instance_column, - "id": id_column, - "table": table, - "agg": "MAX" if self._positive else "-MIN", - } - + ): cur = db_conn.cursor() - cur.execute(sql) - # `cur` is an iterable over returned rows, which are 2-tuples. - current_positions = dict(cur) + # Load the current positions of all writers for the stream. + if self._writers: + sql = """ + SELECT instance_name, stream_id FROM stream_positions + WHERE stream_name = ? + """ + sql = self._db.engine.convert_param_style(sql) - cur.close() + cur.execute(sql, (self._stream_name,)) + + self._current_positions = { + instance: stream_id * self._return_factor + for instance, stream_id in cur + if instance in self._writers + } + + # We set the `_persisted_upto_position` to be the minimum of all current + # positions. If empty we use the max stream ID from the DB table. + min_stream_id = min(self._current_positions.values(), default=None) + + if min_stream_id is None: + sql = """ + SELECT COALESCE(%(agg)s(%(id)s), 1) FROM %(table)s + """ % { + "id": id_column, + "table": table, + "agg": "MAX" if self._positive else "-MIN", + } + cur.execute(sql) + (stream_id,) = cur.fetchone() + self._persisted_upto_position = stream_id + else: + # If we have a min_stream_id then we pull out everything greater + # than it from the DB so that we can prefill + # `_known_persisted_positions` and get a more accurate + # `_persisted_upto_position`. + # + # We also check if any of the later rows are from this instance, in + # which case we use that for this instance's current position. This + # is to handle the case where we didn't finish persisting to the + # stream positions table before restart (or the stream position + # table otherwise got out of date). + + sql = """ + SELECT %(instance)s, %(id)s FROM %(table)s + WHERE ? 
%(cmp)s %(id)s + """ % { + "id": id_column, + "table": table, + "instance": instance_column, + "cmp": "<=" if self._positive else ">=", + } + sql = self._db.engine.convert_param_style(sql) + cur.execute(sql, (min_stream_id,)) + + self._persisted_upto_position = min_stream_id + + with self._lock: + for (instance, stream_id,) in cur: + stream_id = self._return_factor * stream_id + self._add_persisted_position(stream_id) - return current_positions + if instance == self._instance_name: + self._current_positions[instance] = stream_id + + cur.close() def _load_next_id_txn(self, txn) -> int: return self._sequence_gen.get_next_id_txn(txn) @@ -316,6 +373,21 @@ def get_next_txn(self, txn: LoggingTransaction): txn.call_after(self._mark_id_as_finished, next_id) txn.call_on_exception(self._mark_id_as_finished, next_id) + # Update the `stream_positions` table with newly updated stream + # ID (unless self._writers is not set in which case we don't + # bother, as nothing will read it). + # + # We only do this on the success path so that the persisted current + # position points to a persited row with the correct instance name. + if self._writers: + txn.call_after( + run_as_background_process, + "MultiWriterIdGenerator._update_table", + self._db.runInteraction, + "MultiWriterIdGenerator._update_table", + self._update_stream_positions_table_txn, + ) + return self._return_factor * next_id def _mark_id_as_finished(self, next_id: int): @@ -447,6 +519,28 @@ def _add_persisted_position(self, new_id: int): # do. break + def _update_stream_positions_table_txn(self, txn): + """Update the `stream_positions` table with newly persisted position. + """ + + if not self._writers: + return + + # We upsert the value, ensuring on conflict that we always increase the + # value (or decrease if stream goes backwards). + sql = """ + INSERT INTO stream_positions (stream_name, instance_name, stream_id) + VALUES (?, ?, ?) + ON CONFLICT (stream_name, instance_name) + DO UPDATE SET + stream_id = %(agg)s(stream_positions.stream_id, EXCLUDED.stream_id) + """ % { + "agg": "GREATEST" if self._positive else "LEAST", + } + + pos = (self.get_current_token_for_writer(self._instance_name),) + txn.execute(sql, (self._stream_name, self._instance_name, pos)) + @attr.s(slots=True) class _AsyncCtxManagerWrapper: @@ -503,4 +597,16 @@ async def __aexit__(self, exc_type, exc, tb): if exc_type is not None: return False + # Update the `stream_positions` table with newly updated stream + # ID (unless self._writers is not set in which case we don't + # bother, as nothing will read it). + # + # We only do this on the success path so that the persisted current + # position points to a persisted row with the correct instance name. 
+ if self.id_gen._writers: + await self.id_gen._db.runInteraction( + "MultiWriterIdGenerator._update_table", + self.id_gen._update_stream_positions_table_txn, + ) + return False diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py index fb8f5bc255f7..d4ff55fbff7d 100644 --- a/tests/storage/test_id_generators.py +++ b/tests/storage/test_id_generators.py @@ -43,16 +43,20 @@ def _setup_db(self, txn): """ ) - def _create_id_generator(self, instance_name="master") -> MultiWriterIdGenerator: + def _create_id_generator( + self, instance_name="master", writers=["master"] + ) -> MultiWriterIdGenerator: def _create(conn): return MultiWriterIdGenerator( conn, self.db_pool, + stream_name="test_stream", instance_name=instance_name, table="foobar", instance_column="instance_name", id_column="stream_id", sequence_name="foobar_seq", + writers=writers, ) return self.get_success(self.db_pool.runWithConnection(_create)) @@ -68,6 +72,13 @@ def _insert(txn): "INSERT INTO foobar VALUES (nextval('foobar_seq'), ?)", (instance_name,), ) + txn.execute( + """ + INSERT INTO stream_positions VALUES ('test_stream', ?, lastval()) + ON CONFLICT (stream_name, instance_name) DO UPDATE SET stream_id = lastval() + """, + (instance_name,), + ) self.get_success(self.db_pool.runInteraction("_insert_rows", _insert)) @@ -81,6 +92,13 @@ def _insert(txn): "INSERT INTO foobar VALUES (?, ?)", (stream_id, instance_name,), ) txn.execute("SELECT setval('foobar_seq', ?)", (stream_id,)) + txn.execute( + """ + INSERT INTO stream_positions VALUES ('test_stream', ?, ?) + ON CONFLICT (stream_name, instance_name) DO UPDATE SET stream_id = ? + """, + (instance_name, stream_id, stream_id), + ) self.get_success(self.db_pool.runInteraction("_insert_row_with_id", _insert)) @@ -179,8 +197,8 @@ def test_multi_instance(self): self._insert_rows("first", 3) self._insert_rows("second", 4) - first_id_gen = self._create_id_generator("first") - second_id_gen = self._create_id_generator("second") + first_id_gen = self._create_id_generator("first", writers=["first", "second"]) + second_id_gen = self._create_id_generator("second", writers=["first", "second"]) self.assertEqual(first_id_gen.get_positions(), {"first": 3, "second": 7}) self.assertEqual(first_id_gen.get_current_token_for_writer("first"), 3) @@ -262,7 +280,7 @@ def test_get_persisted_upto_position(self): self._insert_row_with_id("first", 3) self._insert_row_with_id("second", 5) - id_gen = self._create_id_generator("first") + id_gen = self._create_id_generator("first", writers=["first", "second"]) self.assertEqual(id_gen.get_positions(), {"first": 3, "second": 5}) @@ -300,7 +318,7 @@ def test_get_persisted_upto_position_get_next(self): self._insert_row_with_id("first", 3) self._insert_row_with_id("second", 5) - id_gen = self._create_id_generator("first") + id_gen = self._create_id_generator("first", writers=["first", "second"]) self.assertEqual(id_gen.get_positions(), {"first": 3, "second": 5}) @@ -319,6 +337,80 @@ async def _get_next_async(): # `persisted_upto_position` in this case, then it will be correct in the # other cases that are tested above (since they'll hit the same code). + def test_restart_during_out_of_order_persistence(self): + """Test that restarting a process while another process is writing out + of order updates are handled correctly. 
+ """ + + # Prefill table with 7 rows written by 'master' + self._insert_rows("master", 7) + + id_gen = self._create_id_generator() + + self.assertEqual(id_gen.get_positions(), {"master": 7}) + self.assertEqual(id_gen.get_current_token_for_writer("master"), 7) + + # Persist two rows at once + ctx1 = self.get_success(id_gen.get_next()) + ctx2 = self.get_success(id_gen.get_next()) + + s1 = self.get_success(ctx1.__aenter__()) + s2 = self.get_success(ctx2.__aenter__()) + + self.assertEqual(s1, 8) + self.assertEqual(s2, 9) + + self.assertEqual(id_gen.get_positions(), {"master": 7}) + self.assertEqual(id_gen.get_current_token_for_writer("master"), 7) + + # We finish persisting the second row before restart + self.get_success(ctx2.__aexit__(None, None, None)) + + # We simulate a restart of another worker by just creating a new ID gen. + id_gen_worker = self._create_id_generator("worker") + + # Restarted worker should not see the second persisted row + self.assertEqual(id_gen_worker.get_positions(), {"master": 7}) + self.assertEqual(id_gen_worker.get_current_token_for_writer("master"), 7) + + # Now if we persist the first row then both instances should jump ahead + # correctly. + self.get_success(ctx1.__aexit__(None, None, None)) + + self.assertEqual(id_gen.get_positions(), {"master": 9}) + id_gen_worker.advance("master", 9) + self.assertEqual(id_gen_worker.get_positions(), {"master": 9}) + + def test_writer_config_change(self): + """Test that changing the writer config correctly works. + """ + + self._insert_row_with_id("first", 3) + self._insert_row_with_id("second", 5) + + # Initial config has two writers + id_gen = self._create_id_generator("first", writers=["first", "second"]) + self.assertEqual(id_gen.get_persisted_upto_position(), 3) + + # New config removes one of the configs. Note that if the writer is + # removed from config we assume that it has been shut down and has + # finished persisting, hence why the persisted upto position is 5. + id_gen_2 = self._create_id_generator("second", writers=["second"]) + self.assertEqual(id_gen_2.get_persisted_upto_position(), 5) + + # This config points to a single, previously unused writer. + id_gen_3 = self._create_id_generator("third", writers=["third"]) + self.assertEqual(id_gen_3.get_persisted_upto_position(), 5) + + # Check that we get a sane next stream ID with this new config. + + async def _get_next_async(): + async with id_gen_3.get_next() as stream_id: + self.assertEqual(stream_id, 6) + + self.get_success(_get_next_async()) + self.assertEqual(id_gen_3.get_persisted_upto_position(), 6) + class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase): """Tests MultiWriterIdGenerator that produce *negative* stream IDs. @@ -345,16 +437,20 @@ def _setup_db(self, txn): """ ) - def _create_id_generator(self, instance_name="master") -> MultiWriterIdGenerator: + def _create_id_generator( + self, instance_name="master", writers=["master"] + ) -> MultiWriterIdGenerator: def _create(conn): return MultiWriterIdGenerator( conn, self.db_pool, + stream_name="test_stream", instance_name=instance_name, table="foobar", instance_column="instance_name", id_column="stream_id", sequence_name="foobar_seq", + writers=writers, positive=False, ) @@ -368,6 +464,13 @@ def _insert(txn): txn.execute( "INSERT INTO foobar VALUES (?, ?)", (stream_id, instance_name,), ) + txn.execute( + """ + INSERT INTO stream_positions VALUES ('test_stream', ?, ?) + ON CONFLICT (stream_name, instance_name) DO UPDATE SET stream_id = ? 
+ """, + (instance_name, -stream_id, -stream_id), + ) self.get_success(self.db_pool.runInteraction("_insert_row", _insert)) @@ -409,8 +512,8 @@ def test_multiple_instance(self): """Tests that having multiple instances that get advanced over federation works corretly. """ - id_gen_1 = self._create_id_generator("first") - id_gen_2 = self._create_id_generator("second") + id_gen_1 = self._create_id_generator("first", writers=["first", "second"]) + id_gen_2 = self._create_id_generator("second", writers=["first", "second"]) async def _get_next_async(): async with id_gen_1.get_next() as stream_id: From 3e87d79e1c6ef894387ee2f24e008dfb8f5f853f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 25 Sep 2020 09:58:32 +0100 Subject: [PATCH 062/245] Fix schema delta for servers that have not backfilled (#8396) Fixes #8395. --- changelog.d/8396.feature | 1 + .../schema/delta/58/14events_instance_name.sql.postgres | 4 +++- synapse/storage/util/id_generators.py | 6 +++++- 3 files changed, 9 insertions(+), 2 deletions(-) create mode 100644 changelog.d/8396.feature diff --git a/changelog.d/8396.feature b/changelog.d/8396.feature new file mode 100644 index 000000000000..b363e929ea8c --- /dev/null +++ b/changelog.d/8396.feature @@ -0,0 +1 @@ +Add experimental support for sharding event persister. diff --git a/synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql.postgres b/synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql.postgres index 97c1e6a0c5d7..c31f9af82a0f 100644 --- a/synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql.postgres +++ b/synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql.postgres @@ -21,6 +21,8 @@ SELECT setval('events_stream_seq', ( CREATE SEQUENCE IF NOT EXISTS events_backfill_stream_seq; +-- If the server has never backfilled a room then doing `-MIN(...)` will give +-- a negative result, hence why we do `GREATEST(...)` SELECT setval('events_backfill_stream_seq', ( - SELECT COALESCE(-MIN(stream_ordering), 1) FROM events + SELECT GREATEST(COALESCE(-MIN(stream_ordering), 1), 1) FROM events )); diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 727fcc521c8b..4269eaf9187e 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -287,8 +287,12 @@ def _load_current_ids( min_stream_id = min(self._current_positions.values(), default=None) if min_stream_id is None: + # We add a GREATEST here to ensure that the result is always + # positive. (This can be a problem for e.g. backfill streams where + # the server has never backfilled). sql = """ - SELECT COALESCE(%(agg)s(%(id)s), 1) FROM %(table)s + SELECT GREATEST(COALESCE(%(agg)s(%(id)s), 1), 1) + FROM %(table)s """ % { "id": id_column, "table": table, From abd04b6af0671517a01781c8bd10fef2a6c32cc4 Mon Sep 17 00:00:00 2001 From: Tdxdxoz Date: Fri, 25 Sep 2020 19:01:45 +0800 Subject: [PATCH 063/245] Allow existing users to login via OpenID Connect. (#8345) Co-authored-by: Benjamin Koch This adds configuration flags that will match a user to pre-existing users when logging in via OpenID Connect. This is useful when switching to an existing SSO system. 
--- changelog.d/8345.feature | 1 + docs/sample_config.yaml | 5 +++ synapse/config/oidc_config.py | 6 +++ synapse/handlers/oidc_handler.py | 42 ++++++++++++------- .../storage/databases/main/registration.py | 4 +- tests/handlers/test_oidc.py | 35 ++++++++++++++++ 6 files changed, 76 insertions(+), 17 deletions(-) create mode 100644 changelog.d/8345.feature diff --git a/changelog.d/8345.feature b/changelog.d/8345.feature new file mode 100644 index 000000000000..4ee5b6a56e37 --- /dev/null +++ b/changelog.d/8345.feature @@ -0,0 +1 @@ +Add a configuration option that allows existing users to log in with OpenID Connect. Contributed by @BBBSnowball and @OmmyZhang. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index fb04ff283dee..845f53779530 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1689,6 +1689,11 @@ oidc_config: # #skip_verification: true + # Uncomment to allow a user logging in via OIDC to match a pre-existing account instead + # of failing. This could be used if switching from password logins to OIDC. Defaults to false. + # + #allow_existing_users: true + # An external module can be provided here as a custom solution to mapping # attributes returned from a OIDC provider onto a matrix user. # diff --git a/synapse/config/oidc_config.py b/synapse/config/oidc_config.py index e0939bce84a1..70fc8a2f6268 100644 --- a/synapse/config/oidc_config.py +++ b/synapse/config/oidc_config.py @@ -56,6 +56,7 @@ def read_config(self, config, **kwargs): self.oidc_userinfo_endpoint = oidc_config.get("userinfo_endpoint") self.oidc_jwks_uri = oidc_config.get("jwks_uri") self.oidc_skip_verification = oidc_config.get("skip_verification", False) + self.oidc_allow_existing_users = oidc_config.get("allow_existing_users", False) ump_config = oidc_config.get("user_mapping_provider", {}) ump_config.setdefault("module", DEFAULT_USER_MAPPING_PROVIDER) @@ -158,6 +159,11 @@ def generate_config_section(self, config_dir_path, server_name, **kwargs): # #skip_verification: true + # Uncomment to allow a user logging in via OIDC to match a pre-existing account instead + # of failing. This could be used if switching from password logins to OIDC. Defaults to false. + # + #allow_existing_users: true + # An external module can be provided here as a custom solution to mapping # attributes returned from a OIDC provider onto a matrix user. # diff --git a/synapse/handlers/oidc_handler.py b/synapse/handlers/oidc_handler.py index 4230dbaf998b..0e06e4408d3b 100644 --- a/synapse/handlers/oidc_handler.py +++ b/synapse/handlers/oidc_handler.py @@ -114,6 +114,7 @@ def __init__(self, hs: "HomeServer"): hs.config.oidc_user_mapping_provider_config ) # type: OidcMappingProvider self._skip_verification = hs.config.oidc_skip_verification # type: bool + self._allow_existing_users = hs.config.oidc_allow_existing_users # type: bool self._http_client = hs.get_proxied_http_client() self._auth_handler = hs.get_auth_handler() @@ -849,7 +850,8 @@ async def _map_userinfo_to_user( If we don't find the user that way, we should register the user, mapping the localpart and the display name from the UserInfo. - If a user already exists with the mxid we've mapped, raise an exception. + If a user already exists with the mxid we've mapped and allow_existing_users + is disabled, raise an exception. 
Args: userinfo: an object representing the user @@ -905,21 +907,31 @@ async def _map_userinfo_to_user( localpart = map_username_to_mxid_localpart(attributes["localpart"]) - user_id = UserID(localpart, self._hostname) - if await self._datastore.get_users_by_id_case_insensitive(user_id.to_string()): - # This mxid is taken - raise MappingException( - "mxid '{}' is already taken".format(user_id.to_string()) + user_id = UserID(localpart, self._hostname).to_string() + users = await self._datastore.get_users_by_id_case_insensitive(user_id) + if users: + if self._allow_existing_users: + if len(users) == 1: + registered_user_id = next(iter(users)) + elif user_id in users: + registered_user_id = user_id + else: + raise MappingException( + "Attempted to login as '{}' but it matches more than one user inexactly: {}".format( + user_id, list(users.keys()) + ) + ) + else: + # This mxid is taken + raise MappingException("mxid '{}' is already taken".format(user_id)) + else: + # It's the first time this user is logging in and the mapped mxid was + # not taken, register the user + registered_user_id = await self._registration_handler.register_user( + localpart=localpart, + default_display_name=attributes["display_name"], + user_agent_ips=(user_agent, ip_address), ) - - # It's the first time this user is logging in and the mapped mxid was - # not taken, register the user - registered_user_id = await self._registration_handler.register_user( - localpart=localpart, - default_display_name=attributes["display_name"], - user_agent_ips=(user_agent, ip_address), - ) - await self._datastore.record_user_external_id( self._auth_provider_id, remote_user_id, registered_user_id, ) diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 33825e894936..48ce7ecd1638 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -393,7 +393,7 @@ def f(txn): async def get_user_by_external_id( self, auth_provider: str, external_id: str - ) -> str: + ) -> Optional[str]: """Look up a user by their external auth id Args: @@ -401,7 +401,7 @@ async def get_user_by_external_id( external_id: id on that system Returns: - str|None: the mxid of the user, or None if they are not known + the mxid of the user, or None if they are not known """ return await self.db_pool.simple_select_one_onecol( table="user_external_ids", diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index 89ec5fcb31bb..5910772aa8d5 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -617,3 +617,38 @@ def test_map_userinfo_to_user(self): ) ) self.assertEqual(mxid, "@test_user_2:test") + + # Test if the mxid is already taken + store = self.hs.get_datastore() + user3 = UserID.from_string("@test_user_3:test") + self.get_success( + store.register_user(user_id=user3.to_string(), password_hash=None) + ) + userinfo = {"sub": "test3", "username": "test_user_3"} + e = self.get_failure( + self.handler._map_userinfo_to_user( + userinfo, token, "user-agent", "10.10.10.10" + ), + MappingException, + ) + self.assertEqual(str(e.value), "mxid '@test_user_3:test' is already taken") + + @override_config({"oidc_config": {"allow_existing_users": True}}) + def test_map_userinfo_to_existing_user(self): + """Existing users can log in with OpenID Connect when allow_existing_users is True.""" + store = self.hs.get_datastore() + user4 = UserID.from_string("@test_user_4:test") + self.get_success( + 
store.register_user(user_id=user4.to_string(), password_hash=None) + ) + userinfo = { + "sub": "test4", + "username": "test_user_4", + } + token = {} + mxid = self.get_success( + self.handler._map_userinfo_to_user( + userinfo, token, "user-agent", "10.10.10.10" + ) + ) + self.assertEqual(mxid, "@test_user_4:test") From fec6f9ac178867a8e7c5410e0d25898f29bab35c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 25 Sep 2020 12:29:54 +0100 Subject: [PATCH 064/245] Fix occasional "Re-starting finished log context" from keyring (#8398) * Fix test_verify_json_objects_for_server_awaits_previous_requests It turns out that this wasn't really testing what it thought it was testing (in particular, `check_context` was turning failures into success, which was making the tests pass even though it wasn't clear they should have been. It was also somewhat overcomplex - we can test what it was trying to test without mocking out perspectives servers. * Fix warnings about finished logcontexts in the keyring We need to make sure that we finish the key fetching magic before we run the verifying code, to ensure that we don't mess up our logcontexts. --- changelog.d/8398.bugfix | 1 + synapse/crypto/keyring.py | 70 ++++++++++++-------- tests/crypto/test_keyring.py | 120 ++++++++++++++++------------------- 3 files changed, 101 insertions(+), 90 deletions(-) create mode 100644 changelog.d/8398.bugfix diff --git a/changelog.d/8398.bugfix b/changelog.d/8398.bugfix new file mode 100644 index 000000000000..e432aeebf190 --- /dev/null +++ b/changelog.d/8398.bugfix @@ -0,0 +1 @@ +Fix "Re-starting finished log context" warning when receiving an event we already had over federation. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 42e4087a926e..c04ad77cf9ec 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -42,7 +42,6 @@ ) from synapse.logging.context import ( PreserveLoggingContext, - current_context, make_deferred_yieldable, preserve_fn, run_in_background, @@ -233,8 +232,6 @@ async def _start_key_lookups(self, verify_requests): """ try: - ctx = current_context() - # map from server name to a set of outstanding request ids server_to_request_ids = {} @@ -265,12 +262,8 @@ def lookup_done(res, verify_request): # if there are no more requests for this server, we can drop the lock. if not server_requests: - with PreserveLoggingContext(ctx): - logger.debug("Releasing key lookup lock on %s", server_name) - - # ... but not immediately, as that can cause stack explosions if - # we get a long queue of lookups. 
- self.clock.call_later(0, drop_server_lock, server_name) + logger.debug("Releasing key lookup lock on %s", server_name) + drop_server_lock(server_name) return res @@ -335,20 +328,32 @@ async def do_iterations(): ) # look for any requests which weren't satisfied - with PreserveLoggingContext(): - for verify_request in remaining_requests: - verify_request.key_ready.errback( - SynapseError( - 401, - "No key for %s with ids in %s (min_validity %i)" - % ( - verify_request.server_name, - verify_request.key_ids, - verify_request.minimum_valid_until_ts, - ), - Codes.UNAUTHORIZED, - ) + while remaining_requests: + verify_request = remaining_requests.pop() + rq_str = ( + "VerifyJsonRequest(server=%s, key_ids=%s, min_valid=%i)" + % ( + verify_request.server_name, + verify_request.key_ids, + verify_request.minimum_valid_until_ts, ) + ) + + # If we run the errback immediately, it may cancel our + # loggingcontext while we are still in it, so instead we + # schedule it for the next time round the reactor. + # + # (this also ensures that we don't get a stack overflow if we + # has a massive queue of lookups waiting for this server). + self.clock.call_later( + 0, + verify_request.key_ready.errback, + SynapseError( + 401, + "Failed to find any key to satisfy %s" % (rq_str,), + Codes.UNAUTHORIZED, + ), + ) except Exception as err: # we don't really expect to get here, because any errors should already # have been caught and logged. But if we do, let's log the error and make @@ -410,10 +415,23 @@ async def _attempt_key_fetches_with_fetcher(self, fetcher, remaining_requests): # key was not valid at this point continue - with PreserveLoggingContext(): - verify_request.key_ready.callback( - (server_name, key_id, fetch_key_result.verify_key) - ) + # we have a valid key for this request. If we run the callback + # immediately, it may cancel our loggingcontext while we are still in + # it, so instead we schedule it for the next time round the reactor. + # + # (this also ensures that we don't get a stack overflow if we had + # a massive queue of lookups waiting for this server). 
+ logger.debug( + "Found key %s:%s for %s", + server_name, + key_id, + verify_request.request_name, + ) + self.clock.call_later( + 0, + verify_request.key_ready.callback, + (server_name, key_id, fetch_key_result.verify_key), + ) completed.append(verify_request) break diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 2e6e7abf1fa7..5cf408f21faf 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -23,6 +23,7 @@ from signedjson.key import encode_verify_key_base64, get_verify_key from twisted.internet import defer +from twisted.internet.defer import Deferred, ensureDeferred from synapse.api.errors import SynapseError from synapse.crypto import keyring @@ -33,7 +34,6 @@ ) from synapse.logging.context import ( LoggingContext, - PreserveLoggingContext, current_context, make_deferred_yieldable, ) @@ -68,54 +68,40 @@ def sign_response(self, res): class KeyringTestCase(unittest.HomeserverTestCase): - def make_homeserver(self, reactor, clock): - self.mock_perspective_server = MockPerspectiveServer() - self.http_client = Mock() - - config = self.default_config() - config["trusted_key_servers"] = [ - { - "server_name": self.mock_perspective_server.server_name, - "verify_keys": self.mock_perspective_server.get_verify_keys(), - } - ] - - return self.setup_test_homeserver( - handlers=None, http_client=self.http_client, config=config - ) - - def check_context(self, _, expected): + def check_context(self, val, expected): self.assertEquals(getattr(current_context(), "request", None), expected) + return val def test_verify_json_objects_for_server_awaits_previous_requests(self): - key1 = signedjson.key.generate_signing_key(1) + mock_fetcher = keyring.KeyFetcher() + mock_fetcher.get_keys = Mock() + kr = keyring.Keyring(self.hs, key_fetchers=(mock_fetcher,)) - kr = keyring.Keyring(self.hs) + # a signed object that we are going to try to validate + key1 = signedjson.key.generate_signing_key(1) json1 = {} signedjson.sign.sign_json(json1, "server10", key1) - persp_resp = { - "server_keys": [ - self.mock_perspective_server.get_signed_key( - "server10", signedjson.key.get_verify_key(key1) - ) - ] - } - persp_deferred = defer.Deferred() + # start off a first set of lookups. We make the mock fetcher block until this + # deferred completes. 
+ first_lookup_deferred = Deferred() + + async def first_lookup_fetch(keys_to_fetch): + self.assertEquals(current_context().request, "context_11") + self.assertEqual(keys_to_fetch, {"server10": {get_key_id(key1): 0}}) - async def get_perspectives(**kwargs): - self.assertEquals(current_context().request, "11") - with PreserveLoggingContext(): - await persp_deferred - return persp_resp + await make_deferred_yieldable(first_lookup_deferred) + return { + "server10": { + get_key_id(key1): FetchKeyResult(get_verify_key(key1), 100) + } + } - self.http_client.post_json.side_effect = get_perspectives + mock_fetcher.get_keys.side_effect = first_lookup_fetch - # start off a first set of lookups - @defer.inlineCallbacks - def first_lookup(): - with LoggingContext("11") as context_11: - context_11.request = "11" + async def first_lookup(): + with LoggingContext("context_11") as context_11: + context_11.request = "context_11" res_deferreds = kr.verify_json_objects_for_server( [("server10", json1, 0, "test10"), ("server11", {}, 0, "test11")] @@ -124,7 +110,7 @@ def first_lookup(): # the unsigned json should be rejected pretty quickly self.assertTrue(res_deferreds[1].called) try: - yield res_deferreds[1] + await res_deferreds[1] self.assertFalse("unsigned json didn't cause a failure") except SynapseError: pass @@ -132,45 +118,51 @@ def first_lookup(): self.assertFalse(res_deferreds[0].called) res_deferreds[0].addBoth(self.check_context, None) - yield make_deferred_yieldable(res_deferreds[0]) + await make_deferred_yieldable(res_deferreds[0]) - # let verify_json_objects_for_server finish its work before we kill the - # logcontext - yield self.clock.sleep(0) + d0 = ensureDeferred(first_lookup()) - d0 = first_lookup() - - # wait a tick for it to send the request to the perspectives server - # (it first tries the datastore) - self.pump() - self.http_client.post_json.assert_called_once() + mock_fetcher.get_keys.assert_called_once() # a second request for a server with outstanding requests # should block rather than start a second call - @defer.inlineCallbacks - def second_lookup(): - with LoggingContext("12") as context_12: - context_12.request = "12" - self.http_client.post_json.reset_mock() - self.http_client.post_json.return_value = defer.Deferred() + + async def second_lookup_fetch(keys_to_fetch): + self.assertEquals(current_context().request, "context_12") + return { + "server10": { + get_key_id(key1): FetchKeyResult(get_verify_key(key1), 100) + } + } + + mock_fetcher.get_keys.reset_mock() + mock_fetcher.get_keys.side_effect = second_lookup_fetch + second_lookup_state = [0] + + async def second_lookup(): + with LoggingContext("context_12") as context_12: + context_12.request = "context_12" res_deferreds_2 = kr.verify_json_objects_for_server( [("server10", json1, 0, "test")] ) res_deferreds_2[0].addBoth(self.check_context, None) - yield make_deferred_yieldable(res_deferreds_2[0]) + second_lookup_state[0] = 1 + await make_deferred_yieldable(res_deferreds_2[0]) + second_lookup_state[0] = 2 - # let verify_json_objects_for_server finish its work before we kill the - # logcontext - yield self.clock.sleep(0) - - d2 = second_lookup() + d2 = ensureDeferred(second_lookup()) self.pump() - self.http_client.post_json.assert_not_called() + # the second request should be pending, but the fetcher should not yet have been + # called + self.assertEqual(second_lookup_state[0], 1) + mock_fetcher.get_keys.assert_not_called() # complete the first request - persp_deferred.callback(persp_resp) + 
first_lookup_deferred.callback(None) + + # and now both verifications should succeed. self.get_success(d0) self.get_success(d2) From 31acc5c30938bd532670d45304f6750de6e6e759 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 25 Sep 2020 11:05:54 -0400 Subject: [PATCH 065/245] Escape the error description on the sso_error template. (#8405) --- changelog.d/8405.feature | 1 + synapse/res/templates/sso_error.html | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8405.feature diff --git a/changelog.d/8405.feature b/changelog.d/8405.feature new file mode 100644 index 000000000000..f3c4a74bc79b --- /dev/null +++ b/changelog.d/8405.feature @@ -0,0 +1 @@ +Consolidate the SSO error template across all configuration. diff --git a/synapse/res/templates/sso_error.html b/synapse/res/templates/sso_error.html index af8459719ae4..944bc9c9cab2 100644 --- a/synapse/res/templates/sso_error.html +++ b/synapse/res/templates/sso_error.html @@ -12,7 +12,7 @@

             There was an error during authentication:
         </p>
         <div class="error">
-            {{ error_description }}
+            {{ error_description | e }}
         </div>
         <p>
If you are seeing this page after clicking a link sent to you via email, make sure you only click the confirmation link once, and that you open the From 4b3a1faa08f5ad16e0e00dc629fb25be520575d7 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Mon, 28 Sep 2020 00:23:35 +0100 Subject: [PATCH 066/245] typo --- synapse/storage/databases/main/schema/delta/56/event_labels.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/databases/main/schema/delta/56/event_labels.sql b/synapse/storage/databases/main/schema/delta/56/event_labels.sql index 5e29c1da19e7..ccf287971cb4 100644 --- a/synapse/storage/databases/main/schema/delta/56/event_labels.sql +++ b/synapse/storage/databases/main/schema/delta/56/event_labels.sql @@ -13,7 +13,7 @@ * limitations under the License. */ --- room_id and topoligical_ordering are denormalised from the events table in order to +-- room_id and topological_ordering are denormalised from the events table in order to -- make the index work. CREATE TABLE IF NOT EXISTS event_labels ( event_id TEXT, From 450ec4844599b6f06ff6c699a8edc067fa7d4217 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 28 Sep 2020 13:15:00 +0100 Subject: [PATCH 067/245] A pair of tiny cleanups in the federation request code. (#8401) --- changelog.d/8401.misc | 1 + synapse/handlers/federation.py | 2 +- synapse/http/matrixfederationclient.py | 2 -- 3 files changed, 2 insertions(+), 3 deletions(-) create mode 100644 changelog.d/8401.misc diff --git a/changelog.d/8401.misc b/changelog.d/8401.misc new file mode 100644 index 000000000000..27fd7ab129d9 --- /dev/null +++ b/changelog.d/8401.misc @@ -0,0 +1 @@ +A pair of tiny cleanups in the federation request code. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 9f773aefa7fa..5bcfb231b2cb 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -281,7 +281,7 @@ async def on_receive_pdu(self, origin, pdu, sent_to_us_directly=False) -> None: raise Exception( "Error fetching missing prev_events for %s: %s" % (event_id, e) - ) + ) from e # Update the set of things we've seen after trying to # fetch the missing stuff diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 3c86cbc546db..b02c74ab2d4b 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -473,8 +473,6 @@ async def _send_request( ) response = await request_deferred - except TimeoutError as e: - raise RequestSendFailed(e, can_retry=True) from e except DNSLookupError as e: raise RequestSendFailed(e, can_retry=retry_on_dns_fail) from e except Exception as e: From bd715e12786f4e48d7a8a1973119bbc0502ecff3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dagfinn=20Ilmari=20Manns=C3=A5ker?= Date: Mon, 28 Sep 2020 15:35:02 +0100 Subject: [PATCH 068/245] Add `ui_auth_sessions_ips` table to `synapse_port_db` ignore list (#8410) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This table was created in #8034 (1.20.0). It references `ui_auth_sessions`, which is ignored, so this one should be too. 
Signed-off-by: Dagfinn Ilmari Mannsåker --- changelog.d/8410.bugfix | 1 + scripts/synapse_port_db | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/8410.bugfix diff --git a/changelog.d/8410.bugfix b/changelog.d/8410.bugfix new file mode 100644 index 000000000000..1323ddc525db --- /dev/null +++ b/changelog.d/8410.bugfix @@ -0,0 +1 @@ +Fix a v1.20.0 regression in the `synapse_port_db` script regarding the `ui_auth_sessions_ips` table. diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index 684a518b8e5f..ae2887b7d2f4 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -145,6 +145,7 @@ IGNORED_TABLES = { # the sessions are transient anyway, so ignore them. "ui_auth_sessions", "ui_auth_sessions_credentials", + "ui_auth_sessions_ips", } From 5e3ca12b158b4abefe2e3a54259ab5255dca93d8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 28 Sep 2020 17:58:33 +0100 Subject: [PATCH 069/245] Create a mechanism for marking tests "logcontext clean" (#8399) --- changelog.d/8399.misc | 1 + synapse/logging/context.py | 43 +++++++++++++++++++----------------- tests/crypto/test_keyring.py | 3 +++ tests/unittest.py | 15 ++++++++++++- 4 files changed, 41 insertions(+), 21 deletions(-) create mode 100644 changelog.d/8399.misc diff --git a/changelog.d/8399.misc b/changelog.d/8399.misc new file mode 100644 index 000000000000..ce6e8123cf8b --- /dev/null +++ b/changelog.d/8399.misc @@ -0,0 +1 @@ +Create a mechanism for marking tests "logcontext clean". diff --git a/synapse/logging/context.py b/synapse/logging/context.py index 2e282d9d670e..ca0c774cc5bc 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -65,6 +65,11 @@ def get_thread_resource_usage() -> "Optional[resource._RUsage]": return None +# a hook which can be set during testing to assert that we aren't abusing logcontexts. +def logcontext_error(msg: str): + logger.warning(msg) + + # get an id for the current thread. # # threading.get_ident doesn't actually return an OS-level tid, and annoyingly, @@ -330,10 +335,9 @@ def __enter__(self) -> "LoggingContext": """Enters this logging context into thread local storage""" old_context = set_current_context(self) if self.previous_context != old_context: - logger.warning( - "Expected previous context %r, found %r", - self.previous_context, - old_context, + logcontext_error( + "Expected previous context %r, found %r" + % (self.previous_context, old_context,) ) return self @@ -346,10 +350,10 @@ def __exit__(self, type, value, traceback) -> None: current = set_current_context(self.previous_context) if current is not self: if current is SENTINEL_CONTEXT: - logger.warning("Expected logging context %s was lost", self) + logcontext_error("Expected logging context %s was lost" % (self,)) else: - logger.warning( - "Expected logging context %s but found %s", self, current + logcontext_error( + "Expected logging context %s but found %s" % (self, current) ) # the fact that we are here suggests that the caller thinks that everything @@ -387,16 +391,16 @@ def start(self, rusage: "Optional[resource._RUsage]") -> None: support getrusuage. 
""" if get_thread_id() != self.main_thread: - logger.warning("Started logcontext %s on different thread", self) + logcontext_error("Started logcontext %s on different thread" % (self,)) return if self.finished: - logger.warning("Re-starting finished log context %s", self) + logcontext_error("Re-starting finished log context %s" % (self,)) # If we haven't already started record the thread resource usage so # far if self.usage_start: - logger.warning("Re-starting already-active log context %s", self) + logcontext_error("Re-starting already-active log context %s" % (self,)) else: self.usage_start = rusage @@ -414,7 +418,7 @@ def stop(self, rusage: "Optional[resource._RUsage]") -> None: try: if get_thread_id() != self.main_thread: - logger.warning("Stopped logcontext %s on different thread", self) + logcontext_error("Stopped logcontext %s on different thread" % (self,)) return if not rusage: @@ -422,9 +426,9 @@ def stop(self, rusage: "Optional[resource._RUsage]") -> None: # Record the cpu used since we started if not self.usage_start: - logger.warning( - "Called stop on logcontext %s without recording a start rusage", - self, + logcontext_error( + "Called stop on logcontext %s without recording a start rusage" + % (self,) ) return @@ -584,14 +588,13 @@ def __exit__(self, type, value, traceback) -> None: if context != self._new_context: if not context: - logger.warning( - "Expected logging context %s was lost", self._new_context + logcontext_error( + "Expected logging context %s was lost" % (self._new_context,) ) else: - logger.warning( - "Expected logging context %s but found %s", - self._new_context, - context, + logcontext_error( + "Expected logging context %s but found %s" + % (self._new_context, context,) ) diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 5cf408f21faf..8ff1460c0d7a 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -41,6 +41,7 @@ from tests import unittest from tests.test_utils import make_awaitable +from tests.unittest import logcontext_clean class MockPerspectiveServer: @@ -67,6 +68,7 @@ def sign_response(self, res): signedjson.sign.sign_json(res, self.server_name, self.key) +@logcontext_clean class KeyringTestCase(unittest.HomeserverTestCase): def check_context(self, val, expected): self.assertEquals(getattr(current_context(), "request", None), expected) @@ -309,6 +311,7 @@ async def get_keys2(keys_to_fetch): mock_fetcher2.get_keys.assert_called_once() +@logcontext_clean class ServerKeyFetcherTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): self.http_client = Mock() diff --git a/tests/unittest.py b/tests/unittest.py index dabf69cff405..bbe50c38513b 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -23,7 +23,7 @@ import time from typing import Optional, Tuple, Type, TypeVar, Union -from mock import Mock +from mock import Mock, patch from canonicaljson import json @@ -169,6 +169,19 @@ def INFO(target): return target +def logcontext_clean(target): + """A decorator which marks the TestCase or method as 'logcontext_clean' + + ... ie, any logcontext errors should cause a test failure + """ + + def logcontext_error(msg): + raise AssertionError("logcontext error: %s" % (msg)) + + patcher = patch("synapse.logging.context.logcontext_error", new=logcontext_error) + return patcher(target) + + class HomeserverTestCase(TestCase): """ A base TestCase that reduces boilerplate for HomeServer-using test cases. 
From bd380d942fdf91cf1214d6859f2bc97d12a92ab4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 28 Sep 2020 18:00:30 +0100 Subject: [PATCH 070/245] Add checks for postgres sequence consistency (#8402) --- changelog.d/8402.misc | 1 + docs/postgres.md | 11 +++ .../storage/databases/main/registration.py | 3 + synapse/storage/databases/state/store.py | 3 + synapse/storage/util/id_generators.py | 5 ++ synapse/storage/util/sequence.py | 90 ++++++++++++++++++- tests/storage/test_id_generators.py | 22 ++++- tests/unittest.py | 31 ++++++- 8 files changed, 160 insertions(+), 6 deletions(-) create mode 100644 changelog.d/8402.misc diff --git a/changelog.d/8402.misc b/changelog.d/8402.misc new file mode 100644 index 000000000000..ad1804d207aa --- /dev/null +++ b/changelog.d/8402.misc @@ -0,0 +1 @@ +Add checks on startup that PostgreSQL sequences are consistent with their associated tables. diff --git a/docs/postgres.md b/docs/postgres.md index e71a1975d8d2..c30cc1fd8cef 100644 --- a/docs/postgres.md +++ b/docs/postgres.md @@ -106,6 +106,17 @@ Note that the above may fail with an error about duplicate rows if corruption has already occurred, and such duplicate rows will need to be manually removed. +## Fixing inconsistent sequences error + +Synapse uses Postgres sequences to generate IDs for various tables. A sequence +and associated table can get out of sync if, for example, Synapse has been +downgraded and then upgraded again. + +To fix the issue shut down Synapse (including any and all workers) and run the +SQL command included in the error message. Once done Synapse should start +successfully. + + ## Tuning Postgres The default settings should be fine for most deployments. For larger diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 48ce7ecd1638..a83df7759d79 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -41,6 +41,9 @@ def __init__(self, database: DatabasePool, db_conn, hs): self.config = hs.config self.clock = hs.get_clock() + # Note: we don't check this sequence for consistency as we'd have to + # call `find_max_generated_user_id_localpart` each time, which is + # expensive if there are many entries. self._user_id_seq = build_sequence_generator( database.engine, find_max_generated_user_id_localpart, "user_id_seq", ) diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index bec3780a32b1..989f0cbc9d3b 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -99,6 +99,9 @@ def get_max_state_group_txn(txn: Cursor): self._state_group_seq_gen = build_sequence_generator( self.database_engine, get_max_state_group_txn, "state_group_id_seq" ) + self._state_group_seq_gen.check_consistency( + db_conn, table="state_groups", id_column="id" + ) @cached(max_entries=10000, iterable=True) async def get_state_group_delta(self, state_group): diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 4269eaf9187e..4fd7573e260d 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -258,6 +258,11 @@ def __init__( self._sequence_gen = PostgresSequenceGenerator(sequence_name) + # We check that the table and sequence haven't diverged. + self._sequence_gen.check_consistency( + db_conn, table=table, id_column=id_column, positive=positive + ) + # This goes and fills out the above state from the database. 
self._load_current_ids(db_conn, table, instance_column, id_column) diff --git a/synapse/storage/util/sequence.py b/synapse/storage/util/sequence.py index ffc189474890..2dd95e270920 100644 --- a/synapse/storage/util/sequence.py +++ b/synapse/storage/util/sequence.py @@ -13,11 +13,34 @@ # See the License for the specific language governing permissions and # limitations under the License. import abc +import logging import threading from typing import Callable, List, Optional -from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine -from synapse.storage.types import Cursor +from synapse.storage.engines import ( + BaseDatabaseEngine, + IncorrectDatabaseSetup, + PostgresEngine, +) +from synapse.storage.types import Connection, Cursor + +logger = logging.getLogger(__name__) + + +_INCONSISTENT_SEQUENCE_ERROR = """ +Postgres sequence '%(seq)s' is inconsistent with associated +table '%(table)s'. This can happen if Synapse has been downgraded and +then upgraded again, or due to a bad migration. + +To fix this error, shut down Synapse (including any and all workers) +and run the following SQL: + + SELECT setval('%(seq)s', ( + %(max_id_sql)s + )); + +See docs/postgres.md for more information. +""" class SequenceGenerator(metaclass=abc.ABCMeta): @@ -28,6 +51,19 @@ def get_next_id_txn(self, txn: Cursor) -> int: """Gets the next ID in the sequence""" ... + @abc.abstractmethod + def check_consistency( + self, db_conn: Connection, table: str, id_column: str, positive: bool = True + ): + """Should be called during start up to test that the current value of + the sequence is greater than or equal to the maximum ID in the table. + + This is to handle various cases where the sequence value can get out + of sync with the table, e.g. if Synapse gets rolled back to a previous + version and the rolled forwards again. + """ + ... + class PostgresSequenceGenerator(SequenceGenerator): """An implementation of SequenceGenerator which uses a postgres sequence""" @@ -45,6 +81,50 @@ def get_next_mult_txn(self, txn: Cursor, n: int) -> List[int]: ) return [i for (i,) in txn] + def check_consistency( + self, db_conn: Connection, table: str, id_column: str, positive: bool = True + ): + txn = db_conn.cursor() + + # First we get the current max ID from the table. + table_sql = "SELECT GREATEST(%(agg)s(%(id)s), 0) FROM %(table)s" % { + "id": id_column, + "table": table, + "agg": "MAX" if positive else "-MIN", + } + + txn.execute(table_sql) + row = txn.fetchone() + if not row: + # Table is empty, so nothing to do. + txn.close() + return + + # Now we fetch the current value from the sequence and compare with the + # above. + max_stream_id = row[0] + txn.execute( + "SELECT last_value, is_called FROM %(seq)s" % {"seq": self._sequence_name} + ) + last_value, is_called = txn.fetchone() + txn.close() + + # If `is_called` is False then `last_value` is actually the value that + # will be generated next, so we decrement to get the true "last value". 
+ if not is_called: + last_value -= 1 + + if max_stream_id > last_value: + logger.warning( + "Postgres sequence %s is behind table %s: %d < %d", + last_value, + max_stream_id, + ) + raise IncorrectDatabaseSetup( + _INCONSISTENT_SEQUENCE_ERROR + % {"seq": self._sequence_name, "table": table, "max_id_sql": table_sql} + ) + GetFirstCallbackType = Callable[[Cursor], int] @@ -81,6 +161,12 @@ def get_next_id_txn(self, txn: Cursor) -> int: self._current_max_id += 1 return self._current_max_id + def check_consistency( + self, db_conn: Connection, table: str, id_column: str, positive: bool = True + ): + # There is nothing to do for in memory sequences + pass + def build_sequence_generator( database_engine: BaseDatabaseEngine, diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py index d4ff55fbff7d..4558bee7be85 100644 --- a/tests/storage/test_id_generators.py +++ b/tests/storage/test_id_generators.py @@ -12,9 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - - from synapse.storage.database import DatabasePool +from synapse.storage.engines import IncorrectDatabaseSetup from synapse.storage.util.id_generators import MultiWriterIdGenerator from tests.unittest import HomeserverTestCase @@ -59,7 +58,7 @@ def _create(conn): writers=writers, ) - return self.get_success(self.db_pool.runWithConnection(_create)) + return self.get_success_or_raise(self.db_pool.runWithConnection(_create)) def _insert_rows(self, instance_name: str, number: int): """Insert N rows as the given instance, inserting with stream IDs pulled @@ -411,6 +410,23 @@ async def _get_next_async(): self.get_success(_get_next_async()) self.assertEqual(id_gen_3.get_persisted_upto_position(), 6) + def test_sequence_consistency(self): + """Test that we error out if the table and sequence diverges. + """ + + # Prefill with some rows + self._insert_row_with_id("master", 3) + + # Now we add a row *without* updating the stream ID + def _insert(txn): + txn.execute("INSERT INTO foobar VALUES (26, 'master')") + + self.get_success(self.db_pool.runInteraction("_insert", _insert)) + + # Creating the ID gen should error + with self.assertRaises(IncorrectDatabaseSetup): + self._create_id_generator("first") + class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase): """Tests MultiWriterIdGenerator that produce *negative* stream IDs. diff --git a/tests/unittest.py b/tests/unittest.py index bbe50c38513b..e654c0442d6c 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -14,7 +14,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import gc import hashlib import hmac @@ -28,6 +27,7 @@ from canonicaljson import json from twisted.internet.defer import Deferred, ensureDeferred, succeed +from twisted.python.failure import Failure from twisted.python.threadpool import ThreadPool from twisted.trial import unittest @@ -476,6 +476,35 @@ def get_failure(self, d, exc): self.pump() return self.failureResultOf(d, exc) + def get_success_or_raise(self, d, by=0.0): + """Drive deferred to completion and return result or raise exception + on failure. 
+ """ + + if inspect.isawaitable(d): + deferred = ensureDeferred(d) + if not isinstance(deferred, Deferred): + return d + + results = [] # type: list + deferred.addBoth(results.append) + + self.pump(by=by) + + if not results: + self.fail( + "Success result expected on {!r}, found no result instead".format( + deferred + ) + ) + + result = results[0] + + if isinstance(result, Failure): + result.raiseException() + + return result + def register_user(self, username, password, admin=False): """ Register a user. Requires the Admin API be registered. From d4605d1f16b5d71c72dbf07b1ffeaa81c0cb87a9 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Mon, 28 Sep 2020 18:46:59 +0100 Subject: [PATCH 071/245] Don't check whether a 3pid is allowed to register during password reset This endpoint should only deal with emails that have already been approved, and are attached with user's account. There's no need to re-check them here. --- synapse/rest/client/v2_alpha/account.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index c3ce0f62592a..ed0d0772f88a 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -96,13 +96,6 @@ async def on_POST(self, request): send_attempt = body["send_attempt"] next_link = body.get("next_link") # Optional param - if not check_3pid_allowed(self.hs, "email", email): - raise SynapseError( - 403, - "Your email domain is not authorized on this server", - Codes.THREEPID_DENIED, - ) - # Raise if the provided next_link value isn't valid assert_valid_next_link(self.hs, next_link) From fe443acaee36900757d79dbf7d2fb5629df38e3c Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Mon, 28 Sep 2020 18:51:41 +0100 Subject: [PATCH 072/245] Changelog --- changelog.d/8414.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/8414.bugfix diff --git a/changelog.d/8414.bugfix b/changelog.d/8414.bugfix new file mode 100644 index 000000000000..315876e89238 --- /dev/null +++ b/changelog.d/8414.bugfix @@ -0,0 +1 @@ +Remove unnecessary 3PID registration check when resetting password via an email address. Bug introduced in v0.34.0rc2. \ No newline at end of file From 1c262431f9bf768d106bf79a568479fa5a0784a1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 29 Sep 2020 10:29:21 +0100 Subject: [PATCH 073/245] Fix handling of connection timeouts in outgoing http requests (#8400) * Remove `on_timeout_cancel` from `timeout_deferred` The `on_timeout_cancel` param to `timeout_deferred` wasn't always called on a timeout (in particular if the canceller raised an exception), so it was unreliable. It was also only used in one place, and to be honest it's easier to do what it does a different way. * Fix handling of connection timeouts in outgoing http requests Turns out that if we get a timeout during connection, then a different exception is raised, which wasn't always handled correctly. To fix it, catch the exception in SimpleHttpClient and turn it into a RequestTimedOutError (which is already a documented exception). Also add a description to RequestTimedOutError so that we can see which stage it failed at. * Fix incorrect handling of timeouts reading federation responses This was trapping the wrong sort of TimeoutError, so was never being hit. The effect was relatively minor, but we should fix this so that it does the expected thing. 
* Fix inconsistent handling of `timeout` param between methods `get_json`, `put_json` and `delete_json` were applying a different timeout to the response body to `post_json`; bring them in line and test. Co-authored-by: Patrick Cloke Co-authored-by: Erik Johnston --- changelog.d/8400.bugfix | 1 + synapse/handlers/identity.py | 25 ++-- synapse/http/__init__.py | 17 +-- synapse/http/client.py | 54 +++++--- synapse/http/matrixfederationclient.py | 55 ++++++-- synapse/http/proxyagent.py | 16 ++- synapse/util/async_helpers.py | 47 +++---- tests/http/test_fedclient.py | 14 +- tests/http/test_simple_client.py | 180 +++++++++++++++++++++++++ 9 files changed, 311 insertions(+), 98 deletions(-) create mode 100644 changelog.d/8400.bugfix create mode 100644 tests/http/test_simple_client.py diff --git a/changelog.d/8400.bugfix b/changelog.d/8400.bugfix new file mode 100644 index 000000000000..835658ba5eac --- /dev/null +++ b/changelog.d/8400.bugfix @@ -0,0 +1 @@ +Fix incorrect handling of timeouts on outgoing HTTP requests. diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index ab15570f7a97..bc3e9607ca82 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -21,8 +21,6 @@ import urllib.parse from typing import Awaitable, Callable, Dict, List, Optional, Tuple -from twisted.internet.error import TimeoutError - from synapse.api.errors import ( CodeMessageException, Codes, @@ -30,6 +28,7 @@ SynapseError, ) from synapse.config.emailconfig import ThreepidBehaviour +from synapse.http import RequestTimedOutError from synapse.http.client import SimpleHttpClient from synapse.types import JsonDict, Requester from synapse.util import json_decoder @@ -93,7 +92,7 @@ async def threepid_from_creds( try: data = await self.http_client.get_json(url, query_params) - except TimeoutError: + except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") except HttpResponseException as e: logger.info( @@ -173,7 +172,7 @@ async def bind_threepid( if e.code != 404 or not use_v2: logger.error("3PID bind failed with Matrix error: %r", e) raise e.to_synapse_error() - except TimeoutError: + except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") except CodeMessageException as e: data = json_decoder.decode(e.msg) # XXX WAT? 
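From a caller's point of view, the upshot is that both failure modes now surface as the documented `RequestTimedOutError`. A rough sketch (the wrapper function is hypothetical; the exception and messages are the ones introduced here):

    import logging
    from typing import Optional

    from synapse.http import RequestTimedOutError

    logger = logging.getLogger(__name__)


    async def get_remote_json(client, uri: str) -> Optional[dict]:
        """Hypothetical caller of SimpleHttpClient.get_json."""
        try:
            return await client.get_json(uri)
        except RequestTimedOutError as e:
            # Both a connect-phase and a response-phase timeout now land here,
            # and e.msg says which stage it was ("Timeout connecting to remote
            # server" vs "Timeout waiting for response from remote server").
            logger.warning("Request to %s timed out: %s", uri, e.msg)
            return None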
@@ -273,7 +272,7 @@ async def try_unbind_threepid_with_id_server( else: logger.error("Failed to unbind threepid on identity server: %s", e) raise SynapseError(500, "Failed to contact identity server") - except TimeoutError: + except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") await self.store.remove_user_bound_threepid( @@ -419,7 +418,7 @@ async def requestEmailToken( except HttpResponseException as e: logger.info("Proxied requestToken failed: %r", e) raise e.to_synapse_error() - except TimeoutError: + except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") async def requestMsisdnToken( @@ -471,7 +470,7 @@ async def requestMsisdnToken( except HttpResponseException as e: logger.info("Proxied requestToken failed: %r", e) raise e.to_synapse_error() - except TimeoutError: + except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") assert self.hs.config.public_baseurl @@ -553,7 +552,7 @@ async def proxy_msisdn_submit_token( id_server + "/_matrix/identity/api/v1/validate/msisdn/submitToken", body, ) - except TimeoutError: + except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") except HttpResponseException as e: logger.warning("Error contacting msisdn account_threepid_delegate: %s", e) @@ -627,7 +626,7 @@ async def _lookup_3pid_v1( # require or validate it. See the following for context: # https://github.com/matrix-org/synapse/issues/5253#issuecomment-666246950 return data["mxid"] - except TimeoutError: + except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") except IOError as e: logger.warning("Error from v1 identity server lookup: %s" % (e,)) @@ -655,7 +654,7 @@ async def _lookup_3pid_v2( "%s%s/_matrix/identity/v2/hash_details" % (id_server_scheme, id_server), {"access_token": id_access_token}, ) - except TimeoutError: + except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") if not isinstance(hash_details, dict): @@ -727,7 +726,7 @@ async def _lookup_3pid_v2( }, headers=headers, ) - except TimeoutError: + except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") except Exception as e: logger.warning("Error when performing a v2 3pid lookup: %s", e) @@ -823,7 +822,7 @@ async def ask_id_server_for_third_party_invite( invite_config, {"Authorization": create_id_access_token_header(id_access_token)}, ) - except TimeoutError: + except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") except HttpResponseException as e: if e.code != 404: @@ -841,7 +840,7 @@ async def ask_id_server_for_third_party_invite( data = await self.blacklisting_http_client.post_json_get_json( url, invite_config ) - except TimeoutError: + except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") except HttpResponseException as e: logger.warning( diff --git a/synapse/http/__init__.py b/synapse/http/__init__.py index 8eb363859146..59b01b812c53 100644 --- a/synapse/http/__init__.py +++ b/synapse/http/__init__.py @@ -16,8 +16,6 @@ import re from twisted.internet import task -from twisted.internet.defer import CancelledError -from twisted.python import failure from twisted.web.client import FileBodyProducer from synapse.api.errors import SynapseError @@ -26,19 +24,8 @@ class RequestTimedOutError(SynapseError): """Exception representing timeout of an outbound request""" - def __init__(self): - 
super().__init__(504, "Timed out") - - -def cancelled_to_request_timed_out_error(value, timeout): - """Turns CancelledErrors into RequestTimedOutErrors. - - For use with async.add_timeout_to_deferred - """ - if isinstance(value, failure.Failure): - value.trap(CancelledError) - raise RequestTimedOutError() - return value + def __init__(self, msg): + super().__init__(504, msg) ACCESS_TOKEN_RE = re.compile(r"(\?.*access(_|%5[Ff])token=)[^&]*(.*)$") diff --git a/synapse/http/client.py b/synapse/http/client.py index 4694adc400b8..8324632cb625 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -13,7 +13,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import logging import urllib from io import BytesIO @@ -38,7 +37,7 @@ from OpenSSL import SSL from OpenSSL.SSL import VERIFY_NONE -from twisted.internet import defer, protocol, ssl +from twisted.internet import defer, error as twisted_error, protocol, ssl from twisted.internet.interfaces import ( IReactorPluggableNameResolver, IResolutionReceiver, @@ -46,17 +45,18 @@ from twisted.internet.task import Cooperator from twisted.python.failure import Failure from twisted.web._newclient import ResponseDone -from twisted.web.client import Agent, HTTPConnectionPool, readBody +from twisted.web.client import ( + Agent, + HTTPConnectionPool, + ResponseNeverReceived, + readBody, +) from twisted.web.http import PotentialDataLoss from twisted.web.http_headers import Headers from twisted.web.iweb import IResponse from synapse.api.errors import Codes, HttpResponseException, SynapseError -from synapse.http import ( - QuieterFileBodyProducer, - cancelled_to_request_timed_out_error, - redact_uri, -) +from synapse.http import QuieterFileBodyProducer, RequestTimedOutError, redact_uri from synapse.http.proxyagent import ProxyAgent from synapse.logging.context import make_deferred_yieldable from synapse.logging.opentracing import set_tag, start_active_span, tags @@ -332,8 +332,6 @@ async def request( RequestTimedOutError if the request times out before the headers are read """ - # A small wrapper around self.agent.request() so we can easily attach - # counters to it outgoing_requests_counter.labels(method).inc() # log request but strip `access_token` (AS requests for example include this) @@ -362,15 +360,17 @@ async def request( data=body_producer, headers=headers, **self._extra_treq_args - ) + ) # type: defer.Deferred + # we use our own timeout mechanism rather than treq's as a workaround # for https://twistedmatrix.com/trac/ticket/9534. request_deferred = timeout_deferred( - request_deferred, - 60, - self.hs.get_reactor(), - cancelled_to_request_timed_out_error, + request_deferred, 60, self.hs.get_reactor(), ) + + # turn timeouts into RequestTimedOutErrors + request_deferred.addErrback(_timeout_to_request_timed_out_error) + response = await make_deferred_yieldable(request_deferred) incoming_responses_counter.labels(method, response.code).inc() @@ -410,7 +410,7 @@ async def post_urlencoded_get_json( parsed json Raises: - RequestTimedOutException: if there is a timeout before the response headers + RequestTimedOutError: if there is a timeout before the response headers are received. Note there is currently no timeout on reading the response body. 
@@ -461,7 +461,7 @@ async def post_json_get_json( parsed json Raises: - RequestTimedOutException: if there is a timeout before the response headers + RequestTimedOutError: if there is a timeout before the response headers are received. Note there is currently no timeout on reading the response body. @@ -506,7 +506,7 @@ async def get_json( Returns: Succeeds when we get a 2xx HTTP response, with the HTTP body as JSON. Raises: - RequestTimedOutException: if there is a timeout before the response headers + RequestTimedOutError: if there is a timeout before the response headers are received. Note there is currently no timeout on reading the response body. @@ -538,7 +538,7 @@ async def put_json( Returns: Succeeds when we get a 2xx HTTP response, with the HTTP body as JSON. Raises: - RequestTimedOutException: if there is a timeout before the response headers + RequestTimedOutError: if there is a timeout before the response headers are received. Note there is currently no timeout on reading the response body. @@ -586,7 +586,7 @@ async def get_raw( Succeeds when we get a 2xx HTTP response, with the HTTP body as bytes. Raises: - RequestTimedOutException: if there is a timeout before the response headers + RequestTimedOutError: if there is a timeout before the response headers are received. Note there is currently no timeout on reading the response body. @@ -631,7 +631,7 @@ async def get_file( headers, absolute URI of the response and HTTP response code. Raises: - RequestTimedOutException: if there is a timeout before the response headers + RequestTimedOutError: if there is a timeout before the response headers are received. Note there is currently no timeout on reading the response body. @@ -684,6 +684,18 @@ async def get_file( ) +def _timeout_to_request_timed_out_error(f: Failure): + if f.check(twisted_error.TimeoutError, twisted_error.ConnectingCancelledError): + # The TCP connection has its own timeout (set by the 'connectTimeout' param + # on the Agent), which raises twisted_error.TimeoutError exception. + raise RequestTimedOutError("Timeout connecting to remote server") + elif f.check(defer.TimeoutError, ResponseNeverReceived): + # this one means that we hit our overall timeout on the request + raise RequestTimedOutError("Timeout waiting for response from remote server") + + return f + + # XXX: FIXME: This is horribly copy-pasted from matrixfederationclient. # The two should be factored out. diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index b02c74ab2d4b..c23a4d7c0cc0 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -171,7 +171,7 @@ async def _handle_json_response( d = timeout_deferred(d, timeout=timeout_sec, reactor=reactor) body = await make_deferred_yieldable(d) - except TimeoutError as e: + except defer.TimeoutError as e: logger.warning( "{%s} [%s] Timed out reading response - %s %s", request.txn_id, @@ -655,10 +655,14 @@ async def put_json( long_retries (bool): whether to use the long retry algorithm. See docs on _send_request for details. - timeout (int|None): number of milliseconds to wait for the response headers - (including connecting to the server), *for each attempt*. + timeout (int|None): number of milliseconds to wait for the response. self._default_timeout (60s) by default. 
+ Note that we may make several attempts to send the request; this + timeout applies to the time spent waiting for response headers for + *each* attempt (including connection time) as well as the time spent + reading the response body after a 200 response. + ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. backoff_on_404 (bool): True if we should count a 404 response as @@ -704,8 +708,13 @@ async def put_json( timeout=timeout, ) + if timeout is not None: + _sec_timeout = timeout / 1000 + else: + _sec_timeout = self.default_timeout + body = await _handle_json_response( - self.reactor, self.default_timeout, request, response, start_ms + self.reactor, _sec_timeout, request, response, start_ms ) return body @@ -734,10 +743,14 @@ async def post_json( long_retries (bool): whether to use the long retry algorithm. See docs on _send_request for details. - timeout (int|None): number of milliseconds to wait for the response headers - (including connecting to the server), *for each attempt*. + timeout (int|None): number of milliseconds to wait for the response. self._default_timeout (60s) by default. + Note that we may make several attempts to send the request; this + timeout applies to the time spent waiting for response headers for + *each* attempt (including connection time) as well as the time spent + reading the response body after a 200 response. + ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. @@ -801,10 +814,14 @@ async def get_json( args (dict|None): A dictionary used to create query strings, defaults to None. - timeout (int|None): number of milliseconds to wait for the response headers - (including connecting to the server), *for each attempt*. + timeout (int|None): number of milliseconds to wait for the response. self._default_timeout (60s) by default. + Note that we may make several attempts to send the request; this + timeout applies to the time spent waiting for response headers for + *each* attempt (including connection time) as well as the time spent + reading the response body after a 200 response. + ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. @@ -840,8 +857,13 @@ async def get_json( timeout=timeout, ) + if timeout is not None: + _sec_timeout = timeout / 1000 + else: + _sec_timeout = self.default_timeout + body = await _handle_json_response( - self.reactor, self.default_timeout, request, response, start_ms + self.reactor, _sec_timeout, request, response, start_ms ) return body @@ -865,10 +887,14 @@ async def delete_json( long_retries (bool): whether to use the long retry algorithm. See docs on _send_request for details. - timeout (int|None): number of milliseconds to wait for the response headers - (including connecting to the server), *for each attempt*. + timeout (int|None): number of milliseconds to wait for the response. self._default_timeout (60s) by default. + Note that we may make several attempts to send the request; this + timeout applies to the time spent waiting for response headers for + *each* attempt (including connection time) as well as the time spent + reading the response body after a 200 response. + ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. 
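The repeated per-method change above (and in the `delete_json` hunk that follows) reduces to the small conversion sketched here; this is a stand-alone illustration, with `DEFAULT_TIMEOUT_SECONDS` standing in for the client's `default_timeout` of 60s.

    DEFAULT_TIMEOUT_SECONDS = 60.0

    def response_timeout_seconds(timeout_ms):
        # Callers pass the timeout in milliseconds (or None). The same value
        # now also bounds the body read in _handle_json_response, instead of
        # always falling back to the 60s default.
        if timeout_ms is not None:
            return timeout_ms / 1000
        return DEFAULT_TIMEOUT_SECONDS

    assert response_timeout_seconds(10_000) == 10.0
    assert response_timeout_seconds(None) == 60.0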
@@ -900,8 +926,13 @@ async def delete_json( ignore_backoff=ignore_backoff, ) + if timeout is not None: + _sec_timeout = timeout / 1000 + else: + _sec_timeout = self.default_timeout + body = await _handle_json_response( - self.reactor, self.default_timeout, request, response, start_ms + self.reactor, _sec_timeout, request, response, start_ms ) return body diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py index 332da02a8d18..e32d3f43e0f3 100644 --- a/synapse/http/proxyagent.py +++ b/synapse/http/proxyagent.py @@ -44,8 +44,11 @@ class ProxyAgent(_AgentBase): `BrowserLikePolicyForHTTPS`, so unless you have special requirements you can leave this as-is. - connectTimeout (float): The amount of time that this Agent will wait - for the peer to accept a connection. + connectTimeout (Optional[float]): The amount of time that this Agent will wait + for the peer to accept a connection, in seconds. If 'None', + HostnameEndpoint's default (30s) will be used. + + This is used for connections to both proxies and destination servers. bindAddress (bytes): The local address for client sockets to bind to. @@ -108,6 +111,15 @@ def request(self, method, uri, headers=None, bodyProducer=None): Returns: Deferred[IResponse]: completes when the header of the response has been received (regardless of the response status code). + + Can fail with: + SchemeNotSupported: if the uri is not http or https + + twisted.internet.error.TimeoutError if the server we are connecting + to (proxy or destination) does not accept a connection before + connectTimeout. + + ... other things too. """ uri = uri.strip() if not _VALID_URI.match(uri): diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 67ce9a5f39a1..382f0cf3f0d2 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -449,18 +449,8 @@ def _ctx_manager(): R = TypeVar("R") -def _cancelled_to_timed_out_error(value: R, timeout: float) -> R: - if isinstance(value, failure.Failure): - value.trap(CancelledError) - raise defer.TimeoutError(timeout, "Deferred") - return value - - def timeout_deferred( - deferred: defer.Deferred, - timeout: float, - reactor: IReactorTime, - on_timeout_cancel: Optional[Callable[[Any, float], Any]] = None, + deferred: defer.Deferred, timeout: float, reactor: IReactorTime, ) -> defer.Deferred: """The in built twisted `Deferred.addTimeout` fails to time out deferreds that have a canceller that throws exceptions. This method creates a new @@ -469,27 +459,21 @@ def timeout_deferred( (See https://twistedmatrix.com/trac/ticket/9534) - NOTE: Unlike `Deferred.addTimeout`, this function returns a new deferred + NOTE: Unlike `Deferred.addTimeout`, this function returns a new deferred. + + NOTE: the TimeoutError raised by the resultant deferred is + twisted.internet.defer.TimeoutError, which is *different* to the built-in + TimeoutError, as well as various other TimeoutErrors you might have imported. Args: deferred: The Deferred to potentially timeout. timeout: Timeout in seconds reactor: The twisted reactor to use - on_timeout_cancel: A callable which is called immediately - after the deferred times out, and not if this deferred is - otherwise cancelled before the timeout. - It takes an arbitrary value, which is the value of the deferred at - that exact point in time (probably a CancelledError Failure), and - the timeout. - - The default callable (if none is provided) will translate a - CancelledError Failure into a defer.TimeoutError. Returns: - A new Deferred. 
+ A new Deferred, which will errback with defer.TimeoutError on timeout. """ - new_d = defer.Deferred() timed_out = [False] @@ -502,18 +486,23 @@ def time_it_out(): except: # noqa: E722, if we throw any exception it'll break time outs logger.exception("Canceller failed during timeout") + # the cancel() call should have set off a chain of errbacks which + # will have errbacked new_d, but in case it hasn't, errback it now. + if not new_d.called: - new_d.errback(defer.TimeoutError(timeout, "Deferred")) + new_d.errback(defer.TimeoutError("Timed out after %gs" % (timeout,))) delayed_call = reactor.callLater(timeout, time_it_out) - def convert_cancelled(value): - if timed_out[0]: - to_call = on_timeout_cancel or _cancelled_to_timed_out_error - return to_call(value, timeout) + def convert_cancelled(value: failure.Failure): + # if the orgininal deferred was cancelled, and our timeout has fired, then + # the reason it was cancelled was due to our timeout. Turn the CancelledError + # into a TimeoutError. + if timed_out[0] and value.check(CancelledError): + raise defer.TimeoutError("Timed out after %gs" % (timeout,)) return value - deferred.addBoth(convert_cancelled) + deferred.addErrback(convert_cancelled) def cancel_timeout(result): # stop the pending call to cancel the deferred if it's been fired diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py index 5604af379522..212484a7fecb 100644 --- a/tests/http/test_fedclient.py +++ b/tests/http/test_fedclient.py @@ -318,14 +318,14 @@ def test_client_gets_headers(self): r = self.successResultOf(d) self.assertEqual(r.code, 200) - def test_client_headers_no_body(self): + @parameterized.expand(["get_json", "post_json", "delete_json", "put_json"]) + def test_timeout_reading_body(self, method_name: str): """ If the HTTP request is connected, but gets no response before being - timed out, it'll give a ResponseNeverReceived. + timed out, it'll give a RequestSendFailed with can_retry. """ - d = defer.ensureDeferred( - self.cl.post_json("testserv:8008", "foo/bar", timeout=10000) - ) + method = getattr(self.cl, method_name) + d = defer.ensureDeferred(method("testserv:8008", "foo/bar", timeout=10000)) self.pump() @@ -349,7 +349,9 @@ def test_client_headers_no_body(self): self.reactor.advance(10.5) f = self.failureResultOf(d) - self.assertIsInstance(f.value, TimeoutError) + self.assertIsInstance(f.value, RequestSendFailed) + self.assertTrue(f.value.can_retry) + self.assertIsInstance(f.value.inner_exception, defer.TimeoutError) def test_client_requires_trailing_slashes(self): """ diff --git a/tests/http/test_simple_client.py b/tests/http/test_simple_client.py new file mode 100644 index 000000000000..a1cf0862d4fe --- /dev/null +++ b/tests/http/test_simple_client.py @@ -0,0 +1,180 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from mock import Mock + +from netaddr import IPSet + +from twisted.internet import defer +from twisted.internet.error import DNSLookupError + +from synapse.http import RequestTimedOutError +from synapse.http.client import SimpleHttpClient +from synapse.server import HomeServer + +from tests.unittest import HomeserverTestCase + + +class SimpleHttpClientTests(HomeserverTestCase): + def prepare(self, reactor, clock, hs: "HomeServer"): + # Add a DNS entry for a test server + self.reactor.lookups["testserv"] = "1.2.3.4" + + self.cl = hs.get_simple_http_client() + + def test_dns_error(self): + """ + If the DNS lookup returns an error, it will bubble up. + """ + d = defer.ensureDeferred(self.cl.get_json("http://testserv2:8008/foo/bar")) + self.pump() + + f = self.failureResultOf(d) + self.assertIsInstance(f.value, DNSLookupError) + + def test_client_connection_refused(self): + d = defer.ensureDeferred(self.cl.get_json("http://testserv:8008/foo/bar")) + + self.pump() + + # Nothing happened yet + self.assertNoResult(d) + + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, factory, _timeout, _bindAddress) = clients[0] + self.assertEqual(host, "1.2.3.4") + self.assertEqual(port, 8008) + e = Exception("go away") + factory.clientConnectionFailed(None, e) + self.pump(0.5) + + f = self.failureResultOf(d) + + self.assertIs(f.value, e) + + def test_client_never_connect(self): + """ + If the HTTP request is not connected and is timed out, it'll give a + ConnectingCancelledError or TimeoutError. + """ + d = defer.ensureDeferred(self.cl.get_json("http://testserv:8008/foo/bar")) + + self.pump() + + # Nothing happened yet + self.assertNoResult(d) + + # Make sure treq is trying to connect + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + self.assertEqual(clients[0][0], "1.2.3.4") + self.assertEqual(clients[0][1], 8008) + + # Deferred is still without a result + self.assertNoResult(d) + + # Push by enough to time it out + self.reactor.advance(120) + f = self.failureResultOf(d) + + self.assertIsInstance(f.value, RequestTimedOutError) + + def test_client_connect_no_response(self): + """ + If the HTTP request is connected, but gets no response before being + timed out, it'll give a ResponseNeverReceived. 
+ """ + d = defer.ensureDeferred(self.cl.get_json("http://testserv:8008/foo/bar")) + + self.pump() + + # Nothing happened yet + self.assertNoResult(d) + + # Make sure treq is trying to connect + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + self.assertEqual(clients[0][0], "1.2.3.4") + self.assertEqual(clients[0][1], 8008) + + conn = Mock() + client = clients[0][2].buildProtocol(None) + client.makeConnection(conn) + + # Deferred is still without a result + self.assertNoResult(d) + + # Push by enough to time it out + self.reactor.advance(120) + f = self.failureResultOf(d) + + self.assertIsInstance(f.value, RequestTimedOutError) + + def test_client_ip_range_blacklist(self): + """Ensure that Synapse does not try to connect to blacklisted IPs""" + + # Add some DNS entries we'll blacklist + self.reactor.lookups["internal"] = "127.0.0.1" + self.reactor.lookups["internalv6"] = "fe80:0:0:0:0:8a2e:370:7337" + ip_blacklist = IPSet(["127.0.0.0/8", "fe80::/64"]) + + cl = SimpleHttpClient(self.hs, ip_blacklist=ip_blacklist) + + # Try making a GET request to a blacklisted IPv4 address + # ------------------------------------------------------ + # Make the request + d = defer.ensureDeferred(cl.get_json("http://internal:8008/foo/bar")) + self.pump(1) + + # Check that it was unable to resolve the address + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 0) + + self.failureResultOf(d, DNSLookupError) + + # Try making a POST request to a blacklisted IPv6 address + # ------------------------------------------------------- + # Make the request + d = defer.ensureDeferred( + cl.post_json_get_json("http://internalv6:8008/foo/bar", {}) + ) + + # Move the reactor forwards + self.pump(1) + + # Check that it was unable to resolve the address + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 0) + + # Check that it was due to a blacklisted DNS lookup + self.failureResultOf(d, DNSLookupError) + + # Try making a GET request to a non-blacklisted IPv4 address + # ---------------------------------------------------------- + # Make the request + d = defer.ensureDeferred(cl.get_json("http://testserv:8008/foo/bar")) + + # Nothing has happened yet + self.assertNoResult(d) + + # Move the reactor forwards + self.pump(1) + + # Check that it was able to resolve the address + clients = self.reactor.tcpClients + self.assertNotEqual(len(clients), 0) + + # Connection will still fail as this IP address does not resolve to anything + self.failureResultOf(d, RequestTimedOutError) From 866c84da8ddda04f1da94b8e03719147c6d4875e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 29 Sep 2020 11:06:11 +0100 Subject: [PATCH 074/245] Add metrics to track success/otherwise of replication requests (#8406) One hope is that this might provide some insights into #3365. --- changelog.d/8406.feature | 1 + synapse/replication/http/_base.py | 40 +++++++++++++++++++++---------- 2 files changed, 29 insertions(+), 12 deletions(-) create mode 100644 changelog.d/8406.feature diff --git a/changelog.d/8406.feature b/changelog.d/8406.feature new file mode 100644 index 000000000000..1c6472ae7eae --- /dev/null +++ b/changelog.d/8406.feature @@ -0,0 +1 @@ +Add prometheus metrics for replication requests. 
diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index b448da671038..64edadb624c1 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -20,18 +20,28 @@ from inspect import signature from typing import Dict, List, Tuple -from synapse.api.errors import ( - CodeMessageException, - HttpResponseException, - RequestSendFailed, - SynapseError, -) +from prometheus_client import Counter, Gauge + +from synapse.api.errors import HttpResponseException, SynapseError +from synapse.http import RequestTimedOutError from synapse.logging.opentracing import inject_active_span_byte_dict, trace from synapse.util.caches.response_cache import ResponseCache from synapse.util.stringutils import random_string logger = logging.getLogger(__name__) +_pending_outgoing_requests = Gauge( + "synapse_pending_outgoing_replication_requests", + "Number of active outgoing replication requests, by replication method name", + ["name"], +) + +_outgoing_request_counter = Counter( + "synapse_outgoing_replication_requests", + "Number of outgoing replication requests, by replication method name and result", + ["name", "code"], +) + class ReplicationEndpoint(metaclass=abc.ABCMeta): """Helper base class for defining new replication HTTP endpoints. @@ -138,7 +148,10 @@ def make_client(cls, hs): instance_map = hs.config.worker.instance_map + outgoing_gauge = _pending_outgoing_requests.labels(cls.NAME) + @trace(opname="outgoing_replication_request") + @outgoing_gauge.track_inprogress() async def send_request(instance_name="master", **kwargs): if instance_name == local_instance_name: raise Exception("Trying to send HTTP request to self") @@ -193,23 +206,26 @@ async def send_request(instance_name="master", **kwargs): try: result = await request_func(uri, data, headers=headers) break - except CodeMessageException as e: - if e.code != 504 or not cls.RETRY_ON_TIMEOUT: + except RequestTimedOutError: + if not cls.RETRY_ON_TIMEOUT: raise - logger.warning("%s request timed out", cls.NAME) + logger.warning("%s request timed out; retrying", cls.NAME) # If we timed out we probably don't need to worry about backing # off too much, but lets just wait a little anyway. await clock.sleep(1) except HttpResponseException as e: # We convert to SynapseError as we know that it was a SynapseError - # on the master process that we should send to the client. (And + # on the main process that we should send to the client. (And # importantly, not stack traces everywhere) + _outgoing_request_counter.labels(cls.NAME, e.code).inc() raise e.to_synapse_error() - except RequestSendFailed as e: - raise SynapseError(502, "Failed to talk to master") from e + except Exception as e: + _outgoing_request_counter.labels(cls.NAME, "ERR").inc() + raise SynapseError(502, "Failed to talk to main process") from e + _outgoing_request_counter.labels(cls.NAME, 200).inc() return result return send_request From 1c6b8752b891c1a25524d8dfaa8efb7176c0dbec Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 29 Sep 2020 12:36:44 +0100 Subject: [PATCH 075/245] Only assert valid next_link params when provided (#8417) Broken in https://github.com/matrix-org/synapse/pull/8275 and has yet to be put in a release. Fixes https://github.com/matrix-org/synapse/issues/8418. `next_link` is an optional parameter. However, we were checking whether the `next_link` param was valid, even if it wasn't provided. In that case, `next_link` was `None`, which would clearly not be a valid URL. 
This would prevent password reset and other operations if `next_link` was not provided, and the `next_link_domain_whitelist` config option was set. --- changelog.d/8417.feature | 1 + synapse/rest/client/v2_alpha/account.py | 15 +++++++++------ tests/rest/client/v2_alpha/test_account.py | 6 ++++++ 3 files changed, 16 insertions(+), 6 deletions(-) create mode 100644 changelog.d/8417.feature diff --git a/changelog.d/8417.feature b/changelog.d/8417.feature new file mode 100644 index 000000000000..17549c3df39a --- /dev/null +++ b/changelog.d/8417.feature @@ -0,0 +1 @@ +Add a config option to specify a whitelist of domains that a user can be redirected to after validating their email or phone number. \ No newline at end of file diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index c3ce0f62592a..9245214f36eb 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -103,8 +103,9 @@ async def on_POST(self, request): Codes.THREEPID_DENIED, ) - # Raise if the provided next_link value isn't valid - assert_valid_next_link(self.hs, next_link) + if next_link: + # Raise if the provided next_link value isn't valid + assert_valid_next_link(self.hs, next_link) # The email will be sent to the stored address. # This avoids a potential account hijack by requesting a password reset to @@ -379,8 +380,9 @@ async def on_POST(self, request): Codes.THREEPID_DENIED, ) - # Raise if the provided next_link value isn't valid - assert_valid_next_link(self.hs, next_link) + if next_link: + # Raise if the provided next_link value isn't valid + assert_valid_next_link(self.hs, next_link) existing_user_id = await self.store.get_user_id_by_threepid("email", email) @@ -453,8 +455,9 @@ async def on_POST(self, request): Codes.THREEPID_DENIED, ) - # Raise if the provided next_link value isn't valid - assert_valid_next_link(self.hs, next_link) + if next_link: + # Raise if the provided next_link value isn't valid + assert_valid_next_link(self.hs, next_link) existing_user_id = await self.store.get_user_id_by_threepid("msisdn", msisdn) diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py index 93f899d86133..ae2cd67f35de 100644 --- a/tests/rest/client/v2_alpha/test_account.py +++ b/tests/rest/client/v2_alpha/test_account.py @@ -732,6 +732,12 @@ def test_next_link_file_uri(self): @override_config({"next_link_domain_whitelist": ["example.com", "example.org"]}) def test_next_link_domain_whitelist(self): """Tests next_link parameters must fit the whitelist if provided""" + + # Ensure not providing a next_link parameter still works + self._request_token( + "something@example.com", "some_secret", next_link=None, expect_code=200, + ) + self._request_token( "something@example.com", "some_secret", From 8676d8ab2e5667d7c12774effc64b3ab99344a8d Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Tue, 29 Sep 2020 13:11:02 +0100 Subject: [PATCH 076/245] Filter out appservices from mau count (#8404) This is an attempt to fix #8403. --- changelog.d/8404.misc | 1 + .../databases/main/monthly_active_users.py | 9 ++++++++- tests/storage/test_monthly_active_users.py | 17 ++++++++++++++++- 3 files changed, 25 insertions(+), 2 deletions(-) create mode 100644 changelog.d/8404.misc diff --git a/changelog.d/8404.misc b/changelog.d/8404.misc new file mode 100644 index 000000000000..7aadded6c1dd --- /dev/null +++ b/changelog.d/8404.misc @@ -0,0 +1 @@ +Do not include appservice users when calculating the total MAU for a server. 
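As a self-contained illustration of the query change below (using an assumed, heavily simplified schema rather than the real Synapse tables), the LEFT JOIN excludes any monthly-active row whose user has an `appservice_id` set:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.executescript(
        """
        CREATE TABLE users (name TEXT, appservice_id TEXT);
        CREATE TABLE monthly_active_users (user_id TEXT, timestamp BIGINT);
        INSERT INTO users VALUES ('@alice:server', NULL), ('@bot:server', 'wibble');
        INSERT INTO monthly_active_users VALUES ('@alice:server', 0), ('@bot:server', 0);
        """
    )
    (count,) = conn.execute(
        """
        SELECT COALESCE(count(*), 0)
        FROM monthly_active_users
        LEFT JOIN users ON monthly_active_users.user_id = users.name
        WHERE (users.appservice_id IS NULL OR users.appservice_id = '')
        """
    ).fetchone()
    assert count == 1  # only @alice counts towards the MAU total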
diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index e0cedd1aacc9..e93aad33cd89 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -41,7 +41,14 @@ async def get_monthly_active_count(self) -> int: """ def _count_users(txn): - sql = "SELECT COALESCE(count(*), 0) FROM monthly_active_users" + # Exclude app service users + sql = """ + SELECT COALESCE(count(*), 0) + FROM monthly_active_users + LEFT JOIN users + ON monthly_active_users.user_id=users.name + WHERE (users.appservice_id IS NULL OR users.appservice_id = ''); + """ txn.execute(sql) (count,) = txn.fetchone() return count diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index 643072bbaf18..8d97b6d4cdf4 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -137,6 +137,21 @@ def test_can_insert_and_count_mau(self): count = self.get_success(self.store.get_monthly_active_count()) self.assertEqual(count, 1) + def test_appservice_user_not_counted_in_mau(self): + self.get_success( + self.store.register_user( + user_id="@appservice_user:server", appservice_id="wibble" + ) + ) + count = self.get_success(self.store.get_monthly_active_count()) + self.assertEqual(count, 0) + + d = self.store.upsert_monthly_active_user("@appservice_user:server") + self.get_success(d) + + count = self.get_success(self.store.get_monthly_active_count()) + self.assertEqual(count, 0) + def test_user_last_seen_monthly_active(self): user_id1 = "@user1:server" user_id2 = "@user2:server" @@ -383,7 +398,7 @@ def test_get_monthly_active_count_by_service(self): self.get_success(self.store.upsert_monthly_active_user(appservice2_user1)) count = self.get_success(self.store.get_monthly_active_count()) - self.assertEqual(count, 4) + self.assertEqual(count, 1) d = self.store.get_monthly_active_count_by_service() result = self.get_success(d) From 12f0d18611f406df5e741c124cac8246fcfd9c14 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 29 Sep 2020 13:47:47 +0100 Subject: [PATCH 077/245] Add support for running Complement against the local checkout (#8317) This PR adds a script that: * Builds the local Synapse checkout using our existing `docker/Dockerfile` image. * Downloads [Complement](https://github.com/matrix-org/complement/)'s source code. * Builds the [Synapse.Dockerfile](https://github.com/matrix-org/complement/blob/master/dockerfiles/Synapse.Dockerfile) using the above dockerfile as a base. * Builds and runs Complement against it. This set up differs slightly from [that of the dendrite repo](https://github.com/matrix-org/dendrite/blob/master/build/scripts/complement.sh) (`complement.sh`, `Complement.Dockerfile`), which instead stores a separate, but slightly modified, dockerfile in Dendrite's repo rather than running the one stored in Complement's repo. That synapse equivalent to that dockerfile (`Synapse.Dockerfile`) in Complement's repo is just based on top of `matrixdotorg/synapse:latest`, which we opt to build here locally. Thus copying over the files from Complement's repo wouldn't change any functionality, and would result in two instances of the same files. So just using the dockerfile in Complement's repo was decided upon instead. 
--- changelog.d/8317.feature | 1 + scripts-dev/complement.sh | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+) create mode 100644 changelog.d/8317.feature create mode 100755 scripts-dev/complement.sh diff --git a/changelog.d/8317.feature b/changelog.d/8317.feature new file mode 100644 index 000000000000..f9edda099c40 --- /dev/null +++ b/changelog.d/8317.feature @@ -0,0 +1 @@ +Support testing the local Synapse checkout against the [Complement homeserver test suite](https://github.com/matrix-org/complement/). \ No newline at end of file diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh new file mode 100755 index 000000000000..3cde53f5c051 --- /dev/null +++ b/scripts-dev/complement.sh @@ -0,0 +1,22 @@ +#! /bin/bash -eu +# This script is designed for developers who want to test their code +# against Complement. +# +# It makes a Synapse image which represents the current checkout, +# then downloads Complement and runs it with that image. + +cd "$(dirname $0)/.." + +# Build the base Synapse image from the local checkout +docker build -t matrixdotorg/synapse:latest -f docker/Dockerfile . + +# Download Complement +wget -N https://github.com/matrix-org/complement/archive/master.tar.gz +tar -xzf master.tar.gz +cd complement-master + +# Build the Synapse image from Complement, based on the above image we just built +docker build -t complement-synapse -f dockerfiles/Synapse.Dockerfile ./dockerfiles + +# Run the tests on the resulting image! +COMPLEMENT_BASE_IMAGE=complement-synapse go test -v -count=1 ./tests From 2649d545a551dd126d73d34a6e3172916ea483e0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 29 Sep 2020 15:57:36 +0100 Subject: [PATCH 078/245] Mypy fixes for `synapse.handlers.federation` (#8422) For some reason, an apparently unrelated PR upset mypy about this module. Here are a number of little fixes. --- changelog.d/8422.misc | 1 + synapse/federation/federation_client.py | 4 +++- synapse/handlers/federation.py | 13 +++++++++---- synapse/storage/databases/state/store.py | 4 ++-- synapse/storage/persist_events.py | 2 +- synapse/storage/state.py | 6 +++--- 6 files changed, 19 insertions(+), 11 deletions(-) create mode 100644 changelog.d/8422.misc diff --git a/changelog.d/8422.misc b/changelog.d/8422.misc new file mode 100644 index 000000000000..03fba120c6d9 --- /dev/null +++ b/changelog.d/8422.misc @@ -0,0 +1 @@ +Typing fixes for `synapse.handlers.federation`. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 688d43fffb4c..302b2f69bcdd 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -24,10 +24,12 @@ Dict, Iterable, List, + Mapping, Optional, Sequence, Tuple, TypeVar, + Union, ) from prometheus_client import Counter @@ -501,7 +503,7 @@ async def make_membership_event( user_id: str, membership: str, content: dict, - params: Dict[str, str], + params: Optional[Mapping[str, Union[str, Iterable[str]]]], ) -> Tuple[str, EventBase, RoomVersion]: """ Creates an m.room.member event, with context, without participating in the room. 
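The `params` signature change just above is representative of these mypy fixes: the argument becomes a read-only `Mapping` (and `Optional`), so that callers holding a narrower `Dict` still type-check. A minimal illustrative sketch, with a hypothetical `make_request` function:

    from typing import Dict, Iterable, Mapping, Optional, Union

    def make_request(params: Optional[Mapping[str, Union[str, Iterable[str]]]]) -> None:
        ...

    narrow: Dict[str, str] = {"ver": "5"}
    make_request(narrow)  # accepted: Mapping is covariant in its value type
    make_request(None)    # accepted: the parameter is Optional

    # Had the parameter stayed Dict[str, Union[str, Iterable[str]]], mypy would
    # reject the first call, because Dict is invariant in its value type.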
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 5bcfb231b2cb..0073e7c99654 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -155,8 +155,9 @@ def __init__(self, hs): self._device_list_updater = hs.get_device_handler().device_list_updater self._maybe_store_room_on_invite = self.store.maybe_store_room_on_invite - # When joining a room we need to queue any events for that room up - self.room_queues = {} + # When joining a room we need to queue any events for that room up. + # For each room, a list of (pdu, origin) tuples. + self.room_queues = {} # type: Dict[str, List[Tuple[EventBase, str]]] self._room_pdu_linearizer = Linearizer("fed_room_pdu") self.third_party_event_rules = hs.get_third_party_event_rules() @@ -814,6 +815,9 @@ async def backfill(self, dest, room_id, limit, extremities): dest, room_id, limit=limit, extremities=extremities ) + if not events: + return [] + # ideally we'd sanity check the events here for excess prev_events etc, # but it's hard to reject events at this point without completely # breaking backfill in the same way that it is currently broken by @@ -2164,10 +2168,10 @@ async def _check_for_soft_fail( # given state at the event. This should correctly handle cases # like bans, especially with state res v2. - state_sets = await self.state_store.get_state_groups( + state_sets_d = await self.state_store.get_state_groups( event.room_id, extrem_ids ) - state_sets = list(state_sets.values()) + state_sets = list(state_sets_d.values()) # type: List[Iterable[EventBase]] state_sets.append(state) current_states = await self.state_handler.resolve_events( room_version, state_sets, event @@ -2958,6 +2962,7 @@ async def persist_events_and_notify( ) return result["max_stream_id"] else: + assert self.storage.persistence max_stream_token = await self.storage.persistence.persist_events( event_and_contexts, backfilled=backfilled ) diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index 989f0cbc9d3b..0e31cc811a38 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -24,7 +24,7 @@ from synapse.storage.state import StateFilter from synapse.storage.types import Cursor from synapse.storage.util.sequence import build_sequence_generator -from synapse.types import StateMap +from synapse.types import MutableStateMap, StateMap from synapse.util.caches.descriptors import cached from synapse.util.caches.dictionary_cache import DictionaryCache @@ -208,7 +208,7 @@ def _get_state_for_group_using_cache(self, cache, group, state_filter): async def _get_state_for_groups( self, groups: Iterable[int], state_filter: StateFilter = StateFilter.all() - ) -> Dict[int, StateMap[str]]: + ) -> Dict[int, MutableStateMap[str]]: """Gets the state at each of a list of state groups, optionally filtering by type/state_key diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py index 603cd7d825e0..ded6cf965528 100644 --- a/synapse/storage/persist_events.py +++ b/synapse/storage/persist_events.py @@ -197,7 +197,7 @@ def __init__(self, hs, stores: Databases): async def persist_events( self, - events_and_contexts: List[Tuple[EventBase, EventContext]], + events_and_contexts: Iterable[Tuple[EventBase, EventContext]], backfilled: bool = False, ) -> RoomStreamToken: """ diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 8f68d968f0c8..08a69f2f9667 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ 
-20,7 +20,7 @@ from synapse.api.constants import EventTypes from synapse.events import EventBase -from synapse.types import StateMap +from synapse.types import MutableStateMap, StateMap logger = logging.getLogger(__name__) @@ -349,7 +349,7 @@ async def get_state_group_delta(self, state_group: int): async def get_state_groups_ids( self, _room_id: str, event_ids: Iterable[str] - ) -> Dict[int, StateMap[str]]: + ) -> Dict[int, MutableStateMap[str]]: """Get the event IDs of all the state for the state groups for the given events Args: @@ -532,7 +532,7 @@ async def get_state_ids_for_event( def _get_state_for_groups( self, groups: Iterable[int], state_filter: StateFilter = StateFilter.all() - ) -> Awaitable[Dict[int, StateMap[str]]]: + ) -> Awaitable[Dict[int, MutableStateMap[str]]]: """Gets the state at each of a list of state groups, optionally filtering by type/state_key From b1433bf231370636b817ffa01e6cda5a567cfafe Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 29 Sep 2020 16:42:19 +0100 Subject: [PATCH 079/245] Don't table scan events on worker startup (#8419) * Fix table scan of events on worker startup. This happened because we assumed "new" writers had an initial stream position of 0, so the replication code tried to fetch all events written by the instance between 0 and the current position. Instead, set the initial position of new writers to the current persisted up to position, on the assumption that new writers won't have written anything before that point. * Consider old writers coming back as "new". Otherwise we'd try and fetch entries between the old stale token and the current position, even though it won't have written any rows. Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> --- changelog.d/8419.feature | 1 + synapse/storage/util/id_generators.py | 26 +++++++++++++++++++++++++- tests/storage/test_id_generators.py | 18 ++++++++++++++++++ 3 files changed, 44 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8419.feature diff --git a/changelog.d/8419.feature b/changelog.d/8419.feature new file mode 100644 index 000000000000..b363e929ea8c --- /dev/null +++ b/changelog.d/8419.feature @@ -0,0 +1 @@ +Add experimental support for sharding event persister. diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 4fd7573e260d..02fbb656e81c 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -273,6 +273,19 @@ def _load_current_ids( # Load the current positions of all writers for the stream. if self._writers: + # We delete any stale entries in the positions table. This is + # important if we add back a writer after a long time; we want to + # consider that a "new" writer, rather than using the old stale + # entry here. + sql = """ + DELETE FROM stream_positions + WHERE + stream_name = ? + AND instance_name != ALL(?) + """ + sql = self._db.engine.convert_param_style(sql) + cur.execute(sql, (self._stream_name, self._writers)) + sql = """ SELECT instance_name, stream_id FROM stream_positions WHERE stream_name = ? @@ -453,11 +466,22 @@ def get_current_token_for_writer(self, instance_name: str) -> int: """Returns the position of the given writer. """ + # If we don't have an entry for the given instance name, we assume it's a + # new writer. + # + # For new writers we assume their initial position to be the current + # persisted up to position. 
This stops Synapse from doing a full table + # scan when a new writer announces itself over replication. with self._lock: - return self._return_factor * self._current_positions.get(instance_name, 0) + return self._return_factor * self._current_positions.get( + instance_name, self._persisted_upto_position + ) def get_positions(self) -> Dict[str, int]: """Get a copy of the current positon map. + + Note that this won't necessarily include all configured writers if some + writers haven't written anything yet. """ with self._lock: diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py index 4558bee7be85..392b08832b05 100644 --- a/tests/storage/test_id_generators.py +++ b/tests/storage/test_id_generators.py @@ -390,17 +390,28 @@ def test_writer_config_change(self): # Initial config has two writers id_gen = self._create_id_generator("first", writers=["first", "second"]) self.assertEqual(id_gen.get_persisted_upto_position(), 3) + self.assertEqual(id_gen.get_current_token_for_writer("first"), 3) + self.assertEqual(id_gen.get_current_token_for_writer("second"), 5) # New config removes one of the configs. Note that if the writer is # removed from config we assume that it has been shut down and has # finished persisting, hence why the persisted upto position is 5. id_gen_2 = self._create_id_generator("second", writers=["second"]) self.assertEqual(id_gen_2.get_persisted_upto_position(), 5) + self.assertEqual(id_gen_2.get_current_token_for_writer("second"), 5) # This config points to a single, previously unused writer. id_gen_3 = self._create_id_generator("third", writers=["third"]) self.assertEqual(id_gen_3.get_persisted_upto_position(), 5) + # For new writers we assume their initial position to be the current + # persisted up to position. This stops Synapse from doing a full table + # scan when a new writer comes along. + self.assertEqual(id_gen_3.get_current_token_for_writer("third"), 5) + + id_gen_4 = self._create_id_generator("fourth", writers=["third"]) + self.assertEqual(id_gen_4.get_current_token_for_writer("third"), 5) + # Check that we get a sane next stream ID with this new config. async def _get_next_async(): @@ -410,6 +421,13 @@ async def _get_next_async(): self.get_success(_get_next_async()) self.assertEqual(id_gen_3.get_persisted_upto_position(), 6) + # If we add back the old "first" then we shouldn't see the persisted up + # to position revert back to 3. + id_gen_5 = self._create_id_generator("five", writers=["first", "third"]) + self.assertEqual(id_gen_5.get_persisted_upto_position(), 6) + self.assertEqual(id_gen_5.get_current_token_for_writer("first"), 6) + self.assertEqual(id_gen_5.get_current_token_for_writer("third"), 6) + def test_sequence_consistency(self): """Test that we error out if the table and sequence diverges. """ From c2bdf040aa93f3b542d1b0e2f6ce57853630ec6f Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Tue, 29 Sep 2020 17:15:27 +0100 Subject: [PATCH 080/245] Discard an empty upload_name before persisting an uploaded file (#7905) --- changelog.d/7905.bugfix | 1 + synapse/rest/media/v1/media_repository.py | 7 ++++--- synapse/rest/media/v1/upload_resource.py | 4 ++++ 3 files changed, 9 insertions(+), 3 deletions(-) create mode 100644 changelog.d/7905.bugfix diff --git a/changelog.d/7905.bugfix b/changelog.d/7905.bugfix new file mode 100644 index 000000000000..e60e62441210 --- /dev/null +++ b/changelog.d/7905.bugfix @@ -0,0 +1 @@ +Fix a longstanding bug when storing a media file with an empty `upload_name`. 
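A short sketch of the normalisation the diff below adds in `upload_resource.py` (simplified; the real code also rejects filenames that are not valid UTF-8): a falsey filename is treated as no filename at all rather than persisted as an empty string.

    from typing import Optional

    def normalise_upload_name(raw_name: Optional[bytes]) -> Optional[str]:
        # Hypothetical helper mirroring the new check: a missing filename and
        # an empty byte string both become None before the media is stored.
        if not raw_name:
            return None
        return raw_name.decode("utf-8")

    assert normalise_upload_name(None) is None
    assert normalise_upload_name(b"") is None
    assert normalise_upload_name(b"cat.png") == "cat.png"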
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 69f353d46f9e..ae6822d6e742 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -139,7 +139,7 @@ def mark_recently_accessed(self, server_name, media_id): async def create_content( self, media_type: str, - upload_name: str, + upload_name: Optional[str], content: IO, content_length: int, auth_user: str, @@ -147,8 +147,8 @@ async def create_content( """Store uploaded content for a local user and return the mxc URL Args: - media_type: The content type of the file - upload_name: The name of the file + media_type: The content type of the file. + upload_name: The name of the file, if provided. content: A file like object that is the content to store content_length: The length of the content auth_user: The user_id of the uploader @@ -156,6 +156,7 @@ async def create_content( Returns: The mxc url of the stored content """ + media_id = random_string(24) file_info = FileInfo(server_name=None, file_id=media_id) diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py index 3ebf7a68e673..d76f7389e101 100644 --- a/synapse/rest/media/v1/upload_resource.py +++ b/synapse/rest/media/v1/upload_resource.py @@ -63,6 +63,10 @@ async def _async_render_POST(self, request): msg="Invalid UTF-8 filename parameter: %r" % (upload_name), code=400 ) + # If the name is falsey (e.g. an empty byte string) ensure it is None. + else: + upload_name = None + headers = request.requestHeaders if headers.hasHeader(b"Content-Type"): From 937393abd81e16c7d4bd4d02fe3c0fafafb9611b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 28 Sep 2020 15:20:02 +0100 Subject: [PATCH 081/245] Move `resolve_events_with_store` into StateResolutionHandler --- synapse/handlers/federation.py | 13 +++-- synapse/state/__init__.py | 92 +++++++++++++++++----------------- 2 files changed, 55 insertions(+), 50 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 0073e7c99654..1a8144405ac0 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -21,7 +21,7 @@ import logging from collections.abc import Container from http import HTTPStatus -from typing import Dict, Iterable, List, Optional, Sequence, Tuple, Union +from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Sequence, Tuple, Union import attr from signedjson.key import decode_verify_key_bytes @@ -69,7 +69,7 @@ ReplicationFederationSendEventsRestServlet, ReplicationStoreRoomOnInviteRestServlet, ) -from synapse.state import StateResolutionStore, resolve_events_with_store +from synapse.state import StateResolutionStore from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.types import ( JsonDict, @@ -85,6 +85,9 @@ from synapse.util.stringutils import shortstr from synapse.visibility import filter_events_for_server +if TYPE_CHECKING: + from synapse.server import HomeServer + logger = logging.getLogger(__name__) @@ -116,7 +119,7 @@ class FederationHandler(BaseHandler): rooms. 
""" - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): super().__init__(hs) self.hs = hs @@ -126,6 +129,7 @@ def __init__(self, hs): self.state_store = self.storage.state self.federation_client = hs.get_federation_client() self.state_handler = hs.get_state_handler() + self._state_resolution_handler = hs.get_state_resolution_handler() self.server_name = hs.hostname self.keyring = hs.get_keyring() self.action_generator = hs.get_action_generator() @@ -381,8 +385,7 @@ async def on_receive_pdu(self, origin, pdu, sent_to_us_directly=False) -> None: event_map[x.event_id] = x room_version = await self.store.get_room_version_id(room_id) - state_map = await resolve_events_with_store( - self.clock, + state_map = await self._state_resolution_handler.resolve_events_with_store( room_id, room_version, state_maps, diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 5a5ea39e0103..98ede2ea4fab 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -449,8 +449,7 @@ async def resolve_events( state_map = {ev.event_id: ev for st in state_sets for ev in st} with Measure(self.clock, "state._resolve_events"): - new_state = await resolve_events_with_store( - self.clock, + new_state = await self._state_resolution_handler.resolve_events_with_store( event.room_id, room_version, state_set_ids, @@ -531,8 +530,7 @@ async def resolve_state_groups( state_groups_histogram.observe(len(state_groups_ids)) with Measure(self.clock, "state._resolve_events"): - new_state = await resolve_events_with_store( - self.clock, + new_state = await self.resolve_events_with_store( room_id, room_version, list(state_groups_ids.values()), @@ -552,6 +550,51 @@ async def resolve_state_groups( return cache + def resolve_events_with_store( + self, + room_id: str, + room_version: str, + state_sets: Sequence[StateMap[str]], + event_map: Optional[Dict[str, EventBase]], + state_res_store: "StateResolutionStore", + ) -> Awaitable[StateMap[str]]: + """ + Args: + room_id: the room we are working in + + room_version: Version of the room + + state_sets: List of dicts of (type, state_key) -> event_id, + which are the different state groups to resolve. + + event_map: + a dict from event_id to event, for any events that we happen to + have in flight (eg, those currently being persisted). This will be + used as a starting point fof finding the state we need; any missing + events will be requested via state_map_factory. + + If None, all events will be fetched via state_res_store. + + state_res_store: a place to fetch events from + + Returns: + a map from (type, state_key) to event_id. + """ + v = KNOWN_ROOM_VERSIONS[room_version] + if v.state_res == StateResolutionVersions.V1: + return v1.resolve_events_with_store( + room_id, state_sets, event_map, state_res_store.get_events + ) + else: + return v2.resolve_events_with_store( + self.clock, + room_id, + room_version, + state_sets, + event_map, + state_res_store, + ) + def _make_state_cache_entry( new_state: StateMap[str], state_groups_ids: Dict[int, StateMap[str]] @@ -605,47 +648,6 @@ def _make_state_cache_entry( ) -def resolve_events_with_store( - clock: Clock, - room_id: str, - room_version: str, - state_sets: Sequence[StateMap[str]], - event_map: Optional[Dict[str, EventBase]], - state_res_store: "StateResolutionStore", -) -> Awaitable[StateMap[str]]: - """ - Args: - room_id: the room we are working in - - room_version: Version of the room - - state_sets: List of dicts of (type, state_key) -> event_id, - which are the different state groups to resolve. 
- - event_map: - a dict from event_id to event, for any events that we happen to - have in flight (eg, those currently being persisted). This will be - used as a starting point fof finding the state we need; any missing - events will be requested via state_map_factory. - - If None, all events will be fetched via state_res_store. - - state_res_store: a place to fetch events from - - Returns: - a map from (type, state_key) to event_id. - """ - v = KNOWN_ROOM_VERSIONS[room_version] - if v.state_res == StateResolutionVersions.V1: - return v1.resolve_events_with_store( - room_id, state_sets, event_map, state_res_store.get_events - ) - else: - return v2.resolve_events_with_store( - clock, room_id, room_version, state_sets, event_map, state_res_store - ) - - @attr.s(slots=True) class StateResolutionStore: """Interface that allows state resolution algorithms to access the database From ba700074c673597d59d45565e26cf445f89faa57 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 29 Sep 2020 13:04:52 +0100 Subject: [PATCH 082/245] Expose a `get_resource_usage` method in `Measure` --- synapse/util/metrics.py | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 6e57c1ee728f..ffdea0de8d56 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -19,7 +19,11 @@ from prometheus_client import Counter -from synapse.logging.context import LoggingContext, current_context +from synapse.logging.context import ( + ContextResourceUsage, + LoggingContext, + current_context, +) from synapse.metrics import InFlightGauge logger = logging.getLogger(__name__) @@ -104,27 +108,27 @@ class Measure: def __init__(self, clock, name): self.clock = clock self.name = name - self._logging_context = None + parent_context = current_context() + self._logging_context = LoggingContext( + "Measure[%s]" % (self.name,), parent_context + ) self.start = None - def __enter__(self): - if self._logging_context: + def __enter__(self) -> "Measure": + if self.start is not None: raise RuntimeError("Measure() objects cannot be re-used") self.start = self.clock.time() - parent_context = current_context() - self._logging_context = LoggingContext( - "Measure[%s]" % (self.name,), parent_context - ) self._logging_context.__enter__() in_flight.register((self.name,), self._update_in_flight) + return self def __exit__(self, exc_type, exc_val, exc_tb): - if not self._logging_context: + if self.start is None: raise RuntimeError("Measure() block exited without being entered") duration = self.clock.time() - self.start - usage = self._logging_context.get_resource_usage() + usage = self.get_resource_usage() in_flight.unregister((self.name,), self._update_in_flight) self._logging_context.__exit__(exc_type, exc_val, exc_tb) @@ -140,6 +144,13 @@ def __exit__(self, exc_type, exc_val, exc_tb): except ValueError: logger.warning("Failed to save metrics! Usage: %s", usage) + def get_resource_usage(self) -> ContextResourceUsage: + """Get the resources used within this Measure block + + If the Measure block is still active, returns the resource usage so far. 
+ """ + return self._logging_context.get_resource_usage() + def _update_in_flight(self, metrics): """Gets called when processing in flight metrics """ From 8412c08a87d35fc127f53063c8ede215237a042a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 29 Sep 2020 13:07:09 +0100 Subject: [PATCH 083/245] Move Measure calls into `resolve_events_with_store` --- synapse/state/__init__.py | 63 +++++++++++++++++++-------------------- 1 file changed, 31 insertions(+), 32 deletions(-) diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 98ede2ea4fab..b99cf2d8cdbe 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -448,14 +448,13 @@ async def resolve_events( state_map = {ev.event_id: ev for st in state_sets for ev in st} - with Measure(self.clock, "state._resolve_events"): - new_state = await self._state_resolution_handler.resolve_events_with_store( - event.room_id, - room_version, - state_set_ids, - event_map=state_map, - state_res_store=StateResolutionStore(self.store), - ) + new_state = await self._state_resolution_handler.resolve_events_with_store( + event.room_id, + room_version, + state_set_ids, + event_map=state_map, + state_res_store=StateResolutionStore(self.store), + ) return {key: state_map[ev_id] for key, ev_id in new_state.items()} @@ -529,14 +528,13 @@ async def resolve_state_groups( state_groups_histogram.observe(len(state_groups_ids)) - with Measure(self.clock, "state._resolve_events"): - new_state = await self.resolve_events_with_store( - room_id, - room_version, - list(state_groups_ids.values()), - event_map=event_map, - state_res_store=state_res_store, - ) + new_state = await self.resolve_events_with_store( + room_id, + room_version, + list(state_groups_ids.values()), + event_map=event_map, + state_res_store=state_res_store, + ) # if the new state matches any of the input state groups, we can # use that state group again. Otherwise we will generate a state_id @@ -550,14 +548,14 @@ async def resolve_state_groups( return cache - def resolve_events_with_store( + async def resolve_events_with_store( self, room_id: str, room_version: str, state_sets: Sequence[StateMap[str]], event_map: Optional[Dict[str, EventBase]], state_res_store: "StateResolutionStore", - ) -> Awaitable[StateMap[str]]: + ) -> StateMap[str]: """ Args: room_id: the room we are working in @@ -580,20 +578,21 @@ def resolve_events_with_store( Returns: a map from (type, state_key) to event_id. 
""" - v = KNOWN_ROOM_VERSIONS[room_version] - if v.state_res == StateResolutionVersions.V1: - return v1.resolve_events_with_store( - room_id, state_sets, event_map, state_res_store.get_events - ) - else: - return v2.resolve_events_with_store( - self.clock, - room_id, - room_version, - state_sets, - event_map, - state_res_store, - ) + with Measure(self.clock, "state._resolve_events"): + v = KNOWN_ROOM_VERSIONS[room_version] + if v.state_res == StateResolutionVersions.V1: + return await v1.resolve_events_with_store( + room_id, state_sets, event_map, state_res_store.get_events + ) + else: + return await v2.resolve_events_with_store( + self.clock, + room_id, + room_version, + state_sets, + event_map, + state_res_store, + ) def _make_state_cache_entry( From 057f04fa9fb5134621dff19c758b38fe253ff8a8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 29 Sep 2020 13:07:45 +0100 Subject: [PATCH 084/245] Report state res metrics to Prometheus and log --- synapse/state/__init__.py | 144 ++++++++++++++++++++++++++++++++------ 1 file changed, 124 insertions(+), 20 deletions(-) diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index b99cf2d8cdbe..31082bb16a5d 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -13,42 +13,46 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import heapq import logging -from collections import namedtuple +from collections import defaultdict, namedtuple from typing import ( + Any, Awaitable, + Callable, + DefaultDict, Dict, Iterable, List, Optional, Sequence, Set, + Tuple, Union, overload, ) import attr from frozendict import frozendict -from prometheus_client import Histogram +from prometheus_client import Counter, Histogram from typing_extensions import Literal from synapse.api.constants import EventTypes from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, StateResolutionVersions from synapse.events import EventBase from synapse.events.snapshot import EventContext +from synapse.logging.context import ContextResourceUsage from synapse.logging.utils import log_function from synapse.state import v1, v2 from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.roommember import ProfileInfo from synapse.types import Collection, StateMap -from synapse.util import Clock from synapse.util.async_helpers import Linearizer from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.metrics import Measure, measure_func logger = logging.getLogger(__name__) - +metrics_logger = logging.getLogger("synapse.state.metrics") # Metrics for number of state groups involved in a resolution. state_groups_histogram = Histogram( @@ -459,6 +463,33 @@ async def resolve_events( return {key: state_map[ev_id] for key, ev_id in new_state.items()} +@attr.s(slots=True) +class _StateResMetrics: + """Keeps track of some usage metrics about state res.""" + + # System and User CPU time, in seconds + cpu_time = attr.ib(type=float, default=0.0) + + # time spent on database transactions (excluding scheduling time). This roughly + # corresponds to the amount of work done on the db server, excluding event fetches. + db_time = attr.ib(type=float, default=0.0) + + # number of events fetched from the db. 
+ db_events = attr.ib(type=int, default=0) + + +_biggest_room_by_cpu_counter = Counter( + "synapse_state_res_cpu_for_biggest_room_seconds", + "CPU time spent performing state resolution for the single most expensive " + "room for state resolution", +) +_biggest_room_by_db_counter = Counter( + "synapse_state_res_db_for_biggest_room_seconds", + "Database time spent performing state resolution for the single most " + "expensive room for state resolution", +) + + class StateResolutionHandler: """Responsible for doing state conflict resolution. @@ -481,6 +512,17 @@ def __init__(self, hs): reset_expiry_on_get=True, ) + # + # stuff for tracking time spent on state-res by room + # + + # tracks the amount of work done on state res per room + self._state_res_metrics = defaultdict( + _StateResMetrics + ) # type: DefaultDict[str, _StateResMetrics] + + self.clock.looping_call(self._report_metrics, 120 * 1000) + @log_function async def resolve_state_groups( self, @@ -578,21 +620,83 @@ async def resolve_events_with_store( Returns: a map from (type, state_key) to event_id. """ - with Measure(self.clock, "state._resolve_events"): - v = KNOWN_ROOM_VERSIONS[room_version] - if v.state_res == StateResolutionVersions.V1: - return await v1.resolve_events_with_store( - room_id, state_sets, event_map, state_res_store.get_events - ) - else: - return await v2.resolve_events_with_store( - self.clock, - room_id, - room_version, - state_sets, - event_map, - state_res_store, - ) + try: + with Measure(self.clock, "state._resolve_events") as m: + v = KNOWN_ROOM_VERSIONS[room_version] + if v.state_res == StateResolutionVersions.V1: + return await v1.resolve_events_with_store( + room_id, state_sets, event_map, state_res_store.get_events + ) + else: + return await v2.resolve_events_with_store( + self.clock, + room_id, + room_version, + state_sets, + event_map, + state_res_store, + ) + finally: + self._record_state_res_metrics(room_id, m.get_resource_usage()) + + def _record_state_res_metrics(self, room_id: str, rusage: ContextResourceUsage): + room_metrics = self._state_res_metrics[room_id] + room_metrics.cpu_time += rusage.ru_utime + rusage.ru_stime + room_metrics.db_time += rusage.db_txn_duration_sec + room_metrics.db_events += rusage.evt_db_fetch_count + + def _report_metrics(self): + if not self._state_res_metrics: + # no state res has happened since the last iteration: don't bother logging. + return + + self._report_biggest( + lambda i: i.cpu_time, "CPU time", _biggest_room_by_cpu_counter, + ) + + self._report_biggest( + lambda i: i.db_time, "DB time", _biggest_room_by_db_counter, + ) + + self._state_res_metrics.clear() + + def _report_biggest( + self, + extract_key: Callable[[_StateResMetrics], Any], + metric_name: str, + prometheus_counter_metric: Counter, + ) -> None: + """Report metrics on the biggest rooms for state res + + Args: + extract_key: a callable which, given a _StateResMetrics, extracts a single + metric to sort by. 
+ metric_name: the name of the metric we have extracted, for the log line + prometheus_counter_metric: a prometheus metric recording the sum of the + the extracted metric + """ + n_to_log = 10 + if not metrics_logger.isEnabledFor(logging.DEBUG): + # only need the most expensive if we don't have debug logging, which + # allows nlargest() to degrade to max() + n_to_log = 1 + + items = self._state_res_metrics.items() + + # log the N biggest rooms + biggest = heapq.nlargest( + n_to_log, items, key=lambda i: extract_key(i[1]) + ) # type: List[Tuple[str, _StateResMetrics]] + metrics_logger.debug( + "%i biggest rooms for state-res by %s: %s", + len(biggest), + metric_name, + ["%s (%gs)" % (r, extract_key(m)) for (r, m) in biggest], + ) + + # report info on the single biggest to prometheus + _, biggest_metrics = biggest[0] + prometheus_counter_metric.inc(extract_key(biggest_metrics)) def _make_state_cache_entry( From d4274dd17e79296d7501aab19cf575f38501877f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 29 Sep 2020 13:08:56 +0100 Subject: [PATCH 085/245] changelog --- changelog.d/8420.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/8420.feature diff --git a/changelog.d/8420.feature b/changelog.d/8420.feature new file mode 100644 index 000000000000..9d6849624d48 --- /dev/null +++ b/changelog.d/8420.feature @@ -0,0 +1 @@ +Add experimental reporting of metrics on expensive rooms for state-resolution. From 8238b55e08e8fbd7c7169b72281538a3e34c6488 Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Tue, 29 Sep 2020 12:50:25 -0500 Subject: [PATCH 086/245] Update description of server_name config option (#8415) --- changelog.d/8415.doc | 1 + docs/sample_config.yaml | 21 +++++++++++++++++---- synapse/config/server.py | 21 +++++++++++++++++---- 3 files changed, 35 insertions(+), 8 deletions(-) create mode 100644 changelog.d/8415.doc diff --git a/changelog.d/8415.doc b/changelog.d/8415.doc new file mode 100644 index 000000000000..28b579853364 --- /dev/null +++ b/changelog.d/8415.doc @@ -0,0 +1 @@ +Improve description of `server_name` config option in `homserver.yaml`. \ No newline at end of file diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 845f53779530..70cc06a6d87d 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -33,10 +33,23 @@ ## Server ## -# The domain name of the server, with optional explicit port. -# This is used by remote servers to connect to this server, -# e.g. matrix.org, localhost:8080, etc. -# This is also the last part of your UserID. +# The public-facing domain of the server +# +# The server_name name will appear at the end of usernames and room addresses +# created on this server. For example if the server_name was example.com, +# usernames on this server would be in the format @user:example.com +# +# In most cases you should avoid using a matrix specific subdomain such as +# matrix.example.com or synapse.example.com as the server_name for the same +# reasons you wouldn't use user@email.example.com as your email address. +# See https://github.com/matrix-org/synapse/blob/master/docs/delegate.md +# for information on how to host Synapse on a subdomain while preserving +# a clean server_name. +# +# The server_name cannot be changed later so it is important to +# configure this correctly before you start Synapse. It should be all +# lowercase and may contain an explicit port. 
+# Examples: matrix.org, localhost:8080 # server_name: "SERVERNAME" diff --git a/synapse/config/server.py b/synapse/config/server.py index 532b91047024..ef6d70e3f857 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -641,10 +641,23 @@ def generate_config_section( """\ ## Server ## - # The domain name of the server, with optional explicit port. - # This is used by remote servers to connect to this server, - # e.g. matrix.org, localhost:8080, etc. - # This is also the last part of your UserID. + # The public-facing domain of the server + # + # The server_name name will appear at the end of usernames and room addresses + # created on this server. For example if the server_name was example.com, + # usernames on this server would be in the format @user:example.com + # + # In most cases you should avoid using a matrix specific subdomain such as + # matrix.example.com or synapse.example.com as the server_name for the same + # reasons you wouldn't use user@email.example.com as your email address. + # See https://github.com/matrix-org/synapse/blob/master/docs/delegate.md + # for information on how to host Synapse on a subdomain while preserving + # a clean server_name. + # + # The server_name cannot be changed later so it is important to + # configure this correctly before you start Synapse. It should be all + # lowercase and may contain an explicit port. + # Examples: matrix.org, localhost:8080 # server_name: "%(server_name)s" From ea70f1c362dc4bd6c0f8a67e16ed0971fe095e5b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 29 Sep 2020 21:48:33 +0100 Subject: [PATCH 087/245] Various clean ups to room stream tokens. (#8423) --- changelog.d/8423.misc | 1 + synapse/events/__init__.py | 6 +-- synapse/handlers/admin.py | 2 +- synapse/handlers/device.py | 4 +- synapse/handlers/initial_sync.py | 3 +- synapse/handlers/pagination.py | 5 +-- synapse/handlers/room.py | 4 +- synapse/handlers/sync.py | 20 ++++++--- synapse/notifier.py | 4 +- synapse/replication/tcp/client.py | 6 +-- synapse/rest/admin/__init__.py | 3 +- synapse/storage/databases/main/stream.py | 38 +++++++++-------- synapse/storage/persist_events.py | 5 +-- synapse/types.py | 53 +++++++++++++++--------- tests/rest/client/v1/test_rooms.py | 8 ++-- tests/storage/test_purge.py | 10 ++--- 16 files changed, 96 insertions(+), 76 deletions(-) create mode 100644 changelog.d/8423.misc diff --git a/changelog.d/8423.misc b/changelog.d/8423.misc new file mode 100644 index 000000000000..7260e3fa4182 --- /dev/null +++ b/changelog.d/8423.misc @@ -0,0 +1 @@ +Various refactors to simplify stream token handling. diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index bf800a38528e..dc49df08122e 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -23,7 +23,7 @@ from unpaddedbase64 import encode_base64 from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions -from synapse.types import JsonDict +from synapse.types import JsonDict, RoomStreamToken from synapse.util.caches import intern_dict from synapse.util.frozenutils import freeze @@ -118,8 +118,8 @@ def __init__(self, internal_metadata_dict: JsonDict): # XXX: These are set by StreamWorkerStore._set_before_and_after. 
# I'm pretty sure that these are never persisted to the database, so shouldn't # be here - before = DictProperty("before") # type: str - after = DictProperty("after") # type: str + before = DictProperty("before") # type: RoomStreamToken + after = DictProperty("after") # type: RoomStreamToken order = DictProperty("order") # type: Tuple[int, int] def get_dict(self) -> JsonDict: diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index dd981c597eff..1ce2091b4649 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -153,7 +153,7 @@ async def export_user_data(self, user_id, writer): if not events: break - from_key = RoomStreamToken.parse(events[-1].internal_metadata.after) + from_key = events[-1].internal_metadata.after events = await filter_events_for_client(self.storage, user_id, events) diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 4149520d6c56..b9d90981048a 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -29,7 +29,6 @@ from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import ( - RoomStreamToken, StreamToken, get_domain_from_id, get_verify_key_from_cross_signing_key, @@ -113,8 +112,7 @@ async def get_user_ids_changed(self, user_id: str, from_token: StreamToken): set_tag("user_id", user_id) set_tag("from_token", from_token) - now_room_id = self.store.get_room_max_stream_ordering() - now_room_key = RoomStreamToken(None, now_room_id) + now_room_key = self.store.get_room_max_token() room_ids = await self.store.get_rooms_for_user(user_id) diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 8cd7eb22a303..43f15435def5 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -325,7 +325,8 @@ async def _room_initial_sync_parted( if limit is None: limit = 10 - stream_token = await self.store.get_stream_token_for_event(member_event_id) + leave_position = await self.store.get_position_for_event(member_event_id) + stream_token = leave_position.to_room_stream_token() messages, token = await self.store.get_recent_events_for_room( room_id, limit=limit, end_token=stream_token diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index a0b3bdb5e0c3..d6779a4b4439 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -25,7 +25,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.state import StateFilter from synapse.streams.config import PaginationConfig -from synapse.types import Requester, RoomStreamToken +from synapse.types import Requester from synapse.util.async_helpers import ReadWriteLock from synapse.util.stringutils import random_string from synapse.visibility import filter_events_for_client @@ -373,10 +373,9 @@ async def get_messages( # case "JOIN" would have been returned. 
assert member_event_id - leave_token_str = await self.store.get_topological_token_for_event( + leave_token = await self.store.get_topological_token_for_event( member_event_id ) - leave_token = RoomStreamToken.parse(leave_token_str) assert leave_token.topological is not None if leave_token.topological < curr_topo: diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 11bf146bedcd..836b3f381a52 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1134,14 +1134,14 @@ async def get_new_events( events[:] = events[:limit] if events: - end_key = RoomStreamToken.parse(events[-1].internal_metadata.after) + end_key = events[-1].internal_metadata.after else: end_key = to_key return (events, end_key) def get_current_key(self) -> RoomStreamToken: - return RoomStreamToken(None, self.store.get_room_max_stream_ordering()) + return self.store.get_room_max_token() def get_current_key_for_room(self, room_id: str) -> Awaitable[str]: return self.store.get_room_events_max_id(room_id) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index e948efef2e0a..bfe2583002db 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -519,7 +519,7 @@ async def _load_filtered_recents( if len(recents) > timeline_limit: limited = True recents = recents[-timeline_limit:] - room_key = RoomStreamToken.parse(recents[0].internal_metadata.before) + room_key = recents[0].internal_metadata.before prev_batch_token = now_token.copy_and_replace("room_key", room_key) @@ -1595,16 +1595,24 @@ async def _get_rooms_changed( if leave_events: leave_event = leave_events[-1] - leave_stream_token = await self.store.get_stream_token_for_event( + leave_position = await self.store.get_position_for_event( leave_event.event_id ) - leave_token = since_token.copy_and_replace( - "room_key", leave_stream_token - ) - if since_token and since_token.is_after(leave_token): + # If the leave event happened before the since token then we + # bail. + if since_token and not leave_position.persisted_after( + since_token.room_key + ): continue + # We can safely convert the position of the leave event into a + # stream token as it'll only be used in the context of this + # room. (c.f. the docstring of `to_room_stream_token`). + leave_token = since_token.copy_and_replace( + "room_key", leave_position.to_room_stream_token() + ) + # If this is an out of band message, like a remote invite # rejection, we include it in the recents batch. Otherwise, we # let _load_filtered_recents handle fetching the correct diff --git a/synapse/notifier.py b/synapse/notifier.py index 441b3d15e2d5..59415f6f88c5 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -163,7 +163,7 @@ def new_listener(self, token: StreamToken) -> _NotificationListener: """ # Immediately wake up stream if something has already since happened # since their last token. 
- if self.last_notified_token.is_after(token): + if self.last_notified_token != token: return _NotificationListener(defer.succeed(self.current_token)) else: return _NotificationListener(self.notify_deferred.observe()) @@ -470,7 +470,7 @@ async def get_events_for( async def check_for_updates( before_token: StreamToken, after_token: StreamToken ) -> EventStreamResult: - if not after_token.is_after(before_token): + if after_token == before_token: return EventStreamResult([], (from_token, from_token)) events = [] # type: List[EventBase] diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 55af3d41ea77..e165429cad84 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -29,7 +29,7 @@ EventsStreamEventRow, EventsStreamRow, ) -from synapse.types import PersistedEventPosition, RoomStreamToken, UserID +from synapse.types import PersistedEventPosition, UserID from synapse.util.async_helpers import timeout_deferred from synapse.util.metrics import Measure @@ -152,9 +152,7 @@ async def on_rdata( if event.type == EventTypes.Member: extra_users = (UserID.from_string(event.state_key),) - max_token = RoomStreamToken( - None, self.store.get_room_max_stream_ordering() - ) + max_token = self.store.get_room_max_token() event_pos = PersistedEventPosition(instance_name, token) self.notifier.on_new_room_event( event, event_pos, max_token, extra_users diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 5c5f00b21376..ba53f66f027d 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -109,7 +109,8 @@ async def on_POST(self, request, room_id, event_id): if event.room_id != room_id: raise SynapseError(400, "Event is for wrong room.") - token = await self.store.get_topological_token_for_event(event_id) + room_token = await self.store.get_topological_token_for_event(event_id) + token = str(room_token) logger.info("[purge] purging up to token %s (event_id %s)", token, event_id) elif "purge_up_to_ts" in body: diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 92e96468b42a..37249f1e3f99 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -35,7 +35,6 @@ - topological tokems: "t%d-%d", where the integers map to the topological and stream ordering columns respectively. """ - import abc import logging from collections import namedtuple @@ -54,7 +53,7 @@ ) from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine -from synapse.types import Collection, RoomStreamToken +from synapse.types import Collection, PersistedEventPosition, RoomStreamToken from synapse.util.caches.stream_change_cache import StreamChangeCache if TYPE_CHECKING: @@ -305,6 +304,9 @@ def get_room_max_stream_ordering(self) -> int: def get_room_min_stream_ordering(self) -> int: raise NotImplementedError() + def get_room_max_token(self) -> RoomStreamToken: + return RoomStreamToken(None, self.get_room_max_stream_ordering()) + async def get_room_events_stream_for_rooms( self, room_ids: Collection[str], @@ -611,26 +613,28 @@ def get_stream_id_for_event_txn( allow_none=allow_none, ) - async def get_stream_token_for_event(self, event_id: str) -> RoomStreamToken: - """The stream token for an event - Args: - event_id: The id of the event to look up a stream token for. - Raises: - StoreError if the event wasn't in the database. 
- Returns: - A stream token. + async def get_position_for_event(self, event_id: str) -> PersistedEventPosition: + """Get the persisted position for an event """ - stream_id = await self.get_stream_id_for_event(event_id) - return RoomStreamToken(None, stream_id) + row = await self.db_pool.simple_select_one( + table="events", + keyvalues={"event_id": event_id}, + retcols=("stream_ordering", "instance_name"), + desc="get_position_for_event", + ) + + return PersistedEventPosition( + row["instance_name"] or "master", row["stream_ordering"] + ) - async def get_topological_token_for_event(self, event_id: str) -> str: + async def get_topological_token_for_event(self, event_id: str) -> RoomStreamToken: """The stream token for an event Args: event_id: The id of the event to look up a stream token for. Raises: StoreError if the event wasn't in the database. Returns: - A "t%d-%d" topological token. + A `RoomStreamToken` topological token. """ row = await self.db_pool.simple_select_one( table="events", @@ -638,7 +642,7 @@ async def get_topological_token_for_event(self, event_id: str) -> str: retcols=("stream_ordering", "topological_ordering"), desc="get_topological_token_for_event", ) - return "t%d-%d" % (row["topological_ordering"], row["stream_ordering"]) + return RoomStreamToken(row["topological_ordering"], row["stream_ordering"]) async def get_current_topological_token(self, room_id: str, stream_key: int) -> int: """Gets the topological token in a room after or at the given stream @@ -687,8 +691,8 @@ def _set_before_and_after( else: topo = None internal = event.internal_metadata - internal.before = str(RoomStreamToken(topo, stream - 1)) - internal.after = str(RoomStreamToken(topo, stream)) + internal.before = RoomStreamToken(topo, stream - 1) + internal.after = RoomStreamToken(topo, stream) internal.order = (int(topo) if topo else 0, int(stream)) async def get_events_around( diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py index ded6cf965528..72939f3984bc 100644 --- a/synapse/storage/persist_events.py +++ b/synapse/storage/persist_events.py @@ -229,7 +229,7 @@ async def persist_events( defer.gatherResults(deferreds, consumeErrors=True) ) - return RoomStreamToken(None, self.main_store.get_current_events_token()) + return self.main_store.get_room_max_token() async def persist_event( self, event: EventBase, context: EventContext, backfilled: bool = False @@ -247,11 +247,10 @@ async def persist_event( await make_deferred_yieldable(deferred) - max_persisted_id = self.main_store.get_current_events_token() event_stream_id = event.internal_metadata.stream_ordering pos = PersistedEventPosition(self._instance_name, event_stream_id) - return pos, RoomStreamToken(None, max_persisted_id) + return pos, self.main_store.get_room_max_token() def _maybe_start_persisting(self, room_id: str): async def persisting_queue(item): diff --git a/synapse/types.py b/synapse/types.py index ec39f9e1e885..02bcc197ec7b 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -413,6 +413,18 @@ def parse_stream_token(cls, string: str) -> "RoomStreamToken": pass raise SynapseError(400, "Invalid token %r" % (string,)) + def copy_and_advance(self, other: "RoomStreamToken") -> "RoomStreamToken": + """Return a new token such that if an event is after both this token and + the other token, then its after the returned token too. 
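+
+        For example, advancing RoomStreamToken(None, 5) with RoomStreamToken(None, 8)
+        returns RoomStreamToken(None, 8). Tokens with a topological part cannot be
+        advanced and raise instead.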
+ """ + + if self.topological or other.topological: + raise Exception("Can't advance topological tokens") + + max_stream = max(self.stream, other.stream) + + return RoomStreamToken(None, max_stream) + def as_tuple(self) -> Tuple[Optional[int], int]: return (self.topological, self.stream) @@ -458,31 +470,20 @@ def to_string(self): def room_stream_id(self): return self.room_key.stream - def is_after(self, other): - """Does this token contain events that the other doesn't?""" - return ( - (other.room_stream_id < self.room_stream_id) - or (int(other.presence_key) < int(self.presence_key)) - or (int(other.typing_key) < int(self.typing_key)) - or (int(other.receipt_key) < int(self.receipt_key)) - or (int(other.account_data_key) < int(self.account_data_key)) - or (int(other.push_rules_key) < int(self.push_rules_key)) - or (int(other.to_device_key) < int(self.to_device_key)) - or (int(other.device_list_key) < int(self.device_list_key)) - or (int(other.groups_key) < int(self.groups_key)) - ) - def copy_and_advance(self, key, new_value) -> "StreamToken": """Advance the given key in the token to a new value if and only if the new value is after the old value. """ - new_token = self.copy_and_replace(key, new_value) if key == "room_key": - new_id = new_token.room_stream_id - old_id = self.room_stream_id - else: - new_id = int(getattr(new_token, key)) - old_id = int(getattr(self, key)) + new_token = self.copy_and_replace( + "room_key", self.room_key.copy_and_advance(new_value) + ) + return new_token + + new_token = self.copy_and_replace(key, new_value) + new_id = int(getattr(new_token, key)) + old_id = int(getattr(self, key)) + if old_id < new_id: return new_token else: @@ -509,6 +510,18 @@ class PersistedEventPosition: def persisted_after(self, token: RoomStreamToken) -> bool: return token.stream < self.stream + def to_room_stream_token(self) -> RoomStreamToken: + """Converts the position to a room stream token such that events + persisted in the same room after this position will be after the + returned `RoomStreamToken`. + + Note: no guarentees are made about ordering w.r.t. events in other + rooms. + """ + # Doing the naive thing satisfies the desired properties described in + # the docstring. + return RoomStreamToken(None, self.stream) + class ThirdPartyInstanceID( namedtuple("ThirdPartyInstanceID", ("appservice_id", "network_id")) diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 0a567b032f45..a3287011e94a 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -902,15 +902,15 @@ def test_room_messages_purge(self): # Send a first message in the room, which will be removed by the purge. first_event_id = self.helper.send(self.room_id, "message 1")["event_id"] - first_token = self.get_success( - store.get_topological_token_for_event(first_event_id) + first_token = str( + self.get_success(store.get_topological_token_for_event(first_event_id)) ) # Send a second message in the room, which won't be removed, and which we'll # use as the marker to purge events before. 
second_event_id = self.helper.send(self.room_id, "message 2")["event_id"] - second_token = self.get_success( - store.get_topological_token_for_event(second_event_id) + second_token = str( + self.get_success(store.get_topological_token_for_event(second_event_id)) ) # Send a third event in the room to ensure we don't fall under any edge case diff --git a/tests/storage/test_purge.py b/tests/storage/test_purge.py index 918387733b20..723cd2893354 100644 --- a/tests/storage/test_purge.py +++ b/tests/storage/test_purge.py @@ -47,8 +47,8 @@ def test_purge(self): storage = self.hs.get_storage() # Get the topological token - event = self.get_success( - store.get_topological_token_for_event(last["event_id"]) + event = str( + self.get_success(store.get_topological_token_for_event(last["event_id"])) ) # Purge everything before this topological token @@ -74,12 +74,10 @@ def test_purge_wont_delete_extrems(self): storage = self.hs.get_datastore() # Set the topological token higher than it should be - event = self.get_success( + token = self.get_success( storage.get_topological_token_for_event(last["event_id"]) ) - event = "t{}-{}".format( - *list(map(lambda x: x + 1, map(int, event[1:].split("-")))) - ) + event = "t{}-{}".format(token.topological + 1, token.stream + 1) # Purge everything before this topological token purge = defer.ensureDeferred(storage.purge_history(self.room_id, event, True)) From ceafb5a1c61f699d659b1b38577b1c2264721e28 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 30 Sep 2020 16:42:05 +0100 Subject: [PATCH 088/245] Drop support for ancient prometheus_client (#8426) Drop compatibility hacks for prometheus-client pre 0.4.0. Debian stretch and Fedora 31 both have newer versions, so hopefully this will be ok. --- changelog.d/8426.removal | 1 + synapse/metrics/_exposition.py | 24 ++---------------------- synapse/python_dependencies.py | 6 +++++- 3 files changed, 8 insertions(+), 23 deletions(-) create mode 100644 changelog.d/8426.removal diff --git a/changelog.d/8426.removal b/changelog.d/8426.removal new file mode 100644 index 000000000000..a56277fe7ad6 --- /dev/null +++ b/changelog.d/8426.removal @@ -0,0 +1 @@ +Drop support for `prometheus_client` older than 0.4.0. diff --git a/synapse/metrics/_exposition.py b/synapse/metrics/_exposition.py index 4304c60d56b9..c6457ba450f0 100644 --- a/synapse/metrics/_exposition.py +++ b/synapse/metrics/_exposition.py @@ -24,7 +24,6 @@ import math import threading -from collections import namedtuple from http.server import BaseHTTPRequestHandler, HTTPServer from socketserver import ThreadingMixIn from urllib.parse import parse_qs, urlparse @@ -35,14 +34,6 @@ from synapse.util import caches -try: - from prometheus_client.samples import Sample -except ImportError: - Sample = namedtuple( # type: ignore[no-redef] # noqa - "Sample", ["name", "labels", "value", "timestamp", "exemplar"] - ) - - CONTENT_TYPE_LATEST = str("text/plain; version=0.0.4; charset=utf-8") @@ -93,17 +84,6 @@ def sample_line(line, name): ) -def nameify_sample(sample): - """ - If we get a prometheus_client<0.4.0 sample as a tuple, transform it into a - namedtuple which has the names we expect. 
- """ - if not isinstance(sample, Sample): - sample = Sample(*sample, None, None) - - return sample - - def generate_latest(registry, emit_help=False): # Trigger the cache metrics to be rescraped, which updates the common @@ -144,7 +124,7 @@ def generate_latest(registry, emit_help=False): ) ) output.append("# TYPE {0} {1}\n".format(mname, mtype)) - for sample in map(nameify_sample, metric.samples): + for sample in metric.samples: # Get rid of the OpenMetrics specific samples for suffix in ["_created", "_gsum", "_gcount"]: if sample.name.endswith(suffix): @@ -172,7 +152,7 @@ def generate_latest(registry, emit_help=False): ) ) output.append("# TYPE {0} {1}\n".format(mnewname, mtype)) - for sample in map(nameify_sample, metric.samples): + for sample in metric.samples: # Get rid of the OpenMetrics specific samples for suffix in ["_created", "_gsum", "_gcount"]: if sample.name.endswith(suffix): diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 288631477eaf..0ddead8a0f49 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -68,7 +68,11 @@ "pymacaroons>=0.13.0", "msgpack>=0.5.2", "phonenumbers>=8.2.0", - "prometheus_client>=0.0.18,<0.9.0", + # we use GaugeHistogramMetric, which was added in prom-client 0.4.0. + # prom-client has a history of breaking backwards compatibility between + # minor versions (https://github.com/prometheus/client_python/issues/317), + # so we also pin the minor version. + "prometheus_client>=0.4.0,<0.9.0", # we use attr.validators.deep_iterable, which arrived in 19.1.0 (Note: # Fedora 31 only has 19.1, so if we want to upgrade we should wait until 33 # is out in November.) From 1c8ca2c54363dc09744f9618f30181f015e63ffe Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 30 Sep 2020 16:44:10 +0100 Subject: [PATCH 089/245] Fix _exposition.py to stop stripping samples Our hacked-up `_exposition.py` was stripping out some samples it shouldn't have been. Put them back in, to more closely match the upstream `exposition.py`. --- synapse/metrics/_exposition.py | 40 ++++++++++++++++++++++++---------- 1 file changed, 29 insertions(+), 11 deletions(-) diff --git a/synapse/metrics/_exposition.py b/synapse/metrics/_exposition.py index c6457ba450f0..734271e765ae 100644 --- a/synapse/metrics/_exposition.py +++ b/synapse/metrics/_exposition.py @@ -26,6 +26,7 @@ import threading from http.server import BaseHTTPRequestHandler, HTTPServer from socketserver import ThreadingMixIn +from typing import Dict, List from urllib.parse import parse_qs, urlparse from prometheus_client import REGISTRY @@ -124,16 +125,33 @@ def generate_latest(registry, emit_help=False): ) ) output.append("# TYPE {0} {1}\n".format(mname, mtype)) - for sample in metric.samples: - # Get rid of the OpenMetrics specific samples + + om_samples = {} # type: Dict[str, List[str]] + for s in metric.samples: for suffix in ["_created", "_gsum", "_gcount"]: - if sample.name.endswith(suffix): + if s.name == metric.name + suffix: + # OpenMetrics specific sample, put in a gauge at the end. 
+ # (these come from gaugehistograms which don't get renamed, + # so no need to faff with mnewname) + om_samples.setdefault(suffix, []).append(sample_line(s, s.name)) break else: - newname = sample.name.replace(mnewname, mname) + newname = s.name.replace(mnewname, mname) if ":" in newname and newname.endswith("_total"): newname = newname[: -len("_total")] - output.append(sample_line(sample, newname)) + output.append(sample_line(s, newname)) + + for suffix, lines in sorted(om_samples.items()): + if emit_help: + output.append( + "# HELP {0}{1} {2}\n".format( + metric.name, + suffix, + metric.documentation.replace("\\", r"\\").replace("\n", r"\n"), + ) + ) + output.append("# TYPE {0}{1} gauge\n".format(metric.name, suffix)) + output.extend(lines) # Get rid of the weird colon things while we're at it if mtype == "counter": @@ -152,16 +170,16 @@ def generate_latest(registry, emit_help=False): ) ) output.append("# TYPE {0} {1}\n".format(mnewname, mtype)) - for sample in metric.samples: - # Get rid of the OpenMetrics specific samples + + for s in metric.samples: + # Get rid of the OpenMetrics specific samples (we should already have + # dealt with them above anyway.) for suffix in ["_created", "_gsum", "_gcount"]: - if sample.name.endswith(suffix): + if s.name == metric.name + suffix: break else: output.append( - sample_line( - sample, sample.name.replace(":total", "").replace(":", "_") - ) + sample_line(s, s.name.replace(":total", "").replace(":", "_")) ) return "".join(output).encode("utf-8") From 6d2d42f8fb04599713d3e6e7fc3bc4c9b7063c9a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 29 Sep 2020 22:26:28 +0100 Subject: [PATCH 090/245] Rewrite BucketCollector This was a bit unweildy for what I wanted: in particular, I wanted to assign each measurement straight into a bucket, rather than storing an intermediate Counter which didn't do any bucketing at all. I've replaced it with something that is hopefully a bit easier to use. (I'm not entirely sure what the difference between a HistogramMetricFamily and a GaugeHistogramMetricFamily is, but given our counters can go down as well as up the latter *sounds* more accurate?) --- synapse/metrics/__init__.py | 115 +++++++++++++--------- synapse/storage/databases/main/metrics.py | 26 ++--- tests/storage/test_event_metrics.py | 19 ++-- 3 files changed, 89 insertions(+), 71 deletions(-) diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index a1f7ca344924..b8d2a8e8a9bf 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -15,6 +15,7 @@ import functools import gc +import itertools import logging import os import platform @@ -27,8 +28,8 @@ from prometheus_client.core import ( REGISTRY, CounterMetricFamily, + GaugeHistogramMetricFamily, GaugeMetricFamily, - HistogramMetricFamily, ) from twisted.internet import reactor @@ -46,7 +47,7 @@ METRICS_PREFIX = "/_synapse/metrics" running_on_pypy = platform.python_implementation() == "PyPy" -all_gauges = {} # type: Dict[str, Union[LaterGauge, InFlightGauge, BucketCollector]] +all_gauges = {} # type: Dict[str, Union[LaterGauge, InFlightGauge]] HAVE_PROC_SELF_STAT = os.path.exists("/proc/self/stat") @@ -205,63 +206,83 @@ def _register_with_collector(self): all_gauges[self.name] = self -@attr.s(slots=True, hash=True) -class BucketCollector: - """ - Like a Histogram, but allows buckets to be point-in-time instead of - incrementally added to. +class GaugeBucketCollector: + """Like a Histogram, but the buckets are Gauges which are updated atomically. 
- Args: - name (str): Base name of metric to be exported to Prometheus. - data_collector (callable -> dict): A synchronous callable that - returns a dict mapping bucket to number of items in the - bucket. If these buckets are not the same as the buckets - given to this class, they will be remapped into them. - buckets (list[float]): List of floats/ints of the buckets to - give to Prometheus. +Inf is ignored, if given. + The data is updated by calling `update_data` with an iterable of measurements. + We assume that the data is updated less frequently than it is reported to + Prometheus, and optimise for that case. """ - name = attr.ib() - data_collector = attr.ib() - buckets = attr.ib() + __slots__ = ("_name", "_documentation", "_bucket_bounds", "_metric") - def collect(self): + def __init__( + self, + name: str, + documentation: str, + buckets: Iterable[float], + registry=REGISTRY, + ): + """ + Args: + name: base name of metric to be exported to Prometheus. (a _bucket suffix + will be added.) + documentation: help text for the metric + buckets: The top bounds of the buckets to report + registry: metric registry to register with + """ + self._name = name + self._documentation = documentation - # Fetch the data -- this must be synchronous! - data = self.data_collector() + # the tops of the buckets + self._bucket_bounds = [float(b) for b in buckets] + if self._bucket_bounds != sorted(self._bucket_bounds): + raise ValueError("Buckets not in sorted order") - buckets = {} # type: Dict[float, int] + if self._bucket_bounds[-1] != float("inf"): + self._bucket_bounds.append(float("inf")) - res = [] - for x in data.keys(): - for i, bound in enumerate(self.buckets): - if x <= bound: - buckets[bound] = buckets.get(bound, 0) + data[x] + self._metric = self._values_to_metric([]) + registry.register(self) - for i in self.buckets: - res.append([str(i), buckets.get(i, 0)]) + def collect(self): + yield self._metric - res.append(["+Inf", sum(data.values())]) + def update_data(self, values: Iterable[float]): + """Update the data to be reported by the metric - metric = HistogramMetricFamily( - self.name, "", buckets=res, sum_value=sum(x * y for x, y in data.items()) + The existing data is cleared, and each measurement in the input is assigned + to the relevant bucket. + """ + self._metric = self._values_to_metric(values) + + def _values_to_metric(self, values: Iterable[float]) -> GaugeHistogramMetricFamily: + total = 0.0 + bucket_values = [0 for _ in self._bucket_bounds] + + for v in values: + # assign each value to a bucket + for i, bound in enumerate(self._bucket_bounds): + if v <= bound: + bucket_values[i] += 1 + break + + # ... and increment the sum + total += v + + # now, aggregate the bucket values so that they count the number of entries in + # that bucket or below. 
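+        # For example, with bucket bounds [1, 2, 3, inf] and values (0.5, 1.5, 5.0),
+        # bucket_values is [1, 1, 0, 1] and the accumulated values are [1, 2, 2, 3].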
+ accumulated_values = itertools.accumulate(bucket_values) + + return GaugeHistogramMetricFamily( + self._name, + self._documentation, + buckets=list( + zip((str(b) for b in self._bucket_bounds), accumulated_values) + ), + gsum_value=total, ) - yield metric - - def __attrs_post_init__(self): - self.buckets = [float(x) for x in self.buckets if x != "+Inf"] - if self.buckets != sorted(self.buckets): - raise ValueError("Buckets not sorted") - - self.buckets = tuple(self.buckets) - - if self.name in all_gauges.keys(): - logger.warning("%s already registered, reregistering" % (self.name,)) - REGISTRY.unregister(all_gauges.pop(self.name)) - - REGISTRY.register(self) - all_gauges[self.name] = self # diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py index 686052bd83c0..4efc093b9e84 100644 --- a/synapse/storage/databases/main/metrics.py +++ b/synapse/storage/databases/main/metrics.py @@ -12,10 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import typing -from collections import Counter -from synapse.metrics import BucketCollector +from synapse.metrics import GaugeBucketCollector from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage._base import SQLBaseStore from synapse.storage.database import DatabasePool @@ -23,6 +21,14 @@ EventPushActionsWorkerStore, ) +# Collect metrics on the number of forward extremities that exist. +_extremities_collecter = GaugeBucketCollector( + "synapse_forward_extremities", + "Number of rooms on the server with the given number of forward extremities" + " or fewer", + buckets=[1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500], +) + class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): """Functions to pull various metrics from the DB, for e.g. phone home @@ -32,18 +38,6 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): super().__init__(database, db_conn, hs) - # Collect metrics on the number of forward extremities that exist. 
- # Counter of number of extremities to count - self._current_forward_extremities_amount = ( - Counter() - ) # type: typing.Counter[int] - - BucketCollector( - "synapse_forward_extremities", - lambda: self._current_forward_extremities_amount, - buckets=[1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"], - ) - # Read the extrems every 60 minutes def read_forward_extremities(): # run as a background process to make sure that the database transactions @@ -65,7 +59,7 @@ def fetch(txn): return txn.fetchall() res = await self.db_pool.runInteraction("read_forward_extremities", fetch) - self._current_forward_extremities_amount = Counter([x[0] for x in res]) + _extremities_collecter.update_data(x[0] for x in res) async def count_daily_messages(self): """ diff --git a/tests/storage/test_event_metrics.py b/tests/storage/test_event_metrics.py index 949846fe33de..3957471f3fbc 100644 --- a/tests/storage/test_event_metrics.py +++ b/tests/storage/test_event_metrics.py @@ -52,14 +52,14 @@ def test_exposed_to_prometheus(self): self.reactor.advance(60 * 60 * 1000) self.pump(1) - items = set( + items = list( filter( lambda x: b"synapse_forward_extremities_" in x, - generate_latest(REGISTRY).split(b"\n"), + generate_latest(REGISTRY, emit_help=False).split(b"\n"), ) ) - expected = { + expected = [ b'synapse_forward_extremities_bucket{le="1.0"} 0.0', b'synapse_forward_extremities_bucket{le="2.0"} 2.0', b'synapse_forward_extremities_bucket{le="3.0"} 2.0', @@ -72,9 +72,12 @@ def test_exposed_to_prometheus(self): b'synapse_forward_extremities_bucket{le="100.0"} 3.0', b'synapse_forward_extremities_bucket{le="200.0"} 3.0', b'synapse_forward_extremities_bucket{le="500.0"} 3.0', - b'synapse_forward_extremities_bucket{le="+Inf"} 3.0', - b"synapse_forward_extremities_count 3.0", - b"synapse_forward_extremities_sum 10.0", - } - + # per https://docs.google.com/document/d/1KwV0mAXwwbvvifBvDKH_LU1YjyXE_wxCkHNoCGq1GX0/edit#heading=h.wghdjzzh72j9, + # "inf" is valid: "this includes variants such as inf" + b'synapse_forward_extremities_bucket{le="inf"} 3.0', + b"# TYPE synapse_forward_extremities_gcount gauge", + b"synapse_forward_extremities_gcount 3.0", + b"# TYPE synapse_forward_extremities_gsum gauge", + b"synapse_forward_extremities_gsum 10.0", + ] self.assertEqual(items, expected) From 20e7c4de262746479000ec507b7a3c37f1779a60 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 29 Sep 2020 22:30:00 +0100 Subject: [PATCH 091/245] Add an improved "forward extremities" metric Hopefully, N(extremities) * N(state_events) is a more realistic approximation to "how big a problem is this room?". --- synapse/storage/databases/main/metrics.py | 27 +++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py index 4efc093b9e84..92099f95cefb 100644 --- a/synapse/storage/databases/main/metrics.py +++ b/synapse/storage/databases/main/metrics.py @@ -29,6 +29,18 @@ buckets=[1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500], ) +# we also expose metrics on the "number of excess extremity events", which is +# (E-1)*N, where E is the number of extremities and N is the number of state +# events in the room. This is an approximation to the number of state events +# we could remove from state resolution by reducing the graph to a single +# forward extremity. 
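+#
+# For example, a room with 5 forward extremities and 200 state events counts as
+# (5 - 1) * 200 = 800 excess extremity events.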
+_excess_state_events_collecter = GaugeBucketCollector( + "synapse_excess_extremity_events", + "Number of rooms on the server with the given number of excess extremity " + "events, or fewer", + buckets=[0] + [1 << n for n in range(12)], +) + class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): """Functions to pull various metrics from the DB, for e.g. phone home @@ -52,15 +64,26 @@ async def _read_forward_extremities(self): def fetch(txn): txn.execute( """ - select count(*) c from event_forward_extremities - group by room_id + SELECT t1.c, t2.c + FROM ( + SELECT room_id, COUNT(*) c FROM event_forward_extremities + GROUP BY room_id + ) t1 LEFT JOIN ( + SELECT room_id, COUNT(*) c FROM current_state_events + GROUP BY room_id + ) t2 ON t1.room_id = t2.room_id """ ) return txn.fetchall() res = await self.db_pool.runInteraction("read_forward_extremities", fetch) + _extremities_collecter.update_data(x[0] for x in res) + _excess_state_events_collecter.update_data( + (x[0] - 1) * x[1] for x in res if x[1] + ) + async def count_daily_messages(self): """ Returns an estimate of the number of messages sent in the last day. From 32acab3fa2f81890787b4fddab78a1d71e47ea94 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 29 Sep 2020 22:31:45 +0100 Subject: [PATCH 092/245] changelog --- changelog.d/8425.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/8425.feature diff --git a/changelog.d/8425.feature b/changelog.d/8425.feature new file mode 100644 index 000000000000..b4ee5bb74b9d --- /dev/null +++ b/changelog.d/8425.feature @@ -0,0 +1 @@ +Add experimental prometheus metric to track numbers of "large" rooms for state resolutiom. From 8b40843392e2df80d4f1108295ae6acd972100b0 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 30 Sep 2020 13:02:43 -0400 Subject: [PATCH 093/245] Allow additional SSO properties to be passed to the client (#8413) --- changelog.d/8413.feature | 1 + docs/sample_config.yaml | 8 ++ docs/sso_mapping_providers.md | 14 ++- docs/workers.md | 16 ++++ synapse/config/oidc_config.py | 8 ++ synapse/handlers/auth.py | 60 +++++++++++- synapse/handlers/oidc_handler.py | 56 ++++++++++- synapse/rest/client/v1/login.py | 22 +++-- tests/handlers/test_oidc.py | 160 ++++++++++++++++++++----------- 9 files changed, 278 insertions(+), 67 deletions(-) create mode 100644 changelog.d/8413.feature diff --git a/changelog.d/8413.feature b/changelog.d/8413.feature new file mode 100644 index 000000000000..abe40a901cf6 --- /dev/null +++ b/changelog.d/8413.feature @@ -0,0 +1 @@ +Support passing additional single sign-on parameters to the client. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 70cc06a6d87d..066844b5a997 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1748,6 +1748,14 @@ oidc_config: # #display_name_template: "{{ user.given_name }} {{ user.last_name }}" + # Jinja2 templates for extra attributes to send back to the client during + # login. + # + # Note that these are non-standard and clients will ignore them without modifications. + # + #extra_attributes: + #birthdate: "{{ user.birthdate }}" + # Enable CAS for registration and login. diff --git a/docs/sso_mapping_providers.md b/docs/sso_mapping_providers.md index abea432343f7..32b06aa2c570 100644 --- a/docs/sso_mapping_providers.md +++ b/docs/sso_mapping_providers.md @@ -57,7 +57,7 @@ A custom mapping provider must specify the following methods: - This method must return a string, which is the unique identifier for the user. 
Commonly the ``sub`` claim of the response. * `map_user_attributes(self, userinfo, token)` - - This method should be async. + - This method must be async. - Arguments: - `userinfo` - A `authlib.oidc.core.claims.UserInfo` object to extract user information from. @@ -66,6 +66,18 @@ A custom mapping provider must specify the following methods: - Returns a dictionary with two keys: - localpart: A required string, used to generate the Matrix ID. - displayname: An optional string, the display name for the user. +* `get_extra_attributes(self, userinfo, token)` + - This method must be async. + - Arguments: + - `userinfo` - A `authlib.oidc.core.claims.UserInfo` object to extract user + information from. + - `token` - A dictionary which includes information necessary to make + further requests to the OpenID provider. + - Returns a dictionary that is suitable to be serialized to JSON. This + will be returned as part of the response during a successful login. + + Note that care should be taken to not overwrite any of the parameters + usually returned as part of the [login response](https://matrix.org/docs/spec/client_server/latest#post-matrix-client-r0-login). ### Default OpenID Mapping Provider diff --git a/docs/workers.md b/docs/workers.md index df0ac84d9466..ad4d8ca9f25a 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -243,6 +243,22 @@ for the room are in flight: ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/messages$ +Additionally, the following endpoints should be included if Synapse is configured +to use SSO (you only need to include the ones for whichever SSO provider you're +using): + + # OpenID Connect requests. + ^/_matrix/client/(api/v1|r0|unstable)/login/sso/redirect$ + ^/_synapse/oidc/callback$ + + # SAML requests. + ^/_matrix/client/(api/v1|r0|unstable)/login/sso/redirect$ + ^/_matrix/saml2/authn_response$ + + # CAS requests. + ^/_matrix/client/(api/v1|r0|unstable)/login/(cas|sso)/redirect$ + ^/_matrix/client/(api/v1|r0|unstable)/login/cas/ticket$ + Note that a HTTP listener with `client` and `federation` resources must be configured in the `worker_listeners` option in the worker config. diff --git a/synapse/config/oidc_config.py b/synapse/config/oidc_config.py index 70fc8a2f6268..f92411681999 100644 --- a/synapse/config/oidc_config.py +++ b/synapse/config/oidc_config.py @@ -204,6 +204,14 @@ def generate_config_section(self, config_dir_path, server_name, **kwargs): # If unset, no displayname will be set. # #display_name_template: "{{{{ user.given_name }}}} {{{{ user.last_name }}}}" + + # Jinja2 templates for extra attributes to send back to the client during + # login. + # + # Note that these are non-standard and clients will ignore them without modifications. 
+ # + #extra_attributes: + #birthdate: "{{{{ user.birthdate }}}}" """.format( mapping_provider=DEFAULT_USER_MAPPING_PROVIDER ) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 0322b60cfc63..00eae9205267 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -137,6 +137,15 @@ def login_id_phone_to_thirdparty(identifier: JsonDict) -> Dict[str, str]: } +@attr.s(slots=True) +class SsoLoginExtraAttributes: + """Data we track about SAML2 sessions""" + + # time the session was created, in milliseconds + creation_time = attr.ib(type=int) + extra_attributes = attr.ib(type=JsonDict) + + class AuthHandler(BaseHandler): SESSION_EXPIRE_MS = 48 * 60 * 60 * 1000 @@ -239,6 +248,10 @@ def __init__(self, hs): # cast to tuple for use with str.startswith self._whitelisted_sso_clients = tuple(hs.config.sso_client_whitelist) + # A mapping of user ID to extra attributes to include in the login + # response. + self._extra_attributes = {} # type: Dict[str, SsoLoginExtraAttributes] + async def validate_user_via_ui_auth( self, requester: Requester, @@ -1165,6 +1178,7 @@ async def complete_sso_login( registered_user_id: str, request: SynapseRequest, client_redirect_url: str, + extra_attributes: Optional[JsonDict] = None, ): """Having figured out a mxid for this user, complete the HTTP request @@ -1173,6 +1187,8 @@ async def complete_sso_login( request: The request to complete. client_redirect_url: The URL to which to redirect the user at the end of the process. + extra_attributes: Extra attributes which will be passed to the client + during successful login. Must be JSON serializable. """ # If the account has been deactivated, do not proceed with the login # flow. @@ -1181,19 +1197,30 @@ async def complete_sso_login( respond_with_html(request, 403, self._sso_account_deactivated_template) return - self._complete_sso_login(registered_user_id, request, client_redirect_url) + self._complete_sso_login( + registered_user_id, request, client_redirect_url, extra_attributes + ) def _complete_sso_login( self, registered_user_id: str, request: SynapseRequest, client_redirect_url: str, + extra_attributes: Optional[JsonDict] = None, ): """ The synchronous portion of complete_sso_login. This exists purely for backwards compatibility of synapse.module_api.ModuleApi. """ + # Store any extra attributes which will be passed in the login response. + # Note that this is per-user so it may overwrite a previous value, this + # is considered OK since the newest SSO attributes should be most valid. + if extra_attributes: + self._extra_attributes[registered_user_id] = SsoLoginExtraAttributes( + self._clock.time_msec(), extra_attributes, + ) + # Create a login token login_token = self.macaroon_gen.generate_short_term_login_token( registered_user_id @@ -1226,6 +1253,37 @@ def _complete_sso_login( ) respond_with_html(request, 200, html) + async def _sso_login_callback(self, login_result: JsonDict) -> None: + """ + A login callback which might add additional attributes to the login response. + + Args: + login_result: The data to be sent to the client. Includes the user + ID and access token. + """ + # Expire attributes before processing. Note that there shouldn't be any + # valid logins that still have extra attributes. 
+ self._expire_sso_extra_attributes() + + extra_attributes = self._extra_attributes.get(login_result["user_id"]) + if extra_attributes: + login_result.update(extra_attributes.extra_attributes) + + def _expire_sso_extra_attributes(self) -> None: + """ + Iterate through the mapping of user IDs to extra attributes and remove any that are no longer valid. + """ + # TODO This should match the amount of time the macaroon is valid for. + LOGIN_TOKEN_EXPIRATION_TIME = 2 * 60 * 1000 + expire_before = self._clock.time_msec() - LOGIN_TOKEN_EXPIRATION_TIME + to_expire = set() + for user_id, data in self._extra_attributes.items(): + if data.creation_time < expire_before: + to_expire.add(user_id) + for user_id in to_expire: + logger.debug("Expiring extra attributes for user %s", user_id) + del self._extra_attributes[user_id] + @staticmethod def add_query_param_to_url(url: str, param_name: str, param: Any): url_parts = list(urllib.parse.urlparse(url)) diff --git a/synapse/handlers/oidc_handler.py b/synapse/handlers/oidc_handler.py index 0e06e4408d3b..19cd65267535 100644 --- a/synapse/handlers/oidc_handler.py +++ b/synapse/handlers/oidc_handler.py @@ -37,7 +37,7 @@ from synapse.http.server import respond_with_html from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable -from synapse.types import UserID, map_username_to_mxid_localpart +from synapse.types import JsonDict, UserID, map_username_to_mxid_localpart from synapse.util import json_decoder if TYPE_CHECKING: @@ -707,6 +707,15 @@ async def handle_oidc_callback(self, request: SynapseRequest) -> None: self._render_error(request, "mapping_error", str(e)) return + # Mapping providers might not have get_extra_attributes: only call this + # method if it exists. + extra_attributes = None + get_extra_attributes = getattr( + self._user_mapping_provider, "get_extra_attributes", None + ) + if get_extra_attributes: + extra_attributes = await get_extra_attributes(userinfo, token) + # and finally complete the login if ui_auth_session_id: await self._auth_handler.complete_sso_ui_auth( @@ -714,7 +723,7 @@ async def handle_oidc_callback(self, request: SynapseRequest) -> None: ) else: await self._auth_handler.complete_sso_login( - user_id, request, client_redirect_url + user_id, request, client_redirect_url, extra_attributes ) def _generate_oidc_session_token( @@ -984,7 +993,7 @@ def get_remote_user_id(self, userinfo: UserInfo) -> str: async def map_user_attributes( self, userinfo: UserInfo, token: Token ) -> UserAttribute: - """Map a ``UserInfo`` objects into user attributes. + """Map a `UserInfo` object into user attributes. Args: userinfo: An object representing the user given by the OIDC provider @@ -995,6 +1004,18 @@ async def map_user_attributes( """ raise NotImplementedError() + async def get_extra_attributes(self, userinfo: UserInfo, token: Token) -> JsonDict: + """Map a `UserInfo` object into additional attributes passed to the client during login. + + Args: + userinfo: An object representing the user given by the OIDC provider + token: A dict with the tokens returned by the provider + + Returns: + A dict containing additional attributes. Must be JSON serializable. 
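+            These are merged into the body of a successful `/login` response, so
+            keys should not clash with the standard response fields.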
+ """ + return {} + # Used to clear out "None" values in templates def jinja_finalize(thing): @@ -1009,6 +1030,7 @@ class JinjaOidcMappingConfig: subject_claim = attr.ib() # type: str localpart_template = attr.ib() # type: Template display_name_template = attr.ib() # type: Optional[Template] + extra_attributes = attr.ib() # type: Dict[str, Template] class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]): @@ -1047,10 +1069,28 @@ def parse_config(config: dict) -> JinjaOidcMappingConfig: % (e,) ) + extra_attributes = {} # type Dict[str, Template] + if "extra_attributes" in config: + extra_attributes_config = config.get("extra_attributes") or {} + if not isinstance(extra_attributes_config, dict): + raise ConfigError( + "oidc_config.user_mapping_provider.config.extra_attributes must be a dict" + ) + + for key, value in extra_attributes_config.items(): + try: + extra_attributes[key] = env.from_string(value) + except Exception as e: + raise ConfigError( + "invalid jinja template for oidc_config.user_mapping_provider.config.extra_attributes.%s: %r" + % (key, e) + ) + return JinjaOidcMappingConfig( subject_claim=subject_claim, localpart_template=localpart_template, display_name_template=display_name_template, + extra_attributes=extra_attributes, ) def get_remote_user_id(self, userinfo: UserInfo) -> str: @@ -1071,3 +1111,13 @@ async def map_user_attributes( display_name = None return UserAttribute(localpart=localpart, display_name=display_name) + + async def get_extra_attributes(self, userinfo: UserInfo, token: Token) -> JsonDict: + extras = {} # type: Dict[str, str] + for key, template in self._config.extra_attributes.items(): + try: + extras[key] = template.render(user=userinfo).strip() + except Exception as e: + # Log an error and skip this value (don't break login for this). + logger.error("Failed to render OIDC extra attribute %s: %s" % (key, e)) + return extras diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 250b03a02536..b9347b87c7c6 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -284,9 +284,7 @@ async def _complete_login( self, user_id: str, login_submission: JsonDict, - callback: Optional[ - Callable[[Dict[str, str]], Awaitable[Dict[str, str]]] - ] = None, + callback: Optional[Callable[[Dict[str, str]], Awaitable[None]]] = None, create_non_existent_users: bool = False, ) -> Dict[str, str]: """Called when we've successfully authed the user and now need to @@ -299,12 +297,12 @@ async def _complete_login( Args: user_id: ID of the user to register. login_submission: Dictionary of login information. - callback: Callback function to run after registration. + callback: Callback function to run after login. create_non_existent_users: Whether to create the user if they don't exist. Defaults to False. Returns: - result: Dictionary of account information after successful registration. + result: Dictionary of account information after successful login. """ # Before we actually log them in we check if they've already logged in @@ -339,14 +337,24 @@ async def _complete_login( return result async def _do_token_login(self, login_submission: JsonDict) -> Dict[str, str]: + """ + Handle the final stage of SSO login. + + Args: + login_submission: The JSON request body. + + Returns: + The body of the JSON response. 
+ """ token = login_submission["token"] auth_handler = self.auth_handler user_id = await auth_handler.validate_short_term_login_token_and_get_user_id( token ) - result = await self._complete_login(user_id, login_submission) - return result + return await self._complete_login( + user_id, login_submission, self.auth_handler._sso_login_callback + ) async def _do_jwt_login(self, login_submission: JsonDict) -> Dict[str, str]: token = login_submission.get("token", None) diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index 5910772aa8d5..d5087e58be9a 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -21,7 +21,6 @@ import attr import pymacaroons -from twisted.internet import defer from twisted.python.failure import Failure from twisted.web._newclient import ResponseDone @@ -87,6 +86,13 @@ def get_remote_user_id(self, userinfo): async def map_user_attributes(self, userinfo, token): return {"localpart": userinfo["username"], "display_name": None} + # Do not include get_extra_attributes to test backwards compatibility paths. + + +class TestMappingProviderExtra(TestMappingProvider): + async def get_extra_attributes(self, userinfo, token): + return {"phone": userinfo["phone"]} + def simple_async_mock(return_value=None, raises=None): # AsyncMock is not available in python3.5, this mimics part of its behaviour @@ -126,7 +132,7 @@ def make_homeserver(self, reactor, clock): config = self.default_config() config["public_baseurl"] = BASE_URL - oidc_config = config.get("oidc_config", {}) + oidc_config = {} oidc_config["enabled"] = True oidc_config["client_id"] = CLIENT_ID oidc_config["client_secret"] = CLIENT_SECRET @@ -135,6 +141,10 @@ def make_homeserver(self, reactor, clock): oidc_config["user_mapping_provider"] = { "module": __name__ + ".TestMappingProvider", } + + # Update this config with what's in the default config so that + # override_config works as expected. 
+ oidc_config.update(config.get("oidc_config", {})) config["oidc_config"] = oidc_config hs = self.setup_test_homeserver( @@ -165,11 +175,10 @@ def test_config(self): self.assertEqual(self.handler._client_auth.client_secret, CLIENT_SECRET) @override_config({"oidc_config": {"discover": True}}) - @defer.inlineCallbacks def test_discovery(self): """The handler should discover the endpoints from OIDC discovery document.""" # This would throw if some metadata were invalid - metadata = yield defer.ensureDeferred(self.handler.load_metadata()) + metadata = self.get_success(self.handler.load_metadata()) self.http_client.get_json.assert_called_once_with(WELL_KNOWN) self.assertEqual(metadata.issuer, ISSUER) @@ -181,43 +190,40 @@ def test_discovery(self): # subsequent calls should be cached self.http_client.reset_mock() - yield defer.ensureDeferred(self.handler.load_metadata()) + self.get_success(self.handler.load_metadata()) self.http_client.get_json.assert_not_called() @override_config({"oidc_config": COMMON_CONFIG}) - @defer.inlineCallbacks def test_no_discovery(self): """When discovery is disabled, it should not try to load from discovery document.""" - yield defer.ensureDeferred(self.handler.load_metadata()) + self.get_success(self.handler.load_metadata()) self.http_client.get_json.assert_not_called() @override_config({"oidc_config": COMMON_CONFIG}) - @defer.inlineCallbacks def test_load_jwks(self): """JWKS loading is done once (then cached) if used.""" - jwks = yield defer.ensureDeferred(self.handler.load_jwks()) + jwks = self.get_success(self.handler.load_jwks()) self.http_client.get_json.assert_called_once_with(JWKS_URI) self.assertEqual(jwks, {"keys": []}) # subsequent calls should be cached… self.http_client.reset_mock() - yield defer.ensureDeferred(self.handler.load_jwks()) + self.get_success(self.handler.load_jwks()) self.http_client.get_json.assert_not_called() # …unless forced self.http_client.reset_mock() - yield defer.ensureDeferred(self.handler.load_jwks(force=True)) + self.get_success(self.handler.load_jwks(force=True)) self.http_client.get_json.assert_called_once_with(JWKS_URI) # Throw if the JWKS uri is missing with self.metadata_edit({"jwks_uri": None}): - with self.assertRaises(RuntimeError): - yield defer.ensureDeferred(self.handler.load_jwks(force=True)) + self.get_failure(self.handler.load_jwks(force=True), RuntimeError) # Return empty key set if JWKS are not used self.handler._scopes = [] # not asking the openid scope self.http_client.get_json.reset_mock() - jwks = yield defer.ensureDeferred(self.handler.load_jwks(force=True)) + jwks = self.get_success(self.handler.load_jwks(force=True)) self.http_client.get_json.assert_not_called() self.assertEqual(jwks, {"keys": []}) @@ -299,11 +305,10 @@ def test_skip_verification(self): # This should not throw self.handler._validate_metadata() - @defer.inlineCallbacks def test_redirect_request(self): """The redirect request has the right arguments & generates a valid session cookie.""" req = Mock(spec=["addCookie"]) - url = yield defer.ensureDeferred( + url = self.get_success( self.handler.handle_redirect_request(req, b"http://client/redirect") ) url = urlparse(url) @@ -343,20 +348,18 @@ def test_redirect_request(self): self.assertEqual(params["nonce"], [nonce]) self.assertEqual(redirect, "http://client/redirect") - @defer.inlineCallbacks def test_callback_error(self): """Errors from the provider returned in the callback are displayed.""" self.handler._render_error = Mock() request = Mock(args={}) request.args[b"error"] = 
[b"invalid_client"] - yield defer.ensureDeferred(self.handler.handle_oidc_callback(request)) + self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_client", "") request.args[b"error_description"] = [b"some description"] - yield defer.ensureDeferred(self.handler.handle_oidc_callback(request)) + self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_client", "some description") - @defer.inlineCallbacks def test_callback(self): """Code callback works and display errors if something went wrong. @@ -377,7 +380,7 @@ def test_callback(self): "sub": "foo", "preferred_username": "bar", } - user_id = UserID("foo", "domain.org") + user_id = "@foo:domain.org" self.handler._render_error = Mock(return_value=None) self.handler._exchange_code = simple_async_mock(return_value=token) self.handler._parse_id_token = simple_async_mock(return_value=userinfo) @@ -394,13 +397,12 @@ def test_callback(self): client_redirect_url = "http://client/redirect" user_agent = "Browser" ip_address = "10.0.0.1" - session = self.handler._generate_oidc_session_token( + request.getCookie.return_value = self.handler._generate_oidc_session_token( state=state, nonce=nonce, client_redirect_url=client_redirect_url, ui_auth_session_id=None, ) - request.getCookie.return_value = session request.args = {} request.args[b"code"] = [code.encode("utf-8")] @@ -410,10 +412,10 @@ def test_callback(self): request.requestHeaders.getRawHeaders.return_value = [user_agent.encode("ascii")] request.getClientIP.return_value = ip_address - yield defer.ensureDeferred(self.handler.handle_oidc_callback(request)) + self.get_success(self.handler.handle_oidc_callback(request)) self.handler._auth_handler.complete_sso_login.assert_called_once_with( - user_id, request, client_redirect_url, + user_id, request, client_redirect_url, {}, ) self.handler._exchange_code.assert_called_once_with(code) self.handler._parse_id_token.assert_called_once_with(token, nonce=nonce) @@ -427,13 +429,13 @@ def test_callback(self): self.handler._map_userinfo_to_user = simple_async_mock( raises=MappingException() ) - yield defer.ensureDeferred(self.handler.handle_oidc_callback(request)) + self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("mapping_error") self.handler._map_userinfo_to_user = simple_async_mock(return_value=user_id) # Handle ID token errors self.handler._parse_id_token = simple_async_mock(raises=Exception()) - yield defer.ensureDeferred(self.handler.handle_oidc_callback(request)) + self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_token") self.handler._auth_handler.complete_sso_login.reset_mock() @@ -444,10 +446,10 @@ def test_callback(self): # With userinfo fetching self.handler._scopes = [] # do not ask the "openid" scope - yield defer.ensureDeferred(self.handler.handle_oidc_callback(request)) + self.get_success(self.handler.handle_oidc_callback(request)) self.handler._auth_handler.complete_sso_login.assert_called_once_with( - user_id, request, client_redirect_url, + user_id, request, client_redirect_url, {}, ) self.handler._exchange_code.assert_called_once_with(code) self.handler._parse_id_token.assert_not_called() @@ -459,17 +461,16 @@ def test_callback(self): # Handle userinfo fetching error self.handler._fetch_userinfo = simple_async_mock(raises=Exception()) - yield defer.ensureDeferred(self.handler.handle_oidc_callback(request)) + self.get_success(self.handler.handle_oidc_callback(request)) 
self.assertRenderedError("fetch_error") # Handle code exchange failure self.handler._exchange_code = simple_async_mock( raises=OidcError("invalid_request") ) - yield defer.ensureDeferred(self.handler.handle_oidc_callback(request)) + self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_request") - @defer.inlineCallbacks def test_callback_session(self): """The callback verifies the session presence and validity""" self.handler._render_error = Mock(return_value=None) @@ -478,20 +479,20 @@ def test_callback_session(self): # Missing cookie request.args = {} request.getCookie.return_value = None - yield defer.ensureDeferred(self.handler.handle_oidc_callback(request)) + self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("missing_session", "No session cookie found") # Missing session parameter request.args = {} request.getCookie.return_value = "session" - yield defer.ensureDeferred(self.handler.handle_oidc_callback(request)) + self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_request", "State parameter is missing") # Invalid cookie request.args = {} request.args[b"state"] = [b"state"] request.getCookie.return_value = "session" - yield defer.ensureDeferred(self.handler.handle_oidc_callback(request)) + self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_session") # Mismatching session @@ -504,18 +505,17 @@ def test_callback_session(self): request.args = {} request.args[b"state"] = [b"mismatching state"] request.getCookie.return_value = session - yield defer.ensureDeferred(self.handler.handle_oidc_callback(request)) + self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("mismatching_session") # Valid session request.args = {} request.args[b"state"] = [b"state"] request.getCookie.return_value = session - yield defer.ensureDeferred(self.handler.handle_oidc_callback(request)) + self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_request") @override_config({"oidc_config": {"client_auth_method": "client_secret_post"}}) - @defer.inlineCallbacks def test_exchange_code(self): """Code exchange behaves correctly and handles various error scenarios.""" token = {"type": "bearer"} @@ -524,7 +524,7 @@ def test_exchange_code(self): return_value=FakeResponse(code=200, phrase=b"OK", body=token_json) ) code = "code" - ret = yield defer.ensureDeferred(self.handler._exchange_code(code)) + ret = self.get_success(self.handler._exchange_code(code)) kwargs = self.http_client.request.call_args[1] self.assertEqual(ret, token) @@ -546,10 +546,9 @@ def test_exchange_code(self): body=b'{"error": "foo", "error_description": "bar"}', ) ) - with self.assertRaises(OidcError) as exc: - yield defer.ensureDeferred(self.handler._exchange_code(code)) - self.assertEqual(exc.exception.error, "foo") - self.assertEqual(exc.exception.error_description, "bar") + exc = self.get_failure(self.handler._exchange_code(code), OidcError) + self.assertEqual(exc.value.error, "foo") + self.assertEqual(exc.value.error_description, "bar") # Internal server error with no JSON body self.http_client.request = simple_async_mock( @@ -557,9 +556,8 @@ def test_exchange_code(self): code=500, phrase=b"Internal Server Error", body=b"Not JSON", ) ) - with self.assertRaises(OidcError) as exc: - yield defer.ensureDeferred(self.handler._exchange_code(code)) - self.assertEqual(exc.exception.error, "server_error") + exc = 
self.get_failure(self.handler._exchange_code(code), OidcError) + self.assertEqual(exc.value.error, "server_error") # Internal server error with JSON body self.http_client.request = simple_async_mock( @@ -569,17 +567,16 @@ def test_exchange_code(self): body=b'{"error": "internal_server_error"}', ) ) - with self.assertRaises(OidcError) as exc: - yield defer.ensureDeferred(self.handler._exchange_code(code)) - self.assertEqual(exc.exception.error, "internal_server_error") + + exc = self.get_failure(self.handler._exchange_code(code), OidcError) + self.assertEqual(exc.value.error, "internal_server_error") # 4xx error without "error" field self.http_client.request = simple_async_mock( return_value=FakeResponse(code=400, phrase=b"Bad request", body=b"{}",) ) - with self.assertRaises(OidcError) as exc: - yield defer.ensureDeferred(self.handler._exchange_code(code)) - self.assertEqual(exc.exception.error, "server_error") + exc = self.get_failure(self.handler._exchange_code(code), OidcError) + self.assertEqual(exc.value.error, "server_error") # 2xx error with "error" field self.http_client.request = simple_async_mock( @@ -587,9 +584,62 @@ def test_exchange_code(self): code=200, phrase=b"OK", body=b'{"error": "some_error"}', ) ) - with self.assertRaises(OidcError) as exc: - yield defer.ensureDeferred(self.handler._exchange_code(code)) - self.assertEqual(exc.exception.error, "some_error") + exc = self.get_failure(self.handler._exchange_code(code), OidcError) + self.assertEqual(exc.value.error, "some_error") + + @override_config( + { + "oidc_config": { + "user_mapping_provider": { + "module": __name__ + ".TestMappingProviderExtra" + } + } + } + ) + def test_extra_attributes(self): + """ + Login while using a mapping provider that implements get_extra_attributes. + """ + token = { + "type": "bearer", + "id_token": "id_token", + "access_token": "access_token", + } + userinfo = { + "sub": "foo", + "phone": "1234567", + } + user_id = "@foo:domain.org" + self.handler._exchange_code = simple_async_mock(return_value=token) + self.handler._parse_id_token = simple_async_mock(return_value=userinfo) + self.handler._map_userinfo_to_user = simple_async_mock(return_value=user_id) + self.handler._auth_handler.complete_sso_login = simple_async_mock() + request = Mock( + spec=["args", "getCookie", "addCookie", "requestHeaders", "getClientIP"] + ) + + state = "state" + client_redirect_url = "http://client/redirect" + request.getCookie.return_value = self.handler._generate_oidc_session_token( + state=state, + nonce="nonce", + client_redirect_url=client_redirect_url, + ui_auth_session_id=None, + ) + + request.args = {} + request.args[b"code"] = [b"code"] + request.args[b"state"] = [state.encode("utf-8")] + + request.requestHeaders = Mock(spec=["getRawHeaders"]) + request.requestHeaders.getRawHeaders.return_value = [b"Browser"] + request.getClientIP.return_value = "10.0.0.1" + + self.get_success(self.handler.handle_oidc_callback(request)) + + self.handler._auth_handler.complete_sso_login.assert_called_once_with( + user_id, request, client_redirect_url, {"phone": "1234567"}, + ) def test_map_userinfo_to_user(self): """Ensure that mapping the userinfo returned from a provider to an MXID works properly.""" From 7941372ec84786f85ae6d75fd2d7a4af5b72ac98 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 30 Sep 2020 20:29:19 +0100 Subject: [PATCH 094/245] Make token serializing/deserializing async (#8427) The idea is that in future tokens will encode a mapping of instance to position. 
However, we don't want to include the full instance name in the string representation, so instead we'll have a mapping between instance name and an immutable integer ID in the DB that we can use instead. We'll then do the lookup when we serialize/deserialize the token (we could alternatively pass around an `Instance` type that includes both the name and ID, but that turns out to be a lot more invasive). --- changelog.d/8427.misc | 1 + synapse/handlers/events.py | 4 +- synapse/handlers/initial_sync.py | 14 +++--- synapse/handlers/pagination.py | 8 ++-- synapse/handlers/room.py | 8 ++-- synapse/handlers/search.py | 8 ++-- synapse/rest/admin/__init__.py | 2 +- synapse/rest/client/v1/events.py | 3 +- synapse/rest/client/v1/initial_sync.py | 3 +- synapse/rest/client/v1/room.py | 11 +++-- synapse/rest/client/v2_alpha/keys.py | 3 +- synapse/rest/client/v2_alpha/sync.py | 10 ++--- .../storage/databases/main/purge_events.py | 8 ++-- synapse/streams/config.py | 9 ++-- synapse/types.py | 43 +++++++++++++++---- tests/rest/client/v1/test_rooms.py | 30 +++++++++---- tests/storage/test_purge.py | 9 ++-- 17 files changed, 115 insertions(+), 59 deletions(-) create mode 100644 changelog.d/8427.misc diff --git a/changelog.d/8427.misc b/changelog.d/8427.misc new file mode 100644 index 000000000000..c9656b9112b2 --- /dev/null +++ b/changelog.d/8427.misc @@ -0,0 +1 @@ +Make stream token serializing/deserializing async. diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 0875b74ea89c..539b4fc32e95 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -133,8 +133,8 @@ async def get_stream( chunk = { "chunk": chunks, - "start": tokens[0].to_string(), - "end": tokens[1].to_string(), + "start": await tokens[0].to_string(self.store), + "end": await tokens[1].to_string(self.store), } return chunk diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 43f15435def5..39a85801c1ad 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -203,8 +203,8 @@ async def handle_room(event: RoomsForUser): messages, time_now=time_now, as_client_event=as_client_event ) ), - "start": start_token.to_string(), - "end": end_token.to_string(), + "start": await start_token.to_string(self.store), + "end": await end_token.to_string(self.store), } d["state"] = await self._event_serializer.serialize_events( @@ -249,7 +249,7 @@ async def handle_room(event: RoomsForUser): ], "account_data": account_data_events, "receipts": receipt, - "end": now_token.to_string(), + "end": await now_token.to_string(self.store), } return ret @@ -348,8 +348,8 @@ async def _room_initial_sync_parted( "chunk": ( await self._event_serializer.serialize_events(messages, time_now) ), - "start": start_token.to_string(), - "end": end_token.to_string(), + "start": await start_token.to_string(self.store), + "end": await end_token.to_string(self.store), }, "state": ( await self._event_serializer.serialize_events( @@ -447,8 +447,8 @@ async def get_receipts(): "chunk": ( await self._event_serializer.serialize_events(messages, time_now) ), - "start": start_token.to_string(), - "end": end_token.to_string(), + "start": await start_token.to_string(self.store), + "end": await end_token.to_string(self.store), }, "state": state, "presence": presence, diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index d6779a4b4439..2c2a633938ba 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -413,8 +413,8 @@ async def 
get_messages( if not events: return { "chunk": [], - "start": from_token.to_string(), - "end": next_token.to_string(), + "start": await from_token.to_string(self.store), + "end": await next_token.to_string(self.store), } state = None @@ -442,8 +442,8 @@ async def get_messages( events, time_now, as_client_event=as_client_event ) ), - "start": from_token.to_string(), - "end": next_token.to_string(), + "start": await from_token.to_string(self.store), + "end": await next_token.to_string(self.store), } if state: diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 836b3f381a52..d5f7c78edf52 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1077,11 +1077,13 @@ def filter_evts(events): # the token, which we replace. token = StreamToken.START - results["start"] = token.copy_and_replace( + results["start"] = await token.copy_and_replace( "room_key", results["start"] - ).to_string() + ).to_string(self.store) - results["end"] = token.copy_and_replace("room_key", results["end"]).to_string() + results["end"] = await token.copy_and_replace( + "room_key", results["end"] + ).to_string(self.store) return results diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index 6a76c20d7913..e9402e6e2efc 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -362,13 +362,13 @@ async def search(self, user, content, batch=None): self.storage, user.to_string(), res["events_after"] ) - res["start"] = now_token.copy_and_replace( + res["start"] = await now_token.copy_and_replace( "room_key", res["start"] - ).to_string() + ).to_string(self.store) - res["end"] = now_token.copy_and_replace( + res["end"] = await now_token.copy_and_replace( "room_key", res["end"] - ).to_string() + ).to_string(self.store) if include_profile: senders = { diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index ba53f66f027d..57cac22252f7 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -110,7 +110,7 @@ async def on_POST(self, request, room_id, event_id): raise SynapseError(400, "Event is for wrong room.") room_token = await self.store.get_topological_token_for_event(event_id) - token = str(room_token) + token = await room_token.to_string(self.store) logger.info("[purge] purging up to token %s (event_id %s)", token, event_id) elif "purge_up_to_ts" in body: diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py index 985d994f6bb5..1ecb77aa2694 100644 --- a/synapse/rest/client/v1/events.py +++ b/synapse/rest/client/v1/events.py @@ -33,6 +33,7 @@ def __init__(self, hs): super().__init__() self.event_stream_handler = hs.get_event_stream_handler() self.auth = hs.get_auth() + self.store = hs.get_datastore() async def on_GET(self, request): requester = await self.auth.get_user_by_req(request, allow_guest=True) @@ -44,7 +45,7 @@ async def on_GET(self, request): if b"room_id" in request.args: room_id = request.args[b"room_id"][0].decode("ascii") - pagin_config = PaginationConfig.from_request(request) + pagin_config = await PaginationConfig.from_request(self.store, request) timeout = EventStreamRestServlet.DEFAULT_LONGPOLL_TIME_MS if b"timeout" in request.args: try: diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/v1/initial_sync.py index d7042786ce0c..91da0ee57303 100644 --- a/synapse/rest/client/v1/initial_sync.py +++ b/synapse/rest/client/v1/initial_sync.py @@ -27,11 +27,12 @@ def __init__(self, hs): super().__init__() self.initial_sync_handler = 
hs.get_initial_sync_handler() self.auth = hs.get_auth() + self.store = hs.get_datastore() async def on_GET(self, request): requester = await self.auth.get_user_by_req(request) as_client_event = b"raw" not in request.args - pagination_config = PaginationConfig.from_request(request) + pagination_config = await PaginationConfig.from_request(self.store, request) include_archived = parse_boolean(request, "archived", default=False) content = await self.initial_sync_handler.snapshot_all_rooms( user_id=requester.user.to_string(), diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 7e64a2e0fe36..b63389e5fedf 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -451,6 +451,7 @@ def __init__(self, hs): super().__init__() self.message_handler = hs.get_message_handler() self.auth = hs.get_auth() + self.store = hs.get_datastore() async def on_GET(self, request, room_id): # TODO support Pagination stream API (limit/tokens) @@ -465,7 +466,7 @@ async def on_GET(self, request, room_id): if at_token_string is None: at_token = None else: - at_token = StreamToken.from_string(at_token_string) + at_token = await StreamToken.from_string(self.store, at_token_string) # let you filter down on particular memberships. # XXX: this may not be the best shape for this API - we could pass in a filter @@ -521,10 +522,13 @@ def __init__(self, hs): super().__init__() self.pagination_handler = hs.get_pagination_handler() self.auth = hs.get_auth() + self.store = hs.get_datastore() async def on_GET(self, request, room_id): requester = await self.auth.get_user_by_req(request, allow_guest=True) - pagination_config = PaginationConfig.from_request(request, default_limit=10) + pagination_config = await PaginationConfig.from_request( + self.store, request, default_limit=10 + ) as_client_event = b"raw" not in request.args filter_str = parse_string(request, b"filter", encoding="utf-8") if filter_str: @@ -580,10 +584,11 @@ def __init__(self, hs): super().__init__() self.initial_sync_handler = hs.get_initial_sync_handler() self.auth = hs.get_auth() + self.store = hs.get_datastore() async def on_GET(self, request, room_id): requester = await self.auth.get_user_by_req(request, allow_guest=True) - pagination_config = PaginationConfig.from_request(request) + pagination_config = await PaginationConfig.from_request(self.store, request) content = await self.initial_sync_handler.room_initial_sync( room_id=room_id, requester=requester, pagin_config=pagination_config ) diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index 7abd6ff333b2..55c46065694b 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -180,6 +180,7 @@ def __init__(self, hs): super().__init__() self.auth = hs.get_auth() self.device_handler = hs.get_device_handler() + self.store = hs.get_datastore() async def on_GET(self, request): requester = await self.auth.get_user_by_req(request, allow_guest=True) @@ -191,7 +192,7 @@ async def on_GET(self, request): # changes after the "to" as well as before. 
set_tag("to", parse_string(request, "to")) - from_token = StreamToken.from_string(from_token_string) + from_token = await StreamToken.from_string(self.store, from_token_string) user_id = requester.user.to_string() diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 51e395cc6424..6779df952f77 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -77,6 +77,7 @@ def __init__(self, hs): super().__init__() self.hs = hs self.auth = hs.get_auth() + self.store = hs.get_datastore() self.sync_handler = hs.get_sync_handler() self.clock = hs.get_clock() self.filtering = hs.get_filtering() @@ -151,10 +152,9 @@ async def on_GET(self, request): device_id=device_id, ) + since_token = None if since is not None: - since_token = StreamToken.from_string(since) - else: - since_token = None + since_token = await StreamToken.from_string(self.store, since) # send any outstanding server notices to the user. await self._server_notices_sender.on_user_syncing(user.to_string()) @@ -236,7 +236,7 @@ async def encode_response(self, time_now, sync_result, access_token_id, filter): "leave": sync_result.groups.leave, }, "device_one_time_keys_count": sync_result.device_one_time_keys_count, - "next_batch": sync_result.next_batch.to_string(), + "next_batch": await sync_result.next_batch.to_string(self.store), } @staticmethod @@ -413,7 +413,7 @@ def serialize(events): result = { "timeline": { "events": serialized_timeline, - "prev_batch": room.timeline.prev_batch.to_string(), + "prev_batch": await room.timeline.prev_batch.to_string(self.store), "limited": room.timeline.limited, }, "state": {"events": serialized_state}, diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index d7a03cbf7dfc..ecfc6717b33b 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -42,17 +42,17 @@ async def purge_history( The set of state groups that are referenced by deleted events. """ + parsed_token = await RoomStreamToken.parse(self, token) + return await self.db_pool.runInteraction( "purge_history", self._purge_history_txn, room_id, - token, + parsed_token, delete_local_events, ) - def _purge_history_txn(self, txn, room_id, token_str, delete_local_events): - token = RoomStreamToken.parse(token_str) - + def _purge_history_txn(self, txn, room_id, token, delete_local_events): # Tables that should be pruned: # event_auth # event_backward_extremities diff --git a/synapse/streams/config.py b/synapse/streams/config.py index 0bdf846edf62..fdda21d16584 100644 --- a/synapse/streams/config.py +++ b/synapse/streams/config.py @@ -12,7 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- import logging from typing import Optional @@ -21,6 +20,7 @@ from synapse.api.errors import SynapseError from synapse.http.servlet import parse_integer, parse_string from synapse.http.site import SynapseRequest +from synapse.storage.databases.main import DataStore from synapse.types import StreamToken logger = logging.getLogger(__name__) @@ -39,8 +39,9 @@ class PaginationConfig: limit = attr.ib(type=Optional[int]) @classmethod - def from_request( + async def from_request( cls, + store: "DataStore", request: SynapseRequest, raise_invalid_params: bool = True, default_limit: Optional[int] = None, @@ -54,13 +55,13 @@ def from_request( if from_tok == "END": from_tok = None # For backwards compat. elif from_tok: - from_tok = StreamToken.from_string(from_tok) + from_tok = await StreamToken.from_string(store, from_tok) except Exception: raise SynapseError(400, "'from' parameter is invalid") try: if to_tok: - to_tok = StreamToken.from_string(to_tok) + to_tok = await StreamToken.from_string(store, to_tok) except Exception: raise SynapseError(400, "'to' parameter is invalid") diff --git a/synapse/types.py b/synapse/types.py index 02bcc197ec7b..bd271f9f1611 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -18,7 +18,17 @@ import string import sys from collections import namedtuple -from typing import Any, Dict, Mapping, MutableMapping, Optional, Tuple, Type, TypeVar +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Mapping, + MutableMapping, + Optional, + Tuple, + Type, + TypeVar, +) import attr from signedjson.key import decode_verify_key_bytes @@ -26,6 +36,9 @@ from synapse.api.errors import Codes, SynapseError +if TYPE_CHECKING: + from synapse.storage.databases.main import DataStore + # define a version of typing.Collection that works on python 3.5 if sys.version_info[:3] >= (3, 6, 0): from typing import Collection @@ -393,7 +406,7 @@ class RoomStreamToken: stream = attr.ib(type=int, validator=attr.validators.instance_of(int)) @classmethod - def parse(cls, string: str) -> "RoomStreamToken": + async def parse(cls, store: "DataStore", string: str) -> "RoomStreamToken": try: if string[0] == "s": return cls(topological=None, stream=int(string[1:])) @@ -428,7 +441,7 @@ def copy_and_advance(self, other: "RoomStreamToken") -> "RoomStreamToken": def as_tuple(self) -> Tuple[Optional[int], int]: return (self.topological, self.stream) - def __str__(self) -> str: + async def to_string(self, store: "DataStore") -> str: if self.topological is not None: return "t%d-%d" % (self.topological, self.stream) else: @@ -453,18 +466,32 @@ class StreamToken: START = None # type: StreamToken @classmethod - def from_string(cls, string): + async def from_string(cls, store: "DataStore", string: str) -> "StreamToken": try: keys = string.split(cls._SEPARATOR) while len(keys) < len(attr.fields(cls)): # i.e. 
old token from before receipt_key keys.append("0") - return cls(RoomStreamToken.parse(keys[0]), *(int(k) for k in keys[1:])) + return cls( + await RoomStreamToken.parse(store, keys[0]), *(int(k) for k in keys[1:]) + ) except Exception: raise SynapseError(400, "Invalid Token") - def to_string(self): - return self._SEPARATOR.join([str(k) for k in attr.astuple(self, recurse=False)]) + async def to_string(self, store: "DataStore") -> str: + return self._SEPARATOR.join( + [ + await self.room_key.to_string(store), + str(self.presence_key), + str(self.typing_key), + str(self.receipt_key), + str(self.account_data_key), + str(self.push_rules_key), + str(self.to_device_key), + str(self.device_list_key), + str(self.groups_key), + ] + ) @property def room_stream_id(self): @@ -493,7 +520,7 @@ def copy_and_replace(self, key, new_value) -> "StreamToken": return attr.evolve(self, **{key: new_value}) -StreamToken.START = StreamToken.from_string("s0_0") +StreamToken.START = StreamToken(RoomStreamToken(None, 0), 0, 0, 0, 0, 0, 0, 0, 0) @attr.s(slots=True, frozen=True) diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index a3287011e94a..0d809d25d5d4 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -902,16 +902,18 @@ def test_room_messages_purge(self): # Send a first message in the room, which will be removed by the purge. first_event_id = self.helper.send(self.room_id, "message 1")["event_id"] - first_token = str( - self.get_success(store.get_topological_token_for_event(first_event_id)) + first_token = self.get_success( + store.get_topological_token_for_event(first_event_id) ) + first_token_str = self.get_success(first_token.to_string(store)) # Send a second message in the room, which won't be removed, and which we'll # use as the marker to purge events before. second_event_id = self.helper.send(self.room_id, "message 2")["event_id"] - second_token = str( - self.get_success(store.get_topological_token_for_event(second_event_id)) + second_token = self.get_success( + store.get_topological_token_for_event(second_event_id) ) + second_token_str = self.get_success(second_token.to_string(store)) # Send a third event in the room to ensure we don't fall under any edge case # due to our marker being the latest forward extremity in the room. 
@@ -921,7 +923,11 @@ def test_room_messages_purge(self): request, channel = self.make_request( "GET", "/rooms/%s/messages?access_token=x&from=%s&dir=b&filter=%s" - % (self.room_id, second_token, json.dumps({"types": [EventTypes.Message]})), + % ( + self.room_id, + second_token_str, + json.dumps({"types": [EventTypes.Message]}), + ), ) self.render(request) self.assertEqual(channel.code, 200, channel.json_body) @@ -936,7 +942,7 @@ def test_room_messages_purge(self): pagination_handler._purge_history( purge_id=purge_id, room_id=self.room_id, - token=second_token, + token=second_token_str, delete_local_events=True, ) ) @@ -946,7 +952,11 @@ def test_room_messages_purge(self): request, channel = self.make_request( "GET", "/rooms/%s/messages?access_token=x&from=%s&dir=b&filter=%s" - % (self.room_id, second_token, json.dumps({"types": [EventTypes.Message]})), + % ( + self.room_id, + second_token_str, + json.dumps({"types": [EventTypes.Message]}), + ), ) self.render(request) self.assertEqual(channel.code, 200, channel.json_body) @@ -960,7 +970,11 @@ def test_room_messages_purge(self): request, channel = self.make_request( "GET", "/rooms/%s/messages?access_token=x&from=%s&dir=b&filter=%s" - % (self.room_id, first_token, json.dumps({"types": [EventTypes.Message]})), + % ( + self.room_id, + first_token_str, + json.dumps({"types": [EventTypes.Message]}), + ), ) self.render(request) self.assertEqual(channel.code, 200, channel.json_body) diff --git a/tests/storage/test_purge.py b/tests/storage/test_purge.py index 723cd2893354..cc1f3c53c51c 100644 --- a/tests/storage/test_purge.py +++ b/tests/storage/test_purge.py @@ -47,12 +47,15 @@ def test_purge(self): storage = self.hs.get_storage() # Get the topological token - event = str( - self.get_success(store.get_topological_token_for_event(last["event_id"])) + token = self.get_success( + store.get_topological_token_for_event(last["event_id"]) ) + token_str = self.get_success(token.to_string(self.hs.get_datastore())) # Purge everything before this topological token - self.get_success(storage.purge_events.purge_history(self.room_id, event, True)) + self.get_success( + storage.purge_events.purge_history(self.room_id, token_str, True) + ) # 1-3 should fail and last will succeed, meaning that 1-3 are deleted # and last is not. From c1ef579b63a39d8d6fb31ddc1d3cf173eaf7e5b7 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 1 Oct 2020 11:09:12 +0100 Subject: [PATCH 095/245] Add prometheus metrics to track federation delays (#8430) Add a pair of federation metrics to track the delays in sending PDUs to/from particular servers. --- changelog.d/8430.feature | 1 + docs/sample_config.yaml | 12 +++++++++ synapse/config/_util.py | 6 +++-- synapse/config/federation.py | 27 ++++++++++++++++++- synapse/config/homeserver.py | 1 - synapse/config/tls.py | 1 - synapse/federation/federation_server.py | 24 ++++++++++++++++- .../federation/sender/transaction_manager.py | 22 +++++++++++++++ 8 files changed, 88 insertions(+), 6 deletions(-) create mode 100644 changelog.d/8430.feature diff --git a/changelog.d/8430.feature b/changelog.d/8430.feature new file mode 100644 index 000000000000..1f31d42bc1de --- /dev/null +++ b/changelog.d/8430.feature @@ -0,0 +1 @@ +Add prometheus metrics to track federation delays. 
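As a rough illustration of what the new per-domain gauges report (a sketch for context only, not part of the patch; the registry, metric name and `example.com` domain below are made up), a labelled `prometheus_client` Gauge of this kind can be exercised like so:

    # Sketch: update and render a per-domain "last PDU age" gauge.
    import time

    from prometheus_client import CollectorRegistry, Gauge, generate_latest

    registry = CollectorRegistry()
    last_pdu_age = Gauge(
        "example_federation_last_received_pdu_age",
        "Age (in seconds) of the last PDU received from the given domain",
        labelnames=("server_name",),
        registry=registry,
    )

    # Pretend we just handled a PDU whose origin_server_ts was 2.5 seconds ago,
    # and that "example.com" is listed in `federation_metrics_domains`.
    now_ms = int(time.time() * 1000)
    origin_server_ts = now_ms - 2500
    last_pdu_age.labels(server_name="example.com").set((now_ms - origin_server_ts) / 1000)

    print(generate_latest(registry).decode("utf-8"))
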
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 066844b5a997..8a3206e8454a 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -629,6 +629,7 @@ acme: #tls_fingerprints: [{"sha256": ""}] +## Federation ## # Restrict federation to the following whitelist of domains. # N.B. we recommend also firewalling your federation listener to limit @@ -662,6 +663,17 @@ federation_ip_range_blacklist: - 'fe80::/64' - 'fc00::/7' +# Report prometheus metrics on the age of PDUs being sent to and received from +# the following domains. This can be used to give an idea of "delay" on inbound +# and outbound federation, though be aware that any delay can be due to problems +# at either end or with the intermediate network. +# +# By default, no domains are monitored in this way. +# +#federation_metrics_domains: +# - matrix.org +# - example.com + ## Caching ## diff --git a/synapse/config/_util.py b/synapse/config/_util.py index cd31b1c3c9d0..c74969a97741 100644 --- a/synapse/config/_util.py +++ b/synapse/config/_util.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, List +from typing import Any, Iterable import jsonschema @@ -20,7 +20,9 @@ from synapse.types import JsonDict -def validate_config(json_schema: JsonDict, config: Any, config_path: List[str]) -> None: +def validate_config( + json_schema: JsonDict, config: Any, config_path: Iterable[str] +) -> None: """Validates a config setting against a JsonSchema definition This can be used to validate a section of the config file against a schema diff --git a/synapse/config/federation.py b/synapse/config/federation.py index 2c77d8f85bb7..ffd8fca54e05 100644 --- a/synapse/config/federation.py +++ b/synapse/config/federation.py @@ -17,7 +17,8 @@ from netaddr import IPSet -from ._base import Config, ConfigError +from synapse.config._base import Config, ConfigError +from synapse.config._util import validate_config class FederationConfig(Config): @@ -52,8 +53,18 @@ def read_config(self, config, **kwargs): "Invalid range(s) provided in federation_ip_range_blacklist: %s" % e ) + federation_metrics_domains = config.get("federation_metrics_domains") or [] + validate_config( + _METRICS_FOR_DOMAINS_SCHEMA, + federation_metrics_domains, + ("federation_metrics_domains",), + ) + self.federation_metrics_domains = set(federation_metrics_domains) + def generate_config_section(self, config_dir_path, server_name, **kwargs): return """\ + ## Federation ## + # Restrict federation to the following whitelist of domains. # N.B. we recommend also firewalling your federation listener to limit # inbound federation traffic as early as possible, rather than relying @@ -85,4 +96,18 @@ def generate_config_section(self, config_dir_path, server_name, **kwargs): - '::1/128' - 'fe80::/64' - 'fc00::/7' + + # Report prometheus metrics on the age of PDUs being sent to and received from + # the following domains. This can be used to give an idea of "delay" on inbound + # and outbound federation, though be aware that any delay can be due to problems + # at either end or with the intermediate network. + # + # By default, no domains are monitored in this way. 
+ # + #federation_metrics_domains: + # - matrix.org + # - example.com """ + + +_METRICS_FOR_DOMAINS_SCHEMA = {"type": "array", "items": {"type": "string"}} diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index 556e291495f5..be6555452487 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -92,5 +92,4 @@ class HomeServerConfig(RootConfig): TracerConfig, WorkerConfig, RedisConfig, - FederationConfig, ] diff --git a/synapse/config/tls.py b/synapse/config/tls.py index e368ea564d94..9ddb8b546bad 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -471,7 +471,6 @@ def generate_config_section( # or by checking matrix.org/federationtester/api/report?server_name=$host # #tls_fingerprints: [{"sha256": ""}] - """ # Lowercase the string representation of boolean values % { diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 2dcd081cbc24..24329dd0e32f 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -28,7 +28,7 @@ Union, ) -from prometheus_client import Counter, Histogram +from prometheus_client import Counter, Gauge, Histogram from twisted.internet import defer from twisted.internet.abstract import isIPAddress @@ -88,6 +88,13 @@ ) +last_pdu_age_metric = Gauge( + "synapse_federation_last_received_pdu_age", + "The age (in seconds) of the last PDU successfully received from the given domain", + labelnames=("server_name",), +) + + class FederationServer(FederationBase): def __init__(self, hs): super().__init__(hs) @@ -118,6 +125,10 @@ def __init__(self, hs): hs, "state_ids_resp", timeout_ms=30000 ) + self._federation_metrics_domains = ( + hs.get_config().federation.federation_metrics_domains + ) + async def on_backfill_request( self, origin: str, room_id: str, versions: List[str], limit: int ) -> Tuple[int, Dict[str, Any]]: @@ -262,7 +273,11 @@ async def _handle_pdus_in_txn( pdus_by_room = {} # type: Dict[str, List[EventBase]] + newest_pdu_ts = 0 + for p in transaction.pdus: # type: ignore + # FIXME (richardv): I don't think this works: + # https://github.com/matrix-org/synapse/issues/8429 if "unsigned" in p: unsigned = p["unsigned"] if "age" in unsigned: @@ -300,6 +315,9 @@ async def _handle_pdus_in_txn( event = event_from_pdu_json(p, room_version) pdus_by_room.setdefault(room_id, []).append(event) + if event.origin_server_ts > newest_pdu_ts: + newest_pdu_ts = event.origin_server_ts + pdu_results = {} # we can process different rooms in parallel (which is useful if they @@ -340,6 +358,10 @@ async def process_pdus_for_room(room_id: str): process_pdus_for_room, pdus_by_room.keys(), TRANSACTION_CONCURRENCY_LIMIT ) + if newest_pdu_ts and origin in self._federation_metrics_domains: + newest_pdu_age = self._clock.time_msec() - newest_pdu_ts + last_pdu_age_metric.labels(server_name=origin).set(newest_pdu_age / 1000) + return pdu_results async def _handle_edus_in_txn(self, origin: str, transaction: Transaction): diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py index c84072ab730c..3e07f925e00b 100644 --- a/synapse/federation/sender/transaction_manager.py +++ b/synapse/federation/sender/transaction_manager.py @@ -15,6 +15,8 @@ import logging from typing import TYPE_CHECKING, List +from prometheus_client import Gauge + from synapse.api.errors import HttpResponseException from synapse.events import EventBase from synapse.federation.persistence import TransactionActions @@ -34,6 +36,12 @@ 
logger = logging.getLogger(__name__) +last_pdu_age_metric = Gauge( + "synapse_federation_last_sent_pdu_age", + "The age (in seconds) of the last PDU successfully sent to the given domain", + labelnames=("server_name",), +) + class TransactionManager: """Helper class which handles building and sending transactions @@ -48,6 +56,10 @@ def __init__(self, hs: "synapse.server.HomeServer"): self._transaction_actions = TransactionActions(self._store) self._transport_layer = hs.get_federation_transport_client() + self._federation_metrics_domains = ( + hs.get_config().federation.federation_metrics_domains + ) + # HACK to get unique tx id self._next_txn_id = int(self.clock.time_msec()) @@ -119,6 +131,9 @@ async def send_new_transaction( # FIXME (erikj): This is a bit of a hack to make the Pdu age # keys work + # FIXME (richardv): I also believe it no longer works. We (now?) store + # "age_ts" in "unsigned" rather than at the top level. See + # https://github.com/matrix-org/synapse/issues/8429. def json_data_cb(): data = transaction.get_dict() now = int(self.clock.time_msec()) @@ -167,5 +182,12 @@ def json_data_cb(): ) success = False + if success and pdus and destination in self._federation_metrics_domains: + last_pdu = pdus[-1] + last_pdu_age = self.clock.time_msec() - last_pdu.origin_server_ts + last_pdu_age_metric.labels(server_name=destination).set( + last_pdu_age / 1000 + ) + set_tag(tags.ERROR, not success) return success From 4ff0201e6235b8b2efc5ce5a7dc3c479ea96df53 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 1 Oct 2020 08:09:18 -0400 Subject: [PATCH 096/245] Enable mypy checking for unreachable code and fix instances. (#8432) --- changelog.d/8432.misc | 1 + mypy.ini | 1 + synapse/config/tls.py | 18 +++++++++--------- synapse/federation/federation_server.py | 5 ++--- synapse/handlers/directory.py | 2 +- synapse/handlers/room.py | 2 -- synapse/handlers/room_member.py | 2 +- synapse/handlers/sync.py | 2 +- synapse/http/server.py | 4 ++-- synapse/logging/_structured.py | 10 +--------- synapse/push/push_rule_evaluator.py | 4 ++-- synapse/replication/tcp/protocol.py | 10 ++++++---- synapse/state/__init__.py | 2 +- .../storage/databases/main/censor_events.py | 6 +++--- synapse/storage/databases/main/events.py | 18 +++++------------- synapse/storage/databases/main/stream.py | 2 +- synapse/storage/util/id_generators.py | 2 +- 17 files changed, 38 insertions(+), 53 deletions(-) create mode 100644 changelog.d/8432.misc diff --git a/changelog.d/8432.misc b/changelog.d/8432.misc new file mode 100644 index 000000000000..01fdad4caf6a --- /dev/null +++ b/changelog.d/8432.misc @@ -0,0 +1 @@ +Check for unreachable code with mypy. 
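For reference, a minimal sketch of the kind of dead code that `warn_unreachable = True` makes mypy report (illustrative only; the function below is not taken from this patch):

    def display_length(name: str) -> int:
        if isinstance(name, str):
            return len(name)
        # With warn_unreachable enabled, mypy flags this line as
        # "Statement is unreachable": the isinstance() check can never be
        # False for a parameter annotated as str, so this fallback never runs.
        return -1
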
diff --git a/mypy.ini b/mypy.ini index 79867814328b..c283f15b21e3 100644 --- a/mypy.ini +++ b/mypy.ini @@ -6,6 +6,7 @@ check_untyped_defs = True show_error_codes = True show_traceback = True mypy_path = stubs +warn_unreachable = True files = synapse/api, synapse/appservice, diff --git a/synapse/config/tls.py b/synapse/config/tls.py index 9ddb8b546bad..ad37b93c0252 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -18,7 +18,7 @@ import warnings from datetime import datetime from hashlib import sha256 -from typing import List +from typing import List, Optional from unpaddedbase64 import encode_base64 @@ -177,8 +177,8 @@ def read_config(self, config: dict, config_dir_path: str, **kwargs): "use_insecure_ssl_client_just_for_testing_do_not_use" ) - self.tls_certificate = None - self.tls_private_key = None + self.tls_certificate = None # type: Optional[crypto.X509] + self.tls_private_key = None # type: Optional[crypto.PKey] def is_disk_cert_valid(self, allow_self_signed=True): """ @@ -226,12 +226,12 @@ def is_disk_cert_valid(self, allow_self_signed=True): days_remaining = (expires_on - now).days return days_remaining - def read_certificate_from_disk(self, require_cert_and_key): + def read_certificate_from_disk(self, require_cert_and_key: bool): """ Read the certificates and private key from disk. Args: - require_cert_and_key (bool): set to True to throw an error if the certificate + require_cert_and_key: set to True to throw an error if the certificate and key file are not given """ if require_cert_and_key: @@ -479,13 +479,13 @@ def generate_config_section( } ) - def read_tls_certificate(self): + def read_tls_certificate(self) -> crypto.X509: """Reads the TLS certificate from the configured file, and returns it Also checks if it is self-signed, and warns if so Returns: - OpenSSL.crypto.X509: the certificate + The certificate """ cert_path = self.tls_certificate_file logger.info("Loading TLS certificate from %s", cert_path) @@ -504,11 +504,11 @@ def read_tls_certificate(self): return cert - def read_tls_private_key(self): + def read_tls_private_key(self) -> crypto.PKey: """Reads the TLS private key from the configured file, and returns it Returns: - OpenSSL.crypto.PKey: the private key + The private key """ private_key_path = self.tls_private_key_file logger.info("Loading TLS key from %s", private_key_path) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 24329dd0e32f..02f11e120997 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -22,7 +22,6 @@ Callable, Dict, List, - Match, Optional, Tuple, Union, @@ -825,14 +824,14 @@ def server_matches_acl_event(server_name: str, acl_event: EventBase) -> bool: return False -def _acl_entry_matches(server_name: str, acl_entry: str) -> Match: +def _acl_entry_matches(server_name: str, acl_entry: Any) -> bool: if not isinstance(acl_entry, str): logger.warning( "Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry) ) return False regex = glob_to_regex(acl_entry) - return regex.match(server_name) + return bool(regex.match(server_name)) class FederationHandlerRegistry: diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 62aa9a2da8f2..6f15c682405c 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -383,7 +383,7 @@ async def _user_can_delete_alias(self, alias: RoomAlias, user_id: str): """ creator = await self.store.get_room_alias_creator(alias.to_string()) - if creator is not 
None and creator == user_id: + if creator == user_id: return True # Resolve the alias to the corresponding room. diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index d5f7c78edf52..f1a6699cd447 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -962,8 +962,6 @@ async def _generate_room_id( try: random_string = stringutils.random_string(18) gen_room_id = RoomID(random_string, self.hs.hostname).to_string() - if isinstance(gen_room_id, bytes): - gen_room_id = gen_room_id.decode("utf-8") await self.store.store_room( room_id=gen_room_id, room_creator_user_id=creator_id, diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 8feba8c90a39..5ec36f591d78 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -642,7 +642,7 @@ async def copy_user_state_on_room_upgrade( async def send_membership_event( self, - requester: Requester, + requester: Optional[Requester], event: EventBase, context: EventContext, ratelimit: bool = True, diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index bfe2583002db..260ec19b4169 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -87,7 +87,7 @@ class SyncConfig: class TimelineBatch: prev_batch = attr.ib(type=StreamToken) events = attr.ib(type=List[EventBase]) - limited = attr.ib(bool) + limited = attr.ib(type=bool) def __bool__(self) -> bool: """Make the result appear empty if there are no updates. This is used diff --git a/synapse/http/server.py b/synapse/http/server.py index 996a31a9ec0e..09ed74f6ce06 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -257,7 +257,7 @@ async def _async_render(self, request: Request): if isinstance(raw_callback_return, (defer.Deferred, types.CoroutineType)): callback_return = await raw_callback_return else: - callback_return = raw_callback_return + callback_return = raw_callback_return # type: ignore return callback_return @@ -406,7 +406,7 @@ async def _async_render(self, request): if isinstance(raw_callback_return, (defer.Deferred, types.CoroutineType)): callback_return = await raw_callback_return else: - callback_return = raw_callback_return + callback_return = raw_callback_return # type: ignore return callback_return diff --git a/synapse/logging/_structured.py b/synapse/logging/_structured.py index 144506c8f243..0fc2ea609e7e 100644 --- a/synapse/logging/_structured.py +++ b/synapse/logging/_structured.py @@ -12,7 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import logging import os.path import sys @@ -89,14 +88,7 @@ def __call__(self, event: dict) -> None: context = current_context() # Copy the context information to the log event. - if context is not None: - context.copy_to_twisted_log_entry(event) - else: - # If there's no logging context, not even the root one, we might be - # starting up or it might be from non-Synapse code. Log it as if it - # came from the root logger. 
- event["request"] = None - event["scope"] = None + context.copy_to_twisted_log_entry(event) self.observer(event) diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index 709ace01e5b8..3a68ce636fac 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -16,7 +16,7 @@ import logging import re -from typing import Any, Dict, List, Pattern, Union +from typing import Any, Dict, List, Optional, Pattern, Union from synapse.events import EventBase from synapse.types import UserID @@ -181,7 +181,7 @@ def _contains_display_name(self, display_name: str) -> bool: return r.search(body) - def _get_value(self, dotted_key: str) -> str: + def _get_value(self, dotted_key: str) -> Optional[str]: return self._value_cache.get(dotted_key, None) diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index 0b0d204e64bc..a509e599c20e 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -51,10 +51,11 @@ import logging import struct from inspect import isawaitable -from typing import TYPE_CHECKING, List +from typing import TYPE_CHECKING, List, Optional from prometheus_client import Counter +from twisted.internet import task from twisted.protocols.basic import LineOnlyReceiver from twisted.python.failure import Failure @@ -152,9 +153,10 @@ def __init__(self, clock: Clock, handler: "ReplicationCommandHandler"): self.last_received_command = self.clock.time_msec() self.last_sent_command = 0 - self.time_we_closed = None # When we requested the connection be closed + # When we requested the connection be closed + self.time_we_closed = None # type: Optional[int] - self.received_ping = False # Have we reecived a ping from the other side + self.received_ping = False # Have we received a ping from the other side self.state = ConnectionStates.CONNECTING @@ -165,7 +167,7 @@ def __init__(self, clock: Clock, handler: "ReplicationCommandHandler"): self.pending_commands = [] # type: List[Command] # The LoopingCall for sending pings. - self._send_ping_loop = None + self._send_ping_loop = None # type: Optional[task.LoopingCall] # a logcontext which we use for processing incoming commands. We declare it as a # background process so that the CPU stats get reported to prometheus. diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 31082bb16a5d..5b0900aa3cb0 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -738,7 +738,7 @@ def _make_state_cache_entry( # failing that, look for the closest match. 
prev_group = None - delta_ids = None + delta_ids = None # type: Optional[StateMap[str]] for old_group, old_state in state_groups_ids.items(): n_delta_ids = {k: v for k, v in new_state.items() if old_state.get(k) != v} diff --git a/synapse/storage/databases/main/censor_events.py b/synapse/storage/databases/main/censor_events.py index f211ddbaf88e..4bb2b9c28c8e 100644 --- a/synapse/storage/databases/main/censor_events.py +++ b/synapse/storage/databases/main/censor_events.py @@ -21,8 +21,8 @@ from synapse.storage._base import SQLBaseStore from synapse.storage.database import DatabasePool from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore -from synapse.storage.databases.main.events import encode_json from synapse.storage.databases.main.events_worker import EventsWorkerStore +from synapse.util.frozenutils import frozendict_json_encoder if TYPE_CHECKING: from synapse.server import HomeServer @@ -105,7 +105,7 @@ async def _censor_redactions(self): and original_event.internal_metadata.is_redacted() ): # Redaction was allowed - pruned_json = encode_json( + pruned_json = frozendict_json_encoder.encode( prune_event_dict( original_event.room_version, original_event.get_dict() ) @@ -171,7 +171,7 @@ def delete_expired_event_txn(txn): return # Prune the event's dict then convert it to JSON. - pruned_json = encode_json( + pruned_json = frozendict_json_encoder.encode( prune_event_dict(event.room_version, event.get_dict()) ) diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 18def01f5041..78e645592f6d 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -52,16 +52,6 @@ ) -def encode_json(json_object): - """ - Encode a Python object as JSON and return it in a Unicode string. - """ - out = frozendict_json_encoder.encode(json_object) - if isinstance(out, bytes): - out = out.decode("utf8") - return out - - _EventCacheEntry = namedtuple("_EventCacheEntry", ("event", "redacted_event")) @@ -743,7 +733,9 @@ def _update_outliers_txn(self, txn, events_and_contexts): logger.exception("") raise - metadata_json = encode_json(event.internal_metadata.get_dict()) + metadata_json = frozendict_json_encoder.encode( + event.internal_metadata.get_dict() + ) sql = "UPDATE event_json SET internal_metadata = ? WHERE event_id = ?" 
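A minimal sketch (using `MappingProxyType` as a stand-in; not the real `frozendict_json_encoder`) of why the removed `encode_json` wrapper was redundant: a `json.JSONEncoder` whose `default` hook unwraps frozen mappings already returns `str` on Python 3, so the bytes-decoding branch could never run and callers can use the encoder directly.

```python
import json
from types import MappingProxyType  # stand-in for frozendict in this sketch


class FrozenMappingEncoder(json.JSONEncoder):
    """Serialise read-only mappings by converting them to plain dicts."""

    def default(self, obj):
        if isinstance(obj, MappingProxyType):
            return dict(obj)
        return super().default(obj)


encoder = FrozenMappingEncoder()
out = encoder.encode({"content": MappingProxyType({"body": "hello"})})
assert isinstance(out, str)  # .encode() always returns str, never bytes
print(out)
```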
txn.execute(sql, (metadata_json, event.event_id)) @@ -797,10 +789,10 @@ def event_dict(event): { "event_id": event.event_id, "room_id": event.room_id, - "internal_metadata": encode_json( + "internal_metadata": frozendict_json_encoder.encode( event.internal_metadata.get_dict() ), - "json": encode_json(event_dict(event)), + "json": frozendict_json_encoder.encode(event_dict(event)), "format_version": event.format_version, } for event, _ in events_and_contexts diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 37249f1e3f99..1d27439536b7 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -546,7 +546,7 @@ async def get_recent_event_ids_for_room( async def get_room_event_before_stream_ordering( self, room_id: str, stream_ordering: int - ) -> Tuple[int, int, str]: + ) -> Optional[Tuple[int, int, str]]: """Gets details of the first event in a room at or before a stream ordering Args: diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 02fbb656e81c..ec356b2e4f06 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -421,7 +421,7 @@ def _mark_id_as_finished(self, next_id: int): self._unfinished_ids.discard(next_id) self._finished_ids.add(next_id) - new_cur = None + new_cur = None # type: Optional[int] if self._unfinished_ids: # If there are unfinished IDs then the new position will be the From cc40a59b4a94534105667ae95fd5602ebdc57dce Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 1 Oct 2020 13:14:56 +0100 Subject: [PATCH 097/245] 1.21.0 --- CHANGES.md | 102 +++++++++++++++++++++++++++++++++++++++ changelog.d/7124.bugfix | 1 - changelog.d/7796.bugfix | 1 - changelog.d/7905.bugfix | 1 - changelog.d/8004.feature | 1 - changelog.d/8208.misc | 1 - changelog.d/8216.misc | 1 - changelog.d/8217.feature | 1 - changelog.d/8227.doc | 1 - changelog.d/8230.bugfix | 1 - changelog.d/8236.bugfix | 1 - changelog.d/8243.misc | 1 - changelog.d/8247.bugfix | 1 - changelog.d/8248.feature | 1 - changelog.d/8250.misc | 1 - changelog.d/8256.misc | 1 - changelog.d/8257.misc | 1 - changelog.d/8258.bugfix | 1 - changelog.d/8259.misc | 1 - changelog.d/8260.misc | 1 - changelog.d/8261.misc | 1 - changelog.d/8262.bugfix | 1 - changelog.d/8265.bugfix | 1 - changelog.d/8268.bugfix | 1 - changelog.d/8272.bugfix | 1 - changelog.d/8275.feature | 1 - changelog.d/8278.bugfix | 1 - changelog.d/8279.misc | 1 - changelog.d/8281.misc | 1 - changelog.d/8282.misc | 1 - changelog.d/8287.bugfix | 1 - changelog.d/8288.misc | 1 - changelog.d/8294.feature | 1 - changelog.d/8296.misc | 1 - changelog.d/8305.feature | 1 - changelog.d/8306.feature | 1 - changelog.d/8317.feature | 1 - changelog.d/8320.feature | 1 - changelog.d/8322.bugfix | 1 - changelog.d/8324.bugfix | 1 - changelog.d/8326.misc | 1 - changelog.d/8329.bugfix | 1 - changelog.d/8330.misc | 1 - changelog.d/8331.misc | 1 - changelog.d/8335.misc | 1 - changelog.d/8337.misc | 1 - changelog.d/8344.misc | 1 - changelog.d/8345.feature | 1 - changelog.d/8353.bugfix | 1 - changelog.d/8354.misc | 1 - changelog.d/8362.bugfix | 1 - changelog.d/8364.bugfix | 2 - changelog.d/8370.misc | 1 - changelog.d/8371.misc | 1 - changelog.d/8372.misc | 1 - changelog.d/8373.bugfix | 1 - changelog.d/8374.bugfix | 1 - changelog.d/8375.doc | 1 - changelog.d/8377.misc | 1 - changelog.d/8383.misc | 1 - changelog.d/8385.bugfix | 1 - changelog.d/8386.bugfix | 1 - changelog.d/8387.feature | 1 - 
changelog.d/8388.misc | 1 - changelog.d/8396.feature | 1 - changelog.d/8398.bugfix | 1 - changelog.d/8399.misc | 1 - changelog.d/8400.bugfix | 1 - changelog.d/8401.misc | 1 - changelog.d/8402.misc | 1 - changelog.d/8404.misc | 1 - changelog.d/8405.feature | 1 - changelog.d/8406.feature | 1 - changelog.d/8410.bugfix | 1 - changelog.d/8413.feature | 1 - changelog.d/8414.bugfix | 1 - changelog.d/8415.doc | 1 - changelog.d/8417.feature | 1 - changelog.d/8419.feature | 1 - changelog.d/8420.feature | 1 - changelog.d/8422.misc | 1 - changelog.d/8423.misc | 1 - changelog.d/8425.feature | 1 - changelog.d/8426.removal | 1 - changelog.d/8427.misc | 1 - changelog.d/8430.feature | 1 - synapse/__init__.py | 2 +- 87 files changed, 103 insertions(+), 87 deletions(-) delete mode 100644 changelog.d/7124.bugfix delete mode 100644 changelog.d/7796.bugfix delete mode 100644 changelog.d/7905.bugfix delete mode 100644 changelog.d/8004.feature delete mode 100644 changelog.d/8208.misc delete mode 100644 changelog.d/8216.misc delete mode 100644 changelog.d/8217.feature delete mode 100644 changelog.d/8227.doc delete mode 100644 changelog.d/8230.bugfix delete mode 100644 changelog.d/8236.bugfix delete mode 100644 changelog.d/8243.misc delete mode 100644 changelog.d/8247.bugfix delete mode 100644 changelog.d/8248.feature delete mode 100644 changelog.d/8250.misc delete mode 100644 changelog.d/8256.misc delete mode 100644 changelog.d/8257.misc delete mode 100644 changelog.d/8258.bugfix delete mode 100644 changelog.d/8259.misc delete mode 100644 changelog.d/8260.misc delete mode 100644 changelog.d/8261.misc delete mode 100644 changelog.d/8262.bugfix delete mode 100644 changelog.d/8265.bugfix delete mode 100644 changelog.d/8268.bugfix delete mode 100644 changelog.d/8272.bugfix delete mode 100644 changelog.d/8275.feature delete mode 100644 changelog.d/8278.bugfix delete mode 100644 changelog.d/8279.misc delete mode 100644 changelog.d/8281.misc delete mode 100644 changelog.d/8282.misc delete mode 100644 changelog.d/8287.bugfix delete mode 100644 changelog.d/8288.misc delete mode 100644 changelog.d/8294.feature delete mode 100644 changelog.d/8296.misc delete mode 100644 changelog.d/8305.feature delete mode 100644 changelog.d/8306.feature delete mode 100644 changelog.d/8317.feature delete mode 100644 changelog.d/8320.feature delete mode 100644 changelog.d/8322.bugfix delete mode 100644 changelog.d/8324.bugfix delete mode 100644 changelog.d/8326.misc delete mode 100644 changelog.d/8329.bugfix delete mode 100644 changelog.d/8330.misc delete mode 100644 changelog.d/8331.misc delete mode 100644 changelog.d/8335.misc delete mode 100644 changelog.d/8337.misc delete mode 100644 changelog.d/8344.misc delete mode 100644 changelog.d/8345.feature delete mode 100644 changelog.d/8353.bugfix delete mode 100644 changelog.d/8354.misc delete mode 100644 changelog.d/8362.bugfix delete mode 100644 changelog.d/8364.bugfix delete mode 100644 changelog.d/8370.misc delete mode 100644 changelog.d/8371.misc delete mode 100644 changelog.d/8372.misc delete mode 100644 changelog.d/8373.bugfix delete mode 100644 changelog.d/8374.bugfix delete mode 100644 changelog.d/8375.doc delete mode 100644 changelog.d/8377.misc delete mode 100644 changelog.d/8383.misc delete mode 100644 changelog.d/8385.bugfix delete mode 100644 changelog.d/8386.bugfix delete mode 100644 changelog.d/8387.feature delete mode 100644 changelog.d/8388.misc delete mode 100644 changelog.d/8396.feature delete mode 100644 changelog.d/8398.bugfix delete mode 100644 changelog.d/8399.misc 
delete mode 100644 changelog.d/8400.bugfix delete mode 100644 changelog.d/8401.misc delete mode 100644 changelog.d/8402.misc delete mode 100644 changelog.d/8404.misc delete mode 100644 changelog.d/8405.feature delete mode 100644 changelog.d/8406.feature delete mode 100644 changelog.d/8410.bugfix delete mode 100644 changelog.d/8413.feature delete mode 100644 changelog.d/8414.bugfix delete mode 100644 changelog.d/8415.doc delete mode 100644 changelog.d/8417.feature delete mode 100644 changelog.d/8419.feature delete mode 100644 changelog.d/8420.feature delete mode 100644 changelog.d/8422.misc delete mode 100644 changelog.d/8423.misc delete mode 100644 changelog.d/8425.feature delete mode 100644 changelog.d/8426.removal delete mode 100644 changelog.d/8427.misc delete mode 100644 changelog.d/8430.feature diff --git a/CHANGES.md b/CHANGES.md index 5de819ea1e4d..38906ade49ed 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,105 @@ +Synapse 1.21.0 (2020-10-01) +=========================== + +Features +-------- + +- Require the user to confirm that their password should be reset after clicking the email confirmation link. ([\#8004](https://github.com/matrix-org/synapse/issues/8004)) +- Add an admin API `GET /_synapse/admin/v1/event_reports` to read entries of table `event_reports`. Contributed by @dklimpel. ([\#8217](https://github.com/matrix-org/synapse/issues/8217)) +- Consolidate the SSO error template across all configuration. ([\#8248](https://github.com/matrix-org/synapse/issues/8248), [\#8405](https://github.com/matrix-org/synapse/issues/8405)) +- Add a configuration option to specify a whitelist of domains that a user can be redirected to after validating their email or phone number. ([\#8275](https://github.com/matrix-org/synapse/issues/8275), [\#8417](https://github.com/matrix-org/synapse/issues/8417)) +- Add experimental support for sharding event persister. ([\#8294](https://github.com/matrix-org/synapse/issues/8294), [\#8387](https://github.com/matrix-org/synapse/issues/8387), [\#8396](https://github.com/matrix-org/synapse/issues/8396), [\#8419](https://github.com/matrix-org/synapse/issues/8419)) +- Add the room topic and avatar to the room details admin API. ([\#8305](https://github.com/matrix-org/synapse/issues/8305)) +- Add an admin API for querying rooms where a user is a member. Contributed by @dklimpel. ([\#8306](https://github.com/matrix-org/synapse/issues/8306)) +- Add `uk.half-shot.msc2778.login.application_service` login type to allow appservices to login. ([\#8320](https://github.com/matrix-org/synapse/issues/8320)) +- Add a configuration option that allows existing users to log in with OpenID Connect. Contributed by @BBBSnowball and @OmmyZhang. ([\#8345](https://github.com/matrix-org/synapse/issues/8345)) +- Add prometheus metrics for replication requests. ([\#8406](https://github.com/matrix-org/synapse/issues/8406)) +- Support passing additional single sign-on parameters to the client. ([\#8413](https://github.com/matrix-org/synapse/issues/8413)) +- Add experimental reporting of metrics on expensive rooms for state-resolution. ([\#8420](https://github.com/matrix-org/synapse/issues/8420)) +- Add experimental prometheus metric to track numbers of "large" rooms for state resolutiom. ([\#8425](https://github.com/matrix-org/synapse/issues/8425)) +- Add prometheus metrics to track federation delays. 
([\#8430](https://github.com/matrix-org/synapse/issues/8430)) + + +Bugfixes +-------- + +- Fix a bug in the media repository where remote thumbnails with the same size but different crop methods would overwrite each other. Contributed by @deepbluev7. ([\#7124](https://github.com/matrix-org/synapse/issues/7124)) +- Fix inconsistent handling of non-existent push rules, and stop tracking the `enabled` state of removed push rules. ([\#7796](https://github.com/matrix-org/synapse/issues/7796)) +- Fix a longstanding bug when storing a media file with an empty `upload_name`. ([\#7905](https://github.com/matrix-org/synapse/issues/7905)) +- Fix messages not being sent over federation until an event is sent into the same room. ([\#8230](https://github.com/matrix-org/synapse/issues/8230), [\#8247](https://github.com/matrix-org/synapse/issues/8247), [\#8258](https://github.com/matrix-org/synapse/issues/8258), [\#8272](https://github.com/matrix-org/synapse/issues/8272), [\#8322](https://github.com/matrix-org/synapse/issues/8322)) +- Fix a longstanding bug where files that could not be thumbnailed would result in an Internal Server Error. ([\#8236](https://github.com/matrix-org/synapse/issues/8236)) +- Upgrade minimum version of `canonicaljson` to version 1.4.0, to fix an unicode encoding issue. ([\#8262](https://github.com/matrix-org/synapse/issues/8262)) +- Fix logstanding bug which could lead to incomplete database upgrades on SQLite. ([\#8265](https://github.com/matrix-org/synapse/issues/8265)) +- Fix stack overflow when stderr is redirected to the logging system, and the logging system encounters an error. ([\#8268](https://github.com/matrix-org/synapse/issues/8268)) +- Fix a bug which cause the logging system to report errors, if `DEBUG` was enabled and no `context` filter was applied. ([\#8278](https://github.com/matrix-org/synapse/issues/8278)) +- Fix edge case where push could get delayed for a user until a later event was pushed. ([\#8287](https://github.com/matrix-org/synapse/issues/8287)) +- Fix fetching malformed events from remote servers. ([\#8324](https://github.com/matrix-org/synapse/issues/8324)) +- Fix `UnboundLocalError` from occuring when appservices send a malformed register request. ([\#8329](https://github.com/matrix-org/synapse/issues/8329)) +- Don't send push notifications to expired user accounts. ([\#8353](https://github.com/matrix-org/synapse/issues/8353)) +- Fix a regression in v1.19.0 with reactivating users through the admin API. ([\#8362](https://github.com/matrix-org/synapse/issues/8362)) +- Fix a bug where during device registration the length of the device name wasn't limited. ([\#8364](https://github.com/matrix-org/synapse/issues/8364)) +- Include `guest_access` in the fields that are checked for null bytes when updating `room_stats_state`. Broke in v1.7.2. ([\#8373](https://github.com/matrix-org/synapse/issues/8373)) +- Fix theoretical race condition where events are not sent down `/sync` if the synchrotron worker is restarted without restarting other workers. ([\#8374](https://github.com/matrix-org/synapse/issues/8374)) +- Fix a bug which could cause errors in rooms with malformed membership events, on servers using sqlite. ([\#8385](https://github.com/matrix-org/synapse/issues/8385)) +- Fix a bug introduced in v1.20.0 which caused the `synapse_port_db` script to fail. ([\#8386](https://github.com/matrix-org/synapse/issues/8386)) +- Fix "Re-starting finished log context" warning when receiving an event we already had over federation. 
([\#8398](https://github.com/matrix-org/synapse/issues/8398)) +- Fix incorrect handling of timeouts on outgoing HTTP requests. ([\#8400](https://github.com/matrix-org/synapse/issues/8400)) +- Fix a regression in v1.20.0 in the `synapse_port_db` script regarding the `ui_auth_sessions_ips` table. ([\#8410](https://github.com/matrix-org/synapse/issues/8410)) +- Remove unnecessary 3PID registration check when resetting password via an email address. Bug introduced in v0.34.0rc2. ([\#8414](https://github.com/matrix-org/synapse/issues/8414)) + + +Improved Documentation +---------------------- + +- Add `/_synapse/client` to the reverse proxy documentation. ([\#8227](https://github.com/matrix-org/synapse/issues/8227)) +- Add note to the reverse proxy settings documentation about disabling Apache's mod_security2. Contributed by Julian Fietkau (@jfietkau). ([\#8375](https://github.com/matrix-org/synapse/issues/8375)) +- Improve description of `server_name` config option in `homserver.yaml`. ([\#8415](https://github.com/matrix-org/synapse/issues/8415)) + + +Deprecations and Removals +------------------------- + +- Drop support for `prometheus_client` older than 0.4.0. ([\#8426](https://github.com/matrix-org/synapse/issues/8426)) + + +Internal Changes +---------------- + +- Fix tests on distros which disable TLSv1.0. Contributed by @danc86. ([\#8208](https://github.com/matrix-org/synapse/issues/8208)) +- Simplify the distributor code to avoid unnecessary work. ([\#8216](https://github.com/matrix-org/synapse/issues/8216)) +- Remove the `populate_stats_process_rooms_2` background job and restore functionality to `populate_stats_process_rooms`. ([\#8243](https://github.com/matrix-org/synapse/issues/8243)) +- Clean up type hints for `PaginationConfig`. ([\#8250](https://github.com/matrix-org/synapse/issues/8250), [\#8282](https://github.com/matrix-org/synapse/issues/8282)) +- Track the latest event for every destination and room for catch-up after federation outage. ([\#8256](https://github.com/matrix-org/synapse/issues/8256)) +- Fix non-user visible bug in implementation of `MultiWriterIdGenerator.get_current_token_for_writer`. ([\#8257](https://github.com/matrix-org/synapse/issues/8257)) +- Switch to the JSON implementation from the standard library. ([\#8259](https://github.com/matrix-org/synapse/issues/8259)) +- Add type hints to `synapse.util.async_helpers`. ([\#8260](https://github.com/matrix-org/synapse/issues/8260)) +- Simplify tests that mock asynchronous functions. ([\#8261](https://github.com/matrix-org/synapse/issues/8261)) +- Add type hints to `StreamToken` and `RoomStreamToken` classes. ([\#8279](https://github.com/matrix-org/synapse/issues/8279)) +- Change `StreamToken.room_key` to be a `RoomStreamToken` instance. ([\#8281](https://github.com/matrix-org/synapse/issues/8281)) +- Refactor notifier code to correctly use the max event stream position. ([\#8288](https://github.com/matrix-org/synapse/issues/8288)) +- Use slotted classes where possible. ([\#8296](https://github.com/matrix-org/synapse/issues/8296)) +- Support testing the local Synapse checkout against the [Complement homeserver test suite](https://github.com/matrix-org/complement/). ([\#8317](https://github.com/matrix-org/synapse/issues/8317)) +- Update outdated usages of `metaclass` to python 3 syntax. ([\#8326](https://github.com/matrix-org/synapse/issues/8326)) +- Move lint-related dependencies to package-extra field, update CONTRIBUTING.md to utilise this. 
([\#8330](https://github.com/matrix-org/synapse/issues/8330), [\#8377](https://github.com/matrix-org/synapse/issues/8377)) +- Use the `admin_patterns` helper in additional locations. ([\#8331](https://github.com/matrix-org/synapse/issues/8331)) +- Fix test logging to allow braces in log output. ([\#8335](https://github.com/matrix-org/synapse/issues/8335)) +- Remove `__future__` imports related to Python 2 compatibility. ([\#8337](https://github.com/matrix-org/synapse/issues/8337)) +- Simplify `super()` calls to Python 3 syntax. ([\#8344](https://github.com/matrix-org/synapse/issues/8344)) +- Fix bad merge from `release-v1.20.0` branch to `develop`. ([\#8354](https://github.com/matrix-org/synapse/issues/8354)) +- Factor out a `_send_dummy_event_for_room` method. ([\#8370](https://github.com/matrix-org/synapse/issues/8370)) +- Improve logging of state resolution. ([\#8371](https://github.com/matrix-org/synapse/issues/8371)) +- Add type annotations to `SimpleHttpClient`. ([\#8372](https://github.com/matrix-org/synapse/issues/8372)) +- Refactor ID generators to use `async with` syntax. ([\#8383](https://github.com/matrix-org/synapse/issues/8383)) +- Add `EventStreamPosition` type. ([\#8388](https://github.com/matrix-org/synapse/issues/8388)) +- Create a mechanism for marking tests "logcontext clean". ([\#8399](https://github.com/matrix-org/synapse/issues/8399)) +- A pair of tiny cleanups in the federation request code. ([\#8401](https://github.com/matrix-org/synapse/issues/8401)) +- Add checks on startup that PostgreSQL sequences are consistent with their associated tables. ([\#8402](https://github.com/matrix-org/synapse/issues/8402)) +- Do not include appservice users when calculating the total MAU for a server. ([\#8404](https://github.com/matrix-org/synapse/issues/8404)) +- Typing fixes for `synapse.handlers.federation`. ([\#8422](https://github.com/matrix-org/synapse/issues/8422)) +- Various refactors to simplify stream token handling. ([\#8423](https://github.com/matrix-org/synapse/issues/8423)) +- Make stream token serializing/deserializing async. ([\#8427](https://github.com/matrix-org/synapse/issues/8427)) + + Synapse 1.20.1 (2020-09-24) =========================== diff --git a/changelog.d/7124.bugfix b/changelog.d/7124.bugfix deleted file mode 100644 index 8fd177780d7d..000000000000 --- a/changelog.d/7124.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug in the media repository where remote thumbnails with the same size but different crop methods would overwrite each other. Contributed by @deepbluev7. diff --git a/changelog.d/7796.bugfix b/changelog.d/7796.bugfix deleted file mode 100644 index 65e5eb42a248..000000000000 --- a/changelog.d/7796.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix inconsistent handling of non-existent push rules, and stop tracking the `enabled` state of removed push rules. diff --git a/changelog.d/7905.bugfix b/changelog.d/7905.bugfix deleted file mode 100644 index e60e62441210..000000000000 --- a/changelog.d/7905.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a longstanding bug when storing a media file with an empty `upload_name`. diff --git a/changelog.d/8004.feature b/changelog.d/8004.feature deleted file mode 100644 index a91b75e0e0fd..000000000000 --- a/changelog.d/8004.feature +++ /dev/null @@ -1 +0,0 @@ -Require the user to confirm that their password should be reset after clicking the email confirmation link. 
\ No newline at end of file diff --git a/changelog.d/8208.misc b/changelog.d/8208.misc deleted file mode 100644 index e65da88c4643..000000000000 --- a/changelog.d/8208.misc +++ /dev/null @@ -1 +0,0 @@ -Fix tests on distros which disable TLSv1.0. Contributed by @danc86. diff --git a/changelog.d/8216.misc b/changelog.d/8216.misc deleted file mode 100644 index b38911b0e582..000000000000 --- a/changelog.d/8216.misc +++ /dev/null @@ -1 +0,0 @@ -Simplify the distributor code to avoid unnecessary work. diff --git a/changelog.d/8217.feature b/changelog.d/8217.feature deleted file mode 100644 index 899cbf14ef56..000000000000 --- a/changelog.d/8217.feature +++ /dev/null @@ -1 +0,0 @@ -Add an admin API `GET /_synapse/admin/v1/event_reports` to read entries of table `event_reports`. Contributed by @dklimpel. \ No newline at end of file diff --git a/changelog.d/8227.doc b/changelog.d/8227.doc deleted file mode 100644 index 4a43015a8306..000000000000 --- a/changelog.d/8227.doc +++ /dev/null @@ -1 +0,0 @@ -Add `/_synapse/client` to the reverse proxy documentation. diff --git a/changelog.d/8230.bugfix b/changelog.d/8230.bugfix deleted file mode 100644 index 532d0e22fefb..000000000000 --- a/changelog.d/8230.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix messages over federation being lost until an event is sent into the same room. diff --git a/changelog.d/8236.bugfix b/changelog.d/8236.bugfix deleted file mode 100644 index 6f048710159f..000000000000 --- a/changelog.d/8236.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a longstanding bug where files that could not be thumbnailed would result in an Internal Server Error. diff --git a/changelog.d/8243.misc b/changelog.d/8243.misc deleted file mode 100644 index f7375d32d33e..000000000000 --- a/changelog.d/8243.misc +++ /dev/null @@ -1 +0,0 @@ -Remove the 'populate_stats_process_rooms_2' background job and restore functionality to 'populate_stats_process_rooms'. \ No newline at end of file diff --git a/changelog.d/8247.bugfix b/changelog.d/8247.bugfix deleted file mode 100644 index 532d0e22fefb..000000000000 --- a/changelog.d/8247.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix messages over federation being lost until an event is sent into the same room. diff --git a/changelog.d/8248.feature b/changelog.d/8248.feature deleted file mode 100644 index f3c4a74bc79b..000000000000 --- a/changelog.d/8248.feature +++ /dev/null @@ -1 +0,0 @@ -Consolidate the SSO error template across all configuration. diff --git a/changelog.d/8250.misc b/changelog.d/8250.misc deleted file mode 100644 index b6896a9300d5..000000000000 --- a/changelog.d/8250.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up type hints for `PaginationConfig`. diff --git a/changelog.d/8256.misc b/changelog.d/8256.misc deleted file mode 100644 index bf0ba767307d..000000000000 --- a/changelog.d/8256.misc +++ /dev/null @@ -1 +0,0 @@ -Track the latest event for every destination and room for catch-up after federation outage. diff --git a/changelog.d/8257.misc b/changelog.d/8257.misc deleted file mode 100644 index 47ac583eb4f2..000000000000 --- a/changelog.d/8257.misc +++ /dev/null @@ -1 +0,0 @@ -Fix non-user visible bug in implementation of `MultiWriterIdGenerator.get_current_token_for_writer`. diff --git a/changelog.d/8258.bugfix b/changelog.d/8258.bugfix deleted file mode 100644 index 532d0e22fefb..000000000000 --- a/changelog.d/8258.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix messages over federation being lost until an event is sent into the same room. 
diff --git a/changelog.d/8259.misc b/changelog.d/8259.misc deleted file mode 100644 index a26779a664c7..000000000000 --- a/changelog.d/8259.misc +++ /dev/null @@ -1 +0,0 @@ -Switch to the JSON implementation from the standard library. diff --git a/changelog.d/8260.misc b/changelog.d/8260.misc deleted file mode 100644 index 164eea8b59eb..000000000000 --- a/changelog.d/8260.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to `synapse.util.async_helpers`. diff --git a/changelog.d/8261.misc b/changelog.d/8261.misc deleted file mode 100644 index bc91e9375ce0..000000000000 --- a/changelog.d/8261.misc +++ /dev/null @@ -1 +0,0 @@ -Simplify tests that mock asynchronous functions. diff --git a/changelog.d/8262.bugfix b/changelog.d/8262.bugfix deleted file mode 100644 index 2b84927de3ee..000000000000 --- a/changelog.d/8262.bugfix +++ /dev/null @@ -1 +0,0 @@ -Upgrade canonicaljson to version 1.4.0 to fix an unicode encoding issue. diff --git a/changelog.d/8265.bugfix b/changelog.d/8265.bugfix deleted file mode 100644 index 981a836d218c..000000000000 --- a/changelog.d/8265.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix logstanding bug which could lead to incomplete database upgrades on SQLite. diff --git a/changelog.d/8268.bugfix b/changelog.d/8268.bugfix deleted file mode 100644 index 4b15a60253b7..000000000000 --- a/changelog.d/8268.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix stack overflow when stderr is redirected to the logging system, and the logging system encounters an error. diff --git a/changelog.d/8272.bugfix b/changelog.d/8272.bugfix deleted file mode 100644 index 532d0e22fefb..000000000000 --- a/changelog.d/8272.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix messages over federation being lost until an event is sent into the same room. diff --git a/changelog.d/8275.feature b/changelog.d/8275.feature deleted file mode 100644 index 17549c3df39a..000000000000 --- a/changelog.d/8275.feature +++ /dev/null @@ -1 +0,0 @@ -Add a config option to specify a whitelist of domains that a user can be redirected to after validating their email or phone number. \ No newline at end of file diff --git a/changelog.d/8278.bugfix b/changelog.d/8278.bugfix deleted file mode 100644 index 50e40ca2a9da..000000000000 --- a/changelog.d/8278.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug which cause the logging system to report errors, if `DEBUG` was enabled and no `context` filter was applied. diff --git a/changelog.d/8279.misc b/changelog.d/8279.misc deleted file mode 100644 index 99f669001f18..000000000000 --- a/changelog.d/8279.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to `StreamToken` and `RoomStreamToken` classes. diff --git a/changelog.d/8281.misc b/changelog.d/8281.misc deleted file mode 100644 index 74357120a726..000000000000 --- a/changelog.d/8281.misc +++ /dev/null @@ -1 +0,0 @@ -Change `StreamToken.room_key` to be a `RoomStreamToken` instance. diff --git a/changelog.d/8282.misc b/changelog.d/8282.misc deleted file mode 100644 index b6896a9300d5..000000000000 --- a/changelog.d/8282.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up type hints for `PaginationConfig`. diff --git a/changelog.d/8287.bugfix b/changelog.d/8287.bugfix deleted file mode 100644 index 839781aa0753..000000000000 --- a/changelog.d/8287.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix edge case where push could get delayed for a user until a later event was pushed. 
diff --git a/changelog.d/8288.misc b/changelog.d/8288.misc deleted file mode 100644 index c08a53a5ee27..000000000000 --- a/changelog.d/8288.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor notifier code to correctly use the max event stream position. diff --git a/changelog.d/8294.feature b/changelog.d/8294.feature deleted file mode 100644 index b363e929ea8c..000000000000 --- a/changelog.d/8294.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental support for sharding event persister. diff --git a/changelog.d/8296.misc b/changelog.d/8296.misc deleted file mode 100644 index f593a5b34778..000000000000 --- a/changelog.d/8296.misc +++ /dev/null @@ -1 +0,0 @@ -Use slotted classes where possible. diff --git a/changelog.d/8305.feature b/changelog.d/8305.feature deleted file mode 100644 index 862dfdf95986..000000000000 --- a/changelog.d/8305.feature +++ /dev/null @@ -1 +0,0 @@ -Add the room topic and avatar to the room details admin API. diff --git a/changelog.d/8306.feature b/changelog.d/8306.feature deleted file mode 100644 index 5c23da4030ff..000000000000 --- a/changelog.d/8306.feature +++ /dev/null @@ -1 +0,0 @@ -Add an admin API for querying rooms where a user is a member. Contributed by @dklimpel. \ No newline at end of file diff --git a/changelog.d/8317.feature b/changelog.d/8317.feature deleted file mode 100644 index f9edda099c40..000000000000 --- a/changelog.d/8317.feature +++ /dev/null @@ -1 +0,0 @@ -Support testing the local Synapse checkout against the [Complement homeserver test suite](https://github.com/matrix-org/complement/). \ No newline at end of file diff --git a/changelog.d/8320.feature b/changelog.d/8320.feature deleted file mode 100644 index 475a5fe62d97..000000000000 --- a/changelog.d/8320.feature +++ /dev/null @@ -1 +0,0 @@ -Add `uk.half-shot.msc2778.login.application_service` login type to allow appservices to login. diff --git a/changelog.d/8322.bugfix b/changelog.d/8322.bugfix deleted file mode 100644 index 532d0e22fefb..000000000000 --- a/changelog.d/8322.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix messages over federation being lost until an event is sent into the same room. diff --git a/changelog.d/8324.bugfix b/changelog.d/8324.bugfix deleted file mode 100644 index 32788a92848c..000000000000 --- a/changelog.d/8324.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix fetching events from remote servers that are malformed. diff --git a/changelog.d/8326.misc b/changelog.d/8326.misc deleted file mode 100644 index 985d2c027aa9..000000000000 --- a/changelog.d/8326.misc +++ /dev/null @@ -1 +0,0 @@ -Update outdated usages of `metaclass` to python 3 syntax. \ No newline at end of file diff --git a/changelog.d/8329.bugfix b/changelog.d/8329.bugfix deleted file mode 100644 index 2f71f1f4b956..000000000000 --- a/changelog.d/8329.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix UnboundLocalError from occuring when appservices send malformed register request. \ No newline at end of file diff --git a/changelog.d/8330.misc b/changelog.d/8330.misc deleted file mode 100644 index fbfdd524730a..000000000000 --- a/changelog.d/8330.misc +++ /dev/null @@ -1 +0,0 @@ -Move lint-related dependencies to package-extra field, update CONTRIBUTING.md to utilise this. diff --git a/changelog.d/8331.misc b/changelog.d/8331.misc deleted file mode 100644 index 0e1bae20efa3..000000000000 --- a/changelog.d/8331.misc +++ /dev/null @@ -1 +0,0 @@ -Use the `admin_patterns` helper in additional locations. 
diff --git a/changelog.d/8335.misc b/changelog.d/8335.misc deleted file mode 100644 index 7e0a4c7d839f..000000000000 --- a/changelog.d/8335.misc +++ /dev/null @@ -1 +0,0 @@ -Fix test logging to allow braces in log output. \ No newline at end of file diff --git a/changelog.d/8337.misc b/changelog.d/8337.misc deleted file mode 100644 index 4daf27220443..000000000000 --- a/changelog.d/8337.misc +++ /dev/null @@ -1 +0,0 @@ -Remove `__future__` imports related to Python 2 compatibility. \ No newline at end of file diff --git a/changelog.d/8344.misc b/changelog.d/8344.misc deleted file mode 100644 index 0b342d513727..000000000000 --- a/changelog.d/8344.misc +++ /dev/null @@ -1 +0,0 @@ -Simplify `super()` calls to Python 3 syntax. diff --git a/changelog.d/8345.feature b/changelog.d/8345.feature deleted file mode 100644 index 4ee5b6a56e37..000000000000 --- a/changelog.d/8345.feature +++ /dev/null @@ -1 +0,0 @@ -Add a configuration option that allows existing users to log in with OpenID Connect. Contributed by @BBBSnowball and @OmmyZhang. diff --git a/changelog.d/8353.bugfix b/changelog.d/8353.bugfix deleted file mode 100644 index 45fc0adb8dd5..000000000000 --- a/changelog.d/8353.bugfix +++ /dev/null @@ -1 +0,0 @@ -Don't send push notifications to expired user accounts. diff --git a/changelog.d/8354.misc b/changelog.d/8354.misc deleted file mode 100644 index 1d33cde2da6c..000000000000 --- a/changelog.d/8354.misc +++ /dev/null @@ -1 +0,0 @@ -Fix bad merge from `release-v1.20.0` branch to `develop`. diff --git a/changelog.d/8362.bugfix b/changelog.d/8362.bugfix deleted file mode 100644 index 4e50067c87b5..000000000000 --- a/changelog.d/8362.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fixed a regression in v1.19.0 with reactivating users through the admin API. diff --git a/changelog.d/8364.bugfix b/changelog.d/8364.bugfix deleted file mode 100644 index 7b82cbc3881c..000000000000 --- a/changelog.d/8364.bugfix +++ /dev/null @@ -1,2 +0,0 @@ -Fix a bug where during device registration the length of the device name wasn't -limited. diff --git a/changelog.d/8370.misc b/changelog.d/8370.misc deleted file mode 100644 index 1aaac1e0bf90..000000000000 --- a/changelog.d/8370.misc +++ /dev/null @@ -1 +0,0 @@ -Factor out a `_send_dummy_event_for_room` method. diff --git a/changelog.d/8371.misc b/changelog.d/8371.misc deleted file mode 100644 index 6a54a9496afb..000000000000 --- a/changelog.d/8371.misc +++ /dev/null @@ -1 +0,0 @@ -Improve logging of state resolution. diff --git a/changelog.d/8372.misc b/changelog.d/8372.misc deleted file mode 100644 index a56e36de4be1..000000000000 --- a/changelog.d/8372.misc +++ /dev/null @@ -1 +0,0 @@ -Add type annotations to `SimpleHttpClient`. diff --git a/changelog.d/8373.bugfix b/changelog.d/8373.bugfix deleted file mode 100644 index e9d66a2088c4..000000000000 --- a/changelog.d/8373.bugfix +++ /dev/null @@ -1 +0,0 @@ -Include `guest_access` in the fields that are checked for null bytes when updating `room_stats_state`. Broke in v1.7.2. \ No newline at end of file diff --git a/changelog.d/8374.bugfix b/changelog.d/8374.bugfix deleted file mode 100644 index 155bc3404f52..000000000000 --- a/changelog.d/8374.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix theoretical race condition where events are not sent down `/sync` if the synchrotron worker is restarted without restarting other workers. 
diff --git a/changelog.d/8375.doc b/changelog.d/8375.doc deleted file mode 100644 index d291fb92fa18..000000000000 --- a/changelog.d/8375.doc +++ /dev/null @@ -1 +0,0 @@ -Add note to the reverse proxy settings documentation about disabling Apache's mod_security2. Contributed by Julian Fietkau (@jfietkau). diff --git a/changelog.d/8377.misc b/changelog.d/8377.misc deleted file mode 100644 index fbfdd524730a..000000000000 --- a/changelog.d/8377.misc +++ /dev/null @@ -1 +0,0 @@ -Move lint-related dependencies to package-extra field, update CONTRIBUTING.md to utilise this. diff --git a/changelog.d/8383.misc b/changelog.d/8383.misc deleted file mode 100644 index cb8318bf5704..000000000000 --- a/changelog.d/8383.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor ID generators to use `async with` syntax. diff --git a/changelog.d/8385.bugfix b/changelog.d/8385.bugfix deleted file mode 100644 index c42502a8e020..000000000000 --- a/changelog.d/8385.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug which could cause errors in rooms with malformed membership events, on servers using sqlite. diff --git a/changelog.d/8386.bugfix b/changelog.d/8386.bugfix deleted file mode 100644 index 24983a1e950f..000000000000 --- a/changelog.d/8386.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in v1.20.0 which caused the `synapse_port_db` script to fail. diff --git a/changelog.d/8387.feature b/changelog.d/8387.feature deleted file mode 100644 index b363e929ea8c..000000000000 --- a/changelog.d/8387.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental support for sharding event persister. diff --git a/changelog.d/8388.misc b/changelog.d/8388.misc deleted file mode 100644 index aaaef88b661e..000000000000 --- a/changelog.d/8388.misc +++ /dev/null @@ -1 +0,0 @@ -Add `EventStreamPosition` type. diff --git a/changelog.d/8396.feature b/changelog.d/8396.feature deleted file mode 100644 index b363e929ea8c..000000000000 --- a/changelog.d/8396.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental support for sharding event persister. diff --git a/changelog.d/8398.bugfix b/changelog.d/8398.bugfix deleted file mode 100644 index e432aeebf190..000000000000 --- a/changelog.d/8398.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix "Re-starting finished log context" warning when receiving an event we already had over federation. diff --git a/changelog.d/8399.misc b/changelog.d/8399.misc deleted file mode 100644 index ce6e8123cf8b..000000000000 --- a/changelog.d/8399.misc +++ /dev/null @@ -1 +0,0 @@ -Create a mechanism for marking tests "logcontext clean". diff --git a/changelog.d/8400.bugfix b/changelog.d/8400.bugfix deleted file mode 100644 index 835658ba5eac..000000000000 --- a/changelog.d/8400.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix incorrect handling of timeouts on outgoing HTTP requests. diff --git a/changelog.d/8401.misc b/changelog.d/8401.misc deleted file mode 100644 index 27fd7ab129d9..000000000000 --- a/changelog.d/8401.misc +++ /dev/null @@ -1 +0,0 @@ -A pair of tiny cleanups in the federation request code. diff --git a/changelog.d/8402.misc b/changelog.d/8402.misc deleted file mode 100644 index ad1804d207aa..000000000000 --- a/changelog.d/8402.misc +++ /dev/null @@ -1 +0,0 @@ -Add checks on startup that PostgreSQL sequences are consistent with their associated tables. diff --git a/changelog.d/8404.misc b/changelog.d/8404.misc deleted file mode 100644 index 7aadded6c1dd..000000000000 --- a/changelog.d/8404.misc +++ /dev/null @@ -1 +0,0 @@ -Do not include appservice users when calculating the total MAU for a server. 
diff --git a/changelog.d/8405.feature b/changelog.d/8405.feature deleted file mode 100644 index f3c4a74bc79b..000000000000 --- a/changelog.d/8405.feature +++ /dev/null @@ -1 +0,0 @@ -Consolidate the SSO error template across all configuration. diff --git a/changelog.d/8406.feature b/changelog.d/8406.feature deleted file mode 100644 index 1c6472ae7eae..000000000000 --- a/changelog.d/8406.feature +++ /dev/null @@ -1 +0,0 @@ -Add prometheus metrics for replication requests. diff --git a/changelog.d/8410.bugfix b/changelog.d/8410.bugfix deleted file mode 100644 index 1323ddc525db..000000000000 --- a/changelog.d/8410.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a v1.20.0 regression in the `synapse_port_db` script regarding the `ui_auth_sessions_ips` table. diff --git a/changelog.d/8413.feature b/changelog.d/8413.feature deleted file mode 100644 index abe40a901cf6..000000000000 --- a/changelog.d/8413.feature +++ /dev/null @@ -1 +0,0 @@ -Support passing additional single sign-on parameters to the client. diff --git a/changelog.d/8414.bugfix b/changelog.d/8414.bugfix deleted file mode 100644 index 315876e89238..000000000000 --- a/changelog.d/8414.bugfix +++ /dev/null @@ -1 +0,0 @@ -Remove unnecessary 3PID registration check when resetting password via an email address. Bug introduced in v0.34.0rc2. \ No newline at end of file diff --git a/changelog.d/8415.doc b/changelog.d/8415.doc deleted file mode 100644 index 28b579853364..000000000000 --- a/changelog.d/8415.doc +++ /dev/null @@ -1 +0,0 @@ -Improve description of `server_name` config option in `homserver.yaml`. \ No newline at end of file diff --git a/changelog.d/8417.feature b/changelog.d/8417.feature deleted file mode 100644 index 17549c3df39a..000000000000 --- a/changelog.d/8417.feature +++ /dev/null @@ -1 +0,0 @@ -Add a config option to specify a whitelist of domains that a user can be redirected to after validating their email or phone number. \ No newline at end of file diff --git a/changelog.d/8419.feature b/changelog.d/8419.feature deleted file mode 100644 index b363e929ea8c..000000000000 --- a/changelog.d/8419.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental support for sharding event persister. diff --git a/changelog.d/8420.feature b/changelog.d/8420.feature deleted file mode 100644 index 9d6849624d48..000000000000 --- a/changelog.d/8420.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental reporting of metrics on expensive rooms for state-resolution. diff --git a/changelog.d/8422.misc b/changelog.d/8422.misc deleted file mode 100644 index 03fba120c6d9..000000000000 --- a/changelog.d/8422.misc +++ /dev/null @@ -1 +0,0 @@ -Typing fixes for `synapse.handlers.federation`. diff --git a/changelog.d/8423.misc b/changelog.d/8423.misc deleted file mode 100644 index 7260e3fa4182..000000000000 --- a/changelog.d/8423.misc +++ /dev/null @@ -1 +0,0 @@ -Various refactors to simplify stream token handling. diff --git a/changelog.d/8425.feature b/changelog.d/8425.feature deleted file mode 100644 index b4ee5bb74b9d..000000000000 --- a/changelog.d/8425.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental prometheus metric to track numbers of "large" rooms for state resolutiom. diff --git a/changelog.d/8426.removal b/changelog.d/8426.removal deleted file mode 100644 index a56277fe7ad6..000000000000 --- a/changelog.d/8426.removal +++ /dev/null @@ -1 +0,0 @@ -Drop support for `prometheus_client` older than 0.4.0. 
diff --git a/changelog.d/8427.misc b/changelog.d/8427.misc deleted file mode 100644 index c9656b9112b2..000000000000 --- a/changelog.d/8427.misc +++ /dev/null @@ -1 +0,0 @@ -Make stream token serializing/deserializing async. diff --git a/changelog.d/8430.feature b/changelog.d/8430.feature deleted file mode 100644 index 1f31d42bc1de..000000000000 --- a/changelog.d/8430.feature +++ /dev/null @@ -1 +0,0 @@ -Add prometheus metrics to track federation delays. diff --git a/synapse/__init__.py b/synapse/__init__.py index e40b582bd585..57f818125a81 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -48,7 +48,7 @@ except ImportError: pass -__version__ = "1.20.1" +__version__ = "1.21.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From c501c80e467a0c7a2429633a5160a580195a8826 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 1 Oct 2020 13:17:59 +0100 Subject: [PATCH 098/245] fix version number we're not doing a final release yet! --- CHANGES.md | 4 ++-- synapse/__init__.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 38906ade49ed..8368ac0b92d2 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,5 @@ -Synapse 1.21.0 (2020-10-01) -=========================== +Synapse 1.21.0rc1 (2020-10-01) +============================== Features -------- diff --git a/synapse/__init__.py b/synapse/__init__.py index 57f818125a81..47069745080f 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -48,7 +48,7 @@ except ImportError: pass -__version__ = "1.21.0" +__version__ = "1.21.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 50e5174e8687ae3d368386dc020d869006cb6750 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 1 Oct 2020 13:27:01 +0100 Subject: [PATCH 099/245] changelog fixes --- CHANGES.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 8368ac0b92d2..267909d3e96f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -29,7 +29,7 @@ Bugfixes - Fix messages not being sent over federation until an event is sent into the same room. ([\#8230](https://github.com/matrix-org/synapse/issues/8230), [\#8247](https://github.com/matrix-org/synapse/issues/8247), [\#8258](https://github.com/matrix-org/synapse/issues/8258), [\#8272](https://github.com/matrix-org/synapse/issues/8272), [\#8322](https://github.com/matrix-org/synapse/issues/8322)) - Fix a longstanding bug where files that could not be thumbnailed would result in an Internal Server Error. ([\#8236](https://github.com/matrix-org/synapse/issues/8236)) - Upgrade minimum version of `canonicaljson` to version 1.4.0, to fix an unicode encoding issue. ([\#8262](https://github.com/matrix-org/synapse/issues/8262)) -- Fix logstanding bug which could lead to incomplete database upgrades on SQLite. ([\#8265](https://github.com/matrix-org/synapse/issues/8265)) +- Fix longstanding bug which could lead to incomplete database upgrades on SQLite. ([\#8265](https://github.com/matrix-org/synapse/issues/8265)) - Fix stack overflow when stderr is redirected to the logging system, and the logging system encounters an error. ([\#8268](https://github.com/matrix-org/synapse/issues/8268)) - Fix a bug which cause the logging system to report errors, if `DEBUG` was enabled and no `context` filter was applied. 
([\#8278](https://github.com/matrix-org/synapse/issues/8278)) - Fix edge case where push could get delayed for a user until a later event was pushed. ([\#8287](https://github.com/matrix-org/synapse/issues/8287)) @@ -41,7 +41,6 @@ Bugfixes - Include `guest_access` in the fields that are checked for null bytes when updating `room_stats_state`. Broke in v1.7.2. ([\#8373](https://github.com/matrix-org/synapse/issues/8373)) - Fix theoretical race condition where events are not sent down `/sync` if the synchrotron worker is restarted without restarting other workers. ([\#8374](https://github.com/matrix-org/synapse/issues/8374)) - Fix a bug which could cause errors in rooms with malformed membership events, on servers using sqlite. ([\#8385](https://github.com/matrix-org/synapse/issues/8385)) -- Fix a bug introduced in v1.20.0 which caused the `synapse_port_db` script to fail. ([\#8386](https://github.com/matrix-org/synapse/issues/8386)) - Fix "Re-starting finished log context" warning when receiving an event we already had over federation. ([\#8398](https://github.com/matrix-org/synapse/issues/8398)) - Fix incorrect handling of timeouts on outgoing HTTP requests. ([\#8400](https://github.com/matrix-org/synapse/issues/8400)) - Fix a regression in v1.20.0 in the `synapse_port_db` script regarding the `ui_auth_sessions_ips` table. ([\#8410](https://github.com/matrix-org/synapse/issues/8410)) From b1f4e6e4fc3d0cf5e10d6a79ef89abdcc9e63e8c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 1 Oct 2020 13:34:24 +0100 Subject: [PATCH 100/245] fix a logging error in thumbnailer (#8435) Introduced in #8236 --- changelog.d/8435.bugfix | 1 + synapse/rest/media/v1/media_repository.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8435.bugfix diff --git a/changelog.d/8435.bugfix b/changelog.d/8435.bugfix new file mode 100644 index 000000000000..6f048710159f --- /dev/null +++ b/changelog.d/8435.bugfix @@ -0,0 +1 @@ +Fix a longstanding bug where files that could not be thumbnailed would result in an Internal Server Error. diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index ae6822d6e742..e1192b47cdb0 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -637,7 +637,7 @@ async def _generate_thumbnails( thumbnailer = Thumbnailer(input_path) except ThumbnailError as e: logger.warning( - "Unable to generate thumbnails for remote media %s from %s using a method of %s and type of %s: %s", + "Unable to generate thumbnails for remote media %s from %s of type %s: %s", media_id, server_name, media_type, From 2eb947e0eee7d66a77fc4a7e7af5234cacece3e6 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 1 Oct 2020 13:38:26 +0100 Subject: [PATCH 101/245] update changelog --- CHANGES.md | 2 +- changelog.d/8435.bugfix | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) delete mode 100644 changelog.d/8435.bugfix diff --git a/CHANGES.md b/CHANGES.md index 267909d3e96f..29711c60ce6c 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -27,7 +27,7 @@ Bugfixes - Fix inconsistent handling of non-existent push rules, and stop tracking the `enabled` state of removed push rules. ([\#7796](https://github.com/matrix-org/synapse/issues/7796)) - Fix a longstanding bug when storing a media file with an empty `upload_name`. 
([\#7905](https://github.com/matrix-org/synapse/issues/7905)) - Fix messages not being sent over federation until an event is sent into the same room. ([\#8230](https://github.com/matrix-org/synapse/issues/8230), [\#8247](https://github.com/matrix-org/synapse/issues/8247), [\#8258](https://github.com/matrix-org/synapse/issues/8258), [\#8272](https://github.com/matrix-org/synapse/issues/8272), [\#8322](https://github.com/matrix-org/synapse/issues/8322)) -- Fix a longstanding bug where files that could not be thumbnailed would result in an Internal Server Error. ([\#8236](https://github.com/matrix-org/synapse/issues/8236)) +- Fix a longstanding bug where files that could not be thumbnailed would result in an Internal Server Error. ([\#8236](https://github.com/matrix-org/synapse/issues/8236), [\#8435](https://github.com/matrix-org/synapse/issues/8435)) - Upgrade minimum version of `canonicaljson` to version 1.4.0, to fix an unicode encoding issue. ([\#8262](https://github.com/matrix-org/synapse/issues/8262)) - Fix longstanding bug which could lead to incomplete database upgrades on SQLite. ([\#8265](https://github.com/matrix-org/synapse/issues/8265)) - Fix stack overflow when stderr is redirected to the logging system, and the logging system encounters an error. ([\#8268](https://github.com/matrix-org/synapse/issues/8268)) diff --git a/changelog.d/8435.bugfix b/changelog.d/8435.bugfix deleted file mode 100644 index 6f048710159f..000000000000 --- a/changelog.d/8435.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a longstanding bug where files that could not be thumbnailed would result in an Internal Server Error. From 61aaf36a1cdaa0057d0f4d8784a8e126d5f3988a Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 1 Oct 2020 13:38:20 -0400 Subject: [PATCH 102/245] Do not expose the experimental appservice login flow to clients. (#8440) --- changelog.d/8440.bugfix | 1 + synapse/rest/client/v1/login.py | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) create mode 100644 changelog.d/8440.bugfix diff --git a/changelog.d/8440.bugfix b/changelog.d/8440.bugfix new file mode 100644 index 000000000000..84d5f541d18c --- /dev/null +++ b/changelog.d/8440.bugfix @@ -0,0 +1 @@ +Do not expose the experimental `uk.half-shot.msc2778.login.application_service` flow in the login API. diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index b9347b87c7c6..3d1693d7acfb 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -111,8 +111,6 @@ def on_GET(self, request: SynapseRequest): ({"type": t} for t in self.auth_handler.get_supported_login_types()) ) - flows.append({"type": LoginRestServlet.APPSERVICE_TYPE}) - return 200, {"flows": flows} def on_OPTIONS(self, request: SynapseRequest): From 05ee048f2c9ce0bb8a7d2430b21ca3682ef5858b Mon Sep 17 00:00:00 2001 From: BBBSnowball Date: Thu, 1 Oct 2020 19:54:35 +0200 Subject: [PATCH 103/245] Add config option for always using "userinfo endpoint" for OIDC (#7658) This allows for connecting to certain IdPs, e.g. GitLab. 
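A condensed sketch of the behaviour the new option controls (a hypothetical free function; the real logic lives on `OidcHandler._uses_userinfo`, shown in the diff below): the userinfo endpoint is fetched either when the `openid` scope is not requested or when `user_profile_method` explicitly asks for it.

```python
from typing import List


def uses_userinfo(scopes: List[str], user_profile_method: str = "auto") -> bool:
    # "auto" only falls back to the userinfo endpoint when the "openid" scope
    # is absent; "userinfo_endpoint" forces it even when an ID token would
    # otherwise carry the profile.
    return "openid" not in scopes or user_profile_method == "userinfo_endpoint"


assert uses_userinfo(["openid"]) is False
assert uses_userinfo(["openid", "read_user"], "userinfo_endpoint") is True
assert uses_userinfo(["read_user"]) is True
```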
--- changelog.d/7658.feature | 1 + docs/openid.md | 41 +++++++++++++++++++++++++------- docs/sample_config.yaml | 8 +++++++ synapse/config/oidc_config.py | 9 +++++++ synapse/handlers/oidc_handler.py | 11 +++++---- tests/handlers/test_oidc.py | 10 ++++++-- 6 files changed, 65 insertions(+), 15 deletions(-) create mode 100644 changelog.d/7658.feature diff --git a/changelog.d/7658.feature b/changelog.d/7658.feature new file mode 100644 index 000000000000..fbf345988d35 --- /dev/null +++ b/changelog.d/7658.feature @@ -0,0 +1 @@ +Add a configuration option for always using the "userinfo endpoint" for OpenID Connect. This fixes support for some identity providers, e.g. GitLab. Contributed by Benjamin Koch. diff --git a/docs/openid.md b/docs/openid.md index 70b37f858bd8..48736819995a 100644 --- a/docs/openid.md +++ b/docs/openid.md @@ -238,13 +238,36 @@ Synapse config: ```yaml oidc_config: - enabled: true - issuer: "https://id.twitch.tv/oauth2/" - client_id: "your-client-id" # TO BE FILLED - client_secret: "your-client-secret" # TO BE FILLED - client_auth_method: "client_secret_post" - user_mapping_provider: - config: - localpart_template: '{{ user.preferred_username }}' - display_name_template: '{{ user.name }}' + enabled: true + issuer: "https://id.twitch.tv/oauth2/" + client_id: "your-client-id" # TO BE FILLED + client_secret: "your-client-secret" # TO BE FILLED + client_auth_method: "client_secret_post" + user_mapping_provider: + config: + localpart_template: "{{ user.preferred_username }}" + display_name_template: "{{ user.name }}" +``` + +### GitLab + +1. Create a [new application](https://gitlab.com/profile/applications). +2. Add the `read_user` and `openid` scopes. +3. Add this Callback URL: `[synapse public baseurl]/_synapse/oidc/callback` + +Synapse config: + +```yaml +oidc_config: + enabled: true + issuer: "https://gitlab.com/" + client_id: "your-client-id" # TO BE FILLED + client_secret: "your-client-secret" # TO BE FILLED + client_auth_method: "client_secret_post" + scopes: ["openid", "read_user"] + user_profile_method: "userinfo_endpoint" + user_mapping_provider: + config: + localpart_template: '{{ user.nickname }}' + display_name_template: '{{ user.name }}' ``` diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 8a3206e8454a..b2c1d7a73713 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1714,6 +1714,14 @@ oidc_config: # #skip_verification: true + # Whether to fetch the user profile from the userinfo endpoint. Valid + # values are: "auto" or "userinfo_endpoint". + # + # Defaults to "auto", which fetches the userinfo endpoint if "openid" is included + # in `scopes`. Uncomment the following to always fetch the userinfo endpoint. + # + #user_profile_method: "userinfo_endpoint" + # Uncomment to allow a user logging in via OIDC to match a pre-existing account instead # of failing. This could be used if switching from password logins to OIDC. Defaults to false. 
# diff --git a/synapse/config/oidc_config.py b/synapse/config/oidc_config.py index f92411681999..7597fbc86484 100644 --- a/synapse/config/oidc_config.py +++ b/synapse/config/oidc_config.py @@ -56,6 +56,7 @@ def read_config(self, config, **kwargs): self.oidc_userinfo_endpoint = oidc_config.get("userinfo_endpoint") self.oidc_jwks_uri = oidc_config.get("jwks_uri") self.oidc_skip_verification = oidc_config.get("skip_verification", False) + self.oidc_user_profile_method = oidc_config.get("user_profile_method", "auto") self.oidc_allow_existing_users = oidc_config.get("allow_existing_users", False) ump_config = oidc_config.get("user_mapping_provider", {}) @@ -159,6 +160,14 @@ def generate_config_section(self, config_dir_path, server_name, **kwargs): # #skip_verification: true + # Whether to fetch the user profile from the userinfo endpoint. Valid + # values are: "auto" or "userinfo_endpoint". + # + # Defaults to "auto", which fetches the userinfo endpoint if "openid" is included + # in `scopes`. Uncomment the following to always fetch the userinfo endpoint. + # + #user_profile_method: "userinfo_endpoint" + # Uncomment to allow a user logging in via OIDC to match a pre-existing account instead # of failing. This could be used if switching from password logins to OIDC. Defaults to false. # diff --git a/synapse/handlers/oidc_handler.py b/synapse/handlers/oidc_handler.py index 19cd65267535..05ac86e69714 100644 --- a/synapse/handlers/oidc_handler.py +++ b/synapse/handlers/oidc_handler.py @@ -96,6 +96,7 @@ def __init__(self, hs: "HomeServer"): self.hs = hs self._callback_url = hs.config.oidc_callback_url # type: str self._scopes = hs.config.oidc_scopes # type: List[str] + self._user_profile_method = hs.config.oidc_user_profile_method # type: str self._client_auth = ClientAuth( hs.config.oidc_client_id, hs.config.oidc_client_secret, @@ -196,11 +197,11 @@ def _validate_metadata(self): % (m["response_types_supported"],) ) - # If the openid scope was not requested, we need a userinfo endpoint to fetch user infos + # Ensure there's a userinfo endpoint to fetch from if it is required. if self._uses_userinfo: if m.get("userinfo_endpoint") is None: raise ValueError( - 'provider has no "userinfo_endpoint", even though it is required because the "openid" scope is not requested' + 'provider has no "userinfo_endpoint", even though it is required' ) else: # If we're not using userinfo, we need a valid jwks to validate the ID token @@ -220,8 +221,10 @@ def _uses_userinfo(self) -> bool: ``access_token`` with the ``userinfo_endpoint``. """ - # Maybe that should be user-configurable and not inferred? - return "openid" not in self._scopes + return ( + "openid" not in self._scopes + or self._user_profile_method == "userinfo_endpoint" + ) async def load_metadata(self) -> OpenIDProviderMetadata: """Load and validate the provider metadata. diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index d5087e58be9a..b6f436c01678 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -286,9 +286,15 @@ def test_validate_config(self): h._validate_metadata, ) - # Tests for configs that the userinfo endpoint + # Tests for configs that require the userinfo endpoint self.assertFalse(h._uses_userinfo) - h._scopes = [] # do not request the openid scope + self.assertEqual(h._user_profile_method, "auto") + h._user_profile_method = "userinfo_endpoint" + self.assertTrue(h._uses_userinfo) + + # Revert the profile method and do not request the "openid" scope. 
+ h._user_profile_method = "auto" + h._scopes = [] self.assertTrue(h._uses_userinfo) self.assertRaisesRegex(ValueError, "userinfo_endpoint", h._validate_metadata) From 6c5d5e507e629cf57ae8c1034879e8ffaef33e9f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 2 Oct 2020 09:57:12 +0100 Subject: [PATCH 104/245] Add unit test for event persister sharding (#8433) --- changelog.d/8433.misc | 1 + mypy.ini | 3 + stubs/txredisapi.pyi | 20 +- synapse/replication/tcp/handler.py | 6 +- synapse/replication/tcp/redis.py | 40 +++- tests/replication/_base.py | 224 ++++++++++++++++-- .../test_sharded_event_persister.py | 102 ++++++++ tests/unittest.py | 2 +- 8 files changed, 371 insertions(+), 27 deletions(-) create mode 100644 changelog.d/8433.misc create mode 100644 tests/replication/test_sharded_event_persister.py diff --git a/changelog.d/8433.misc b/changelog.d/8433.misc new file mode 100644 index 000000000000..05f8b5bbf41e --- /dev/null +++ b/changelog.d/8433.misc @@ -0,0 +1 @@ +Add unit test for event persister sharding. diff --git a/mypy.ini b/mypy.ini index c283f15b21e3..e84ad04e412c 100644 --- a/mypy.ini +++ b/mypy.ini @@ -143,3 +143,6 @@ ignore_missing_imports = True [mypy-nacl.*] ignore_missing_imports = True + +[mypy-hiredis] +ignore_missing_imports = True diff --git a/stubs/txredisapi.pyi b/stubs/txredisapi.pyi index c66413f003cd..522244bb57f7 100644 --- a/stubs/txredisapi.pyi +++ b/stubs/txredisapi.pyi @@ -16,7 +16,7 @@ """Contains *incomplete* type hints for txredisapi. """ -from typing import List, Optional, Union +from typing import List, Optional, Union, Type class RedisProtocol: def publish(self, channel: str, message: bytes): ... @@ -42,3 +42,21 @@ def lazyConnection( class SubscriberFactory: def buildProtocol(self, addr): ... + +class ConnectionHandler: ... + +class RedisFactory: + continueTrying: bool + handler: RedisProtocol + def __init__( + self, + uuid: str, + dbid: Optional[int], + poolsize: int, + isLazy: bool = False, + handler: Type = ConnectionHandler, + charset: str = "utf-8", + password: Optional[str] = None, + replyTimeout: Optional[int] = None, + convertNumbers: Optional[int] = True, + ): ... diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index b323841f73cb..e92da7b26378 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -251,10 +251,9 @@ def start_replication(self, hs): using TCP. """ if hs.config.redis.redis_enabled: - import txredisapi - from synapse.replication.tcp.redis import ( RedisDirectTcpReplicationClientFactory, + lazyConnection, ) logger.info( @@ -271,7 +270,8 @@ def start_replication(self, hs): # connection after SUBSCRIBE is called). # First create the connection for sending commands. 
- outbound_redis_connection = txredisapi.lazyConnection( + outbound_redis_connection = lazyConnection( + reactor=hs.get_reactor(), host=hs.config.redis_host, port=hs.config.redis_port, password=hs.config.redis.redis_password, diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index f225e533de5b..de19705c1f41 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -15,7 +15,7 @@ import logging from inspect import isawaitable -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional import txredisapi @@ -228,3 +228,41 @@ def buildProtocol(self, addr): p.password = self.password return p + + +def lazyConnection( + reactor, + host: str = "localhost", + port: int = 6379, + dbid: Optional[int] = None, + reconnect: bool = True, + charset: str = "utf-8", + password: Optional[str] = None, + connectTimeout: Optional[int] = None, + replyTimeout: Optional[int] = None, + convertNumbers: bool = True, +) -> txredisapi.RedisProtocol: + """Equivalent to `txredisapi.lazyConnection`, except allows specifying a + reactor. + """ + + isLazy = True + poolsize = 1 + + uuid = "%s:%d" % (host, port) + factory = txredisapi.RedisFactory( + uuid, + dbid, + poolsize, + isLazy, + txredisapi.ConnectionHandler, + charset, + password, + replyTimeout, + convertNumbers, + ) + factory.continueTrying = reconnect + for x in range(poolsize): + reactor.connectTCP(host, port, factory, connectTimeout) + + return factory.handler diff --git a/tests/replication/_base.py b/tests/replication/_base.py index ae60874ec3c2..81ea985b9f43 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -12,13 +12,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import logging from typing import Any, Callable, List, Optional, Tuple import attr +import hiredis from twisted.internet.interfaces import IConsumer, IPullProducer, IReactorTime +from twisted.internet.protocol import Protocol from twisted.internet.task import LoopingCall from twisted.web.http import HTTPChannel @@ -27,7 +28,7 @@ GenericWorkerServer, ) from synapse.http.server import JsonResource -from synapse.http.site import SynapseRequest +from synapse.http.site import SynapseRequest, SynapseSite from synapse.replication.http import ReplicationRestResource, streams from synapse.replication.tcp.handler import ReplicationCommandHandler from synapse.replication.tcp.protocol import ClientReplicationStreamProtocol @@ -197,19 +198,37 @@ def setUp(self): self.server_factory = ReplicationStreamProtocolFactory(self.hs) self.streamer = self.hs.get_replication_streamer() + # Fake in memory Redis server that servers can connect to. + self._redis_server = FakeRedisPubSubServer() + store = self.hs.get_datastore() self.database_pool = store.db_pool self.reactor.lookups["testserv"] = "1.2.3.4" + self.reactor.lookups["localhost"] = "127.0.0.1" + + # A map from a HS instance to the associated HTTP Site to use for + # handling inbound HTTP requests to that instance. + self._hs_to_site = {self.hs: self.site} + + if self.hs.config.redis.redis_enabled: + # Handle attempts to connect to fake redis server. 
+ self.reactor.add_tcp_client_callback( + "localhost", 6379, self.connect_any_redis_attempts, + ) - self._worker_hs_to_resource = {} + self.hs.get_tcp_replication().start_replication(self.hs) # When we see a connection attempt to the master replication listener we # automatically set up the connection. This is so that tests don't # manually have to go and explicitly set it up each time (plus sometimes # it is impossible to write the handling explicitly in the tests). + # + # Register the master replication listener: self.reactor.add_tcp_client_callback( - "1.2.3.4", 8765, self._handle_http_replication_attempt + "1.2.3.4", + 8765, + lambda: self._handle_http_replication_attempt(self.hs, 8765), ) def create_test_json_resource(self): @@ -253,28 +272,63 @@ def make_worker_hs( **kwargs ) + # If the instance is in the `instance_map` config then workers may try + # and send HTTP requests to it, so we register it with + # `_handle_http_replication_attempt` like we do with the master HS. + instance_name = worker_hs.get_instance_name() + instance_loc = worker_hs.config.worker.instance_map.get(instance_name) + if instance_loc: + # Ensure the host is one that has a fake DNS entry. + if instance_loc.host not in self.reactor.lookups: + raise Exception( + "Host does not have an IP for instance_map[%r].host = %r" + % (instance_name, instance_loc.host,) + ) + + self.reactor.add_tcp_client_callback( + self.reactor.lookups[instance_loc.host], + instance_loc.port, + lambda: self._handle_http_replication_attempt( + worker_hs, instance_loc.port + ), + ) + store = worker_hs.get_datastore() store.db_pool._db_pool = self.database_pool._db_pool - repl_handler = ReplicationCommandHandler(worker_hs) - client = ClientReplicationStreamProtocol( - worker_hs, "client", "test", self.clock, repl_handler, - ) - server = self.server_factory.buildProtocol(None) + # Set up TCP replication between master and the new worker if we don't + # have Redis support enabled. 
+ if not worker_hs.config.redis_enabled: + repl_handler = ReplicationCommandHandler(worker_hs) + client = ClientReplicationStreamProtocol( + worker_hs, "client", "test", self.clock, repl_handler, + ) + server = self.server_factory.buildProtocol(None) - client_transport = FakeTransport(server, self.reactor) - client.makeConnection(client_transport) + client_transport = FakeTransport(server, self.reactor) + client.makeConnection(client_transport) - server_transport = FakeTransport(client, self.reactor) - server.makeConnection(server_transport) + server_transport = FakeTransport(client, self.reactor) + server.makeConnection(server_transport) # Set up a resource for the worker - resource = ReplicationRestResource(self.hs) + resource = ReplicationRestResource(worker_hs) for servlet in self.servlets: servlet(worker_hs, resource) - self._worker_hs_to_resource[worker_hs] = resource + self._hs_to_site[worker_hs] = SynapseSite( + logger_name="synapse.access.http.fake", + site_tag="{}-{}".format( + worker_hs.config.server.server_name, worker_hs.get_instance_name() + ), + config=worker_hs.config.server.listeners[0], + resource=resource, + server_version_string="1", + ) + + if worker_hs.config.redis.redis_enabled: + worker_hs.get_tcp_replication().start_replication(worker_hs) return worker_hs @@ -285,7 +339,7 @@ def _get_worker_hs_config(self) -> dict: return config def render_on_worker(self, worker_hs: HomeServer, request: SynapseRequest): - render(request, self._worker_hs_to_resource[worker_hs], self.reactor) + render(request, self._hs_to_site[worker_hs].resource, self.reactor) def replicate(self): """Tell the master side of replication that something has happened, and then @@ -294,9 +348,9 @@ def replicate(self): self.streamer.on_notifier_poke() self.pump() - def _handle_http_replication_attempt(self): - """Handles a connection attempt to the master replication HTTP - listener. + def _handle_http_replication_attempt(self, hs, repl_port): + """Handles a connection attempt to the given HS replication HTTP + listener on the given port. """ # We should have at least one outbound connection attempt, where the @@ -305,7 +359,7 @@ def _handle_http_replication_attempt(self): self.assertGreaterEqual(len(clients), 1) (host, port, client_factory, _timeout, _bindAddress) = clients.pop() self.assertEqual(host, "1.2.3.4") - self.assertEqual(port, 8765) + self.assertEqual(port, repl_port) # Set up client side protocol client_protocol = client_factory.buildProtocol(None) @@ -315,7 +369,7 @@ def _handle_http_replication_attempt(self): # Set up the server side protocol channel = _PushHTTPChannel(self.reactor) channel.requestFactory = request_factory - channel.site = self.site + channel.site = self._hs_to_site[hs] # Connect client to server and vice versa. client_to_server_transport = FakeTransport( @@ -333,6 +387,32 @@ def _handle_http_replication_attempt(self): # inside `connecTCP` before the connection has been passed back to the # code that requested the TCP connection. + def connect_any_redis_attempts(self): + """If redis is enabled we need to deal with workers connecting to a + redis server. We don't want to use a real Redis server so we use a + fake one. 
+ """ + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0) + self.assertEqual(host, "localhost") + self.assertEqual(port, 6379) + + client_protocol = client_factory.buildProtocol(None) + server_protocol = self._redis_server.buildProtocol(None) + + client_to_server_transport = FakeTransport( + server_protocol, self.reactor, client_protocol + ) + client_protocol.makeConnection(client_to_server_transport) + + server_to_client_transport = FakeTransport( + client_protocol, self.reactor, server_protocol + ) + server_protocol.makeConnection(server_to_client_transport) + + return client_to_server_transport, server_to_client_transport + class TestReplicationDataHandler(GenericWorkerReplicationHandler): """Drop-in for ReplicationDataHandler which just collects RDATA rows""" @@ -467,3 +547,105 @@ def _run_once(self): pass self.stopProducing() + + +class FakeRedisPubSubServer: + """A fake Redis server for pub/sub. + """ + + def __init__(self): + self._subscribers = set() + + def add_subscriber(self, conn): + """A connection has called SUBSCRIBE + """ + self._subscribers.add(conn) + + def remove_subscriber(self, conn): + """A connection has called UNSUBSCRIBE + """ + self._subscribers.discard(conn) + + def publish(self, conn, channel, msg) -> int: + """A connection want to publish a message to subscribers. + """ + for sub in self._subscribers: + sub.send(["message", channel, msg]) + + return len(self._subscribers) + + def buildProtocol(self, addr): + return FakeRedisPubSubProtocol(self) + + +class FakeRedisPubSubProtocol(Protocol): + """A connection from a client talking to the fake Redis server. + """ + + def __init__(self, server: FakeRedisPubSubServer): + self._server = server + self._reader = hiredis.Reader() + + def dataReceived(self, data): + self._reader.feed(data) + + # We might get multiple messages in one packet. + while True: + msg = self._reader.gets() + + if msg is False: + # No more messages. + return + + if not isinstance(msg, list): + # Inbound commands should always be a list + raise Exception("Expected redis list") + + self.handle_command(msg[0], *msg[1:]) + + def handle_command(self, command, *args): + """Received a Redis command from the client. + """ + + # We currently only support pub/sub. + if command == b"PUBLISH": + channel, message = args + num_subscribers = self._server.publish(self, channel, message) + self.send(num_subscribers) + elif command == b"SUBSCRIBE": + (channel,) = args + self._server.add_subscriber(self) + self.send(["subscribe", channel, 1]) + else: + raise Exception("Unknown command") + + def send(self, msg): + """Send a message back to the client. + """ + raw = self.encode(msg).encode("utf-8") + + self.transport.write(raw) + self.transport.flush() + + def encode(self, obj): + """Encode an object to its Redis format. + + Supports: strings/bytes, integers and list/tuples. + """ + + if isinstance(obj, bytes): + # We assume bytes are just unicode strings. 
+ obj = obj.decode("utf-8") + + if isinstance(obj, str): + return "${len}\r\n{str}\r\n".format(len=len(obj), str=obj) + if isinstance(obj, int): + return ":{val}\r\n".format(val=obj) + if isinstance(obj, (list, tuple)): + items = "".join(self.encode(a) for a in obj) + return "*{len}\r\n{items}".format(len=len(obj), items=items) + + raise Exception("Unrecognized type for encoding redis: %r: %r", type(obj), obj) + + def connectionLost(self, reason): + self._server.remove_subscriber(self) diff --git a/tests/replication/test_sharded_event_persister.py b/tests/replication/test_sharded_event_persister.py new file mode 100644 index 000000000000..6068d1490538 --- /dev/null +++ b/tests/replication/test_sharded_event_persister.py @@ -0,0 +1,102 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import logging + +from synapse.rest import admin +from synapse.rest.client.v1 import login, room + +from tests.replication._base import BaseMultiWorkerStreamTestCase +from tests.utils import USE_POSTGRES_FOR_TESTS + +logger = logging.getLogger(__name__) + + +class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase): + """Checks event persisting sharding works + """ + + # Event persister sharding requires postgres (due to needing + # `MutliWriterIdGenerator`). + if not USE_POSTGRES_FOR_TESTS: + skip = "Requires Postgres" + + servlets = [ + admin.register_servlets_for_client_rest_resource, + room.register_servlets, + login.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + # Register a user who sends a message that we'll get notified about + self.other_user_id = self.register_user("otheruser", "pass") + self.other_access_token = self.login("otheruser", "pass") + + def default_config(self): + conf = super().default_config() + conf["redis"] = {"enabled": "true"} + conf["stream_writers"] = {"events": ["worker1", "worker2"]} + conf["instance_map"] = { + "worker1": {"host": "testserv", "port": 1001}, + "worker2": {"host": "testserv", "port": 1002}, + } + return conf + + def test_basic(self): + """Simple test to ensure that multiple rooms can be created and joined, + and that different rooms get handled by different instances. + """ + + self.make_worker_hs( + "synapse.app.generic_worker", {"worker_name": "worker1"}, + ) + + self.make_worker_hs( + "synapse.app.generic_worker", {"worker_name": "worker2"}, + ) + + persisted_on_1 = False + persisted_on_2 = False + + store = self.hs.get_datastore() + + user_id = self.register_user("user", "pass") + access_token = self.login("user", "pass") + + # Keep making new rooms until we see rooms being persisted on both + # workers. 
+ for _ in range(10): + # Create a room + room = self.helper.create_room_as(user_id, tok=access_token) + + # The other user joins + self.helper.join( + room=room, user=self.other_user_id, tok=self.other_access_token + ) + + # The other user sends some messages + rseponse = self.helper.send(room, body="Hi!", tok=self.other_access_token) + event_id = rseponse["event_id"] + + # The event position includes which instance persisted the event. + pos = self.get_success(store.get_position_for_event(event_id)) + + persisted_on_1 |= pos.instance_name == "worker1" + persisted_on_2 |= pos.instance_name == "worker2" + + if persisted_on_1 and persisted_on_2: + break + + self.assertTrue(persisted_on_1) + self.assertTrue(persisted_on_2) diff --git a/tests/unittest.py b/tests/unittest.py index e654c0442d6c..82ede9de3444 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -241,7 +241,7 @@ def setUp(self): # create a site to wrap the resource. self.site = SynapseSite( logger_name="synapse.access.http.fake", - site_tag="test", + site_tag=self.hs.config.server.server_name, config=self.hs.config.server.listeners[0], resource=self.resource, server_version_string="1", From 3bd3707cb9615b5a9f7f7449ebe3ec495017ee9f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 2 Oct 2020 11:05:29 +0100 Subject: [PATCH 105/245] Fix malformed log line in new federation "catch up" logic (#8442) --- changelog.d/8442.bugfix | 1 + synapse/federation/sender/per_destination_queue.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8442.bugfix diff --git a/changelog.d/8442.bugfix b/changelog.d/8442.bugfix new file mode 100644 index 000000000000..6f779a1de57b --- /dev/null +++ b/changelog.d/8442.bugfix @@ -0,0 +1 @@ +Fix malformed log line in new federation "catch up" logic. diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index 2657767fd153..bc99af3fdd80 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -490,7 +490,7 @@ async def _catch_up_transmission_loop(self) -> None: ) if logger.isEnabledFor(logging.INFO): - rooms = (p.room_id for p in catchup_pdus) + rooms = [p.room_id for p in catchup_pdus] logger.info("Catching up rooms to %s: %r", self._destination, rooms) success = await self._transaction_manager.send_new_transaction( From 34ff8da83b54024289f515c6d73e6b486574d699 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 2 Oct 2020 06:15:53 -0400 Subject: [PATCH 106/245] Convert additional templates to Jinja (#8444) This converts a few more of our inline HTML templates to Jinja. This is somewhat part of #7280 and should make it a bit easier to customize these in the future. 
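As a rough, standalone sketch of the pattern these fallback-auth pages move to (in Synapse itself the templates are loaded via `Config.read_templates`, as the diff below shows; the literal values here are placeholders):

```python
# Standalone illustration only: render one of the new Jinja2 templates with dummy
# values, replacing the old `TEMPLATE % {...}` string interpolation.
import jinja2

env = jinja2.Environment(
    loader=jinja2.FileSystemLoader("synapse/res/templates"), autoescape=True
)
recaptcha_template = env.get_template("recaptcha.html")

html = recaptcha_template.render(
    session="dummy-session",  # placeholder value
    myurl="/_matrix/client/r0/auth/m.login.recaptcha/fallback/web",
    sitekey="dummy-recaptcha-site-key",  # placeholder value
)
print(html[:80])
```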
--- changelog.d/8444.bugfix | 1 + synapse/config/_base.py | 11 +- synapse/config/captcha.py | 3 + synapse/config/consent_config.py | 2 + synapse/config/registration.py | 5 + synapse/res/templates/auth_success.html | 21 ++++ synapse/res/templates/recaptcha.html | 38 +++++++ synapse/res/templates/terms.html | 20 ++++ synapse/rest/client/v2_alpha/auth.py | 136 +++++------------------- 9 files changed, 121 insertions(+), 116 deletions(-) create mode 100644 changelog.d/8444.bugfix create mode 100644 synapse/res/templates/auth_success.html create mode 100644 synapse/res/templates/recaptcha.html create mode 100644 synapse/res/templates/terms.html diff --git a/changelog.d/8444.bugfix b/changelog.d/8444.bugfix new file mode 100644 index 000000000000..30c4328d4bd9 --- /dev/null +++ b/changelog.d/8444.bugfix @@ -0,0 +1 @@ +Convert additional templates from inline HTML to Jinja2 templates. diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 05a66841c338..85f65da4d95f 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -242,12 +242,11 @@ def read_templates( env = jinja2.Environment(loader=loader, autoescape=autoescape) # Update the environment with our custom filters - env.filters.update( - { - "format_ts": _format_ts_filter, - "mxc_to_http": _create_mxc_to_http_filter(self.public_baseurl), - } - ) + env.filters.update({"format_ts": _format_ts_filter}) + if self.public_baseurl: + env.filters.update( + {"mxc_to_http": _create_mxc_to_http_filter(self.public_baseurl)} + ) for filename in filenames: # Load the template diff --git a/synapse/config/captcha.py b/synapse/config/captcha.py index 82f04d7966e2..cb009581651b 100644 --- a/synapse/config/captcha.py +++ b/synapse/config/captcha.py @@ -28,6 +28,9 @@ def read_config(self, config, **kwargs): "recaptcha_siteverify_api", "https://www.recaptcha.net/recaptcha/api/siteverify", ) + self.recaptcha_template = self.read_templates( + ["recaptcha.html"], autoescape=True + )[0] def generate_config_section(self, **kwargs): return """\ diff --git a/synapse/config/consent_config.py b/synapse/config/consent_config.py index fbddebeeab2a..6efa59b110b0 100644 --- a/synapse/config/consent_config.py +++ b/synapse/config/consent_config.py @@ -89,6 +89,8 @@ def __init__(self, *args): def read_config(self, config, **kwargs): consent_config = config.get("user_consent") + self.terms_template = self.read_templates(["terms.html"], autoescape=True)[0] + if consent_config is None: return self.user_consent_version = str(consent_config["version"]) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 5ffbb934fe2d..d7e3690a32fb 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -187,6 +187,11 @@ def read_config(self, config, **kwargs): session_lifetime = self.parse_duration(session_lifetime) self.session_lifetime = session_lifetime + # The success template used during fallback auth. + self.fallback_success_template = self.read_templates( + ["auth_success.html"], autoescape=True + )[0] + def generate_config_section(self, generate_secrets=False, **kwargs): if generate_secrets: registration_shared_secret = 'registration_shared_secret: "%s"' % ( diff --git a/synapse/res/templates/auth_success.html b/synapse/res/templates/auth_success.html new file mode 100644 index 000000000000..baf463314258 --- /dev/null +++ b/synapse/res/templates/auth_success.html @@ -0,0 +1,21 @@ + + +Success! + + + + + +

+

Thank you

+

You may now close this window and return to the application

+
+ + diff --git a/synapse/res/templates/recaptcha.html b/synapse/res/templates/recaptcha.html new file mode 100644 index 000000000000..63944dc60814 --- /dev/null +++ b/synapse/res/templates/recaptcha.html @@ -0,0 +1,38 @@ + + +Authentication + + + + + + + +
+
+

+ Hello! We need to prevent computer programs and other automated + things from creating accounts on this server. +

+

+ Please verify that you're not a robot. +

+ +
+
+ +
+ +
+ + diff --git a/synapse/res/templates/terms.html b/synapse/res/templates/terms.html new file mode 100644 index 000000000000..dfef9897ee40 --- /dev/null +++ b/synapse/res/templates/terms.html @@ -0,0 +1,20 @@ + + +Authentication + + + + +
+ +
+ + diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index 097538f96864..5fbfae599101 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -25,94 +25,6 @@ logger = logging.getLogger(__name__) -RECAPTCHA_TEMPLATE = """ - - -Authentication - - - - - - - -
-
-

- Hello! We need to prevent computer programs and other automated - things from creating accounts on this server. -

-

- Please verify that you're not a robot. -

- -
-
- -
- -
- - -""" - -TERMS_TEMPLATE = """ - - -Authentication - - - - -
-
-

- Please click the button below if you agree to the - privacy policy of this homeserver. -

- - -
-
- - -""" - -SUCCESS_TEMPLATE = """ - - -Success! - - - - - -
-

Thank you

-

You may now close this window and return to the application

-
- - -""" - class AuthRestServlet(RestServlet): """ @@ -145,26 +57,30 @@ def __init__(self, hs): self._cas_server_url = hs.config.cas_server_url self._cas_service_url = hs.config.cas_service_url + self.recaptcha_template = hs.config.recaptcha_template + self.terms_template = hs.config.terms_template + self.success_template = hs.config.fallback_success_template + async def on_GET(self, request, stagetype): session = parse_string(request, "session") if not session: raise SynapseError(400, "No session supplied") if stagetype == LoginType.RECAPTCHA: - html = RECAPTCHA_TEMPLATE % { - "session": session, - "myurl": "%s/r0/auth/%s/fallback/web" + html = self.recaptcha_template.render( + session=session, + myurl="%s/r0/auth/%s/fallback/web" % (CLIENT_API_PREFIX, LoginType.RECAPTCHA), - "sitekey": self.hs.config.recaptcha_public_key, - } + sitekey=self.hs.config.recaptcha_public_key, + ) elif stagetype == LoginType.TERMS: - html = TERMS_TEMPLATE % { - "session": session, - "terms_url": "%s_matrix/consent?v=%s" + html = self.terms_template.render( + session=session, + terms_url="%s_matrix/consent?v=%s" % (self.hs.config.public_baseurl, self.hs.config.user_consent_version), - "myurl": "%s/r0/auth/%s/fallback/web" + myurl="%s/r0/auth/%s/fallback/web" % (CLIENT_API_PREFIX, LoginType.TERMS), - } + ) elif stagetype == LoginType.SSO: # Display a confirmation page which prompts the user to @@ -222,14 +138,14 @@ async def on_POST(self, request, stagetype): ) if success: - html = SUCCESS_TEMPLATE + html = self.success_template.render() else: - html = RECAPTCHA_TEMPLATE % { - "session": session, - "myurl": "%s/r0/auth/%s/fallback/web" + html = self.recaptcha_template.render( + session=session, + myurl="%s/r0/auth/%s/fallback/web" % (CLIENT_API_PREFIX, LoginType.RECAPTCHA), - "sitekey": self.hs.config.recaptcha_public_key, - } + sitekey=self.hs.config.recaptcha_public_key, + ) elif stagetype == LoginType.TERMS: authdict = {"session": session} @@ -238,18 +154,18 @@ async def on_POST(self, request, stagetype): ) if success: - html = SUCCESS_TEMPLATE + html = self.success_template.render() else: - html = TERMS_TEMPLATE % { - "session": session, - "terms_url": "%s_matrix/consent?v=%s" + html = self.terms_template.render( + session=session, + terms_url="%s_matrix/consent?v=%s" % ( self.hs.config.public_baseurl, self.hs.config.user_consent_version, ), - "myurl": "%s/r0/auth/%s/fallback/web" + myurl="%s/r0/auth/%s/fallback/web" % (CLIENT_API_PREFIX, LoginType.TERMS), - } + ) elif stagetype == LoginType.SSO: # The SSO fallback workflow should not post here, raise SynapseError(404, "Fallback SSO auth does not support POST requests.") From 695240d34a9dd1c34379ded1fbbbe42a1850549e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 2 Oct 2020 12:22:19 +0100 Subject: [PATCH 107/245] Fix DB query on startup for negative streams. (#8447) For negative streams we have to negate the internal stream ID before querying the DB. The effect of this bug was to query far too many rows, slowing start up time, but we would correctly filter the results afterwards so there was no ill effect. --- changelog.d/8447.bugfix | 1 + synapse/storage/util/id_generators.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8447.bugfix diff --git a/changelog.d/8447.bugfix b/changelog.d/8447.bugfix new file mode 100644 index 000000000000..88edaf322e01 --- /dev/null +++ b/changelog.d/8447.bugfix @@ -0,0 +1 @@ +Fix DB query on startup for negative streams which caused long start up times. Introduced in #8374. 
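Before the one-line fix below, an illustrative restatement of the problem (variable names follow `MultiWriterIdGenerator` loosely; this is not the real code): the generator tracks positions internally as positive integers, but rows for a negative ("backwards") stream are stored with negated IDs, so the bound passed to the SQL query has to be multiplied by the stream's return factor.

```python
# Illustrative only, not Synapse code.
return_factor = -1   # -1 for a negative/backwards stream, +1 for a forwards one
min_stream_id = 5    # internal position, always tracked as a positive integer

# Previously the positive value was passed straight to the query, matching far too
# many rows; negating it lines the bound up with what is actually stored in the table.
bound_for_sql = min_stream_id * return_factor   # -5
```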
diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 02fbb656e81c..48efbb5067bd 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -341,7 +341,7 @@ def _load_current_ids( "cmp": "<=" if self._positive else ">=", } sql = self._db.engine.convert_param_style(sql) - cur.execute(sql, (min_stream_id,)) + cur.execute(sql, (min_stream_id * self._return_factor,)) self._persisted_upto_position = min_stream_id From 3bd2a2cbb1adffdbd0783ec58e88511cb4e90735 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 2 Oct 2020 07:24:07 -0400 Subject: [PATCH 108/245] Include a public_baseurl in configs generated by the demo script. (#8443) --- changelog.d/8443.misc | 1 + demo/start.sh | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog.d/8443.misc diff --git a/changelog.d/8443.misc b/changelog.d/8443.misc new file mode 100644 index 000000000000..633598e6b302 --- /dev/null +++ b/changelog.d/8443.misc @@ -0,0 +1 @@ +Configure `public_baseurl` when using demo scripts. diff --git a/demo/start.sh b/demo/start.sh index 83396e5c336f..f6b5ea137f84 100755 --- a/demo/start.sh +++ b/demo/start.sh @@ -30,6 +30,8 @@ for port in 8080 8081 8082; do if ! grep -F "Customisation made by demo/start.sh" -q $DIR/etc/$port.config; then printf '\n\n# Customisation made by demo/start.sh\n' >> $DIR/etc/$port.config + echo "public_baseurl: http://localhost:$port/" >> $DIR/etc/$port.config + echo 'enable_registration: true' >> $DIR/etc/$port.config # Warning, this heredoc depends on the interaction of tabs and spaces. Please don't From 73d93039ff6c3addd54bb29a57808a3f2eed7a05 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 2 Oct 2020 12:29:29 +0100 Subject: [PATCH 109/245] Fix bug in remote thumbnail search (#8438) #7124 changed the behaviour of remote thumbnails so that the thumbnailing method was included in the filename of the thumbnail. To support existing files, it included a fallback so that we would check the old filename if the new filename didn't exist. Unfortunately, it didn't apply this logic to storage providers, so any thumbnails stored on such a storage provider was broken. --- changelog.d/8438.bugfix | 1 + synapse/rest/media/v1/media_storage.py | 43 ++++++++++++++------------ 2 files changed, 24 insertions(+), 20 deletions(-) create mode 100644 changelog.d/8438.bugfix diff --git a/changelog.d/8438.bugfix b/changelog.d/8438.bugfix new file mode 100644 index 000000000000..3edc394149d3 --- /dev/null +++ b/changelog.d/8438.bugfix @@ -0,0 +1 @@ +Fix a regression in v1.21.0rc1 which broke thumbnails of remote media. diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py index 5681677fc93d..a9586fb0b73d 100644 --- a/synapse/rest/media/v1/media_storage.py +++ b/synapse/rest/media/v1/media_storage.py @@ -141,31 +141,34 @@ async def fetch_media(self, file_info: FileInfo) -> Optional[Responder]: Returns: Returns a Responder if the file was found, otherwise None. 
""" + paths = [self._file_info_to_path(file_info)] - path = self._file_info_to_path(file_info) - local_path = os.path.join(self.local_media_directory, path) - if os.path.exists(local_path): - return FileResponder(open(local_path, "rb")) - - # Fallback for paths without method names - # Should be removed in the future + # fallback for remote thumbnails with no method in the filename if file_info.thumbnail and file_info.server_name: - legacy_path = self.filepaths.remote_media_thumbnail_rel_legacy( - server_name=file_info.server_name, - file_id=file_info.file_id, - width=file_info.thumbnail_width, - height=file_info.thumbnail_height, - content_type=file_info.thumbnail_type, + paths.append( + self.filepaths.remote_media_thumbnail_rel_legacy( + server_name=file_info.server_name, + file_id=file_info.file_id, + width=file_info.thumbnail_width, + height=file_info.thumbnail_height, + content_type=file_info.thumbnail_type, + ) ) - legacy_local_path = os.path.join(self.local_media_directory, legacy_path) - if os.path.exists(legacy_local_path): - return FileResponder(open(legacy_local_path, "rb")) + + for path in paths: + local_path = os.path.join(self.local_media_directory, path) + if os.path.exists(local_path): + logger.debug("responding with local file %s", local_path) + return FileResponder(open(local_path, "rb")) + logger.debug("local file %s did not exist", local_path) for provider in self.storage_providers: - res = await provider.fetch(path, file_info) # type: Any - if res: - logger.debug("Streaming %s from %s", path, provider) - return res + for path in paths: + res = await provider.fetch(path, file_info) # type: Any + if res: + logger.debug("Streaming %s from %s", path, provider) + return res + logger.debug("%s not found on %s", path, provider) return None From f6c526ce6732a1af1228a08513f6a795b61c2b71 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 2 Oct 2020 12:46:58 +0100 Subject: [PATCH 110/245] 1.21.0rc2 --- CHANGES.md | 13 +++++++++++++ changelog.d/8438.bugfix | 1 - changelog.d/8440.bugfix | 1 - changelog.d/8442.bugfix | 1 - changelog.d/8444.bugfix | 1 - changelog.d/8447.bugfix | 1 - synapse/__init__.py | 2 +- 7 files changed, 14 insertions(+), 6 deletions(-) delete mode 100644 changelog.d/8438.bugfix delete mode 100644 changelog.d/8440.bugfix delete mode 100644 changelog.d/8442.bugfix delete mode 100644 changelog.d/8444.bugfix delete mode 100644 changelog.d/8447.bugfix diff --git a/CHANGES.md b/CHANGES.md index 29711c60ce6c..e5177e714daa 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,16 @@ +Synapse 1.21.0rc2 (2020-10-02) +============================== + +Bugfixes +-------- + +- Fix a regression in v1.21.0rc1 which broke thumbnails of remote media. ([\#8438](https://github.com/matrix-org/synapse/issues/8438)) +- Do not expose the experimental `uk.half-shot.msc2778.login.application_service` flow in the login API. ([\#8440](https://github.com/matrix-org/synapse/issues/8440)) +- Fix malformed log line in new federation "catch up" logic. ([\#8442](https://github.com/matrix-org/synapse/issues/8442)) +- Convert additional templates from inline HTML to Jinja2 templates. ([\#8444](https://github.com/matrix-org/synapse/issues/8444)) +- Fix DB query on startup for negative streams which caused long start up times. Introduced in #8374. 
([\#8447](https://github.com/matrix-org/synapse/issues/8447)) + + Synapse 1.21.0rc1 (2020-10-01) ============================== diff --git a/changelog.d/8438.bugfix b/changelog.d/8438.bugfix deleted file mode 100644 index 3edc394149d3..000000000000 --- a/changelog.d/8438.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a regression in v1.21.0rc1 which broke thumbnails of remote media. diff --git a/changelog.d/8440.bugfix b/changelog.d/8440.bugfix deleted file mode 100644 index 84d5f541d18c..000000000000 --- a/changelog.d/8440.bugfix +++ /dev/null @@ -1 +0,0 @@ -Do not expose the experimental `uk.half-shot.msc2778.login.application_service` flow in the login API. diff --git a/changelog.d/8442.bugfix b/changelog.d/8442.bugfix deleted file mode 100644 index 6f779a1de57b..000000000000 --- a/changelog.d/8442.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix malformed log line in new federation "catch up" logic. diff --git a/changelog.d/8444.bugfix b/changelog.d/8444.bugfix deleted file mode 100644 index 30c4328d4bd9..000000000000 --- a/changelog.d/8444.bugfix +++ /dev/null @@ -1 +0,0 @@ -Convert additional templates from inline HTML to Jinja2 templates. diff --git a/changelog.d/8447.bugfix b/changelog.d/8447.bugfix deleted file mode 100644 index 88edaf322e01..000000000000 --- a/changelog.d/8447.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix DB query on startup for negative streams which caused long start up times. Introduced in #8374. diff --git a/synapse/__init__.py b/synapse/__init__.py index 47069745080f..500558bbdf8b 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -48,7 +48,7 @@ except ImportError: pass -__version__ = "1.21.0rc1" +__version__ = "1.21.0rc2" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 6a8fd03acbce30c5f30f0225f21063e58f52eb37 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 2 Oct 2020 12:48:33 +0100 Subject: [PATCH 111/245] 1.21.0rc2 --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index e5177e714daa..e9872ff05246 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,7 +5,7 @@ Bugfixes -------- - Fix a regression in v1.21.0rc1 which broke thumbnails of remote media. ([\#8438](https://github.com/matrix-org/synapse/issues/8438)) -- Do not expose the experimental `uk.half-shot.msc2778.login.application_service` flow in the login API. ([\#8440](https://github.com/matrix-org/synapse/issues/8440)) +- Do not expose the experimental `uk.half-shot.msc2778.login.application_service` flow in the login API, which caused a compatibility problem with Element iOS. ([\#8440](https://github.com/matrix-org/synapse/issues/8440)) - Fix malformed log line in new federation "catch up" logic. ([\#8442](https://github.com/matrix-org/synapse/issues/8442)) - Convert additional templates from inline HTML to Jinja2 templates. ([\#8444](https://github.com/matrix-org/synapse/issues/8444)) - Fix DB query on startup for negative streams which caused long start up times. Introduced in #8374. 
([\#8447](https://github.com/matrix-org/synapse/issues/8447)) From 8672642225c9415935345057411bc7da732cb16a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 2 Oct 2020 12:54:53 +0100 Subject: [PATCH 112/245] linkify changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index e9872ff05246..0437e420bcb7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -8,7 +8,7 @@ Bugfixes - Do not expose the experimental `uk.half-shot.msc2778.login.application_service` flow in the login API, which caused a compatibility problem with Element iOS. ([\#8440](https://github.com/matrix-org/synapse/issues/8440)) - Fix malformed log line in new federation "catch up" logic. ([\#8442](https://github.com/matrix-org/synapse/issues/8442)) - Convert additional templates from inline HTML to Jinja2 templates. ([\#8444](https://github.com/matrix-org/synapse/issues/8444)) -- Fix DB query on startup for negative streams which caused long start up times. Introduced in #8374. ([\#8447](https://github.com/matrix-org/synapse/issues/8447)) +- Fix DB query on startup for negative streams which caused long start up times. Introduced in [\#8374](https://github.com/matrix-org/synapse/issues/8374). ([\#8447](https://github.com/matrix-org/synapse/issues/8447)) Synapse 1.21.0rc1 (2020-10-01) From 9de6e9e249d7d2940e847b68fe9995154b1a3f74 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 2 Oct 2020 12:56:40 +0100 Subject: [PATCH 113/245] move #8444 to 'feature' --- CHANGES.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 0437e420bcb7..5d4e80499eec 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,13 +1,17 @@ Synapse 1.21.0rc2 (2020-10-02) ============================== +Features +-------- + +- Convert additional templates from inline HTML to Jinja2 templates. ([\#8444](https://github.com/matrix-org/synapse/issues/8444)) + Bugfixes -------- - Fix a regression in v1.21.0rc1 which broke thumbnails of remote media. ([\#8438](https://github.com/matrix-org/synapse/issues/8438)) - Do not expose the experimental `uk.half-shot.msc2778.login.application_service` flow in the login API, which caused a compatibility problem with Element iOS. ([\#8440](https://github.com/matrix-org/synapse/issues/8440)) - Fix malformed log line in new federation "catch up" logic. ([\#8442](https://github.com/matrix-org/synapse/issues/8442)) -- Convert additional templates from inline HTML to Jinja2 templates. ([\#8444](https://github.com/matrix-org/synapse/issues/8444)) - Fix DB query on startup for negative streams which caused long start up times. Introduced in [\#8374](https://github.com/matrix-org/synapse/issues/8374). ([\#8447](https://github.com/matrix-org/synapse/issues/8447)) From 62894673e69f7beb0d0a748ad01c2e95c5fed106 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 2 Oct 2020 08:23:15 -0400 Subject: [PATCH 114/245] Allow background tasks to be run on a separate worker. 
(#8369) --- changelog.d/8369.feature | 1 + docs/sample_config.yaml | 5 + docs/workers.md | 17 ++ synapse/app/_base.py | 6 + synapse/app/admin_cmd.py | 1 + synapse/app/generic_worker.py | 4 + synapse/app/homeserver.py | 182 ---------------- synapse/app/phone_stats_home.py | 202 ++++++++++++++++++ synapse/config/workers.py | 18 ++ synapse/handlers/auth.py | 2 +- synapse/handlers/stats.py | 2 +- synapse/server.py | 17 +- synapse/storage/databases/main/__init__.py | 191 ----------------- synapse/storage/databases/main/metrics.py | 195 +++++++++++++++++ .../databases/main/monthly_active_users.py | 109 +++++----- synapse/storage/databases/main/room.py | 24 +-- synapse/storage/databases/main/ui_auth.py | 6 +- tests/test_phone_home.py | 2 +- tests/utils.py | 2 +- 19 files changed, 537 insertions(+), 449 deletions(-) create mode 100644 changelog.d/8369.feature create mode 100644 synapse/app/phone_stats_home.py diff --git a/changelog.d/8369.feature b/changelog.d/8369.feature new file mode 100644 index 000000000000..542993110bc8 --- /dev/null +++ b/changelog.d/8369.feature @@ -0,0 +1 @@ +Allow running background tasks in a separate worker process. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index b2c1d7a73713..7126ade2de6f 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -2504,6 +2504,11 @@ opentracing: # events: worker1 # typing: worker1 +# The worker that is used to run background tasks (e.g. cleaning up expired +# data). If not provided this defaults to the main process. +# +#run_background_tasks_on: worker1 + # Configuration for Redis when using workers. This *must* be enabled when # using workers (unless using old style direct TCP configuration). diff --git a/docs/workers.md b/docs/workers.md index ad4d8ca9f25a..84a9759e34c9 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -319,6 +319,23 @@ stream_writers: events: event_persister1 ``` +#### Background tasks + +There is also *experimental* support for moving background tasks to a separate +worker. Background tasks are run periodically or started via replication. Exactly +which tasks are configured to run depends on your Synapse configuration (e.g. if +stats is enabled). + +To enable this, the worker must have a `worker_name` and can be configured to run +background tasks. For example, to move background tasks to a dedicated worker, +the shared configuration would include: + +```yaml +run_background_tasks_on: background_worker +``` + +You might also wish to investigate the `update_user_directory` and +`media_instance_running_background_jobs` settings. ### `synapse.app.pusher` diff --git a/synapse/app/_base.py b/synapse/app/_base.py index fb476ddaf571..8bb0b142ca5b 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -28,6 +28,7 @@ import synapse from synapse.app import check_bind_error +from synapse.app.phone_stats_home import start_phone_stats_home from synapse.config.server import ListenerConfig from synapse.crypto import context_factory from synapse.logging.context import PreserveLoggingContext @@ -274,6 +275,11 @@ def handle_sighup(*args, **kwargs): setup_sentry(hs) setup_sdnotify(hs) + # If background tasks are running on the main process, start collecting the + # phone home stats. + if hs.config.run_background_tasks: + start_phone_stats_home(hs) + # We now freeze all allocated objects in the hopes that (almost) # everything currently allocated are things that will be used for the # rest of time. Doing so means less work each GC (hopefully). 
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index 7d309b1bb00a..f0d65d08d72d 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -208,6 +208,7 @@ def start(config_options): # Explicitly disable background processes config.update_user_directory = False + config.run_background_tasks = False config.start_pushers = False config.send_federation = False diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index c38413c8937b..fc5188ce95ed 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -128,11 +128,13 @@ from synapse.server import HomeServer, cache_in_self from synapse.storage.databases.main.censor_events import CensorEventsStore from synapse.storage.databases.main.media_repository import MediaRepositoryStore +from synapse.storage.databases.main.metrics import ServerMetricsStore from synapse.storage.databases.main.monthly_active_users import ( MonthlyActiveUsersWorkerStore, ) from synapse.storage.databases.main.presence import UserPresenceState from synapse.storage.databases.main.search import SearchWorkerStore +from synapse.storage.databases.main.stats import StatsStore from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore from synapse.storage.databases.main.user_directory import UserDirectoryStore from synapse.types import ReadReceipt @@ -454,6 +456,7 @@ class GenericWorkerSlavedStore( # FIXME(#3714): We need to add UserDirectoryStore as we write directly # rather than going via the correct worker. UserDirectoryStore, + StatsStore, UIAuthWorkerStore, SlavedDeviceInboxStore, SlavedDeviceStore, @@ -476,6 +479,7 @@ class GenericWorkerSlavedStore( SlavedFilteringStore, MonthlyActiveUsersWorkerStore, MediaRepositoryStore, + ServerMetricsStore, SearchWorkerStore, BaseSlavedStore, ): diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index dff739e1062d..4ed4a2c2533a 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -17,14 +17,10 @@ import gc import logging -import math import os -import resource import sys from typing import Iterable -from prometheus_client import Gauge - from twisted.application import service from twisted.internet import defer, reactor from twisted.python.failure import Failure @@ -60,7 +56,6 @@ from synapse.http.site import SynapseSite from synapse.logging.context import LoggingContext from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.module_api import ModuleApi from synapse.python_dependencies import check_requirements from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource @@ -334,20 +329,6 @@ def start_listening(self, listeners: Iterable[ListenerConfig]): logger.warning("Unrecognized listener type: %s", listener.type) -# Gauges to expose monthly active user control metrics -current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU") -current_mau_by_service_gauge = Gauge( - "synapse_admin_mau_current_mau_by_service", - "Current MAU by service", - ["app_service"], -) -max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit") -registered_reserved_users_mau_gauge = Gauge( - "synapse_admin_mau:registered_reserved_users", - "Registered users with reserved threepids", -) - - def setup(config_options): """ Args: @@ -389,8 +370,6 @@ def setup(config_options): except UpgradeDatabaseException as e: quit_with_error("Failed to upgrade database: %s" % (e,)) - hs.setup_master() - async def 
do_acme() -> bool: """ Reprovision an ACME certificate, if it's required. @@ -486,92 +465,6 @@ def stopService(self): return self._port.stopListening() -# Contains the list of processes we will be monitoring -# currently either 0 or 1 -_stats_process = [] - - -async def phone_stats_home(hs, stats, stats_process=_stats_process): - logger.info("Gathering stats for reporting") - now = int(hs.get_clock().time()) - uptime = int(now - hs.start_time) - if uptime < 0: - uptime = 0 - - # - # Performance statistics. Keep this early in the function to maintain reliability of `test_performance_100` test. - # - old = stats_process[0] - new = (now, resource.getrusage(resource.RUSAGE_SELF)) - stats_process[0] = new - - # Get RSS in bytes - stats["memory_rss"] = new[1].ru_maxrss - - # Get CPU time in % of a single core, not % of all cores - used_cpu_time = (new[1].ru_utime + new[1].ru_stime) - ( - old[1].ru_utime + old[1].ru_stime - ) - if used_cpu_time == 0 or new[0] == old[0]: - stats["cpu_average"] = 0 - else: - stats["cpu_average"] = math.floor(used_cpu_time / (new[0] - old[0]) * 100) - - # - # General statistics - # - - stats["homeserver"] = hs.config.server_name - stats["server_context"] = hs.config.server_context - stats["timestamp"] = now - stats["uptime_seconds"] = uptime - version = sys.version_info - stats["python_version"] = "{}.{}.{}".format( - version.major, version.minor, version.micro - ) - stats["total_users"] = await hs.get_datastore().count_all_users() - - total_nonbridged_users = await hs.get_datastore().count_nonbridged_users() - stats["total_nonbridged_users"] = total_nonbridged_users - - daily_user_type_results = await hs.get_datastore().count_daily_user_type() - for name, count in daily_user_type_results.items(): - stats["daily_user_type_" + name] = count - - room_count = await hs.get_datastore().get_room_count() - stats["total_room_count"] = room_count - - stats["daily_active_users"] = await hs.get_datastore().count_daily_users() - stats["monthly_active_users"] = await hs.get_datastore().count_monthly_users() - stats["daily_active_rooms"] = await hs.get_datastore().count_daily_active_rooms() - stats["daily_messages"] = await hs.get_datastore().count_daily_messages() - - r30_results = await hs.get_datastore().count_r30_users() - for name, count in r30_results.items(): - stats["r30_users_" + name] = count - - daily_sent_messages = await hs.get_datastore().count_daily_sent_messages() - stats["daily_sent_messages"] = daily_sent_messages - stats["cache_factor"] = hs.config.caches.global_factor - stats["event_cache_size"] = hs.config.caches.event_cache_size - - # - # Database version - # - - # This only reports info about the *main* database. 
- stats["database_engine"] = hs.get_datastore().db_pool.engine.module.__name__ - stats["database_server_version"] = hs.get_datastore().db_pool.engine.server_version - - logger.info("Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats)) - try: - await hs.get_proxied_http_client().put_json( - hs.config.report_stats_endpoint, stats - ) - except Exception as e: - logger.warning("Error reporting stats: %s", e) - - def run(hs): PROFILE_SYNAPSE = False if PROFILE_SYNAPSE: @@ -597,81 +490,6 @@ def profiled(*args, **kargs): ThreadPool._worker = profile(ThreadPool._worker) reactor.run = profile(reactor.run) - clock = hs.get_clock() - - stats = {} - - def performance_stats_init(): - _stats_process.clear() - _stats_process.append( - (int(hs.get_clock().time()), resource.getrusage(resource.RUSAGE_SELF)) - ) - - def start_phone_stats_home(): - return run_as_background_process( - "phone_stats_home", phone_stats_home, hs, stats - ) - - def generate_user_daily_visit_stats(): - return run_as_background_process( - "generate_user_daily_visits", hs.get_datastore().generate_user_daily_visits - ) - - # Rather than update on per session basis, batch up the requests. - # If you increase the loop period, the accuracy of user_daily_visits - # table will decrease - clock.looping_call(generate_user_daily_visit_stats, 5 * 60 * 1000) - - # monthly active user limiting functionality - def reap_monthly_active_users(): - return run_as_background_process( - "reap_monthly_active_users", hs.get_datastore().reap_monthly_active_users - ) - - clock.looping_call(reap_monthly_active_users, 1000 * 60 * 60) - reap_monthly_active_users() - - async def generate_monthly_active_users(): - current_mau_count = 0 - current_mau_count_by_service = {} - reserved_users = () - store = hs.get_datastore() - if hs.config.limit_usage_by_mau or hs.config.mau_stats_only: - current_mau_count = await store.get_monthly_active_count() - current_mau_count_by_service = ( - await store.get_monthly_active_count_by_service() - ) - reserved_users = await store.get_registered_reserved_users() - current_mau_gauge.set(float(current_mau_count)) - - for app_service, count in current_mau_count_by_service.items(): - current_mau_by_service_gauge.labels(app_service).set(float(count)) - - registered_reserved_users_mau_gauge.set(float(len(reserved_users))) - max_mau_gauge.set(float(hs.config.max_mau_value)) - - def start_generate_monthly_active_users(): - return run_as_background_process( - "generate_monthly_active_users", generate_monthly_active_users - ) - - start_generate_monthly_active_users() - if hs.config.limit_usage_by_mau or hs.config.mau_stats_only: - clock.looping_call(start_generate_monthly_active_users, 5 * 60 * 1000) - # End of monthly active user settings - - if hs.config.report_stats: - logger.info("Scheduling stats reporting for 3 hour intervals") - clock.looping_call(start_phone_stats_home, 3 * 60 * 60 * 1000) - - # We need to defer this init for the cases that we daemonize - # otherwise the process ID we get is that of the non-daemon process - clock.call_later(0, performance_stats_init) - - # We wait 5 minutes to send the first set of stats as the server can - # be quite busy the first few minutes - clock.call_later(5 * 60, start_phone_stats_home) - _base.start_reactor( "synapse-homeserver", soft_file_limit=hs.config.soft_file_limit, diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py new file mode 100644 index 000000000000..2c8e14a8c0ca --- /dev/null +++ b/synapse/app/phone_stats_home.py @@ -0,0 +1,202 @@ +# 
Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import math +import resource +import sys + +from prometheus_client import Gauge + +from synapse.metrics.background_process_metrics import run_as_background_process + +logger = logging.getLogger("synapse.app.homeserver") + +# Contains the list of processes we will be monitoring +# currently either 0 or 1 +_stats_process = [] + +# Gauges to expose monthly active user control metrics +current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU") +current_mau_by_service_gauge = Gauge( + "synapse_admin_mau_current_mau_by_service", + "Current MAU by service", + ["app_service"], +) +max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit") +registered_reserved_users_mau_gauge = Gauge( + "synapse_admin_mau:registered_reserved_users", + "Registered users with reserved threepids", +) + + +async def phone_stats_home(hs, stats, stats_process=_stats_process): + logger.info("Gathering stats for reporting") + now = int(hs.get_clock().time()) + uptime = int(now - hs.start_time) + if uptime < 0: + uptime = 0 + + # + # Performance statistics. Keep this early in the function to maintain reliability of `test_performance_100` test. + # + old = stats_process[0] + new = (now, resource.getrusage(resource.RUSAGE_SELF)) + stats_process[0] = new + + # Get RSS in bytes + stats["memory_rss"] = new[1].ru_maxrss + + # Get CPU time in % of a single core, not % of all cores + used_cpu_time = (new[1].ru_utime + new[1].ru_stime) - ( + old[1].ru_utime + old[1].ru_stime + ) + if used_cpu_time == 0 or new[0] == old[0]: + stats["cpu_average"] = 0 + else: + stats["cpu_average"] = math.floor(used_cpu_time / (new[0] - old[0]) * 100) + + # + # General statistics + # + + stats["homeserver"] = hs.config.server_name + stats["server_context"] = hs.config.server_context + stats["timestamp"] = now + stats["uptime_seconds"] = uptime + version = sys.version_info + stats["python_version"] = "{}.{}.{}".format( + version.major, version.minor, version.micro + ) + stats["total_users"] = await hs.get_datastore().count_all_users() + + total_nonbridged_users = await hs.get_datastore().count_nonbridged_users() + stats["total_nonbridged_users"] = total_nonbridged_users + + daily_user_type_results = await hs.get_datastore().count_daily_user_type() + for name, count in daily_user_type_results.items(): + stats["daily_user_type_" + name] = count + + room_count = await hs.get_datastore().get_room_count() + stats["total_room_count"] = room_count + + stats["daily_active_users"] = await hs.get_datastore().count_daily_users() + stats["monthly_active_users"] = await hs.get_datastore().count_monthly_users() + stats["daily_active_rooms"] = await hs.get_datastore().count_daily_active_rooms() + stats["daily_messages"] = await hs.get_datastore().count_daily_messages() + + r30_results = await hs.get_datastore().count_r30_users() + for name, count in r30_results.items(): + stats["r30_users_" + name] = count + + daily_sent_messages = await 
hs.get_datastore().count_daily_sent_messages() + stats["daily_sent_messages"] = daily_sent_messages + stats["cache_factor"] = hs.config.caches.global_factor + stats["event_cache_size"] = hs.config.caches.event_cache_size + + # + # Database version + # + + # This only reports info about the *main* database. + stats["database_engine"] = hs.get_datastore().db_pool.engine.module.__name__ + stats["database_server_version"] = hs.get_datastore().db_pool.engine.server_version + + logger.info("Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats)) + try: + await hs.get_proxied_http_client().put_json( + hs.config.report_stats_endpoint, stats + ) + except Exception as e: + logger.warning("Error reporting stats: %s", e) + + +def start_phone_stats_home(hs): + """ + Start the background tasks which report phone home stats. + """ + clock = hs.get_clock() + + stats = {} + + def performance_stats_init(): + _stats_process.clear() + _stats_process.append( + (int(hs.get_clock().time()), resource.getrusage(resource.RUSAGE_SELF)) + ) + + def start_phone_stats_home(): + return run_as_background_process( + "phone_stats_home", phone_stats_home, hs, stats + ) + + def generate_user_daily_visit_stats(): + return run_as_background_process( + "generate_user_daily_visits", hs.get_datastore().generate_user_daily_visits + ) + + # Rather than update on per session basis, batch up the requests. + # If you increase the loop period, the accuracy of user_daily_visits + # table will decrease + clock.looping_call(generate_user_daily_visit_stats, 5 * 60 * 1000) + + # monthly active user limiting functionality + def reap_monthly_active_users(): + return run_as_background_process( + "reap_monthly_active_users", hs.get_datastore().reap_monthly_active_users + ) + + clock.looping_call(reap_monthly_active_users, 1000 * 60 * 60) + reap_monthly_active_users() + + async def generate_monthly_active_users(): + current_mau_count = 0 + current_mau_count_by_service = {} + reserved_users = () + store = hs.get_datastore() + if hs.config.limit_usage_by_mau or hs.config.mau_stats_only: + current_mau_count = await store.get_monthly_active_count() + current_mau_count_by_service = ( + await store.get_monthly_active_count_by_service() + ) + reserved_users = await store.get_registered_reserved_users() + current_mau_gauge.set(float(current_mau_count)) + + for app_service, count in current_mau_count_by_service.items(): + current_mau_by_service_gauge.labels(app_service).set(float(count)) + + registered_reserved_users_mau_gauge.set(float(len(reserved_users))) + max_mau_gauge.set(float(hs.config.max_mau_value)) + + def start_generate_monthly_active_users(): + return run_as_background_process( + "generate_monthly_active_users", generate_monthly_active_users + ) + + if hs.config.limit_usage_by_mau or hs.config.mau_stats_only: + start_generate_monthly_active_users() + clock.looping_call(start_generate_monthly_active_users, 5 * 60 * 1000) + # End of monthly active user settings + + if hs.config.report_stats: + logger.info("Scheduling stats reporting for 3 hour intervals") + clock.looping_call(start_phone_stats_home, 3 * 60 * 60 * 1000) + + # We need to defer this init for the cases that we daemonize + # otherwise the process ID we get is that of the non-daemon process + clock.call_later(0, performance_stats_init) + + # We wait 5 minutes to send the first set of stats as the server can + # be quite busy the first few minutes + clock.call_later(5 * 60, start_phone_stats_home) diff --git a/synapse/config/workers.py b/synapse/config/workers.py index 
f23e42cdf98c..57ab097eba3e 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -132,6 +132,19 @@ def read_config(self, config, **kwargs): self.events_shard_config = ShardedWorkerHandlingConfig(self.writers.events) + # Whether this worker should run background tasks or not. + # + # As a note for developers, the background tasks guarded by this should + # be able to run on only a single instance (meaning that they don't + # depend on any in-memory state of a particular worker). + # + # No effort is made to ensure only a single instance of these tasks is + # running. + background_tasks_instance = config.get("run_background_tasks_on") or "master" + self.run_background_tasks = ( + self.worker_name is None and background_tasks_instance == "master" + ) or self.worker_name == background_tasks_instance + def generate_config_section(self, config_dir_path, server_name, **kwargs): return """\ ## Workers ## @@ -167,6 +180,11 @@ def generate_config_section(self, config_dir_path, server_name, **kwargs): #stream_writers: # events: worker1 # typing: worker1 + + # The worker that is used to run background tasks (e.g. cleaning up expired + # data). If not provided this defaults to the main process. + # + #run_background_tasks_on: worker1 """ def read_arguments(self, args): diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 00eae9205267..7c4b716b2853 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -212,7 +212,7 @@ def __init__(self, hs): self._clock = self.hs.get_clock() # Expire old UI auth sessions after a period of time. - if hs.config.worker_app is None: + if hs.config.run_background_tasks: self._clock.looping_call( run_as_background_process, 5 * 60 * 1000, diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index 249ffe2a55c8..dc62b21c06f9 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -49,7 +49,7 @@ def __init__(self, hs): # Guard to ensure we only process deltas one at a time self._is_processing = False - if hs.config.stats_enabled: + if self.stats_enabled and hs.config.run_background_tasks: self.notifier.add_replication_callback(self.notify_new_event) # We kick this off so that we don't have to wait for a change before diff --git a/synapse/server.py b/synapse/server.py index 5e3752c3334f..aa2273955cd4 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -185,7 +185,10 @@ class HomeServer(metaclass=abc.ABCMeta): we are listening on to provide HTTP services. """ - REQUIRED_ON_MASTER_STARTUP = ["user_directory_handler", "stats_handler"] + REQUIRED_ON_BACKGROUND_TASK_STARTUP = [ + "auth", + "stats", + ] # This is overridden in derived application classes # (such as synapse.app.homeserver.SynapseHomeServer) and gives the class to be @@ -251,14 +254,20 @@ def setup(self) -> None: self.datastores = Databases(self.DATASTORE_CLASS, self) logger.info("Finished setting up.") - def setup_master(self) -> None: + # Register background tasks required by this server. This must be done + # somewhat manually due to the background tasks not being registered + # unless handlers are instantiated. + if self.config.run_background_tasks: + self.setup_background_tasks() + + def setup_background_tasks(self) -> None: """ Some handlers have side effects on instantiation (like registering background updates). This function causes them to be fetched, and therefore instantiated, to run those side effects. 
""" - for i in self.REQUIRED_ON_MASTER_STARTUP: - getattr(self, "get_" + i)() + for i in self.REQUIRED_ON_BACKGROUND_TASK_STARTUP: + getattr(self, "get_" + i + "_handler")() def get_reactor(self) -> twisted.internet.base.ReactorBase: """ diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 0cb12f4c61be..f823d66709e2 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -15,9 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import calendar import logging -import time from typing import Any, Dict, List, Optional, Tuple from synapse.api.constants import PresenceState @@ -268,9 +266,6 @@ def __init__(self, database: DatabasePool, db_conn, hs): self._stream_order_on_start = self.get_room_max_stream_ordering() self._min_stream_order_on_start = self.get_room_min_stream_ordering() - # Used in _generate_user_daily_visits to keep track of progress - self._last_user_visit_update = self._get_start_of_day() - def get_device_stream_token(self) -> int: return self._device_list_id_gen.get_current_token() @@ -301,192 +296,6 @@ def _get_active_presence(self, db_conn): return [UserPresenceState(**row) for row in rows] - async def count_daily_users(self) -> int: - """ - Counts the number of users who used this homeserver in the last 24 hours. - """ - yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24) - return await self.db_pool.runInteraction( - "count_daily_users", self._count_users, yesterday - ) - - async def count_monthly_users(self) -> int: - """ - Counts the number of users who used this homeserver in the last 30 days. - Note this method is intended for phonehome metrics only and is different - from the mau figure in synapse.storage.monthly_active_users which, - amongst other things, includes a 3 day grace period before a user counts. - """ - thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30) - return await self.db_pool.runInteraction( - "count_monthly_users", self._count_users, thirty_days_ago - ) - - def _count_users(self, txn, time_from): - """ - Returns number of users seen in the past time_from period - """ - sql = """ - SELECT COALESCE(count(*), 0) FROM ( - SELECT user_id FROM user_ips - WHERE last_seen > ? - GROUP BY user_id - ) u - """ - txn.execute(sql, (time_from,)) - (count,) = txn.fetchone() - return count - - async def count_r30_users(self) -> Dict[str, int]: - """ - Counts the number of 30 day retained users, defined as:- - * Users who have created their accounts more than 30 days ago - * Where last seen at most 30 days ago - * Where account creation and last_seen are > 30 days apart - - Returns: - A mapping of counts globally as well as broken out by platform. 
- """ - - def _count_r30_users(txn): - thirty_days_in_secs = 86400 * 30 - now = int(self._clock.time()) - thirty_days_ago_in_secs = now - thirty_days_in_secs - - sql = """ - SELECT platform, COALESCE(count(*), 0) FROM ( - SELECT - users.name, platform, users.creation_ts * 1000, - MAX(uip.last_seen) - FROM users - INNER JOIN ( - SELECT - user_id, - last_seen, - CASE - WHEN user_agent LIKE '%%Android%%' THEN 'android' - WHEN user_agent LIKE '%%iOS%%' THEN 'ios' - WHEN user_agent LIKE '%%Electron%%' THEN 'electron' - WHEN user_agent LIKE '%%Mozilla%%' THEN 'web' - WHEN user_agent LIKE '%%Gecko%%' THEN 'web' - ELSE 'unknown' - END - AS platform - FROM user_ips - ) uip - ON users.name = uip.user_id - AND users.appservice_id is NULL - AND users.creation_ts < ? - AND uip.last_seen/1000 > ? - AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30 - GROUP BY users.name, platform, users.creation_ts - ) u GROUP BY platform - """ - - results = {} - txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs)) - - for row in txn: - if row[0] == "unknown": - pass - results[row[0]] = row[1] - - sql = """ - SELECT COALESCE(count(*), 0) FROM ( - SELECT users.name, users.creation_ts * 1000, - MAX(uip.last_seen) - FROM users - INNER JOIN ( - SELECT - user_id, - last_seen - FROM user_ips - ) uip - ON users.name = uip.user_id - AND appservice_id is NULL - AND users.creation_ts < ? - AND uip.last_seen/1000 > ? - AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30 - GROUP BY users.name, users.creation_ts - ) u - """ - - txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs)) - - (count,) = txn.fetchone() - results["all"] = count - - return results - - return await self.db_pool.runInteraction("count_r30_users", _count_r30_users) - - def _get_start_of_day(self): - """ - Returns millisecond unixtime for start of UTC day. - """ - now = time.gmtime() - today_start = calendar.timegm((now.tm_year, now.tm_mon, now.tm_mday, 0, 0, 0)) - return today_start * 1000 - - async def generate_user_daily_visits(self) -> None: - """ - Generates daily visit data for use in cohort/ retention analysis - """ - - def _generate_user_daily_visits(txn): - logger.info("Calling _generate_user_daily_visits") - today_start = self._get_start_of_day() - a_day_in_milliseconds = 24 * 60 * 60 * 1000 - now = self.clock.time_msec() - - sql = """ - INSERT INTO user_daily_visits (user_id, device_id, timestamp) - SELECT u.user_id, u.device_id, ? - FROM user_ips AS u - LEFT JOIN ( - SELECT user_id, device_id, timestamp FROM user_daily_visits - WHERE timestamp = ? - ) udv - ON u.user_id = udv.user_id AND u.device_id=udv.device_id - INNER JOIN users ON users.name=u.user_id - WHERE last_seen > ? AND last_seen <= ? - AND udv.timestamp IS NULL AND users.is_guest=0 - AND users.appservice_id IS NULL - GROUP BY u.user_id, u.device_id - """ - - # This means that the day has rolled over but there could still - # be entries from the previous day. There is an edge case - # where if the user logs in at 23:59 and overwrites their - # last_seen at 00:01 then they will not be counted in the - # previous day's stats - it is important that the query is run - # often to minimise this case. 
- if today_start > self._last_user_visit_update: - yesterday_start = today_start - a_day_in_milliseconds - txn.execute( - sql, - ( - yesterday_start, - yesterday_start, - self._last_user_visit_update, - today_start, - ), - ) - self._last_user_visit_update = today_start - - txn.execute( - sql, (today_start, today_start, self._last_user_visit_update, now) - ) - # Update _last_user_visit_update to now. The reason to do this - # rather just clamping to the beginning of the day is to limit - # the size of the join - meaning that the query can be run more - # frequently - self._last_user_visit_update = now - - await self.db_pool.runInteraction( - "generate_user_daily_visits", _generate_user_daily_visits - ) - async def get_users(self) -> List[Dict[str, Any]]: """Function to retrieve a list of users in users table. diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py index 92099f95cefb..2c5a4fdbf6f9 100644 --- a/synapse/storage/databases/main/metrics.py +++ b/synapse/storage/databases/main/metrics.py @@ -12,6 +12,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import calendar +import logging +import time +from typing import Dict from synapse.metrics import GaugeBucketCollector from synapse.metrics.background_process_metrics import run_as_background_process @@ -21,6 +25,8 @@ EventPushActionsWorkerStore, ) +logger = logging.getLogger(__name__) + # Collect metrics on the number of forward extremities that exist. _extremities_collecter = GaugeBucketCollector( "synapse_forward_extremities", @@ -60,6 +66,9 @@ def read_forward_extremities(): hs.get_clock().looping_call(read_forward_extremities, 60 * 60 * 1000) + # Used in _generate_user_daily_visits to keep track of progress + self._last_user_visit_update = self._get_start_of_day() + async def _read_forward_extremities(self): def fetch(txn): txn.execute( @@ -137,3 +146,189 @@ def _count(txn): return count return await self.db_pool.runInteraction("count_daily_active_rooms", _count) + + async def count_daily_users(self) -> int: + """ + Counts the number of users who used this homeserver in the last 24 hours. + """ + yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24) + return await self.db_pool.runInteraction( + "count_daily_users", self._count_users, yesterday + ) + + async def count_monthly_users(self) -> int: + """ + Counts the number of users who used this homeserver in the last 30 days. + Note this method is intended for phonehome metrics only and is different + from the mau figure in synapse.storage.monthly_active_users which, + amongst other things, includes a 3 day grace period before a user counts. + """ + thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30) + return await self.db_pool.runInteraction( + "count_monthly_users", self._count_users, thirty_days_ago + ) + + def _count_users(self, txn, time_from): + """ + Returns number of users seen in the past time_from period + """ + sql = """ + SELECT COALESCE(count(*), 0) FROM ( + SELECT user_id FROM user_ips + WHERE last_seen > ? 
+ GROUP BY user_id + ) u + """ + txn.execute(sql, (time_from,)) + (count,) = txn.fetchone() + return count + + async def count_r30_users(self) -> Dict[str, int]: + """ + Counts the number of 30 day retained users, defined as:- + * Users who have created their accounts more than 30 days ago + * Where last seen at most 30 days ago + * Where account creation and last_seen are > 30 days apart + + Returns: + A mapping of counts globally as well as broken out by platform. + """ + + def _count_r30_users(txn): + thirty_days_in_secs = 86400 * 30 + now = int(self._clock.time()) + thirty_days_ago_in_secs = now - thirty_days_in_secs + + sql = """ + SELECT platform, COALESCE(count(*), 0) FROM ( + SELECT + users.name, platform, users.creation_ts * 1000, + MAX(uip.last_seen) + FROM users + INNER JOIN ( + SELECT + user_id, + last_seen, + CASE + WHEN user_agent LIKE '%%Android%%' THEN 'android' + WHEN user_agent LIKE '%%iOS%%' THEN 'ios' + WHEN user_agent LIKE '%%Electron%%' THEN 'electron' + WHEN user_agent LIKE '%%Mozilla%%' THEN 'web' + WHEN user_agent LIKE '%%Gecko%%' THEN 'web' + ELSE 'unknown' + END + AS platform + FROM user_ips + ) uip + ON users.name = uip.user_id + AND users.appservice_id is NULL + AND users.creation_ts < ? + AND uip.last_seen/1000 > ? + AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30 + GROUP BY users.name, platform, users.creation_ts + ) u GROUP BY platform + """ + + results = {} + txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs)) + + for row in txn: + if row[0] == "unknown": + pass + results[row[0]] = row[1] + + sql = """ + SELECT COALESCE(count(*), 0) FROM ( + SELECT users.name, users.creation_ts * 1000, + MAX(uip.last_seen) + FROM users + INNER JOIN ( + SELECT + user_id, + last_seen + FROM user_ips + ) uip + ON users.name = uip.user_id + AND appservice_id is NULL + AND users.creation_ts < ? + AND uip.last_seen/1000 > ? + AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30 + GROUP BY users.name, users.creation_ts + ) u + """ + + txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs)) + + (count,) = txn.fetchone() + results["all"] = count + + return results + + return await self.db_pool.runInteraction("count_r30_users", _count_r30_users) + + def _get_start_of_day(self): + """ + Returns millisecond unixtime for start of UTC day. + """ + now = time.gmtime() + today_start = calendar.timegm((now.tm_year, now.tm_mon, now.tm_mday, 0, 0, 0)) + return today_start * 1000 + + async def generate_user_daily_visits(self) -> None: + """ + Generates daily visit data for use in cohort/ retention analysis + """ + + def _generate_user_daily_visits(txn): + logger.info("Calling _generate_user_daily_visits") + today_start = self._get_start_of_day() + a_day_in_milliseconds = 24 * 60 * 60 * 1000 + now = self._clock.time_msec() + + sql = """ + INSERT INTO user_daily_visits (user_id, device_id, timestamp) + SELECT u.user_id, u.device_id, ? + FROM user_ips AS u + LEFT JOIN ( + SELECT user_id, device_id, timestamp FROM user_daily_visits + WHERE timestamp = ? + ) udv + ON u.user_id = udv.user_id AND u.device_id=udv.device_id + INNER JOIN users ON users.name=u.user_id + WHERE last_seen > ? AND last_seen <= ? + AND udv.timestamp IS NULL AND users.is_guest=0 + AND users.appservice_id IS NULL + GROUP BY u.user_id, u.device_id + """ + + # This means that the day has rolled over but there could still + # be entries from the previous day. 
There is an edge case + # where if the user logs in at 23:59 and overwrites their + # last_seen at 00:01 then they will not be counted in the + # previous day's stats - it is important that the query is run + # often to minimise this case. + if today_start > self._last_user_visit_update: + yesterday_start = today_start - a_day_in_milliseconds + txn.execute( + sql, + ( + yesterday_start, + yesterday_start, + self._last_user_visit_update, + today_start, + ), + ) + self._last_user_visit_update = today_start + + txn.execute( + sql, (today_start, today_start, self._last_user_visit_update, now) + ) + # Update _last_user_visit_update to now. The reason to do this + # rather just clamping to the beginning of the day is to limit + # the size of the join - meaning that the query can be run more + # frequently + self._last_user_visit_update = now + + await self.db_pool.runInteraction( + "generate_user_daily_visits", _generate_user_daily_visits + ) diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index e93aad33cd89..b2127598efea 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -32,6 +32,9 @@ def __init__(self, database: DatabasePool, db_conn, hs): self._clock = hs.get_clock() self.hs = hs + self._limit_usage_by_mau = hs.config.limit_usage_by_mau + self._max_mau_value = hs.config.max_mau_value + @cached(num_args=0) async def get_monthly_active_count(self) -> int: """Generates current count of monthly active users @@ -124,60 +127,6 @@ async def user_last_seen_monthly_active(self, user_id: str) -> int: desc="user_last_seen_monthly_active", ) - -class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore): - def __init__(self, database: DatabasePool, db_conn, hs): - super().__init__(database, db_conn, hs) - - self._limit_usage_by_mau = hs.config.limit_usage_by_mau - self._mau_stats_only = hs.config.mau_stats_only - self._max_mau_value = hs.config.max_mau_value - - # Do not add more reserved users than the total allowable number - # cur = LoggingTransaction( - self.db_pool.new_transaction( - db_conn, - "initialise_mau_threepids", - [], - [], - self._initialise_reserved_users, - hs.config.mau_limits_reserved_threepids[: self._max_mau_value], - ) - - def _initialise_reserved_users(self, txn, threepids): - """Ensures that reserved threepids are accounted for in the MAU table, should - be called on start up. - - Args: - txn (cursor): - threepids (list[dict]): List of threepid dicts to reserve - """ - - # XXX what is this function trying to achieve? It upserts into - # monthly_active_users for each *registered* reserved mau user, but why? - # - # - shouldn't there already be an entry for each reserved user (at least - # if they have been active recently)? - # - # - if it's important that the timestamp is kept up to date, why do we only - # run this at startup? 
- - for tp in threepids: - user_id = self.get_user_id_by_threepid_txn(txn, tp["medium"], tp["address"]) - - if user_id: - is_support = self.is_support_user_txn(txn, user_id) - if not is_support: - # We do this manually here to avoid hitting #6791 - self.db_pool.simple_upsert_txn( - txn, - table="monthly_active_users", - keyvalues={"user_id": user_id}, - values={"timestamp": int(self._clock.time_msec())}, - ) - else: - logger.warning("mau limit reserved threepid %s not found in db" % tp) - async def reap_monthly_active_users(self): """Cleans out monthly active user table to ensure that no stale entries exist. @@ -257,6 +206,58 @@ def _reap_users(txn, reserved_users): "reap_monthly_active_users", _reap_users, reserved_users ) + +class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore): + def __init__(self, database: DatabasePool, db_conn, hs): + super().__init__(database, db_conn, hs) + + self._mau_stats_only = hs.config.mau_stats_only + + # Do not add more reserved users than the total allowable number + # cur = LoggingTransaction( + self.db_pool.new_transaction( + db_conn, + "initialise_mau_threepids", + [], + [], + self._initialise_reserved_users, + hs.config.mau_limits_reserved_threepids[: self._max_mau_value], + ) + + def _initialise_reserved_users(self, txn, threepids): + """Ensures that reserved threepids are accounted for in the MAU table, should + be called on start up. + + Args: + txn (cursor): + threepids (list[dict]): List of threepid dicts to reserve + """ + + # XXX what is this function trying to achieve? It upserts into + # monthly_active_users for each *registered* reserved mau user, but why? + # + # - shouldn't there already be an entry for each reserved user (at least + # if they have been active recently)? + # + # - if it's important that the timestamp is kept up to date, why do we only + # run this at startup? + + for tp in threepids: + user_id = self.get_user_id_by_threepid_txn(txn, tp["medium"], tp["address"]) + + if user_id: + is_support = self.is_support_user_txn(txn, user_id) + if not is_support: + # We do this manually here to avoid hitting #6791 + self.db_pool.simple_upsert_txn( + txn, + table="monthly_active_users", + keyvalues={"user_id": user_id}, + values={"timestamp": int(self._clock.time_msec())}, + ) + else: + logger.warning("mau limit reserved threepid %s not found in db" % tp) + async def upsert_monthly_active_user(self, user_id: str) -> None: """Updates or inserts the user into the monthly active user table, which is used to track the current MAU usage of the server diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 3c7630857f5d..c0f2af07850b 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -192,6 +192,18 @@ def _count_public_rooms_txn(txn): "count_public_rooms", _count_public_rooms_txn ) + async def get_room_count(self) -> int: + """Retrieve the total number of rooms. + """ + + def f(txn): + sql = "SELECT count(*) FROM rooms" + txn.execute(sql) + row = txn.fetchone() + return row[0] or 0 + + return await self.db_pool.runInteraction("get_rooms", f) + async def get_largest_public_rooms( self, network_tuple: Optional[ThirdPartyInstanceID], @@ -1292,18 +1304,6 @@ def set_room_is_public_appservice_txn(txn, next_id): ) self.hs.get_notifier().on_new_replication_data() - async def get_room_count(self) -> int: - """Retrieve the total number of rooms. 
- """ - - def f(txn): - sql = "SELECT count(*) FROM rooms" - txn.execute(sql) - row = txn.fetchone() - return row[0] or 0 - - return await self.db_pool.runInteraction("get_rooms", f) - async def add_event_report( self, room_id: str, diff --git a/synapse/storage/databases/main/ui_auth.py b/synapse/storage/databases/main/ui_auth.py index 3b9211a6d235..79b7ece3302a 100644 --- a/synapse/storage/databases/main/ui_auth.py +++ b/synapse/storage/databases/main/ui_auth.py @@ -288,8 +288,6 @@ async def get_user_agents_ips_to_ui_auth_session( ) return [(row["user_agent"], row["ip"]) for row in rows] - -class UIAuthStore(UIAuthWorkerStore): async def delete_old_ui_auth_sessions(self, expiration_time: int) -> None: """ Remove sessions which were last used earlier than the expiration time. @@ -339,3 +337,7 @@ def _delete_old_ui_auth_sessions_txn( iterable=session_ids, keyvalues={}, ) + + +class UIAuthStore(UIAuthWorkerStore): + pass diff --git a/tests/test_phone_home.py b/tests/test_phone_home.py index 7657bddea5be..e7aed092c275 100644 --- a/tests/test_phone_home.py +++ b/tests/test_phone_home.py @@ -17,7 +17,7 @@ import mock -from synapse.app.homeserver import phone_stats_home +from synapse.app.phone_stats_home import phone_stats_home from tests.unittest import HomeserverTestCase diff --git a/tests/utils.py b/tests/utils.py index 4673872f8890..7a927c7f7421 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -276,7 +276,7 @@ def setup_test_homeserver( hs.setup() if homeserverToUse.__name__ == "TestHomeServer": - hs.setup_master() + hs.setup_background_tasks() if isinstance(db_engine, PostgresEngine): database = hs.get_datastores().databases[0] From ec10bdd32bb52af73789f5f60b39135578a739b1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 2 Oct 2020 15:09:31 +0100 Subject: [PATCH 115/245] Speed up unit tests when using PostgreSQL (#8450) --- changelog.d/8450.misc | 1 + synapse/storage/databases/main/events_worker.py | 13 ++++++++++++- tests/server.py | 4 ++++ 3 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8450.misc diff --git a/changelog.d/8450.misc b/changelog.d/8450.misc new file mode 100644 index 000000000000..4e04c523abef --- /dev/null +++ b/changelog.d/8450.misc @@ -0,0 +1 @@ +Speed up unit tests when using PostgreSQL. diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index f95679ebc440..723ced4ff09d 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -74,6 +74,13 @@ class EventRedactBehaviour(Names): class EventsWorkerStore(SQLBaseStore): + # Whether to use dedicated DB threads for event fetching. This is only used + # if there are multiple DB threads available. When used will lock the DB + # thread for periods of time (so unit tests want to disable this when they + # run DB transactions on the main thread). See EVENT_QUEUE_* for more + # options controlling this. 
+ USE_DEDICATED_DB_THREADS_FOR_EVENT_FETCHING = True + def __init__(self, database: DatabasePool, db_conn, hs): super().__init__(database, db_conn, hs) @@ -522,7 +529,11 @@ def _do_fetch(self, conn): if not event_list: single_threaded = self.database_engine.single_threaded - if single_threaded or i > EVENT_QUEUE_ITERATIONS: + if ( + not self.USE_DEDICATED_DB_THREADS_FOR_EVENT_FETCHING + or single_threaded + or i > EVENT_QUEUE_ITERATIONS + ): self._event_fetch_ongoing -= 1 return else: diff --git a/tests/server.py b/tests/server.py index b404ad4e2a49..f7f5276b2152 100644 --- a/tests/server.py +++ b/tests/server.py @@ -372,6 +372,10 @@ def runInteraction(interaction, *args, **kwargs): pool.threadpool = ThreadPool(clock._reactor) pool.running = True + # We've just changed the Databases to run DB transactions on the same + # thread, so we need to disable the dedicated thread behaviour. + server.get_datastores().main.USE_DEDICATED_DB_THREADS_FOR_EVENT_FETCHING = False + return server From e3debf9682ed59b2972f236fe2982b6af0a9bb9a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 2 Oct 2020 15:20:45 +0100 Subject: [PATCH 116/245] Add logging on startup/shutdown (#8448) This is so we can tell what is going on when things are taking a while to start up. The main change here is to ensure that transactions that are created during startup get correctly logged like normal transactions. --- changelog.d/8448.misc | 1 + scripts/synapse_port_db | 2 +- synapse/app/_base.py | 5 ++ synapse/storage/database.py | 89 +++++++++++++++---- synapse/storage/databases/__init__.py | 2 +- synapse/storage/databases/main/__init__.py | 1 - .../databases/main/event_push_actions.py | 8 +- .../databases/main/monthly_active_users.py | 1 - synapse/storage/databases/main/roommember.py | 13 +-- .../databases/main/schema/delta/20/pushers.py | 19 ++-- .../databases/main/schema/delta/25/fts.py | 2 - .../databases/main/schema/delta/27/ts.py | 2 - .../main/schema/delta/30/as_users.py | 6 +- .../databases/main/schema/delta/31/pushers.py | 19 ++-- .../main/schema/delta/31/search_update.py | 2 - .../main/schema/delta/33/event_fields.py | 2 - .../main/schema/delta/33/remote_media_ts.py | 5 +- .../delta/56/unique_user_filter_index.py | 7 +- .../delta/57/local_current_membership.py | 1 - synapse/storage/prepare_database.py | 33 +++---- synapse/storage/types.py | 6 ++ synapse/storage/util/id_generators.py | 8 +- synapse/storage/util/sequence.py | 15 +++- tests/storage/test_appservice.py | 14 +-- tests/utils.py | 2 + 25 files changed, 152 insertions(+), 113 deletions(-) create mode 100644 changelog.d/8448.misc diff --git a/changelog.d/8448.misc b/changelog.d/8448.misc new file mode 100644 index 000000000000..5ddda1803b9b --- /dev/null +++ b/changelog.d/8448.misc @@ -0,0 +1 @@ +Add SQL logging on queries that happen during startup. 
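The heart of this patch is a thin proxy around the raw DB-API connection: any cursor handed out during startup is wrapped so its queries get a transaction name and are timed and logged the same way queries inside normal transactions are. The sketch below illustrates that pattern only; it uses sqlite3 and invented names (LoggingConnection, TimedCursor) rather than the LoggingDatabaseConnection class this patch actually adds.

import logging
import sqlite3
import time

logger = logging.getLogger("db.startup")


class TimedCursor:
    """Cursor wrapper that logs how long each query takes."""

    def __init__(self, cursor, txn_name):
        self._cursor = cursor
        self._txn_name = txn_name

    def execute(self, sql, parameters=()):
        start = time.monotonic()
        try:
            return self._cursor.execute(sql, parameters)
        finally:
            elapsed_ms = (time.monotonic() - start) * 1000
            logger.info("[%s] %s (%.2f ms)", self._txn_name, sql, elapsed_ms)

    def __getattr__(self, name):
        # Proxy everything else (fetchone, fetchall, close, ...) to the real cursor.
        return getattr(self._cursor, name)


class LoggingConnection:
    """Connection wrapper whose cursors are TimedCursors, tagged with a default name."""

    def __init__(self, conn, default_txn_name):
        self._conn = conn
        self._default_txn_name = default_txn_name

    def cursor(self, txn_name=None):
        return TimedCursor(self._conn.cursor(), txn_name or self._default_txn_name)

    def __getattr__(self, name):
        # commit(), rollback(), close(), etc. pass straight through.
        return getattr(self._conn, name)


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    db = LoggingConnection(sqlite3.connect(":memory:"), "startup")
    cur = db.cursor(txn_name="create_tables")
    cur.execute("CREATE TABLE users (name TEXT)")
    db.commit()

Proxying unknown attributes through __getattr__ keeps the wrapper transparent to callers that expect a plain DB-API connection, which is the same trick the patch relies on when threading the wrapper through existing call sites.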
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index ae2887b7d2f4..7e12f5440cc4 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -489,7 +489,7 @@ class Porter(object): hs = MockHomeserver(self.hs_config) - with make_conn(db_config, engine) as db_conn: + with make_conn(db_config, engine, "portdb") as db_conn: engine.check_database( db_conn, allow_outdated_version=allow_outdated_version ) diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 8bb0b142ca5b..f6f7b2bf42cb 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -272,6 +272,11 @@ def handle_sighup(*args, **kwargs): hs.get_datastore().db_pool.start_profiling() hs.get_pusherpool().start() + # Log when we start the shut down process. + hs.get_reactor().addSystemEventTrigger( + "before", "shutdown", logger.info, "Shutting down..." + ) + setup_sentry(hs) setup_sdnotify(hs) diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 79ec8f119df7..0d9d9b7cc07a 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -32,6 +32,7 @@ overload, ) +import attr from prometheus_client import Histogram from typing_extensions import Literal @@ -90,13 +91,17 @@ def make_pool( return adbapi.ConnectionPool( db_config.config["name"], cp_reactor=reactor, - cp_openfun=engine.on_new_connection, + cp_openfun=lambda conn: engine.on_new_connection( + LoggingDatabaseConnection(conn, engine, "on_new_connection") + ), **db_config.config.get("args", {}) ) def make_conn( - db_config: DatabaseConnectionConfig, engine: BaseDatabaseEngine + db_config: DatabaseConnectionConfig, + engine: BaseDatabaseEngine, + default_txn_name: str, ) -> Connection: """Make a new connection to the database and return it. @@ -109,11 +114,60 @@ def make_conn( for k, v in db_config.config.get("args", {}).items() if not k.startswith("cp_") } - db_conn = engine.module.connect(**db_params) + native_db_conn = engine.module.connect(**db_params) + db_conn = LoggingDatabaseConnection(native_db_conn, engine, default_txn_name) + engine.on_new_connection(db_conn) return db_conn +@attr.s(slots=True) +class LoggingDatabaseConnection: + """A wrapper around a database connection that returns `LoggingTransaction` + as its cursor class. + + This is mainly used on startup to ensure that queries get logged correctly + """ + + conn = attr.ib(type=Connection) + engine = attr.ib(type=BaseDatabaseEngine) + default_txn_name = attr.ib(type=str) + + def cursor( + self, *, txn_name=None, after_callbacks=None, exception_callbacks=None + ) -> "LoggingTransaction": + if not txn_name: + txn_name = self.default_txn_name + + return LoggingTransaction( + self.conn.cursor(), + name=txn_name, + database_engine=self.engine, + after_callbacks=after_callbacks, + exception_callbacks=exception_callbacks, + ) + + def close(self) -> None: + self.conn.close() + + def commit(self) -> None: + self.conn.commit() + + def rollback(self, *args, **kwargs) -> None: + self.conn.rollback(*args, **kwargs) + + def __enter__(self) -> "Connection": + self.conn.__enter__() + return self + + def __exit__(self, exc_type, exc_value, traceback) -> bool: + return self.conn.__exit__(exc_type, exc_value, traceback) + + # Proxy through any unknown lookups to the DB conn class. + def __getattr__(self, name): + return getattr(self.conn, name) + + # The type of entry which goes on our after_callbacks and exception_callbacks lists. 
# # Python 3.5.2 doesn't support Callable with an ellipsis, so we wrap it in quotes so @@ -247,6 +301,12 @@ def _do_execute(self, func, sql: str, *args: Any) -> None: def close(self) -> None: self.txn.close() + def __enter__(self) -> "LoggingTransaction": + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + class PerformanceCounters: def __init__(self): @@ -395,7 +455,7 @@ def loop(): def new_transaction( self, - conn: Connection, + conn: LoggingDatabaseConnection, desc: str, after_callbacks: List[_CallbackListEntry], exception_callbacks: List[_CallbackListEntry], @@ -418,12 +478,10 @@ def new_transaction( i = 0 N = 5 while True: - cursor = LoggingTransaction( - conn.cursor(), - name, - self.engine, - after_callbacks, - exception_callbacks, + cursor = conn.cursor( + txn_name=name, + after_callbacks=after_callbacks, + exception_callbacks=exception_callbacks, ) try: r = func(cursor, *args, **kwargs) @@ -584,7 +642,10 @@ def inner_func(conn, *args, **kwargs): logger.debug("Reconnecting closed database connection") conn.reconnect() - return func(conn, *args, **kwargs) + db_conn = LoggingDatabaseConnection( + conn, self.engine, "runWithConnection" + ) + return func(db_conn, *args, **kwargs) return await make_deferred_yieldable( self._db_pool.runWithConnection(inner_func, *args, **kwargs) @@ -1621,7 +1682,7 @@ def simple_delete_many_txn( def get_cache_dict( self, - db_conn: Connection, + db_conn: LoggingDatabaseConnection, table: str, entity_column: str, stream_column: str, @@ -1642,9 +1703,7 @@ def get_cache_dict( "limit": limit, } - sql = self.engine.convert_param_style(sql) - - txn = db_conn.cursor() + txn = db_conn.cursor(txn_name="get_cache_dict") txn.execute(sql, (int(max_value),)) cache = {row[0]: int(row[1]) for row in txn} diff --git a/synapse/storage/databases/__init__.py b/synapse/storage/databases/__init__.py index aa5d490624ce..0c243250117e 100644 --- a/synapse/storage/databases/__init__.py +++ b/synapse/storage/databases/__init__.py @@ -46,7 +46,7 @@ def __init__(self, main_store_class, hs): db_name = database_config.name engine = create_engine(database_config.config) - with make_conn(database_config, engine) as db_conn: + with make_conn(database_config, engine, "startup") as db_conn: logger.info("[database config %r]: Checking database server", db_name) engine.check_database(db_conn) diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index f823d66709e2..9b16f45f3eff 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -284,7 +284,6 @@ def _get_active_presence(self, db_conn): " last_user_sync_ts, status_msg, currently_active FROM presence_stream" " WHERE state != ?" 
) - sql = self.database_engine.convert_param_style(sql) txn = db_conn.cursor() txn.execute(sql, (PresenceState.OFFLINE,)) diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 62f1738732f3..80f3b4d74007 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -20,7 +20,7 @@ import attr from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.storage._base import LoggingTransaction, SQLBaseStore, db_to_json +from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import DatabasePool from synapse.util import json_encoder from synapse.util.caches.descriptors import cached @@ -74,11 +74,7 @@ def __init__(self, database: DatabasePool, db_conn, hs): self.stream_ordering_month_ago = None self.stream_ordering_day_ago = None - cur = LoggingTransaction( - db_conn.cursor(), - name="_find_stream_orderings_for_times_txn", - database_engine=self.database_engine, - ) + cur = db_conn.cursor(txn_name="_find_stream_orderings_for_times_txn") self._find_stream_orderings_for_times_txn(cur) cur.close() diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index b2127598efea..c66f558567b9 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -214,7 +214,6 @@ def __init__(self, database: DatabasePool, db_conn, hs): self._mau_stats_only = hs.config.mau_stats_only # Do not add more reserved users than the total allowable number - # cur = LoggingTransaction( self.db_pool.new_transaction( db_conn, "initialise_mau_threepids", diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 86ffe2479ed7..bae1bd22d328 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -21,12 +21,7 @@ from synapse.events.snapshot import EventContext from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.storage._base import ( - LoggingTransaction, - SQLBaseStore, - db_to_json, - make_in_list_sql_clause, -) +from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause from synapse.storage.database import DatabasePool from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.engines import Sqlite3Engine @@ -60,10 +55,8 @@ def __init__(self, database: DatabasePool, db_conn, hs): # background update still running? 
self._current_state_events_membership_up_to_date = False - txn = LoggingTransaction( - db_conn.cursor(), - name="_check_safe_current_state_events_membership_updated", - database_engine=self.database_engine, + txn = db_conn.cursor( + txn_name="_check_safe_current_state_events_membership_updated" ) self._check_safe_current_state_events_membership_updated_txn(txn) txn.close() diff --git a/synapse/storage/databases/main/schema/delta/20/pushers.py b/synapse/storage/databases/main/schema/delta/20/pushers.py index 3edfcfd78320..45b846e6a7d5 100644 --- a/synapse/storage/databases/main/schema/delta/20/pushers.py +++ b/synapse/storage/databases/main/schema/delta/20/pushers.py @@ -66,16 +66,15 @@ def run_create(cur, database_engine, *args, **kwargs): row[8] = bytes(row[8]).decode("utf-8") row[11] = bytes(row[11]).decode("utf-8") cur.execute( - database_engine.convert_param_style( - """ - INSERT into pushers2 ( - id, user_name, access_token, profile_tag, kind, - app_id, app_display_name, device_display_name, - pushkey, ts, lang, data, last_token, last_success, - failing_since - ) values (%s)""" - % (",".join(["?" for _ in range(len(row))])) - ), + """ + INSERT into pushers2 ( + id, user_name, access_token, profile_tag, kind, + app_id, app_display_name, device_display_name, + pushkey, ts, lang, data, last_token, last_success, + failing_since + ) values (%s) + """ + % (",".join(["?" for _ in range(len(row))])), row, ) count += 1 diff --git a/synapse/storage/databases/main/schema/delta/25/fts.py b/synapse/storage/databases/main/schema/delta/25/fts.py index ee675e71ffcf..21f57825d4ed 100644 --- a/synapse/storage/databases/main/schema/delta/25/fts.py +++ b/synapse/storage/databases/main/schema/delta/25/fts.py @@ -71,8 +71,6 @@ def run_create(cur, database_engine, *args, **kwargs): " VALUES (?, ?)" ) - sql = database_engine.convert_param_style(sql) - cur.execute(sql, ("event_search", progress_json)) diff --git a/synapse/storage/databases/main/schema/delta/27/ts.py b/synapse/storage/databases/main/schema/delta/27/ts.py index b7972cfa8ea3..1c6058063fb6 100644 --- a/synapse/storage/databases/main/schema/delta/27/ts.py +++ b/synapse/storage/databases/main/schema/delta/27/ts.py @@ -50,8 +50,6 @@ def run_create(cur, database_engine, *args, **kwargs): " VALUES (?, ?)" ) - sql = database_engine.convert_param_style(sql) - cur.execute(sql, ("event_origin_server_ts", progress_json)) diff --git a/synapse/storage/databases/main/schema/delta/30/as_users.py b/synapse/storage/databases/main/schema/delta/30/as_users.py index b42c02710a5b..7f08fabe9f23 100644 --- a/synapse/storage/databases/main/schema/delta/30/as_users.py +++ b/synapse/storage/databases/main/schema/delta/30/as_users.py @@ -59,9 +59,7 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs): user_chunks = (user_ids[i : i + 100] for i in range(0, len(user_ids), n)) for chunk in user_chunks: cur.execute( - database_engine.convert_param_style( - "UPDATE users SET appservice_id = ? WHERE name IN (%s)" - % (",".join("?" for _ in chunk),) - ), + "UPDATE users SET appservice_id = ? WHERE name IN (%s)" + % (",".join("?" 
for _ in chunk),), [as_id] + chunk, ) diff --git a/synapse/storage/databases/main/schema/delta/31/pushers.py b/synapse/storage/databases/main/schema/delta/31/pushers.py index 9bb504aad5fa..5be81c806a28 100644 --- a/synapse/storage/databases/main/schema/delta/31/pushers.py +++ b/synapse/storage/databases/main/schema/delta/31/pushers.py @@ -65,16 +65,15 @@ def run_create(cur, database_engine, *args, **kwargs): row = list(row) row[12] = token_to_stream_ordering(row[12]) cur.execute( - database_engine.convert_param_style( - """ - INSERT into pushers2 ( - id, user_name, access_token, profile_tag, kind, - app_id, app_display_name, device_display_name, - pushkey, ts, lang, data, last_stream_ordering, last_success, - failing_since - ) values (%s)""" - % (",".join(["?" for _ in range(len(row))])) - ), + """ + INSERT into pushers2 ( + id, user_name, access_token, profile_tag, kind, + app_id, app_display_name, device_display_name, + pushkey, ts, lang, data, last_stream_ordering, last_success, + failing_since + ) values (%s) + """ + % (",".join(["?" for _ in range(len(row))])), row, ) count += 1 diff --git a/synapse/storage/databases/main/schema/delta/31/search_update.py b/synapse/storage/databases/main/schema/delta/31/search_update.py index 63b757ade6dc..b84c844e3af4 100644 --- a/synapse/storage/databases/main/schema/delta/31/search_update.py +++ b/synapse/storage/databases/main/schema/delta/31/search_update.py @@ -55,8 +55,6 @@ def run_create(cur, database_engine, *args, **kwargs): " VALUES (?, ?)" ) - sql = database_engine.convert_param_style(sql) - cur.execute(sql, ("event_search_order", progress_json)) diff --git a/synapse/storage/databases/main/schema/delta/33/event_fields.py b/synapse/storage/databases/main/schema/delta/33/event_fields.py index a3e81eeac70a..e928c66a8f2d 100644 --- a/synapse/storage/databases/main/schema/delta/33/event_fields.py +++ b/synapse/storage/databases/main/schema/delta/33/event_fields.py @@ -50,8 +50,6 @@ def run_create(cur, database_engine, *args, **kwargs): " VALUES (?, ?)" ) - sql = database_engine.convert_param_style(sql) - cur.execute(sql, ("event_fields_sender_url", progress_json)) diff --git a/synapse/storage/databases/main/schema/delta/33/remote_media_ts.py b/synapse/storage/databases/main/schema/delta/33/remote_media_ts.py index a26057dfb6ef..ad875c733a9c 100644 --- a/synapse/storage/databases/main/schema/delta/33/remote_media_ts.py +++ b/synapse/storage/databases/main/schema/delta/33/remote_media_ts.py @@ -23,8 +23,5 @@ def run_create(cur, database_engine, *args, **kwargs): def run_upgrade(cur, database_engine, *args, **kwargs): cur.execute( - database_engine.convert_param_style( - "UPDATE remote_media_cache SET last_access_ts = ?" 
- ), - (int(time.time() * 1000),), + "UPDATE remote_media_cache SET last_access_ts = ?", (int(time.time() * 1000),), ) diff --git a/synapse/storage/databases/main/schema/delta/56/unique_user_filter_index.py b/synapse/storage/databases/main/schema/delta/56/unique_user_filter_index.py index 1de8b549619f..bb7296852a61 100644 --- a/synapse/storage/databases/main/schema/delta/56/unique_user_filter_index.py +++ b/synapse/storage/databases/main/schema/delta/56/unique_user_filter_index.py @@ -1,6 +1,8 @@ import logging +from io import StringIO from synapse.storage.engines import PostgresEngine +from synapse.storage.prepare_database import execute_statements_from_stream logger = logging.getLogger(__name__) @@ -46,7 +48,4 @@ def run_create(cur, database_engine, *args, **kwargs): select_clause, ) - if isinstance(database_engine, PostgresEngine): - cur.execute(sql) - else: - cur.executescript(sql) + execute_statements_from_stream(cur, StringIO(sql)) diff --git a/synapse/storage/databases/main/schema/delta/57/local_current_membership.py b/synapse/storage/databases/main/schema/delta/57/local_current_membership.py index 63b5acdcf7ab..44917f0a2ef3 100644 --- a/synapse/storage/databases/main/schema/delta/57/local_current_membership.py +++ b/synapse/storage/databases/main/schema/delta/57/local_current_membership.py @@ -68,7 +68,6 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs): INNER JOIN room_memberships AS r USING (event_id) WHERE type = 'm.room.member' AND state_key LIKE ? """ - sql = database_engine.convert_param_style(sql) cur.execute(sql, ("%:" + config.server_name,)) cur.execute( diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index 4957e77f4c27..459754feabf7 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -13,7 +13,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import imp import logging import os @@ -24,9 +23,10 @@ import attr from synapse.config.homeserver import HomeServerConfig +from synapse.storage.database import LoggingDatabaseConnection from synapse.storage.engines import BaseDatabaseEngine from synapse.storage.engines.postgres import PostgresEngine -from synapse.storage.types import Connection, Cursor +from synapse.storage.types import Cursor from synapse.types import Collection logger = logging.getLogger(__name__) @@ -67,7 +67,7 @@ class UpgradeDatabaseException(PrepareDatabaseException): def prepare_database( - db_conn: Connection, + db_conn: LoggingDatabaseConnection, database_engine: BaseDatabaseEngine, config: Optional[HomeServerConfig], databases: Collection[str] = ["main", "state"], @@ -89,7 +89,7 @@ def prepare_database( """ try: - cur = db_conn.cursor() + cur = db_conn.cursor(txn_name="prepare_database") # sqlite does not automatically start transactions for DDL / SELECT statements, # so we start one before running anything. This ensures that any upgrades @@ -258,9 +258,7 @@ def _setup_new_database(cur, database_engine, databases): executescript(cur, entry.absolute_path) cur.execute( - database_engine.convert_param_style( - "INSERT INTO schema_version (version, upgraded) VALUES (?,?)" - ), + "INSERT INTO schema_version (version, upgraded) VALUES (?,?)", (max_current_ver, False), ) @@ -486,17 +484,13 @@ def _upgrade_existing_database( # Mark as done. 
cur.execute( - database_engine.convert_param_style( - "INSERT INTO applied_schema_deltas (version, file) VALUES (?,?)" - ), + "INSERT INTO applied_schema_deltas (version, file) VALUES (?,?)", (v, relative_path), ) cur.execute("DELETE FROM schema_version") cur.execute( - database_engine.convert_param_style( - "INSERT INTO schema_version (version, upgraded) VALUES (?,?)" - ), + "INSERT INTO schema_version (version, upgraded) VALUES (?,?)", (v, True), ) @@ -532,10 +526,7 @@ def _apply_module_schema_files(cur, database_engine, modname, names_and_streams) schemas to be applied """ cur.execute( - database_engine.convert_param_style( - "SELECT file FROM applied_module_schemas WHERE module_name = ?" - ), - (modname,), + "SELECT file FROM applied_module_schemas WHERE module_name = ?", (modname,), ) applied_deltas = {d for d, in cur} for (name, stream) in names_and_streams: @@ -553,9 +544,7 @@ def _apply_module_schema_files(cur, database_engine, modname, names_and_streams) # Mark as done. cur.execute( - database_engine.convert_param_style( - "INSERT INTO applied_module_schemas (module_name, file) VALUES (?,?)" - ), + "INSERT INTO applied_module_schemas (module_name, file) VALUES (?,?)", (modname, name), ) @@ -627,9 +616,7 @@ def _get_or_create_schema_state(txn, database_engine): if current_version: txn.execute( - database_engine.convert_param_style( - "SELECT file FROM applied_schema_deltas WHERE version >= ?" - ), + "SELECT file FROM applied_schema_deltas WHERE version >= ?", (current_version,), ) applied_deltas = [d for d, in txn] diff --git a/synapse/storage/types.py b/synapse/storage/types.py index 2d2b560e748e..970bb1b9da35 100644 --- a/synapse/storage/types.py +++ b/synapse/storage/types.py @@ -61,3 +61,9 @@ def commit(self) -> None: def rollback(self, *args, **kwargs) -> None: ... + + def __enter__(self) -> "Connection": + ... + + def __exit__(self, exc_type, exc_value, traceback) -> bool: + ... diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index c92cd4a6bae3..51f680d05d81 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -54,7 +54,7 @@ def _load_current_id(db_conn, table, column, step=1): """ # debug logging for https://github.com/matrix-org/synapse/issues/7968 logger.info("initialising stream generator for %s(%s)", table, column) - cur = db_conn.cursor() + cur = db_conn.cursor(txn_name="_load_current_id") if step == 1: cur.execute("SELECT MAX(%s) FROM %s" % (column, table)) else: @@ -269,7 +269,7 @@ def __init__( def _load_current_ids( self, db_conn, table: str, instance_column: str, id_column: str ): - cur = db_conn.cursor() + cur = db_conn.cursor(txn_name="_load_current_ids") # Load the current positions of all writers for the stream. if self._writers: @@ -283,15 +283,12 @@ def _load_current_ids( stream_name = ? AND instance_name != ALL(?) """ - sql = self._db.engine.convert_param_style(sql) cur.execute(sql, (self._stream_name, self._writers)) sql = """ SELECT instance_name, stream_id FROM stream_positions WHERE stream_name = ? 
""" - sql = self._db.engine.convert_param_style(sql) - cur.execute(sql, (self._stream_name,)) self._current_positions = { @@ -340,7 +337,6 @@ def _load_current_ids( "instance": instance_column, "cmp": "<=" if self._positive else ">=", } - sql = self._db.engine.convert_param_style(sql) cur.execute(sql, (min_stream_id * self._return_factor,)) self._persisted_upto_position = min_stream_id diff --git a/synapse/storage/util/sequence.py b/synapse/storage/util/sequence.py index 2dd95e270920..ff2d038ad2d2 100644 --- a/synapse/storage/util/sequence.py +++ b/synapse/storage/util/sequence.py @@ -17,6 +17,7 @@ import threading from typing import Callable, List, Optional +from synapse.storage.database import LoggingDatabaseConnection from synapse.storage.engines import ( BaseDatabaseEngine, IncorrectDatabaseSetup, @@ -53,7 +54,11 @@ def get_next_id_txn(self, txn: Cursor) -> int: @abc.abstractmethod def check_consistency( - self, db_conn: Connection, table: str, id_column: str, positive: bool = True + self, + db_conn: LoggingDatabaseConnection, + table: str, + id_column: str, + positive: bool = True, ): """Should be called during start up to test that the current value of the sequence is greater than or equal to the maximum ID in the table. @@ -82,9 +87,13 @@ def get_next_mult_txn(self, txn: Cursor, n: int) -> List[int]: return [i for (i,) in txn] def check_consistency( - self, db_conn: Connection, table: str, id_column: str, positive: bool = True + self, + db_conn: LoggingDatabaseConnection, + table: str, + id_column: str, + positive: bool = True, ): - txn = db_conn.cursor() + txn = db_conn.cursor(txn_name="sequence.check_consistency") # First we get the current max ID from the table. table_sql = "SELECT GREATEST(%(agg)s(%(id)s), 0) FROM %(table)s" % { diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index 46f94914ffb4..c905a3893075 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -58,7 +58,7 @@ def setUp(self): # must be done after inserts database = hs.get_datastores().databases[0] self.store = ApplicationServiceStore( - database, make_conn(database._database_config, database.engine), hs + database, make_conn(database._database_config, database.engine, "test"), hs ) def tearDown(self): @@ -132,7 +132,7 @@ def setUp(self): db_config = hs.config.get_single_database() self.store = TestTransactionStore( - database, make_conn(db_config, self.engine), hs + database, make_conn(db_config, self.engine, "test"), hs ) def _add_service(self, url, as_token, id): @@ -448,7 +448,7 @@ def test_unique_works(self): database = hs.get_datastores().databases[0] ApplicationServiceStore( - database, make_conn(database._database_config, database.engine), hs + database, make_conn(database._database_config, database.engine, "test"), hs ) @defer.inlineCallbacks @@ -467,7 +467,9 @@ def test_duplicate_ids(self): with self.assertRaises(ConfigError) as cm: database = hs.get_datastores().databases[0] ApplicationServiceStore( - database, make_conn(database._database_config, database.engine), hs + database, + make_conn(database._database_config, database.engine, "test"), + hs, ) e = cm.exception @@ -491,7 +493,9 @@ def test_duplicate_as_tokens(self): with self.assertRaises(ConfigError) as cm: database = hs.get_datastores().databases[0] ApplicationServiceStore( - database, make_conn(database._database_config, database.engine), hs + database, + make_conn(database._database_config, database.engine, "test"), + hs, ) e = cm.exception diff --git a/tests/utils.py 
b/tests/utils.py index 7a927c7f7421..af563ffe0f5d 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -38,6 +38,7 @@ from synapse.logging.context import current_context, set_current_context from synapse.server import HomeServer from synapse.storage import DataStore +from synapse.storage.database import LoggingDatabaseConnection from synapse.storage.engines import PostgresEngine, create_engine from synapse.storage.prepare_database import prepare_database from synapse.util.ratelimitutils import FederationRateLimiter @@ -88,6 +89,7 @@ def setupdb(): host=POSTGRES_HOST, password=POSTGRES_PASSWORD, ) + db_conn = LoggingDatabaseConnection(db_conn, db_engine, "tests") prepare_database(db_conn, db_engine, None) db_conn.close() From c5251c6fbd2722d54d33e02021f286053e611efc Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 5 Oct 2020 09:28:05 -0400 Subject: [PATCH 117/245] Do not assume that account data is of the correct form. (#8454) This fixes a bug where `m.ignored_user_list` was assumed to be a dict, leading to odd behavior for users who set it to something else. --- changelog.d/8454.bugfix | 1 + synapse/api/constants.py | 5 +++++ synapse/handlers/room_member.py | 6 +++--- synapse/handlers/sync.py | 19 +++++++++++-------- .../storage/databases/main/account_data.py | 9 +++++++-- synapse/visibility.py | 15 +++++++-------- 6 files changed, 34 insertions(+), 21 deletions(-) create mode 100644 changelog.d/8454.bugfix diff --git a/changelog.d/8454.bugfix b/changelog.d/8454.bugfix new file mode 100644 index 000000000000..c06d490b6f15 --- /dev/null +++ b/changelog.d/8454.bugfix @@ -0,0 +1 @@ +Fix a longstanding bug where invalid ignored users in account data could break clients. diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 46013cde15a4..592abd844b8a 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -155,3 +155,8 @@ class EventContentFields: class RoomEncryptionAlgorithms: MEGOLM_V1_AES_SHA2 = "m.megolm.v1.aes-sha2" DEFAULT = MEGOLM_V1_AES_SHA2 + + +class AccountDataTypes: + DIRECT = "m.direct" + IGNORED_USER_LIST = "m.ignored_user_list" diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 5ec36f591d78..567a14bd0af5 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -22,7 +22,7 @@ from unpaddedbase64 import encode_base64 from synapse import types -from synapse.api.constants import MAX_DEPTH, EventTypes, Membership +from synapse.api.constants import MAX_DEPTH, AccountDataTypes, EventTypes, Membership from synapse.api.errors import ( AuthError, Codes, @@ -247,7 +247,7 @@ async def copy_room_tags_and_direct_to_room( user_account_data, _ = await self.store.get_account_data_for_user(user_id) # Copy direct message state if applicable - direct_rooms = user_account_data.get("m.direct", {}) + direct_rooms = user_account_data.get(AccountDataTypes.DIRECT, {}) # Check which key this room is under if isinstance(direct_rooms, dict): @@ -258,7 +258,7 @@ async def copy_room_tags_and_direct_to_room( # Save back to user's m.direct account data await self.store.add_account_data_for_user( - user_id, "m.direct", direct_rooms + user_id, AccountDataTypes.DIRECT, direct_rooms ) break diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 260ec19b4169..a998e6b7f63d 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -21,7 +21,7 @@ import attr from prometheus_client import Counter -from synapse.api.constants import EventTypes, Membership +from synapse.api.constants import 
AccountDataTypes, EventTypes, Membership from synapse.api.filtering import FilterCollection from synapse.events import EventBase from synapse.logging.context import current_context @@ -1378,13 +1378,16 @@ async def _generate_sync_entry_for_rooms( return set(), set(), set(), set() ignored_account_data = await self.store.get_global_account_data_by_type_for_user( - "m.ignored_user_list", user_id=user_id + AccountDataTypes.IGNORED_USER_LIST, user_id=user_id ) + # If there is ignored users account data and it matches the proper type, + # then use it. + ignored_users = frozenset() # type: FrozenSet[str] if ignored_account_data: - ignored_users = ignored_account_data.get("ignored_users", {}).keys() - else: - ignored_users = frozenset() + ignored_users_data = ignored_account_data.get("ignored_users", {}) + if isinstance(ignored_users_data, dict): + ignored_users = frozenset(ignored_users_data.keys()) if since_token: room_changes = await self._get_rooms_changed( @@ -1478,7 +1481,7 @@ async def _have_rooms_changed( return False async def _get_rooms_changed( - self, sync_result_builder: "SyncResultBuilder", ignored_users: Set[str] + self, sync_result_builder: "SyncResultBuilder", ignored_users: FrozenSet[str] ) -> _RoomChanges: """Gets the the changes that have happened since the last sync. """ @@ -1690,7 +1693,7 @@ async def _get_rooms_changed( return _RoomChanges(room_entries, invited, newly_joined_rooms, newly_left_rooms) async def _get_all_rooms( - self, sync_result_builder: "SyncResultBuilder", ignored_users: Set[str] + self, sync_result_builder: "SyncResultBuilder", ignored_users: FrozenSet[str] ) -> _RoomChanges: """Returns entries for all rooms for the user. @@ -1764,7 +1767,7 @@ async def _get_all_rooms( async def _generate_room_entry( self, sync_result_builder: "SyncResultBuilder", - ignored_users: Set[str], + ignored_users: FrozenSet[str], room_builder: "RoomSyncResultBuilder", ephemeral: List[JsonDict], tags: Optional[Dict[str, Dict[str, Any]]], diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index ef81d73573f0..49ee23470d61 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -18,6 +18,7 @@ import logging from typing import Dict, List, Optional, Tuple +from synapse.api.constants import AccountDataTypes from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import DatabasePool from synapse.storage.util.id_generators import StreamIdGenerator @@ -291,14 +292,18 @@ async def is_ignored_by( self, ignored_user_id: str, ignorer_user_id: str, cache_context: _CacheContext ) -> bool: ignored_account_data = await self.get_global_account_data_by_type_for_user( - "m.ignored_user_list", + AccountDataTypes.IGNORED_USER_LIST, ignorer_user_id, on_invalidate=cache_context.invalidate, ) if not ignored_account_data: return False - return ignored_user_id in ignored_account_data.get("ignored_users", {}) + try: + return ignored_user_id in ignored_account_data.get("ignored_users", {}) + except TypeError: + # The type of the ignored_users field is invalid. 
+ return False class AccountDataStore(AccountDataWorkerStore): diff --git a/synapse/visibility.py b/synapse/visibility.py index e3da7744d20f..527365498e68 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -16,7 +16,7 @@ import logging import operator -from synapse.api.constants import EventTypes, Membership +from synapse.api.constants import AccountDataTypes, EventTypes, Membership from synapse.events.utils import prune_event from synapse.storage import Storage from synapse.storage.state import StateFilter @@ -77,15 +77,14 @@ async def filter_events_for_client( ) ignore_dict_content = await storage.main.get_global_account_data_by_type_for_user( - "m.ignored_user_list", user_id + AccountDataTypes.IGNORED_USER_LIST, user_id ) - # FIXME: This will explode if people upload something incorrect. - ignore_list = frozenset( - ignore_dict_content.get("ignored_users", {}).keys() - if ignore_dict_content - else [] - ) + ignore_list = frozenset() + if ignore_dict_content: + ignored_users_dict = ignore_dict_content.get("ignored_users", {}) + if isinstance(ignored_users_dict, dict): + ignore_list = frozenset(ignored_users_dict.keys()) erased_senders = await storage.main.are_users_erased((e.sender for e in events)) From f64c6aae68932df95a98a75fb707450260b614df Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 5 Oct 2020 09:40:19 -0400 Subject: [PATCH 118/245] Update manhole documentation for async/await. (#8462) --- changelog.d/8462.doc | 1 + docs/manhole.md | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 changelog.d/8462.doc diff --git a/changelog.d/8462.doc b/changelog.d/8462.doc new file mode 100644 index 000000000000..cf84db6db7f2 --- /dev/null +++ b/changelog.d/8462.doc @@ -0,0 +1 @@ +Update the directions for using the manhole with coroutines. diff --git a/docs/manhole.md b/docs/manhole.md index 7375f5ad46c2..75b6ae40e0cb 100644 --- a/docs/manhole.md +++ b/docs/manhole.md @@ -35,9 +35,12 @@ This gives a Python REPL in which `hs` gives access to the `synapse.server.HomeServer` object - which in turn gives access to many other parts of the process. +Note that any call which returns a coroutine will need to be wrapped in `ensureDeferred`. + As a simple example, retrieving an event from the database: -``` ->>> hs.get_datastore().get_event('$1416420717069yeQaw:matrix.org') +```pycon +>>> from twisted.internet import defer +>>> defer.ensureDeferred(hs.get_datastore().get_event('$1416420717069yeQaw:matrix.org')) > ``` From f31f8e63198cfe46af48d788dbb294aba9155e5a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 5 Oct 2020 14:43:14 +0100 Subject: [PATCH 119/245] Remove stream ordering from Metadata dict (#8452) There's no need for it to be in the dict as well as the events table. Instead, we store it in a separate attribute in the EventInternalMetadata object, and populate that on load. This means that we can rely on it being correctly populated for any event which has been persited to the database. 
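As an aside on the approach described above, the sketch below (hypothetical names, not Synapse's actual `_EventInternalMetadata` implementation) contrasts a dict-backed property, which round-trips through the serialised metadata, with a plain attribute that stays `None` until it is filled in from the `events` table at load time.

```python
from typing import Optional


class MetadataSketch:
    """Hypothetical stand-in for an event's internal-metadata object (illustration only)."""

    __slots__ = ["_dict", "stream_ordering"]

    def __init__(self, metadata_dict: dict):
        # everything in _dict is serialised to/from the database as JSON
        self._dict = dict(metadata_dict)

        # deliberately *not* part of _dict: None until the event has been
        # persisted, then populated from the `events` table on load
        self.stream_ordering = None  # type: Optional[int]

    @property
    def outlier(self) -> bool:
        # dict-backed flag, so it survives serialisation
        return self._dict.get("outlier", False)


meta = MetadataSketch({"outlier": True})
assert meta.outlier
assert meta.stream_ordering is None   # not persisted yet
meta.stream_ordering = 42             # set by the persistence/load path
```

Keeping the value out of the serialised dict means calling code can assert that any persisted event carries a real stream ordering, rather than trusting whatever happened to be stored in the JSON.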
--- changelog.d/8452.misc | 1 + synapse/events/__init__.py | 6 +++-- synapse/events/utils.py | 5 ++++ synapse/federation/sender/__init__.py | 2 ++ .../sender/per_destination_queue.py | 2 ++ synapse/handlers/federation.py | 3 +++ synapse/handlers/message.py | 4 ++- synapse/handlers/room_member.py | 13 +++++----- synapse/rest/admin/__init__.py | 5 +++- synapse/storage/databases/main/events.py | 4 +++ .../storage/databases/main/events_worker.py | 26 ++++++++++++------- synapse/storage/databases/main/stream.py | 13 ---------- synapse/storage/persist_events.py | 2 ++ 13 files changed, 53 insertions(+), 33 deletions(-) create mode 100644 changelog.d/8452.misc diff --git a/changelog.d/8452.misc b/changelog.d/8452.misc new file mode 100644 index 000000000000..8288d91c78b1 --- /dev/null +++ b/changelog.d/8452.misc @@ -0,0 +1 @@ +Remove redundant databae loads of stream_ordering for events we already have. diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index dc49df08122e..7a51d0a22fe9 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -97,13 +97,16 @@ def __get__(self, instance, owner=None): class _EventInternalMetadata: - __slots__ = ["_dict"] + __slots__ = ["_dict", "stream_ordering"] def __init__(self, internal_metadata_dict: JsonDict): # we have to copy the dict, because it turns out that the same dict is # reused. TODO: fix that self._dict = dict(internal_metadata_dict) + # the stream ordering of this event. None, until it has been persisted. + self.stream_ordering = None # type: Optional[int] + outlier = DictProperty("outlier") # type: bool out_of_band_membership = DictProperty("out_of_band_membership") # type: bool send_on_behalf_of = DictProperty("send_on_behalf_of") # type: str @@ -113,7 +116,6 @@ def __init__(self, internal_metadata_dict: JsonDict): redacted = DictProperty("redacted") # type: bool txn_id = DictProperty("txn_id") # type: str token_id = DictProperty("token_id") # type: str - stream_ordering = DictProperty("stream_ordering") # type: int # XXX: These are set by StreamWorkerStore._set_before_and_after. # I'm pretty sure that these are never persisted to the database, so shouldn't diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 32c73d34139d..355cbe05f13b 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -49,6 +49,11 @@ def prune_event(event: EventBase) -> EventBase: pruned_event_dict, event.room_version, event.internal_metadata.get_dict() ) + # copy the internal fields + pruned_event.internal_metadata.stream_ordering = ( + event.internal_metadata.stream_ordering + ) + # Mark the event as redacted pruned_event.internal_metadata.redacted = True diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 8bb17b3a05d3..e33b29a42c60 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -297,6 +297,8 @@ async def _send_pdu(self, pdu: EventBase, destinations: Iterable[str]) -> None: sent_pdus_destination_dist_total.inc(len(destinations)) sent_pdus_destination_dist_count.inc() + assert pdu.internal_metadata.stream_ordering + # track the fact that we have a PDU for these destinations, # to allow us to perform catch-up later on if the remote is unreachable # for a while. 
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index bc99af3fdd80..db8e456fe8dc 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -158,6 +158,7 @@ def send_pdu(self, pdu: EventBase) -> None: # yet know if we have anything to catch up (None) self._pending_pdus.append(pdu) else: + assert pdu.internal_metadata.stream_ordering self._catchup_last_skipped = pdu.internal_metadata.stream_ordering self.attempt_new_transaction() @@ -361,6 +362,7 @@ async def _transaction_transmission_loop(self) -> None: last_successful_stream_ordering = ( final_pdu.internal_metadata.stream_ordering ) + assert last_successful_stream_ordering await self._store.set_destination_last_successful_stream_ordering( self._destination, last_successful_stream_ordering ) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 1a8144405ac0..5ac2fc56567b 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -3008,6 +3008,9 @@ async def _notify_persisted_event( elif event.internal_metadata.is_outlier(): return + # the event has been persisted so it should have a stream ordering. + assert event.internal_metadata.stream_ordering + event_pos = PersistedEventPosition( self._instance_name, event.internal_metadata.stream_ordering ) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index ee271e85e551..00513fbf37c4 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -682,7 +682,9 @@ async def send_nonmember_event( event.event_id, prev_event.event_id, ) - return await self.store.get_stream_id_for_event(prev_event.event_id) + # we know it was persisted, so must have a stream ordering + assert prev_event.internal_metadata.stream_ordering + return prev_event.internal_metadata.stream_ordering return await self.handle_new_client_event( requester=requester, event=event, context=context, ratelimit=ratelimit diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 567a14bd0af5..13b749b7cb1c 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -194,8 +194,9 @@ async def _local_membership_update( ) if duplicate is not None: # Discard the new event since this membership change is a no-op. - _, stream_id = await self.store.get_event_ordering(duplicate.event_id) - return duplicate.event_id, stream_id + # we know it was persisted, so must have a stream ordering. + assert duplicate.internal_metadata.stream_ordering + return duplicate.event_id, duplicate.internal_metadata.stream_ordering prev_state_ids = await context.get_prev_state_ids() @@ -441,12 +442,12 @@ async def _update_membership( same_membership = old_membership == effective_membership_state same_sender = requester.user.to_string() == old_state.sender if same_sender and same_membership and same_content: - _, stream_id = await self.store.get_event_ordering( - old_state.event_id - ) + # duplicate event. + # we know it was persisted, so must have a stream ordering. 
+ assert old_state.internal_metadata.stream_ordering return ( old_state.event_id, - stream_id, + old_state.internal_metadata.stream_ordering, ) if old_membership in ["ban", "leave"] and action == "kick": diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 57cac22252f7..789431ef25b2 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -57,6 +57,7 @@ UsersRestServletV2, WhoisRestServlet, ) +from synapse.types import RoomStreamToken from synapse.util.versionstring import get_version_string logger = logging.getLogger(__name__) @@ -109,7 +110,9 @@ async def on_POST(self, request, room_id, event_id): if event.room_id != room_id: raise SynapseError(400, "Event is for wrong room.") - room_token = await self.store.get_topological_token_for_event(event_id) + room_token = RoomStreamToken( + event.depth, event.internal_metadata.stream_ordering + ) token = await room_token.to_string(self.store) logger.info("[purge] purging up to token %s (event_id %s)", token, event_id) diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 78e645592f6d..b4abd961b97f 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -331,6 +331,10 @@ def _persist_events_txn( min_stream_order = events_and_contexts[0][0].internal_metadata.stream_ordering max_stream_order = events_and_contexts[-1][0].internal_metadata.stream_ordering + # stream orderings should have been assigned by now + assert min_stream_order + assert max_stream_order + self._update_forward_extremities_txn( txn, new_forward_extremities=new_forward_extremeties, diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 723ced4ff09d..b7ed8ca6ab06 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -723,6 +723,7 @@ async def _get_events_from_db(self, event_ids, allow_rejected=False): internal_metadata_dict=internal_metadata, rejected_reason=rejected_reason, ) + original_ev.internal_metadata.stream_ordering = row["stream_ordering"] event_map[event_id] = original_ev @@ -790,6 +791,8 @@ def _fetch_event_rows(self, txn, event_ids): * event_id (str) + * stream_ordering (int): stream ordering for this event + * json (str): json-encoded event structure * internal_metadata (str): json-encoded internal metadata dict @@ -822,13 +825,15 @@ def _fetch_event_rows(self, txn, event_ids): sql = """\ SELECT e.event_id, - e.internal_metadata, - e.json, - e.format_version, + e.stream_ordering, + ej.internal_metadata, + ej.json, + ej.format_version, r.room_version, rej.reason - FROM event_json as e - LEFT JOIN rooms r USING (room_id) + FROM events AS e + JOIN event_json AS ej USING (event_id) + LEFT JOIN rooms r ON r.room_id = e.room_id LEFT JOIN rejections as rej USING (event_id) WHERE """ @@ -842,11 +847,12 @@ def _fetch_event_rows(self, txn, event_ids): event_id = row[0] event_dict[event_id] = { "event_id": event_id, - "internal_metadata": row[1], - "json": row[2], - "format_version": row[3], - "room_version_id": row[4], - "rejected_reason": row[5], + "stream_ordering": row[1], + "internal_metadata": row[2], + "json": row[3], + "format_version": row[4], + "room_version_id": row[5], + "rejected_reason": row[6], "redactions": [], } diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 1d27439536b7..a94bec1ac528 100644 --- 
a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -589,19 +589,6 @@ async def get_room_events_max_id(self, room_id: Optional[str] = None) -> str: ) return "t%d-%d" % (topo, token) - async def get_stream_id_for_event(self, event_id: str) -> int: - """The stream ID for an event - Args: - event_id: The id of the event to look up a stream token for. - Raises: - StoreError if the event wasn't in the database. - Returns: - A stream ID. - """ - return await self.db_pool.runInteraction( - "get_stream_id_for_event", self.get_stream_id_for_event_txn, event_id, - ) - def get_stream_id_for_event_txn( self, txn: LoggingTransaction, event_id: str, allow_none=False, ) -> int: diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py index 72939f3984bc..4d2d88d1f02d 100644 --- a/synapse/storage/persist_events.py +++ b/synapse/storage/persist_events.py @@ -248,6 +248,8 @@ async def persist_event( await make_deferred_yieldable(deferred) event_stream_id = event.internal_metadata.stream_ordering + # stream ordering should have been assigned by now + assert event_stream_id pos = PersistedEventPosition(self._instance_name, event_stream_id) return pos, self.main_store.get_room_max_token() From 0991a2da93b6b2010e6ef8f732ffdc3b5b382bab Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 5 Oct 2020 14:57:46 +0100 Subject: [PATCH 120/245] Allow ThirdPartyEventRules modules to manipulate public room state (#8292) This PR allows `ThirdPartyEventRules` modules to view, manipulate and block changes to the state of whether a room is published in the public rooms directory. While the idea of whether a room is in the public rooms list is not kept within an event in the room, `ThirdPartyEventRules` generally deal with controlling which modifications can happen to a room. Public rooms fits within that idea, even if its toggle state isn't controlled through a state event. --- UPGRADE.rst | 17 +++++++ changelog.d/8292.feature | 1 + synapse/events/third_party_rules.py | 51 +++++++++++++++++--- synapse/handlers/directory.py | 10 ++++ synapse/handlers/room.py | 9 ++++ synapse/module_api/__init__.py | 67 ++++++++++++++++++++++++++ tests/module_api/test_api.py | 56 ++++++++++++++++++++- tests/rest/client/third_party_rules.py | 31 +++++++----- 8 files changed, 223 insertions(+), 19 deletions(-) create mode 100644 changelog.d/8292.feature diff --git a/UPGRADE.rst b/UPGRADE.rst index 49e86e628fa4..5a683122178e 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -75,6 +75,23 @@ for example: wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb +Upgrading to v1.22.0 +==================== + +ThirdPartyEventRules breaking changes +------------------------------------- + +This release introduces a backwards-incompatible change to modules making use of +``ThirdPartyEventRules`` in Synapse. If you make use of a module defined under the +``third_party_event_rules`` config option, please make sure it is updated to handle +the below change: + +The ``http_client`` argument is no longer passed to modules as they are initialised. Instead, +modules are expected to make use of the ``http_client`` property on the ``ModuleApi`` class. +Modules are now passed a ``module_api`` argument during initialisation, which is an instance of +``ModuleApi``. 
``ModuleApi`` instances have a ``http_client`` property which acts the same as +the ``http_client`` argument previously passed to ``ThirdPartyEventRules`` modules. + Upgrading to v1.21.0 ==================== diff --git a/changelog.d/8292.feature b/changelog.d/8292.feature new file mode 100644 index 000000000000..6d0335e2c827 --- /dev/null +++ b/changelog.d/8292.feature @@ -0,0 +1 @@ +Allow `ThirdPartyEventRules` modules to query and manipulate whether a room is in the public rooms directory. \ No newline at end of file diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index 9d5310851cdc..fed459198a06 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -12,10 +12,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from typing import Callable from synapse.events import EventBase from synapse.events.snapshot import EventContext -from synapse.types import Requester +from synapse.module_api import ModuleApi +from synapse.types import Requester, StateMap class ThirdPartyEventRules: @@ -38,7 +40,7 @@ def __init__(self, hs): if module is not None: self.third_party_rules = module( - config=config, http_client=hs.get_simple_http_client() + config=config, module_api=ModuleApi(hs, hs.get_auth_handler()), ) async def check_event_allowed( @@ -106,6 +108,46 @@ async def check_threepid_can_be_invited( if self.third_party_rules is None: return True + state_events = await self._get_state_map_for_room(room_id) + + ret = await self.third_party_rules.check_threepid_can_be_invited( + medium, address, state_events + ) + return ret + + async def check_visibility_can_be_modified( + self, room_id: str, new_visibility: str + ) -> bool: + """Check if a room is allowed to be published to, or removed from, the public room + list. + + Args: + room_id: The ID of the room. + new_visibility: The new visibility state. Either "public" or "private". + + Returns: + True if the room's visibility can be modified, False if not. + """ + if self.third_party_rules is None: + return True + + check_func = getattr(self.third_party_rules, "check_visibility_can_be_modified") + if not check_func or not isinstance(check_func, Callable): + return True + + state_events = await self._get_state_map_for_room(room_id) + + return await check_func(room_id, state_events, new_visibility) + + async def _get_state_map_for_room(self, room_id: str) -> StateMap[EventBase]: + """Given a room ID, return the state events of that room. + + Args: + room_id: The ID of the room. + + Returns: + A dict mapping (event type, state key) to state event. 
+ """ state_ids = await self.store.get_filtered_current_state_ids(room_id) room_state_events = await self.store.get_events(state_ids.values()) @@ -113,7 +155,4 @@ async def check_threepid_can_be_invited( for key, event_id in state_ids.items(): state_events[key] = room_state_events[event_id] - ret = await self.third_party_rules.check_threepid_can_be_invited( - medium, address, state_events - ) - return ret + return state_events diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 6f15c682405c..ad5683d25197 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -46,6 +46,7 @@ def __init__(self, hs): self.config = hs.config self.enable_room_list_search = hs.config.enable_room_list_search self.require_membership = hs.config.require_membership_for_aliases + self.third_party_event_rules = hs.get_third_party_event_rules() self.federation = hs.get_federation_client() hs.get_federation_registry().register_query_handler( @@ -454,6 +455,15 @@ async def edit_published_room_list( # per alias creation rule? raise SynapseError(403, "Not allowed to publish room") + # Check if publishing is blocked by a third party module + allowed_by_third_party_rules = await ( + self.third_party_event_rules.check_visibility_can_be_modified( + room_id, visibility + ) + ) + if not allowed_by_third_party_rules: + raise SynapseError(403, "Not allowed to publish room") + await self.store.set_room_is_public(room_id, making_public) async def edit_published_appservice_room_list( diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index f1a6699cd447..f14f79158614 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -681,6 +681,15 @@ async def create_room( creator_id=user_id, is_public=is_public, room_version=room_version, ) + # Check whether this visibility value is blocked by a third party module + allowed_by_third_party_rules = await ( + self.third_party_event_rules.check_visibility_can_be_modified( + room_id, visibility + ) + ) + if not allowed_by_third_party_rules: + raise SynapseError(403, "Room visibility value not allowed.") + directory_handler = self.hs.get_handlers().directory_handler if room_alias: await directory_handler.create_association( diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index fcbd5378c486..646f09d2bc03 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -14,13 +14,18 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +from typing import TYPE_CHECKING from twisted.internet import defer +from synapse.http.client import SimpleHttpClient from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.types import UserID +if TYPE_CHECKING: + from synapse.server import HomeServer + """ This package defines the 'stable' API which can be used by extension modules which are loaded into Synapse. @@ -43,6 +48,27 @@ def __init__(self, hs, auth_handler): self._auth = hs.get_auth() self._auth_handler = auth_handler + # We expose these as properties below in order to attach a helpful docstring. + self._http_client = hs.get_simple_http_client() # type: SimpleHttpClient + self._public_room_list_manager = PublicRoomListManager(hs) + + @property + def http_client(self): + """Allows making outbound HTTP requests to remote resources. 
+ + An instance of synapse.http.client.SimpleHttpClient + """ + return self._http_client + + @property + def public_room_list_manager(self): + """Allows adding to, removing from and checking the status of rooms in the + public room list. + + An instance of synapse.module_api.PublicRoomListManager + """ + return self._public_room_list_manager + def get_user_by_req(self, req, allow_guest=False): """Check the access_token provided for a request @@ -266,3 +292,44 @@ async def complete_sso_login_async( await self._auth_handler.complete_sso_login( registered_user_id, request, client_redirect_url, ) + + +class PublicRoomListManager: + """Contains methods for adding to, removing from and querying whether a room + is in the public room list. + """ + + def __init__(self, hs: "HomeServer"): + self._store = hs.get_datastore() + + async def room_is_in_public_room_list(self, room_id: str) -> bool: + """Checks whether a room is in the public room list. + + Args: + room_id: The ID of the room. + + Returns: + Whether the room is in the public room list. Returns False if the room does + not exist. + """ + room = await self._store.get_room(room_id) + if not room: + return False + + return room.get("is_public", False) + + async def add_room_to_public_room_list(self, room_id: str) -> None: + """Publishes a room to the public room list. + + Args: + room_id: The ID of the room. + """ + await self._store.set_room_is_public(room_id, True) + + async def remove_room_from_public_room_list(self, room_id: str) -> None: + """Removes a room from the public room list. + + Args: + room_id: The ID of the room. + """ + await self._store.set_room_is_public(room_id, False) diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 04de0b9dbe39..54600ad9833c 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -12,13 +12,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from synapse.module_api import ModuleApi +from synapse.rest import admin +from synapse.rest.client.v1 import login, room from tests.unittest import HomeserverTestCase class ModuleApiTestCase(HomeserverTestCase): + servlets = [ + admin.register_servlets, + login.register_servlets, + room.register_servlets, + ] + def prepare(self, reactor, clock, homeserver): self.store = homeserver.get_datastore() self.module_api = ModuleApi(homeserver, homeserver.get_auth_handler()) @@ -52,3 +59,50 @@ def test_can_register_user(self): # Check that the displayname was assigned displayname = self.get_success(self.store.get_profile_displayname("bob")) self.assertEqual(displayname, "Bobberino") + + def test_public_rooms(self): + """Tests that a room can be added and removed from the public rooms list, + as well as have its public rooms directory state queried. + """ + # Create a user and room to play with + user_id = self.register_user("kermit", "monkey") + tok = self.login("kermit", "monkey") + room_id = self.helper.create_room_as(user_id, tok=tok) + + # The room should not currently be in the public rooms directory + is_in_public_rooms = self.get_success( + self.module_api.public_room_list_manager.room_is_in_public_room_list( + room_id + ) + ) + self.assertFalse(is_in_public_rooms) + + # Let's try adding it to the public rooms directory + self.get_success( + self.module_api.public_room_list_manager.add_room_to_public_room_list( + room_id + ) + ) + + # And checking whether it's in there... 
+ is_in_public_rooms = self.get_success( + self.module_api.public_room_list_manager.room_is_in_public_room_list( + room_id + ) + ) + self.assertTrue(is_in_public_rooms) + + # Let's remove it again + self.get_success( + self.module_api.public_room_list_manager.remove_room_from_public_room_list( + room_id + ) + ) + + # Should be gone + is_in_public_rooms = self.get_success( + self.module_api.public_room_list_manager.room_is_in_public_room_list( + room_id + ) + ) + self.assertFalse(is_in_public_rooms) diff --git a/tests/rest/client/third_party_rules.py b/tests/rest/client/third_party_rules.py index 8c24add5303a..715e87de082b 100644 --- a/tests/rest/client/third_party_rules.py +++ b/tests/rest/client/third_party_rules.py @@ -12,18 +12,23 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from synapse.rest import admin from synapse.rest.client.v1 import login, room +from synapse.types import Requester from tests import unittest class ThirdPartyRulesTestModule: - def __init__(self, config): + def __init__(self, config, *args, **kwargs): pass - def check_event_allowed(self, event, context): + async def on_create_room( + self, requester: Requester, config: dict, is_requester_admin: bool + ): + return True + + async def check_event_allowed(self, event, context): if event.type == "foo.bar.forbidden": return False else: @@ -51,29 +56,31 @@ def make_homeserver(self, reactor, clock): self.hs = self.setup_test_homeserver(config=config) return self.hs + def prepare(self, reactor, clock, homeserver): + # Create a user and room to play with during the tests + self.user_id = self.register_user("kermit", "monkey") + self.tok = self.login("kermit", "monkey") + + self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) + def test_third_party_rules(self): """Tests that a forbidden event is forbidden from being sent, but an allowed one can be sent. """ - user_id = self.register_user("kermit", "monkey") - tok = self.login("kermit", "monkey") - - room_id = self.helper.create_room_as(user_id, tok=tok) - request, channel = self.make_request( "PUT", - "/_matrix/client/r0/rooms/%s/send/foo.bar.allowed/1" % room_id, + "/_matrix/client/r0/rooms/%s/send/foo.bar.allowed/1" % self.room_id, {}, - access_token=tok, + access_token=self.tok, ) self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) request, channel = self.make_request( "PUT", - "/_matrix/client/r0/rooms/%s/send/foo.bar.forbidden/1" % room_id, + "/_matrix/client/r0/rooms/%s/send/foo.bar.forbidden/1" % self.room_id, {}, - access_token=tok, + access_token=self.tok, ) self.render(request) self.assertEquals(channel.result["code"], b"403", channel.result) From da11cc22be37e8858c19774779ad7d02d64a458c Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 5 Oct 2020 10:24:17 -0400 Subject: [PATCH 121/245] Ensure that event.redacts is the proper type before handling it (#8457) This fixes a bug when backfilling invalid events. --- changelog.d/8457.bugfix | 1 + synapse/event_auth.py | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog.d/8457.bugfix diff --git a/changelog.d/8457.bugfix b/changelog.d/8457.bugfix new file mode 100644 index 000000000000..545b06d180c1 --- /dev/null +++ b/changelog.d/8457.bugfix @@ -0,0 +1 @@ +Fix a bug where backfilling a room with an event that was missing the `redacts` field would break. 
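Before the `event_auth.py` hunk below, here is a standalone sketch of the defensive pattern this fix relies on (a hypothetical helper, not the actual `check_redaction` code): events arriving over backfill are untrusted input, so a field such as `redacts` has to be type-checked before it is handed to anything that expects a Matrix event ID string.

```python
from typing import Optional


def redaction_target(event_fields: dict) -> Optional[str]:
    """Return the redacted event ID, or None if `redacts` is missing or malformed."""
    redacts = event_fields.get("redacts")
    if not isinstance(redacts, str):
        # Never assume the type of data received from another homeserver.
        return None
    return redacts


assert redaction_target({"redacts": "$abc123:example.org"}) == "$abc123:example.org"
assert redaction_target({"redacts": {"unexpected": "dict"}}) is None
assert redaction_target({}) is None
```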
diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 8c907ad5969a..56f8dc9caf9e 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -446,6 +446,8 @@ def check_redaction( if room_version_obj.event_format == EventFormatVersions.V1: redacter_domain = get_domain_from_id(event.event_id) + if not isinstance(event.redacts, str): + return False redactee_domain = get_domain_from_id(event.redacts) if redacter_domain == redactee_domain: return True From b520a1bf5a272b04473f485def18a9e6f6e4c3b9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 2 Oct 2020 16:45:41 +0100 Subject: [PATCH 122/245] De-duplicate duplicate handling move the "duplicate state event" handling down into `handle_new_client_event` where it can be shared between multiple call paths. --- synapse/handlers/message.py | 41 ++++++++++++++++++--------------- synapse/handlers/room_member.py | 29 ++++++++--------------- 2 files changed, 32 insertions(+), 38 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 00513fbf37c4..ea8e3517d780 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -674,22 +674,14 @@ async def send_nonmember_event( assert self.hs.is_mine(user), "User must be our own: %s" % (user,) - if event.is_state(): - prev_event = await self.deduplicate_state_event(event, context) - if prev_event is not None: - logger.info( - "Not bothering to persist state event %s duplicated by %s", - event.event_id, - prev_event.event_id, - ) - # we know it was persisted, so must have a stream ordering - assert prev_event.internal_metadata.stream_ordering - return prev_event.internal_metadata.stream_ordering - - return await self.handle_new_client_event( + ev = await self.handle_new_client_event( requester=requester, event=event, context=context, ratelimit=ratelimit ) + # we know it was persisted, so must have a stream ordering + assert ev.internal_metadata.stream_ordering + return ev.internal_metadata.stream_ordering + async def deduplicate_state_event( self, event: EventBase, context: EventContext ) -> Optional[EventBase]: @@ -845,8 +837,10 @@ async def handle_new_client_event( context: EventContext, ratelimit: bool = True, extra_users: List[UserID] = [], - ) -> int: - """Processes a new event. This includes checking auth, persisting it, + ) -> EventBase: + """Processes a new event. + + This includes deduplicating, checking auth, persisting, notifying users, sending to remote servers, etc. If called from a worker will hit out to the master process for final @@ -860,9 +854,20 @@ async def handle_new_client_event( extra_users: Any extra users to notify about event Return: - The stream_id of the persisted event. + If the event was deduplicated, the previous, duplicate, event. Otherwise, + `event`. 
""" + if event.is_state(): + prev_event = await self.deduplicate_state_event(event, context) + if prev_event is not None: + logger.info( + "Not bothering to persist state event %s duplicated by %s", + event.event_id, + prev_event.event_id, + ) + return prev_event + if event.is_state() and (event.type, event.state_key) == ( EventTypes.Create, "", @@ -917,13 +922,13 @@ async def handle_new_client_event( ) stream_id = result["stream_id"] event.internal_metadata.stream_ordering = stream_id - return stream_id + return event stream_id = await self.persist_and_notify_client_event( requester, event, context, ratelimit=ratelimit, extra_users=extra_users ) - return stream_id + return event except Exception: # Ensure that we actually remove the entries in the push actions # staging area, if we calculated them. diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 13b749b7cb1c..fd8114a64d25 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -188,16 +188,6 @@ async def _local_membership_update( require_consent=require_consent, ) - # Check if this event matches the previous membership event for the user. - duplicate = await self.event_creation_handler.deduplicate_state_event( - event, context - ) - if duplicate is not None: - # Discard the new event since this membership change is a no-op. - # we know it was persisted, so must have a stream ordering. - assert duplicate.internal_metadata.stream_ordering - return duplicate.event_id, duplicate.internal_metadata.stream_ordering - prev_state_ids = await context.get_prev_state_ids() prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None) @@ -222,7 +212,7 @@ async def _local_membership_update( retry_after_ms=int(1000 * (time_allowed - time_now_s)) ) - stream_id = await self.event_creation_handler.handle_new_client_event( + result_event = await self.event_creation_handler.handle_new_client_event( requester, event, context, extra_users=[target], ratelimit=ratelimit, ) @@ -232,7 +222,9 @@ async def _local_membership_update( if prev_member_event.membership == Membership.JOIN: await self._user_left_room(target, room_id) - return event.event_id, stream_id + # we know it was persisted, so should have a stream ordering + assert result_event.internal_metadata.stream_ordering + return result_event.event_id, result_event.internal_metadata.stream_ordering async def copy_room_tags_and_direct_to_room( self, old_room_id, new_room_id, user_id @@ -673,12 +665,6 @@ async def send_membership_event( else: requester = types.create_requester(target_user) - prev_event = await self.event_creation_handler.deduplicate_state_event( - event, context - ) - if prev_event is not None: - return - prev_state_ids = await context.get_prev_state_ids() if event.membership == Membership.JOIN: if requester.is_guest: @@ -1186,10 +1172,13 @@ async def _locally_reject_invite( context = await self.state_handler.compute_event_context(event) context.app_service = requester.app_service - stream_id = await self.event_creation_handler.handle_new_client_event( + result_event = await self.event_creation_handler.handle_new_client_event( requester, event, context, extra_users=[UserID.from_string(target_user)], ) - return event.event_id, stream_id + # we know it was persisted, so must have a stream ordering + assert result_event.internal_metadata.stream_ordering + + return result_event.event_id, result_event.internal_metadata.stream_ordering async def _user_left_room(self, target: UserID, room_id: str) -> None: """Implements 
RoomMemberHandler._user_left_room From 2ee302d0164b3e0495c3cd1ebb6b906fd3e04e27 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 2 Oct 2020 18:03:21 +0100 Subject: [PATCH 123/245] Move shadow-ban check down into `handle_new_client_event`. --- synapse/handlers/message.py | 32 ++++++++++++++++++++++++-------- 1 file changed, 24 insertions(+), 8 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index ea8e3517d780..8852db4eaf4f 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -657,25 +657,23 @@ async def send_nonmember_event( Return: The stream_id of the persisted event. - Raises: - ShadowBanError if the requester has been shadow-banned. + """ if event.type == EventTypes.Member: raise SynapseError( 500, "Tried to send member event through non-member codepath" ) - if not ignore_shadow_ban and requester.shadow_banned: - # We randomly sleep a bit just to annoy the requester. - await self.clock.sleep(random.randint(1, 10)) - raise ShadowBanError() - user = UserID.from_string(event.sender) assert self.hs.is_mine(user), "User must be our own: %s" % (user,) ev = await self.handle_new_client_event( - requester=requester, event=event, context=context, ratelimit=ratelimit + requester=requester, + event=event, + context=context, + ratelimit=ratelimit, + ignore_shadow_ban=ignore_shadow_ban, ) # we know it was persisted, so must have a stream ordering @@ -837,6 +835,7 @@ async def handle_new_client_event( context: EventContext, ratelimit: bool = True, extra_users: List[UserID] = [], + ignore_shadow_ban: bool = False, ) -> EventBase: """Processes a new event. @@ -853,11 +852,28 @@ async def handle_new_client_event( ratelimit extra_users: Any extra users to notify about event + ignore_shadow_ban: True if shadow-banned users should be allowed to + send this event. + Return: If the event was deduplicated, the previous, duplicate, event. Otherwise, `event`. + + Raises: + ShadowBanError if the requester has been shadow-banned. """ + # we don't apply shadow-banning to membership events, so that the user + # can come and go as they want. + if ( + event.type != EventTypes.Member + and not ignore_shadow_ban + and requester.shadow_banned + ): + # We randomly sleep a bit just to annoy the requester. 
+ await self.clock.sleep(random.randint(1, 10)) + raise ShadowBanError() + if event.is_state(): prev_event = await self.deduplicate_state_event(event, context) if prev_event is not None: From fd0282201e8876e3a860766fff89561564041260 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 5 Oct 2020 19:00:50 +0100 Subject: [PATCH 124/245] pull up event.sender assertion --- synapse/handlers/message.py | 8 ++++---- synapse/handlers/room.py | 1 + 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 8852db4eaf4f..5969a7130fe0 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -664,10 +664,6 @@ async def send_nonmember_event( 500, "Tried to send member event through non-member codepath" ) - user = UserID.from_string(event.sender) - - assert self.hs.is_mine(user), "User must be our own: %s" % (user,) - ev = await self.handle_new_client_event( requester=requester, event=event, @@ -748,6 +744,10 @@ async def create_and_send_nonmember_event( requester, event_dict, token_id=requester.access_token_id, txn_id=txn_id ) + assert self.hs.is_mine_id(event.sender), "User must be our own: %s" % ( + event.sender, + ) + spam_error = self.spam_checker.check_event_for_spam(event) if spam_error: if not isinstance(spam_error, str): diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index f14f79158614..530bf0ab8810 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -185,6 +185,7 @@ async def _upgrade_room( ShadowBanError if the requester is shadow-banned. """ user_id = requester.user.to_string() + assert self.hs.is_mine_id(user_id), "User must be our own: %s" % (user_id,) # start by allocating a new room id r = await self.store.get_room(old_room_id) From e775b5bb5bb2b9e42607f9514ae2e270270a8932 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 2 Oct 2020 18:10:55 +0100 Subject: [PATCH 125/245] kill off `send_nonmember_event` This is now redundant, and we can just call `handle_new_client_event` directly. --- synapse/handlers/message.py | 74 +++++++++++---------------------- synapse/handlers/room.py | 4 +- tests/handlers/test_register.py | 2 +- tests/unittest.py | 4 +- 4 files changed, 31 insertions(+), 53 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 5969a7130fe0..6d136930bf57 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -635,47 +635,6 @@ async def assert_accepted_privacy_policy(self, requester: Requester) -> None: msg = self._block_events_without_consent_error % {"consent_uri": consent_uri} raise ConsentNotGivenError(msg=msg, consent_uri=consent_uri) - async def send_nonmember_event( - self, - requester: Requester, - event: EventBase, - context: EventContext, - ratelimit: bool = True, - ignore_shadow_ban: bool = False, - ) -> int: - """ - Persists and notifies local clients and federation of an event. - - Args: - requester: The requester sending the event. - event: The event to send. - context: The context of the event. - ratelimit: Whether to rate limit this send. - ignore_shadow_ban: True if shadow-banned users should be allowed to - send this event. - - Return: - The stream_id of the persisted event. 
- - - """ - if event.type == EventTypes.Member: - raise SynapseError( - 500, "Tried to send member event through non-member codepath" - ) - - ev = await self.handle_new_client_event( - requester=requester, - event=event, - context=context, - ratelimit=ratelimit, - ignore_shadow_ban=ignore_shadow_ban, - ) - - # we know it was persisted, so must have a stream ordering - assert ev.internal_metadata.stream_ordering - return ev.internal_metadata.stream_ordering - async def deduplicate_state_event( self, event: EventBase, context: EventContext ) -> Optional[EventBase]: @@ -716,7 +675,7 @@ async def create_and_send_nonmember_event( """ Creates an event, then sends it. - See self.create_event and self.send_nonmember_event. + See self.create_event and self.handle_new_client_event. Args: requester: The requester sending the event. @@ -726,9 +685,19 @@ async def create_and_send_nonmember_event( ignore_shadow_ban: True if shadow-banned users should be allowed to send this event. + Returns: + The event, and its stream ordering (if state event deduplication happened, + the previous, duplicate event). + Raises: ShadowBanError if the requester has been shadow-banned. """ + + if event_dict["type"] == EventTypes.Member: + raise SynapseError( + 500, "Tried to send member event through non-member codepath" + ) + if not ignore_shadow_ban and requester.shadow_banned: # We randomly sleep a bit just to annoy the requester. await self.clock.sleep(random.randint(1, 10)) @@ -754,14 +723,17 @@ async def create_and_send_nonmember_event( spam_error = "Spam is not permitted here" raise SynapseError(403, spam_error, Codes.FORBIDDEN) - stream_id = await self.send_nonmember_event( - requester, - event, - context, + ev = await self.handle_new_client_event( + requester=requester, + event=event, + context=context, ratelimit=ratelimit, ignore_shadow_ban=ignore_shadow_ban, ) - return event, stream_id + + # we know it was persisted, so must have a stream ordering + assert ev.internal_metadata.stream_ordering + return ev, ev.internal_metadata.stream_ordering @measure_func("create_new_client_event") async def create_new_client_event( @@ -1255,8 +1227,12 @@ async def _send_dummy_event_for_room(self, room_id: str) -> bool: # Since this is a dummy-event it is OK if it is sent by a # shadow-banned user. 
- await self.send_nonmember_event( - requester, event, context, ratelimit=False, ignore_shadow_ban=True, + await self.handle_new_client_event( + requester=requester, + event=event, + context=context, + ratelimit=False, + ignore_shadow_ban=True, ) return True except ConsentNotGivenError: diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 530bf0ab8810..d0530a446c83 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -230,8 +230,8 @@ async def _upgrade_room( ) # now send the tombstone - await self.event_creation_handler.send_nonmember_event( - requester, tombstone_event, tombstone_context + await self.event_creation_handler.handle_new_client_event( + requester=requester, event=tombstone_event, context=tombstone_context, ) old_room_state = await tombstone_context.get_current_state_ids() diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index cb7c0ed51a66..702c6aa089b8 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -413,7 +413,7 @@ def test_auto_create_auto_join_room_preset_invalid_permissions(self): ) ) self.get_success( - event_creation_handler.send_nonmember_event(requester, event, context) + event_creation_handler.handle_new_client_event(requester, event, context) ) # Register a second user, which won't be be in the room (or even have an invite) diff --git a/tests/unittest.py b/tests/unittest.py index 82ede9de3444..5c87f6097ec8 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -608,7 +608,9 @@ def create_and_send_event( if soft_failed: event.internal_metadata.soft_failed = True - self.get_success(event_creator.send_nonmember_event(requester, event, context)) + self.get_success( + event_creator.handle_new_client_event(requester, event, context) + ) return event.event_id From 103f72929aecfc4d60527590e3bb4d7e3677ef53 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 5 Oct 2020 14:51:08 +0100 Subject: [PATCH 126/245] changelog --- changelog.d/8463.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/8463.misc diff --git a/changelog.d/8463.misc b/changelog.d/8463.misc new file mode 100644 index 000000000000..040c9bb90f12 --- /dev/null +++ b/changelog.d/8463.misc @@ -0,0 +1 @@ +Reduce inconsistencies between codepaths for membership and non-membership events. From 4cd1448d0e16d19a1f255ed6746a7372221e84cd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 5 Oct 2020 20:27:14 +0100 Subject: [PATCH 127/245] Fix third-party event modules for `check_visibility_can_be_modified` check PR #8292 tried to maintain backwards compat with modules which don't provide a `check_visibility_can_be_modified` method, but the tests weren't being run, and the check didn't work. --- changelog.d/8467.feature | 1 + synapse/events/third_party_rules.py | 4 +++- .../{third_party_rules.py => test_third_party_rules.py} | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelog.d/8467.feature rename tests/rest/client/{third_party_rules.py => test_third_party_rules.py} (96%) diff --git a/changelog.d/8467.feature b/changelog.d/8467.feature new file mode 100644 index 000000000000..6d0335e2c827 --- /dev/null +++ b/changelog.d/8467.feature @@ -0,0 +1 @@ +Allow `ThirdPartyEventRules` modules to query and manipulate whether a room is in the public rooms directory. 
\ No newline at end of file diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index fed459198a06..1ca77519d59a 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -131,7 +131,9 @@ async def check_visibility_can_be_modified( if self.third_party_rules is None: return True - check_func = getattr(self.third_party_rules, "check_visibility_can_be_modified") + check_func = getattr( + self.third_party_rules, "check_visibility_can_be_modified", None + ) if not check_func or not isinstance(check_func, Callable): return True diff --git a/tests/rest/client/third_party_rules.py b/tests/rest/client/test_third_party_rules.py similarity index 96% rename from tests/rest/client/third_party_rules.py rename to tests/rest/client/test_third_party_rules.py index 715e87de082b..7b322f526c86 100644 --- a/tests/rest/client/third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -49,7 +49,7 @@ class ThirdPartyRulesTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): config = self.default_config() config["third_party_event_rules"] = { - "module": "tests.rest.client.third_party_rules.ThirdPartyRulesTestModule", + "module": __name__ + ".ThirdPartyRulesTestModule", "config": {}, } From 785437dc0ddfb23012748895e1f8665338e4f3df Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 5 Oct 2020 21:40:51 +0100 Subject: [PATCH 128/245] Update default room version to 6 (#8461) Per https://github.com/matrix-org/matrix-doc/pull/2788 --- changelog.d/8461.feature | 1 + docs/sample_config.yaml | 2 +- synapse/config/server.py | 2 +- tests/rest/client/v1/test_directory.py | 11 ++++++++++- 4 files changed, 13 insertions(+), 3 deletions(-) create mode 100644 changelog.d/8461.feature diff --git a/changelog.d/8461.feature b/changelog.d/8461.feature new file mode 100644 index 000000000000..3665d670e117 --- /dev/null +++ b/changelog.d/8461.feature @@ -0,0 +1 @@ +Change default room version to "6", per [MSC2788](https://github.com/matrix-org/matrix-doc/pull/2788). diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 7126ade2de6f..bb64662e2883 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -119,7 +119,7 @@ pid_file: DATADIR/homeserver.pid # For example, for room version 1, default_room_version should be set # to "1". # -#default_room_version: "5" +#default_room_version: "6" # The GC threshold parameters to pass to `gc.set_threshold`, if defined # diff --git a/synapse/config/server.py b/synapse/config/server.py index ef6d70e3f857..85aa49c02d04 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -39,7 +39,7 @@ # in the list. DEFAULT_BIND_ADDRESSES = ["::", "0.0.0.0"] -DEFAULT_ROOM_VERSION = "5" +DEFAULT_ROOM_VERSION = "6" ROOM_COMPLEXITY_TOO_GREAT = ( "Your homeserver is unable to join rooms this large or complex. 
" diff --git a/tests/rest/client/v1/test_directory.py b/tests/rest/client/v1/test_directory.py index 633b7dbda093..ea5a7f373917 100644 --- a/tests/rest/client/v1/test_directory.py +++ b/tests/rest/client/v1/test_directory.py @@ -21,6 +21,7 @@ from synapse.util.stringutils import random_string from tests import unittest +from tests.unittest import override_config class DirectoryTestCase(unittest.HomeserverTestCase): @@ -67,10 +68,18 @@ def test_directory_in_room_too_long(self): self.ensure_user_joined_room() self.set_alias_via_directory(400, alias_length=256) - def test_state_event_in_room(self): + @override_config({"default_room_version": 5}) + def test_state_event_user_in_v5_room(self): + """Test that a regular user can add alias events before room v6""" self.ensure_user_joined_room() self.set_alias_via_state_event(200) + @override_config({"default_room_version": 6}) + def test_state_event_v6_room(self): + """Test that a regular user can *not* add alias events from room v6""" + self.ensure_user_joined_room() + self.set_alias_via_state_event(403) + def test_directory_in_room(self): self.ensure_user_joined_room() self.set_alias_via_directory(200) From 3e58ce72b42f2ae473c1e76a967548cd6fa7e2e6 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 6 Oct 2020 10:03:39 +0100 Subject: [PATCH 129/245] Don't bother responding to client requests that have already disconnected (#8465) This PR ports the quick fix from https://github.com/matrix-org/synapse/pull/2796 to further methods which handle media, URL preview and `/key/v2/server` requests. This prevents a harmless `ERROR` that comes up in the logs when we were unable to respond to a client request when the client had already disconnected. In this case we simply bail out if the client has already done so. This is the 'simple fix' as suggested by https://github.com/matrix-org/synapse/issues/5304#issuecomment-574740003. Fixes https://github.com/matrix-org/synapse/issues/6700 Fixes https://github.com/matrix-org/synapse/issues/5304 --- changelog.d/8465.bugfix | 1 + synapse/http/server.py | 5 +++++ synapse/rest/media/v1/_base.py | 6 ++++++ 3 files changed, 12 insertions(+) create mode 100644 changelog.d/8465.bugfix diff --git a/changelog.d/8465.bugfix b/changelog.d/8465.bugfix new file mode 100644 index 000000000000..73f895b26879 --- /dev/null +++ b/changelog.d/8465.bugfix @@ -0,0 +1 @@ +Don't attempt to respond to some requests if the client has already disconnected. \ No newline at end of file diff --git a/synapse/http/server.py b/synapse/http/server.py index 09ed74f6ce06..00b98af3d40f 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -651,6 +651,11 @@ def respond_with_json_bytes( Returns: twisted.web.server.NOT_DONE_YET if the request is still active. """ + if request._disconnected: + logger.warning( + "Not sending response to request %s, already disconnected.", request + ) + return request.setResponseCode(code) request.setHeader(b"Content-Type", b"application/json") diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py index 6568e61829bd..67aa993f1930 100644 --- a/synapse/rest/media/v1/_base.py +++ b/synapse/rest/media/v1/_base.py @@ -213,6 +213,12 @@ async def respond_with_responder( file_size (int|None): Size in bytes of the media. If not known it should be None upload_name (str|None): The name of the requested file, if any. 
""" + if request._disconnected: + logger.warning( + "Not sending response to request %s, already disconnected.", request + ) + return + if not responder: respond_404(request) return From a02446113012920c92264f632832308588649ed8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 6 Oct 2020 16:31:31 +0100 Subject: [PATCH 130/245] Additional tests for third-party event rules (#8468) * Optimise and test state fetching for 3p event rules Getting all the events at once is much more efficient than getting them individually * Test that 3p event rules can modify events --- changelog.d/8468.misc | 1 + synapse/events/third_party_rules.py | 12 +-- tests/rest/client/test_third_party_rules.py | 84 +++++++++++++++++---- 3 files changed, 79 insertions(+), 18 deletions(-) create mode 100644 changelog.d/8468.misc diff --git a/changelog.d/8468.misc b/changelog.d/8468.misc new file mode 100644 index 000000000000..32ba991e6421 --- /dev/null +++ b/changelog.d/8468.misc @@ -0,0 +1 @@ +Additional testing for `ThirdPartyEventRules`. diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index 1ca77519d59a..e38b8e67fb8c 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -61,12 +61,14 @@ async def check_event_allowed( prev_state_ids = await context.get_prev_state_ids() # Retrieve the state events from the database. - state_events = {} - for key, event_id in prev_state_ids.items(): - state_events[key] = await self.store.get_event(event_id, allow_none=True) + events = await self.store.get_events(prev_state_ids.values()) + state_events = {(ev.type, ev.state_key): ev for ev in events.values()} - ret = await self.third_party_rules.check_event_allowed(event, state_events) - return ret + # The module can modify the event slightly if it wants, but caution should be + # exercised, and it's likely to go very wrong if applied to events received over + # federation. + + return await self.third_party_rules.check_event_allowed(event, state_events) async def on_create_room( self, requester: Requester, config: dict, is_requester_admin: bool diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index 7b322f526c86..c12518c93105 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -12,33 +12,43 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import threading + +from mock import Mock + +from synapse.events import EventBase from synapse.rest import admin from synapse.rest.client.v1 import login, room -from synapse.types import Requester +from synapse.types import Requester, StateMap from tests import unittest +thread_local = threading.local() + class ThirdPartyRulesTestModule: - def __init__(self, config, *args, **kwargs): - pass + def __init__(self, config, module_api): + # keep a record of the "current" rules module, so that the test can patch + # it if desired. 
+ thread_local.rules_module = self async def on_create_room( self, requester: Requester, config: dict, is_requester_admin: bool ): return True - async def check_event_allowed(self, event, context): - if event.type == "foo.bar.forbidden": - return False - else: - return True + async def check_event_allowed(self, event: EventBase, state: StateMap[EventBase]): + return True @staticmethod def parse_config(config): return config +def current_rules_module() -> ThirdPartyRulesTestModule: + return thread_local.rules_module + + class ThirdPartyRulesTestCase(unittest.HomeserverTestCase): servlets = [ admin.register_servlets, @@ -46,15 +56,13 @@ class ThirdPartyRulesTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def make_homeserver(self, reactor, clock): - config = self.default_config() + def default_config(self): + config = super().default_config() config["third_party_event_rules"] = { "module": __name__ + ".ThirdPartyRulesTestModule", "config": {}, } - - self.hs = self.setup_test_homeserver(config=config) - return self.hs + return config def prepare(self, reactor, clock, homeserver): # Create a user and room to play with during the tests @@ -67,6 +75,14 @@ def test_third_party_rules(self): """Tests that a forbidden event is forbidden from being sent, but an allowed one can be sent. """ + # patch the rules module with a Mock which will return False for some event + # types + async def check(ev, state): + return ev.type != "foo.bar.forbidden" + + callback = Mock(spec=[], side_effect=check) + current_rules_module().check_event_allowed = callback + request, channel = self.make_request( "PUT", "/_matrix/client/r0/rooms/%s/send/foo.bar.allowed/1" % self.room_id, @@ -76,6 +92,16 @@ def test_third_party_rules(self): self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) + callback.assert_called_once() + + # there should be various state events in the state arg: do some basic checks + state_arg = callback.call_args[0][1] + for k in (("m.room.create", ""), ("m.room.member", self.user_id)): + self.assertIn(k, state_arg) + ev = state_arg[k] + self.assertEqual(ev.type, k[0]) + self.assertEqual(ev.state_key, k[1]) + request, channel = self.make_request( "PUT", "/_matrix/client/r0/rooms/%s/send/foo.bar.forbidden/1" % self.room_id, @@ -84,3 +110,35 @@ def test_third_party_rules(self): ) self.render(request) self.assertEquals(channel.result["code"], b"403", channel.result) + + def test_modify_event(self): + """Tests that the module can successfully tweak an event before it is persisted. + """ + # first patch the event checker so that it will modify the event + async def check(ev: EventBase, state): + ev.content = {"x": "y"} + return True + + current_rules_module().check_event_allowed = check + + # now send the event + request, channel = self.make_request( + "PUT", + "/_matrix/client/r0/rooms/%s/send/modifyme/1" % self.room_id, + {"x": "x"}, + access_token=self.tok, + ) + self.render(request) + self.assertEqual(channel.result["code"], b"200", channel.result) + event_id = channel.json_body["event_id"] + + # ... 
and check that it got modified + request, channel = self.make_request( + "GET", + "/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, event_id), + access_token=self.tok, + ) + self.render(request) + self.assertEqual(channel.result["code"], b"200", channel.result) + ev = channel.json_body + self.assertEqual(ev["content"]["x"], "y") From 3cd78bbe9e208d2e93ccebee5d3586ee5f5a5d31 Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Tue, 6 Oct 2020 13:26:29 -0400 Subject: [PATCH 131/245] Add support for MSC2732: olm fallback keys (#8312) --- changelog.d/8312.feature | 1 + scripts/synapse_port_db | 1 + synapse/handlers/e2e_keys.py | 16 +++ synapse/handlers/sync.py | 8 ++ synapse/rest/client/v2_alpha/sync.py | 1 + .../storage/databases/main/end_to_end_keys.py | 100 +++++++++++++++++- .../main/schema/delta/58/11fallback.sql | 24 +++++ tests/handlers/test_e2e_keys.py | 65 ++++++++++++ 8 files changed, 215 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8312.feature create mode 100644 synapse/storage/databases/main/schema/delta/58/11fallback.sql diff --git a/changelog.d/8312.feature b/changelog.d/8312.feature new file mode 100644 index 000000000000..222a1b032a4d --- /dev/null +++ b/changelog.d/8312.feature @@ -0,0 +1 @@ +Add support for olm fallback keys ([MSC2732](https://github.com/matrix-org/matrix-doc/pull/2732)). \ No newline at end of file diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index 7e12f5440cc4..2d0b59ab534e 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -90,6 +90,7 @@ BOOLEAN_COLUMNS = { "room_stats_state": ["is_federatable"], "local_media_repository": ["safe_from_quarantine"], "users": ["shadow_banned"], + "e2e_fallback_keys_json": ["used"], } diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index dd40fd129936..611742ae72d5 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -496,6 +496,22 @@ async def upload_keys_for_user(self, user_id, device_id, keys): log_kv( {"message": "Did not update one_time_keys", "reason": "no keys given"} ) + fallback_keys = keys.get("org.matrix.msc2732.fallback_keys", None) + if fallback_keys and isinstance(fallback_keys, dict): + log_kv( + { + "message": "Updating fallback_keys for device.", + "user_id": user_id, + "device_id": device_id, + } + ) + await self.store.set_e2e_fallback_keys(user_id, device_id, fallback_keys) + elif fallback_keys: + log_kv({"message": "Did not update fallback_keys", "reason": "not a dict"}) + else: + log_kv( + {"message": "Did not update fallback_keys", "reason": "no keys given"} + ) # the device should have been registered already, but it may have been # deleted due to a race with a DELETE request. 
Or we may be using an diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index a998e6b7f63d..dd1f90e3594b 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -201,6 +201,8 @@ class SyncResult: device_lists: List of user_ids whose devices have changed device_one_time_keys_count: Dict of algorithm to count for one time keys for this device + device_unused_fallback_key_types: List of key types that have an unused fallback + key groups: Group updates, if any """ @@ -213,6 +215,7 @@ class SyncResult: to_device = attr.ib(type=List[JsonDict]) device_lists = attr.ib(type=DeviceLists) device_one_time_keys_count = attr.ib(type=JsonDict) + device_unused_fallback_key_types = attr.ib(type=List[str]) groups = attr.ib(type=Optional[GroupsSyncResult]) def __bool__(self) -> bool: @@ -1014,10 +1017,14 @@ async def generate_sync_result( logger.debug("Fetching OTK data") device_id = sync_config.device_id one_time_key_counts = {} # type: JsonDict + unused_fallback_key_types = [] # type: List[str] if device_id: one_time_key_counts = await self.store.count_e2e_one_time_keys( user_id, device_id ) + unused_fallback_key_types = await self.store.get_e2e_unused_fallback_key_types( + user_id, device_id + ) logger.debug("Fetching group data") await self._generate_sync_entry_for_groups(sync_result_builder) @@ -1041,6 +1048,7 @@ async def generate_sync_result( device_lists=device_lists, groups=sync_result_builder.groups, device_one_time_keys_count=one_time_key_counts, + device_unused_fallback_key_types=unused_fallback_key_types, next_batch=sync_result_builder.now_token, ) diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 6779df952f77..2b84eb89c02c 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -236,6 +236,7 @@ async def encode_response(self, time_now, sync_result, access_token_id, filter): "leave": sync_result.groups.leave, }, "device_one_time_keys_count": sync_result.device_one_time_keys_count, + "org.matrix.msc2732.device_unused_fallback_key_types": sync_result.device_unused_fallback_key_types, "next_batch": await sync_result.next_batch.to_string(self.store), } diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 22e1ed15d056..8c97f2af5ce5 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -367,6 +367,57 @@ def _count_e2e_one_time_keys(txn): "count_e2e_one_time_keys", _count_e2e_one_time_keys ) + async def set_e2e_fallback_keys( + self, user_id: str, device_id: str, fallback_keys: JsonDict + ) -> None: + """Set the user's e2e fallback keys. + + Args: + user_id: the user whose keys are being set + device_id: the device whose keys are being set + fallback_keys: the keys to set. This is a map from key ID (which is + of the form "algorithm:id") to key data. 
+ """ + # fallback_keys will usually only have one item in it, so using a for + # loop (as opposed to calling simple_upsert_many_txn) won't be too bad + # FIXME: make sure that only one key per algorithm is uploaded + for key_id, fallback_key in fallback_keys.items(): + algorithm, key_id = key_id.split(":", 1) + await self.db_pool.simple_upsert( + "e2e_fallback_keys_json", + keyvalues={ + "user_id": user_id, + "device_id": device_id, + "algorithm": algorithm, + }, + values={ + "key_id": key_id, + "key_json": json_encoder.encode(fallback_key), + "used": False, + }, + desc="set_e2e_fallback_key", + ) + + @cached(max_entries=10000) + async def get_e2e_unused_fallback_key_types( + self, user_id: str, device_id: str + ) -> List[str]: + """Returns the fallback key types that have an unused key. + + Args: + user_id: the user whose keys are being queried + device_id: the device whose keys are being queried + + Returns: + a list of key types + """ + return await self.db_pool.simple_select_onecol( + "e2e_fallback_keys_json", + keyvalues={"user_id": user_id, "device_id": device_id, "used": False}, + retcol="algorithm", + desc="get_e2e_unused_fallback_key_types", + ) + async def get_e2e_cross_signing_key( self, user_id: str, key_type: str, from_user_id: Optional[str] = None ) -> Optional[dict]: @@ -701,15 +752,37 @@ def _claim_e2e_one_time_keys(txn): " WHERE user_id = ? AND device_id = ? AND algorithm = ?" " LIMIT 1" ) + fallback_sql = ( + "SELECT key_id, key_json, used FROM e2e_fallback_keys_json" + " WHERE user_id = ? AND device_id = ? AND algorithm = ?" + " LIMIT 1" + ) result = {} delete = [] + used_fallbacks = [] for user_id, device_id, algorithm in query_list: user_result = result.setdefault(user_id, {}) device_result = user_result.setdefault(device_id, {}) txn.execute(sql, (user_id, device_id, algorithm)) - for key_id, key_json in txn: + otk_row = txn.fetchone() + if otk_row is not None: + key_id, key_json = otk_row device_result[algorithm + ":" + key_id] = key_json delete.append((user_id, device_id, algorithm, key_id)) + else: + # no one-time key available, so see if there's a fallback + # key + txn.execute(fallback_sql, (user_id, device_id, algorithm)) + fallback_row = txn.fetchone() + if fallback_row is not None: + key_id, key_json, used = fallback_row + device_result[algorithm + ":" + key_id] = key_json + if not used: + used_fallbacks.append( + (user_id, device_id, algorithm, key_id) + ) + + # drop any one-time keys that were claimed sql = ( "DELETE FROM e2e_one_time_keys_json" " WHERE user_id = ? AND device_id = ? AND algorithm = ?" 
@@ -726,6 +799,23 @@ def _claim_e2e_one_time_keys(txn): self._invalidate_cache_and_stream( txn, self.count_e2e_one_time_keys, (user_id, device_id) ) + # mark fallback keys as used + for user_id, device_id, algorithm, key_id in used_fallbacks: + self.db_pool.simple_update_txn( + txn, + "e2e_fallback_keys_json", + { + "user_id": user_id, + "device_id": device_id, + "algorithm": algorithm, + "key_id": key_id, + }, + {"used": True}, + ) + self._invalidate_cache_and_stream( + txn, self.get_e2e_unused_fallback_key_types, (user_id, device_id) + ) + return result return await self.db_pool.runInteraction( @@ -754,6 +844,14 @@ def delete_e2e_keys_by_device_txn(txn): self._invalidate_cache_and_stream( txn, self.count_e2e_one_time_keys, (user_id, device_id) ) + self.db_pool.simple_delete_txn( + txn, + table="e2e_fallback_keys_json", + keyvalues={"user_id": user_id, "device_id": device_id}, + ) + self._invalidate_cache_and_stream( + txn, self.get_e2e_unused_fallback_key_types, (user_id, device_id) + ) await self.db_pool.runInteraction( "delete_e2e_keys_by_device", delete_e2e_keys_by_device_txn diff --git a/synapse/storage/databases/main/schema/delta/58/11fallback.sql b/synapse/storage/databases/main/schema/delta/58/11fallback.sql new file mode 100644 index 000000000000..4ed981dbf89e --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/58/11fallback.sql @@ -0,0 +1,24 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE TABLE IF NOT EXISTS e2e_fallback_keys_json ( + user_id TEXT NOT NULL, -- The user this fallback key is for. + device_id TEXT NOT NULL, -- The device this fallback key is for. + algorithm TEXT NOT NULL, -- Which algorithm this fallback key is for. + key_id TEXT NOT NULL, -- An id for suppressing duplicate uploads. + key_json TEXT NOT NULL, -- The key as a JSON blob. + used BOOLEAN NOT NULL DEFAULT FALSE, -- Whether the key has been used or not. 
+ CONSTRAINT e2e_fallback_keys_json_uniqueness UNIQUE (user_id, device_id, algorithm) +); diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 366dcfb6701a..4e9e3dcbc267 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -171,6 +171,71 @@ def test_claim_one_time_key(self): }, ) + @defer.inlineCallbacks + def test_fallback_key(self): + local_user = "@boris:" + self.hs.hostname + device_id = "xyz" + fallback_key = {"alg1:k1": "key1"} + otk = {"alg1:k2": "key2"} + + yield defer.ensureDeferred( + self.handler.upload_keys_for_user( + local_user, + device_id, + {"org.matrix.msc2732.fallback_keys": fallback_key}, + ) + ) + + # claiming an OTK when no OTKs are available should return the fallback + # key + res = yield defer.ensureDeferred( + self.handler.claim_one_time_keys( + {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None + ) + ) + self.assertEqual( + res, + {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key}}}, + ) + + # claiming an OTK again should return the same fallback key + res = yield defer.ensureDeferred( + self.handler.claim_one_time_keys( + {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None + ) + ) + self.assertEqual( + res, + {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key}}}, + ) + + # if the user uploads a one-time key, the next claim should fetch the + # one-time key, and then go back to the fallback + yield defer.ensureDeferred( + self.handler.upload_keys_for_user( + local_user, device_id, {"one_time_keys": otk} + ) + ) + + res = yield defer.ensureDeferred( + self.handler.claim_one_time_keys( + {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None + ) + ) + self.assertEqual( + res, {"failures": {}, "one_time_keys": {local_user: {device_id: otk}}}, + ) + + res = yield defer.ensureDeferred( + self.handler.claim_one_time_keys( + {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None + ) + ) + self.assertEqual( + res, + {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key}}}, + ) + @defer.inlineCallbacks def test_replace_master_key(self): """uploading a new signing key should make the old signing key unavailable""" From 903fcd2d3561813b80706b07e1dcc19eb47ec260 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 7 Oct 2020 11:28:05 +0100 Subject: [PATCH 132/245] update wording --- synapse/handlers/message.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 6d136930bf57..3e9a22e8f334 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -835,8 +835,9 @@ async def handle_new_client_event( ShadowBanError if the requester has been shadow-banned. """ - # we don't apply shadow-banning to membership events, so that the user - # can come and go as they want. + # we don't apply shadow-banning to membership events here. Invites are blocked + # higher up the stack, and we allow shadow-banned users to send join and leave + # events as normal. 
if ( event.type != EventTypes.Member and not ignore_shadow_ban From 01f82bfe32c52e0020e63d544a8f59e5e97aab52 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 7 Oct 2020 11:45:31 +0100 Subject: [PATCH 133/245] Remove docs/sphinx and related references (#8480) https://github.com/matrix-org/synapse/tree/develop/docs/sphinx doesn't seem to really be utilised or changed recently since the initial commit. I like the idea of exportable documentation of the codebase, but at the moment after running through the build instructions the generated website wasn't very useful... --- README.rst | 13 - changelog.d/8480.misc | 1 + docs/code_style.md | 2 - docs/sphinx/README.rst | 1 - docs/sphinx/conf.py | 271 ------------------ docs/sphinx/index.rst | 20 -- docs/sphinx/modules.rst | 7 - docs/sphinx/synapse.api.auth.rst | 7 - docs/sphinx/synapse.api.constants.rst | 7 - docs/sphinx/synapse.api.dbobjects.rst | 7 - docs/sphinx/synapse.api.errors.rst | 7 - docs/sphinx/synapse.api.event_stream.rst | 7 - docs/sphinx/synapse.api.events.factory.rst | 7 - docs/sphinx/synapse.api.events.room.rst | 7 - docs/sphinx/synapse.api.events.rst | 18 -- docs/sphinx/synapse.api.handlers.events.rst | 7 - docs/sphinx/synapse.api.handlers.factory.rst | 7 - .../synapse.api.handlers.federation.rst | 7 - docs/sphinx/synapse.api.handlers.register.rst | 7 - docs/sphinx/synapse.api.handlers.room.rst | 7 - docs/sphinx/synapse.api.handlers.rst | 21 -- docs/sphinx/synapse.api.notifier.rst | 7 - docs/sphinx/synapse.api.register_events.rst | 7 - docs/sphinx/synapse.api.room_events.rst | 7 - docs/sphinx/synapse.api.rst | 30 -- docs/sphinx/synapse.api.server.rst | 7 - docs/sphinx/synapse.api.storage.rst | 7 - docs/sphinx/synapse.api.stream.rst | 7 - docs/sphinx/synapse.api.streams.event.rst | 7 - docs/sphinx/synapse.api.streams.rst | 17 -- docs/sphinx/synapse.app.homeserver.rst | 7 - docs/sphinx/synapse.app.rst | 17 -- docs/sphinx/synapse.db.rst | 10 - docs/sphinx/synapse.federation.handler.rst | 7 - docs/sphinx/synapse.federation.messaging.rst | 7 - docs/sphinx/synapse.federation.pdu_codec.rst | 7 - .../sphinx/synapse.federation.persistence.rst | 7 - .../sphinx/synapse.federation.replication.rst | 7 - docs/sphinx/synapse.federation.rst | 22 -- docs/sphinx/synapse.federation.transport.rst | 7 - docs/sphinx/synapse.federation.units.rst | 7 - docs/sphinx/synapse.persistence.rst | 19 -- docs/sphinx/synapse.persistence.service.rst | 7 - docs/sphinx/synapse.persistence.tables.rst | 7 - .../synapse.persistence.transactions.rst | 7 - docs/sphinx/synapse.rest.base.rst | 7 - docs/sphinx/synapse.rest.events.rst | 7 - docs/sphinx/synapse.rest.register.rst | 7 - docs/sphinx/synapse.rest.room.rst | 7 - docs/sphinx/synapse.rest.rst | 20 -- docs/sphinx/synapse.rst | 30 -- docs/sphinx/synapse.server.rst | 7 - docs/sphinx/synapse.state.rst | 7 - docs/sphinx/synapse.util.async.rst | 7 - docs/sphinx/synapse.util.dbutils.rst | 7 - docs/sphinx/synapse.util.http.rst | 7 - docs/sphinx/synapse.util.lockutils.rst | 7 - docs/sphinx/synapse.util.logutils.rst | 7 - docs/sphinx/synapse.util.rst | 21 -- docs/sphinx/synapse.util.stringutils.rst | 7 - scripts-dev/sphinx_api_docs.sh | 1 - setup.cfg | 5 - 62 files changed, 1 insertion(+), 839 deletions(-) create mode 100644 changelog.d/8480.misc delete mode 100644 docs/sphinx/README.rst delete mode 100644 docs/sphinx/conf.py delete mode 100644 docs/sphinx/index.rst delete mode 100644 docs/sphinx/modules.rst delete mode 100644 docs/sphinx/synapse.api.auth.rst delete mode 
100644 docs/sphinx/synapse.api.constants.rst delete mode 100644 docs/sphinx/synapse.api.dbobjects.rst delete mode 100644 docs/sphinx/synapse.api.errors.rst delete mode 100644 docs/sphinx/synapse.api.event_stream.rst delete mode 100644 docs/sphinx/synapse.api.events.factory.rst delete mode 100644 docs/sphinx/synapse.api.events.room.rst delete mode 100644 docs/sphinx/synapse.api.events.rst delete mode 100644 docs/sphinx/synapse.api.handlers.events.rst delete mode 100644 docs/sphinx/synapse.api.handlers.factory.rst delete mode 100644 docs/sphinx/synapse.api.handlers.federation.rst delete mode 100644 docs/sphinx/synapse.api.handlers.register.rst delete mode 100644 docs/sphinx/synapse.api.handlers.room.rst delete mode 100644 docs/sphinx/synapse.api.handlers.rst delete mode 100644 docs/sphinx/synapse.api.notifier.rst delete mode 100644 docs/sphinx/synapse.api.register_events.rst delete mode 100644 docs/sphinx/synapse.api.room_events.rst delete mode 100644 docs/sphinx/synapse.api.rst delete mode 100644 docs/sphinx/synapse.api.server.rst delete mode 100644 docs/sphinx/synapse.api.storage.rst delete mode 100644 docs/sphinx/synapse.api.stream.rst delete mode 100644 docs/sphinx/synapse.api.streams.event.rst delete mode 100644 docs/sphinx/synapse.api.streams.rst delete mode 100644 docs/sphinx/synapse.app.homeserver.rst delete mode 100644 docs/sphinx/synapse.app.rst delete mode 100644 docs/sphinx/synapse.db.rst delete mode 100644 docs/sphinx/synapse.federation.handler.rst delete mode 100644 docs/sphinx/synapse.federation.messaging.rst delete mode 100644 docs/sphinx/synapse.federation.pdu_codec.rst delete mode 100644 docs/sphinx/synapse.federation.persistence.rst delete mode 100644 docs/sphinx/synapse.federation.replication.rst delete mode 100644 docs/sphinx/synapse.federation.rst delete mode 100644 docs/sphinx/synapse.federation.transport.rst delete mode 100644 docs/sphinx/synapse.federation.units.rst delete mode 100644 docs/sphinx/synapse.persistence.rst delete mode 100644 docs/sphinx/synapse.persistence.service.rst delete mode 100644 docs/sphinx/synapse.persistence.tables.rst delete mode 100644 docs/sphinx/synapse.persistence.transactions.rst delete mode 100644 docs/sphinx/synapse.rest.base.rst delete mode 100644 docs/sphinx/synapse.rest.events.rst delete mode 100644 docs/sphinx/synapse.rest.register.rst delete mode 100644 docs/sphinx/synapse.rest.room.rst delete mode 100644 docs/sphinx/synapse.rest.rst delete mode 100644 docs/sphinx/synapse.rst delete mode 100644 docs/sphinx/synapse.server.rst delete mode 100644 docs/sphinx/synapse.state.rst delete mode 100644 docs/sphinx/synapse.util.async.rst delete mode 100644 docs/sphinx/synapse.util.dbutils.rst delete mode 100644 docs/sphinx/synapse.util.http.rst delete mode 100644 docs/sphinx/synapse.util.lockutils.rst delete mode 100644 docs/sphinx/synapse.util.logutils.rst delete mode 100644 docs/sphinx/synapse.util.rst delete mode 100644 docs/sphinx/synapse.util.stringutils.rst delete mode 100644 scripts-dev/sphinx_api_docs.sh diff --git a/README.rst b/README.rst index 4a189c8bc4d4..e623cf863a5c 100644 --- a/README.rst +++ b/README.rst @@ -290,19 +290,6 @@ Testing with SyTest is recommended for verifying that changes related to the Client-Server API are functioning correctly. See the `installation instructions `_ for details. 
-Building Internal API Documentation -=================================== - -Before building internal API documentation install sphinx and -sphinxcontrib-napoleon:: - - pip install sphinx - pip install sphinxcontrib-napoleon - -Building internal API documentation:: - - python setup.py build_sphinx - Troubleshooting =============== diff --git a/changelog.d/8480.misc b/changelog.d/8480.misc new file mode 100644 index 000000000000..81633af2965e --- /dev/null +++ b/changelog.d/8480.misc @@ -0,0 +1 @@ +Remove outdated sphinx documentation, scripts and configuration. \ No newline at end of file diff --git a/docs/code_style.md b/docs/code_style.md index 6ef6f802908d..f6c825d7d410 100644 --- a/docs/code_style.md +++ b/docs/code_style.md @@ -64,8 +64,6 @@ save as it takes a while and is very resource intensive. - Use underscores for functions and variables. - **Docstrings**: should follow the [google code style](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings). - This is so that we can generate documentation with - [sphinx](http://sphinxcontrib-napoleon.readthedocs.org/en/latest/). See the [examples](http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) in the sphinx documentation. diff --git a/docs/sphinx/README.rst b/docs/sphinx/README.rst deleted file mode 100644 index a7ab7c550041..000000000000 --- a/docs/sphinx/README.rst +++ /dev/null @@ -1 +0,0 @@ -TODO: how (if at all) is this actually maintained? diff --git a/docs/sphinx/conf.py b/docs/sphinx/conf.py deleted file mode 100644 index ca4b879526df..000000000000 --- a/docs/sphinx/conf.py +++ /dev/null @@ -1,271 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Synapse documentation build configuration file, created by -# sphinx-quickstart on Tue Jun 10 17:31:02 2014. -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath("..")) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.intersphinx", - "sphinx.ext.coverage", - "sphinx.ext.ifconfig", - "sphinxcontrib.napoleon", -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# The suffix of source filenames. -source_suffix = ".rst" - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = "index" - -# General information about the project. -project = "Synapse" -copyright = ( - "Copyright 2014-2017 OpenMarket Ltd, 2017 Vector Creations Ltd, 2017 New Vector Ltd" -) - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. 
-version = "1.0" -# The full version, including alpha/beta/rc tags. -release = "1.0" - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ["_build"] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = "default" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. 
-# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = "Synapsedoc" - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - #'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - #'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - #'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [("index", "Synapse.tex", "Synapse Documentation", "TNG", "manual")] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [("index", "synapse", "Synapse Documentation", ["TNG"], 1)] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - "index", - "Synapse", - "Synapse Documentation", - "TNG", - "Synapse", - "One line description of project.", - "Miscellaneous", - ) -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {"http://docs.python.org/": None} - -napoleon_include_special_with_doc = True -napoleon_use_ivar = True diff --git a/docs/sphinx/index.rst b/docs/sphinx/index.rst deleted file mode 100644 index 76a4c0c7bf66..000000000000 --- a/docs/sphinx/index.rst +++ /dev/null @@ -1,20 +0,0 @@ -.. 
Synapse documentation master file, created by - sphinx-quickstart on Tue Jun 10 17:31:02 2014. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to Synapse's documentation! -=================================== - -Contents: - -.. toctree:: - synapse - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` - diff --git a/docs/sphinx/modules.rst b/docs/sphinx/modules.rst deleted file mode 100644 index 1c7f70bd134e..000000000000 --- a/docs/sphinx/modules.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse -======= - -.. toctree:: - :maxdepth: 4 - - synapse diff --git a/docs/sphinx/synapse.api.auth.rst b/docs/sphinx/synapse.api.auth.rst deleted file mode 100644 index 931eb5983666..000000000000 --- a/docs/sphinx/synapse.api.auth.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.auth module -======================= - -.. automodule:: synapse.api.auth - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.constants.rst b/docs/sphinx/synapse.api.constants.rst deleted file mode 100644 index a1e3c47f68ea..000000000000 --- a/docs/sphinx/synapse.api.constants.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.constants module -============================ - -.. automodule:: synapse.api.constants - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.dbobjects.rst b/docs/sphinx/synapse.api.dbobjects.rst deleted file mode 100644 index e9d31167e01e..000000000000 --- a/docs/sphinx/synapse.api.dbobjects.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.dbobjects module -============================ - -.. automodule:: synapse.api.dbobjects - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.errors.rst b/docs/sphinx/synapse.api.errors.rst deleted file mode 100644 index f1c688147848..000000000000 --- a/docs/sphinx/synapse.api.errors.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.errors module -========================= - -.. automodule:: synapse.api.errors - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.event_stream.rst b/docs/sphinx/synapse.api.event_stream.rst deleted file mode 100644 index 9291cb2dbce0..000000000000 --- a/docs/sphinx/synapse.api.event_stream.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.event_stream module -=============================== - -.. automodule:: synapse.api.event_stream - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.events.factory.rst b/docs/sphinx/synapse.api.events.factory.rst deleted file mode 100644 index 2e71ff6070cd..000000000000 --- a/docs/sphinx/synapse.api.events.factory.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.events.factory module -================================= - -.. automodule:: synapse.api.events.factory - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.events.room.rst b/docs/sphinx/synapse.api.events.room.rst deleted file mode 100644 index 6cd599859999..000000000000 --- a/docs/sphinx/synapse.api.events.room.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.events.room module -============================== - -.. 
automodule:: synapse.api.events.room - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.events.rst b/docs/sphinx/synapse.api.events.rst deleted file mode 100644 index b762da55ee65..000000000000 --- a/docs/sphinx/synapse.api.events.rst +++ /dev/null @@ -1,18 +0,0 @@ -synapse.api.events package -========================== - -Submodules ----------- - -.. toctree:: - - synapse.api.events.factory - synapse.api.events.room - -Module contents ---------------- - -.. automodule:: synapse.api.events - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.handlers.events.rst b/docs/sphinx/synapse.api.handlers.events.rst deleted file mode 100644 index d2e1b54ac00b..000000000000 --- a/docs/sphinx/synapse.api.handlers.events.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.handlers.events module -================================== - -.. automodule:: synapse.api.handlers.events - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.handlers.factory.rst b/docs/sphinx/synapse.api.handlers.factory.rst deleted file mode 100644 index b04a93f740ab..000000000000 --- a/docs/sphinx/synapse.api.handlers.factory.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.handlers.factory module -=================================== - -.. automodule:: synapse.api.handlers.factory - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.handlers.federation.rst b/docs/sphinx/synapse.api.handlers.federation.rst deleted file mode 100644 index 61a6542210b3..000000000000 --- a/docs/sphinx/synapse.api.handlers.federation.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.handlers.federation module -====================================== - -.. automodule:: synapse.api.handlers.federation - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.handlers.register.rst b/docs/sphinx/synapse.api.handlers.register.rst deleted file mode 100644 index 388f144eca7c..000000000000 --- a/docs/sphinx/synapse.api.handlers.register.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.handlers.register module -==================================== - -.. automodule:: synapse.api.handlers.register - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.handlers.room.rst b/docs/sphinx/synapse.api.handlers.room.rst deleted file mode 100644 index 8ca156c7ff5a..000000000000 --- a/docs/sphinx/synapse.api.handlers.room.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.handlers.room module -================================ - -.. automodule:: synapse.api.handlers.room - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.handlers.rst b/docs/sphinx/synapse.api.handlers.rst deleted file mode 100644 index e84f563fcb90..000000000000 --- a/docs/sphinx/synapse.api.handlers.rst +++ /dev/null @@ -1,21 +0,0 @@ -synapse.api.handlers package -============================ - -Submodules ----------- - -.. toctree:: - - synapse.api.handlers.events - synapse.api.handlers.factory - synapse.api.handlers.federation - synapse.api.handlers.register - synapse.api.handlers.room - -Module contents ---------------- - -.. 
automodule:: synapse.api.handlers - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.notifier.rst b/docs/sphinx/synapse.api.notifier.rst deleted file mode 100644 index 631b42a497be..000000000000 --- a/docs/sphinx/synapse.api.notifier.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.notifier module -=========================== - -.. automodule:: synapse.api.notifier - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.register_events.rst b/docs/sphinx/synapse.api.register_events.rst deleted file mode 100644 index 79ad4ce211af..000000000000 --- a/docs/sphinx/synapse.api.register_events.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.register_events module -================================== - -.. automodule:: synapse.api.register_events - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.room_events.rst b/docs/sphinx/synapse.api.room_events.rst deleted file mode 100644 index bead1711f565..000000000000 --- a/docs/sphinx/synapse.api.room_events.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.room_events module -============================== - -.. automodule:: synapse.api.room_events - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.rst b/docs/sphinx/synapse.api.rst deleted file mode 100644 index f4d39ff331d6..000000000000 --- a/docs/sphinx/synapse.api.rst +++ /dev/null @@ -1,30 +0,0 @@ -synapse.api package -=================== - -Subpackages ------------ - -.. toctree:: - - synapse.api.events - synapse.api.handlers - synapse.api.streams - -Submodules ----------- - -.. toctree:: - - synapse.api.auth - synapse.api.constants - synapse.api.errors - synapse.api.notifier - synapse.api.storage - -Module contents ---------------- - -.. automodule:: synapse.api - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.server.rst b/docs/sphinx/synapse.api.server.rst deleted file mode 100644 index b01600235eab..000000000000 --- a/docs/sphinx/synapse.api.server.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.server module -========================= - -.. automodule:: synapse.api.server - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.storage.rst b/docs/sphinx/synapse.api.storage.rst deleted file mode 100644 index afa40685c4de..000000000000 --- a/docs/sphinx/synapse.api.storage.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.storage module -========================== - -.. automodule:: synapse.api.storage - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.stream.rst b/docs/sphinx/synapse.api.stream.rst deleted file mode 100644 index 0d5e3f01bf60..000000000000 --- a/docs/sphinx/synapse.api.stream.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.stream module -========================= - -.. automodule:: synapse.api.stream - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.streams.event.rst b/docs/sphinx/synapse.api.streams.event.rst deleted file mode 100644 index 2ac45a35c84f..000000000000 --- a/docs/sphinx/synapse.api.streams.event.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.streams.event module -================================ - -.. 
automodule:: synapse.api.streams.event - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.streams.rst b/docs/sphinx/synapse.api.streams.rst deleted file mode 100644 index 72eb205caf37..000000000000 --- a/docs/sphinx/synapse.api.streams.rst +++ /dev/null @@ -1,17 +0,0 @@ -synapse.api.streams package -=========================== - -Submodules ----------- - -.. toctree:: - - synapse.api.streams.event - -Module contents ---------------- - -.. automodule:: synapse.api.streams - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.app.homeserver.rst b/docs/sphinx/synapse.app.homeserver.rst deleted file mode 100644 index 54b93da8feed..000000000000 --- a/docs/sphinx/synapse.app.homeserver.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.app.homeserver module -============================= - -.. automodule:: synapse.app.homeserver - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.app.rst b/docs/sphinx/synapse.app.rst deleted file mode 100644 index 4535b7982760..000000000000 --- a/docs/sphinx/synapse.app.rst +++ /dev/null @@ -1,17 +0,0 @@ -synapse.app package -=================== - -Submodules ----------- - -.. toctree:: - - synapse.app.homeserver - -Module contents ---------------- - -.. automodule:: synapse.app - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.db.rst b/docs/sphinx/synapse.db.rst deleted file mode 100644 index 83df6c03db02..000000000000 --- a/docs/sphinx/synapse.db.rst +++ /dev/null @@ -1,10 +0,0 @@ -synapse.db package -================== - -Module contents ---------------- - -.. automodule:: synapse.db - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.federation.handler.rst b/docs/sphinx/synapse.federation.handler.rst deleted file mode 100644 index 5597f5c46d27..000000000000 --- a/docs/sphinx/synapse.federation.handler.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.federation.handler module -================================= - -.. automodule:: synapse.federation.handler - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.federation.messaging.rst b/docs/sphinx/synapse.federation.messaging.rst deleted file mode 100644 index 4bbaabf3efd0..000000000000 --- a/docs/sphinx/synapse.federation.messaging.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.federation.messaging module -=================================== - -.. automodule:: synapse.federation.messaging - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.federation.pdu_codec.rst b/docs/sphinx/synapse.federation.pdu_codec.rst deleted file mode 100644 index 8f0b15a63cc0..000000000000 --- a/docs/sphinx/synapse.federation.pdu_codec.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.federation.pdu_codec module -=================================== - -.. automodule:: synapse.federation.pdu_codec - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.federation.persistence.rst b/docs/sphinx/synapse.federation.persistence.rst deleted file mode 100644 index db7ab8ade14b..000000000000 --- a/docs/sphinx/synapse.federation.persistence.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.federation.persistence module -===================================== - -.. 
automodule:: synapse.federation.persistence - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.federation.replication.rst b/docs/sphinx/synapse.federation.replication.rst deleted file mode 100644 index 49e26e0928d8..000000000000 --- a/docs/sphinx/synapse.federation.replication.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.federation.replication module -===================================== - -.. automodule:: synapse.federation.replication - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.federation.rst b/docs/sphinx/synapse.federation.rst deleted file mode 100644 index 7240c7901b49..000000000000 --- a/docs/sphinx/synapse.federation.rst +++ /dev/null @@ -1,22 +0,0 @@ -synapse.federation package -========================== - -Submodules ----------- - -.. toctree:: - - synapse.federation.handler - synapse.federation.pdu_codec - synapse.federation.persistence - synapse.federation.replication - synapse.federation.transport - synapse.federation.units - -Module contents ---------------- - -.. automodule:: synapse.federation - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.federation.transport.rst b/docs/sphinx/synapse.federation.transport.rst deleted file mode 100644 index 877956b3c954..000000000000 --- a/docs/sphinx/synapse.federation.transport.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.federation.transport module -=================================== - -.. automodule:: synapse.federation.transport - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.federation.units.rst b/docs/sphinx/synapse.federation.units.rst deleted file mode 100644 index 8f9212b07dc4..000000000000 --- a/docs/sphinx/synapse.federation.units.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.federation.units module -=============================== - -.. automodule:: synapse.federation.units - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.persistence.rst b/docs/sphinx/synapse.persistence.rst deleted file mode 100644 index 37c0c23720fd..000000000000 --- a/docs/sphinx/synapse.persistence.rst +++ /dev/null @@ -1,19 +0,0 @@ -synapse.persistence package -=========================== - -Submodules ----------- - -.. toctree:: - - synapse.persistence.service - synapse.persistence.tables - synapse.persistence.transactions - -Module contents ---------------- - -.. automodule:: synapse.persistence - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.persistence.service.rst b/docs/sphinx/synapse.persistence.service.rst deleted file mode 100644 index 3514d3c76f3a..000000000000 --- a/docs/sphinx/synapse.persistence.service.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.persistence.service module -================================== - -.. automodule:: synapse.persistence.service - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.persistence.tables.rst b/docs/sphinx/synapse.persistence.tables.rst deleted file mode 100644 index 907b02769dd2..000000000000 --- a/docs/sphinx/synapse.persistence.tables.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.persistence.tables module -================================= - -.. 
automodule:: synapse.persistence.tables - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.persistence.transactions.rst b/docs/sphinx/synapse.persistence.transactions.rst deleted file mode 100644 index 475c02a8c519..000000000000 --- a/docs/sphinx/synapse.persistence.transactions.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.persistence.transactions module -======================================= - -.. automodule:: synapse.persistence.transactions - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.rest.base.rst b/docs/sphinx/synapse.rest.base.rst deleted file mode 100644 index 84d2d9b31da1..000000000000 --- a/docs/sphinx/synapse.rest.base.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.rest.base module -======================== - -.. automodule:: synapse.rest.base - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.rest.events.rst b/docs/sphinx/synapse.rest.events.rst deleted file mode 100644 index ebbe26c7469d..000000000000 --- a/docs/sphinx/synapse.rest.events.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.rest.events module -========================== - -.. automodule:: synapse.rest.events - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.rest.register.rst b/docs/sphinx/synapse.rest.register.rst deleted file mode 100644 index a4a48a8a8f47..000000000000 --- a/docs/sphinx/synapse.rest.register.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.rest.register module -============================ - -.. automodule:: synapse.rest.register - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.rest.room.rst b/docs/sphinx/synapse.rest.room.rst deleted file mode 100644 index 63fc5c284080..000000000000 --- a/docs/sphinx/synapse.rest.room.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.rest.room module -======================== - -.. automodule:: synapse.rest.room - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.rest.rst b/docs/sphinx/synapse.rest.rst deleted file mode 100644 index 016af926b2f3..000000000000 --- a/docs/sphinx/synapse.rest.rst +++ /dev/null @@ -1,20 +0,0 @@ -synapse.rest package -==================== - -Submodules ----------- - -.. toctree:: - - synapse.rest.base - synapse.rest.events - synapse.rest.register - synapse.rest.room - -Module contents ---------------- - -.. automodule:: synapse.rest - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.rst b/docs/sphinx/synapse.rst deleted file mode 100644 index e7869e0e5d06..000000000000 --- a/docs/sphinx/synapse.rst +++ /dev/null @@ -1,30 +0,0 @@ -synapse package -=============== - -Subpackages ------------ - -.. toctree:: - - synapse.api - synapse.app - synapse.federation - synapse.persistence - synapse.rest - synapse.util - -Submodules ----------- - -.. toctree:: - - synapse.server - synapse.state - -Module contents ---------------- - -.. automodule:: synapse - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.server.rst b/docs/sphinx/synapse.server.rst deleted file mode 100644 index 7f33f084d755..000000000000 --- a/docs/sphinx/synapse.server.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.server module -===================== - -.. 
automodule:: synapse.server - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.state.rst b/docs/sphinx/synapse.state.rst deleted file mode 100644 index 744be2a8be83..000000000000 --- a/docs/sphinx/synapse.state.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.state module -==================== - -.. automodule:: synapse.state - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.util.async.rst b/docs/sphinx/synapse.util.async.rst deleted file mode 100644 index 542bb54444f8..000000000000 --- a/docs/sphinx/synapse.util.async.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.util.async module -========================= - -.. automodule:: synapse.util.async - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.util.dbutils.rst b/docs/sphinx/synapse.util.dbutils.rst deleted file mode 100644 index afaa9eb7496c..000000000000 --- a/docs/sphinx/synapse.util.dbutils.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.util.dbutils module -=========================== - -.. automodule:: synapse.util.dbutils - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.util.http.rst b/docs/sphinx/synapse.util.http.rst deleted file mode 100644 index 344af5a49027..000000000000 --- a/docs/sphinx/synapse.util.http.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.util.http module -======================== - -.. automodule:: synapse.util.http - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.util.lockutils.rst b/docs/sphinx/synapse.util.lockutils.rst deleted file mode 100644 index 16ee26cabdda..000000000000 --- a/docs/sphinx/synapse.util.lockutils.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.util.lockutils module -============================= - -.. automodule:: synapse.util.lockutils - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.util.logutils.rst b/docs/sphinx/synapse.util.logutils.rst deleted file mode 100644 index 2b79fa7a4be4..000000000000 --- a/docs/sphinx/synapse.util.logutils.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.util.logutils module -============================ - -.. automodule:: synapse.util.logutils - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.util.rst b/docs/sphinx/synapse.util.rst deleted file mode 100644 index 01a0c3a59158..000000000000 --- a/docs/sphinx/synapse.util.rst +++ /dev/null @@ -1,21 +0,0 @@ -synapse.util package -==================== - -Submodules ----------- - -.. toctree:: - - synapse.util.async - synapse.util.http - synapse.util.lockutils - synapse.util.logutils - synapse.util.stringutils - -Module contents ---------------- - -.. automodule:: synapse.util - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.util.stringutils.rst b/docs/sphinx/synapse.util.stringutils.rst deleted file mode 100644 index ec626eee28b5..000000000000 --- a/docs/sphinx/synapse.util.stringutils.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.util.stringutils module -=============================== - -.. 
automodule:: synapse.util.stringutils - :members: - :undoc-members: - :show-inheritance: diff --git a/scripts-dev/sphinx_api_docs.sh b/scripts-dev/sphinx_api_docs.sh deleted file mode 100644 index ee72b29657a0..000000000000 --- a/scripts-dev/sphinx_api_docs.sh +++ /dev/null @@ -1 +0,0 @@ -sphinx-apidoc -o docs/sphinx/ synapse/ -ef diff --git a/setup.cfg b/setup.cfg index a32278ea8a08..f46e43fad0bb 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,8 +1,3 @@ -[build_sphinx] -source-dir = docs/sphinx -build-dir = docs/build -all_files = 1 - [trial] test_suite = tests From 4f0637346a194a3343b4fea6cf38c1548e56648d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 7 Oct 2020 12:03:26 +0100 Subject: [PATCH 134/245] Combine `SpamCheckerApi` with the more generic `ModuleApi`. (#8464) Lots of different module apis is not easy to maintain. Rather than adding yet another ModuleApi(hs, hs.get_auth_handler()) incantation, first add an hs.get_module_api() method and use it where possible. --- changelog.d/8464.misc | 1 + docs/spam_checker.md | 9 ++---- synapse/app/homeserver.py | 3 +- synapse/events/spamcheck.py | 5 ++-- synapse/events/third_party_rules.py | 3 +- synapse/handlers/auth.py | 7 +++++ synapse/module_api/__init__.py | 29 ++++++++++++++++++- synapse/server.py | 5 ++++ synapse/spam_checker_api/__init__.py | 43 ---------------------------- tests/module_api/test_api.py | 4 +-- 10 files changed, 51 insertions(+), 58 deletions(-) create mode 100644 changelog.d/8464.misc diff --git a/changelog.d/8464.misc b/changelog.d/8464.misc new file mode 100644 index 000000000000..a552e88f9fc8 --- /dev/null +++ b/changelog.d/8464.misc @@ -0,0 +1 @@ +Combine `SpamCheckerApi` with the more generic `ModuleApi`. diff --git a/docs/spam_checker.md b/docs/spam_checker.md index eb10e115f98f..7fc08f1b7021 100644 --- a/docs/spam_checker.md +++ b/docs/spam_checker.md @@ -11,7 +11,7 @@ able to be imported by the running Synapse. The Python class is instantiated with two objects: * Any configuration (see below). -* An instance of `synapse.spam_checker_api.SpamCheckerApi`. +* An instance of `synapse.module_api.ModuleApi`. It then implements methods which return a boolean to alter behavior in Synapse. @@ -26,11 +26,8 @@ well as some specific methods: The details of the each of these methods (as well as their inputs and outputs) are documented in the `synapse.events.spamcheck.SpamChecker` class. -The `SpamCheckerApi` class provides a way for the custom spam checker class to -call back into the homeserver internals. It currently implements the following -methods: - -* `get_state_events_in_room` +The `ModuleApi` class provides a way for the custom spam checker class to +call back into the homeserver internals. 
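For context, a spam checker module written against the combined API keeps the same shape as before; only the type of the `api` argument it is constructed with changes. A minimal sketch, assuming the `config=`/`api=` constructor keywords used by `synapse.events.spamcheck.SpamChecker` when instantiating modules (the `parse_config` and `check_event_for_spam` hooks shown are illustrative, not the full interface):

```python
# Minimal sketch of a spam checker module under the combined API. The
# constructor keywords (config=, api=) match how SpamChecker instantiates
# modules that declare an `api` argument.
class ExampleSpamChecker:
    def __init__(self, config, api):
        self._config = config
        # `api` is now a synapse.module_api.ModuleApi rather than the old
        # SpamCheckerApi; get_state_events_in_room is still available on it.
        self._api = api

    @staticmethod
    def parse_config(config):
        # Validate/normalise the module's config section here.
        return config

    def check_event_for_spam(self, event):
        # Return True to reject the event as spam, False to allow it.
        return False
```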
### Example diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 4ed4a2c2533a..2b5465417f4c 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -56,7 +56,6 @@ from synapse.http.site import SynapseSite from synapse.logging.context import LoggingContext from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy -from synapse.module_api import ModuleApi from synapse.python_dependencies import check_requirements from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory @@ -106,7 +105,7 @@ def _listener_http(self, config: HomeServerConfig, listener_config: ListenerConf additional_resources = listener_config.http_options.additional_resources logger.debug("Configuring additional resources: %r", additional_resources) - module_api = ModuleApi(self, self.get_auth_handler()) + module_api = self.get_module_api() for path, resmodule in additional_resources.items(): handler_cls, config = load_module(resmodule) handler = handler_cls(config, module_api) diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index b0fc859a4712..bad18f7fdf98 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -17,24 +17,25 @@ import inspect from typing import Any, Dict, List, Optional, Tuple -from synapse.spam_checker_api import RegistrationBehaviour, SpamCheckerApi +from synapse.spam_checker_api import RegistrationBehaviour from synapse.types import Collection MYPY = False if MYPY: + import synapse.events import synapse.server class SpamChecker: def __init__(self, hs: "synapse.server.HomeServer"): self.spam_checkers = [] # type: List[Any] + api = hs.get_module_api() for module, config in hs.config.spam_checkers: # Older spam checkers don't accept the `api` argument, so we # try and detect support. spam_args = inspect.getfullargspec(module) if "api" in spam_args.args: - api = SpamCheckerApi(hs) self.spam_checkers.append(module(config=config, api=api)) else: self.spam_checkers.append(module(config=config)) diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index e38b8e67fb8c..1535cc53391a 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -16,7 +16,6 @@ from synapse.events import EventBase from synapse.events.snapshot import EventContext -from synapse.module_api import ModuleApi from synapse.types import Requester, StateMap @@ -40,7 +39,7 @@ def __init__(self, hs): if module is not None: self.third_party_rules = module( - config=config, module_api=ModuleApi(hs, hs.get_auth_handler()), + config=config, module_api=hs.get_module_api(), ) async def check_event_allowed( diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 7c4b716b2853..f6d17c53b15e 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -164,7 +164,14 @@ def __init__(self, hs): self.bcrypt_rounds = hs.config.bcrypt_rounds + # we can't use hs.get_module_api() here, because to do so will create an + # import loop. 
+ # + # TODO: refactor this class to separate the lower-level stuff that + # ModuleApi can use from the higher-level stuff that uses ModuleApi, as + # better way to break the loop account_handler = ModuleApi(hs, self) + self.password_providers = [ module(config=config, account_handler=account_handler) for module, config in hs.config.password_providers diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 646f09d2bc03..b410e3ad9c55 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -14,13 +14,14 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Iterable, Optional, Tuple from twisted.internet import defer from synapse.http.client import SimpleHttpClient from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable, run_in_background +from synapse.storage.state import StateFilter from synapse.types import UserID if TYPE_CHECKING: @@ -293,6 +294,32 @@ async def complete_sso_login_async( registered_user_id, request, client_redirect_url, ) + @defer.inlineCallbacks + def get_state_events_in_room( + self, room_id: str, types: Iterable[Tuple[str, Optional[str]]] + ) -> defer.Deferred: + """Gets current state events for the given room. + + (This is exposed for compatibility with the old SpamCheckerApi. We should + probably deprecate it and replace it with an async method in a subclass.) + + Args: + room_id: The room ID to get state events in. + types: The event type and state key (using None + to represent 'any') of the room state to acquire. + + Returns: + twisted.internet.defer.Deferred[list(synapse.events.FrozenEvent)]: + The filtered state events in the room. + """ + state_ids = yield defer.ensureDeferred( + self._store.get_filtered_current_state_ids( + room_id=room_id, state_filter=StateFilter.from_types(types) + ) + ) + state = yield defer.ensureDeferred(self._store.get_events(state_ids.values())) + return state.values() + class PublicRoomListManager: """Contains methods for adding to, removing from and querying whether a room diff --git a/synapse/server.py b/synapse/server.py index aa2273955cd4..f83dd6148c96 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -91,6 +91,7 @@ from synapse.handlers.user_directory import UserDirectoryHandler from synapse.http.client import InsecureInterceptableContextFactory, SimpleHttpClient from synapse.http.matrixfederationclient import MatrixFederationHttpClient +from synapse.module_api import ModuleApi from synapse.notifier import Notifier from synapse.push.action_generator import ActionGenerator from synapse.push.pusherpool import PusherPool @@ -656,6 +657,10 @@ def get_replication_streams(self) -> Dict[str, Stream]: def get_federation_ratelimiter(self) -> FederationRateLimiter: return FederationRateLimiter(self.clock, config=self.config.rc_federation) + @cache_in_self + def get_module_api(self) -> ModuleApi: + return ModuleApi(self, self.get_auth_handler()) + async def remove_pusher(self, app_id: str, push_key: str, user_id: str): return await self.get_pusherpool().remove_pusher(app_id, push_key, user_id) diff --git a/synapse/spam_checker_api/__init__.py b/synapse/spam_checker_api/__init__.py index 395ac5ab0283..3ce25bb012a0 100644 --- a/synapse/spam_checker_api/__init__.py +++ b/synapse/spam_checker_api/__init__.py @@ -12,19 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. -import logging from enum import Enum -from twisted.internet import defer - -from synapse.storage.state import StateFilter - -MYPY = False -if MYPY: - import synapse.server - -logger = logging.getLogger(__name__) - class RegistrationBehaviour(Enum): """ @@ -34,35 +23,3 @@ class RegistrationBehaviour(Enum): ALLOW = "allow" SHADOW_BAN = "shadow_ban" DENY = "deny" - - -class SpamCheckerApi: - """A proxy object that gets passed to spam checkers so they can get - access to rooms and other relevant information. - """ - - def __init__(self, hs: "synapse.server.HomeServer"): - self.hs = hs - - self._store = hs.get_datastore() - - @defer.inlineCallbacks - def get_state_events_in_room(self, room_id: str, types: tuple) -> defer.Deferred: - """Gets state events for the given room. - - Args: - room_id: The room ID to get state events in. - types: The event type and state key (using None - to represent 'any') of the room state to acquire. - - Returns: - twisted.internet.defer.Deferred[list(synapse.events.FrozenEvent)]: - The filtered state events in the room. - """ - state_ids = yield defer.ensureDeferred( - self._store.get_filtered_current_state_ids( - room_id=room_id, state_filter=StateFilter.from_types(types) - ) - ) - state = yield defer.ensureDeferred(self._store.get_events(state_ids.values())) - return state.values() diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 54600ad9833c..7c790bee7dca 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from synapse.module_api import ModuleApi + from synapse.rest import admin from synapse.rest.client.v1 import login, room @@ -28,7 +28,7 @@ class ModuleApiTestCase(HomeserverTestCase): def prepare(self, reactor, clock, homeserver): self.store = homeserver.get_datastore() - self.module_api = ModuleApi(homeserver, homeserver.get_auth_handler()) + self.module_api = homeserver.get_module_api() def test_can_register_user(self): """Tests that an external module can register a user""" From 4cb44a158549e83d42061b02a8b704e7d5873b21 Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Wed, 7 Oct 2020 08:00:17 -0400 Subject: [PATCH 135/245] Add support for MSC2697: Dehydrated devices (#8380) This allows a user to store an offline device on the server and then restore it at a subsequent login. --- changelog.d/8380.feature | 1 + synapse/handlers/device.py | 84 ++++++++++- synapse/rest/client/v2_alpha/devices.py | 134 ++++++++++++++++++ synapse/rest/client/v2_alpha/keys.py | 37 +++-- synapse/storage/databases/main/devices.py | 78 +++++++++- .../storage/databases/main/end_to_end_keys.py | 7 +- .../storage/databases/main/registration.py | 32 ++++- .../main/schema/delta/58/11dehydration.sql | 20 +++ tests/handlers/test_device.py | 82 +++++++++++ 9 files changed, 454 insertions(+), 21 deletions(-) create mode 100644 changelog.d/8380.feature create mode 100644 synapse/storage/databases/main/schema/delta/58/11dehydration.sql diff --git a/changelog.d/8380.feature b/changelog.d/8380.feature new file mode 100644 index 000000000000..05ccea19dce4 --- /dev/null +++ b/changelog.d/8380.feature @@ -0,0 +1 @@ +Add support for device dehydration ([MSC2697](https://github.com/matrix-org/matrix-doc/pull/2697)). 
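Before the server-side changes below, it may help to see the shape of the client flow this adds: store a dehydrated device while logged in, then claim it after a later login. A rough sketch against the new unstable endpoints (the `/_matrix/client/unstable` prefix, homeserver URL and token handling are assumptions for illustration; the request and response bodies mirror the servlet docstrings further down):

```python
# Rough client-side sketch of the dehydration flow added by this patch.
import requests

HS = "https://example.org"  # hypothetical homeserver
PREFIX = HS + "/_matrix/client/unstable/org.matrix.msc2697.v2"


def store_dehydrated_device(access_token, device_data):
    # Upload (and replace any previous) dehydrated device for this user.
    resp = requests.put(
        PREFIX + "/dehydrated_device",
        headers={"Authorization": "Bearer " + access_token},
        json={
            "device_data": device_data,
            "initial_device_display_name": "dehydrated device",
        },
    )
    return resp.json()["device_id"]


def claim_dehydrated_device(access_token, device_id):
    # After a fresh login, swap the new login's device for the dehydrated one.
    resp = requests.post(
        PREFIX + "/dehydrated_device/claim",
        headers={"Authorization": "Bearer " + access_token},
        json={"device_id": device_id},
    )
    return resp.json()  # {"success": True} on success
```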
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index b9d90981048a..e883ed1e37ea 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd -# Copyright 2019 The Matrix.org Foundation C.I.C. +# Copyright 2019,2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Optional, Tuple from synapse.api import errors from synapse.api.constants import EventTypes @@ -29,6 +29,7 @@ from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import ( + JsonDict, StreamToken, get_domain_from_id, get_verify_key_from_cross_signing_key, @@ -505,6 +506,85 @@ async def user_left_room(self, user, room_id): # receive device updates. Mark this in DB. await self.store.mark_remote_user_device_list_as_unsubscribed(user_id) + async def store_dehydrated_device( + self, + user_id: str, + device_data: JsonDict, + initial_device_display_name: Optional[str] = None, + ) -> str: + """Store a dehydrated device for a user. If the user had a previous + dehydrated device, it is removed. + + Args: + user_id: the user that we are storing the device for + device_data: the dehydrated device information + initial_device_display_name: The display name to use for the device + Returns: + device id of the dehydrated device + """ + device_id = await self.check_device_registered( + user_id, None, initial_device_display_name, + ) + old_device_id = await self.store.store_dehydrated_device( + user_id, device_id, device_data + ) + if old_device_id is not None: + await self.delete_device(user_id, old_device_id) + return device_id + + async def get_dehydrated_device( + self, user_id: str + ) -> Optional[Tuple[str, JsonDict]]: + """Retrieve the information for a dehydrated device. + + Args: + user_id: the user whose dehydrated device we are looking for + Returns: + a tuple whose first item is the device ID, and the second item is + the dehydrated device information + """ + return await self.store.get_dehydrated_device(user_id) + + async def rehydrate_device( + self, user_id: str, access_token: str, device_id: str + ) -> dict: + """Process a rehydration request from the user. 
+ + Args: + user_id: the user who is rehydrating the device + access_token: the access token used for the request + device_id: the ID of the device that will be rehydrated + Returns: + a dict containing {"success": True} + """ + success = await self.store.remove_dehydrated_device(user_id, device_id) + + if not success: + raise errors.NotFoundError() + + # If the dehydrated device was successfully deleted (the device ID + # matched the stored dehydrated device), then modify the access + # token to use the dehydrated device's ID and copy the old device + # display name to the dehydrated device, and destroy the old device + # ID + old_device_id = await self.store.set_device_for_access_token( + access_token, device_id + ) + old_device = await self.store.get_device(user_id, old_device_id) + await self.store.update_device(user_id, device_id, old_device["display_name"]) + # can't call self.delete_device because that will clobber the + # access token so call the storage layer directly + await self.store.delete_device(user_id, old_device_id) + await self.store.delete_e2e_keys_by_device( + user_id=user_id, device_id=old_device_id + ) + + # tell everyone that the old device is gone and that the dehydrated + # device has a new display name + await self.notify_device_update(user_id, [old_device_id, device_id]) + + return {"success": True} + def _update_device_from_client_ips(device, client_ips): ip = client_ips.get((device["user_id"], device["device_id"]), {}) diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py index 7e174de692f6..af117cb27c16 100644 --- a/synapse/rest/client/v2_alpha/devices.py +++ b/synapse/rest/client/v2_alpha/devices.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -21,6 +22,7 @@ assert_params_in_dict, parse_json_object_from_request, ) +from synapse.http.site import SynapseRequest from ._base import client_patterns, interactive_auth_handler @@ -151,7 +153,139 @@ async def on_PUT(self, request, device_id): return 200, {} +class DehydratedDeviceServlet(RestServlet): + """Retrieve or store a dehydrated device. 
+ + GET /org.matrix.msc2697.v2/dehydrated_device + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "device_id": "dehydrated_device_id", + "device_data": { + "algorithm": "org.matrix.msc2697.v1.dehydration.v1.olm", + "account": "dehydrated_device" + } + } + + PUT /org.matrix.msc2697/dehydrated_device + Content-Type: application/json + + { + "device_data": { + "algorithm": "org.matrix.msc2697.v1.dehydration.v1.olm", + "account": "dehydrated_device" + } + } + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "device_id": "dehydrated_device_id" + } + + """ + + PATTERNS = client_patterns("/org.matrix.msc2697.v2/dehydrated_device", releases=()) + + def __init__(self, hs): + super().__init__() + self.hs = hs + self.auth = hs.get_auth() + self.device_handler = hs.get_device_handler() + + async def on_GET(self, request: SynapseRequest): + requester = await self.auth.get_user_by_req(request) + dehydrated_device = await self.device_handler.get_dehydrated_device( + requester.user.to_string() + ) + if dehydrated_device is not None: + (device_id, device_data) = dehydrated_device + result = {"device_id": device_id, "device_data": device_data} + return (200, result) + else: + raise errors.NotFoundError("No dehydrated device available") + + async def on_PUT(self, request: SynapseRequest): + submission = parse_json_object_from_request(request) + requester = await self.auth.get_user_by_req(request) + + if "device_data" not in submission: + raise errors.SynapseError( + 400, "device_data missing", errcode=errors.Codes.MISSING_PARAM, + ) + elif not isinstance(submission["device_data"], dict): + raise errors.SynapseError( + 400, + "device_data must be an object", + errcode=errors.Codes.INVALID_PARAM, + ) + + device_id = await self.device_handler.store_dehydrated_device( + requester.user.to_string(), + submission["device_data"], + submission.get("initial_device_display_name", None), + ) + return 200, {"device_id": device_id} + + +class ClaimDehydratedDeviceServlet(RestServlet): + """Claim a dehydrated device. 
+ + POST /org.matrix.msc2697.v2/dehydrated_device/claim + Content-Type: application/json + + { + "device_id": "dehydrated_device_id" + } + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "success": true, + } + + """ + + PATTERNS = client_patterns( + "/org.matrix.msc2697.v2/dehydrated_device/claim", releases=() + ) + + def __init__(self, hs): + super().__init__() + self.hs = hs + self.auth = hs.get_auth() + self.device_handler = hs.get_device_handler() + + async def on_POST(self, request: SynapseRequest): + requester = await self.auth.get_user_by_req(request) + + submission = parse_json_object_from_request(request) + + if "device_id" not in submission: + raise errors.SynapseError( + 400, "device_id missing", errcode=errors.Codes.MISSING_PARAM, + ) + elif not isinstance(submission["device_id"], str): + raise errors.SynapseError( + 400, "device_id must be a string", errcode=errors.Codes.INVALID_PARAM, + ) + + result = await self.device_handler.rehydrate_device( + requester.user.to_string(), + self.auth.get_access_token_from_request(request), + submission["device_id"], + ) + + return (200, result) + + def register_servlets(hs, http_server): DeleteDevicesRestServlet(hs).register(http_server) DevicesRestServlet(hs).register(http_server) DeviceRestServlet(hs).register(http_server) + DehydratedDeviceServlet(hs).register(http_server) + ClaimDehydratedDeviceServlet(hs).register(http_server) diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index 55c46065694b..b91996c7387d 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd +# Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -67,6 +68,7 @@ def __init__(self, hs): super().__init__() self.auth = hs.get_auth() self.e2e_keys_handler = hs.get_e2e_keys_handler() + self.device_handler = hs.get_device_handler() @trace(opname="upload_keys") async def on_POST(self, request, device_id): @@ -75,23 +77,28 @@ async def on_POST(self, request, device_id): body = parse_json_object_from_request(request) if device_id is not None: - # passing the device_id here is deprecated; however, we allow it - # for now for compatibility with older clients. + # Providing the device_id should only be done for setting keys + # for dehydrated devices; however, we allow it for any device for + # compatibility with older clients. 
if requester.device_id is not None and device_id != requester.device_id: - set_tag("error", True) - log_kv( - { - "message": "Client uploading keys for a different device", - "logged_in_id": requester.device_id, - "key_being_uploaded": device_id, - } - ) - logger.warning( - "Client uploading keys for a different device " - "(logged in as %s, uploading for %s)", - requester.device_id, - device_id, + dehydrated_device = await self.device_handler.get_dehydrated_device( + user_id ) + if dehydrated_device is not None and device_id != dehydrated_device[0]: + set_tag("error", True) + log_kv( + { + "message": "Client uploading keys for a different device", + "logged_in_id": requester.device_id, + "key_being_uploaded": device_id, + } + ) + logger.warning( + "Client uploading keys for a different device " + "(logged in as %s, uploading for %s)", + requester.device_id, + device_id, + ) else: device_id = requester.device_id diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index fdf394c61205..317d6cde955d 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd -# Copyright 2019 The Matrix.org Foundation C.I.C. +# Copyright 2019,2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -33,7 +33,7 @@ make_tuple_comparison_clause, ) from synapse.types import Collection, JsonDict, get_verify_key_from_cross_signing_key -from synapse.util import json_encoder +from synapse.util import json_decoder, json_encoder from synapse.util.caches.descriptors import Cache, cached, cachedList from synapse.util.iterutils import batch_iter from synapse.util.stringutils import shortstr @@ -698,6 +698,80 @@ def _mark_remote_user_device_list_as_unsubscribed_txn(txn): _mark_remote_user_device_list_as_unsubscribed_txn, ) + async def get_dehydrated_device( + self, user_id: str + ) -> Optional[Tuple[str, JsonDict]]: + """Retrieve the information for a dehydrated device. + + Args: + user_id: the user whose dehydrated device we are looking for + Returns: + a tuple whose first item is the device ID, and the second item is + the dehydrated device information + """ + # FIXME: make sure device ID still exists in devices table + row = await self.db_pool.simple_select_one( + table="dehydrated_devices", + keyvalues={"user_id": user_id}, + retcols=["device_id", "device_data"], + allow_none=True, + ) + return ( + (row["device_id"], json_decoder.decode(row["device_data"])) if row else None + ) + + def _store_dehydrated_device_txn( + self, txn, user_id: str, device_id: str, device_data: str + ) -> Optional[str]: + old_device_id = self.db_pool.simple_select_one_onecol_txn( + txn, + table="dehydrated_devices", + keyvalues={"user_id": user_id}, + retcol="device_id", + allow_none=True, + ) + self.db_pool.simple_upsert_txn( + txn, + table="dehydrated_devices", + keyvalues={"user_id": user_id}, + values={"device_id": device_id, "device_data": device_data}, + ) + return old_device_id + + async def store_dehydrated_device( + self, user_id: str, device_id: str, device_data: JsonDict + ) -> Optional[str]: + """Store a dehydrated device for a user. 
+ + Args: + user_id: the user that we are storing the device for + device_id: the ID of the dehydrated device + device_data: the dehydrated device information + Returns: + device id of the user's previous dehydrated device, if any + """ + return await self.db_pool.runInteraction( + "store_dehydrated_device_txn", + self._store_dehydrated_device_txn, + user_id, + device_id, + json_encoder.encode(device_data), + ) + + async def remove_dehydrated_device(self, user_id: str, device_id: str) -> bool: + """Remove a dehydrated device. + + Args: + user_id: the user that the dehydrated device belongs to + device_id: the ID of the dehydrated device + """ + count = await self.db_pool.simple_delete( + "dehydrated_devices", + {"user_id": user_id, "device_id": device_id}, + desc="remove_dehydrated_device", + ) + return count >= 1 + class DeviceBackgroundUpdateStore(SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 8c97f2af5ce5..359dc6e968c6 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd -# Copyright 2019 The Matrix.org Foundation C.I.C. +# Copyright 2019,2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -844,6 +844,11 @@ def delete_e2e_keys_by_device_txn(txn): self._invalidate_cache_and_stream( txn, self.count_e2e_one_time_keys, (user_id, device_id) ) + self.db_pool.simple_delete_txn( + txn, + table="dehydrated_devices", + keyvalues={"user_id": user_id, "device_id": device_id}, + ) self.db_pool.simple_delete_txn( txn, table="e2e_fallback_keys_json", diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index a83df7759d79..16ba5457403a 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017-2018 New Vector Ltd -# Copyright 2019 The Matrix.org Foundation C.I.C. +# Copyright 2019,2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -964,6 +964,36 @@ async def add_access_token_to_user( desc="add_access_token_to_user", ) + def _set_device_for_access_token_txn(self, txn, token: str, device_id: str) -> str: + old_device_id = self.db_pool.simple_select_one_onecol_txn( + txn, "access_tokens", {"token": token}, "device_id" + ) + + self.db_pool.simple_update_txn( + txn, "access_tokens", {"token": token}, {"device_id": device_id} + ) + + self._invalidate_cache_and_stream(txn, self.get_user_by_access_token, (token,)) + + return old_device_id + + async def set_device_for_access_token(self, token: str, device_id: str) -> str: + """Sets the device ID associated with an access token. + + Args: + token: The access token to modify. + device_id: The new device ID. + Returns: + The old device ID associated with the access token. 
+ """ + + return await self.db_pool.runInteraction( + "set_device_for_access_token", + self._set_device_for_access_token_txn, + token, + device_id, + ) + async def register_user( self, user_id: str, diff --git a/synapse/storage/databases/main/schema/delta/58/11dehydration.sql b/synapse/storage/databases/main/schema/delta/58/11dehydration.sql new file mode 100644 index 000000000000..7851a0a825e4 --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/58/11dehydration.sql @@ -0,0 +1,20 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE TABLE IF NOT EXISTS dehydrated_devices( + user_id TEXT NOT NULL PRIMARY KEY, + device_id TEXT NOT NULL, + device_data TEXT NOT NULL -- JSON-encoded client-defined data +); diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index 969d44c78711..4512c513111c 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd +# Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -224,3 +225,84 @@ def _record_user( ) ) self.reactor.advance(1000) + + +class DehydrationTestCase(unittest.HomeserverTestCase): + def make_homeserver(self, reactor, clock): + hs = self.setup_test_homeserver("server", http_client=None) + self.handler = hs.get_device_handler() + self.registration = hs.get_registration_handler() + self.auth = hs.get_auth() + self.store = hs.get_datastore() + return hs + + def test_dehydrate_and_rehydrate_device(self): + user_id = "@boris:dehydration" + + self.get_success(self.store.register_user(user_id, "foobar")) + + # First check if we can store and fetch a dehydrated device + stored_dehydrated_device_id = self.get_success( + self.handler.store_dehydrated_device( + user_id=user_id, + device_data={"device_data": {"foo": "bar"}}, + initial_device_display_name="dehydrated device", + ) + ) + + retrieved_device_id, device_data = self.get_success( + self.handler.get_dehydrated_device(user_id=user_id) + ) + + self.assertEqual(retrieved_device_id, stored_dehydrated_device_id) + self.assertEqual(device_data, {"device_data": {"foo": "bar"}}) + + # Create a new login for the user and dehydrated the device + device_id, access_token = self.get_success( + self.registration.register_device( + user_id=user_id, device_id=None, initial_display_name="new device", + ) + ) + + # Trying to claim a nonexistent device should throw an error + self.get_failure( + self.handler.rehydrate_device( + user_id=user_id, + access_token=access_token, + device_id="not the right device ID", + ), + synapse.api.errors.NotFoundError, + ) + + # dehydrating the right devices should succeed and change our device ID + # to the dehydrated device's ID + res = self.get_success( + self.handler.rehydrate_device( + user_id=user_id, + access_token=access_token, + device_id=retrieved_device_id, + ) + ) + + self.assertEqual(res, {"success": True}) + + # make sure that our device ID has changed + user_info = self.get_success(self.auth.get_user_by_access_token(access_token)) + + self.assertEqual(user_info["device_id"], retrieved_device_id) + + # make sure the device has the display name that was set from the login + res = self.get_success(self.handler.get_device(user_id, retrieved_device_id)) + + self.assertEqual(res["display_name"], "new device") + + # make sure that the device ID that we were initially assigned no longer exists + self.get_failure( + self.handler.get_device(user_id, device_id), + synapse.api.errors.NotFoundError, + ) + + # make sure that there's no device available for dehydrating now + ret = self.get_success(self.handler.get_dehydrated_device(user_id=user_id)) + + self.assertIsNone(ret) From d373ec2f72f69ae6c394482df89061302be41405 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 7 Oct 2020 13:39:50 +0100 Subject: [PATCH 136/245] unblacklist some tests (#8474) It seems most of these blacklisted tests do actually pass most of the time. I'm of the opinion that having them blacklisted here means there is very little incentive for us to deflake any flaky tests, and meanwhile any value in those tests is completely lost. 
--- .buildkite/worker-blacklist | 31 ------------------------------- changelog.d/8474.misc | 1 + sytest-blacklist | 3 --- 3 files changed, 1 insertion(+), 34 deletions(-) create mode 100644 changelog.d/8474.misc diff --git a/.buildkite/worker-blacklist b/.buildkite/worker-blacklist index fd98cbbaf656..5975cb98cfda 100644 --- a/.buildkite/worker-blacklist +++ b/.buildkite/worker-blacklist @@ -1,41 +1,10 @@ # This file serves as a blacklist for SyTest tests that we expect will fail in # Synapse when run under worker mode. For more details, see sytest-blacklist. -Message history can be paginated - Can re-join room if re-invited -The only membership state included in an initial sync is for all the senders in the timeline - -Local device key changes get to remote servers - -If remote user leaves room we no longer receive device updates - -Forgotten room messages cannot be paginated - -Inbound federation can get public room list - -Members from the gap are included in gappy incr LL sync - -Leaves are present in non-gapped incremental syncs - -Old leaves are present in gapped incremental syncs - -User sees updates to presence from other users in the incremental sync. - -Gapped incremental syncs include all state changes - -Old members are included in gappy incr LL sync if they start speaking - # new failures as of https://github.com/matrix-org/sytest/pull/732 Device list doesn't change if remote server is down -Remote servers cannot set power levels in rooms without existing powerlevels -Remote servers should reject attempts by non-creators to set the power levels # https://buildkite.com/matrix-dot-org/synapse/builds/6134#6f67bf47-e234-474d-80e8-c6e1868b15c5 Server correctly handles incoming m.device_list_update - -# this fails reliably with a torture level of 100 due to https://github.com/matrix-org/synapse/issues/6536 -Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state - -Can get rooms/{roomId}/members at a given point diff --git a/changelog.d/8474.misc b/changelog.d/8474.misc new file mode 100644 index 000000000000..65e329a6e3e3 --- /dev/null +++ b/changelog.d/8474.misc @@ -0,0 +1 @@ +Unblacklist some sytests. diff --git a/sytest-blacklist b/sytest-blacklist index b563448016f1..de9986357b9a 100644 --- a/sytest-blacklist +++ b/sytest-blacklist @@ -34,9 +34,6 @@ New federated private chats get full presence information (SYN-115) # this requirement from the spec Inbound federation of state requires event_id as a mandatory paramater -# Blacklisted until https://github.com/matrix-org/synapse/pull/6486 lands -Can upload self-signing keys - # Blacklisted until MSC2753 is implemented Local users can peek into world_readable rooms by room ID We can't peek into rooms with shared history_visibility From d9b55bd830e47fdbae0054afd0035342bb21c76e Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 7 Oct 2020 08:48:54 -0400 Subject: [PATCH 137/245] Add Ubuntu 20.10 (Groovy Gorilla) to build scripts. (#8475) --- changelog.d/8475.misc | 1 + scripts-dev/build_debian_packages | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/8475.misc diff --git a/changelog.d/8475.misc b/changelog.d/8475.misc new file mode 100644 index 000000000000..69bcb04097c7 --- /dev/null +++ b/changelog.d/8475.misc @@ -0,0 +1 @@ +Add Groovy Gorilla to the list of distributions we build `.deb`s for. 
diff --git a/scripts-dev/build_debian_packages b/scripts-dev/build_debian_packages index d055cf32877d..d0685c8b35fd 100755 --- a/scripts-dev/build_debian_packages +++ b/scripts-dev/build_debian_packages @@ -25,6 +25,7 @@ DISTS = ( "ubuntu:xenial", "ubuntu:bionic", "ubuntu:focal", + "ubuntu:groovy", ) DESC = '''\ From 9ca6341969b8b84c0c79a29fb914d1d8dbb3e320 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 7 Oct 2020 13:49:40 +0100 Subject: [PATCH 138/245] Fix returning incorrect prev_batch token in incremental sync (#8486) --- changelog.d/8486.bugfix | 1 + synapse/handlers/sync.py | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8486.bugfix diff --git a/changelog.d/8486.bugfix b/changelog.d/8486.bugfix new file mode 100644 index 000000000000..63fc091ba674 --- /dev/null +++ b/changelog.d/8486.bugfix @@ -0,0 +1 @@ +Fix incremental sync returning an incorrect `prev_batch` token in timeline section, which when used to paginate returned events that were included in the incremental sync. Broken since v0.16.0. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index dd1f90e3594b..6fb8332f9365 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -460,8 +460,13 @@ async def _load_filtered_recents( recents = [] if not limited or block_all_timeline: + prev_batch_token = now_token + if recents: + room_key = recents[0].internal_metadata.before + prev_batch_token = now_token.copy_and_replace("room_key", room_key) + return TimelineBatch( - events=recents, prev_batch=now_token, limited=False + events=recents, prev_batch=prev_batch_token, limited=False ) filtering_factor = 2 From b460a088c647a6d3ea0e5a9f4f80d86bb9e303b3 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 7 Oct 2020 08:58:21 -0400 Subject: [PATCH 139/245] Add typing information to the device handler. (#8407) --- changelog.d/8407.misc | 1 + mypy.ini | 1 + synapse/handlers/device.py | 89 ++++++++++++++--------- synapse/storage/databases/main/devices.py | 6 +- 4 files changed, 59 insertions(+), 38 deletions(-) create mode 100644 changelog.d/8407.misc diff --git a/changelog.d/8407.misc b/changelog.d/8407.misc new file mode 100644 index 000000000000..d37002d75bf1 --- /dev/null +++ b/changelog.d/8407.misc @@ -0,0 +1 @@ +Add typing information to the device handler. diff --git a/mypy.ini b/mypy.ini index e84ad04e412c..a7ffb81ef133 100644 --- a/mypy.ini +++ b/mypy.ini @@ -17,6 +17,7 @@ files = synapse/federation, synapse/handlers/auth.py, synapse/handlers/cas_handler.py, + synapse/handlers/device.py, synapse/handlers/directory.py, synapse/handlers/events.py, synapse/handlers/federation.py, diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index e883ed1e37ea..debb1b4f2924 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -from typing import Any, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple from synapse.api import errors from synapse.api.constants import EventTypes @@ -29,8 +29,10 @@ from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import ( + Collection, JsonDict, StreamToken, + UserID, get_domain_from_id, get_verify_key_from_cross_signing_key, ) @@ -42,13 +44,16 @@ from ._base import BaseHandler +if TYPE_CHECKING: + from synapse.app.homeserver import HomeServer + logger = logging.getLogger(__name__) MAX_DEVICE_DISPLAY_NAME_LEN = 100 class DeviceWorkerHandler(BaseHandler): - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): super().__init__(hs) self.hs = hs @@ -106,7 +111,9 @@ async def get_device(self, user_id: str, device_id: str) -> Dict[str, Any]: @trace @measure_func("device.get_user_ids_changed") - async def get_user_ids_changed(self, user_id: str, from_token: StreamToken): + async def get_user_ids_changed( + self, user_id: str, from_token: StreamToken + ) -> JsonDict: """Get list of users that have had the devices updated, or have newly joined a room, that `user_id` may be interested in. """ @@ -222,8 +229,8 @@ async def get_user_ids_changed(self, user_id: str, from_token: StreamToken): possibly_joined = possibly_changed & users_who_share_room possibly_left = (possibly_changed | possibly_left) - users_who_share_room else: - possibly_joined = [] - possibly_left = [] + possibly_joined = set() + possibly_left = set() result = {"changed": list(possibly_joined), "left": list(possibly_left)} @@ -231,7 +238,7 @@ async def get_user_ids_changed(self, user_id: str, from_token: StreamToken): return result - async def on_federation_query_user_devices(self, user_id): + async def on_federation_query_user_devices(self, user_id: str) -> JsonDict: stream_id, devices = await self.store.get_e2e_device_keys_for_federation_query( user_id ) @@ -250,7 +257,7 @@ async def on_federation_query_user_devices(self, user_id): class DeviceHandler(DeviceWorkerHandler): - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): super().__init__(hs) self.federation_sender = hs.get_federation_sender() @@ -265,7 +272,7 @@ def __init__(self, hs): hs.get_distributor().observe("user_left_room", self.user_left_room) - def _check_device_name_length(self, name: str): + def _check_device_name_length(self, name: Optional[str]): """ Checks whether a device name is longer than the maximum allowed length. @@ -284,8 +291,11 @@ def _check_device_name_length(self, name: str): ) async def check_device_registered( - self, user_id, device_id, initial_device_display_name=None - ): + self, + user_id: str, + device_id: Optional[str], + initial_device_display_name: Optional[str] = None, + ) -> str: """ If the given device has not been registered, register it with the supplied display name. @@ -293,12 +303,11 @@ async def check_device_registered( If no device_id is supplied, we make one up. 
Args: - user_id (str): @user:id - device_id (str | None): device id supplied by client - initial_device_display_name (str | None): device display name from - client + user_id: @user:id + device_id: device id supplied by client + initial_device_display_name: device display name from client Returns: - str: device id (generated if none was supplied) + device id (generated if none was supplied) """ self._check_device_name_length(initial_device_display_name) @@ -317,15 +326,15 @@ async def check_device_registered( # times in case of a clash. attempts = 0 while attempts < 5: - device_id = stringutils.random_string(10).upper() + new_device_id = stringutils.random_string(10).upper() new_device = await self.store.store_device( user_id=user_id, - device_id=device_id, + device_id=new_device_id, initial_device_display_name=initial_device_display_name, ) if new_device: - await self.notify_device_update(user_id, [device_id]) - return device_id + await self.notify_device_update(user_id, [new_device_id]) + return new_device_id attempts += 1 raise errors.StoreError(500, "Couldn't generate a device ID.") @@ -434,7 +443,9 @@ async def update_device(self, user_id: str, device_id: str, content: dict) -> No @trace @measure_func("notify_device_update") - async def notify_device_update(self, user_id, device_ids): + async def notify_device_update( + self, user_id: str, device_ids: Collection[str] + ) -> None: """Notify that a user's device(s) has changed. Pokes the notifier, and remote servers if the user is local. """ @@ -446,7 +457,7 @@ async def notify_device_update(self, user_id, device_ids): user_id ) - hosts = set() + hosts = set() # type: Set[str] if self.hs.is_mine_id(user_id): hosts.update(get_domain_from_id(u) for u in users_who_share_room) hosts.discard(self.server_name) @@ -498,7 +509,7 @@ async def notify_user_signature_update( self.notifier.on_new_event("device_list_key", position, users=[from_user_id]) - async def user_left_room(self, user, room_id): + async def user_left_room(self, user: UserID, room_id: str) -> None: user_id = user.to_string() room_ids = await self.store.get_rooms_for_user(user_id) if not room_ids: @@ -586,7 +597,9 @@ async def rehydrate_device( return {"success": True} -def _update_device_from_client_ips(device, client_ips): +def _update_device_from_client_ips( + device: Dict[str, Any], client_ips: Dict[Tuple[str, str], Dict[str, Any]] +) -> None: ip = client_ips.get((device["user_id"], device["device_id"]), {}) device.update({"last_seen_ts": ip.get("last_seen"), "last_seen_ip": ip.get("ip")}) @@ -594,7 +607,7 @@ def _update_device_from_client_ips(device, client_ips): class DeviceListUpdater: "Handles incoming device list updates from federation and updates the DB" - def __init__(self, hs, device_handler): + def __init__(self, hs: "HomeServer", device_handler: DeviceHandler): self.store = hs.get_datastore() self.federation = hs.get_federation_client() self.clock = hs.get_clock() @@ -603,7 +616,9 @@ def __init__(self, hs, device_handler): self._remote_edu_linearizer = Linearizer(name="remote_device_list") # user_id -> list of updates waiting to be handled. - self._pending_updates = {} + self._pending_updates = ( + {} + ) # type: Dict[str, List[Tuple[str, str, Iterable[str], JsonDict]]] # Recently seen stream ids. 
We don't bother keeping these in the DB, # but they're useful to have them about to reduce the number of spurious @@ -626,7 +641,9 @@ def __init__(self, hs, device_handler): ) @trace - async def incoming_device_list_update(self, origin, edu_content): + async def incoming_device_list_update( + self, origin: str, edu_content: JsonDict + ) -> None: """Called on incoming device list update from federation. Responsible for parsing the EDU and adding to pending updates list. """ @@ -687,7 +704,7 @@ async def incoming_device_list_update(self, origin, edu_content): await self._handle_device_updates(user_id) @measure_func("_incoming_device_list_update") - async def _handle_device_updates(self, user_id): + async def _handle_device_updates(self, user_id: str) -> None: "Actually handle pending updates." with (await self._remote_edu_linearizer.queue(user_id)): @@ -735,7 +752,9 @@ async def _handle_device_updates(self, user_id): stream_id for _, stream_id, _, _ in pending_updates ) - async def _need_to_do_resync(self, user_id, updates): + async def _need_to_do_resync( + self, user_id: str, updates: Iterable[Tuple[str, str, Iterable[str], JsonDict]] + ) -> bool: """Given a list of updates for a user figure out if we need to do a full resync, or whether we have enough data that we can just apply the delta. """ @@ -766,7 +785,7 @@ async def _need_to_do_resync(self, user_id, updates): return False @trace - async def _maybe_retry_device_resync(self): + async def _maybe_retry_device_resync(self) -> None: """Retry to resync device lists that are out of sync, except if another retry is in progress. """ @@ -809,7 +828,7 @@ async def _maybe_retry_device_resync(self): async def user_device_resync( self, user_id: str, mark_failed_as_stale: bool = True - ) -> Optional[dict]: + ) -> Optional[JsonDict]: """Fetches all devices for a user and updates the device cache with them. Args: @@ -833,7 +852,7 @@ async def user_device_resync( # it later. await self.store.mark_remote_user_device_cache_as_stale(user_id) - return + return None except (RequestSendFailed, HttpResponseException) as e: logger.warning( "Failed to handle device list update for %s: %s", user_id, e, @@ -850,12 +869,12 @@ async def user_device_resync( # next time we get a device list update for this user_id. # This makes it more likely that the device lists will # eventually become consistent. - return + return None except FederationDeniedError as e: set_tag("error", True) log_kv({"reason": "FederationDeniedError"}) logger.info(e) - return + return None except Exception as e: set_tag("error", True) log_kv( @@ -868,7 +887,7 @@ async def user_device_resync( # it later. await self.store.mark_remote_user_device_cache_as_stale(user_id) - return + return None log_kv({"result": result}) stream_id = result["stream_id"] devices = result["devices"] @@ -929,7 +948,7 @@ async def process_cross_signing_key_update( user_id: str, master_key: Optional[Dict[str, Any]], self_signing_key: Optional[Dict[str, Any]], - ) -> list: + ) -> List[str]: """Process the given new master and self-signing key for the given remote user. 
Args: diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 317d6cde955d..2d0a6408b5cd 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -911,7 +911,7 @@ def __init__(self, database: DatabasePool, db_conn, hs): self._clock.looping_call(self._prune_old_outbound_device_pokes, 60 * 60 * 1000) async def store_device( - self, user_id: str, device_id: str, initial_device_display_name: str + self, user_id: str, device_id: str, initial_device_display_name: Optional[str] ) -> bool: """Ensure the given device is known; add it to the store if not @@ -1029,7 +1029,7 @@ async def update_device( ) async def update_remote_device_list_cache_entry( - self, user_id: str, device_id: str, content: JsonDict, stream_id: int + self, user_id: str, device_id: str, content: JsonDict, stream_id: str ) -> None: """Updates a single device in the cache of a remote user's devicelist. @@ -1057,7 +1057,7 @@ def _update_remote_device_list_cache_entry_txn( user_id: str, device_id: str, content: JsonDict, - stream_id: int, + stream_id: str, ) -> None: if content.get("deleted"): self.db_pool.simple_delete_txn( From 52a50e8686ec9af6c629004171748f41eae09f73 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 7 Oct 2020 15:15:33 +0100 Subject: [PATCH 140/245] Use vector clocks for room stream tokens. (#8439) Currently when using multiple event persisters we (in the worst case) don't tell clients about events until all event persisters have persisted new events after the original event. This is a suboptimal, especially if one of the event persisters goes down. To handle this, we encode the position of each event persister in the room tokens so that we can send events to clients immediately. To reduce the size of the token we do two things: 1. We create a unique immutable persistent mapping between instance names and a generated small integer ID, which we can encode in the tokens instead of the instance name; and 2. We encode the "persisted upto position" of the room token and then only explicitly include instances that have positions strictly greater than that. The new tokens look something like: `m3478~1.3488~2.3489`, where the first number is the min position, and the subsequent `-` separated pairs are the instance ID to positions map. (We use `.` and `~` as separators as they're URL safe and not already used by `StreamToken`). --- changelog.d/8439.misc | 1 + .../delta/58/19instance_map.sql.postgres | 25 ++ synapse/storage/databases/main/stream.py | 280 +++++++++++++++--- synapse/types.py | 116 +++++++- 4 files changed, 380 insertions(+), 42 deletions(-) create mode 100644 changelog.d/8439.misc create mode 100644 synapse/storage/databases/main/schema/delta/58/19instance_map.sql.postgres diff --git a/changelog.d/8439.misc b/changelog.d/8439.misc new file mode 100644 index 000000000000..237cb3b31135 --- /dev/null +++ b/changelog.d/8439.misc @@ -0,0 +1 @@ +Allow events to be sent to clients sooner when using sharded event persisters. 
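To make the token format concrete, here is a small standalone sketch that decodes the example token from the commit message. The real (de)serialisation lives in `synapse.types.RoomStreamToken`, so treat this purely as an illustration of the encoding:

```python
# Decodes a multi-writer room token such as "m3478~1.3488~2.3489":
# the leading number is the minimum position, and each "~"-separated
# "instance_id.position" pair records a writer that is ahead of it.
def parse_multi_writer_token(token: str):
    assert token.startswith("m")
    min_part, *pairs = token[1:].split("~")
    instance_map = {}
    for pair in pairs:
        instance_id, pos = pair.split(".")
        instance_map[int(instance_id)] = int(pos)
    return int(min_part), instance_map


print(parse_multi_writer_token("m3478~1.3488~2.3489"))
# (3478, {1: 3488, 2: 3489})
```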
diff --git a/synapse/storage/databases/main/schema/delta/58/19instance_map.sql.postgres b/synapse/storage/databases/main/schema/delta/58/19instance_map.sql.postgres new file mode 100644 index 000000000000..841186b8267e --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/58/19instance_map.sql.postgres @@ -0,0 +1,25 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +-- A unique and immutable mapping between instance name and an integer ID. This +-- lets us refer to instances via a small ID in e.g. stream tokens, without +-- having to encode the full name. +CREATE TABLE IF NOT EXISTS instance_map ( + instance_id SERIAL PRIMARY KEY, + instance_name TEXT NOT NULL +); + +CREATE UNIQUE INDEX IF NOT EXISTS instance_map_idx ON instance_map(instance_name); diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index a94bec1ac528..e3b9ff5ca6b3 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -53,7 +53,9 @@ ) from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine +from synapse.storage.util.id_generators import MultiWriterIdGenerator from synapse.types import Collection, PersistedEventPosition, RoomStreamToken +from synapse.util.caches.descriptors import cached from synapse.util.caches.stream_change_cache import StreamChangeCache if TYPE_CHECKING: @@ -208,6 +210,55 @@ def _make_generic_sql_bound( ) +def _filter_results( + lower_token: Optional[RoomStreamToken], + upper_token: Optional[RoomStreamToken], + instance_name: str, + topological_ordering: int, + stream_ordering: int, +) -> bool: + """Returns True if the event persisted by the given instance at the given + topological/stream_ordering falls between the two tokens (taking a None + token to mean unbounded). + + Used to filter results from fetching events in the DB against the given + tokens. This is necessary to handle the case where the tokens include + position maps, which we handle by fetching more than necessary from the DB + and then filtering (rather than attempting to construct a complicated SQL + query). + """ + + event_historical_tuple = ( + topological_ordering, + stream_ordering, + ) + + if lower_token: + if lower_token.topological is not None: + # If these are historical tokens we compare the `(topological, stream)` + # tuples. + if event_historical_tuple <= lower_token.as_historical_tuple(): + return False + + else: + # If these are live tokens we compare the stream ordering against the + # writers stream position. 
+ if stream_ordering <= lower_token.get_stream_pos_for_instance( + instance_name + ): + return False + + if upper_token: + if upper_token.topological is not None: + if upper_token.as_historical_tuple() < event_historical_tuple: + return False + else: + if upper_token.get_stream_pos_for_instance(instance_name) < stream_ordering: + return False + + return True + + def filter_to_clause(event_filter: Optional[Filter]) -> Tuple[str, List[str]]: # NB: This may create SQL clauses that don't optimise well (and we don't # have indices on all possible clauses). E.g. it may create @@ -305,7 +356,31 @@ def get_room_min_stream_ordering(self) -> int: raise NotImplementedError() def get_room_max_token(self) -> RoomStreamToken: - return RoomStreamToken(None, self.get_room_max_stream_ordering()) + """Get a `RoomStreamToken` that marks the current maximum persisted + position of the events stream. Useful to get a token that represents + "now". + + The token returned is a "live" token that may have an instance_map + component. + """ + + min_pos = self._stream_id_gen.get_current_token() + + positions = {} + if isinstance(self._stream_id_gen, MultiWriterIdGenerator): + # The `min_pos` is the minimum position that we know all instances + # have finished persisting to, so we only care about instances whose + # positions are ahead of that. (Instance positions can be behind the + # min position as there are times we can work out that the minimum + # position is ahead of the naive minimum across all current + # positions. See MultiWriterIdGenerator for details) + positions = { + i: p + for i, p in self._stream_id_gen.get_positions().items() + if p > min_pos + } + + return RoomStreamToken(None, min_pos, positions) async def get_room_events_stream_for_rooms( self, @@ -404,25 +479,43 @@ async def get_room_events_stream_for_room( if from_key == to_key: return [], from_key - from_id = from_key.stream - to_id = to_key.stream - - has_changed = self._events_stream_cache.has_entity_changed(room_id, from_id) + has_changed = self._events_stream_cache.has_entity_changed( + room_id, from_key.stream + ) if not has_changed: return [], from_key def f(txn): - sql = ( - "SELECT event_id, stream_ordering FROM events WHERE" - " room_id = ?" - " AND not outlier" - " AND stream_ordering > ? AND stream_ordering <= ?" - " ORDER BY stream_ordering %s LIMIT ?" - ) % (order,) - txn.execute(sql, (room_id, from_id, to_id, limit)) - - rows = [_EventDictReturn(row[0], None, row[1]) for row in txn] + # To handle tokens with a non-empty instance_map we fetch more + # results than necessary and then filter down + min_from_id = from_key.stream + max_to_id = to_key.get_max_stream_pos() + + sql = """ + SELECT event_id, instance_name, topological_ordering, stream_ordering + FROM events + WHERE + room_id = ? + AND not outlier + AND stream_ordering > ? AND stream_ordering <= ? + ORDER BY stream_ordering %s LIMIT ? 
+ """ % ( + order, + ) + txn.execute(sql, (room_id, min_from_id, max_to_id, 2 * limit)) + + rows = [ + _EventDictReturn(event_id, None, stream_ordering) + for event_id, instance_name, topological_ordering, stream_ordering in txn + if _filter_results( + from_key, + to_key, + instance_name, + topological_ordering, + stream_ordering, + ) + ][:limit] return rows rows = await self.db_pool.runInteraction("get_room_events_stream_for_room", f) @@ -431,7 +524,7 @@ def f(txn): [r.event_id for r in rows], get_prev_content=True ) - self._set_before_and_after(ret, rows, topo_order=from_id is None) + self._set_before_and_after(ret, rows, topo_order=False) if order.lower() == "desc": ret.reverse() @@ -448,31 +541,43 @@ def f(txn): async def get_membership_changes_for_user( self, user_id: str, from_key: RoomStreamToken, to_key: RoomStreamToken ) -> List[EventBase]: - from_id = from_key.stream - to_id = to_key.stream - if from_key == to_key: return [] - if from_id: + if from_key: has_changed = self._membership_stream_cache.has_entity_changed( - user_id, int(from_id) + user_id, int(from_key.stream) ) if not has_changed: return [] def f(txn): - sql = ( - "SELECT m.event_id, stream_ordering FROM events AS e," - " room_memberships AS m" - " WHERE e.event_id = m.event_id" - " AND m.user_id = ?" - " AND e.stream_ordering > ? AND e.stream_ordering <= ?" - " ORDER BY e.stream_ordering ASC" - ) - txn.execute(sql, (user_id, from_id, to_id)) - - rows = [_EventDictReturn(row[0], None, row[1]) for row in txn] + # To handle tokens with a non-empty instance_map we fetch more + # results than necessary and then filter down + min_from_id = from_key.stream + max_to_id = to_key.get_max_stream_pos() + + sql = """ + SELECT m.event_id, instance_name, topological_ordering, stream_ordering + FROM events AS e, room_memberships AS m + WHERE e.event_id = m.event_id + AND m.user_id = ? + AND e.stream_ordering > ? AND e.stream_ordering <= ? + ORDER BY e.stream_ordering ASC + """ + txn.execute(sql, (user_id, min_from_id, max_to_id,)) + + rows = [ + _EventDictReturn(event_id, None, stream_ordering) + for event_id, instance_name, topological_ordering, stream_ordering in txn + if _filter_results( + from_key, + to_key, + instance_name, + topological_ordering, + stream_ordering, + ) + ] return rows @@ -966,11 +1071,46 @@ def _paginate_room_events_txn( else: order = "ASC" + # The bounds for the stream tokens are complicated by the fact + # that we need to handle the instance_map part of the tokens. We do this + # by fetching all events between the min stream token and the maximum + # stream token (as returned by `RoomStreamToken.get_max_stream_pos`) and + # then filtering the results. 
+ if from_token.topological is not None: + from_bound = ( + from_token.as_historical_tuple() + ) # type: Tuple[Optional[int], int] + elif direction == "b": + from_bound = ( + None, + from_token.get_max_stream_pos(), + ) + else: + from_bound = ( + None, + from_token.stream, + ) + + to_bound = None # type: Optional[Tuple[Optional[int], int]] + if to_token: + if to_token.topological is not None: + to_bound = to_token.as_historical_tuple() + elif direction == "b": + to_bound = ( + None, + to_token.stream, + ) + else: + to_bound = ( + None, + to_token.get_max_stream_pos(), + ) + bounds = generate_pagination_where_clause( direction=direction, column_names=("topological_ordering", "stream_ordering"), - from_token=from_token.as_tuple(), - to_token=to_token.as_tuple() if to_token else None, + from_token=from_bound, + to_token=to_bound, engine=self.database_engine, ) @@ -980,7 +1120,8 @@ def _paginate_room_events_txn( bounds += " AND " + filter_clause args.extend(filter_args) - args.append(int(limit)) + # We fetch more events as we'll filter the result set + args.append(int(limit) * 2) select_keywords = "SELECT" join_clause = "" @@ -1002,7 +1143,9 @@ def _paginate_room_events_txn( select_keywords += "DISTINCT" sql = """ - %(select_keywords)s event_id, topological_ordering, stream_ordering + %(select_keywords)s + event_id, instance_name, + topological_ordering, stream_ordering FROM events %(join_clause)s WHERE outlier = ? AND room_id = ? AND %(bounds)s @@ -1017,7 +1160,18 @@ def _paginate_room_events_txn( txn.execute(sql, args) - rows = [_EventDictReturn(row[0], row[1], row[2]) for row in txn] + # Filter the result set. + rows = [ + _EventDictReturn(event_id, topological_ordering, stream_ordering) + for event_id, instance_name, topological_ordering, stream_ordering in txn + if _filter_results( + lower_token=to_token if direction == "b" else from_token, + upper_token=from_token if direction == "b" else to_token, + instance_name=instance_name, + topological_ordering=topological_ordering, + stream_ordering=stream_ordering, + ) + ][:limit] if rows: topo = rows[-1].topological_ordering @@ -1082,6 +1236,58 @@ async def paginate_room_events( return (events, token) + @cached() + async def get_id_for_instance(self, instance_name: str) -> int: + """Get a unique, immutable ID that corresponds to the given Synapse worker instance. + """ + + def _get_id_for_instance_txn(txn): + instance_id = self.db_pool.simple_select_one_onecol_txn( + txn, + table="instance_map", + keyvalues={"instance_name": instance_name}, + retcol="instance_id", + allow_none=True, + ) + if instance_id is not None: + return instance_id + + # If we don't have an entry upsert one. + # + # We could do this before the first check, and rely on the cache for + # efficiency, but each UPSERT causes the next ID to increment which + # can quickly bloat the size of the generated IDs for new instances. + self.db_pool.simple_upsert_txn( + txn, + table="instance_map", + keyvalues={"instance_name": instance_name}, + values={}, + ) + + return self.db_pool.simple_select_one_onecol_txn( + txn, + table="instance_map", + keyvalues={"instance_name": instance_name}, + retcol="instance_id", + ) + + return await self.db_pool.runInteraction( + "get_id_for_instance", _get_id_for_instance_txn + ) + + @cached() + async def get_name_from_instance_id(self, instance_id: int) -> str: + """Get the instance name from an ID previously returned by + `get_id_for_instance`. 
+ """ + + return await self.db_pool.simple_select_one_onecol( + table="instance_map", + keyvalues={"instance_id": instance_id}, + retcol="instance_name", + desc="get_name_from_instance_id", + ) + class StreamStore(StreamWorkerStore): def get_room_max_stream_ordering(self) -> int: diff --git a/synapse/types.py b/synapse/types.py index bd271f9f1611..5bde67cc078a 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -22,6 +22,7 @@ TYPE_CHECKING, Any, Dict, + Iterable, Mapping, MutableMapping, Optional, @@ -43,7 +44,7 @@ if sys.version_info[:3] >= (3, 6, 0): from typing import Collection else: - from typing import Container, Iterable, Sized + from typing import Container, Sized T_co = TypeVar("T_co", covariant=True) @@ -375,7 +376,7 @@ def f2(m): return username.decode("ascii") -@attr.s(frozen=True, slots=True) +@attr.s(frozen=True, slots=True, cmp=False) class RoomStreamToken: """Tokens are positions between events. The token "s1" comes after event 1. @@ -397,6 +398,31 @@ class RoomStreamToken: event it comes after. Historic tokens start with a "t" followed by the "topological_ordering" id of the event it comes after, followed by "-", followed by the "stream_ordering" id of the event it comes after. + + There is also a third mode for live tokens where the token starts with "m", + which is sometimes used when using sharded event persisters. In this case + the events stream is considered to be a set of streams (one for each writer) + and the token encodes the vector clock of positions of each writer in their + respective streams. + + The format of the token in such case is an initial integer min position, + followed by the mapping of instance ID to position separated by '.' and '~': + + m{min_pos}~{writer1}.{pos1}~{writer2}.{pos2}. ... + + The `min_pos` corresponds to the minimum position all writers have persisted + up to, and then only writers that are ahead of that position need to be + encoded. An example token is: + + m56~2.58~3.59 + + Which corresponds to a set of three (or more writers) where instances 2 and + 3 (these are instance IDs that can be looked up in the DB to fetch the more + commonly used instance names) are at positions 58 and 59 respectively, and + all other instances are at position 56. + + Note: The `RoomStreamToken` cannot have both a topological part and an + instance map. """ topological = attr.ib( @@ -405,6 +431,25 @@ class RoomStreamToken: ) stream = attr.ib(type=int, validator=attr.validators.instance_of(int)) + instance_map = attr.ib( + type=Dict[str, int], + factory=dict, + validator=attr.validators.deep_mapping( + key_validator=attr.validators.instance_of(str), + value_validator=attr.validators.instance_of(int), + mapping_validator=attr.validators.instance_of(dict), + ), + ) + + def __attrs_post_init__(self): + """Validates that both `topological` and `instance_map` aren't set. + """ + + if self.instance_map and self.topological: + raise ValueError( + "Cannot set both 'topological' and 'instance_map' on 'RoomStreamToken'." 
+ ) + @classmethod async def parse(cls, store: "DataStore", string: str) -> "RoomStreamToken": try: @@ -413,6 +458,20 @@ async def parse(cls, store: "DataStore", string: str) -> "RoomStreamToken": if string[0] == "t": parts = string[1:].split("-", 1) return cls(topological=int(parts[0]), stream=int(parts[1])) + if string[0] == "m": + parts = string[1:].split("~") + stream = int(parts[0]) + + instance_map = {} + for part in parts[1:]: + key, value = part.split(".") + instance_id = int(key) + pos = int(value) + + instance_name = await store.get_name_from_instance_id(instance_id) + instance_map[instance_name] = pos + + return cls(topological=None, stream=stream, instance_map=instance_map,) except Exception: pass raise SynapseError(400, "Invalid token %r" % (string,)) @@ -436,14 +495,61 @@ def copy_and_advance(self, other: "RoomStreamToken") -> "RoomStreamToken": max_stream = max(self.stream, other.stream) - return RoomStreamToken(None, max_stream) + instance_map = { + instance: max( + self.instance_map.get(instance, self.stream), + other.instance_map.get(instance, other.stream), + ) + for instance in set(self.instance_map).union(other.instance_map) + } + + return RoomStreamToken(None, max_stream, instance_map) + + def as_historical_tuple(self) -> Tuple[int, int]: + """Returns a tuple of `(topological, stream)` for historical tokens. + + Raises if not an historical token (i.e. doesn't have a topological part). + """ + if self.topological is None: + raise Exception( + "Cannot call `RoomStreamToken.as_historical_tuple` on live token" + ) - def as_tuple(self) -> Tuple[Optional[int], int]: return (self.topological, self.stream) + def get_stream_pos_for_instance(self, instance_name: str) -> int: + """Get the stream position that the given writer was at at this token. + + This only makes sense for "live" tokens that may have a vector clock + component, and so asserts that this is a "live" token. + """ + assert self.topological is None + + # If we don't have an entry for the instance we can assume that it was + # at `self.stream`. + return self.instance_map.get(instance_name, self.stream) + + def get_max_stream_pos(self) -> int: + """Get the maximum stream position referenced in this token. + + The corresponding "min" position is, by definition just `self.stream`. + + This is used to handle tokens that have non-empty `instance_map`, and so + reference stream positions after the `self.stream` position. 
+ """ + return max(self.instance_map.values(), default=self.stream) + async def to_string(self, store: "DataStore") -> str: if self.topological is not None: return "t%d-%d" % (self.topological, self.stream) + elif self.instance_map: + entries = [] + for name, pos in self.instance_map.items(): + instance_id = await store.get_id_for_instance(name) + entries.append("{}.{}".format(instance_id, pos)) + + encoded_map = "~".join(entries) + return "m{}~{}".format(self.stream, encoded_map) else: return "s%d" % (self.stream,) @@ -535,7 +641,7 @@ class PersistedEventPosition: stream = attr.ib(type=int) def persisted_after(self, token: RoomStreamToken) -> bool: - return token.stream < self.stream + return token.get_stream_pos_for_instance(self.instance_name) < self.stream def to_room_stream_token(self) -> RoomStreamToken: """Converts the position to a room stream token such that events From ae5b2a72c09d67311c9830f5a6fae1decce03e1f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 7 Oct 2020 15:15:57 +0100 Subject: [PATCH 141/245] Reduce serialization errors in MultiWriterIdGen (#8456) We call `_update_stream_positions_table_txn` a lot, which is an UPSERT that can conflict in `REPEATABLE READ` isolation level. Instead of doing a transaction consisting of a single query we may as well run it outside of a transaction. --- changelog.d/8456.misc | 1 + synapse/storage/database.py | 69 ++++++++++++++++++++++++--- synapse/storage/engines/_base.py | 17 +++++++ synapse/storage/engines/postgres.py | 10 +++- synapse/storage/engines/sqlite.py | 10 ++++ synapse/storage/util/id_generators.py | 12 ++++- tests/storage/test_base.py | 1 + 7 files changed, 112 insertions(+), 8 deletions(-) create mode 100644 changelog.d/8456.misc diff --git a/changelog.d/8456.misc b/changelog.d/8456.misc new file mode 100644 index 000000000000..ccd260069ba9 --- /dev/null +++ b/changelog.d/8456.misc @@ -0,0 +1 @@ +Reduce number of serialization errors of `MultiWriterIdGenerator._update_table`. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 0d9d9b7cc07a..0ba3a025cf1c 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -463,6 +463,24 @@ def new_transaction( *args: Any, **kwargs: Any ) -> R: + """Start a new database transaction with the given connection. + + Note: The given func may be called multiple times under certain + failure modes. This is normally fine when in a standard transaction, + but care must be taken if the connection is in `autocommit` mode that + the function will correctly handle being aborted and retried half way + through its execution. + + Args: + conn + desc + after_callbacks + exception_callbacks + func + *args + **kwargs + """ + start = monotonic_time() txn_id = self._TXN_ID @@ -566,7 +584,12 @@ def new_transaction( sql_txn_timer.labels(desc).observe(duration) async def runInteraction( - self, desc: str, func: "Callable[..., R]", *args: Any, **kwargs: Any + self, + desc: str, + func: "Callable[..., R]", + *args: Any, + db_autocommit: bool = False, + **kwargs: Any ) -> R: """Starts a transaction on the database and runs a given function @@ -576,6 +599,18 @@ async def runInteraction( database transaction (twisted.enterprise.adbapi.Transaction) as its first argument, followed by `args` and `kwargs`. + db_autocommit: Whether to run the function in "autocommit" mode, + i.e. outside of a transaction. This is useful for transactions + that are only a single query. + + Currently, this is only implemented for Postgres. 
SQLite will still + run the function inside a transaction. + + WARNING: This means that if func fails half way through then + the changes will *not* be rolled back. `func` may also get + called multiple times if the transaction is retried, so must + correctly handle that case. + args: positional args to pass to `func` kwargs: named args to pass to `func` @@ -596,6 +631,7 @@ async def runInteraction( exception_callbacks, func, *args, + db_autocommit=db_autocommit, **kwargs ) @@ -609,7 +645,11 @@ async def runInteraction( return cast(R, result) async def runWithConnection( - self, func: "Callable[..., R]", *args: Any, **kwargs: Any + self, + func: "Callable[..., R]", + *args: Any, + db_autocommit: bool = False, + **kwargs: Any ) -> R: """Wraps the .runWithConnection() method on the underlying db_pool. @@ -618,6 +658,9 @@ async def runWithConnection( database connection (twisted.enterprise.adbapi.Connection) as its first argument, followed by `args` and `kwargs`. args: positional args to pass to `func` + db_autocommit: Whether to run the function in "autocommit" mode, + i.e. outside of a transaction. This is useful for transaction + that are only a single query. Currently only affects postgres. kwargs: named args to pass to `func` Returns: @@ -633,6 +676,13 @@ async def runWithConnection( start_time = monotonic_time() def inner_func(conn, *args, **kwargs): + # We shouldn't be in a transaction. If we are then something + # somewhere hasn't committed after doing work. (This is likely only + # possible during startup, as `run*` will ensure changes are + # committed/rolled back before putting the connection back in the + # pool). + assert not self.engine.in_transaction(conn) + with LoggingContext("runWithConnection", parent_context) as context: sched_duration_sec = monotonic_time() - start_time sql_scheduling_timer.observe(sched_duration_sec) @@ -642,10 +692,17 @@ def inner_func(conn, *args, **kwargs): logger.debug("Reconnecting closed database connection") conn.reconnect() - db_conn = LoggingDatabaseConnection( - conn, self.engine, "runWithConnection" - ) - return func(db_conn, *args, **kwargs) + try: + if db_autocommit: + self.engine.attempt_to_set_autocommit(conn, True) + + db_conn = LoggingDatabaseConnection( + conn, self.engine, "runWithConnection" + ) + return func(db_conn, *args, **kwargs) + finally: + if db_autocommit: + self.engine.attempt_to_set_autocommit(conn, False) return await make_deferred_yieldable( self._db_pool.runWithConnection(inner_func, *args, **kwargs) diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py index 908cbc79e322..d6d632dc10f6 100644 --- a/synapse/storage/engines/_base.py +++ b/synapse/storage/engines/_base.py @@ -97,3 +97,20 @@ def server_version(self) -> str: """Gets a string giving the server version. For example: '3.22.0' """ ... + + @abc.abstractmethod + def in_transaction(self, conn: Connection) -> bool: + """Whether the connection is currently in a transaction. + """ + ... + + @abc.abstractmethod + def attempt_to_set_autocommit(self, conn: Connection, autocommit: bool): + """Attempt to set the connections autocommit mode. + + When True queries are run outside of transactions. + + Note: This has no effect on SQLite3, so callers still need to + commit/rollback the connections. + """ + ... 
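
As a usage illustration (not part of this change), a store method performing a single idempotent UPSERT could opt in to the new autocommit mode as sketched below. The table and method names here are placeholders; the real consumer added by this change is `MultiWriterIdGenerator._update_stream_positions_table_txn`, shown further down.

async def update_writer_position(db_pool, instance_name: str, stream_id: int) -> None:
    def _update_txn(txn):
        # A single UPSERT: idempotent, so it is safe if the statement is
        # retried, and there is nothing that needs rolling back on failure.
        db_pool.simple_upsert_txn(
            txn,
            table="stream_positions",  # illustrative table name
            keyvalues={"instance_name": instance_name},
            values={"stream_id": stream_id},
        )

    # Running outside a transaction avoids the REPEATABLE READ serialization
    # conflicts seen when many workers upsert the same rows concurrently.
    await db_pool.runInteraction(
        "update_writer_position", _update_txn, db_autocommit=True
    )
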
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index ff39281f8599..7719ac32f764 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -15,7 +15,8 @@ import logging -from ._base import BaseDatabaseEngine, IncorrectDatabaseSetup +from synapse.storage.engines._base import BaseDatabaseEngine, IncorrectDatabaseSetup +from synapse.storage.types import Connection logger = logging.getLogger(__name__) @@ -119,6 +120,7 @@ def on_new_connection(self, db_conn): cursor.execute("SET synchronous_commit TO OFF") cursor.close() + db_conn.commit() @property def can_native_upsert(self): @@ -171,3 +173,9 @@ def server_version(self): return "%i.%i" % (numver / 10000, numver % 10000) else: return "%i.%i.%i" % (numver / 10000, (numver % 10000) / 100, numver % 100) + + def in_transaction(self, conn: Connection) -> bool: + return conn.status != self.module.extensions.STATUS_READY # type: ignore + + def attempt_to_set_autocommit(self, conn: Connection, autocommit: bool): + return conn.set_session(autocommit=autocommit) # type: ignore diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py index 8a0f8c89d173..5db0f0b520db 100644 --- a/synapse/storage/engines/sqlite.py +++ b/synapse/storage/engines/sqlite.py @@ -17,6 +17,7 @@ import typing from synapse.storage.engines import BaseDatabaseEngine +from synapse.storage.types import Connection if typing.TYPE_CHECKING: import sqlite3 # noqa: F401 @@ -86,6 +87,7 @@ def on_new_connection(self, db_conn): db_conn.create_function("rank", 1, _rank) db_conn.execute("PRAGMA foreign_keys = ON;") + db_conn.commit() def is_deadlock(self, error): return False @@ -105,6 +107,14 @@ def server_version(self): """ return "%i.%i.%i" % self.module.sqlite_version_info + def in_transaction(self, conn: Connection) -> bool: + return conn.in_transaction # type: ignore + + def attempt_to_set_autocommit(self, conn: Connection, autocommit: bool): + # Twisted doesn't let us set attributes on the connections, so we can't + # set the connection to autocommit mode. + pass + # Following functions taken from: https://github.com/coleifer/peewee diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 51f680d05d81..d7e40aaa8b40 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -24,6 +24,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.database import DatabasePool, LoggingTransaction +from synapse.storage.types import Cursor from synapse.storage.util.sequence import PostgresSequenceGenerator logger = logging.getLogger(__name__) @@ -548,7 +549,7 @@ def _add_persisted_position(self, new_id: int): # do. break - def _update_stream_positions_table_txn(self, txn): + def _update_stream_positions_table_txn(self, txn: Cursor): """Update the `stream_positions` table with newly persisted position. """ @@ -598,10 +599,13 @@ class _MultiWriterCtxManager: stream_ids = attr.ib(type=List[int], factory=list) async def __aenter__(self) -> Union[int, List[int]]: + # It's safe to run this in autocommit mode as fetching values from a + # sequence ignores transaction semantics anyway. 
self.stream_ids = await self.id_gen._db.runInteraction( "_load_next_mult_id", self.id_gen._load_next_mult_id_txn, self.multiple_ids or 1, + db_autocommit=True, ) # Assert the fetched ID is actually greater than any ID we've already @@ -632,10 +636,16 @@ async def __aexit__(self, exc_type, exc, tb): # # We only do this on the success path so that the persisted current # position points to a persisted row with the correct instance name. + # + # We do this in autocommit mode as a) the upsert works correctly outside + # transactions and b) reduces the amount of time the rows are locked + # for. If we don't do this then we'll often hit serialization errors due + # to the fact we default to REPEATABLE READ isolation levels. if self.id_gen._writers: await self.id_gen._db.runInteraction( "MultiWriterIdGenerator._update_table", self.id_gen._update_stream_positions_table_txn, + db_autocommit=True, ) return False diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py index 40ba652248ce..eac7e4dcd2fa 100644 --- a/tests/storage/test_base.py +++ b/tests/storage/test_base.py @@ -56,6 +56,7 @@ def runWithConnection(func, *args, **kwargs): engine = create_engine(sqlite_config) fake_engine = Mock(wraps=engine) fake_engine.can_native_upsert = False + fake_engine.in_transaction.return_value = False db = DatabasePool(Mock(), Mock(config=sqlite_config), fake_engine) db._db_pool = self.db_pool From 8dbf62fada36f11a915cea4b6445f716e931dea3 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 7 Oct 2020 11:13:38 -0400 Subject: [PATCH 142/245] Include the configured log level in phone home stats. (#8477) By reporting the log level of the synapse logger as a string. --- changelog.d/8477.misc | 1 + synapse/app/phone_stats_home.py | 7 +++++++ 2 files changed, 8 insertions(+) create mode 100644 changelog.d/8477.misc diff --git a/changelog.d/8477.misc b/changelog.d/8477.misc new file mode 100644 index 000000000000..2ee1606b6e32 --- /dev/null +++ b/changelog.d/8477.misc @@ -0,0 +1 @@ +Include the log level in the phone home stats. 
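
For reference (not part of the patch), the stdlib calls used below turn the numeric effective level of the `synapse` logger into the string that ends up in the reported stats:

import logging

synapse_logger = logging.getLogger("synapse")
synapse_logger.setLevel(logging.WARNING)

level = synapse_logger.getEffectiveLevel()   # 30
print(logging.getLevelName(level))           # "WARNING"
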
diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py index 2c8e14a8c0ca..daed8ccfe952 100644 --- a/synapse/app/phone_stats_home.py +++ b/synapse/app/phone_stats_home.py @@ -113,6 +113,13 @@ async def phone_stats_home(hs, stats, stats_process=_stats_process): stats["database_engine"] = hs.get_datastore().db_pool.engine.module.__name__ stats["database_server_version"] = hs.get_datastore().db_pool.engine.server_version + # + # Logging configuration + # + synapse_logger = logging.getLogger("synapse") + log_level = synapse_logger.getEffectiveLevel() + stats["log_level"] = logging.getLevelName(log_level) + logger.info("Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats)) try: await hs.get_proxied_http_client().put_json( From e4f72ddc44367d0cd53e6cfc5ba310b6f55319b6 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 7 Oct 2020 11:27:56 -0400 Subject: [PATCH 143/245] Move additional tasks to the background worker (#8458) --- changelog.d/8458.feature | 1 + synapse/app/generic_worker.py | 4 + synapse/app/phone_stats_home.py | 33 ++-- synapse/storage/databases/main/client_ips.py | 109 ++++++----- synapse/storage/databases/main/metrics.py | 14 +- .../storage/databases/main/registration.py | 184 +++++++++--------- synapse/storage/databases/main/roommember.py | 5 +- .../storage/databases/main/transactions.py | 42 ++-- 8 files changed, 195 insertions(+), 197 deletions(-) create mode 100644 changelog.d/8458.feature diff --git a/changelog.d/8458.feature b/changelog.d/8458.feature new file mode 100644 index 000000000000..542993110bc8 --- /dev/null +++ b/changelog.d/8458.feature @@ -0,0 +1 @@ +Allow running background tasks in a separate worker process. diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index fc5188ce95ed..d53181deb17b 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -127,6 +127,7 @@ from synapse.rest.key.v2 import KeyApiV2Resource from synapse.server import HomeServer, cache_in_self from synapse.storage.databases.main.censor_events import CensorEventsStore +from synapse.storage.databases.main.client_ips import ClientIpWorkerStore from synapse.storage.databases.main.media_repository import MediaRepositoryStore from synapse.storage.databases.main.metrics import ServerMetricsStore from synapse.storage.databases.main.monthly_active_users import ( @@ -135,6 +136,7 @@ from synapse.storage.databases.main.presence import UserPresenceState from synapse.storage.databases.main.search import SearchWorkerStore from synapse.storage.databases.main.stats import StatsStore +from synapse.storage.databases.main.transactions import TransactionWorkerStore from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore from synapse.storage.databases.main.user_directory import UserDirectoryStore from synapse.types import ReadReceipt @@ -466,6 +468,7 @@ class GenericWorkerSlavedStore( SlavedAccountDataStore, SlavedPusherStore, CensorEventsStore, + ClientIpWorkerStore, SlavedEventStore, SlavedKeyStore, RoomStore, @@ -481,6 +484,7 @@ class GenericWorkerSlavedStore( MediaRepositoryStore, ServerMetricsStore, SearchWorkerStore, + TransactionWorkerStore, BaseSlavedStore, ): pass diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py index daed8ccfe952..8a69104a042f 100644 --- a/synapse/app/phone_stats_home.py +++ b/synapse/app/phone_stats_home.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. - import logging import math import resource @@ -19,7 +18,10 @@ from prometheus_client import Gauge -from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.metrics.background_process_metrics import ( + run_as_background_process, + wrap_as_background_process, +) logger = logging.getLogger("synapse.app.homeserver") @@ -41,6 +43,7 @@ ) +@wrap_as_background_process("phone_stats_home") async def phone_stats_home(hs, stats, stats_process=_stats_process): logger.info("Gathering stats for reporting") now = int(hs.get_clock().time()) @@ -143,20 +146,10 @@ def performance_stats_init(): (int(hs.get_clock().time()), resource.getrusage(resource.RUSAGE_SELF)) ) - def start_phone_stats_home(): - return run_as_background_process( - "phone_stats_home", phone_stats_home, hs, stats - ) - - def generate_user_daily_visit_stats(): - return run_as_background_process( - "generate_user_daily_visits", hs.get_datastore().generate_user_daily_visits - ) - # Rather than update on per session basis, batch up the requests. # If you increase the loop period, the accuracy of user_daily_visits # table will decrease - clock.looping_call(generate_user_daily_visit_stats, 5 * 60 * 1000) + clock.looping_call(hs.get_datastore().generate_user_daily_visits, 5 * 60 * 1000) # monthly active user limiting functionality def reap_monthly_active_users(): @@ -167,6 +160,7 @@ def reap_monthly_active_users(): clock.looping_call(reap_monthly_active_users, 1000 * 60 * 60) reap_monthly_active_users() + @wrap_as_background_process("generate_monthly_active_users") async def generate_monthly_active_users(): current_mau_count = 0 current_mau_count_by_service = {} @@ -186,19 +180,14 @@ async def generate_monthly_active_users(): registered_reserved_users_mau_gauge.set(float(len(reserved_users))) max_mau_gauge.set(float(hs.config.max_mau_value)) - def start_generate_monthly_active_users(): - return run_as_background_process( - "generate_monthly_active_users", generate_monthly_active_users - ) - if hs.config.limit_usage_by_mau or hs.config.mau_stats_only: - start_generate_monthly_active_users() - clock.looping_call(start_generate_monthly_active_users, 5 * 60 * 1000) + generate_monthly_active_users() + clock.looping_call(generate_monthly_active_users, 5 * 60 * 1000) # End of monthly active user settings if hs.config.report_stats: logger.info("Scheduling stats reporting for 3 hour intervals") - clock.looping_call(start_phone_stats_home, 3 * 60 * 60 * 1000) + clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000, hs, stats) # We need to defer this init for the cases that we daemonize # otherwise the process ID we get is that of the non-daemon process @@ -206,4 +195,4 @@ def start_generate_monthly_active_users(): # We wait 5 minutes to send the first set of stats as the server can # be quite busy the first few minutes - clock.call_later(5 * 60, start_phone_stats_home) + clock.call_later(5 * 60, phone_stats_home, hs, stats) diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index 239c7a949cba..a25a88844384 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -351,7 +351,63 @@ def _devices_last_seen_update_txn(txn): return updated -class ClientIpStore(ClientIpBackgroundUpdateStore): +class ClientIpWorkerStore(ClientIpBackgroundUpdateStore): + def __init__(self, database: DatabasePool, db_conn, hs): + 
super().__init__(database, db_conn, hs) + + self.user_ips_max_age = hs.config.user_ips_max_age + + if hs.config.run_background_tasks and self.user_ips_max_age: + self._clock.looping_call(self._prune_old_user_ips, 5 * 1000) + + @wrap_as_background_process("prune_old_user_ips") + async def _prune_old_user_ips(self): + """Removes entries in user IPs older than the configured period. + """ + + if self.user_ips_max_age is None: + # Nothing to do + return + + if not await self.db_pool.updates.has_completed_background_update( + "devices_last_seen" + ): + # Only start pruning if we have finished populating the devices + # last seen info. + return + + # We do a slightly funky SQL delete to ensure we don't try and delete + # too much at once (as the table may be very large from before we + # started pruning). + # + # This works by finding the max last_seen that is less than the given + # time, but has no more than N rows before it, deleting all rows with + # a lesser last_seen time. (We COALESCE so that the sub-SELECT always + # returns exactly one row). + sql = """ + DELETE FROM user_ips + WHERE last_seen <= ( + SELECT COALESCE(MAX(last_seen), -1) + FROM ( + SELECT last_seen FROM user_ips + WHERE last_seen <= ? + ORDER BY last_seen ASC + LIMIT 5000 + ) AS u + ) + """ + + timestamp = self.clock.time_msec() - self.user_ips_max_age + + def _prune_old_user_ips_txn(txn): + txn.execute(sql, (timestamp,)) + + await self.db_pool.runInteraction( + "_prune_old_user_ips", _prune_old_user_ips_txn + ) + + +class ClientIpStore(ClientIpWorkerStore): def __init__(self, database: DatabasePool, db_conn, hs): self.client_ip_last_seen = Cache( @@ -360,8 +416,6 @@ def __init__(self, database: DatabasePool, db_conn, hs): super().__init__(database, db_conn, hs) - self.user_ips_max_age = hs.config.user_ips_max_age - # (user_id, access_token, ip,) -> (user_agent, device_id, last_seen) self._batch_row_update = {} @@ -372,9 +426,6 @@ def __init__(self, database: DatabasePool, db_conn, hs): "before", "shutdown", self._update_client_ips_batch ) - if self.user_ips_max_age: - self._clock.looping_call(self._prune_old_user_ips, 5 * 1000) - async def insert_client_ip( self, user_id, access_token, ip, user_agent, device_id, now=None ): @@ -525,49 +576,3 @@ async def get_user_ip_and_agents(self, user): } for (access_token, ip), (user_agent, last_seen) in results.items() ] - - @wrap_as_background_process("prune_old_user_ips") - async def _prune_old_user_ips(self): - """Removes entries in user IPs older than the configured period. - """ - - if self.user_ips_max_age is None: - # Nothing to do - return - - if not await self.db_pool.updates.has_completed_background_update( - "devices_last_seen" - ): - # Only start pruning if we have finished populating the devices - # last seen info. - return - - # We do a slightly funky SQL delete to ensure we don't try and delete - # too much at once (as the table may be very large from before we - # started pruning). - # - # This works by finding the max last_seen that is less than the given - # time, but has no more than N rows before it, deleting all rows with - # a lesser last_seen time. (We COALESCE so that the sub-SELECT always - # returns exactly one row). - sql = """ - DELETE FROM user_ips - WHERE last_seen <= ( - SELECT COALESCE(MAX(last_seen), -1) - FROM ( - SELECT last_seen FROM user_ips - WHERE last_seen <= ? 
- ORDER BY last_seen ASC - LIMIT 5000 - ) AS u - ) - """ - - timestamp = self.clock.time_msec() - self.user_ips_max_age - - def _prune_old_user_ips_txn(txn): - txn.execute(sql, (timestamp,)) - - await self.db_pool.runInteraction( - "_prune_old_user_ips", _prune_old_user_ips_txn - ) diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py index 2c5a4fdbf6f9..0acf0617ca5a 100644 --- a/synapse/storage/databases/main/metrics.py +++ b/synapse/storage/databases/main/metrics.py @@ -18,7 +18,7 @@ from typing import Dict from synapse.metrics import GaugeBucketCollector -from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore from synapse.storage.database import DatabasePool from synapse.storage.databases.main.event_push_actions import ( @@ -57,18 +57,13 @@ def __init__(self, database: DatabasePool, db_conn, hs): super().__init__(database, db_conn, hs) # Read the extrems every 60 minutes - def read_forward_extremities(): - # run as a background process to make sure that the database transactions - # have a logcontext to report to - return run_as_background_process( - "read_forward_extremities", self._read_forward_extremities - ) - - hs.get_clock().looping_call(read_forward_extremities, 60 * 60 * 1000) + if hs.config.run_background_tasks: + self._clock.looping_call(self._read_forward_extremities, 60 * 60 * 1000) # Used in _generate_user_daily_visits to keep track of progress self._last_user_visit_update = self._get_start_of_day() + @wrap_as_background_process("read_forward_extremities") async def _read_forward_extremities(self): def fetch(txn): txn.execute( @@ -274,6 +269,7 @@ def _get_start_of_day(self): today_start = calendar.timegm((now.tm_year, now.tm_mon, now.tm_mday, 0, 0, 0)) return today_start * 1000 + @wrap_as_background_process("generate_user_daily_visits") async def generate_user_daily_visits(self) -> None: """ Generates daily visit data for use in cohort/ retention analysis diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 16ba5457403a..a85867936f06 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -14,14 +14,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- import logging import re from typing import Any, Dict, List, Optional, Tuple from synapse.api.constants import UserTypes from synapse.api.errors import Codes, StoreError, SynapseError, ThreepidValidationError -from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.metrics.background_process_metrics import ( + run_as_background_process, + wrap_as_background_process, +) from synapse.storage._base import SQLBaseStore from synapse.storage.database import DatabasePool from synapse.storage.types import Cursor @@ -48,6 +50,21 @@ def __init__(self, database: DatabasePool, db_conn, hs): database.engine, find_max_generated_user_id_localpart, "user_id_seq", ) + self._account_validity = hs.config.account_validity + if hs.config.run_background_tasks and self._account_validity.enabled: + self._clock.call_later( + 0.0, + run_as_background_process, + "account_validity_set_expiration_dates", + self._set_expiration_date_when_missing, + ) + + # Create a background job for culling expired 3PID validity tokens + if hs.config.run_background_tasks: + self.clock.looping_call( + self.cull_expired_threepid_validation_tokens, THIRTY_MINUTES_IN_MS + ) + @cached() async def get_user_by_id(self, user_id: str) -> Optional[Dict[str, Any]]: return await self.db_pool.simple_select_one( @@ -778,6 +795,78 @@ def delete_threepid_session_txn(txn): "delete_threepid_session", delete_threepid_session_txn ) + @wrap_as_background_process("cull_expired_threepid_validation_tokens") + async def cull_expired_threepid_validation_tokens(self) -> None: + """Remove threepid validation tokens with expiry dates that have passed""" + + def cull_expired_threepid_validation_tokens_txn(txn, ts): + sql = """ + DELETE FROM threepid_validation_token WHERE + expires < ? + """ + txn.execute(sql, (ts,)) + + await self.db_pool.runInteraction( + "cull_expired_threepid_validation_tokens", + cull_expired_threepid_validation_tokens_txn, + self.clock.time_msec(), + ) + + async def _set_expiration_date_when_missing(self): + """ + Retrieves the list of registered users that don't have an expiration date, and + adds an expiration date for each of them. + """ + + def select_users_with_no_expiration_date_txn(txn): + """Retrieves the list of registered users with no expiration date from the + database, filtering out deactivated users. + """ + sql = ( + "SELECT users.name FROM users" + " LEFT JOIN account_validity ON (users.name = account_validity.user_id)" + " WHERE account_validity.user_id is NULL AND users.deactivated = 0;" + ) + txn.execute(sql, []) + + res = self.db_pool.cursor_to_dict(txn) + if res: + for user in res: + self.set_expiration_date_for_user_txn( + txn, user["name"], use_delta=True + ) + + await self.db_pool.runInteraction( + "get_users_with_no_expiration_date", + select_users_with_no_expiration_date_txn, + ) + + def set_expiration_date_for_user_txn(self, txn, user_id, use_delta=False): + """Sets an expiration date to the account with the given user ID. + + Args: + user_id (str): User ID to set an expiration date for. + use_delta (bool): If set to False, the expiration date for the user will be + now + validity period. If set to True, this expiration date will be a + random value in the [now + period - d ; now + period] range, d being a + delta equal to 10% of the validity period. 
+ """ + now_ms = self._clock.time_msec() + expiration_ts = now_ms + self._account_validity.period + + if use_delta: + expiration_ts = self.rand.randrange( + expiration_ts - self._account_validity.startup_job_max_delta, + expiration_ts, + ) + + self.db_pool.simple_upsert_txn( + txn, + "account_validity", + keyvalues={"user_id": user_id}, + values={"expiration_ts_ms": expiration_ts, "email_sent": False}, + ) + class RegistrationBackgroundUpdateStore(RegistrationWorkerStore): def __init__(self, database: DatabasePool, db_conn, hs): @@ -911,28 +1000,8 @@ class RegistrationStore(RegistrationBackgroundUpdateStore): def __init__(self, database: DatabasePool, db_conn, hs): super().__init__(database, db_conn, hs) - self._account_validity = hs.config.account_validity self._ignore_unknown_session_error = hs.config.request_token_inhibit_3pid_errors - if self._account_validity.enabled: - self._clock.call_later( - 0.0, - run_as_background_process, - "account_validity_set_expiration_dates", - self._set_expiration_date_when_missing, - ) - - # Create a background job for culling expired 3PID validity tokens - def start_cull(): - # run as a background process to make sure that the database transactions - # have a logcontext to report to - return run_as_background_process( - "cull_expired_threepid_validation_tokens", - self.cull_expired_threepid_validation_tokens, - ) - - hs.get_clock().looping_call(start_cull, THIRTY_MINUTES_IN_MS) - async def add_access_token_to_user( self, user_id: str, @@ -1477,22 +1546,6 @@ def start_or_continue_validation_session_txn(txn): start_or_continue_validation_session_txn, ) - async def cull_expired_threepid_validation_tokens(self) -> None: - """Remove threepid validation tokens with expiry dates that have passed""" - - def cull_expired_threepid_validation_tokens_txn(txn, ts): - sql = """ - DELETE FROM threepid_validation_token WHERE - expires < ? - """ - txn.execute(sql, (ts,)) - - await self.db_pool.runInteraction( - "cull_expired_threepid_validation_tokens", - cull_expired_threepid_validation_tokens_txn, - self.clock.time_msec(), - ) - async def set_user_deactivated_status( self, user_id: str, deactivated: bool ) -> None: @@ -1522,61 +1575,6 @@ def set_user_deactivated_status_txn(self, txn, user_id, deactivated): ) txn.call_after(self.is_guest.invalidate, (user_id,)) - async def _set_expiration_date_when_missing(self): - """ - Retrieves the list of registered users that don't have an expiration date, and - adds an expiration date for each of them. - """ - - def select_users_with_no_expiration_date_txn(txn): - """Retrieves the list of registered users with no expiration date from the - database, filtering out deactivated users. - """ - sql = ( - "SELECT users.name FROM users" - " LEFT JOIN account_validity ON (users.name = account_validity.user_id)" - " WHERE account_validity.user_id is NULL AND users.deactivated = 0;" - ) - txn.execute(sql, []) - - res = self.db_pool.cursor_to_dict(txn) - if res: - for user in res: - self.set_expiration_date_for_user_txn( - txn, user["name"], use_delta=True - ) - - await self.db_pool.runInteraction( - "get_users_with_no_expiration_date", - select_users_with_no_expiration_date_txn, - ) - - def set_expiration_date_for_user_txn(self, txn, user_id, use_delta=False): - """Sets an expiration date to the account with the given user ID. - - Args: - user_id (str): User ID to set an expiration date for. - use_delta (bool): If set to False, the expiration date for the user will be - now + validity period. 
If set to True, this expiration date will be a - random value in the [now + period - d ; now + period] range, d being a - delta equal to 10% of the validity period. - """ - now_ms = self._clock.time_msec() - expiration_ts = now_ms + self._account_validity.period - - if use_delta: - expiration_ts = self.rand.randrange( - expiration_ts - self._account_validity.startup_job_max_delta, - expiration_ts, - ) - - self.db_pool.simple_upsert_txn( - txn, - "account_validity", - keyvalues={"user_id": user_id}, - values={"expiration_ts_ms": expiration_ts, "email_sent": False}, - ) - def find_max_generated_user_id_localpart(cur: Cursor) -> int: """ diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index bae1bd22d328..20fcdaa529ca 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -61,7 +61,10 @@ def __init__(self, database: DatabasePool, db_conn, hs): self._check_safe_current_state_events_membership_updated_txn(txn) txn.close() - if self.hs.config.metrics_flags.known_servers: + if ( + self.hs.config.run_background_tasks + and self.hs.config.metrics_flags.known_servers + ): self._known_servers_count = 1 self.hs.get_clock().looping_call( run_as_background_process, diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index 97aed1500e3e..7d460902675b 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -19,7 +19,7 @@ from canonicaljson import encode_canonical_json -from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import DatabasePool, LoggingTransaction from synapse.storage.engines import PostgresEngine, Sqlite3Engine @@ -43,15 +43,33 @@ SENTINEL = object() -class TransactionStore(SQLBaseStore): +class TransactionWorkerStore(SQLBaseStore): + def __init__(self, database: DatabasePool, db_conn, hs): + super().__init__(database, db_conn, hs) + + if hs.config.run_background_tasks: + self._clock.looping_call(self._cleanup_transactions, 30 * 60 * 1000) + + @wrap_as_background_process("cleanup_transactions") + async def _cleanup_transactions(self) -> None: + now = self._clock.time_msec() + month_ago = now - 30 * 24 * 60 * 60 * 1000 + + def _cleanup_transactions_txn(txn): + txn.execute("DELETE FROM received_transactions WHERE ts < ?", (month_ago,)) + + await self.db_pool.runInteraction( + "_cleanup_transactions", _cleanup_transactions_txn + ) + + +class TransactionStore(TransactionWorkerStore): """A collection of queries for handling PDUs. 
""" def __init__(self, database: DatabasePool, db_conn, hs): super().__init__(database, db_conn, hs) - self._clock.looping_call(self._start_cleanup_transactions, 30 * 60 * 1000) - self._destination_retry_cache = ExpiringCache( cache_name="get_destination_retry_timings", clock=self._clock, @@ -266,22 +284,6 @@ def _set_destination_retry_timings( }, ) - def _start_cleanup_transactions(self): - return run_as_background_process( - "cleanup_transactions", self._cleanup_transactions - ) - - async def _cleanup_transactions(self) -> None: - now = self._clock.time_msec() - month_ago = now - 30 * 24 * 60 * 60 * 1000 - - def _cleanup_transactions_txn(txn): - txn.execute("DELETE FROM received_transactions WHERE ts < ?", (month_ago,)) - - await self.db_pool.runInteraction( - "_cleanup_transactions", _cleanup_transactions_txn - ) - async def store_destination_rooms_entries( self, destinations: Iterable[str], room_id: str, stream_ordering: int, ) -> None: From fa8934b175467d589dd34fae18639cac0d738fc9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 7 Oct 2020 15:15:57 +0100 Subject: [PATCH 144/245] Reduce serialization errors in MultiWriterIdGen (#8456) We call `_update_stream_positions_table_txn` a lot, which is an UPSERT that can conflict in `REPEATABLE READ` isolation level. Instead of doing a transaction consisting of a single query we may as well run it outside of a transaction. --- changelog.d/8456.misc | 1 + synapse/storage/database.py | 63 +++++++++++++++++++++++++-- synapse/storage/engines/_base.py | 17 ++++++++ synapse/storage/engines/postgres.py | 10 ++++- synapse/storage/engines/sqlite.py | 10 +++++ synapse/storage/util/id_generators.py | 12 ++++- tests/storage/test_base.py | 1 + 7 files changed, 109 insertions(+), 5 deletions(-) create mode 100644 changelog.d/8456.misc diff --git a/changelog.d/8456.misc b/changelog.d/8456.misc new file mode 100644 index 000000000000..ccd260069ba9 --- /dev/null +++ b/changelog.d/8456.misc @@ -0,0 +1 @@ +Reduce number of serialization errors of `MultiWriterIdGenerator._update_table`. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 79ec8f119df7..6116191b16eb 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -403,6 +403,24 @@ def new_transaction( *args: Any, **kwargs: Any ) -> R: + """Start a new database transaction with the given connection. + + Note: The given func may be called multiple times under certain + failure modes. This is normally fine when in a standard transaction, + but care must be taken if the connection is in `autocommit` mode that + the function will correctly handle being aborted and retried half way + through its execution. + + Args: + conn + desc + after_callbacks + exception_callbacks + func + *args + **kwargs + """ + start = monotonic_time() txn_id = self._TXN_ID @@ -508,7 +526,12 @@ def new_transaction( sql_txn_timer.labels(desc).observe(duration) async def runInteraction( - self, desc: str, func: "Callable[..., R]", *args: Any, **kwargs: Any + self, + desc: str, + func: "Callable[..., R]", + *args: Any, + db_autocommit: bool = False, + **kwargs: Any ) -> R: """Starts a transaction on the database and runs a given function @@ -518,6 +541,18 @@ async def runInteraction( database transaction (twisted.enterprise.adbapi.Transaction) as its first argument, followed by `args` and `kwargs`. + db_autocommit: Whether to run the function in "autocommit" mode, + i.e. outside of a transaction. This is useful for transactions + that are only a single query. 
+ + Currently, this is only implemented for Postgres. SQLite will still + run the function inside a transaction. + + WARNING: This means that if func fails half way through then + the changes will *not* be rolled back. `func` may also get + called multiple times if the transaction is retried, so must + correctly handle that case. + args: positional args to pass to `func` kwargs: named args to pass to `func` @@ -538,6 +573,7 @@ async def runInteraction( exception_callbacks, func, *args, + db_autocommit=db_autocommit, **kwargs ) @@ -551,7 +587,11 @@ async def runInteraction( return cast(R, result) async def runWithConnection( - self, func: "Callable[..., R]", *args: Any, **kwargs: Any + self, + func: "Callable[..., R]", + *args: Any, + db_autocommit: bool = False, + **kwargs: Any ) -> R: """Wraps the .runWithConnection() method on the underlying db_pool. @@ -560,6 +600,9 @@ async def runWithConnection( database connection (twisted.enterprise.adbapi.Connection) as its first argument, followed by `args` and `kwargs`. args: positional args to pass to `func` + db_autocommit: Whether to run the function in "autocommit" mode, + i.e. outside of a transaction. This is useful for transaction + that are only a single query. Currently only affects postgres. kwargs: named args to pass to `func` Returns: @@ -575,6 +618,13 @@ async def runWithConnection( start_time = monotonic_time() def inner_func(conn, *args, **kwargs): + # We shouldn't be in a transaction. If we are then something + # somewhere hasn't committed after doing work. (This is likely only + # possible during startup, as `run*` will ensure changes are + # committed/rolled back before putting the connection back in the + # pool). + assert not self.engine.in_transaction(conn) + with LoggingContext("runWithConnection", parent_context) as context: sched_duration_sec = monotonic_time() - start_time sql_scheduling_timer.observe(sched_duration_sec) @@ -584,7 +634,14 @@ def inner_func(conn, *args, **kwargs): logger.debug("Reconnecting closed database connection") conn.reconnect() - return func(conn, *args, **kwargs) + try: + if db_autocommit: + self.engine.attempt_to_set_autocommit(conn, True) + + return func(conn, *args, **kwargs) + finally: + if db_autocommit: + self.engine.attempt_to_set_autocommit(conn, False) return await make_deferred_yieldable( self._db_pool.runWithConnection(inner_func, *args, **kwargs) diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py index 908cbc79e322..d6d632dc10f6 100644 --- a/synapse/storage/engines/_base.py +++ b/synapse/storage/engines/_base.py @@ -97,3 +97,20 @@ def server_version(self) -> str: """Gets a string giving the server version. For example: '3.22.0' """ ... + + @abc.abstractmethod + def in_transaction(self, conn: Connection) -> bool: + """Whether the connection is currently in a transaction. + """ + ... + + @abc.abstractmethod + def attempt_to_set_autocommit(self, conn: Connection, autocommit: bool): + """Attempt to set the connections autocommit mode. + + When True queries are run outside of transactions. + + Note: This has no effect on SQLite3, so callers still need to + commit/rollback the connections. + """ + ... 
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index ff39281f8599..7719ac32f764 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -15,7 +15,8 @@ import logging -from ._base import BaseDatabaseEngine, IncorrectDatabaseSetup +from synapse.storage.engines._base import BaseDatabaseEngine, IncorrectDatabaseSetup +from synapse.storage.types import Connection logger = logging.getLogger(__name__) @@ -119,6 +120,7 @@ def on_new_connection(self, db_conn): cursor.execute("SET synchronous_commit TO OFF") cursor.close() + db_conn.commit() @property def can_native_upsert(self): @@ -171,3 +173,9 @@ def server_version(self): return "%i.%i" % (numver / 10000, numver % 10000) else: return "%i.%i.%i" % (numver / 10000, (numver % 10000) / 100, numver % 100) + + def in_transaction(self, conn: Connection) -> bool: + return conn.status != self.module.extensions.STATUS_READY # type: ignore + + def attempt_to_set_autocommit(self, conn: Connection, autocommit: bool): + return conn.set_session(autocommit=autocommit) # type: ignore diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py index 8a0f8c89d173..5db0f0b520db 100644 --- a/synapse/storage/engines/sqlite.py +++ b/synapse/storage/engines/sqlite.py @@ -17,6 +17,7 @@ import typing from synapse.storage.engines import BaseDatabaseEngine +from synapse.storage.types import Connection if typing.TYPE_CHECKING: import sqlite3 # noqa: F401 @@ -86,6 +87,7 @@ def on_new_connection(self, db_conn): db_conn.create_function("rank", 1, _rank) db_conn.execute("PRAGMA foreign_keys = ON;") + db_conn.commit() def is_deadlock(self, error): return False @@ -105,6 +107,14 @@ def server_version(self): """ return "%i.%i.%i" % self.module.sqlite_version_info + def in_transaction(self, conn: Connection) -> bool: + return conn.in_transaction # type: ignore + + def attempt_to_set_autocommit(self, conn: Connection, autocommit: bool): + # Twisted doesn't let us set attributes on the connections, so we can't + # set the connection to autocommit mode. + pass + # Following functions taken from: https://github.com/coleifer/peewee diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 48efbb5067bd..ad017207aae5 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -24,6 +24,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.database import DatabasePool, LoggingTransaction +from synapse.storage.types import Cursor from synapse.storage.util.sequence import PostgresSequenceGenerator logger = logging.getLogger(__name__) @@ -552,7 +553,7 @@ def _add_persisted_position(self, new_id: int): # do. break - def _update_stream_positions_table_txn(self, txn): + def _update_stream_positions_table_txn(self, txn: Cursor): """Update the `stream_positions` table with newly persisted position. """ @@ -602,10 +603,13 @@ class _MultiWriterCtxManager: stream_ids = attr.ib(type=List[int], factory=list) async def __aenter__(self) -> Union[int, List[int]]: + # It's safe to run this in autocommit mode as fetching values from a + # sequence ignores transaction semantics anyway. 
self.stream_ids = await self.id_gen._db.runInteraction( "_load_next_mult_id", self.id_gen._load_next_mult_id_txn, self.multiple_ids or 1, + db_autocommit=True, ) # Assert the fetched ID is actually greater than any ID we've already @@ -636,10 +640,16 @@ async def __aexit__(self, exc_type, exc, tb): # # We only do this on the success path so that the persisted current # position points to a persisted row with the correct instance name. + # + # We do this in autocommit mode as a) the upsert works correctly outside + # transactions and b) reduces the amount of time the rows are locked + # for. If we don't do this then we'll often hit serialization errors due + # to the fact we default to REPEATABLE READ isolation levels. if self.id_gen._writers: await self.id_gen._db.runInteraction( "MultiWriterIdGenerator._update_table", self.id_gen._update_stream_positions_table_txn, + db_autocommit=True, ) return False diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py index 40ba652248ce..eac7e4dcd2fa 100644 --- a/tests/storage/test_base.py +++ b/tests/storage/test_base.py @@ -56,6 +56,7 @@ def runWithConnection(func, *args, **kwargs): engine = create_engine(sqlite_config) fake_engine = Mock(wraps=engine) fake_engine.can_native_upsert = False + fake_engine.in_transaction.return_value = False db = DatabasePool(Mock(), Mock(config=sqlite_config), fake_engine) db._db_pool = self.db_pool From b28bfd905d51e02785979abaabf4debaf817f054 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 8 Oct 2020 11:10:15 +0100 Subject: [PATCH 145/245] Clarify error message when plugin config parsers raise an error (#8492) This turns: Failed to parse config for 'myplugin': Exception('error message') into: Failed to parse config for 'myplugin': error message. --- changelog.d/8492.misc | 1 + synapse/util/module_loader.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8492.misc diff --git a/changelog.d/8492.misc b/changelog.d/8492.misc new file mode 100644 index 000000000000..a344aee791b2 --- /dev/null +++ b/changelog.d/8492.misc @@ -0,0 +1 @@ +Clarify error message when plugin config parsers raise an error. diff --git a/synapse/util/module_loader.py b/synapse/util/module_loader.py index bb62db46379a..94b59afb385a 100644 --- a/synapse/util/module_loader.py +++ b/synapse/util/module_loader.py @@ -36,7 +36,7 @@ def load_module(provider): try: provider_config = provider_class.parse_config(provider.get("config")) except Exception as e: - raise ConfigError("Failed to parse config for %r: %r" % (provider["module"], e)) + raise ConfigError("Failed to parse config for %r: %s" % (provider["module"], e)) return provider_class, provider_config From 719474cae0f9fd958cef1279ad559866e184e9e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Przyby=C5=82owicz?= Date: Thu, 8 Oct 2020 12:16:56 +0200 Subject: [PATCH 146/245] Add useful shields to readme (#8493) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Added shields directing to synapse-dev room, showing license, latest version on PyPi and supported Python versions. I've moved substitution definitions to the bottom to improve readability. 
Signed-off-by: Mateusz Przybyłowicz --- README.rst | 30 +++++++++++++++++++++++------- changelog.d/8493.doc | 1 + 2 files changed, 24 insertions(+), 7 deletions(-) create mode 100644 changelog.d/8493.doc diff --git a/README.rst b/README.rst index e623cf863a5c..d609b4b62ea3 100644 --- a/README.rst +++ b/README.rst @@ -1,10 +1,6 @@ -================ -Synapse |shield| -================ - -.. |shield| image:: https://img.shields.io/matrix/synapse:matrix.org?label=support&logo=matrix - :alt: (get support on #synapse:matrix.org) - :target: https://matrix.to/#/#synapse:matrix.org +========================================================= +Synapse |support| |development| |license| |pypi| |python| +========================================================= .. contents:: @@ -374,3 +370,23 @@ something like the following in their logs:: This is normally caused by a misconfiguration in your reverse-proxy. See ``_ and double-check that your settings are correct. + +.. |support| image:: https://img.shields.io/matrix/synapse:matrix.org?label=support&logo=matrix + :alt: (get support on #synapse:matrix.org) + :target: https://matrix.to/#/#synapse:matrix.org + +.. |development| image:: https://img.shields.io/matrix/synapse-dev:matrix.org?label=development&logo=matrix + :alt: (discuss development on #synapse-dev:matrix.org) + :target: https://matrix.to/#/#synapse-dev:matrix.org + +.. |license| image:: https://img.shields.io/github/license/matrix-org/synapse + :alt: (check license in LICENSE file) + :target: LICENSE + +.. |pypi| image:: https://img.shields.io/pypi/v/matrix-synapse + :alt: (latest version released on PyPi) + :target: https://pypi.org/project/matrix-synapse + +.. |python| image:: https://img.shields.io/pypi/pyversions/matrix-synapse + :alt: (supported python versions) + :target: https://pypi.org/project/matrix-synapse diff --git a/changelog.d/8493.doc b/changelog.d/8493.doc new file mode 100644 index 000000000000..26797cd99e3b --- /dev/null +++ b/changelog.d/8493.doc @@ -0,0 +1 @@ +Improve readme by adding new shield.io badges. From 31fe46e0a3fc0aaa5a45c798cb33ce2d1f4accfc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 8 Oct 2020 11:19:22 +0100 Subject: [PATCH 147/245] 1.21.0rc3 --- CHANGES.md | 15 +++++++++++++++ changelog.d/8456.misc | 1 - changelog.d/8475.misc | 1 - synapse/__init__.py | 2 +- 4 files changed, 16 insertions(+), 3 deletions(-) delete mode 100644 changelog.d/8456.misc delete mode 100644 changelog.d/8475.misc diff --git a/CHANGES.md b/CHANGES.md index 5d4e80499eec..5d977d2aada5 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,18 @@ +Synapse 1.21.0rc3 (2020-10-08) +============================== + +Bugfixes +-------- + +- Fix duplication of events on high traffic servers, caused by PostgresQL `could not serialize access due to concurrent update` errors. ([\#8456](https://github.com/matrix-org/synapse/issues/8456)) + + +Internal Changes +---------------- + +- Add Groovy Gorilla to the list of distributions we build `.deb`s for. ([\#8475](https://github.com/matrix-org/synapse/issues/8475)) + + Synapse 1.21.0rc2 (2020-10-02) ============================== diff --git a/changelog.d/8456.misc b/changelog.d/8456.misc deleted file mode 100644 index ccd260069ba9..000000000000 --- a/changelog.d/8456.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce number of serialization errors of `MultiWriterIdGenerator._update_table`. 
diff --git a/changelog.d/8475.misc b/changelog.d/8475.misc deleted file mode 100644 index 69bcb04097c7..000000000000 --- a/changelog.d/8475.misc +++ /dev/null @@ -1 +0,0 @@ -Add Groovy Gorilla to the list of distributions we build `.deb`s for. diff --git a/synapse/__init__.py b/synapse/__init__.py index 500558bbdf8b..a86dc07ddc9d 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -48,7 +48,7 @@ except ImportError: pass -__version__ = "1.21.0rc2" +__version__ = "1.21.0rc3" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From b9c253a724aaf2798c2d0b089d9250059d24aac9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 8 Oct 2020 11:30:05 +0100 Subject: [PATCH 148/245] Update change log --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 5d977d2aada5..dfdd8aa68a3b 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,7 +4,7 @@ Synapse 1.21.0rc3 (2020-10-08) Bugfixes -------- -- Fix duplication of events on high traffic servers, caused by PostgresQL `could not serialize access due to concurrent update` errors. ([\#8456](https://github.com/matrix-org/synapse/issues/8456)) +- Fix duplication of events on high traffic servers, caused by PostgreSQL `could not serialize access due to concurrent update` errors. ([\#8456](https://github.com/matrix-org/synapse/issues/8456)) Internal Changes From a97cec18bba42e5cb743f61e79f253d6d95a0c0c Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Thu, 8 Oct 2020 13:24:46 -0400 Subject: [PATCH 149/245] Invalidate the cache when an olm fallback key is uploaded (#8501) --- changelog.d/8501.feature | 1 + .../storage/databases/main/end_to_end_keys.py | 4 ++++ tests/handlers/test_e2e_keys.py | 20 +++++++++++++++++++ 3 files changed, 25 insertions(+) create mode 100644 changelog.d/8501.feature diff --git a/changelog.d/8501.feature b/changelog.d/8501.feature new file mode 100644 index 000000000000..5220ddd48252 --- /dev/null +++ b/changelog.d/8501.feature @@ -0,0 +1 @@ +Add support for olm fallback keys ([MSC2732](https://github.com/matrix-org/matrix-doc/pull/2732)). 
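The reason the write path below needs an explicit invalidation: `get_e2e_unused_fallback_key_types` is a `@cached` read, so once a fallback key has been uploaded the memoised pre-upload answer would otherwise keep being served, both on this worker and on others. A toy, self-contained sketch of that failure mode follows — none of these names are Synapse code — where the final line plays the role that `invalidate_cache_and_stream` plays in the real store:

    _db = {}     # (user, device) -> algorithms that still have an unused fallback key
    _cache = {}  # memoised results of the read below, standing in for @cached

    def get_unused_fallback_key_types(user, device):
        key = (user, device)
        if key not in _cache:
            _cache[key] = sorted(_db.get(key, set()))
        return _cache[key]

    def set_fallback_key(user, device, algorithm):
        _db.setdefault((user, device), set()).add(algorithm)
        # Without this, the read above keeps returning the stale pre-upload
        # result; invalidate_cache_and_stream does this in the real store and
        # also streams the invalidation to other workers.
        _cache.pop((user, device), None)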
diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 359dc6e968c6..44159094147e 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -398,6 +398,10 @@ async def set_e2e_fallback_keys( desc="set_e2e_fallback_key", ) + await self.invalidate_cache_and_stream( + "get_e2e_unused_fallback_key_types", (user_id, device_id) + ) + @cached(max_entries=10000) async def get_e2e_unused_fallback_key_types( self, user_id: str, device_id: str diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 4e9e3dcbc267..e79d612f7ae2 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -33,6 +33,7 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.hs = None # type: synapse.server.HomeServer self.handler = None # type: synapse.handlers.e2e_keys.E2eKeysHandler + self.store = None # type: synapse.storage.Storage @defer.inlineCallbacks def setUp(self): @@ -40,6 +41,7 @@ def setUp(self): self.addCleanup, handlers=None, federation_client=mock.Mock() ) self.handler = synapse.handlers.e2e_keys.E2eKeysHandler(self.hs) + self.store = self.hs.get_datastore() @defer.inlineCallbacks def test_query_local_devices_no_devices(self): @@ -178,6 +180,12 @@ def test_fallback_key(self): fallback_key = {"alg1:k1": "key1"} otk = {"alg1:k2": "key2"} + # we shouldn't have any unused fallback keys yet + res = yield defer.ensureDeferred( + self.store.get_e2e_unused_fallback_key_types(local_user, device_id) + ) + self.assertEqual(res, []) + yield defer.ensureDeferred( self.handler.upload_keys_for_user( local_user, @@ -186,6 +194,12 @@ def test_fallback_key(self): ) ) + # we should now have an unused alg1 key + res = yield defer.ensureDeferred( + self.store.get_e2e_unused_fallback_key_types(local_user, device_id) + ) + self.assertEqual(res, ["alg1"]) + # claiming an OTK when no OTKs are available should return the fallback # key res = yield defer.ensureDeferred( @@ -198,6 +212,12 @@ def test_fallback_key(self): {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key}}}, ) + # we shouldn't have any unused fallback keys again + res = yield defer.ensureDeferred( + self.store.get_e2e_unused_fallback_key_types(local_user, device_id) + ) + self.assertEqual(res, []) + # claiming an OTK again should return the same fallback key res = yield defer.ensureDeferred( self.handler.claim_one_time_keys( From a93f3121f8fd1c2b77e003d8e43ce881635bb098 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 9 Oct 2020 07:20:51 -0400 Subject: [PATCH 150/245] Add type hints to some handlers (#8505) --- changelog.d/8505.misc | 1 + mypy.ini | 5 ++++ synapse/federation/federation_server.py | 2 +- synapse/handlers/account_data.py | 14 ++++++++--- synapse/handlers/deactivate_account.py | 9 ++++--- synapse/handlers/devicemessage.py | 25 +++++++++++++------ synapse/handlers/password_policy.py | 10 +++++--- synapse/handlers/read_marker.py | 10 ++++++-- synapse/notifier.py | 2 +- .../storage/databases/main/registration.py | 4 ++- 10 files changed, 60 insertions(+), 22 deletions(-) create mode 100644 changelog.d/8505.misc diff --git a/changelog.d/8505.misc b/changelog.d/8505.misc new file mode 100644 index 000000000000..5aa5c113bd78 --- /dev/null +++ b/changelog.d/8505.misc @@ -0,0 +1 @@ +Add type hints to various parts of the code base. 
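One pattern recurs throughout this patch: `HomeServer` is imported only under `TYPE_CHECKING` and referenced as a string annotation, which gives mypy the type without creating a runtime import cycle between `synapse.server` and the handlers. A minimal sketch of the pattern (the handler class here is invented for illustration):

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        from synapse.app.homeserver import HomeServer

    class ExampleHandler:
        def __init__(self, hs: "HomeServer"):
            # The string annotation is only evaluated by type checkers, so
            # the guarded import above never runs at runtime.
            self.store = hs.get_datastore()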
diff --git a/mypy.ini b/mypy.ini index a7ffb81ef133..19b60f7534d7 100644 --- a/mypy.ini +++ b/mypy.ini @@ -15,9 +15,12 @@ files = synapse/events/builder.py, synapse/events/spamcheck.py, synapse/federation, + synapse/handlers/account_data.py, synapse/handlers/auth.py, synapse/handlers/cas_handler.py, + synapse/handlers/deactivate_account.py, synapse/handlers/device.py, + synapse/handlers/devicemessage.py, synapse/handlers/directory.py, synapse/handlers/events.py, synapse/handlers/federation.py, @@ -26,7 +29,9 @@ files = synapse/handlers/message.py, synapse/handlers/oidc_handler.py, synapse/handlers/pagination.py, + synapse/handlers/password_policy.py, synapse/handlers/presence.py, + synapse/handlers/read_marker.py, synapse/handlers/room.py, synapse/handlers/room_member.py, synapse/handlers/room_member_worker.py, diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 02f11e120997..1c7ea886c9d4 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -861,7 +861,7 @@ def __init__(self, hs: "HomeServer"): self._edu_type_to_instance = {} # type: Dict[str, str] def register_edu_handler( - self, edu_type: str, handler: Callable[[str, dict], Awaitable[None]] + self, edu_type: str, handler: Callable[[str, JsonDict], Awaitable[None]] ): """Sets the handler callable that will be used to handle an incoming federation EDU of the given type. diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py index 9112a0ab8610..341135822eb9 100644 --- a/synapse/handlers/account_data.py +++ b/synapse/handlers/account_data.py @@ -12,16 +12,24 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from typing import TYPE_CHECKING, List, Tuple + +from synapse.types import JsonDict, UserID + +if TYPE_CHECKING: + from synapse.app.homeserver import HomeServer class AccountDataEventSource: - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): self.store = hs.get_datastore() - def get_current_key(self, direction="f"): + def get_current_key(self, direction: str = "f") -> int: return self.store.get_max_account_data_stream_id() - async def get_new_events(self, user, from_key, **kwargs): + async def get_new_events( + self, user: UserID, from_key: int, **kwargs + ) -> Tuple[List[JsonDict], int]: user_id = user.to_string() last_stream_id = from_key diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index 0635ad570866..72a583153187 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -from typing import Optional +from typing import TYPE_CHECKING, Optional from synapse.api.errors import SynapseError from synapse.metrics.background_process_metrics import run_as_background_process @@ -22,13 +22,16 @@ from ._base import BaseHandler +if TYPE_CHECKING: + from synapse.app.homeserver import HomeServer + logger = logging.getLogger(__name__) class DeactivateAccountHandler(BaseHandler): """Handler which deals with deactivating user accounts.""" - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): super().__init__(hs) self.hs = hs self._auth_handler = hs.get_auth_handler() @@ -137,7 +140,7 @@ async def deactivate_account( return identity_server_supports_unbinding - async def _reject_pending_invites_for_user(self, user_id: str): + async def _reject_pending_invites_for_user(self, user_id: str) -> None: """Reject pending invites addressed to a given user ID. Args: diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py index 64ef7f63ab79..9cac5a846396 100644 --- a/synapse/handlers/devicemessage.py +++ b/synapse/handlers/devicemessage.py @@ -14,7 +14,7 @@ # limitations under the License. import logging -from typing import Any, Dict +from typing import TYPE_CHECKING, Any, Dict from synapse.api.errors import SynapseError from synapse.logging.context import run_in_background @@ -24,18 +24,22 @@ set_tag, start_active_span, ) -from synapse.types import UserID, get_domain_from_id +from synapse.types import JsonDict, UserID, get_domain_from_id from synapse.util import json_encoder from synapse.util.stringutils import random_string +if TYPE_CHECKING: + from synapse.app.homeserver import HomeServer + + logger = logging.getLogger(__name__) class DeviceMessageHandler: - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): """ Args: - hs (synapse.server.HomeServer): server + hs: server """ self.store = hs.get_datastore() self.notifier = hs.get_notifier() @@ -48,7 +52,7 @@ def __init__(self, hs): self._device_list_updater = hs.get_device_handler().device_list_updater - async def on_direct_to_device_edu(self, origin, content): + async def on_direct_to_device_edu(self, origin: str, content: JsonDict) -> None: local_messages = {} sender_user_id = content["sender"] if origin != get_domain_from_id(sender_user_id): @@ -95,7 +99,7 @@ async def _check_for_unknown_devices( message_type: str, sender_user_id: str, by_device: Dict[str, Dict[str, Any]], - ): + ) -> None: """Checks inbound device messages for unknown remote devices, and if found marks the remote cache for the user as stale. 
""" @@ -138,11 +142,16 @@ async def _check_for_unknown_devices( self._device_list_updater.user_device_resync, sender_user_id ) - async def send_device_message(self, sender_user_id, message_type, messages): + async def send_device_message( + self, + sender_user_id: str, + message_type: str, + messages: Dict[str, Dict[str, JsonDict]], + ) -> None: set_tag("number_of_messages", len(messages)) set_tag("sender", sender_user_id) local_messages = {} - remote_messages = {} + remote_messages = {} # type: Dict[str, Dict[str, Dict[str, JsonDict]]] for user_id, by_device in messages.items(): # we use UserID.from_string to catch invalid user ids if self.is_mine(UserID.from_string(user_id)): diff --git a/synapse/handlers/password_policy.py b/synapse/handlers/password_policy.py index 88e2f8720058..6c635cc31b7e 100644 --- a/synapse/handlers/password_policy.py +++ b/synapse/handlers/password_policy.py @@ -16,14 +16,18 @@ import logging import re +from typing import TYPE_CHECKING from synapse.api.errors import Codes, PasswordRefusedError +if TYPE_CHECKING: + from synapse.app.homeserver import HomeServer + logger = logging.getLogger(__name__) class PasswordPolicyHandler: - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): self.policy = hs.config.password_policy self.enabled = hs.config.password_policy_enabled @@ -33,11 +37,11 @@ def __init__(self, hs): self.regexp_uppercase = re.compile("[A-Z]") self.regexp_lowercase = re.compile("[a-z]") - def validate_password(self, password): + def validate_password(self, password: str) -> None: """Checks whether a given password complies with the server's policy. Args: - password (str): The password to check against the server's policy. + password: The password to check against the server's policy. Raises: PasswordRefusedError: The password doesn't comply with the server's policy. diff --git a/synapse/handlers/read_marker.py b/synapse/handlers/read_marker.py index c32f314a1c0e..a7550806e6c4 100644 --- a/synapse/handlers/read_marker.py +++ b/synapse/handlers/read_marker.py @@ -14,23 +14,29 @@ # limitations under the License. import logging +from typing import TYPE_CHECKING from synapse.util.async_helpers import Linearizer from ._base import BaseHandler +if TYPE_CHECKING: + from synapse.app.homeserver import HomeServer + logger = logging.getLogger(__name__) class ReadMarkerHandler(BaseHandler): - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): super().__init__(hs) self.server_name = hs.config.server_name self.store = hs.get_datastore() self.read_marker_linearizer = Linearizer(name="read_marker") self.notifier = hs.get_notifier() - async def received_client_read_marker(self, room_id, user_id, event_id): + async def received_client_read_marker( + self, room_id: str, user_id: str, event_id: str + ) -> None: """Updates the read marker for a given user in a given room if the event ID given is ahead in the stream relative to the current read marker. diff --git a/synapse/notifier.py b/synapse/notifier.py index 59415f6f88c5..13adeed01e5e 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -339,7 +339,7 @@ def on_new_event( self, stream_key: str, new_token: Union[int, RoomStreamToken], - users: Collection[UserID] = [], + users: Collection[Union[str, UserID]] = [], rooms: Collection[str] = [], ): """ Used to inform listeners that something has happened event wise. 
diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index a85867936f06..7fd7b0b952ca 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -1220,7 +1220,9 @@ async def record_user_external_id( desc="record_user_external_id", ) - async def user_set_password_hash(self, user_id: str, password_hash: str) -> None: + async def user_set_password_hash( + self, user_id: str, password_hash: Optional[str] + ) -> None: """ NB. This does *not* evict any cache because the one use for this removes most of the entries subsequently anyway so it would be From c9c0ad5e204f309f2686dbe250382e481e0f82c2 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 9 Oct 2020 07:24:34 -0400 Subject: [PATCH 151/245] Remove the deprecated Handlers object (#8494) All handlers now available via get_*_handler() methods on the HomeServer. --- changelog.d/8494.misc | 1 + synapse/app/admin_cmd.py | 2 +- synapse/federation/federation_server.py | 7 +++- synapse/handlers/__init__.py | 33 ------------------- synapse/handlers/auth.py | 2 +- synapse/handlers/deactivate_account.py | 2 +- synapse/handlers/message.py | 4 +-- synapse/handlers/pagination.py | 2 +- synapse/handlers/register.py | 2 +- synapse/handlers/room.py | 2 +- synapse/handlers/room_member.py | 6 ++-- synapse/handlers/ui_auth/checkers.py | 2 +- synapse/replication/http/federation.py | 2 +- synapse/replication/http/membership.py | 2 +- synapse/rest/admin/rooms.py | 4 +-- synapse/rest/admin/users.py | 13 ++++---- synapse/rest/client/v1/directory.py | 25 +++++++------- synapse/rest/client/v1/login.py | 1 - synapse/rest/client/v1/room.py | 10 ++---- synapse/rest/client/v2_alpha/account.py | 16 ++++----- synapse/rest/client/v2_alpha/register.py | 6 ++-- synapse/server.py | 30 ++++++++++++++--- tests/api/test_auth.py | 12 ++----- tests/api/test_filtering.py | 5 +-- tests/crypto/test_keyring.py | 6 ++-- tests/handlers/test_admin.py | 2 +- tests/handlers/test_auth.py | 11 ++----- tests/handlers/test_directory.py | 10 +++--- tests/handlers/test_e2e_keys.py | 2 +- tests/handlers/test_e2e_room_keys.py | 2 +- tests/handlers/test_federation.py | 2 +- tests/handlers/test_presence.py | 2 +- tests/handlers/test_profile.py | 7 ---- tests/handlers/test_register.py | 22 +++++-------- .../test_federation_sender_shard.py | 2 +- tests/rest/client/test_shadow_banned.py | 2 +- tests/rest/client/v1/test_events.py | 2 +- tests/rest/client/v1/test_rooms.py | 6 +++- tests/rest/client/v1/test_typing.py | 2 +- tests/test_federation.py | 2 +- 40 files changed, 116 insertions(+), 157 deletions(-) create mode 100644 changelog.d/8494.misc diff --git a/changelog.d/8494.misc b/changelog.d/8494.misc new file mode 100644 index 000000000000..6e56c6b8548c --- /dev/null +++ b/changelog.d/8494.misc @@ -0,0 +1 @@ +Remove the deprecated `Handlers` object. 
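The mechanical change in the rest of this patch is the same everywhere: callers stop going through the deprecated `Handlers` collection and ask the `HomeServer` for the specific handler instead, each accessor being lazily constructed and cached via `cache_in_self`. Given an existing `hs: HomeServer`, the before/after usage looks like this:

    # Before: constructing Handlers built every handler it knew about,
    # even when only one was needed.
    federation_handler = hs.get_handlers().federation_handler
    directory_handler = hs.get_handlers().directory_handler

    # After: each handler has its own cached accessor, so tests and
    # cut-down deployments only construct what they actually use.
    federation_handler = hs.get_federation_handler()
    directory_handler = hs.get_directory_handler()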
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index f0d65d08d72d..b4bd4d8e7afb 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -89,7 +89,7 @@ async def export_data_command(hs, args): user_id = args.user_id directory = args.output_directory - res = await hs.get_handlers().admin_handler.export_user_data( + res = await hs.get_admin_handler().export_user_data( user_id, FileExfiltrationWriter(user_id, directory=directory) ) print(res) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 1c7ea886c9d4..e8039e244ccc 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -99,10 +99,15 @@ def __init__(self, hs): super().__init__(hs) self.auth = hs.get_auth() - self.handler = hs.get_handlers().federation_handler + self.handler = hs.get_federation_handler() self.state = hs.get_state_handler() self.device_handler = hs.get_device_handler() + + # Ensure the following handlers are loaded since they register callbacks + # with FederationHandlerRegistry. + hs.get_directory_handler() + self._federation_ratelimiter = hs.get_federation_ratelimiter() self._server_linearizer = Linearizer("fed_server") diff --git a/synapse/handlers/__init__.py b/synapse/handlers/__init__.py index 286f0054be28..bfebb0f644f4 100644 --- a/synapse/handlers/__init__.py +++ b/synapse/handlers/__init__.py @@ -12,36 +12,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -from .admin import AdminHandler -from .directory import DirectoryHandler -from .federation import FederationHandler -from .identity import IdentityHandler -from .search import SearchHandler - - -class Handlers: - - """ Deprecated. A collection of handlers. - - At some point most of the classes whose name ended "Handler" were - accessed through this class. - - However this makes it painful to unit test the handlers and to run cut - down versions of synapse that only use specific handlers because using a - single handler required creating all of the handlers. So some of the - handlers have been lifted out of the Handlers object and are now accessed - directly through the homeserver object itself. - - Any new handlers should follow the new pattern of being accessed through - the homeserver object and should not be added to the Handlers object. - - The remaining handlers should be moved out of the handlers object. 
- """ - - def __init__(self, hs): - self.federation_handler = FederationHandler(hs) - self.directory_handler = DirectoryHandler(hs) - self.admin_handler = AdminHandler(hs) - self.identity_handler = IdentityHandler(hs) - self.search_handler = SearchHandler(hs) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index f6d17c53b15e..1d1ddc22454b 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -1080,7 +1080,7 @@ async def delete_threepid( if medium == "email": address = canonicalise_email(address) - identity_handler = self.hs.get_handlers().identity_handler + identity_handler = self.hs.get_identity_handler() result = await identity_handler.try_unbind_threepid( user_id, {"medium": medium, "address": address, "id_server": id_server} ) diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index 72a583153187..58c9f12686f5 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -37,7 +37,7 @@ def __init__(self, hs: "HomeServer"): self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() self._room_member_handler = hs.get_room_member_handler() - self._identity_handler = hs.get_handlers().identity_handler + self._identity_handler = hs.get_identity_handler() self.user_directory_handler = hs.get_user_directory_handler() # Flag that indicates whether the process to part users from rooms is running diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 3e9a22e8f334..33d133a4b21b 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1014,7 +1014,7 @@ async def persist_and_notify_client_event( # Check the alias is currently valid (if it has changed). room_alias_str = event.content.get("alias", None) - directory_handler = self.hs.get_handlers().directory_handler + directory_handler = self.hs.get_directory_handler() if room_alias_str and room_alias_str != original_alias: await self._validate_canonical_alias( directory_handler, room_alias_str, event.room_id @@ -1040,7 +1040,7 @@ async def persist_and_notify_client_event( directory_handler, alias_str, event.room_id ) - federation_handler = self.hs.get_handlers().federation_handler + federation_handler = self.hs.get_federation_handler() if event.type == EventTypes.Member: if event.content["membership"] == Membership.INVITE: diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 2c2a633938ba..085b685959a4 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -383,7 +383,7 @@ async def get_messages( "room_key", leave_token ) - await self.hs.get_handlers().federation_handler.maybe_backfill( + await self.hs.get_federation_handler().maybe_backfill( room_id, curr_topo, limit=pagin_config.limit, ) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 538f4b2a61de..a6f1d21674b6 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -48,7 +48,7 @@ def __init__(self, hs): self._auth_handler = hs.get_auth_handler() self.profile_handler = hs.get_profile_handler() self.user_directory_handler = hs.get_user_directory_handler() - self.identity_handler = self.hs.get_handlers().identity_handler + self.identity_handler = self.hs.get_identity_handler() self.ratelimiter = hs.get_registration_ratelimiter() self.macaroon_gen = hs.get_macaroon_generator() self._server_notices_mxid = hs.config.server_notices_mxid diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 
d0530a446c83..1d04d41e9893 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -691,7 +691,7 @@ async def create_room( if not allowed_by_third_party_rules: raise SynapseError(403, "Room visibility value not allowed.") - directory_handler = self.hs.get_handlers().directory_handler + directory_handler = self.hs.get_directory_handler() if room_alias: await directory_handler.create_association( requester=requester, diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index fd8114a64d25..ffbc62ff444a 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -64,9 +64,9 @@ def __init__(self, hs: "HomeServer"): self.state_handler = hs.get_state_handler() self.config = hs.config - self.federation_handler = hs.get_handlers().federation_handler - self.directory_handler = hs.get_handlers().directory_handler - self.identity_handler = hs.get_handlers().identity_handler + self.federation_handler = hs.get_federation_handler() + self.directory_handler = hs.get_directory_handler() + self.identity_handler = hs.get_identity_handler() self.registration_handler = hs.get_registration_handler() self.profile_handler = hs.get_profile_handler() self.event_creation_handler = hs.get_event_creation_handler() diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py index 9146dc1a3bfa..3d66bf305e7f 100644 --- a/synapse/handlers/ui_auth/checkers.py +++ b/synapse/handlers/ui_auth/checkers.py @@ -143,7 +143,7 @@ async def _check_threepid(self, medium, authdict): threepid_creds = authdict["threepid_creds"] - identity_handler = self.hs.get_handlers().identity_handler + identity_handler = self.hs.get_identity_handler() logger.info("Getting validated threepid. threepidcreds: %r", (threepid_creds,)) diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py index 5393b9a9e7dd..b4f4a68b5cfc 100644 --- a/synapse/replication/http/federation.py +++ b/synapse/replication/http/federation.py @@ -62,7 +62,7 @@ def __init__(self, hs): self.store = hs.get_datastore() self.storage = hs.get_storage() self.clock = hs.get_clock() - self.federation_handler = hs.get_handlers().federation_handler + self.federation_handler = hs.get_federation_handler() @staticmethod async def _serialize_payload(store, room_id, event_and_contexts, backfilled): diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py index 30680baee813..e7cc74a5d21c 100644 --- a/synapse/replication/http/membership.py +++ b/synapse/replication/http/membership.py @@ -47,7 +47,7 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint): def __init__(self, hs): super().__init__(hs) - self.federation_handler = hs.get_handlers().federation_handler + self.federation_handler = hs.get_federation_handler() self.store = hs.get_datastore() self.clock = hs.get_clock() diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index 09726d52d67e..f5304ff43dd4 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -138,7 +138,7 @@ class ListRoomRestServlet(RestServlet): def __init__(self, hs): self.store = hs.get_datastore() self.auth = hs.get_auth() - self.admin_handler = hs.get_handlers().admin_handler + self.admin_handler = hs.get_admin_handler() async def on_GET(self, request): requester = await self.auth.get_user_by_req(request) @@ -273,7 +273,7 @@ def __init__(self, hs): self.hs = hs self.auth = hs.get_auth() self.room_member_handler = hs.get_room_member_handler() - 
self.admin_handler = hs.get_handlers().admin_handler + self.admin_handler = hs.get_admin_handler() self.state_handler = hs.get_state_handler() async def on_POST(self, request, room_identifier): diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 20dc1d0e057c..8efefbc0a03a 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -45,7 +45,7 @@ def __init__(self, hs): self.hs = hs self.store = hs.get_datastore() self.auth = hs.get_auth() - self.admin_handler = hs.get_handlers().admin_handler + self.admin_handler = hs.get_admin_handler() async def on_GET(self, request, user_id): target_user = UserID.from_string(user_id) @@ -82,7 +82,7 @@ def __init__(self, hs): self.hs = hs self.store = hs.get_datastore() self.auth = hs.get_auth() - self.admin_handler = hs.get_handlers().admin_handler + self.admin_handler = hs.get_admin_handler() async def on_GET(self, request): await assert_requester_is_admin(self.auth, request) @@ -135,7 +135,7 @@ class UserRestServletV2(RestServlet): def __init__(self, hs): self.hs = hs self.auth = hs.get_auth() - self.admin_handler = hs.get_handlers().admin_handler + self.admin_handler = hs.get_admin_handler() self.store = hs.get_datastore() self.auth_handler = hs.get_auth_handler() self.profile_handler = hs.get_profile_handler() @@ -448,7 +448,7 @@ class WhoisRestServlet(RestServlet): def __init__(self, hs): self.hs = hs self.auth = hs.get_auth() - self.handlers = hs.get_handlers() + self.admin_handler = hs.get_admin_handler() async def on_GET(self, request, user_id): target_user = UserID.from_string(user_id) @@ -461,7 +461,7 @@ async def on_GET(self, request, user_id): if not self.hs.is_mine(target_user): raise SynapseError(400, "Can only whois a local user") - ret = await self.handlers.admin_handler.get_whois(target_user) + ret = await self.admin_handler.get_whois(target_user) return 200, ret @@ -591,7 +591,6 @@ def __init__(self, hs): self.hs = hs self.store = hs.get_datastore() self.auth = hs.get_auth() - self.handlers = hs.get_handlers() async def on_GET(self, request, target_user_id): """Get request to search user table for specific users according to @@ -612,7 +611,7 @@ async def on_GET(self, request, target_user_id): term = parse_string(request, "term", required=True) logger.info("term: %s ", term) - ret = await self.handlers.store.search_users(term) + ret = await self.store.search_users(term) return 200, ret diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py index faabeeb91c82..e5af26b176dc 100644 --- a/synapse/rest/client/v1/directory.py +++ b/synapse/rest/client/v1/directory.py @@ -42,14 +42,13 @@ class ClientDirectoryServer(RestServlet): def __init__(self, hs): super().__init__() self.store = hs.get_datastore() - self.handlers = hs.get_handlers() + self.directory_handler = hs.get_directory_handler() self.auth = hs.get_auth() async def on_GET(self, request, room_alias): room_alias = RoomAlias.from_string(room_alias) - dir_handler = self.handlers.directory_handler - res = await dir_handler.get_association(room_alias) + res = await self.directory_handler.get_association(room_alias) return 200, res @@ -79,19 +78,19 @@ async def on_PUT(self, request, room_alias): requester = await self.auth.get_user_by_req(request) - await self.handlers.directory_handler.create_association( + await self.directory_handler.create_association( requester, room_alias, room_id, servers ) return 200, {} async def on_DELETE(self, request, room_alias): - dir_handler = self.handlers.directory_handler - try: 
service = self.auth.get_appservice_by_req(request) room_alias = RoomAlias.from_string(room_alias) - await dir_handler.delete_appservice_association(service, room_alias) + await self.directory_handler.delete_appservice_association( + service, room_alias + ) logger.info( "Application service at %s deleted alias %s", service.url, @@ -107,7 +106,7 @@ async def on_DELETE(self, request, room_alias): room_alias = RoomAlias.from_string(room_alias) - await dir_handler.delete_association(requester, room_alias) + await self.directory_handler.delete_association(requester, room_alias) logger.info( "User %s deleted alias %s", user.to_string(), room_alias.to_string() @@ -122,7 +121,7 @@ class ClientDirectoryListServer(RestServlet): def __init__(self, hs): super().__init__() self.store = hs.get_datastore() - self.handlers = hs.get_handlers() + self.directory_handler = hs.get_directory_handler() self.auth = hs.get_auth() async def on_GET(self, request, room_id): @@ -138,7 +137,7 @@ async def on_PUT(self, request, room_id): content = parse_json_object_from_request(request) visibility = content.get("visibility", "public") - await self.handlers.directory_handler.edit_published_room_list( + await self.directory_handler.edit_published_room_list( requester, room_id, visibility ) @@ -147,7 +146,7 @@ async def on_PUT(self, request, room_id): async def on_DELETE(self, request, room_id): requester = await self.auth.get_user_by_req(request) - await self.handlers.directory_handler.edit_published_room_list( + await self.directory_handler.edit_published_room_list( requester, room_id, "private" ) @@ -162,7 +161,7 @@ class ClientAppserviceDirectoryListServer(RestServlet): def __init__(self, hs): super().__init__() self.store = hs.get_datastore() - self.handlers = hs.get_handlers() + self.directory_handler = hs.get_directory_handler() self.auth = hs.get_auth() def on_PUT(self, request, network_id, room_id): @@ -180,7 +179,7 @@ async def _edit(self, request, network_id, room_id, visibility): 403, "Only appservices can edit the appservice published room list" ) - await self.handlers.directory_handler.edit_published_appservice_room_list( + await self.directory_handler.edit_published_appservice_room_list( requester.app_service.id, network_id, room_id, visibility ) diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 3d1693d7acfb..d7deb9300d71 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -67,7 +67,6 @@ def __init__(self, hs): self.auth_handler = self.hs.get_auth_handler() self.registration_handler = hs.get_registration_handler() - self.handlers = hs.get_handlers() self._well_known_builder = WellKnownBuilder(hs) self._address_ratelimiter = Ratelimiter( clock=hs.get_clock(), diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index b63389e5fedf..00b439708222 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -112,7 +112,6 @@ def on_OPTIONS(self, request): class RoomStateEventRestServlet(TransactionRestServlet): def __init__(self, hs): super().__init__(hs) - self.handlers = hs.get_handlers() self.event_creation_handler = hs.get_event_creation_handler() self.room_member_handler = hs.get_room_member_handler() self.message_handler = hs.get_message_handler() @@ -798,7 +797,6 @@ def on_PUT(self, request, room_id, membership_action, txn_id): class RoomRedactEventRestServlet(TransactionRestServlet): def __init__(self, hs): super().__init__(hs) - self.handlers = hs.get_handlers() self.event_creation_handler = 
hs.get_event_creation_handler() self.auth = hs.get_auth() @@ -903,7 +901,7 @@ class RoomAliasListServlet(RestServlet): def __init__(self, hs: "synapse.server.HomeServer"): super().__init__() self.auth = hs.get_auth() - self.directory_handler = hs.get_handlers().directory_handler + self.directory_handler = hs.get_directory_handler() async def on_GET(self, request, room_id): requester = await self.auth.get_user_by_req(request) @@ -920,7 +918,7 @@ class SearchRestServlet(RestServlet): def __init__(self, hs): super().__init__() - self.handlers = hs.get_handlers() + self.search_handler = hs.get_search_handler() self.auth = hs.get_auth() async def on_POST(self, request): @@ -929,9 +927,7 @@ async def on_POST(self, request): content = parse_json_object_from_request(request) batch = parse_string(request, "next_batch") - results = await self.handlers.search_handler.search( - requester.user, content, batch - ) + results = await self.search_handler.search(requester.user, content, batch) return 200, results diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index ab5815e7f763..e857cff17616 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -56,7 +56,7 @@ def __init__(self, hs): self.hs = hs self.datastore = hs.get_datastore() self.config = hs.config - self.identity_handler = hs.get_handlers().identity_handler + self.identity_handler = hs.get_identity_handler() if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL: self.mailer = Mailer( @@ -327,7 +327,7 @@ def __init__(self, hs): super().__init__() self.hs = hs self.config = hs.config - self.identity_handler = hs.get_handlers().identity_handler + self.identity_handler = hs.get_identity_handler() self.store = self.hs.get_datastore() if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL: @@ -424,7 +424,7 @@ def __init__(self, hs): self.hs = hs super().__init__() self.store = self.hs.get_datastore() - self.identity_handler = hs.get_handlers().identity_handler + self.identity_handler = hs.get_identity_handler() async def on_POST(self, request): body = parse_json_object_from_request(request) @@ -574,7 +574,7 @@ def __init__(self, hs): self.config = hs.config self.clock = hs.get_clock() self.store = hs.get_datastore() - self.identity_handler = hs.get_handlers().identity_handler + self.identity_handler = hs.get_identity_handler() async def on_POST(self, request): if not self.config.account_threepid_delegate_msisdn: @@ -604,7 +604,7 @@ class ThreepidRestServlet(RestServlet): def __init__(self, hs): super().__init__() self.hs = hs - self.identity_handler = hs.get_handlers().identity_handler + self.identity_handler = hs.get_identity_handler() self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() self.datastore = self.hs.get_datastore() @@ -660,7 +660,7 @@ class ThreepidAddRestServlet(RestServlet): def __init__(self, hs): super().__init__() self.hs = hs - self.identity_handler = hs.get_handlers().identity_handler + self.identity_handler = hs.get_identity_handler() self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() @@ -711,7 +711,7 @@ class ThreepidBindRestServlet(RestServlet): def __init__(self, hs): super().__init__() self.hs = hs - self.identity_handler = hs.get_handlers().identity_handler + self.identity_handler = hs.get_identity_handler() self.auth = hs.get_auth() async def on_POST(self, request): @@ -740,7 +740,7 @@ class ThreepidUnbindRestServlet(RestServlet): def __init__(self, hs): super().__init__() 
self.hs = hs - self.identity_handler = hs.get_handlers().identity_handler + self.identity_handler = hs.get_identity_handler() self.auth = hs.get_auth() self.datastore = self.hs.get_datastore() diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index ffa2dfce42d0..395b6a82a978 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -78,7 +78,7 @@ def __init__(self, hs): """ super().__init__() self.hs = hs - self.identity_handler = hs.get_handlers().identity_handler + self.identity_handler = hs.get_identity_handler() self.config = hs.config if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL: @@ -176,7 +176,7 @@ def __init__(self, hs): """ super().__init__() self.hs = hs - self.identity_handler = hs.get_handlers().identity_handler + self.identity_handler = hs.get_identity_handler() async def on_POST(self, request): body = parse_json_object_from_request(request) @@ -370,7 +370,7 @@ def __init__(self, hs): self.store = hs.get_datastore() self.auth_handler = hs.get_auth_handler() self.registration_handler = hs.get_registration_handler() - self.identity_handler = hs.get_handlers().identity_handler + self.identity_handler = hs.get_identity_handler() self.room_member_handler = hs.get_room_member_handler() self.macaroon_gen = hs.get_macaroon_generator() self.ratelimiter = hs.get_registration_ratelimiter() diff --git a/synapse/server.py b/synapse/server.py index f83dd6148c96..e793793cdca9 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -54,19 +54,22 @@ from synapse.federation.transport.client import TransportLayerClient from synapse.groups.attestations import GroupAttestationSigning, GroupAttestionRenewer from synapse.groups.groups_server import GroupsServerHandler, GroupsServerWorkerHandler -from synapse.handlers import Handlers from synapse.handlers.account_validity import AccountValidityHandler from synapse.handlers.acme import AcmeHandler +from synapse.handlers.admin import AdminHandler from synapse.handlers.appservice import ApplicationServicesHandler from synapse.handlers.auth import AuthHandler, MacaroonGenerator from synapse.handlers.cas_handler import CasHandler from synapse.handlers.deactivate_account import DeactivateAccountHandler from synapse.handlers.device import DeviceHandler, DeviceWorkerHandler from synapse.handlers.devicemessage import DeviceMessageHandler +from synapse.handlers.directory import DirectoryHandler from synapse.handlers.e2e_keys import E2eKeysHandler from synapse.handlers.e2e_room_keys import E2eRoomKeysHandler from synapse.handlers.events import EventHandler, EventStreamHandler +from synapse.handlers.federation import FederationHandler from synapse.handlers.groups_local import GroupsLocalHandler, GroupsLocalWorkerHandler +from synapse.handlers.identity import IdentityHandler from synapse.handlers.initial_sync import InitialSyncHandler from synapse.handlers.message import EventCreationHandler, MessageHandler from synapse.handlers.pagination import PaginationHandler @@ -84,6 +87,7 @@ from synapse.handlers.room_list import RoomListHandler from synapse.handlers.room_member import RoomMemberMasterHandler from synapse.handlers.room_member_worker import RoomMemberWorkerHandler +from synapse.handlers.search import SearchHandler from synapse.handlers.set_password import SetPasswordHandler from synapse.handlers.stats import StatsHandler from synapse.handlers.sync import SyncHandler @@ -318,10 +322,6 @@ def get_federation_client(self) -> FederationClient: 
def get_federation_server(self) -> FederationServer: return FederationServer(self) - @cache_in_self - def get_handlers(self) -> Handlers: - return Handlers(self) - @cache_in_self def get_notifier(self) -> Notifier: return Notifier(self) @@ -408,6 +408,10 @@ def get_device_handler(self): def get_device_message_handler(self) -> DeviceMessageHandler: return DeviceMessageHandler(self) + @cache_in_self + def get_directory_handler(self) -> DirectoryHandler: + return DirectoryHandler(self) + @cache_in_self def get_e2e_keys_handler(self) -> E2eKeysHandler: return E2eKeysHandler(self) @@ -420,6 +424,10 @@ def get_e2e_room_keys_handler(self) -> E2eRoomKeysHandler: def get_acme_handler(self) -> AcmeHandler: return AcmeHandler(self) + @cache_in_self + def get_admin_handler(self) -> AdminHandler: + return AdminHandler(self) + @cache_in_self def get_application_service_api(self) -> ApplicationServiceApi: return ApplicationServiceApi(self) @@ -440,6 +448,14 @@ def get_event_handler(self) -> EventHandler: def get_event_stream_handler(self) -> EventStreamHandler: return EventStreamHandler(self) + @cache_in_self + def get_federation_handler(self) -> FederationHandler: + return FederationHandler(self) + + @cache_in_self + def get_identity_handler(self) -> IdentityHandler: + return IdentityHandler(self) + @cache_in_self def get_initial_sync_handler(self) -> InitialSyncHandler: return InitialSyncHandler(self) @@ -459,6 +475,10 @@ def get_event_creation_handler(self) -> EventCreationHandler: def get_deactivate_account_handler(self) -> DeactivateAccountHandler: return DeactivateAccountHandler(self) + @cache_in_self + def get_search_handler(self) -> SearchHandler: + return SearchHandler(self) + @cache_in_self def get_set_password_handler(self) -> SetPasswordHandler: return SetPasswordHandler(self) diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index 8ab56ec94c1e..cb6f29d6704b 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -19,7 +19,6 @@ from twisted.internet import defer -import synapse.handlers.auth from synapse.api.auth import Auth from synapse.api.constants import UserTypes from synapse.api.errors import ( @@ -36,20 +35,15 @@ from tests.utils import mock_getRawHeaders, setup_test_homeserver -class TestHandlers: - def __init__(self, hs): - self.auth_handler = synapse.handlers.auth.AuthHandler(hs) - - class AuthTestCase(unittest.TestCase): @defer.inlineCallbacks def setUp(self): self.state_handler = Mock() self.store = Mock() - self.hs = yield setup_test_homeserver(self.addCleanup, handlers=None) + self.hs = yield setup_test_homeserver(self.addCleanup) self.hs.get_datastore = Mock(return_value=self.store) - self.hs.handlers = TestHandlers(self.hs) + self.hs.get_auth_handler().store = self.store self.auth = Auth(self.hs) # AuthBlocking reads from the hs' config on initialization. 
We need to @@ -283,7 +277,7 @@ def test_cannot_use_regular_token_as_guest(self): self.store.get_device = Mock(return_value=defer.succeed(None)) token = yield defer.ensureDeferred( - self.hs.handlers.auth_handler.get_access_token_for_user_id( + self.hs.get_auth_handler().get_access_token_for_user_id( USER_ID, "DEVICE", valid_until_ms=None ) ) diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py index d2d535d23c20..c98ae759743d 100644 --- a/tests/api/test_filtering.py +++ b/tests/api/test_filtering.py @@ -50,10 +50,7 @@ def setUp(self): self.mock_http_client.put_json = DeferredMockCallable() hs = yield setup_test_homeserver( - self.addCleanup, - handlers=None, - http_client=self.mock_http_client, - keyring=Mock(), + self.addCleanup, http_client=self.mock_http_client, keyring=Mock(), ) self.filtering = hs.get_filtering() diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 8ff1460c0d7a..697916a0194b 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -315,7 +315,7 @@ async def get_keys2(keys_to_fetch): class ServerKeyFetcherTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): self.http_client = Mock() - hs = self.setup_test_homeserver(handlers=None, http_client=self.http_client) + hs = self.setup_test_homeserver(http_client=self.http_client) return hs def test_get_keys_from_server(self): @@ -395,9 +395,7 @@ def make_homeserver(self, reactor, clock): } ] - return self.setup_test_homeserver( - handlers=None, http_client=self.http_client, config=config - ) + return self.setup_test_homeserver(http_client=self.http_client, config=config) def build_perspectives_response( self, server_name: str, signing_key: SigningKey, valid_until_ts: int, diff --git a/tests/handlers/test_admin.py b/tests/handlers/test_admin.py index fc37c4328c3a..5c2b4de1a665 100644 --- a/tests/handlers/test_admin.py +++ b/tests/handlers/test_admin.py @@ -35,7 +35,7 @@ class ExfiltrateData(unittest.HomeserverTestCase): ] def prepare(self, reactor, clock, hs): - self.admin_handler = hs.get_handlers().admin_handler + self.admin_handler = hs.get_admin_handler() self.user1 = self.register_user("user1", "password") self.token1 = self.login("user1", "password") diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py index 97877c2e42af..b5055e018cbd 100644 --- a/tests/handlers/test_auth.py +++ b/tests/handlers/test_auth.py @@ -21,24 +21,17 @@ import synapse import synapse.api.errors from synapse.api.errors import ResourceLimitError -from synapse.handlers.auth import AuthHandler from tests import unittest from tests.test_utils import make_awaitable from tests.utils import setup_test_homeserver -class AuthHandlers: - def __init__(self, hs): - self.auth_handler = AuthHandler(hs) - - class AuthTestCase(unittest.TestCase): @defer.inlineCallbacks def setUp(self): - self.hs = yield setup_test_homeserver(self.addCleanup, handlers=None) - self.hs.handlers = AuthHandlers(self.hs) - self.auth_handler = self.hs.handlers.auth_handler + self.hs = yield setup_test_homeserver(self.addCleanup) + self.auth_handler = self.hs.get_auth_handler() self.macaroon_generator = self.hs.get_macaroon_generator() # MAU tests diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py index bc0c5aefdcf0..2ce6dc952806 100644 --- a/tests/handlers/test_directory.py +++ b/tests/handlers/test_directory.py @@ -48,7 +48,7 @@ def register_query_handler(query_type, handler): federation_registry=self.mock_registry, ) - self.handler = 
hs.get_handlers().directory_handler + self.handler = hs.get_directory_handler() self.store = hs.get_datastore() @@ -110,7 +110,7 @@ class TestCreateAlias(unittest.HomeserverTestCase): ] def prepare(self, reactor, clock, hs): - self.handler = hs.get_handlers().directory_handler + self.handler = hs.get_directory_handler() # Create user self.admin_user = self.register_user("admin", "pass", admin=True) @@ -173,7 +173,7 @@ class TestDeleteAlias(unittest.HomeserverTestCase): def prepare(self, reactor, clock, hs): self.store = hs.get_datastore() - self.handler = hs.get_handlers().directory_handler + self.handler = hs.get_directory_handler() self.state_handler = hs.get_state_handler() # Create user @@ -289,7 +289,7 @@ class CanonicalAliasTestCase(unittest.HomeserverTestCase): def prepare(self, reactor, clock, hs): self.store = hs.get_datastore() - self.handler = hs.get_handlers().directory_handler + self.handler = hs.get_directory_handler() self.state_handler = hs.get_state_handler() # Create user @@ -442,7 +442,7 @@ def prepare(self, reactor, clock, hs): self.assertEquals(200, channel.code, channel.result) self.room_list_handler = hs.get_room_list_handler() - self.directory_handler = hs.get_handlers().directory_handler + self.directory_handler = hs.get_directory_handler() return hs diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index e79d612f7ae2..924f29f05199 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -38,7 +38,7 @@ def __init__(self, *args, **kwargs): @defer.inlineCallbacks def setUp(self): self.hs = yield utils.setup_test_homeserver( - self.addCleanup, handlers=None, federation_client=mock.Mock() + self.addCleanup, federation_client=mock.Mock() ) self.handler = synapse.handlers.e2e_keys.E2eKeysHandler(self.hs) self.store = self.hs.get_datastore() diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py index 7adde9b9de8d..45f201a3992c 100644 --- a/tests/handlers/test_e2e_room_keys.py +++ b/tests/handlers/test_e2e_room_keys.py @@ -54,7 +54,7 @@ def __init__(self, *args, **kwargs): @defer.inlineCallbacks def setUp(self): self.hs = yield utils.setup_test_homeserver( - self.addCleanup, handlers=None, replication_layer=mock.Mock() + self.addCleanup, replication_layer=mock.Mock() ) self.handler = synapse.handlers.e2e_room_keys.E2eRoomKeysHandler(self.hs) self.local_user = "@boris:" + self.hs.hostname diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index 96fea5867309..9ef80fe5024b 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -38,7 +38,7 @@ class FederationTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): hs = self.setup_test_homeserver(http_client=None) - self.handler = hs.get_handlers().federation_handler + self.handler = hs.get_federation_handler() self.store = hs.get_datastore() return hs diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 306dcfe944e7..914c82e7a8d0 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -470,7 +470,7 @@ def make_homeserver(self, reactor, clock): def prepare(self, reactor, clock, hs): self.federation_sender = hs.get_federation_sender() self.event_builder_factory = hs.get_event_builder_factory() - self.federation_handler = hs.get_handlers().federation_handler + self.federation_handler = hs.get_federation_handler() self.presence_handler = hs.get_presence_handler() # 
self.event_builder_for_2 = EventBuilderFactory(hs) diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index 8e95e53d9ed6..a69fa28b4153 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -20,7 +20,6 @@ import synapse.types from synapse.api.errors import AuthError, SynapseError -from synapse.handlers.profile import MasterProfileHandler from synapse.types import UserID from tests import unittest @@ -28,11 +27,6 @@ from tests.utils import setup_test_homeserver -class ProfileHandlers: - def __init__(self, hs): - self.profile_handler = MasterProfileHandler(hs) - - class ProfileTestCase(unittest.TestCase): """ Tests profile management. """ @@ -51,7 +45,6 @@ def register_query_handler(query_type, handler): hs = yield setup_test_homeserver( self.addCleanup, http_client=None, - handlers=None, resource_for_federation=Mock(), federation_client=self.mock_federation, federation_server=Mock(), diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 702c6aa089b8..bdf3d0a8a298 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -18,7 +18,6 @@ from synapse.api.auth import Auth from synapse.api.constants import UserTypes from synapse.api.errors import Codes, ResourceLimitError, SynapseError -from synapse.handlers.register import RegistrationHandler from synapse.spam_checker_api import RegistrationBehaviour from synapse.types import RoomAlias, UserID, create_requester @@ -29,11 +28,6 @@ from .. import unittest -class RegistrationHandlers: - def __init__(self, hs): - self.registration_handler = RegistrationHandler(hs) - - class RegistrationTestCase(unittest.HomeserverTestCase): """ Tests the RegistrationHandler. """ @@ -154,7 +148,7 @@ def test_auto_create_auto_join_rooms(self): room_alias_str = "#room:test" user_id = self.get_success(self.handler.register_user(localpart="jeff")) rooms = self.get_success(self.store.get_rooms_for_user(user_id)) - directory_handler = self.hs.get_handlers().directory_handler + directory_handler = self.hs.get_directory_handler() room_alias = RoomAlias.from_string(room_alias_str) room_id = self.get_success(directory_handler.get_association(room_alias)) @@ -193,7 +187,7 @@ def test_auto_create_auto_join_rooms_when_user_is_not_a_real_user(self): user_id = self.get_success(self.handler.register_user(localpart="support")) rooms = self.get_success(self.store.get_rooms_for_user(user_id)) self.assertEqual(len(rooms), 0) - directory_handler = self.hs.get_handlers().directory_handler + directory_handler = self.hs.get_directory_handler() room_alias = RoomAlias.from_string(room_alias_str) self.get_failure(directory_handler.get_association(room_alias), SynapseError) @@ -205,7 +199,7 @@ def test_auto_create_auto_join_rooms_when_user_is_the_first_real_user(self): self.store.is_real_user = Mock(return_value=make_awaitable(True)) user_id = self.get_success(self.handler.register_user(localpart="real")) rooms = self.get_success(self.store.get_rooms_for_user(user_id)) - directory_handler = self.hs.get_handlers().directory_handler + directory_handler = self.hs.get_directory_handler() room_alias = RoomAlias.from_string(room_alias_str) room_id = self.get_success(directory_handler.get_association(room_alias)) @@ -237,7 +231,7 @@ def test_auto_create_auto_join_rooms_federated(self): user_id = self.get_success(self.handler.register_user(localpart="jeff")) # Ensure the room was created. 
- directory_handler = self.hs.get_handlers().directory_handler + directory_handler = self.hs.get_directory_handler() room_alias = RoomAlias.from_string(room_alias_str) room_id = self.get_success(directory_handler.get_association(room_alias)) @@ -266,7 +260,7 @@ def test_auto_join_mxid_localpart(self): user_id = self.get_success(self.handler.register_user(localpart="jeff")) # Ensure the room was created. - directory_handler = self.hs.get_handlers().directory_handler + directory_handler = self.hs.get_directory_handler() room_alias = RoomAlias.from_string(room_alias_str) room_id = self.get_success(directory_handler.get_association(room_alias)) @@ -304,7 +298,7 @@ def test_auto_create_auto_join_room_preset(self): user_id = self.get_success(self.handler.register_user(localpart="jeff")) # Ensure the room was created. - directory_handler = self.hs.get_handlers().directory_handler + directory_handler = self.hs.get_directory_handler() room_alias = RoomAlias.from_string(room_alias_str) room_id = self.get_success(directory_handler.get_association(room_alias)) @@ -347,7 +341,7 @@ def test_auto_create_auto_join_room_preset_guest(self): ) # Ensure the room was created. - directory_handler = self.hs.get_handlers().directory_handler + directory_handler = self.hs.get_directory_handler() room_alias = RoomAlias.from_string(room_alias_str) room_id = self.get_success(directory_handler.get_association(room_alias)) @@ -384,7 +378,7 @@ def test_auto_create_auto_join_room_preset_invalid_permissions(self): user_id = self.get_success(self.handler.register_user(localpart="jeff")) # Ensure the room was created. - directory_handler = self.hs.get_handlers().directory_handler + directory_handler = self.hs.get_directory_handler() room_alias = RoomAlias.from_string(room_alias_str) room_id = self.get_success(directory_handler.get_association(room_alias)) diff --git a/tests/replication/test_federation_sender_shard.py b/tests/replication/test_federation_sender_shard.py index 1d7edee5bab3..9c4a9c35635b 100644 --- a/tests/replication/test_federation_sender_shard.py +++ b/tests/replication/test_federation_sender_shard.py @@ -207,7 +207,7 @@ def test_send_typing_sharded(self): def create_room_with_remote_server(self, user, token, remote_server="other_server"): room = self.helper.create_room_as(user, tok=token) store = self.hs.get_datastore() - federation = self.hs.get_handlers().federation_handler + federation = self.hs.get_federation_handler() prev_event_ids = self.get_success(store.get_latest_event_ids_in_room(room)) room_version = self.get_success(store.get_room_version(room)) diff --git a/tests/rest/client/test_shadow_banned.py b/tests/rest/client/test_shadow_banned.py index dfe4bf7762e3..6bb02b96305a 100644 --- a/tests/rest/client/test_shadow_banned.py +++ b/tests/rest/client/test_shadow_banned.py @@ -78,7 +78,7 @@ def test_invite(self): def test_invite_3pid(self): """Ensure that a 3PID invite does not attempt to contact the identity server.""" - identity_handler = self.hs.get_handlers().identity_handler + identity_handler = self.hs.get_identity_handler() identity_handler.lookup_3pid = Mock( side_effect=AssertionError("This should not get called") ) diff --git a/tests/rest/client/v1/test_events.py b/tests/rest/client/v1/test_events.py index f75520877f6f..3397ba557946 100644 --- a/tests/rest/client/v1/test_events.py +++ b/tests/rest/client/v1/test_events.py @@ -42,7 +42,7 @@ def make_homeserver(self, reactor, clock): hs = self.setup_test_homeserver(config=config) - hs.get_handlers().federation_handler = Mock() + 
hs.get_federation_handler = Mock() return hs diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 0d809d25d5d4..9ba5f9d943c5 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -32,6 +32,7 @@ from synapse.util.stringutils import random_string from tests import unittest +from tests.test_utils import make_awaitable PATH_PREFIX = b"/_matrix/client/api/v1" @@ -47,7 +48,10 @@ def make_homeserver(self, reactor, clock): "red", http_client=None, federation_client=Mock(), ) - self.hs.get_federation_handler = Mock(return_value=Mock()) + self.hs.get_federation_handler = Mock() + self.hs.get_federation_handler.return_value.maybe_backfill = Mock( + return_value=make_awaitable(None) + ) async def _insert_client_ip(*args, **kwargs): return None diff --git a/tests/rest/client/v1/test_typing.py b/tests/rest/client/v1/test_typing.py index 94d2bf2eb172..cd58ee77924b 100644 --- a/tests/rest/client/v1/test_typing.py +++ b/tests/rest/client/v1/test_typing.py @@ -44,7 +44,7 @@ def make_homeserver(self, reactor, clock): self.event_source = hs.get_event_sources().sources["typing"] - hs.get_handlers().federation_handler = Mock() + hs.get_federation_handler = Mock() async def get_user_by_access_token(token=None, allow_guest=False): return { diff --git a/tests/test_federation.py b/tests/test_federation.py index 27a7fc9ed71a..d39e79258041 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -75,7 +75,7 @@ def setUp(self): } ) - self.handler = self.homeserver.get_handlers().federation_handler + self.handler = self.homeserver.get_federation_handler() self.handler.do_auth = lambda origin, event, context, auth_events: succeed( context ) From fe0f4a3591302176c7eea48a54f6ed83d9eb4aa9 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 9 Oct 2020 07:37:51 -0400 Subject: [PATCH 152/245] Move additional tasks to the background worker, part 3 (#8489) --- changelog.d/8489.feature | 1 + synapse/app/phone_stats_home.py | 14 +- .../storage/databases/main/censor_events.py | 15 +- synapse/storage/databases/main/devices.py | 196 ++++++------- .../databases/main/event_federation.py | 60 ++-- .../databases/main/event_push_actions.py | 259 +++++++++--------- .../databases/main/monthly_active_users.py | 2 + .../storage/databases/main/registration.py | 11 +- 8 files changed, 276 insertions(+), 282 deletions(-) create mode 100644 changelog.d/8489.feature diff --git a/changelog.d/8489.feature b/changelog.d/8489.feature new file mode 100644 index 000000000000..22591870a49f --- /dev/null +++ b/changelog.d/8489.feature @@ -0,0 +1 @@ + Allow running background tasks in a separate worker process. 
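The store changes below all follow the same pattern: a periodic job that used to wrap itself in `run_as_background_process` becomes an async method decorated with `@wrap_as_background_process`, and its looping call is only scheduled when `hs.config.run_background_tasks` is set, so the work can live on the dedicated background worker. A minimal sketch of that pattern (the store, table and job names here are invented for illustration, not taken from the patch):

    from synapse.metrics.background_process_metrics import wrap_as_background_process
    from synapse.storage._base import SQLBaseStore


    class ExampleCleanupStore(SQLBaseStore):
        def __init__(self, database, db_conn, hs):
            super().__init__(database, db_conn, hs)

            # Only the process configured to run background tasks schedules the loop.
            if hs.config.run_background_tasks:
                self._clock.looping_call(self._cleanup_old_rows, 60 * 60 * 1000)

        @wrap_as_background_process("cleanup_old_rows")
        async def _cleanup_old_rows(self) -> None:
            # The decorator runs this as a tracked background process, so no
            # explicit run_as_background_process wrapper is needed any more.
            def _cleanup_txn(txn):
                cutoff = self._clock.time_msec() - 24 * 60 * 60 * 1000
                txn.execute("DELETE FROM example_table WHERE ts < ?", (cutoff,))

            await self.db_pool.runInteraction("cleanup_old_rows", _cleanup_txn)
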
diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py index 8a69104a042f..c38cf8231fc4 100644 --- a/synapse/app/phone_stats_home.py +++ b/synapse/app/phone_stats_home.py @@ -18,10 +18,7 @@ from prometheus_client import Gauge -from synapse.metrics.background_process_metrics import ( - run_as_background_process, - wrap_as_background_process, -) +from synapse.metrics.background_process_metrics import wrap_as_background_process logger = logging.getLogger("synapse.app.homeserver") @@ -152,13 +149,8 @@ def performance_stats_init(): clock.looping_call(hs.get_datastore().generate_user_daily_visits, 5 * 60 * 1000) # monthly active user limiting functionality - def reap_monthly_active_users(): - return run_as_background_process( - "reap_monthly_active_users", hs.get_datastore().reap_monthly_active_users - ) - - clock.looping_call(reap_monthly_active_users, 1000 * 60 * 60) - reap_monthly_active_users() + clock.looping_call(hs.get_datastore().reap_monthly_active_users, 1000 * 60 * 60) + hs.get_datastore().reap_monthly_active_users() @wrap_as_background_process("generate_monthly_active_users") async def generate_monthly_active_users(): diff --git a/synapse/storage/databases/main/censor_events.py b/synapse/storage/databases/main/censor_events.py index 4bb2b9c28c8e..849bd5ba7a5f 100644 --- a/synapse/storage/databases/main/censor_events.py +++ b/synapse/storage/databases/main/censor_events.py @@ -17,7 +17,7 @@ from typing import TYPE_CHECKING from synapse.events.utils import prune_event_dict -from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore from synapse.storage.database import DatabasePool from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore @@ -35,14 +35,13 @@ class CensorEventsStore(EventsWorkerStore, CacheInvalidationWorkerStore, SQLBase def __init__(self, database: DatabasePool, db_conn, hs: "HomeServer"): super().__init__(database, db_conn, hs) - def _censor_redactions(): - return run_as_background_process( - "_censor_redactions", self._censor_redactions - ) - - if self.hs.config.redaction_retention_period is not None: - hs.get_clock().looping_call(_censor_redactions, 5 * 60 * 1000) + if ( + hs.config.run_background_tasks + and self.hs.config.redaction_retention_period is not None + ): + hs.get_clock().looping_call(self._censor_redactions, 5 * 60 * 1000) + @wrap_as_background_process("_censor_redactions") async def _censor_redactions(self): """Censors all redactions older than the configured period that haven't been censored yet. 
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 2d0a6408b5cd..88fd97e1df51 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -25,7 +25,7 @@ trace, whitelisted_homeserver, ) -from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause from synapse.storage.database import ( DatabasePool, @@ -48,6 +48,14 @@ class DeviceWorkerStore(SQLBaseStore): + def __init__(self, database: DatabasePool, db_conn, hs): + super().__init__(database, db_conn, hs) + + if hs.config.run_background_tasks: + self._clock.looping_call( + self._prune_old_outbound_device_pokes, 60 * 60 * 1000 + ) + async def get_device(self, user_id: str, device_id: str) -> Dict[str, Any]: """Retrieve a device. Only returns devices that are not marked as hidden. @@ -772,6 +780,98 @@ async def remove_dehydrated_device(self, user_id: str, device_id: str) -> bool: ) return count >= 1 + @wrap_as_background_process("prune_old_outbound_device_pokes") + async def _prune_old_outbound_device_pokes( + self, prune_age: int = 24 * 60 * 60 * 1000 + ) -> None: + """Delete old entries out of the device_lists_outbound_pokes to ensure + that we don't fill up due to dead servers. + + Normally, we try to send device updates as a delta since a previous known point: + this is done by setting the prev_id in the m.device_list_update EDU. However, + for that to work, we have to have a complete record of each change to + each device, which can add up to quite a lot of data. + + An alternative mechanism is that, if the remote server sees that it has missed + an entry in the stream_id sequence for a given user, it will request a full + list of that user's devices. Hence, we can reduce the amount of data we have to + store (and transmit in some future transaction), by clearing almost everything + for a given destination out of the database, and having the remote server + resync. + + All we need to do is make sure we keep at least one row for each + (user, destination) pair, to remind us to send a m.device_list_update EDU for + that user when the destination comes back. It doesn't matter which device + we keep. + """ + yesterday = self._clock.time_msec() - prune_age + + def _prune_txn(txn): + # look for (user, destination) pairs which have an update older than + # the cutoff. + # + # For each pair, we also need to know the most recent stream_id, and + # an arbitrary device_id at that stream_id. + select_sql = """ + SELECT + dlop1.destination, + dlop1.user_id, + MAX(dlop1.stream_id) AS stream_id, + (SELECT MIN(dlop2.device_id) AS device_id FROM + device_lists_outbound_pokes dlop2 + WHERE dlop2.destination = dlop1.destination AND + dlop2.user_id=dlop1.user_id AND + dlop2.stream_id=MAX(dlop1.stream_id) + ) + FROM device_lists_outbound_pokes dlop1 + GROUP BY destination, user_id + HAVING min(ts) < ? AND count(*) > 1 + """ + + txn.execute(select_sql, (yesterday,)) + rows = txn.fetchall() + + if not rows: + return + + logger.info( + "Pruning old outbound device list updates for %i users/destinations: %s", + len(rows), + shortstr((row[0], row[1]) for row in rows), + ) + + # we want to keep the update with the highest stream_id for each user. 
+ # + # there might be more than one update (with different device_ids) with the + # same stream_id, so we also delete all but one rows with the max stream id. + delete_sql = """ + DELETE FROM device_lists_outbound_pokes + WHERE destination = ? AND user_id = ? AND ( + stream_id < ? OR + (stream_id = ? AND device_id != ?) + ) + """ + count = 0 + for (destination, user_id, stream_id, device_id) in rows: + txn.execute( + delete_sql, (destination, user_id, stream_id, stream_id, device_id) + ) + count += txn.rowcount + + # Since we've deleted unsent deltas, we need to remove the entry + # of last successful sent so that the prev_ids are correctly set. + sql = """ + DELETE FROM device_lists_outbound_last_success + WHERE destination = ? AND user_id = ? + """ + txn.executemany(sql, ((row[0], row[1]) for row in rows)) + + logger.info("Pruned %d device list outbound pokes", count) + + await self.db_pool.runInteraction( + "_prune_old_outbound_device_pokes", _prune_txn, + ) + class DeviceBackgroundUpdateStore(SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): @@ -908,8 +1008,6 @@ def __init__(self, database: DatabasePool, db_conn, hs): name="device_id_exists", keylen=2, max_entries=10000 ) - self._clock.looping_call(self._prune_old_outbound_device_pokes, 60 * 60 * 1000) - async def store_device( self, user_id: str, device_id: str, initial_device_display_name: Optional[str] ) -> bool: @@ -1267,95 +1365,3 @@ def _add_device_outbound_poke_to_stream_txn( for device_id in device_ids ], ) - - def _prune_old_outbound_device_pokes(self, prune_age: int = 24 * 60 * 60 * 1000): - """Delete old entries out of the device_lists_outbound_pokes to ensure - that we don't fill up due to dead servers. - - Normally, we try to send device updates as a delta since a previous known point: - this is done by setting the prev_id in the m.device_list_update EDU. However, - for that to work, we have to have a complete record of each change to - each device, which can add up to quite a lot of data. - - An alternative mechanism is that, if the remote server sees that it has missed - an entry in the stream_id sequence for a given user, it will request a full - list of that user's devices. Hence, we can reduce the amount of data we have to - store (and transmit in some future transaction), by clearing almost everything - for a given destination out of the database, and having the remote server - resync. - - All we need to do is make sure we keep at least one row for each - (user, destination) pair, to remind us to send a m.device_list_update EDU for - that user when the destination comes back. It doesn't matter which device - we keep. - """ - yesterday = self._clock.time_msec() - prune_age - - def _prune_txn(txn): - # look for (user, destination) pairs which have an update older than - # the cutoff. - # - # For each pair, we also need to know the most recent stream_id, and - # an arbitrary device_id at that stream_id. - select_sql = """ - SELECT - dlop1.destination, - dlop1.user_id, - MAX(dlop1.stream_id) AS stream_id, - (SELECT MIN(dlop2.device_id) AS device_id FROM - device_lists_outbound_pokes dlop2 - WHERE dlop2.destination = dlop1.destination AND - dlop2.user_id=dlop1.user_id AND - dlop2.stream_id=MAX(dlop1.stream_id) - ) - FROM device_lists_outbound_pokes dlop1 - GROUP BY destination, user_id - HAVING min(ts) < ? 
AND count(*) > 1 - """ - - txn.execute(select_sql, (yesterday,)) - rows = txn.fetchall() - - if not rows: - return - - logger.info( - "Pruning old outbound device list updates for %i users/destinations: %s", - len(rows), - shortstr((row[0], row[1]) for row in rows), - ) - - # we want to keep the update with the highest stream_id for each user. - # - # there might be more than one update (with different device_ids) with the - # same stream_id, so we also delete all but one rows with the max stream id. - delete_sql = """ - DELETE FROM device_lists_outbound_pokes - WHERE destination = ? AND user_id = ? AND ( - stream_id < ? OR - (stream_id = ? AND device_id != ?) - ) - """ - count = 0 - for (destination, user_id, stream_id, device_id) in rows: - txn.execute( - delete_sql, (destination, user_id, stream_id, stream_id, device_id) - ) - count += txn.rowcount - - # Since we've deleted unsent deltas, we need to remove the entry - # of last successful sent so that the prev_ids are correctly set. - sql = """ - DELETE FROM device_lists_outbound_last_success - WHERE destination = ? AND user_id = ? - """ - txn.executemany(sql, ((row[0], row[1]) for row in rows)) - - logger.info("Pruned %d device list outbound pokes", count) - - return run_as_background_process( - "prune_old_outbound_device_pokes", - self.db_pool.runInteraction, - "_prune_old_outbound_device_pokes", - _prune_txn, - ) diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index 6d3689c09e59..a6279a6c1394 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -19,7 +19,7 @@ from synapse.api.errors import StoreError from synapse.events import EventBase -from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause from synapse.storage.database import DatabasePool, LoggingTransaction from synapse.storage.databases.main.events_worker import EventsWorkerStore @@ -32,6 +32,14 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBaseStore): + def __init__(self, database: DatabasePool, db_conn, hs): + super().__init__(database, db_conn, hs) + + if hs.config.run_background_tasks: + hs.get_clock().looping_call( + self._delete_old_forward_extrem_cache, 60 * 60 * 1000 + ) + async def get_auth_chain( self, event_ids: Collection[str], include_given: bool = False ) -> List[EventBase]: @@ -586,6 +594,28 @@ async def get_successor_events(self, event_ids: Iterable[str]) -> List[str]: return [row["event_id"] for row in rows] + @wrap_as_background_process("delete_old_forward_extrem_cache") + async def _delete_old_forward_extrem_cache(self) -> None: + def _delete_old_forward_extrem_cache_txn(txn): + # Delete entries older than a month, while making sure we don't delete + # the only entries for a room. + sql = """ + DELETE FROM stream_ordering_to_exterm + WHERE + room_id IN ( + SELECT room_id + FROM stream_ordering_to_exterm + WHERE stream_ordering > ? + ) AND stream_ordering < ? 
+ """ + txn.execute( + sql, (self.stream_ordering_month_ago, self.stream_ordering_month_ago) + ) + + await self.db_pool.runInteraction( + "_delete_old_forward_extrem_cache", _delete_old_forward_extrem_cache_txn, + ) + class EventFederationStore(EventFederationWorkerStore): """ Responsible for storing and serving up the various graphs associated @@ -606,34 +636,6 @@ def __init__(self, database: DatabasePool, db_conn, hs): self.EVENT_AUTH_STATE_ONLY, self._background_delete_non_state_event_auth ) - hs.get_clock().looping_call( - self._delete_old_forward_extrem_cache, 60 * 60 * 1000 - ) - - def _delete_old_forward_extrem_cache(self): - def _delete_old_forward_extrem_cache_txn(txn): - # Delete entries older than a month, while making sure we don't delete - # the only entries for a room. - sql = """ - DELETE FROM stream_ordering_to_exterm - WHERE - room_id IN ( - SELECT room_id - FROM stream_ordering_to_exterm - WHERE stream_ordering > ? - ) AND stream_ordering < ? - """ - txn.execute( - sql, (self.stream_ordering_month_ago, self.stream_ordering_month_ago) - ) - - return run_as_background_process( - "delete_old_forward_extrem_cache", - self.db_pool.runInteraction, - "_delete_old_forward_extrem_cache", - _delete_old_forward_extrem_cache_txn, - ) - async def clean_room_for_join(self, room_id): return await self.db_pool.runInteraction( "clean_room_for_join", self._clean_room_for_join_txn, room_id diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 80f3b4d74007..2e56dfaf312a 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -13,15 +13,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- import logging from typing import Dict, List, Optional, Tuple, Union import attr -from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore, db_to_json -from synapse.storage.database import DatabasePool +from synapse.storage.database import DatabasePool, LoggingTransaction from synapse.util import json_encoder from synapse.util.caches.descriptors import cached @@ -81,8 +80,14 @@ def __init__(self, database: DatabasePool, db_conn, hs): self.find_stream_orderings_looping_call = self._clock.looping_call( self._find_stream_orderings_for_times, 10 * 60 * 1000 ) + self._rotate_delay = 3 self._rotate_count = 10000 + self._doing_notif_rotation = False + if hs.config.run_background_tasks: + self._rotate_notif_loop = self._clock.looping_call( + self._rotate_notifs, 30 * 60 * 1000 + ) @cached(num_args=3, tree=True, max_entries=5000) async def get_unread_event_push_actions_by_room_for_user( @@ -514,15 +519,14 @@ async def remove_push_actions_from_staging(self, event_id: str) -> None: "Error removing push actions after event persistence failure" ) - def _find_stream_orderings_for_times(self): - return run_as_background_process( - "event_push_action_stream_orderings", - self.db_pool.runInteraction, + @wrap_as_background_process("event_push_action_stream_orderings") + async def _find_stream_orderings_for_times(self) -> None: + await self.db_pool.runInteraction( "_find_stream_orderings_for_times", self._find_stream_orderings_for_times_txn, ) - def _find_stream_orderings_for_times_txn(self, txn): + def _find_stream_orderings_for_times_txn(self, txn: LoggingTransaction) -> None: logger.info("Searching for stream ordering 1 month ago") self.stream_ordering_month_ago = self._find_first_stream_ordering_after_ts_txn( txn, self._clock.time_msec() - 30 * 24 * 60 * 60 * 1000 @@ -652,129 +656,7 @@ def f(txn): ) return result[0] if result else None - -class EventPushActionsStore(EventPushActionsWorkerStore): - EPA_HIGHLIGHT_INDEX = "epa_highlight_index" - - def __init__(self, database: DatabasePool, db_conn, hs): - super().__init__(database, db_conn, hs) - - self.db_pool.updates.register_background_index_update( - self.EPA_HIGHLIGHT_INDEX, - index_name="event_push_actions_u_highlight", - table="event_push_actions", - columns=["user_id", "stream_ordering"], - ) - - self.db_pool.updates.register_background_index_update( - "event_push_actions_highlights_index", - index_name="event_push_actions_highlights_index", - table="event_push_actions", - columns=["user_id", "room_id", "topological_ordering", "stream_ordering"], - where_clause="highlight=1", - ) - - self._doing_notif_rotation = False - self._rotate_notif_loop = self._clock.looping_call( - self._start_rotate_notifs, 30 * 60 * 1000 - ) - - async def get_push_actions_for_user( - self, user_id, before=None, limit=50, only_highlight=False - ): - def f(txn): - before_clause = "" - if before: - before_clause = "AND epa.stream_ordering < ?" - args = [user_id, before, limit] - else: - args = [user_id, limit] - - if only_highlight: - if len(before_clause) > 0: - before_clause += " " - before_clause += "AND epa.highlight = 1" - - # NB. 
This assumes event_ids are globally unique since - # it makes the query easier to index - sql = ( - "SELECT epa.event_id, epa.room_id," - " epa.stream_ordering, epa.topological_ordering," - " epa.actions, epa.highlight, epa.profile_tag, e.received_ts" - " FROM event_push_actions epa, events e" - " WHERE epa.event_id = e.event_id" - " AND epa.user_id = ? %s" - " AND epa.notif = 1" - " ORDER BY epa.stream_ordering DESC" - " LIMIT ?" % (before_clause,) - ) - txn.execute(sql, args) - return self.db_pool.cursor_to_dict(txn) - - push_actions = await self.db_pool.runInteraction("get_push_actions_for_user", f) - for pa in push_actions: - pa["actions"] = _deserialize_action(pa["actions"], pa["highlight"]) - return push_actions - - async def get_latest_push_action_stream_ordering(self): - def f(txn): - txn.execute("SELECT MAX(stream_ordering) FROM event_push_actions") - return txn.fetchone() - - result = await self.db_pool.runInteraction( - "get_latest_push_action_stream_ordering", f - ) - return result[0] or 0 - - def _remove_old_push_actions_before_txn( - self, txn, room_id, user_id, stream_ordering - ): - """ - Purges old push actions for a user and room before a given - stream_ordering. - - We however keep a months worth of highlighted notifications, so that - users can still get a list of recent highlights. - - Args: - txn: The transcation - room_id: Room ID to delete from - user_id: user ID to delete for - stream_ordering: The lowest stream ordering which will - not be deleted. - """ - txn.call_after( - self.get_unread_event_push_actions_by_room_for_user.invalidate_many, - (room_id, user_id), - ) - - # We need to join on the events table to get the received_ts for - # event_push_actions and sqlite won't let us use a join in a delete so - # we can't just delete where received_ts < x. Furthermore we can - # only identify event_push_actions by a tuple of room_id, event_id - # we we can't use a subquery. - # Instead, we look up the stream ordering for the last event in that - # room received before the threshold time and delete event_push_actions - # in the room with a stream_odering before that. - txn.execute( - "DELETE FROM event_push_actions " - " WHERE user_id = ? AND room_id = ? AND " - " stream_ordering <= ?" - " AND ((stream_ordering < ? AND highlight = 1) or highlight = 0)", - (user_id, room_id, stream_ordering, self.stream_ordering_month_ago), - ) - - txn.execute( - """ - DELETE FROM event_push_summary - WHERE room_id = ? AND user_id = ? AND stream_ordering <= ? 
- """, - (room_id, user_id, stream_ordering), - ) - - def _start_rotate_notifs(self): - return run_as_background_process("rotate_notifs", self._rotate_notifs) - + @wrap_as_background_process("rotate_notifs") async def _rotate_notifs(self): if self._doing_notif_rotation or self.stream_ordering_day_ago is None: return @@ -954,6 +836,121 @@ def _rotate_notifs_before_txn(self, txn, rotate_to_stream_ordering): ) +class EventPushActionsStore(EventPushActionsWorkerStore): + EPA_HIGHLIGHT_INDEX = "epa_highlight_index" + + def __init__(self, database: DatabasePool, db_conn, hs): + super().__init__(database, db_conn, hs) + + self.db_pool.updates.register_background_index_update( + self.EPA_HIGHLIGHT_INDEX, + index_name="event_push_actions_u_highlight", + table="event_push_actions", + columns=["user_id", "stream_ordering"], + ) + + self.db_pool.updates.register_background_index_update( + "event_push_actions_highlights_index", + index_name="event_push_actions_highlights_index", + table="event_push_actions", + columns=["user_id", "room_id", "topological_ordering", "stream_ordering"], + where_clause="highlight=1", + ) + + async def get_push_actions_for_user( + self, user_id, before=None, limit=50, only_highlight=False + ): + def f(txn): + before_clause = "" + if before: + before_clause = "AND epa.stream_ordering < ?" + args = [user_id, before, limit] + else: + args = [user_id, limit] + + if only_highlight: + if len(before_clause) > 0: + before_clause += " " + before_clause += "AND epa.highlight = 1" + + # NB. This assumes event_ids are globally unique since + # it makes the query easier to index + sql = ( + "SELECT epa.event_id, epa.room_id," + " epa.stream_ordering, epa.topological_ordering," + " epa.actions, epa.highlight, epa.profile_tag, e.received_ts" + " FROM event_push_actions epa, events e" + " WHERE epa.event_id = e.event_id" + " AND epa.user_id = ? %s" + " AND epa.notif = 1" + " ORDER BY epa.stream_ordering DESC" + " LIMIT ?" % (before_clause,) + ) + txn.execute(sql, args) + return self.db_pool.cursor_to_dict(txn) + + push_actions = await self.db_pool.runInteraction("get_push_actions_for_user", f) + for pa in push_actions: + pa["actions"] = _deserialize_action(pa["actions"], pa["highlight"]) + return push_actions + + async def get_latest_push_action_stream_ordering(self): + def f(txn): + txn.execute("SELECT MAX(stream_ordering) FROM event_push_actions") + return txn.fetchone() + + result = await self.db_pool.runInteraction( + "get_latest_push_action_stream_ordering", f + ) + return result[0] or 0 + + def _remove_old_push_actions_before_txn( + self, txn, room_id, user_id, stream_ordering + ): + """ + Purges old push actions for a user and room before a given + stream_ordering. + + We however keep a months worth of highlighted notifications, so that + users can still get a list of recent highlights. + + Args: + txn: The transcation + room_id: Room ID to delete from + user_id: user ID to delete for + stream_ordering: The lowest stream ordering which will + not be deleted. + """ + txn.call_after( + self.get_unread_event_push_actions_by_room_for_user.invalidate_many, + (room_id, user_id), + ) + + # We need to join on the events table to get the received_ts for + # event_push_actions and sqlite won't let us use a join in a delete so + # we can't just delete where received_ts < x. Furthermore we can + # only identify event_push_actions by a tuple of room_id, event_id + # we we can't use a subquery. 
+ # Instead, we look up the stream ordering for the last event in that + # room received before the threshold time and delete event_push_actions + # in the room with a stream_odering before that. + txn.execute( + "DELETE FROM event_push_actions " + " WHERE user_id = ? AND room_id = ? AND " + " stream_ordering <= ?" + " AND ((stream_ordering < ? AND highlight = 1) or highlight = 0)", + (user_id, room_id, stream_ordering, self.stream_ordering_month_ago), + ) + + txn.execute( + """ + DELETE FROM event_push_summary + WHERE room_id = ? AND user_id = ? AND stream_ordering <= ? + """, + (room_id, user_id, stream_ordering), + ) + + def _action_has_highlight(actions): for action in actions: try: diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index c66f558567b9..d788dc0fc698 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -15,6 +15,7 @@ import logging from typing import Dict, List +from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore from synapse.storage.database import DatabasePool, make_in_list_sql_clause from synapse.util.caches.descriptors import cached @@ -127,6 +128,7 @@ async def user_last_seen_monthly_active(self, user_id: str) -> int: desc="user_last_seen_monthly_active", ) + @wrap_as_background_process("reap_monthly_active_users") async def reap_monthly_active_users(self): """Cleans out monthly active user table to ensure that no stale entries exist. diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 7fd7b0b952ca..236d3cdbe3f2 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -20,10 +20,7 @@ from synapse.api.constants import UserTypes from synapse.api.errors import Codes, StoreError, SynapseError, ThreepidValidationError -from synapse.metrics.background_process_metrics import ( - run_as_background_process, - wrap_as_background_process, -) +from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore from synapse.storage.database import DatabasePool from synapse.storage.types import Cursor @@ -53,10 +50,7 @@ def __init__(self, database: DatabasePool, db_conn, hs): self._account_validity = hs.config.account_validity if hs.config.run_background_tasks and self._account_validity.enabled: self._clock.call_later( - 0.0, - run_as_background_process, - "account_validity_set_expiration_dates", - self._set_expiration_date_when_missing, + 0.0, self._set_expiration_date_when_missing, ) # Create a background job for culling expired 3PID validity tokens @@ -812,6 +806,7 @@ def cull_expired_threepid_validation_tokens_txn(txn, ts): self.clock.time_msec(), ) + @wrap_as_background_process("account_validity_set_expiration_dates") async def _set_expiration_date_when_missing(self): """ Retrieves the list of registered users that don't have an expiration date, and From 5009ffcaa45fc3522edc04de2f2b98dc7fe5c59c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 9 Oct 2020 13:10:33 +0100 Subject: [PATCH 153/245] Only send RDATA for instance local events. (#8496) When pulling events out of the DB to send over replication we were not filtering by instance name, and so we were sending events for other instances. 
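Concretely, each writer should only put events it persisted itself onto its replication stream. The row-fetching queries below therefore gain an `instance_name` filter (and `current_state_delta_stream` / `ex_outlier_stream` gain an `instance_name` column via a schema delta). The shape of the change, heavily simplified from the real `get_all_new_forward_event_rows` and with most selected columns omitted:

    async def get_all_new_forward_event_rows(
        self, instance_name: str, last_id: int, current_id: int, limit: int
    ):
        def _fetch(txn):
            sql = (
                "SELECT stream_ordering, event_id, room_id, type"
                " FROM events"
                " WHERE ? < stream_ordering AND stream_ordering <= ?"
                " AND instance_name = ?"
                " ORDER BY stream_ordering ASC"
                " LIMIT ?"
            )
            # Filtering on instance_name means this writer only streams rows
            # that it wrote itself, rather than rows from other persisters.
            txn.execute(sql, (last_id, current_id, instance_name, limit))
            return txn.fetchall()

        return await self.db_pool.runInteraction(
            "get_all_new_forward_event_rows", _fetch
        )
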
--- changelog.d/8496.misc | 1 + synapse/replication/tcp/streams/_base.py | 11 +++++-- synapse/replication/tcp/streams/events.py | 6 ++-- synapse/storage/databases/main/events.py | 12 ++++--- .../storage/databases/main/events_worker.py | 32 +++++++++++-------- .../delta/58/20instance_name_event_tables.sql | 17 ++++++++++ 6 files changed, 54 insertions(+), 25 deletions(-) create mode 100644 changelog.d/8496.misc create mode 100644 synapse/storage/databases/main/schema/delta/58/20instance_name_event_tables.sql diff --git a/changelog.d/8496.misc b/changelog.d/8496.misc new file mode 100644 index 000000000000..237cb3b31135 --- /dev/null +++ b/changelog.d/8496.misc @@ -0,0 +1 @@ +Allow events to be sent to clients sooner when using sharded event persisters. diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index 54dccd15a627..61b282ab2dab 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -240,13 +240,18 @@ class BackfillStream(Stream): ROW_TYPE = BackfillStreamRow def __init__(self, hs): - store = hs.get_datastore() + self.store = hs.get_datastore() super().__init__( hs.get_instance_name(), - current_token_without_instance(store.get_current_backfill_token), - store.get_all_new_backfill_event_rows, + self._current_token, + self.store.get_all_new_backfill_event_rows, ) + def _current_token(self, instance_name: str) -> int: + # The backfill stream over replication operates on *positive* numbers, + # which means we need to negate it. + return -self.store._backfill_id_gen.get_current_token_for_writer(instance_name) + class PresenceStream(Stream): PresenceStreamRow = namedtuple( diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py index ccc7ca30d8a9..82e9e0d64ece 100644 --- a/synapse/replication/tcp/streams/events.py +++ b/synapse/replication/tcp/streams/events.py @@ -155,7 +155,7 @@ async def _update_function( # now we fetch up to that many rows from the events table event_rows = await self._store.get_all_new_forward_event_rows( - from_token, current_token, target_row_count + instance_name, from_token, current_token, target_row_count ) # type: List[Tuple] # we rely on get_all_new_forward_event_rows strictly honouring the limit, so @@ -180,7 +180,7 @@ async def _update_function( upper_limit, state_rows_limited, ) = await self._store.get_all_updated_current_state_deltas( - from_token, upper_limit, target_row_count + instance_name, from_token, upper_limit, target_row_count ) limited = limited or state_rows_limited @@ -189,7 +189,7 @@ async def _update_function( # not to bother with the limit. ex_outliers_rows = await self._store.get_ex_outlier_stream_rows( - from_token, upper_limit + instance_name, from_token, upper_limit ) # type: List[Tuple] # we now need to turn the raw database rows returned into tuples suitable diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index b4abd961b97f..b19c424ba952 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -426,12 +426,12 @@ def _update_current_state_txn( # so that async background tasks get told what happened. 
sql = """ INSERT INTO current_state_delta_stream - (stream_id, room_id, type, state_key, event_id, prev_event_id) - SELECT ?, room_id, type, state_key, null, event_id + (stream_id, instance_name, room_id, type, state_key, event_id, prev_event_id) + SELECT ?, ?, room_id, type, state_key, null, event_id FROM current_state_events WHERE room_id = ? """ - txn.execute(sql, (stream_id, room_id)) + txn.execute(sql, (stream_id, self._instance_name, room_id)) self.db_pool.simple_delete_txn( txn, table="current_state_events", keyvalues={"room_id": room_id}, @@ -452,8 +452,8 @@ def _update_current_state_txn( # sql = """ INSERT INTO current_state_delta_stream - (stream_id, room_id, type, state_key, event_id, prev_event_id) - SELECT ?, ?, ?, ?, ?, ( + (stream_id, instance_name, room_id, type, state_key, event_id, prev_event_id) + SELECT ?, ?, ?, ?, ?, ?, ( SELECT event_id FROM current_state_events WHERE room_id = ? AND type = ? AND state_key = ? ) @@ -463,6 +463,7 @@ def _update_current_state_txn( ( ( stream_id, + self._instance_name, room_id, etype, state_key, @@ -755,6 +756,7 @@ def _update_outliers_txn(self, txn, events_and_contexts): "event_stream_ordering": stream_order, "event_id": event.event_id, "state_group": state_group_id, + "instance_name": self._instance_name, }, ) diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index b7ed8ca6ab06..4e74fafe43d1 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -1034,16 +1034,12 @@ async def get_room_complexity(self, room_id): return {"v1": complexity_v1} - def get_current_backfill_token(self): - """The current minimum token that backfilled events have reached""" - return -self._backfill_id_gen.get_current_token() - def get_current_events_token(self): """The current maximum token that events have reached""" return self._stream_id_gen.get_current_token() async def get_all_new_forward_event_rows( - self, last_id: int, current_id: int, limit: int + self, instance_name: str, last_id: int, current_id: int, limit: int ) -> List[Tuple]: """Returns new events, for the Events replication stream @@ -1067,10 +1063,11 @@ def get_all_new_forward_event_rows(txn): " LEFT JOIN state_events USING (event_id)" " LEFT JOIN event_relations USING (event_id)" " WHERE ? < stream_ordering AND stream_ordering <= ?" + " AND instance_name = ?" " ORDER BY stream_ordering ASC" " LIMIT ?" ) - txn.execute(sql, (last_id, current_id, limit)) + txn.execute(sql, (last_id, current_id, instance_name, limit)) return txn.fetchall() return await self.db_pool.runInteraction( @@ -1078,7 +1075,7 @@ def get_all_new_forward_event_rows(txn): ) async def get_ex_outlier_stream_rows( - self, last_id: int, current_id: int + self, instance_name: str, last_id: int, current_id: int ) -> List[Tuple]: """Returns de-outliered events, for the Events replication stream @@ -1097,16 +1094,17 @@ def get_ex_outlier_stream_rows_txn(txn): "SELECT event_stream_ordering, e.event_id, e.room_id, e.type," " state_key, redacts, relates_to_id" " FROM events AS e" - " INNER JOIN ex_outlier_stream USING (event_id)" + " INNER JOIN ex_outlier_stream AS out USING (event_id)" " LEFT JOIN redactions USING (event_id)" " LEFT JOIN state_events USING (event_id)" " LEFT JOIN event_relations USING (event_id)" " WHERE ? < event_stream_ordering" " AND event_stream_ordering <= ?" + " AND out.instance_name = ?" 
" ORDER BY event_stream_ordering ASC" ) - txn.execute(sql, (last_id, current_id)) + txn.execute(sql, (last_id, current_id, instance_name)) return txn.fetchall() return await self.db_pool.runInteraction( @@ -1119,6 +1117,9 @@ async def get_all_new_backfill_event_rows( """Get updates for backfill replication stream, including all new backfilled events and events that have gone from being outliers to not. + NOTE: The IDs given here are from replication, and so should be + *positive*. + Args: instance_name: The writer we want to fetch updates from. Unused here since there is only ever one writer. @@ -1149,10 +1150,11 @@ def get_all_new_backfill_event_rows(txn): " LEFT JOIN state_events USING (event_id)" " LEFT JOIN event_relations USING (event_id)" " WHERE ? > stream_ordering AND stream_ordering >= ?" + " AND instance_name = ?" " ORDER BY stream_ordering ASC" " LIMIT ?" ) - txn.execute(sql, (-last_id, -current_id, limit)) + txn.execute(sql, (-last_id, -current_id, instance_name, limit)) new_event_updates = [(row[0], row[1:]) for row in txn] limited = False @@ -1166,15 +1168,16 @@ def get_all_new_backfill_event_rows(txn): "SELECT -event_stream_ordering, e.event_id, e.room_id, e.type," " state_key, redacts, relates_to_id" " FROM events AS e" - " INNER JOIN ex_outlier_stream USING (event_id)" + " INNER JOIN ex_outlier_stream AS out USING (event_id)" " LEFT JOIN redactions USING (event_id)" " LEFT JOIN state_events USING (event_id)" " LEFT JOIN event_relations USING (event_id)" " WHERE ? > event_stream_ordering" " AND event_stream_ordering >= ?" + " AND out.instance_name = ?" " ORDER BY event_stream_ordering DESC" ) - txn.execute(sql, (-last_id, -upper_bound)) + txn.execute(sql, (-last_id, -upper_bound, instance_name)) new_event_updates.extend((row[0], row[1:]) for row in txn) if len(new_event_updates) >= limit: @@ -1188,7 +1191,7 @@ def get_all_new_backfill_event_rows(txn): ) async def get_all_updated_current_state_deltas( - self, from_token: int, to_token: int, target_row_count: int + self, instance_name: str, from_token: int, to_token: int, target_row_count: int ) -> Tuple[List[Tuple], int, bool]: """Fetch updates from current_state_delta_stream @@ -1214,9 +1217,10 @@ def get_all_updated_current_state_deltas_txn(txn): SELECT stream_id, room_id, type, state_key, event_id FROM current_state_delta_stream WHERE ? < stream_id AND stream_id <= ? + AND instance_name = ? ORDER BY stream_id ASC LIMIT ? """ - txn.execute(sql, (from_token, to_token, target_row_count)) + txn.execute(sql, (from_token, to_token, instance_name, target_row_count)) return txn.fetchall() def get_deltas_for_stream_id_txn(txn, stream_id): diff --git a/synapse/storage/databases/main/schema/delta/58/20instance_name_event_tables.sql b/synapse/storage/databases/main/schema/delta/58/20instance_name_event_tables.sql new file mode 100644 index 000000000000..ad1f48142880 --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/58/20instance_name_event_tables.sql @@ -0,0 +1,17 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ALTER TABLE current_state_delta_stream ADD COLUMN instance_name TEXT;
+ALTER TABLE ex_outlier_stream ADD COLUMN instance_name TEXT;

From 66ac4b1e34193f5aadfbe1966427093fde09befb Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Fri, 9 Oct 2020 13:46:36 +0100
Subject: [PATCH 154/245] Allow modules to create and send events into rooms (#8479)

This PR allows Synapse modules making use of the `ModuleApi` to create and send non-membership events into a room. This can be useful to have modules send messages, or change power levels in a room, etc. Note that they must send events through a user that's already in the room.

The non-membership event limitation is currently arbitrary, as it's another chunk of work and not necessary at the moment.
---
 changelog.d/8479.feature | 1 +
 synapse/handlers/message.py | 11 ++-
 synapse/module_api/__init__.py | 30 ++++++-
 tests/module_api/test_api.py | 95 +++++++++++++++++++++
 tests/rest/client/test_third_party_rules.py | 28 +++++-
 5 files changed, 157 insertions(+), 8 deletions(-)
 create mode 100644 changelog.d/8479.feature

diff --git a/changelog.d/8479.feature b/changelog.d/8479.feature
new file mode 100644
index 000000000000..11adeec8a987
--- /dev/null
+++ b/changelog.d/8479.feature
@@ -0,0 +1 @@
+Add the ability to send non-membership events into a room via the `ModuleApi`.
\ No newline at end of file
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 33d133a4b21b..ad0b7bd868bf 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -59,6 +59,7 @@ from ._base import BaseHandler
 
 if TYPE_CHECKING:
+    from synapse.events.third_party_rules import ThirdPartyEventRules
     from synapse.server import HomeServer
 
 logger = logging.getLogger(__name__)
@@ -393,7 +394,9 @@ def __init__(self, hs: "HomeServer"):
         self.action_generator = hs.get_action_generator()
         self.spam_checker = hs.get_spam_checker()
 
-        self.third_party_event_rules = hs.get_third_party_event_rules()
+        self.third_party_event_rules = (
+            self.hs.get_third_party_event_rules()
+        )  # type: ThirdPartyEventRules
 
         self._block_events_without_consent_error = (
             self.config.block_events_without_consent_error
@@ -1229,11 +1232,7 @@ async def _send_dummy_event_for_room(self, room_id: str) -> bool:
                 # Since this is a dummy-event it is OK if it is sent by a
                 # shadow-banned user.
await self.handle_new_client_event( - requester=requester, - event=event, - context=context, - ratelimit=False, - ignore_shadow_ban=True, + requester, event, context, ratelimit=False, ignore_shadow_ban=True, ) return True except ConsentNotGivenError: diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index b410e3ad9c55..014254285251 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -18,11 +18,12 @@ from twisted.internet import defer +from synapse.events import EventBase from synapse.http.client import SimpleHttpClient from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.storage.state import StateFilter -from synapse.types import UserID +from synapse.types import JsonDict, UserID, create_requester if TYPE_CHECKING: from synapse.server import HomeServer @@ -320,6 +321,33 @@ def get_state_events_in_room( state = yield defer.ensureDeferred(self._store.get_events(state_ids.values())) return state.values() + async def create_and_send_event_into_room(self, event_dict: JsonDict) -> EventBase: + """Create and send an event into a room. Membership events are currently not supported. + + Args: + event_dict: A dictionary representing the event to send. + Required keys are `type`, `room_id`, `sender` and `content`. + + Returns: + The event that was sent. If state event deduplication happened, then + the previous, duplicate event instead. + + Raises: + SynapseError if the event was not allowed. + """ + # Create a requester object + requester = create_requester(event_dict["sender"]) + + # Create and send the event + ( + event, + _, + ) = await self._hs.get_event_creation_handler().create_and_send_nonmember_event( + requester, event_dict, ratelimit=False, ignore_shadow_ban=True, + ) + + return event + class PublicRoomListManager: """Contains methods for adding to, removing from and querying whether a room diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 7c790bee7dca..9b573ac24dd9 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -12,9 +12,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from mock import Mock +from synapse.events import EventBase from synapse.rest import admin from synapse.rest.client.v1 import login, room +from synapse.types import create_requester from tests.unittest import HomeserverTestCase @@ -29,6 +32,7 @@ class ModuleApiTestCase(HomeserverTestCase): def prepare(self, reactor, clock, homeserver): self.store = homeserver.get_datastore() self.module_api = homeserver.get_module_api() + self.event_creation_handler = homeserver.get_event_creation_handler() def test_can_register_user(self): """Tests that an external module can register a user""" @@ -60,6 +64,97 @@ def test_can_register_user(self): displayname = self.get_success(self.store.get_profile_displayname("bob")) self.assertEqual(displayname, "Bobberino") + def test_sending_events_into_room(self): + """Tests that a module can send events into a room""" + # Mock out create_and_send_nonmember_event to check whether events are being sent + self.event_creation_handler.create_and_send_nonmember_event = Mock( + spec=[], + side_effect=self.event_creation_handler.create_and_send_nonmember_event, + ) + + # Create a user and room to play with + user_id = self.register_user("summer", "monkey") + tok = self.login("summer", "monkey") + room_id = self.helper.create_room_as(user_id, tok=tok) + + # Create and send a non-state event + content = {"body": "I am a puppet", "msgtype": "m.text"} + event_dict = { + "room_id": room_id, + "type": "m.room.message", + "content": content, + "sender": user_id, + } + event = self.get_success( + self.module_api.create_and_send_event_into_room(event_dict) + ) # type: EventBase + self.assertEqual(event.sender, user_id) + self.assertEqual(event.type, "m.room.message") + self.assertEqual(event.room_id, room_id) + self.assertFalse(hasattr(event, "state_key")) + self.assertDictEqual(event.content, content) + + # Check that the event was sent + self.event_creation_handler.create_and_send_nonmember_event.assert_called_with( + create_requester(user_id), + event_dict, + ratelimit=False, + ignore_shadow_ban=True, + ) + + # Create and send a state event + content = { + "events_default": 0, + "users": {user_id: 100}, + "state_default": 50, + "users_default": 0, + "events": {"test.event.type": 25}, + } + event_dict = { + "room_id": room_id, + "type": "m.room.power_levels", + "content": content, + "sender": user_id, + "state_key": "", + } + event = self.get_success( + self.module_api.create_and_send_event_into_room(event_dict) + ) # type: EventBase + self.assertEqual(event.sender, user_id) + self.assertEqual(event.type, "m.room.power_levels") + self.assertEqual(event.room_id, room_id) + self.assertEqual(event.state_key, "") + self.assertDictEqual(event.content, content) + + # Check that the event was sent + self.event_creation_handler.create_and_send_nonmember_event.assert_called_with( + create_requester(user_id), + { + "type": "m.room.power_levels", + "content": content, + "room_id": room_id, + "sender": user_id, + "state_key": "", + }, + ratelimit=False, + ignore_shadow_ban=True, + ) + + # Check that we can't send membership events + content = { + "membership": "leave", + } + event_dict = { + "room_id": room_id, + "type": "m.room.member", + "content": content, + "sender": user_id, + "state_key": user_id, + } + self.get_failure( + self.module_api.create_and_send_event_into_room(event_dict), Exception + ) + def test_public_rooms(self): """Tests that a room can be added and removed from the public rooms list, as well as have its public rooms directory state queried. 
diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index c12518c93105..d03e12166498 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -13,10 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. import threading +from typing import Dict from mock import Mock from synapse.events import EventBase +from synapse.module_api import ModuleApi from synapse.rest import admin from synapse.rest.client.v1 import login, room from synapse.types import Requester, StateMap @@ -27,10 +29,11 @@ class ThirdPartyRulesTestModule: - def __init__(self, config, module_api): + def __init__(self, config: Dict, module_api: ModuleApi): # keep a record of the "current" rules module, so that the test can patch # it if desired. thread_local.rules_module = self + self.module_api = module_api async def on_create_room( self, requester: Requester, config: dict, is_requester_admin: bool @@ -142,3 +145,26 @@ async def check(ev: EventBase, state): self.assertEqual(channel.result["code"], b"200", channel.result) ev = channel.json_body self.assertEqual(ev["content"]["x"], "y") + + def test_send_event(self): + """Tests that the module can send an event into a room via the module api""" + content = { + "msgtype": "m.text", + "body": "Hello!", + } + event_dict = { + "room_id": self.room_id, + "type": "m.room.message", + "content": content, + "sender": self.user_id, + } + event = self.get_success( + current_rules_module().module_api.create_and_send_event_into_room( + event_dict + ) + ) # type: EventBase + + self.assertEquals(event.sender, self.user_id) + self.assertEquals(event.room_id, self.room_id) + self.assertEquals(event.type, "m.room.message") + self.assertEquals(event.content, content) From 1781bbe319ce24e8e468f0422519dc5823d8d420 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 9 Oct 2020 11:35:11 -0400 Subject: [PATCH 155/245] Add type hints to response cache. (#8507) --- changelog.d/8507.misc | 1 + mypy.ini | 1 + synapse/appservice/api.py | 4 +- synapse/federation/federation_server.py | 8 ++-- synapse/handlers/initial_sync.py | 10 +++-- synapse/handlers/room.py | 2 +- synapse/handlers/sync.py | 4 +- synapse/replication/http/_base.py | 2 +- synapse/util/caches/response_cache.py | 50 ++++++++++++++----------- 9 files changed, 48 insertions(+), 34 deletions(-) create mode 100644 changelog.d/8507.misc diff --git a/changelog.d/8507.misc b/changelog.d/8507.misc new file mode 100644 index 000000000000..724da8a9960e --- /dev/null +++ b/changelog.d/8507.misc @@ -0,0 +1 @@ + Add type hints to various parts of the code base. diff --git a/mypy.ini b/mypy.ini index 19b60f7534d7..f08fe992a4d5 100644 --- a/mypy.ini +++ b/mypy.ini @@ -65,6 +65,7 @@ files = synapse/types.py, synapse/util/async_helpers.py, synapse/util/caches/descriptors.py, + synapse/util/caches/response_cache.py, synapse/util/caches/stream_change_cache.py, synapse/util/metrics.py, tests/replication, diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index c526c28b9307..e8f07937952b 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -14,7 +14,7 @@ # limitations under the License. 
import logging import urllib -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING, Optional, Tuple from prometheus_client import Counter @@ -93,7 +93,7 @@ def __init__(self, hs): self.protocol_meta_cache = ResponseCache( hs, "as_protocol_meta", timeout_ms=HOUR_IN_MS - ) + ) # type: ResponseCache[Tuple[str, str]] async def query_user(self, service, user_id): if service.url is None: diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index e8039e244ccc..23278e36b733 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -116,7 +116,7 @@ def __init__(self, hs): # We cache results for transaction with the same ID self._transaction_resp_cache = ResponseCache( hs, "fed_txn_handler", timeout_ms=30000 - ) + ) # type: ResponseCache[Tuple[str, str]] self.transaction_actions = TransactionActions(self.store) @@ -124,10 +124,12 @@ def __init__(self, hs): # We cache responses to state queries, as they take a while and often # come in waves. - self._state_resp_cache = ResponseCache(hs, "state_resp", timeout_ms=30000) + self._state_resp_cache = ResponseCache( + hs, "state_resp", timeout_ms=30000 + ) # type: ResponseCache[Tuple[str, str]] self._state_ids_resp_cache = ResponseCache( hs, "state_ids_resp", timeout_ms=30000 - ) + ) # type: ResponseCache[Tuple[str, str]] self._federation_metrics_domains = ( hs.get_config().federation.federation_metrics_domains diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 39a85801c1ad..98075f48d2b3 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -14,7 +14,7 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional, Tuple from twisted.internet import defer @@ -47,12 +47,14 @@ def __init__(self, hs: "HomeServer"): self.state = hs.get_state_handler() self.clock = hs.get_clock() self.validator = EventValidator() - self.snapshot_cache = ResponseCache(hs, "initial_sync_cache") + self.snapshot_cache = ResponseCache( + hs, "initial_sync_cache" + ) # type: ResponseCache[Tuple[str, Optional[StreamToken], Optional[StreamToken], str, Optional[int], bool, bool]] self._event_serializer = hs.get_event_client_serializer() self.storage = hs.get_storage() self.state_store = self.storage.state - def snapshot_all_rooms( + async def snapshot_all_rooms( self, user_id: str, pagin_config: PaginationConfig, @@ -84,7 +86,7 @@ def snapshot_all_rooms( include_archived, ) - return self.snapshot_cache.wrap( + return await self.snapshot_cache.wrap( key, self._snapshot_all_rooms, user_id, diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 1d04d41e9893..93ed51063ac2 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -120,7 +120,7 @@ def __init__(self, hs: "HomeServer"): # subsequent requests self._upgrade_response_cache = ResponseCache( hs, "room_upgrade", timeout_ms=FIVE_MINUTES_IN_MS - ) + ) # type: ResponseCache[Tuple[str, str]] self._server_notices_mxid = hs.config.server_notices_mxid self.third_party_event_rules = hs.get_third_party_event_rules() diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 6fb8332f9365..a3066310942e 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -243,7 +243,9 @@ def __init__(self, hs: "HomeServer"): self.presence_handler = hs.get_presence_handler() self.event_sources = hs.get_event_sources() self.clock = hs.get_clock() - 
self.response_cache = ResponseCache(hs, "sync") + self.response_cache = ResponseCache( + hs, "sync" + ) # type: ResponseCache[Tuple[Any, ...]] self.state = hs.get_state_handler() self.auth = hs.get_auth() self.storage = hs.get_storage() diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index 64edadb624c1..2b3972cb1418 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -92,7 +92,7 @@ def __init__(self, hs): if self.CACHE: self.response_cache = ResponseCache( hs, "repl." + self.NAME, timeout_ms=30 * 60 * 1000 - ) + ) # type: ResponseCache[str] # We reserve `instance_name` as a parameter to sending requests, so we # assert here that sub classes don't try and use the name. diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index df1a721adda8..32228f42ee59 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +from typing import TYPE_CHECKING, Any, Callable, Dict, Generic, Optional, TypeVar from twisted.internet import defer @@ -20,10 +21,15 @@ from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches import register_cache +if TYPE_CHECKING: + from synapse.app.homeserver import HomeServer + logger = logging.getLogger(__name__) +T = TypeVar("T") + -class ResponseCache: +class ResponseCache(Generic[T]): """ This caches a deferred response. Until the deferred completes it will be returned from the cache. This means that if the client retries the request @@ -31,8 +37,9 @@ class ResponseCache: used rather than trying to compute a new response. """ - def __init__(self, hs, name, timeout_ms=0): - self.pending_result_cache = {} # Requests that haven't finished yet. + def __init__(self, hs: "HomeServer", name: str, timeout_ms: float = 0): + # Requests that haven't finished yet. + self.pending_result_cache = {} # type: Dict[T, ObservableDeferred] self.clock = hs.get_clock() self.timeout_sec = timeout_ms / 1000.0 @@ -40,13 +47,13 @@ def __init__(self, hs, name, timeout_ms=0): self._name = name self._metrics = register_cache("response_cache", name, self, resizable=False) - def size(self): + def size(self) -> int: return len(self.pending_result_cache) - def __len__(self): + def __len__(self) -> int: return self.size() - def get(self, key): + def get(self, key: T) -> Optional[defer.Deferred]: """Look up the given key. Can return either a new Deferred (which also doesn't follow the synapse @@ -58,12 +65,11 @@ def get(self, key): from an absent cache entry. Args: - key (hashable): + key: key to get/set in the cache Returns: - twisted.internet.defer.Deferred|None|E: None if there is no entry - for this key; otherwise either a deferred result or the result - itself. + None if there is no entry for this key; otherwise a deferred which + resolves to the result. """ result = self.pending_result_cache.get(key) if result is not None: @@ -73,7 +79,7 @@ def get(self, key): self._metrics.inc_misses() return None - def set(self, key, deferred): + def set(self, key: T, deferred: defer.Deferred) -> defer.Deferred: """Set the entry for the given key to the given deferred. *deferred* should run its callbacks in the sentinel logcontext (ie, @@ -85,12 +91,11 @@ def set(self, key, deferred): result. You will probably want to make_deferred_yieldable the result. 
Args: - key (hashable): - deferred (twisted.internet.defer.Deferred[T): + key: key to get/set in the cache + deferred: The deferred which resolves to the result. Returns: - twisted.internet.defer.Deferred[T]|T: a new deferred, or the actual - result. + A new deferred which resolves to the actual result. """ result = ObservableDeferred(deferred, consumeErrors=True) self.pending_result_cache[key] = result @@ -107,7 +112,9 @@ def remove(r): result.addBoth(remove) return result.observe() - def wrap(self, key, callback, *args, **kwargs): + def wrap( + self, key: T, callback: "Callable[..., Any]", *args: Any, **kwargs: Any + ) -> defer.Deferred: """Wrap together a *get* and *set* call, taking care of logcontexts First looks up the key in the cache, and if it is present makes it @@ -118,21 +125,20 @@ def wrap(self, key, callback, *args, **kwargs): Example usage: - @defer.inlineCallbacks - def handle_request(request): + async def handle_request(request): # etc return result - result = yield response_cache.wrap( + result = await response_cache.wrap( key, handle_request, request, ) Args: - key (hashable): key to get/set in the cache + key: key to get/set in the cache - callback (callable): function to call if the key is not found in + callback: function to call if the key is not found in the cache *args: positional parameters to pass to the callback, if it is used @@ -140,7 +146,7 @@ def handle_request(request): **kwargs: named parameters to pass to the callback, if it is used Returns: - twisted.internet.defer.Deferred: yieldable result + Deferred which resolves to the result """ result = self.get(key) if not result: From ca2db5dd0c9fc430a931b4d456fea6a5300b8b42 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Przyby=C5=82owicz?= Date: Fri, 9 Oct 2020 17:58:23 +0200 Subject: [PATCH 156/245] Increase default max_upload_size from 10M to 50M (#8502) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Mateusz Przybyłowicz --- changelog.d/8502.feature | 1 + docker/conf/homeserver.yaml | 2 +- docs/reverse_proxy.md | 2 +- docs/sample_config.yaml | 2 +- synapse/config/repository.py | 4 ++-- 5 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 changelog.d/8502.feature diff --git a/changelog.d/8502.feature b/changelog.d/8502.feature new file mode 100644 index 000000000000..faab8d30422d --- /dev/null +++ b/changelog.d/8502.feature @@ -0,0 +1 @@ +Increase default upload size limit from 10M to 50M. Contributed by @Akkowicz. 
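Note that only the *default* changes here; admins who relied on the previous cap can pin the option explicitly in `homeserver.yaml`. A minimal illustrative snippet (the option name comes from the config handling changed below, the value is just an example):

```
# Keep the previous cap rather than the new 50M default.
max_upload_size: 10M
```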
diff --git a/docker/conf/homeserver.yaml b/docker/conf/homeserver.yaml index c1110f0f538e..a808485c12a3 100644 --- a/docker/conf/homeserver.yaml +++ b/docker/conf/homeserver.yaml @@ -90,7 +90,7 @@ federation_rc_concurrent: 3 media_store_path: "/data/media" uploads_path: "/data/uploads" -max_upload_size: "{{ SYNAPSE_MAX_UPLOAD_SIZE or "10M" }}" +max_upload_size: "{{ SYNAPSE_MAX_UPLOAD_SIZE or "50M" }}" max_image_pixels: "32M" dynamic_thumbnails: false diff --git a/docs/reverse_proxy.md b/docs/reverse_proxy.md index 46d8f3577122..c7020f2df3cd 100644 --- a/docs/reverse_proxy.md +++ b/docs/reverse_proxy.md @@ -54,7 +54,7 @@ server { proxy_set_header X-Forwarded-For $remote_addr; # Nginx by default only allows file uploads up to 1M in size # Increase client_max_body_size to match max_upload_size defined in homeserver.yaml - client_max_body_size 10M; + client_max_body_size 50M; } } ``` diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index bb64662e2883..061226ea6fc1 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -893,7 +893,7 @@ media_store_path: "DATADIR/media_store" # The largest allowed upload size in bytes # -#max_upload_size: 10M +#max_upload_size: 50M # Maximum number of pixels that will be thumbnailed # diff --git a/synapse/config/repository.py b/synapse/config/repository.py index 01009f39241a..ba1e9d23612c 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -100,7 +100,7 @@ def read_config(self, config, **kwargs): "media_instance_running_background_jobs", ) - self.max_upload_size = self.parse_size(config.get("max_upload_size", "10M")) + self.max_upload_size = self.parse_size(config.get("max_upload_size", "50M")) self.max_image_pixels = self.parse_size(config.get("max_image_pixels", "32M")) self.max_spider_size = self.parse_size(config.get("max_spider_size", "10M")) @@ -242,7 +242,7 @@ def generate_config_section(self, data_dir_path, **kwargs): # The largest allowed upload size in bytes # - #max_upload_size: 10M + #max_upload_size: 50M # Maximum number of pixels that will be thumbnailed # From 9789b1fba541a5ae01b946770416729e5b7e5b7e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 9 Oct 2020 17:22:25 +0100 Subject: [PATCH 157/245] Fix threadsafety in ThreadedMemoryReactorClock (#8497) This could, very occasionally, cause: ``` tests.test_visibility.FilterEventsForServerTestCase.test_large_room =============================================================================== [ERROR] Traceback (most recent call last): File "/src/tests/rest/media/v1/test_media_storage.py", line 86, in test_ensure_media_is_in_local_cache self.wait_on_thread(x) File "/src/tests/unittest.py", line 296, in wait_on_thread self.reactor.advance(0.01) File "/src/.tox/py35/lib/python3.5/site-packages/twisted/internet/task.py", line 826, in advance self._sortCalls() File "/src/.tox/py35/lib/python3.5/site-packages/twisted/internet/task.py", line 787, in _sortCalls self.calls.sort(key=lambda a: a.getTime()) builtins.ValueError: list modified during sort tests.rest.media.v1.test_media_storage.MediaStorageTests.test_ensure_media_is_in_local_cache ``` --- changelog.d/8497.misc | 1 + tests/server.py | 36 ++++++++++++++++++++++++++++++++---- 2 files changed, 33 insertions(+), 4 deletions(-) create mode 100644 changelog.d/8497.misc diff --git a/changelog.d/8497.misc b/changelog.d/8497.misc new file mode 100644 index 000000000000..8bc05e8df63b --- /dev/null +++ b/changelog.d/8497.misc @@ -0,0 +1 @@ +Fix a 
threadsafety bug in unit tests. diff --git a/tests/server.py b/tests/server.py index f7f5276b2152..422c8b42ca51 100644 --- a/tests/server.py +++ b/tests/server.py @@ -1,8 +1,11 @@ import json import logging +from collections import deque from io import SEEK_END, BytesIO +from typing import Callable import attr +from typing_extensions import Deque from zope.interface import implementer from twisted.internet import address, threads, udp @@ -251,6 +254,7 @@ def __init__(self): self._tcp_callbacks = {} self._udp = [] lookups = self.lookups = {} + self._thread_callbacks = deque() # type: Deque[Callable[[], None]]() @implementer(IResolverSimple) class FakeResolver: @@ -272,10 +276,10 @@ def callFromThread(self, callback, *args, **kwargs): """ Make the callback fire in the next reactor iteration. """ - d = Deferred() - d.addCallback(lambda x: callback(*args, **kwargs)) - self.callLater(0, d.callback, True) - return d + cb = lambda: callback(*args, **kwargs) + # it's not safe to call callLater() here, so we append the callback to a + # separate queue. + self._thread_callbacks.append(cb) def getThreadPool(self): return self.threadpool @@ -303,6 +307,30 @@ def connectTCP(self, host, port, factory, timeout=30, bindAddress=None): return conn + def advance(self, amount): + # first advance our reactor's time, and run any "callLater" callbacks that + # makes ready + super().advance(amount) + + # now run any "callFromThread" callbacks + while True: + try: + callback = self._thread_callbacks.popleft() + except IndexError: + break + callback() + + # check for more "callLater" callbacks added by the thread callback + # This isn't required in a regular reactor, but it ends up meaning that + # our database queries can complete in a single call to `advance` [1] which + # simplifies tests. + # + # [1]: we replace the threadpool backing the db connection pool with a + # mock ThreadPool which doesn't really use threads; but we still use + # reactor.callFromThread to feed results back from the db functions to the + # main thread. + super().advance(0) + class ThreadPool: """ From d35a451399d5bb15ba0b452c26719474371298d7 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 9 Oct 2020 14:19:29 -0400 Subject: [PATCH 158/245] Clean-up some broken/unused code in the test framework (#8514) --- changelog.d/8514.misc | 1 + tests/server.py | 2 - tests/utils.py | 122 +++++++++++++++++++----------------------- 3 files changed, 55 insertions(+), 70 deletions(-) create mode 100644 changelog.d/8514.misc diff --git a/changelog.d/8514.misc b/changelog.d/8514.misc new file mode 100644 index 000000000000..0e7ac4f2207d --- /dev/null +++ b/changelog.d/8514.misc @@ -0,0 +1 @@ +Remove unused code from the test framework. diff --git a/tests/server.py b/tests/server.py index 422c8b42ca51..4d33b8409736 100644 --- a/tests/server.py +++ b/tests/server.py @@ -367,8 +367,6 @@ def setup_test_homeserver(cleanup_func, *args, **kwargs): """ server = _sth(cleanup_func, *args, **kwargs) - database = server.config.database.get_single_database() - # Make the thread pool synchronous. 
clock = server.get_clock() diff --git a/tests/utils.py b/tests/utils.py index af563ffe0f5d..0c09f5457fb0 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -192,7 +192,6 @@ class TestHomeServer(HomeServer): def setup_test_homeserver( cleanup_func, name="test", - datastore=None, config=None, reactor=None, homeserverToUse=TestHomeServer, @@ -249,7 +248,7 @@ def setup_test_homeserver( # Create the database before we actually try and connect to it, based off # the template database we generate in setupdb() - if datastore is None and isinstance(db_engine, PostgresEngine): + if isinstance(db_engine, PostgresEngine): db_conn = db_engine.module.connect( database=POSTGRES_BASE_DB, user=POSTGRES_USER, @@ -265,79 +264,66 @@ def setup_test_homeserver( cur.close() db_conn.close() - if datastore is None: - hs = homeserverToUse( - name, - config=config, - version_string="Synapse/tests", - tls_server_context_factory=Mock(), - tls_client_options_factory=Mock(), - reactor=reactor, - **kargs - ) + hs = homeserverToUse( + name, + config=config, + version_string="Synapse/tests", + tls_server_context_factory=Mock(), + tls_client_options_factory=Mock(), + reactor=reactor, + **kargs + ) - hs.setup() - if homeserverToUse.__name__ == "TestHomeServer": - hs.setup_background_tasks() + hs.setup() + if homeserverToUse.__name__ == "TestHomeServer": + hs.setup_background_tasks() - if isinstance(db_engine, PostgresEngine): - database = hs.get_datastores().databases[0] + if isinstance(db_engine, PostgresEngine): + database = hs.get_datastores().databases[0] - # We need to do cleanup on PostgreSQL - def cleanup(): - import psycopg2 + # We need to do cleanup on PostgreSQL + def cleanup(): + import psycopg2 - # Close all the db pools - database._db_pool.close() + # Close all the db pools + database._db_pool.close() - dropped = False + dropped = False - # Drop the test database - db_conn = db_engine.module.connect( - database=POSTGRES_BASE_DB, - user=POSTGRES_USER, - host=POSTGRES_HOST, - password=POSTGRES_PASSWORD, - ) - db_conn.autocommit = True - cur = db_conn.cursor() - - # Try a few times to drop the DB. Some things may hold on to the - # database for a few more seconds due to flakiness, preventing - # us from dropping it when the test is over. If we can't drop - # it, warn and move on. - for x in range(5): - try: - cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,)) - db_conn.commit() - dropped = True - except psycopg2.OperationalError as e: - warnings.warn( - "Couldn't drop old db: " + str(e), category=UserWarning - ) - time.sleep(0.5) - - cur.close() - db_conn.close() - - if not dropped: - warnings.warn("Failed to drop old DB.", category=UserWarning) - - if not LEAVE_DB: - # Register the cleanup hook - cleanup_func(cleanup) + # Drop the test database + db_conn = db_engine.module.connect( + database=POSTGRES_BASE_DB, + user=POSTGRES_USER, + host=POSTGRES_HOST, + password=POSTGRES_PASSWORD, + ) + db_conn.autocommit = True + cur = db_conn.cursor() - else: - hs = homeserverToUse( - name, - datastore=datastore, - config=config, - version_string="Synapse/tests", - tls_server_context_factory=Mock(), - tls_client_options_factory=Mock(), - reactor=reactor, - **kargs - ) + # Try a few times to drop the DB. Some things may hold on to the + # database for a few more seconds due to flakiness, preventing + # us from dropping it when the test is over. If we can't drop + # it, warn and move on. 
+ for x in range(5): + try: + cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,)) + db_conn.commit() + dropped = True + except psycopg2.OperationalError as e: + warnings.warn( + "Couldn't drop old db: " + str(e), category=UserWarning + ) + time.sleep(0.5) + + cur.close() + db_conn.close() + + if not dropped: + warnings.warn("Failed to drop old DB.", category=UserWarning) + + if not LEAVE_DB: + # Register the cleanup hook + cleanup_func(cleanup) # bcrypt is far too slow to be doing in unit tests # Need to let the HS build an auth handler and then mess with it From 6905f5751a7e4966b1b63240ce31f80d02d30104 Mon Sep 17 00:00:00 2001 From: Samuel Philipp Date: Sun, 11 Oct 2020 21:51:11 +0200 Subject: [PATCH 159/245] Docker: support passing additional commandline args to synapse (#8390) --- changelog.d/8390.docker | 1 + docker/README.md | 16 ++++++++++++++- docker/start.py | 45 +++++++++++++++++++++++++---------------- 3 files changed, 44 insertions(+), 18 deletions(-) create mode 100644 changelog.d/8390.docker diff --git a/changelog.d/8390.docker b/changelog.d/8390.docker new file mode 100644 index 000000000000..f71b8e4bbf2d --- /dev/null +++ b/changelog.d/8390.docker @@ -0,0 +1 @@ +Add support for passing commandline args to the synapse process. Contributed by @samuel-p. diff --git a/docker/README.md b/docker/README.md index d0da34778edc..c8f27b8566fc 100644 --- a/docker/README.md +++ b/docker/README.md @@ -83,7 +83,7 @@ docker logs synapse If all is well, you should now be able to connect to http://localhost:8008 and see a confirmation message. -The following environment variables are supported in run mode: +The following environment variables are supported in `run` mode: * `SYNAPSE_CONFIG_DIR`: where additional config files are stored. Defaults to `/data`. @@ -94,6 +94,20 @@ The following environment variables are supported in run mode: * `UID`, `GID`: the user and group id to run Synapse as. Defaults to `991`, `991`. * `TZ`: the [timezone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) the container will run with. Defaults to `UTC`. +For more complex setups (e.g. for workers) you can also pass your args directly to synapse using `run` mode. For example like this: + +``` +docker run -d --name synapse \ + --mount type=volume,src=synapse-data,dst=/data \ + -p 8008:8008 \ + matrixdotorg/synapse:latest run \ + -m synapse.app.generic_worker \ + --config-path=/data/homeserver.yaml \ + --config-path=/data/generic_worker.yaml +``` + +If you do not provide `-m`, the value of the `SYNAPSE_WORKER` environment variable is used. If you do not provide at least one `--config-path` or `-c`, the value of the `SYNAPSE_CONFIG_PATH` environment variable is used instead. + ## Generating an (admin) user After synapse is running, you may wish to create a user via `register_new_matrix_user`. 
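To complement the explicit worker example above, here is a hedged sketch of the fallback behaviour: if no `-m` and no `--config-path`/`-c` arguments are given after `run`, the image falls back to the `SYNAPSE_WORKER` and `SYNAPSE_CONFIG_PATH` environment variables (container and volume names below are illustrative, and both values shown are the documented defaults):

```
docker run -d --name synapse \
    --mount type=volume,src=synapse-data,dst=/data \
    -p 8008:8008 \
    -e SYNAPSE_WORKER=synapse.app.homeserver \
    -e SYNAPSE_CONFIG_PATH=/data/homeserver.yaml \
    matrixdotorg/synapse:latest run
```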
diff --git a/docker/start.py b/docker/start.py index 9f081341581b..0d2c590b8838 100755 --- a/docker/start.py +++ b/docker/start.py @@ -179,7 +179,7 @@ def run_generate_config(environ, ownership): def main(args, environ): - mode = args[1] if len(args) > 1 else None + mode = args[1] if len(args) > 1 else "run" desired_uid = int(environ.get("UID", "991")) desired_gid = int(environ.get("GID", "991")) synapse_worker = environ.get("SYNAPSE_WORKER", "synapse.app.homeserver") @@ -205,36 +205,47 @@ def main(args, environ): config_dir, config_path, environ, ownership ) - if mode is not None: + if mode != "run": error("Unknown execution mode '%s'" % (mode,)) - config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data") - config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml") + args = args[2:] - if not os.path.exists(config_path): - if "SYNAPSE_SERVER_NAME" in environ: - error( - """\ + if "-m" not in args: + args = ["-m", synapse_worker] + args + + # if there are no config files passed to synapse, try adding the default file + if not any(p.startswith("--config-path") or p.startswith("-c") for p in args): + config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data") + config_path = environ.get( + "SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml" + ) + + if not os.path.exists(config_path): + if "SYNAPSE_SERVER_NAME" in environ: + error( + """\ Config file '%s' does not exist. The synapse docker image no longer supports generating a config file on-the-fly based on environment variables. You can migrate to a static config file by running with 'migrate_config'. See the README for more details. """ + % (config_path,) + ) + + error( + "Config file '%s' does not exist. You should either create a new " + "config file by running with the `generate` argument (and then edit " + "the resulting file before restarting) or specify the path to an " + "existing config file with the SYNAPSE_CONFIG_PATH variable." % (config_path,) ) - error( - "Config file '%s' does not exist. You should either create a new " - "config file by running with the `generate` argument (and then edit " - "the resulting file before restarting) or specify the path to an " - "existing config file with the SYNAPSE_CONFIG_PATH variable." - % (config_path,) - ) + args += ["--config-path", config_path] - log("Starting synapse with config file " + config_path) + log("Starting synapse with args " + " ".join(args)) - args = ["python", "-m", synapse_worker, "--config-path", config_path] + args = ["python"] + args if ownership is not None: args = ["gosu", ownership] + args os.execv("/usr/sbin/gosu", args) From f76194a02192d3c7ab0c821e56b403fbadf9c83d Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Mon, 12 Oct 2020 15:50:27 +0100 Subject: [PATCH 160/245] 1.21.0 --- CHANGES.md | 6 ++++++ debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index dfdd8aa68a3b..f94886950141 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 1.21.0 (2020-10-12) +=========================== + +No significant changes since v1.21.0rc3. + + Synapse 1.21.0rc3 (2020-10-08) ============================== diff --git a/debian/changelog b/debian/changelog index 264ef9ce7cc3..a08782f587da 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.21.0) stable; urgency=medium + + * New synapse release 1.21.0. 
+
+ -- Synapse Packaging team  Mon, 12 Oct 2020 15:47:44 +0100
+
 matrix-synapse-py3 (1.20.1) stable; urgency=medium
 
  * New synapse release 1.20.1.
diff --git a/synapse/__init__.py b/synapse/__init__.py
index a86dc07ddc9d..57f818125a81 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -48,7 +48,7 @@
 except ImportError:
     pass
 
-__version__ = "1.21.0rc3"
+__version__ = "1.21.0"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
From 8de3703d214c814ad637793a0cc2220e20579ffa Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 12 Oct 2020 15:51:41 +0100
Subject: [PATCH 161/245] Make event persisters periodically announce position over replication. (#8499)

Currently, background processes that stream the events stream use the
"minimum persisted position" (i.e. `get_current_token()`) rather than the
vector clock style tokens. This is broadly fine as it doesn't matter if the
background processes lag a small amount. However, in extreme cases (i.e.
SyTests) where we only write to one event persister the background processes
will never make progress.

This PR changes it so that the `MultiWriterIDGenerator` keeps the current
position of a given instance as up to date as possible (i.e. using the latest
token it sees if it's not in the process of persisting anything), and then
periodically announces that over replication.

This then allows the "minimum persisted position" to advance, albeit with a
small lag.
---
 changelog.d/8499.misc | 1 +
 docs/tcp_replication.md | 13 +++++---
 synapse/replication/tcp/client.py | 4 +++
 synapse/replication/tcp/commands.py | 36 ++++++++++++++------
 synapse/replication/tcp/handler.py | 24 ++++++++------
 synapse/replication/tcp/resource.py | 47 ++++++++++++++++++++++++++-
 synapse/storage/util/id_generators.py | 10 ++++++
 synapse/storage/util/sequence.py | 2 ++
 tests/storage/test_id_generators.py | 25 +++++++++----
 9 files changed, 128 insertions(+), 34 deletions(-)
 create mode 100644 changelog.d/8499.misc

diff --git a/changelog.d/8499.misc b/changelog.d/8499.misc
new file mode 100644
index 000000000000..237cb3b31135
--- /dev/null
+++ b/changelog.d/8499.misc
@@ -0,0 +1 @@
+Allow events to be sent to clients sooner when using sharded event persisters.
diff --git a/docs/tcp_replication.md b/docs/tcp_replication.md
index db318baa9dbc..ad145439b4f8 100644
--- a/docs/tcp_replication.md
+++ b/docs/tcp_replication.md
@@ -15,7 +15,7 @@ example flow would be (where '>' indicates master to worker and
     > SERVER example.com
     < REPLICATE
-    > POSITION events master 53
+    > POSITION events master 53 53
     > RDATA events master 54 ["$foo1:bar.com", ...]
     > RDATA events master 55 ["$foo4:bar.com", ...]
@@ -138,9 +138,9 @@ the wire:
     < NAME synapse.app.appservice
     < PING 1490197665618
     < REPLICATE
-    > POSITION events master 1
-    > POSITION backfill master 1
-    > POSITION caches master 1
+    > POSITION events master 1 1
+    > POSITION backfill master 1 1
+    > POSITION caches master 1 1
     > RDATA caches master 2 ["get_user_by_id",["@01register-user:localhost:8823"],1490197670513]
     > RDATA events master 14 ["$149019767112vOHxz:localhost:8823", "!AFDCvgApUmpdfVjIXm:localhost:8823","m.room.guest_access","",null]
@@ -185,6 +185,11 @@ client (C):
       updates via HTTP API, rather than via the DB, then processes should make the
       request to the appropriate process.
 
+      Two positions are included, the "new" position and the last position sent respectively.
+ This allows servers to tell instances that the positions have advanced but no + data has been written, without clients needlessly checking to see if they + have missed any updates. + #### ERROR (S, C) There was an error diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index e165429cad84..e27ee216f076 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -191,6 +191,10 @@ async def on_rdata( async def on_position(self, stream_name: str, instance_name: str, token: int): self.store.process_replication_rows(stream_name, instance_name, token, []) + # We poke the generic "replication" notifier to wake anything up that + # may be streaming. + self.notifier.notify_replication() + def on_remote_server_up(self, server: str): """Called when get a new REMOTE_SERVER_UP command.""" diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py index 8cd47770c108..ac532ed5887f 100644 --- a/synapse/replication/tcp/commands.py +++ b/synapse/replication/tcp/commands.py @@ -141,15 +141,23 @@ def get_logcontext_id(self): class PositionCommand(Command): - """Sent by the server to tell the client the stream position without - needing to send an RDATA. + """Sent by an instance to tell others the stream position without needing to + send an RDATA. + + Two tokens are sent, the new position and the last position sent by the + instance (in an RDATA or other POSITION). The tokens are chosen so that *no* + rows were written by the instance between the `prev_token` and `new_token`. + (If an instance hasn't sent a position before then the new position can be + used for both.) Format:: - POSITION + POSITION - On receipt of a POSITION command clients should check if they have missed - any updates, and if so then fetch them out of band. + On receipt of a POSITION command instances should check if they have missed + any updates, and if so then fetch them out of band. Instances can check this + by comparing their view of the current token for the sending instance with + the included `prev_token`. The `` is the process that sent the command and is the source of the stream. @@ -157,18 +165,26 @@ class PositionCommand(Command): NAME = "POSITION" - def __init__(self, stream_name, instance_name, token): + def __init__(self, stream_name, instance_name, prev_token, new_token): self.stream_name = stream_name self.instance_name = instance_name - self.token = token + self.prev_token = prev_token + self.new_token = new_token @classmethod def from_line(cls, line): - stream_name, instance_name, token = line.split(" ", 2) - return cls(stream_name, instance_name, int(token)) + stream_name, instance_name, prev_token, new_token = line.split(" ", 3) + return cls(stream_name, instance_name, int(prev_token), int(new_token)) def to_line(self): - return " ".join((self.stream_name, self.instance_name, str(self.token))) + return " ".join( + ( + self.stream_name, + self.instance_name, + str(self.prev_token), + str(self.new_token), + ) + ) class ErrorCommand(_SimpleCommand): diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index e92da7b26378..95e5502bf2b2 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -101,8 +101,9 @@ def __init__(self, hs): self._streams_to_replicate = [] # type: List[Stream] for stream in self._streams.values(): - if stream.NAME == CachesStream.NAME: - # All workers can write to the cache invalidation stream. 
+ if hs.config.redis.redis_enabled and stream.NAME == CachesStream.NAME: + # All workers can write to the cache invalidation stream when + # using redis. self._streams_to_replicate.append(stream) continue @@ -313,11 +314,14 @@ def send_positions_to_connection(self, conn: AbstractConnection): # We respond with current position of all streams this instance # replicates. for stream in self.get_streams_to_replicate(): + # Note that we use the current token as the prev token here (rather + # than stream.last_token), as we can't be sure that there have been + # no rows written between last token and the current token (since we + # might be racing with the replication sending bg process). + current_token = stream.current_token(self._instance_name) self.send_command( PositionCommand( - stream.NAME, - self._instance_name, - stream.current_token(self._instance_name), + stream.NAME, self._instance_name, current_token, current_token, ) ) @@ -511,16 +515,16 @@ async def _process_position( # If the position token matches our current token then we're up to # date and there's nothing to do. Otherwise, fetch all updates # between then and now. - missing_updates = cmd.token != current_token + missing_updates = cmd.prev_token != current_token while missing_updates: logger.info( "Fetching replication rows for '%s' between %i and %i", stream_name, current_token, - cmd.token, + cmd.new_token, ) (updates, current_token, missing_updates) = await stream.get_updates_since( - cmd.instance_name, current_token, cmd.token + cmd.instance_name, current_token, cmd.new_token ) # TODO: add some tests for this @@ -536,11 +540,11 @@ async def _process_position( [stream.parse_row(row) for row in rows], ) - logger.info("Caught up with stream '%s' to %i", stream_name, cmd.token) + logger.info("Caught up with stream '%s' to %i", stream_name, cmd.new_token) # We've now caught up to position sent to us, notify handler. await self._replication_data_handler.on_position( - cmd.stream_name, cmd.instance_name, cmd.token + cmd.stream_name, cmd.instance_name, cmd.new_token ) self._streams_by_connection.setdefault(conn, set()).add(stream_name) diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 687984e7a8c8..666c13fdb7c2 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -23,7 +23,9 @@ from twisted.internet.protocol import Factory from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.replication.tcp.commands import PositionCommand from synapse.replication.tcp.protocol import ServerReplicationStreamProtocol +from synapse.replication.tcp.streams import EventsStream from synapse.util.metrics import Measure stream_updates_counter = Counter( @@ -84,6 +86,23 @@ def __init__(self, hs): # Set of streams to replicate. self.streams = self.command_handler.get_streams_to_replicate() + # If we have streams then we must have redis enabled or on master + assert ( + not self.streams + or hs.config.redis.redis_enabled + or not hs.config.worker.worker_app + ) + + # If we are replicating an event stream we want to periodically check if + # we should send updated POSITIONs. We do this as a looping call rather + # explicitly poking when the position advances (without new data to + # replicate) to reduce replication traffic (otherwise each writer would + # likely send a POSITION for each new event received over replication). + # + # Note that if the position hasn't advanced then we won't send anything. 
+ if any(EventsStream.NAME == s.NAME for s in self.streams): + self.clock.looping_call(self.on_notifier_poke, 1000) + def on_notifier_poke(self): """Checks if there is actually any new data and sends it to the connections if there are. @@ -91,7 +110,7 @@ def on_notifier_poke(self): This should get called each time new data is available, even if it is currently being executed, so that nothing gets missed """ - if not self.command_handler.connected(): + if not self.command_handler.connected() or not self.streams: # Don't bother if nothing is listening. We still need to advance # the stream tokens otherwise they'll fall behind forever for stream in self.streams: @@ -136,6 +155,8 @@ async def _run_notifier_loop(self): self._replication_torture_level / 1000.0 ) + last_token = stream.last_token + logger.debug( "Getting stream: %s: %s -> %s", stream.NAME, @@ -159,6 +180,30 @@ async def _run_notifier_loop(self): ) stream_updates_counter.labels(stream.NAME).inc(len(updates)) + else: + # The token has advanced but there is no data to + # send, so we send a `POSITION` to inform other + # workers of the updated position. + if stream.NAME == EventsStream.NAME: + # XXX: We only do this for the EventStream as it + # turns out that e.g. account data streams share + # their "current token" with each other, meaning + # that it is *not* safe to send a POSITION. + logger.info( + "Sending position: %s -> %s", + stream.NAME, + current_token, + ) + self.command_handler.send_command( + PositionCommand( + stream.NAME, + self._instance_name, + last_token, + current_token, + ) + ) + continue + # Some streams return multiple rows with the same stream IDs, # we need to make sure they get sent out in batches. We do # this by setting the current token to all but the last of diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index d7e40aaa8b40..3d8da48f2d65 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -524,6 +524,16 @@ def _add_persisted_position(self, new_id: int): heapq.heappush(self._known_persisted_positions, new_id) + # If we're a writer and we don't have any active writes we update our + # current position to the latest position seen. This allows the instance + # to report a recent position when asked, rather than a potentially old + # one (if this instance hasn't written anything for a while). + our_current_position = self._current_positions.get(self._instance_name) + if our_current_position and not self._unfinished_ids: + self._current_positions[self._instance_name] = max( + our_current_position, new_id + ) + # We move the current min position up if the minimum current positions # of all instances is higher (since by definition all positions less # that that have been persisted). 
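To make the two-token `POSITION` handshake described above concrete, here is a small illustrative sketch, not part of the patch, of the check a receiving worker performs; the function name and `my_current_token` are invented for the example, and the comparison mirrors the `cmd.prev_token != current_token` logic added to the command handler above.

```python
def should_fetch_missed_updates(
    prev_token: int, new_token: int, my_current_token: int
) -> bool:
    # The sender guarantees it wrote no rows between prev_token and new_token.
    # If we had already caught up to prev_token we can advance straight to
    # new_token; otherwise we may have missed RDATA rows and must fetch the
    # updates between our position and new_token out of band.
    return my_current_token != prev_token
```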
diff --git a/synapse/storage/util/sequence.py b/synapse/storage/util/sequence.py index ff2d038ad2d2..4386b6101e3f 100644 --- a/synapse/storage/util/sequence.py +++ b/synapse/storage/util/sequence.py @@ -126,6 +126,8 @@ def check_consistency( if max_stream_id > last_value: logger.warning( "Postgres sequence %s is behind table %s: %d < %d", + self._sequence_name, + table, last_value, max_stream_id, ) diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py index 392b08832b05..cc0612cf65ef 100644 --- a/tests/storage/test_id_generators.py +++ b/tests/storage/test_id_generators.py @@ -199,10 +199,17 @@ def test_multi_instance(self): first_id_gen = self._create_id_generator("first", writers=["first", "second"]) second_id_gen = self._create_id_generator("second", writers=["first", "second"]) - self.assertEqual(first_id_gen.get_positions(), {"first": 3, "second": 7}) - self.assertEqual(first_id_gen.get_current_token_for_writer("first"), 3) + # The first ID gen will notice that it can advance its token to 7 as it + # has no in progress writes... + self.assertEqual(first_id_gen.get_positions(), {"first": 7, "second": 7}) + self.assertEqual(first_id_gen.get_current_token_for_writer("first"), 7) self.assertEqual(first_id_gen.get_current_token_for_writer("second"), 7) + # ... but the second ID gen doesn't know that. + self.assertEqual(second_id_gen.get_positions(), {"first": 3, "second": 7}) + self.assertEqual(second_id_gen.get_current_token_for_writer("first"), 3) + self.assertEqual(second_id_gen.get_current_token_for_writer("second"), 7) + # Try allocating a new ID gen and check that we only see position # advanced after we leave the context manager. @@ -211,7 +218,7 @@ async def _get_next_async(): self.assertEqual(stream_id, 8) self.assertEqual( - first_id_gen.get_positions(), {"first": 3, "second": 7} + first_id_gen.get_positions(), {"first": 7, "second": 7} ) self.get_success(_get_next_async()) @@ -279,7 +286,7 @@ def test_get_persisted_upto_position(self): self._insert_row_with_id("first", 3) self._insert_row_with_id("second", 5) - id_gen = self._create_id_generator("first", writers=["first", "second"]) + id_gen = self._create_id_generator("worker", writers=["first", "second"]) self.assertEqual(id_gen.get_positions(), {"first": 3, "second": 5}) @@ -319,14 +326,14 @@ def test_get_persisted_upto_position_get_next(self): id_gen = self._create_id_generator("first", writers=["first", "second"]) - self.assertEqual(id_gen.get_positions(), {"first": 3, "second": 5}) + self.assertEqual(id_gen.get_positions(), {"first": 5, "second": 5}) - self.assertEqual(id_gen.get_persisted_upto_position(), 3) + self.assertEqual(id_gen.get_persisted_upto_position(), 5) async def _get_next_async(): async with id_gen.get_next() as stream_id: self.assertEqual(stream_id, 6) - self.assertEqual(id_gen.get_persisted_upto_position(), 3) + self.assertEqual(id_gen.get_persisted_upto_position(), 5) self.get_success(_get_next_async()) @@ -388,7 +395,7 @@ def test_writer_config_change(self): self._insert_row_with_id("second", 5) # Initial config has two writers - id_gen = self._create_id_generator("first", writers=["first", "second"]) + id_gen = self._create_id_generator("worker", writers=["first", "second"]) self.assertEqual(id_gen.get_persisted_upto_position(), 3) self.assertEqual(id_gen.get_current_token_for_writer("first"), 3) self.assertEqual(id_gen.get_current_token_for_writer("second"), 5) @@ -568,7 +575,7 @@ async def _get_next_async2(): self.get_success(_get_next_async2()) - 
self.assertEqual(id_gen_1.get_positions(), {"first": -1, "second": -2}) + self.assertEqual(id_gen_1.get_positions(), {"first": -2, "second": -2}) self.assertEqual(id_gen_2.get_positions(), {"first": -1, "second": -2}) self.assertEqual(id_gen_1.get_persisted_upto_position(), -2) self.assertEqual(id_gen_2.get_persisted_upto_position(), -2) From 4aa027ea70a4cf237a39c6bff48c225f73dcb0e8 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Mon, 12 Oct 2020 16:07:08 +0100 Subject: [PATCH 162/245] Add deprecation warning for admin api under client api prefixes --- CHANGES.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index f94886950141..fe496a853418 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -3,6 +3,14 @@ Synapse 1.21.0 (2020-10-12) No significant changes since v1.21.0rc3. +As [noted in +v1.20.0](https://github.com/matrix-org/synapse/blob/release-v1.21.0/CHANGES.md#synapse-1200-2020-09-22), +a future release will drop support for accessing Synapse's +[Admin API](https://github.com/matrix-org/synapse/tree/master/docs/admin_api) under the +`/_matrix/client/*` endpoint prefixes. Please update your +[reverse proxy configuration](https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.md) +accordingly. + Synapse 1.21.0rc3 (2020-10-08) ============================== From cd0f65d2c71ce8f6cadfa84a6eb6b882d97e36c0 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Mon, 12 Oct 2020 16:16:35 +0100 Subject: [PATCH 163/245] Reverse proxies are not the only thing to change;be explicit w/ new endpoint --- CHANGES.md | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index fe496a853418..dec263013081 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -7,9 +7,8 @@ As [noted in v1.20.0](https://github.com/matrix-org/synapse/blob/release-v1.21.0/CHANGES.md#synapse-1200-2020-09-22), a future release will drop support for accessing Synapse's [Admin API](https://github.com/matrix-org/synapse/tree/master/docs/admin_api) under the -`/_matrix/client/*` endpoint prefixes. Please update your -[reverse proxy configuration](https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.md) -accordingly. +`/_matrix/client/*` endpoint prefixes. At that point, the Admin API will only +be accessible under `/_synapse/admin`. Synapse 1.21.0rc3 (2020-10-08) @@ -168,9 +167,11 @@ API](https://github.com/matrix-org/synapse/tree/master/docs) has been accessible under the `/_matrix/client/api/v1/admin`, `/_matrix/client/unstable/admin`, `/_matrix/client/r0/admin` and `/_synapse/admin` prefixes. In a future release, we will be dropping support -for accessing Synapse's Admin API using the `/_matrix/client/*` prefixes. This -makes it easier for homeserver admins to lock down external access to the Admin -API endpoints. +for accessing Synapse's Admin API using the `/_matrix/client/*` prefixes. + +From that point, the Admin API will only be accessible under `/_synapse/admin`. +This makes it easier for homeserver admins to lock down external access to the +Admin API endpoints. 
Synapse 1.20.0rc5 (2020-09-18) ============================== From a06b7a5d94fa8b1a5c18e563420fe78870c3473e Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 12 Oct 2020 17:44:11 +0100 Subject: [PATCH 164/245] Explicitly install test dependencies when building deb packages (#8523) After https://github.com/matrix-org/synapse/pull/8377, the deb packages no longer indirectly installed the `"test"` dependencies, causing debian packages to fail to build while carrying out the unit tests. This PR installs `test` dependencies explicitly when building debian packages. --- debian/build_virtualenv | 2 +- debian/changelog | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/debian/build_virtualenv b/debian/build_virtualenv index 4c9aabcac386..cbdde93f96a9 100755 --- a/debian/build_virtualenv +++ b/debian/build_virtualenv @@ -42,7 +42,7 @@ dh_virtualenv \ --preinstall="mock" \ --extra-pip-arg="--no-cache-dir" \ --extra-pip-arg="--compile" \ - --extras="all,systemd" + --extras="all,systemd,test" PACKAGE_BUILD_DIR="debian/matrix-synapse-py3" VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse" diff --git a/debian/changelog b/debian/changelog index a08782f587da..27b6708115fd 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.21.0+nmu1) UNRELEASED; urgency=medium + + * Explicitly install "test" python dependencies. + + -- Andrew Morgan Mon, 12 Oct 2020 17:30:30 +0100 + matrix-synapse-py3 (1.21.0) stable; urgency=medium * New synapse release 1.21.0. From b76f53bb79fc9f6f60de90f28a8194a48764d146 Mon Sep 17 00:00:00 2001 From: Christopher May-Townsend Date: Mon, 12 Oct 2020 22:00:33 +0100 Subject: [PATCH 165/245] Multi arch docker support: add arm/v7 and arm64 to our docker images (#7921) Signed-off-by: Christopher May-Townsend (chris@maytownsend.co.uk) --- .circleci/config.yml | 60 +++++++++++++++++++++++++++++++++++------ changelog.d/7921.docker | 1 + 2 files changed, 53 insertions(+), 8 deletions(-) create mode 100644 changelog.d/7921.docker diff --git a/.circleci/config.yml b/.circleci/config.yml index 5bd2ab2b7656..b10cbedd6d1b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,22 +1,36 @@ -version: 2 +version: 2.1 jobs: dockerhubuploadrelease: - machine: true + docker: + - image: docker:git steps: - checkout - - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG} . + - setup_remote_docker + - docker_prepare - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD - - run: docker push matrixdotorg/synapse:${CIRCLE_TAG} + - docker_build: + tag: -t matrixdotorg/synapse:${CIRCLE_TAG} + platforms: linux/amd64 + - docker_build: + tag: -t matrixdotorg/synapse:${CIRCLE_TAG} + platforms: linux/amd64,linux/arm/v7,linux/arm64 + dockerhubuploadlatest: - machine: true + docker: + - image: docker:git steps: - checkout - - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest . 
+ - setup_remote_docker + - docker_prepare - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD - - run: docker push matrixdotorg/synapse:latest + - docker_build: + tag: -t matrixdotorg/synapse:latest + platforms: linux/amd64 + - docker_build: + tag: -t matrixdotorg/synapse:latest + platforms: linux/amd64,linux/arm/v7,linux/arm64 workflows: - version: 2 build: jobs: - dockerhubuploadrelease: @@ -29,3 +43,33 @@ workflows: filters: branches: only: master + +commands: + docker_prepare: + description: Downloads the buildx cli plugin and enables multiarch images + parameters: + buildx_version: + type: string + default: "v0.4.1" + steps: + - run: apk add --no-cache curl + - run: mkdir -vp ~/.docker/cli-plugins/ ~/dockercache + - run: curl --silent -L "https://github.com/docker/buildx/releases/download/<< parameters.buildx_version >>/buildx-<< parameters.buildx_version >>.linux-amd64" > ~/.docker/cli-plugins/docker-buildx + - run: chmod a+x ~/.docker/cli-plugins/docker-buildx + # install qemu links in /proc/sys/fs/binfmt_misc on the docker instance running the circleci job + - run: docker run --rm --privileged multiarch/qemu-user-static --reset -p yes + # create a context named `builder` for the builds + - run: docker context create builder + # create a buildx builder using the new context, and set it as the default + - run: docker buildx create builder --use + + docker_build: + description: Builds and pushed images to dockerhub using buildx + parameters: + platforms: + type: string + default: linux/amd64 + tag: + type: string + steps: + - run: docker buildx build -f docker/Dockerfile --push --platform << parameters.platforms >> --label gitsha1=${CIRCLE_SHA1} << parameters.tag >> --progress=plain . diff --git a/changelog.d/7921.docker b/changelog.d/7921.docker new file mode 100644 index 000000000000..7cecd67c6a26 --- /dev/null +++ b/changelog.d/7921.docker @@ -0,0 +1 @@ +Added multi-arch support (arm64,arm/v7) for the docker images. Contributed by @maquis196. From 58e583eac1204e6eee6ee924a798180542f1e2c0 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 13 Oct 2020 10:27:16 +0100 Subject: [PATCH 166/245] 1.21.1 --- CHANGES.md | 6 ++++++ debian/changelog | 8 ++++++-- synapse/__init__.py | 2 +- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index dec263013081..75dc5fa89395 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 1.21.1 (2020-10-13) +=========================== + +This release fixes a regression in v1.21.0 that prevented debian packages from being built. +It is otherwise identical to v1.21.0. + Synapse 1.21.0 (2020-10-12) =========================== diff --git a/debian/changelog b/debian/changelog index 27b6708115fd..eeafd4f50abe 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,8 +1,12 @@ -matrix-synapse-py3 (1.21.0+nmu1) UNRELEASED; urgency=medium +matrix-synapse-py3 (1.21.1) stable; urgency=medium + [ Synapse Packaging team ] + * New synapse release 1.21.1. + + [ Andrew Morgan ] * Explicitly install "test" python dependencies. 
- -- Andrew Morgan Mon, 12 Oct 2020 17:30:30 +0100 + -- Synapse Packaging team Tue, 13 Oct 2020 10:24:13 +0100 matrix-synapse-py3 (1.21.0) stable; urgency=medium diff --git a/synapse/__init__.py b/synapse/__init__.py index 57f818125a81..722b53a67de0 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -48,7 +48,7 @@ except ImportError: pass -__version__ = "1.21.0" +__version__ = "1.21.1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From b2486f6656bec2307e62de19d2830994a42b879d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 13 Oct 2020 12:07:56 +0100 Subject: [PATCH 167/245] Fix message duplication if something goes wrong after persisting the event (#8476) Should fix #3365. --- changelog.d/8476.bugfix | 1 + synapse/handlers/federation.py | 9 +- synapse/handlers/message.py | 48 +++++- synapse/handlers/room_member.py | 13 +- synapse/replication/http/send_event.py | 16 +- synapse/storage/databases/main/events.py | 31 ++++ .../storage/databases/main/events_worker.py | 83 ++++++++- .../storage/databases/main/registration.py | 6 +- .../main/schema/delta/58/19txn_id.sql | 40 +++++ synapse/storage/persist_events.py | 96 +++++++++-- tests/handlers/test_message.py | 157 ++++++++++++++++++ tests/rest/client/test_third_party_rules.py | 2 +- tests/unittest.py | 11 +- 13 files changed, 481 insertions(+), 32 deletions(-) create mode 100644 changelog.d/8476.bugfix create mode 100644 synapse/storage/databases/main/schema/delta/58/19txn_id.sql create mode 100644 tests/handlers/test_message.py diff --git a/changelog.d/8476.bugfix b/changelog.d/8476.bugfix new file mode 100644 index 000000000000..993a269979af --- /dev/null +++ b/changelog.d/8476.bugfix @@ -0,0 +1 @@ +Fix message duplication if something goes wrong after persisting the event. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 5ac2fc56567b..455acd7669c4 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2966,17 +2966,20 @@ async def persist_events_and_notify( return result["max_stream_id"] else: assert self.storage.persistence - max_stream_token = await self.storage.persistence.persist_events( + + # Note that this returns the events that were persisted, which may not be + # the same as were passed in if some were deduplicated due to transaction IDs. + events, max_stream_token = await self.storage.persistence.persist_events( event_and_contexts, backfilled=backfilled ) if self._ephemeral_messages_enabled: - for (event, context) in event_and_contexts: + for event in events: # If there's an expiry timestamp on the event, schedule its expiry. self._message_handler.maybe_schedule_expiry(event) if not backfilled: # Never notify for backfilled events - for event, _ in event_and_contexts: + for event in events: await self._notify_persisted_event(event, max_stream_token) return max_stream_token.stream diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index ad0b7bd868bf..b0da938aa94a 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -689,7 +689,7 @@ async def create_and_send_nonmember_event( send this event. Returns: - The event, and its stream ordering (if state event deduplication happened, + The event, and its stream ordering (if deduplication happened, the previous, duplicate event). 
Raises: @@ -712,6 +712,19 @@ async def create_and_send_nonmember_event( # extremities to pile up, which in turn leads to state resolution # taking longer. with (await self.limiter.queue(event_dict["room_id"])): + if txn_id and requester.access_token_id: + existing_event_id = await self.store.get_event_id_from_transaction_id( + event_dict["room_id"], + requester.user.to_string(), + requester.access_token_id, + txn_id, + ) + if existing_event_id: + event = await self.store.get_event(existing_event_id) + # we know it was persisted, so must have a stream ordering + assert event.internal_metadata.stream_ordering + return event, event.internal_metadata.stream_ordering + event, context = await self.create_event( requester, event_dict, token_id=requester.access_token_id, txn_id=txn_id ) @@ -913,10 +926,20 @@ async def handle_new_client_event( extra_users=extra_users, ) stream_id = result["stream_id"] - event.internal_metadata.stream_ordering = stream_id + event_id = result["event_id"] + if event_id != event.event_id: + # If we get a different event back then it means that its + # been de-duplicated, so we replace the given event with the + # one already persisted. + event = await self.store.get_event(event_id) + else: + # If we newly persisted the event then we need to update its + # stream_ordering entry manually (as it was persisted on + # another worker). + event.internal_metadata.stream_ordering = stream_id return event - stream_id = await self.persist_and_notify_client_event( + event = await self.persist_and_notify_client_event( requester, event, context, ratelimit=ratelimit, extra_users=extra_users ) @@ -965,11 +988,16 @@ async def persist_and_notify_client_event( context: EventContext, ratelimit: bool = True, extra_users: List[UserID] = [], - ) -> int: + ) -> EventBase: """Called when we have fully built the event, have already calculated the push actions for the event, and checked auth. This should only be run on the instance in charge of persisting events. + + Returns: + The persisted event. This may be different than the given event if + it was de-duplicated (e.g. because we had already persisted an + event with the same transaction ID.) """ assert self.storage.persistence is not None assert self._events_shard_config.should_handle( @@ -1137,9 +1165,13 @@ def is_inviter_member_event(e): if prev_state_ids: raise AuthError(403, "Changing the room create event is forbidden") - event_pos, max_stream_token = await self.storage.persistence.persist_event( - event, context=context - ) + # Note that this returns the event that was persisted, which may not be + # the same as we passed in if it was deduplicated due transaction IDs. + ( + event, + event_pos, + max_stream_token, + ) = await self.storage.persistence.persist_event(event, context=context) if self._ephemeral_events_enabled: # If there's an expiry timestamp on the event, schedule its expiry. @@ -1160,7 +1192,7 @@ def _notify(): # matters as sometimes presence code can take a while. run_in_background(self._bump_active_time, requester.user) - return event_pos.stream + return event async def _bump_active_time(self, user: UserID) -> None: try: diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index ffbc62ff444a..0080eeaf8dbf 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -171,6 +171,17 @@ async def _local_membership_update( if requester.is_guest: content["kind"] = "guest" + # Check if we already have an event with a matching transaction ID. 
(We + # do this check just before we persist an event as well, but may as well + # do it up front for efficiency.) + if txn_id and requester.access_token_id: + existing_event_id = await self.store.get_event_id_from_transaction_id( + room_id, requester.user.to_string(), requester.access_token_id, txn_id, + ) + if existing_event_id: + event_pos = await self.store.get_position_for_event(existing_event_id) + return existing_event_id, event_pos.stream + event, context = await self.event_creation_handler.create_event( requester, { @@ -679,7 +690,7 @@ async def send_membership_event( if is_blocked: raise SynapseError(403, "This room has been blocked on this server") - await self.event_creation_handler.handle_new_client_event( + event = await self.event_creation_handler.handle_new_client_event( requester, event, context, extra_users=[target_user], ratelimit=ratelimit ) diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py index 9a3a694d5dfa..fc129dbaa7b7 100644 --- a/synapse/replication/http/send_event.py +++ b/synapse/replication/http/send_event.py @@ -46,6 +46,12 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint): "ratelimit": true, "extra_users": [], } + + 200 OK + + { "stream_id": 12345, "event_id": "$abcdef..." } + + The returned event ID may not match the sent event if it was deduplicated. """ NAME = "send_event" @@ -116,11 +122,17 @@ async def _handle_request(self, request, event_id): "Got event to send with ID: %s into room: %s", event.event_id, event.room_id ) - stream_id = await self.event_creation_handler.persist_and_notify_client_event( + event = await self.event_creation_handler.persist_and_notify_client_event( requester, event, context, ratelimit=ratelimit, extra_users=extra_users ) - return 200, {"stream_id": stream_id} + return ( + 200, + { + "stream_id": event.internal_metadata.stream_ordering, + "event_id": event.event_id, + }, + ) def register_servlets(hs, http_server): diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index b19c424ba952..fdb17745f69a 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -361,6 +361,8 @@ def _persist_events_txn( self._store_event_txn(txn, events_and_contexts=events_and_contexts) + self._persist_transaction_ids_txn(txn, events_and_contexts) + # Insert into event_to_state_groups. self._store_event_state_mappings_txn(txn, events_and_contexts) @@ -405,6 +407,35 @@ def _persist_events_txn( # room_memberships, where applicable. self._update_current_state_txn(txn, state_delta_for_room, min_stream_order) + def _persist_transaction_ids_txn( + self, + txn: LoggingTransaction, + events_and_contexts: List[Tuple[EventBase, EventContext]], + ): + """Persist the mapping from transaction IDs to event IDs (if defined). 
+ """ + + to_insert = [] + for event, _ in events_and_contexts: + token_id = getattr(event.internal_metadata, "token_id", None) + txn_id = getattr(event.internal_metadata, "txn_id", None) + if token_id and txn_id: + to_insert.append( + { + "event_id": event.event_id, + "room_id": event.room_id, + "user_id": event.sender, + "token_id": token_id, + "txn_id": txn_id, + "inserted_ts": self._clock.time_msec(), + } + ) + + if to_insert: + self.db_pool.simple_insert_many_txn( + txn, table="event_txn_id", values=to_insert, + ) + def _update_current_state_txn( self, txn: LoggingTransaction, diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 4e74fafe43d1..3ec4d1d9c2ff 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -12,7 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import itertools import logging import threading @@ -137,6 +136,15 @@ def __init__(self, database: DatabasePool, db_conn, hs): db_conn, "events", "stream_ordering", step=-1 ) + if not hs.config.worker.worker_app: + # We periodically clean out old transaction ID mappings + self._clock.looping_call( + run_as_background_process, + 5 * 60 * 1000, + "_cleanup_old_transaction_ids", + self._cleanup_old_transaction_ids, + ) + self._get_event_cache = Cache( "*getEvent*", keylen=3, @@ -1308,3 +1316,76 @@ def get_next_event_to_expire_txn(txn): return await self.db_pool.runInteraction( desc="get_next_event_to_expire", func=get_next_event_to_expire_txn ) + + async def get_event_id_from_transaction_id( + self, room_id: str, user_id: str, token_id: int, txn_id: str + ) -> Optional[str]: + """Look up if we have already persisted an event for the transaction ID, + returning the event ID if so. + """ + return await self.db_pool.simple_select_one_onecol( + table="event_txn_id", + keyvalues={ + "room_id": room_id, + "user_id": user_id, + "token_id": token_id, + "txn_id": txn_id, + }, + retcol="event_id", + allow_none=True, + desc="get_event_id_from_transaction_id", + ) + + async def get_already_persisted_events( + self, events: Iterable[EventBase] + ) -> Dict[str, str]: + """Look up if we have already persisted an event for the transaction ID, + returning a mapping from event ID in the given list to the event ID of + an existing event. + + Also checks if there are duplicates in the given events, if there are + will map duplicates to the *first* event. + """ + + mapping = {} + txn_id_to_event = {} # type: Dict[Tuple[str, int, str], str] + + for event in events: + token_id = getattr(event.internal_metadata, "token_id", None) + txn_id = getattr(event.internal_metadata, "txn_id", None) + + if token_id and txn_id: + # Check if this is a duplicate of an event in the given events. + existing = txn_id_to_event.get((event.room_id, token_id, txn_id)) + if existing: + mapping[event.event_id] = existing + continue + + # Check if this is a duplicate of an event we've already + # persisted. 
+ existing = await self.get_event_id_from_transaction_id( + event.room_id, event.sender, token_id, txn_id + ) + if existing: + mapping[event.event_id] = existing + txn_id_to_event[(event.room_id, token_id, txn_id)] = existing + else: + txn_id_to_event[(event.room_id, token_id, txn_id)] = event.event_id + + return mapping + + async def _cleanup_old_transaction_ids(self): + """Cleans out transaction id mappings older than 24hrs. + """ + + def _cleanup_old_transaction_ids_txn(txn): + sql = """ + DELETE FROM event_txn_id + WHERE inserted_ts < ? + """ + one_day_ago = self._clock.time_msec() - 24 * 60 * 60 * 1000 + txn.execute(sql, (one_day_ago,)) + + return await self.db_pool.runInteraction( + "_cleanup_old_transaction_ids", _cleanup_old_transaction_ids_txn, + ) diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 236d3cdbe3f2..9a003e30f9d5 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -1003,7 +1003,7 @@ async def add_access_token_to_user( token: str, device_id: Optional[str], valid_until_ms: Optional[int], - ) -> None: + ) -> int: """Adds an access token for the given user. Args: @@ -1013,6 +1013,8 @@ async def add_access_token_to_user( valid_until_ms: when the token is valid until. None for no expiry. Raises: StoreError if there was a problem adding this. + Returns: + The token ID """ next_id = self._access_tokens_id_gen.get_next() @@ -1028,6 +1030,8 @@ async def add_access_token_to_user( desc="add_access_token_to_user", ) + return next_id + def _set_device_for_access_token_txn(self, txn, token: str, device_id: str) -> str: old_device_id = self.db_pool.simple_select_one_onecol_txn( txn, "access_tokens", {"token": token}, "device_id" diff --git a/synapse/storage/databases/main/schema/delta/58/19txn_id.sql b/synapse/storage/databases/main/schema/delta/58/19txn_id.sql new file mode 100644 index 000000000000..b2454121a825 --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/58/19txn_id.sql @@ -0,0 +1,40 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +-- A map of recent events persisted with transaction IDs. Used to deduplicate +-- send event requests with the same transaction ID. +-- +-- Note: transaction IDs are scoped to the room ID/user ID/access token that was +-- used to make the request. +-- +-- Note: The foreign key constraints are ON DELETE CASCADE, as if we delete the +-- events or access token we don't want to try and de-duplicate the event. 
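For illustration, the deduplication lookup that `get_event_id_from_transaction_id` (added above) performs against this table is effectively the query below; it is served by the unique index on (room_id, user_id, token_id, txn_id) defined at the end of this file.

    SELECT event_id
      FROM event_txn_id
     WHERE room_id = ? AND user_id = ? AND token_id = ? AND txn_id = ?;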
+CREATE TABLE IF NOT EXISTS event_txn_id ( + event_id TEXT NOT NULL, + room_id TEXT NOT NULL, + user_id TEXT NOT NULL, + token_id BIGINT NOT NULL, + txn_id TEXT NOT NULL, + inserted_ts BIGINT NOT NULL, + FOREIGN KEY (event_id) + REFERENCES events (event_id) ON DELETE CASCADE, + FOREIGN KEY (token_id) + REFERENCES access_tokens (id) ON DELETE CASCADE +); + +CREATE UNIQUE INDEX IF NOT EXISTS event_txn_id_event_id ON event_txn_id(event_id); +CREATE UNIQUE INDEX IF NOT EXISTS event_txn_id_txn_id ON event_txn_id(room_id, user_id, token_id, txn_id); +CREATE INDEX IF NOT EXISTS event_txn_id_ts ON event_txn_id(inserted_ts); diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py index 4d2d88d1f02d..70e636b0bac0 100644 --- a/synapse/storage/persist_events.py +++ b/synapse/storage/persist_events.py @@ -96,7 +96,9 @@ def add_to_queue(self, room_id, events_and_contexts, backfilled): Returns: defer.Deferred: a deferred which will resolve once the events are - persisted. Runs its callbacks *without* a logcontext. + persisted. Runs its callbacks *without* a logcontext. The result + is the same as that returned by the callback passed to + `handle_queue`. """ queue = self._event_persist_queues.setdefault(room_id, deque()) if queue: @@ -199,7 +201,7 @@ async def persist_events( self, events_and_contexts: Iterable[Tuple[EventBase, EventContext]], backfilled: bool = False, - ) -> RoomStreamToken: + ) -> Tuple[List[EventBase], RoomStreamToken]: """ Write events to the database Args: @@ -209,7 +211,11 @@ async def persist_events( which might update the current state etc. Returns: - the stream ordering of the latest persisted event + List of events persisted, the current position room stream position. + The list of events persisted may not be the same as those passed in + if they were deduplicated due to an event already existing that + matched the transcation ID; the existing event is returned in such + a case. """ partitioned = {} # type: Dict[str, List[Tuple[EventBase, EventContext]]] for event, ctx in events_and_contexts: @@ -225,19 +231,41 @@ async def persist_events( for room_id in partitioned: self._maybe_start_persisting(room_id) - await make_deferred_yieldable( + # Each deferred returns a map from event ID to existing event ID if the + # event was deduplicated. (The dict may also include other entries if + # the event was persisted in a batch with other events). + # + # Since we use `defer.gatherResults` we need to merge the returned list + # of dicts into one. + ret_vals = await make_deferred_yieldable( defer.gatherResults(deferreds, consumeErrors=True) ) + replaced_events = {} + for d in ret_vals: + replaced_events.update(d) + + events = [] + for event, _ in events_and_contexts: + existing_event_id = replaced_events.get(event.event_id) + if existing_event_id: + events.append(await self.main_store.get_event(existing_event_id)) + else: + events.append(event) - return self.main_store.get_room_max_token() + return ( + events, + self.main_store.get_room_max_token(), + ) async def persist_event( self, event: EventBase, context: EventContext, backfilled: bool = False - ) -> Tuple[PersistedEventPosition, RoomStreamToken]: + ) -> Tuple[EventBase, PersistedEventPosition, RoomStreamToken]: """ Returns: - The stream ordering of `event`, and the stream ordering of the - latest persisted event + The event, stream ordering of `event`, and the stream ordering of the + latest persisted event. 
The returned event may not match the given + event if it was deduplicated due to an existing event matching the + transaction ID. """ deferred = self._event_persist_queue.add_to_queue( event.room_id, [(event, context)], backfilled=backfilled @@ -245,19 +273,33 @@ async def persist_event( self._maybe_start_persisting(event.room_id) - await make_deferred_yieldable(deferred) + # The deferred returns a map from event ID to existing event ID if the + # event was deduplicated. (The dict may also include other entries if + # the event was persisted in a batch with other events.) + replaced_events = await make_deferred_yieldable(deferred) + replaced_event = replaced_events.get(event.event_id) + if replaced_event: + event = await self.main_store.get_event(replaced_event) event_stream_id = event.internal_metadata.stream_ordering # stream ordering should have been assigned by now assert event_stream_id pos = PersistedEventPosition(self._instance_name, event_stream_id) - return pos, self.main_store.get_room_max_token() + return event, pos, self.main_store.get_room_max_token() def _maybe_start_persisting(self, room_id: str): + """Pokes the `_event_persist_queue` to start handling new items in the + queue, if not already in progress. + + Causes the deferreds returned by `add_to_queue` to resolve with: a + dictionary of event ID to event ID we didn't persist as we already had + another event persisted with the same TXN ID. + """ + async def persisting_queue(item): with Measure(self._clock, "persist_events"): - await self._persist_events( + return await self._persist_events( item.events_and_contexts, backfilled=item.backfilled ) @@ -267,12 +309,38 @@ async def _persist_events( self, events_and_contexts: List[Tuple[EventBase, EventContext]], backfilled: bool = False, - ): + ) -> Dict[str, str]: """Calculates the change to current state and forward extremities, and persists the given events and with those updates. + + Returns: + A dictionary of event ID to event ID we didn't persist as we already + had another event persisted with the same TXN ID. """ + replaced_events = {} # type: Dict[str, str] if not events_and_contexts: - return + return replaced_events + + # Check if any of the events have a transaction ID that has already been + # persisted, and if so we don't persist it again. + # + # We should have checked this a long time before we get here, but it's + # possible that different send event requests race in such a way that + # they both pass the earlier checks. Checking here isn't racey as we can + # have only one `_persist_events` per room being called at a time. + replaced_events = await self.main_store.get_already_persisted_events( + (event for event, _ in events_and_contexts) + ) + + if replaced_events: + events_and_contexts = [ + (e, ctx) + for e, ctx in events_and_contexts + if e.event_id not in replaced_events + ] + + if not events_and_contexts: + return replaced_events chunks = [ events_and_contexts[x : x + 100] @@ -441,6 +509,8 @@ async def _persist_events( await self._handle_potentially_left_users(potentially_left_users) + return replaced_events + async def _calculate_new_extremities( self, room_id: str, diff --git a/tests/handlers/test_message.py b/tests/handlers/test_message.py new file mode 100644 index 000000000000..64e28bc639ac --- /dev/null +++ b/tests/handlers/test_message.py @@ -0,0 +1,157 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import logging +from typing import Tuple + +from synapse.api.constants import EventTypes +from synapse.events import EventBase +from synapse.events.snapshot import EventContext +from synapse.rest import admin +from synapse.rest.client.v1 import login, room +from synapse.types import create_requester +from synapse.util.stringutils import random_string + +from tests import unittest + +logger = logging.getLogger(__name__) + + +class EventCreationTestCase(unittest.HomeserverTestCase): + servlets = [ + admin.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.handler = self.hs.get_event_creation_handler() + self.persist_event_storage = self.hs.get_storage().persistence + + self.user_id = self.register_user("tester", "foobar") + self.access_token = self.login("tester", "foobar") + self.room_id = self.helper.create_room_as(self.user_id, tok=self.access_token) + + self.info = self.get_success( + self.hs.get_datastore().get_user_by_access_token(self.access_token,) + ) + self.token_id = self.info["token_id"] + + self.requester = create_requester(self.user_id, access_token_id=self.token_id) + + def _create_duplicate_event(self, txn_id: str) -> Tuple[EventBase, EventContext]: + """Create a new event with the given transaction ID. All events produced + by this method will be considered duplicates. + """ + + # We create a new event with a random body, as otherwise we'll produce + # *exactly* the same event with the same hash, and so same event ID. + return self.get_success( + self.handler.create_event( + self.requester, + { + "type": EventTypes.Message, + "room_id": self.room_id, + "sender": self.requester.user.to_string(), + "content": {"msgtype": "m.text", "body": random_string(5)}, + }, + token_id=self.token_id, + txn_id=txn_id, + ) + ) + + def test_duplicated_txn_id(self): + """Test that attempting to handle/persist an event with a transaction ID + that has already been persisted correctly returns the old event and does + *not* produce duplicate messages. + """ + + txn_id = "something_suitably_random" + + event1, context = self._create_duplicate_event(txn_id) + + ret_event1 = self.get_success( + self.handler.handle_new_client_event(self.requester, event1, context) + ) + stream_id1 = ret_event1.internal_metadata.stream_ordering + + self.assertEqual(event1.event_id, ret_event1.event_id) + + event2, context = self._create_duplicate_event(txn_id) + + # We want to test that the deduplication at the persit event end works, + # so we want to make sure we test with different events. + self.assertNotEqual(event1.event_id, event2.event_id) + + ret_event2 = self.get_success( + self.handler.handle_new_client_event(self.requester, event2, context) + ) + stream_id2 = ret_event2.internal_metadata.stream_ordering + + # Assert that the returned values match those from the initial event + # rather than the new one. 
+ self.assertEqual(ret_event1.event_id, ret_event2.event_id) + self.assertEqual(stream_id1, stream_id2) + + # Let's test that calling `persist_event` directly also does the right + # thing. + event3, context = self._create_duplicate_event(txn_id) + self.assertNotEqual(event1.event_id, event3.event_id) + + ret_event3, event_pos3, _ = self.get_success( + self.persist_event_storage.persist_event(event3, context) + ) + + # Assert that the returned values match those from the initial event + # rather than the new one. + self.assertEqual(ret_event1.event_id, ret_event3.event_id) + self.assertEqual(stream_id1, event_pos3.stream) + + # Let's test that calling `persist_events` directly also does the right + # thing. + event4, context = self._create_duplicate_event(txn_id) + self.assertNotEqual(event1.event_id, event3.event_id) + + events, _ = self.get_success( + self.persist_event_storage.persist_events([(event3, context)]) + ) + ret_event4 = events[0] + + # Assert that the returned values match those from the initial event + # rather than the new one. + self.assertEqual(ret_event1.event_id, ret_event4.event_id) + + def test_duplicated_txn_id_one_call(self): + """Test that we correctly handle duplicates that we try and persist at + the same time. + """ + + txn_id = "something_else_suitably_random" + + # Create two duplicate events to persist at the same time + event1, context1 = self._create_duplicate_event(txn_id) + event2, context2 = self._create_duplicate_event(txn_id) + + # Ensure their event IDs are different to start with + self.assertNotEqual(event1.event_id, event2.event_id) + + events, _ = self.get_success( + self.persist_event_storage.persist_events( + [(event1, context1), (event2, context2)] + ) + ) + + # Check that we've deduplicated the events. + self.assertEqual(len(events), 2) + self.assertEqual(events[0].event_id, events[1].event_id) diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index d03e12166498..b737625e3324 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -107,7 +107,7 @@ async def check(ev, state): request, channel = self.make_request( "PUT", - "/_matrix/client/r0/rooms/%s/send/foo.bar.forbidden/1" % self.room_id, + "/_matrix/client/r0/rooms/%s/send/foo.bar.forbidden/2" % self.room_id, {}, access_token=self.tok, ) diff --git a/tests/unittest.py b/tests/unittest.py index 5c87f6097ec8..6c1661c92c14 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -254,17 +254,24 @@ def setUp(self): if hasattr(self, "user_id"): if self.hijack_auth: + # We need a valid token ID to satisfy foreign key constraints. 
+ token_id = self.get_success( + self.hs.get_datastore().add_access_token_to_user( + self.helper.auth_user_id, "some_fake_token", None, None, + ) + ) + async def get_user_by_access_token(token=None, allow_guest=False): return { "user": UserID.from_string(self.helper.auth_user_id), - "token_id": 1, + "token_id": token_id, "is_guest": False, } async def get_user_by_req(request, allow_guest=False, rights="access"): return create_requester( UserID.from_string(self.helper.auth_user_id), - 1, + token_id, False, False, None, From 629a951b49ae58af43323e6829cf49d7452ebf39 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 13 Oct 2020 08:20:32 -0400 Subject: [PATCH 168/245] Move additional tasks to the background worker, part 4 (#8513) --- changelog.d/8513.feature | 1 + synapse/handlers/account_validity.py | 3 +- synapse/handlers/deactivate_account.py | 2 +- synapse/handlers/message.py | 18 +- synapse/handlers/pagination.py | 2 +- synapse/handlers/profile.py | 29 ++- synapse/server.py | 12 +- synapse/storage/databases/main/profile.py | 82 ++++----- .../storage/databases/main/registration.py | 52 +++--- synapse/storage/databases/main/room.py | 168 +++++++++--------- tests/handlers/test_typing.py | 48 ++--- 11 files changed, 196 insertions(+), 221 deletions(-) create mode 100644 changelog.d/8513.feature diff --git a/changelog.d/8513.feature b/changelog.d/8513.feature new file mode 100644 index 000000000000..542993110bc8 --- /dev/null +++ b/changelog.d/8513.feature @@ -0,0 +1 @@ +Allow running background tasks in a separate worker process. diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index 4caf6d591af5..f33044e97aa6 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -70,7 +70,8 @@ def send_emails(): "send_renewals", self._send_renewal_emails ) - self.clock.looping_call(send_emails, 30 * 60 * 1000) + if hs.config.run_background_tasks: + self.clock.looping_call(send_emails, 30 * 60 * 1000) async def _send_renewal_emails(self): """Gets the list of users whose account is expiring in the amount of time diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index 58c9f12686f5..4efe6c530a1b 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -45,7 +45,7 @@ def __init__(self, hs: "HomeServer"): # Start the user parter loop so it can resume parting users from rooms where # it left off (if it has work left to do). - if hs.config.worker_app is None: + if hs.config.run_background_tasks: hs.get_reactor().callWhenRunning(self._start_user_parting) self._account_validity_enabled = hs.config.account_validity.enabled diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index b0da938aa94a..c52e6824d340 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -402,21 +402,23 @@ def __init__(self, hs: "HomeServer"): self.config.block_events_without_consent_error ) + # we need to construct a ConsentURIBuilder here, as it checks that the necessary + # config options, but *only* if we have a configuration for which we are + # going to need it. + if self._block_events_without_consent_error: + self._consent_uri_builder = ConsentURIBuilder(self.config) + # Rooms which should be excluded from dummy insertion. (For instance, # those without local users who can send events into the room). # # map from room id to time-of-last-attempt. 
# self._rooms_to_exclude_from_dummy_event_insertion = {} # type: Dict[str, int] - - # we need to construct a ConsentURIBuilder here, as it checks that the necessary - # config options, but *only* if we have a configuration for which we are - # going to need it. - if self._block_events_without_consent_error: - self._consent_uri_builder = ConsentURIBuilder(self.config) + # The number of forward extremeities before a dummy event is sent. + self._dummy_events_threshold = hs.config.dummy_events_threshold if ( - not self.config.worker_app + self.config.run_background_tasks and self.config.cleanup_extremities_with_dummy_events ): self.clock.looping_call( @@ -431,8 +433,6 @@ def __init__(self, hs: "HomeServer"): self._ephemeral_events_enabled = hs.config.enable_ephemeral_messages - self._dummy_events_threshold = hs.config.dummy_events_threshold - async def create_event( self, requester: Requester, diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 085b685959a4..426b58da9e49 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -92,7 +92,7 @@ def __init__(self, hs: "HomeServer"): self._retention_allowed_lifetime_min = hs.config.retention_allowed_lifetime_min self._retention_allowed_lifetime_max = hs.config.retention_allowed_lifetime_max - if hs.config.retention_enabled: + if hs.config.run_background_tasks and hs.config.retention_enabled: # Run the purge jobs described in the configuration file. for job in hs.config.retention_purge_jobs: logger.info("Setting up purge job with config: %s", job) diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 5453e6dfc87a..b78493875522 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -35,14 +35,16 @@ MAX_AVATAR_URL_LEN = 1000 -class BaseProfileHandler(BaseHandler): +class ProfileHandler(BaseHandler): """Handles fetching and updating user profile information. - BaseProfileHandler can be instantiated directly on workers and will - delegate to master when necessary. The master process should use the - subclass MasterProfileHandler + ProfileHandler can be instantiated directly on workers and will + delegate to master when necessary. 
""" + PROFILE_UPDATE_MS = 60 * 1000 + PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000 + def __init__(self, hs): super().__init__(hs) @@ -53,6 +55,11 @@ def __init__(self, hs): self.user_directory_handler = hs.get_user_directory_handler() + if hs.config.run_background_tasks: + self.clock.looping_call( + self._start_update_remote_profile_cache, self.PROFILE_UPDATE_MS + ) + async def get_profile(self, user_id): target_user = UserID.from_string(user_id) @@ -363,20 +370,6 @@ async def check_profile_query_allowed(self, target_user, requester=None): raise SynapseError(403, "Profile isn't available", Codes.FORBIDDEN) raise - -class MasterProfileHandler(BaseProfileHandler): - PROFILE_UPDATE_MS = 60 * 1000 - PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000 - - def __init__(self, hs): - super().__init__(hs) - - assert hs.config.worker_app is None - - self.clock.looping_call( - self._start_update_remote_profile_cache, self.PROFILE_UPDATE_MS - ) - def _start_update_remote_profile_cache(self): return run_as_background_process( "Update remote profile", self._update_remote_profile_cache diff --git a/synapse/server.py b/synapse/server.py index e793793cdca9..f921ee4b53af 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -75,7 +75,7 @@ from synapse.handlers.pagination import PaginationHandler from synapse.handlers.password_policy import PasswordPolicyHandler from synapse.handlers.presence import PresenceHandler -from synapse.handlers.profile import BaseProfileHandler, MasterProfileHandler +from synapse.handlers.profile import ProfileHandler from synapse.handlers.read_marker import ReadMarkerHandler from synapse.handlers.receipts import ReceiptsHandler from synapse.handlers.register import RegistrationHandler @@ -191,7 +191,12 @@ class HomeServer(metaclass=abc.ABCMeta): """ REQUIRED_ON_BACKGROUND_TASK_STARTUP = [ + "account_validity", "auth", + "deactivate_account", + "message", + "pagination", + "profile", "stats", ] @@ -462,10 +467,7 @@ def get_initial_sync_handler(self) -> InitialSyncHandler: @cache_in_self def get_profile_handler(self): - if self.config.worker_app: - return BaseProfileHandler(self) - else: - return MasterProfileHandler(self) + return ProfileHandler(self) @cache_in_self def get_event_creation_handler(self) -> EventCreationHandler: diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py index d2e0685e9e66..1681caa1f031 100644 --- a/synapse/storage/databases/main/profile.py +++ b/synapse/storage/databases/main/profile.py @@ -91,27 +91,6 @@ async def set_profile_avatar_url( desc="set_profile_avatar_url", ) - -class ProfileStore(ProfileWorkerStore): - async def add_remote_profile_cache( - self, user_id: str, displayname: str, avatar_url: str - ) -> None: - """Ensure we are caching the remote user's profiles. - - This should only be called when `is_subscribed_remote_profile_for_user` - would return true for the user. - """ - await self.db_pool.simple_upsert( - table="remote_profile_cache", - keyvalues={"user_id": user_id}, - values={ - "displayname": displayname, - "avatar_url": avatar_url, - "last_check": self._clock.time_msec(), - }, - desc="add_remote_profile_cache", - ) - async def update_remote_profile_cache( self, user_id: str, displayname: str, avatar_url: str ) -> int: @@ -138,6 +117,31 @@ async def maybe_delete_remote_profile_cache(self, user_id): desc="delete_remote_profile_cache", ) + async def is_subscribed_remote_profile_for_user(self, user_id): + """Check whether we are interested in a remote user's profile. 
+ """ + res = await self.db_pool.simple_select_one_onecol( + table="group_users", + keyvalues={"user_id": user_id}, + retcol="user_id", + allow_none=True, + desc="should_update_remote_profile_cache_for_user", + ) + + if res: + return True + + res = await self.db_pool.simple_select_one_onecol( + table="group_invites", + keyvalues={"user_id": user_id}, + retcol="user_id", + allow_none=True, + desc="should_update_remote_profile_cache_for_user", + ) + + if res: + return True + async def get_remote_profile_cache_entries_that_expire( self, last_checked: int ) -> Dict[str, str]: @@ -160,27 +164,23 @@ def _get_remote_profile_cache_entries_that_expire_txn(txn): _get_remote_profile_cache_entries_that_expire_txn, ) - async def is_subscribed_remote_profile_for_user(self, user_id): - """Check whether we are interested in a remote user's profile. - """ - res = await self.db_pool.simple_select_one_onecol( - table="group_users", - keyvalues={"user_id": user_id}, - retcol="user_id", - allow_none=True, - desc="should_update_remote_profile_cache_for_user", - ) - if res: - return True +class ProfileStore(ProfileWorkerStore): + async def add_remote_profile_cache( + self, user_id: str, displayname: str, avatar_url: str + ) -> None: + """Ensure we are caching the remote user's profiles. - res = await self.db_pool.simple_select_one_onecol( - table="group_invites", + This should only be called when `is_subscribed_remote_profile_for_user` + would return true for the user. + """ + await self.db_pool.simple_upsert( + table="remote_profile_cache", keyvalues={"user_id": user_id}, - retcol="user_id", - allow_none=True, - desc="should_update_remote_profile_cache_for_user", + values={ + "displayname": displayname, + "avatar_url": avatar_url, + "last_check": self._clock.time_msec(), + }, + desc="add_remote_profile_cache", ) - - if res: - return True diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 9a003e30f9d5..4c843b76798c 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -862,6 +862,32 @@ def set_expiration_date_for_user_txn(self, txn, user_id, use_delta=False): values={"expiration_ts_ms": expiration_ts, "email_sent": False}, ) + async def get_user_pending_deactivation(self) -> Optional[str]: + """ + Gets one user from the table of users waiting to be parted from all the rooms + they're in. + """ + return await self.db_pool.simple_select_one_onecol( + "users_pending_deactivation", + keyvalues={}, + retcol="user_id", + allow_none=True, + desc="get_users_pending_deactivation", + ) + + async def del_user_pending_deactivation(self, user_id: str) -> None: + """ + Removes the given user to the table of users who need to be parted from all the + rooms they're in, effectively marking that user as fully deactivated. + """ + # XXX: This should be simple_delete_one but we failed to put a unique index on + # the table, so somehow duplicate entries have ended up in it. 
+ await self.db_pool.simple_delete( + "users_pending_deactivation", + keyvalues={"user_id": user_id}, + desc="del_user_pending_deactivation", + ) + class RegistrationBackgroundUpdateStore(RegistrationWorkerStore): def __init__(self, database: DatabasePool, db_conn, hs): @@ -1371,32 +1397,6 @@ async def add_user_pending_deactivation(self, user_id: str) -> None: desc="add_user_pending_deactivation", ) - async def del_user_pending_deactivation(self, user_id: str) -> None: - """ - Removes the given user to the table of users who need to be parted from all the - rooms they're in, effectively marking that user as fully deactivated. - """ - # XXX: This should be simple_delete_one but we failed to put a unique index on - # the table, so somehow duplicate entries have ended up in it. - await self.db_pool.simple_delete( - "users_pending_deactivation", - keyvalues={"user_id": user_id}, - desc="del_user_pending_deactivation", - ) - - async def get_user_pending_deactivation(self) -> Optional[str]: - """ - Gets one user from the table of users waiting to be parted from all the rooms - they're in. - """ - return await self.db_pool.simple_select_one_onecol( - "users_pending_deactivation", - keyvalues={}, - retcol="user_id", - allow_none=True, - desc="get_users_pending_deactivation", - ) - async def validate_threepid_session( self, session_id: str, client_secret: str, token: str, current_ts: int ) -> Optional[str]: diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index c0f2af07850b..e83d961c20b4 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -869,6 +869,89 @@ def get_all_new_public_rooms(txn): "get_all_new_public_rooms", get_all_new_public_rooms ) + async def get_rooms_for_retention_period_in_range( + self, min_ms: Optional[int], max_ms: Optional[int], include_null: bool = False + ) -> Dict[str, dict]: + """Retrieves all of the rooms within the given retention range. + + Optionally includes the rooms which don't have a retention policy. + + Args: + min_ms: Duration in milliseconds that define the lower limit of + the range to handle (exclusive). If None, doesn't set a lower limit. + max_ms: Duration in milliseconds that define the upper limit of + the range to handle (inclusive). If None, doesn't set an upper limit. + include_null: Whether to include rooms which retention policy is NULL + in the returned set. + + Returns: + The rooms within this range, along with their retention + policy. The key is "room_id", and maps to a dict describing the retention + policy associated with this room ID. The keys for this nested dict are + "min_lifetime" (int|None), and "max_lifetime" (int|None). + """ + + def get_rooms_for_retention_period_in_range_txn(txn): + range_conditions = [] + args = [] + + if min_ms is not None: + range_conditions.append("max_lifetime > ?") + args.append(min_ms) + + if max_ms is not None: + range_conditions.append("max_lifetime <= ?") + args.append(max_ms) + + # Do a first query which will retrieve the rooms that have a retention policy + # in their current state. 
+ sql = """ + SELECT room_id, min_lifetime, max_lifetime FROM room_retention + INNER JOIN current_state_events USING (event_id, room_id) + """ + + if len(range_conditions): + sql += " WHERE (" + " AND ".join(range_conditions) + ")" + + if include_null: + sql += " OR max_lifetime IS NULL" + + txn.execute(sql, args) + + rows = self.db_pool.cursor_to_dict(txn) + rooms_dict = {} + + for row in rows: + rooms_dict[row["room_id"]] = { + "min_lifetime": row["min_lifetime"], + "max_lifetime": row["max_lifetime"], + } + + if include_null: + # If required, do a second query that retrieves all of the rooms we know + # of so we can handle rooms with no retention policy. + sql = "SELECT DISTINCT room_id FROM current_state_events" + + txn.execute(sql) + + rows = self.db_pool.cursor_to_dict(txn) + + # If a room isn't already in the dict (i.e. it doesn't have a retention + # policy in its state), add it with a null policy. + for row in rows: + if row["room_id"] not in rooms_dict: + rooms_dict[row["room_id"]] = { + "min_lifetime": None, + "max_lifetime": None, + } + + return rooms_dict + + return await self.db_pool.runInteraction( + "get_rooms_for_retention_period_in_range", + get_rooms_for_retention_period_in_range_txn, + ) + class RoomBackgroundUpdateStore(SQLBaseStore): REMOVE_TOMESTONED_ROOMS_BG_UPDATE = "remove_tombstoned_rooms_from_directory" @@ -1446,88 +1529,3 @@ async def block_room(self, room_id: str, user_id: str) -> None: self.is_room_blocked, (room_id,), ) - - async def get_rooms_for_retention_period_in_range( - self, min_ms: Optional[int], max_ms: Optional[int], include_null: bool = False - ) -> Dict[str, dict]: - """Retrieves all of the rooms within the given retention range. - - Optionally includes the rooms which don't have a retention policy. - - Args: - min_ms: Duration in milliseconds that define the lower limit of - the range to handle (exclusive). If None, doesn't set a lower limit. - max_ms: Duration in milliseconds that define the upper limit of - the range to handle (inclusive). If None, doesn't set an upper limit. - include_null: Whether to include rooms which retention policy is NULL - in the returned set. - - Returns: - The rooms within this range, along with their retention - policy. The key is "room_id", and maps to a dict describing the retention - policy associated with this room ID. The keys for this nested dict are - "min_lifetime" (int|None), and "max_lifetime" (int|None). - """ - - def get_rooms_for_retention_period_in_range_txn(txn): - range_conditions = [] - args = [] - - if min_ms is not None: - range_conditions.append("max_lifetime > ?") - args.append(min_ms) - - if max_ms is not None: - range_conditions.append("max_lifetime <= ?") - args.append(max_ms) - - # Do a first query which will retrieve the rooms that have a retention policy - # in their current state. - sql = """ - SELECT room_id, min_lifetime, max_lifetime FROM room_retention - INNER JOIN current_state_events USING (event_id, room_id) - """ - - if len(range_conditions): - sql += " WHERE (" + " AND ".join(range_conditions) + ")" - - if include_null: - sql += " OR max_lifetime IS NULL" - - txn.execute(sql, args) - - rows = self.db_pool.cursor_to_dict(txn) - rooms_dict = {} - - for row in rows: - rooms_dict[row["room_id"]] = { - "min_lifetime": row["min_lifetime"], - "max_lifetime": row["max_lifetime"], - } - - if include_null: - # If required, do a second query that retrieves all of the rooms we know - # of so we can handle rooms with no retention policy. 
- sql = "SELECT DISTINCT room_id FROM current_state_events" - - txn.execute(sql) - - rows = self.db_pool.cursor_to_dict(txn) - - # If a room isn't already in the dict (i.e. it doesn't have a retention - # policy in its state), add it with a null policy. - for row in rows: - if row["room_id"] not in rooms_dict: - rooms_dict[row["room_id"]] = { - "min_lifetime": None, - "max_lifetime": None, - } - - return rooms_dict - - rooms = await self.db_pool.runInteraction( - "get_rooms_for_retention_period_in_range", - get_rooms_for_retention_period_in_range_txn, - ) - - return rooms diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 3fec09ea8a91..16ff2e22d295 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -65,26 +65,6 @@ def make_homeserver(self, reactor, clock): mock_federation_client = Mock(spec=["put_json"]) mock_federation_client.put_json.return_value = defer.succeed((200, "OK")) - datastores = Mock() - datastores.main = Mock( - spec=[ - # Bits that Federation needs - "prep_send_transaction", - "delivered_txn", - "get_received_txn_response", - "set_received_txn_response", - "get_destination_last_successful_stream_ordering", - "get_destination_retry_timings", - "get_devices_by_remote", - "maybe_store_room_on_invite", - # Bits that user_directory needs - "get_user_directory_stream_pos", - "get_current_state_deltas", - "get_device_updates_by_remote", - "get_room_max_stream_ordering", - ] - ) - # the tests assume that we are starting at unix time 1000 reactor.pump((1000,)) @@ -95,8 +75,6 @@ def make_homeserver(self, reactor, clock): replication_streams={}, ) - hs.datastores = datastores - return hs def prepare(self, reactor, clock, hs): @@ -114,16 +92,16 @@ def prepare(self, reactor, clock, hs): "retry_interval": 0, "failure_ts": None, } - self.datastore.get_destination_retry_timings.return_value = defer.succeed( - retry_timings_res + self.datastore.get_destination_retry_timings = Mock( + return_value=defer.succeed(retry_timings_res) ) - self.datastore.get_device_updates_by_remote.return_value = make_awaitable( - (0, []) + self.datastore.get_device_updates_by_remote = Mock( + return_value=make_awaitable((0, [])) ) - self.datastore.get_destination_last_successful_stream_ordering.return_value = make_awaitable( - None + self.datastore.get_destination_last_successful_stream_ordering = Mock( + return_value=make_awaitable(None) ) def get_received_txn_response(*args): @@ -145,17 +123,19 @@ def get_joined_hosts_for_room(room_id): self.datastore.get_joined_hosts_for_room = get_joined_hosts_for_room - def get_users_in_room(room_id): - return defer.succeed({str(u) for u in self.room_members}) + async def get_users_in_room(room_id): + return {str(u) for u in self.room_members} self.datastore.get_users_in_room = get_users_in_room - self.datastore.get_user_directory_stream_pos.side_effect = ( - # we deliberately return a non-None stream pos to avoid doing an initial_spam - lambda: make_awaitable(1) + self.datastore.get_user_directory_stream_pos = Mock( + side_effect=( + # we deliberately return a non-None stream pos to avoid doing an initial_spam + lambda: make_awaitable(1) + ) ) - self.datastore.get_current_state_deltas.return_value = (0, None) + self.datastore.get_current_state_deltas = Mock(return_value=(0, None)) self.datastore.get_to_device_stream_token = lambda: 0 self.datastore.get_new_device_msgs_for_remote = lambda *args, **kargs: make_awaitable( From d59378d86b04b396ff493b45bd2b9b68513038e0 Mon Sep 17 00:00:00 2001 From: Richard van der 
Hoff Date: Tue, 13 Oct 2020 15:17:13 +0100 Subject: [PATCH 169/245] Remove redundant calls to third_party_rules in `on_send_{join,leave}` There's not much point in calling these *after* we have decided to accept them into the DAG. --- synapse/handlers/federation.py | 20 +------------------- 1 file changed, 1 insertion(+), 19 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 455acd7669c4..c38cb7a5c857 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1567,15 +1567,6 @@ async def on_send_join_request(self, origin, pdu): context = await self._handle_new_event(origin, event) - event_allowed = await self.third_party_event_rules.check_event_allowed( - event, context - ) - if not event_allowed: - logger.info("Sending of join %s forbidden by third-party rules", event) - raise SynapseError( - 403, "This event is not allowed in this context", Codes.FORBIDDEN - ) - logger.debug( "on_send_join_request: After _handle_new_event: %s, sigs: %s", event.event_id, @@ -1789,16 +1780,7 @@ async def on_send_leave_request(self, origin, pdu): event.internal_metadata.outlier = False - context = await self._handle_new_event(origin, event) - - event_allowed = await self.third_party_event_rules.check_event_allowed( - event, context - ) - if not event_allowed: - logger.info("Sending of leave %s forbidden by third-party rules", event) - raise SynapseError( - 403, "This event is not allowed in this context", Codes.FORBIDDEN - ) + await self._handle_new_event(origin, event) logger.debug( "on_send_leave_request: After _handle_new_event: %s, sigs: %s", From 123711ed198bd5cf9984818f8bac1926ed1af5fa Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 13 Oct 2020 15:44:54 +0100 Subject: [PATCH 170/245] Move third_party_rules check to event creation time Rather than waiting until we handle the event, call the ThirdPartyRules check when we fist create the event. --- synapse/handlers/federation.py | 46 ++-------------------------------- synapse/handlers/message.py | 19 ++++++++------ 2 files changed, 13 insertions(+), 52 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index c38cb7a5c857..fde8f005318e 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1507,18 +1507,9 @@ async def on_make_join_request( event, context = await self.event_creation_handler.create_new_client_event( builder=builder ) - except AuthError as e: + except SynapseError as e: logger.warning("Failed to create join to %s because %s", room_id, e) - raise e - - event_allowed = await self.third_party_event_rules.check_event_allowed( - event, context - ) - if not event_allowed: - logger.info("Creation of join %s forbidden by third-party rules", event) - raise SynapseError( - 403, "This event is not allowed in this context", Codes.FORBIDDEN - ) + raise # The remote hasn't signed it yet, obviously. We'll do the full checks # when we get the event back in `on_send_join_request` @@ -1739,15 +1730,6 @@ async def on_make_leave_request( builder=builder ) - event_allowed = await self.third_party_event_rules.check_event_allowed( - event, context - ) - if not event_allowed: - logger.warning("Creation of leave %s forbidden by third-party rules", event) - raise SynapseError( - 403, "This event is not allowed in this context", Codes.FORBIDDEN - ) - try: # The remote hasn't signed it yet, obviously. 
We'll do the full checks # when we get the event back in `on_send_leave_request` @@ -2676,18 +2658,6 @@ async def exchange_third_party_invite( builder=builder ) - event_allowed = await self.third_party_event_rules.check_event_allowed( - event, context - ) - if not event_allowed: - logger.info( - "Creation of threepid invite %s forbidden by third-party rules", - event, - ) - raise SynapseError( - 403, "This event is not allowed in this context", Codes.FORBIDDEN - ) - event, context = await self.add_display_name_to_third_party_invite( room_version, event_dict, event, context ) @@ -2738,18 +2708,6 @@ async def on_exchange_third_party_invite_request( event, context = await self.event_creation_handler.create_new_client_event( builder=builder ) - - event_allowed = await self.third_party_event_rules.check_event_allowed( - event, context - ) - if not event_allowed: - logger.warning( - "Exchange of threepid invite %s forbidden by third-party rules", event - ) - raise SynapseError( - 403, "This event is not allowed in this context", Codes.FORBIDDEN - ) - event, context = await self.add_display_name_to_third_party_invite( room_version, event_dict, event, context ) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index c52e6824d340..987c7597914a 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -795,6 +795,17 @@ async def create_new_client_event( if requester: context.app_service = requester.app_service + event_allowed = await self.third_party_event_rules.check_event_allowed( + event, context + ) + if not event_allowed: + logger.info( + "Event %s forbidden by third-party rules", event, + ) + raise SynapseError( + 403, "This event is not allowed in this context", Codes.FORBIDDEN + ) + self.validator.validate_new(event, self.config) # If this event is an annotation then we check that that the sender @@ -881,14 +892,6 @@ async def handle_new_client_event( else: room_version = await self.store.get_room_version_id(event.room_id) - event_allowed = await self.third_party_event_rules.check_event_allowed( - event, context - ) - if not event_allowed: - raise SynapseError( - 403, "This event is not allowed in this context", Codes.FORBIDDEN - ) - if event.internal_metadata.is_out_of_band_membership(): # the only sort of out-of-band-membership events we expect to see here # are invite rejections we have generated ourselves. From d9d86c29965ceaf92927f061375a0d7a888aea67 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 13 Oct 2020 23:06:36 +0100 Subject: [PATCH 171/245] Remove redundant `token_id` parameter to create_event this is always the same as requester.access_token_id. 
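As a sketch of what call sites look like after this change (the variable names here are illustrative rather than lifted from the diff), the access token ID travels on the requester and `create_event` picks it up itself:

    requester = create_requester(user_id, access_token_id=access_token_id)

    event, context = await self.event_creation_handler.create_event(
        requester,
        {
            "type": "m.room.message",
            "room_id": room_id,
            "sender": requester.user.to_string(),
            "content": {"msgtype": "m.text", "body": "hello"},
        },
        txn_id=txn_id,
    )

`create_event` copies `requester.access_token_id` into the event's internal metadata, so callers no longer need to pass `token_id` separately.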
--- synapse/handlers/message.py | 8 +++----- synapse/handlers/room.py | 1 - synapse/handlers/room_member.py | 1 - tests/handlers/test_message.py | 1 - 4 files changed, 3 insertions(+), 8 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index c52e6824d340..b8f541425bbb 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -437,7 +437,6 @@ async def create_event( self, requester: Requester, event_dict: dict, - token_id: Optional[str] = None, txn_id: Optional[str] = None, prev_event_ids: Optional[List[str]] = None, require_consent: bool = True, @@ -453,7 +452,6 @@ async def create_event( Args: requester event_dict: An entire event - token_id txn_id prev_event_ids: the forward extremities to use as the prev_events for the @@ -511,8 +509,8 @@ async def create_event( if require_consent and not is_exempt: await self.assert_accepted_privacy_policy(requester) - if token_id is not None: - builder.internal_metadata.token_id = token_id + if requester.access_token_id is not None: + builder.internal_metadata.token_id = requester.access_token_id if txn_id is not None: builder.internal_metadata.txn_id = txn_id @@ -726,7 +724,7 @@ async def create_and_send_nonmember_event( return event, event.internal_metadata.stream_ordering event, context = await self.create_event( - requester, event_dict, token_id=requester.access_token_id, txn_id=txn_id + requester, event_dict, txn_id=txn_id ) assert self.hs.is_mine_id(event.sender), "User must be our own: %s" % ( diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 93ed51063ac2..ec300d8877c6 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -214,7 +214,6 @@ async def _upgrade_room( "replacement_room": new_room_id, }, }, - token_id=requester.access_token_id, ) old_room_version = await self.store.get_room_version_id(old_room_id) await self.auth.check_from_context( diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 0080eeaf8dbf..d9b98d8acd3d 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -193,7 +193,6 @@ async def _local_membership_update( # For backwards compatibility: "membership": membership, }, - token_id=requester.access_token_id, txn_id=txn_id, prev_event_ids=prev_event_ids, require_consent=require_consent, diff --git a/tests/handlers/test_message.py b/tests/handlers/test_message.py index 64e28bc639ac..9f6f21a6e202 100644 --- a/tests/handlers/test_message.py +++ b/tests/handlers/test_message.py @@ -66,7 +66,6 @@ def _create_duplicate_event(self, txn_id: str) -> Tuple[EventBase, EventContext] "sender": self.requester.user.to_string(), "content": {"msgtype": "m.text", "body": random_string(5)}, }, - token_id=self.token_id, txn_id=txn_id, ) ) From 617e8a46538af56bd5abbb2c9e4df8025841c338 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 13 Oct 2020 18:53:56 +0100 Subject: [PATCH 172/245] Allow ThirdPartyRules modules to replace event content Support returning a new event dict from `check_event_allowed`. 
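A sketch of a module using the new return type (hypothetical module; constructor and registration omitted). `check_event_allowed` may now return `True` to allow the event, `False` to reject it, or a dict to replace it; the dict is used to rebuild the event, so the module should copy rather than mutate the (frozen) event it is given:

    class ExampleThirdPartyRules:
        async def check_event_allowed(self, event, state_events):
            # `state_events` maps (event type, state key) -> event for the
            # room state at the event's prev_events.
            if state_events.get(("org.example.room_frozen", "")):
                # Reject: the caller turns this into a 403 M_FORBIDDEN.
                return False

            # Allow, but return a modified copy of the event. Fields such as
            # room_id, type and state_key must be left unchanged, and the
            # sender must remain a local user.
            new_event = event.get_dict()
            new_event["content"] = dict(event.content)
            new_event["content"]["org.example.annotated"] = True
            return new_event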
--- synapse/events/third_party_rules.py | 12 +++- synapse/handlers/message.py | 64 ++++++++++++++++++++- tests/rest/client/test_third_party_rules.py | 8 +-- 3 files changed, 75 insertions(+), 9 deletions(-) diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index 1535cc53391a..a9aabe00dfd6 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -12,7 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Callable + +from typing import Callable, Union from synapse.events import EventBase from synapse.events.snapshot import EventContext @@ -44,15 +45,20 @@ def __init__(self, hs): async def check_event_allowed( self, event: EventBase, context: EventContext - ) -> bool: + ) -> Union[bool, dict]: """Check if a provided event should be allowed in the given context. + The module can return: + * True: the event is allowed. + * False: the event is not allowed, and should be rejected with M_FORBIDDEN. + * a dict: replacement event data. + Args: event: The event to be checked. context: The context of the event. Returns: - True if the event should be allowed, False if not. + The result from the ThirdPartyRules module, as above """ if self.third_party_rules is None: return True diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 987c7597914a..0c6aec347ec1 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -795,16 +795,22 @@ async def create_new_client_event( if requester: context.app_service = requester.app_service - event_allowed = await self.third_party_event_rules.check_event_allowed( + third_party_result = await self.third_party_event_rules.check_event_allowed( event, context ) - if not event_allowed: + if not third_party_result: logger.info( "Event %s forbidden by third-party rules", event, ) raise SynapseError( 403, "This event is not allowed in this context", Codes.FORBIDDEN ) + elif isinstance(third_party_result, dict): + # the third-party rules want to replace the event. We'll need to build a new + # event. + event, context = await self._rebuild_event_after_third_party_rules( + third_party_result, event + ) self.validator.validate_new(event, self.config) @@ -1294,3 +1300,57 @@ def _expire_rooms_to_exclude_from_dummy_event_insertion(self): room_id, ) del self._rooms_to_exclude_from_dummy_event_insertion[room_id] + + async def _rebuild_event_after_third_party_rules( + self, third_party_result: dict, original_event: EventBase + ) -> Tuple[EventBase, EventContext]: + # the third_party_event_rules want to replace the event. + # we do some basic checks, and then return the replacement event and context. + + # Construct a new EventBuilder and validate it, which helps with the + # rest of these checks. + try: + builder = self.event_builder_factory.for_room_version( + original_event.room_version, third_party_result + ) + self.validator.validate_builder(builder) + except SynapseError as e: + raise Exception( + "Third party rules module created an invalid event: " + e.msg, + ) + + immutable_fields = [ + # changing the room is going to break things: we've already checked that the + # room exists, and are holding a concurrency limiter token for that room. + # Also, we might need to use a different room version. 
+ "room_id", + # changing the type or state key might work, but we'd need to check that the + # calling functions aren't making assumptions about them. + "type", + "state_key", + ] + + for k in immutable_fields: + if getattr(builder, k, None) != original_event.get(k): + raise Exception( + "Third party rules module created an invalid event: " + "cannot change field " + k + ) + + # check that the new sender belongs to this HS + if not self.hs.is_mine_id(builder.sender): + raise Exception( + "Third party rules module created an invalid event: " + "invalid sender " + builder.sender + ) + + # copy over the original internal metadata + for k, v in original_event.internal_metadata.get_dict().items(): + setattr(builder.internal_metadata, k, v) + + event = await builder.build(prev_event_ids=original_event.prev_event_ids()) + + # we rebuild the event context, to be on the safe side. If nothing else, + # delta_ids might need an update. + context = await self.state.compute_event_context(event) + return event, context diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index b737625e3324..d40455080031 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -115,12 +115,12 @@ async def check(ev, state): self.assertEquals(channel.result["code"], b"403", channel.result) def test_modify_event(self): - """Tests that the module can successfully tweak an event before it is persisted. - """ + """The module can return a modified version of the event""" # first patch the event checker so that it will modify the event async def check(ev: EventBase, state): - ev.content = {"x": "y"} - return True + d = ev.get_dict() + d["content"] = {"x": "y"} + return d current_rules_module().check_event_allowed = check From 898196f1cca419c0d2b60529c86ddff3cea83072 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 13 Oct 2020 22:02:41 +0100 Subject: [PATCH 173/245] guard against accidental modification --- synapse/events/__init__.py | 6 ++++++ synapse/events/third_party_rules.py | 7 ++++--- tests/rest/client/test_third_party_rules.py | 20 ++++++++++++++++++++ 3 files changed, 30 insertions(+), 3 deletions(-) diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 7a51d0a22fe9..65df62107f78 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -312,6 +312,12 @@ def auth_event_ids(self): """ return [e for e, _ in self.auth_events] + def freeze(self): + """'Freeze' the event dict, so it cannot be modified by accident""" + + # this will be a no-op if the event dict is already frozen. + self._dict = freeze(self._dict) + class FrozenEvent(EventBase): format_version = EventFormatVersions.V1 # All events of this type are V1 diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index a9aabe00dfd6..77fbd3f68a59 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -69,9 +69,10 @@ async def check_event_allowed( events = await self.store.get_events(prev_state_ids.values()) state_events = {(ev.type, ev.state_key): ev for ev in events.values()} - # The module can modify the event slightly if it wants, but caution should be - # exercised, and it's likely to go very wrong if applied to events received over - # federation. + # Ensure that the event is frozen, to make sure that the module is not tempted + # to try to modify it. Any attempt to modify it at this point will invalidate + # the hashes and signatures. 
+ event.freeze() return await self.third_party_rules.check_event_allowed(event, state_events) diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index d40455080031..0048bea54a54 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -114,6 +114,26 @@ async def check(ev, state): self.render(request) self.assertEquals(channel.result["code"], b"403", channel.result) + def test_cannot_modify_event(self): + """cannot accidentally modify an event before it is persisted""" + + # first patch the event checker so that it will try to modify the event + async def check(ev: EventBase, state): + ev.content = {"x": "y"} + return True + + current_rules_module().check_event_allowed = check + + # now send the event + request, channel = self.make_request( + "PUT", + "/_matrix/client/r0/rooms/%s/send/modifyme/1" % self.room_id, + {"x": "x"}, + access_token=self.tok, + ) + self.render(request) + self.assertEqual(channel.result["code"], b"500", channel.result) + def test_modify_event(self): """The module can return a modified version of the event""" # first patch the event checker so that it will modify the event From 091e9482af6171c19f4b967d3a973ec6c2088ab7 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 13 Oct 2020 18:57:37 +0100 Subject: [PATCH 174/245] changelog --- changelog.d/8535.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/8535.feature diff --git a/changelog.d/8535.feature b/changelog.d/8535.feature new file mode 100644 index 000000000000..45342e66ad7d --- /dev/null +++ b/changelog.d/8535.feature @@ -0,0 +1 @@ +Support modifying event content in `ThirdPartyRules` modules. From a34b17e492129adba260915de401ab1cbabf6215 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 13 Oct 2020 23:14:35 +0100 Subject: [PATCH 175/245] Simplify `_locally_reject_invite` Update `EventCreationHandler.create_event` to accept an auth_events param, and use it in `_locally_reject_invite` instead of reinventing the wheel. --- synapse/events/builder.py | 21 ++++--- synapse/handlers/message.py | 22 ++++++- synapse/handlers/room_member.py | 58 +++++-------------- tests/handlers/test_presence.py | 2 +- .../test_federation_sender_shard.py | 2 +- tests/storage/test_redaction.py | 4 +- 6 files changed, 52 insertions(+), 57 deletions(-) diff --git a/synapse/events/builder.py b/synapse/events/builder.py index b6c47be6468c..df4f950fec86 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -97,32 +97,37 @@ def state_key(self): def is_state(self): return self._state_key is not None - async def build(self, prev_event_ids: List[str]) -> EventBase: + async def build( + self, prev_event_ids: List[str], auth_event_ids: Optional[List[str]] + ) -> EventBase: """Transform into a fully signed and hashed event Args: prev_event_ids: The event IDs to use as the prev events + auth_event_ids: The event IDs to use as the auth events. + Should normally be set to None, which will cause them to be calculated + based on the room state at the prev_events. Returns: The signed and hashed event. 
""" - - state_ids = await self._state.get_current_state_ids( - self.room_id, prev_event_ids - ) - auth_ids = self._auth.compute_auth_events(self, state_ids) + if auth_event_ids is None: + state_ids = await self._state.get_current_state_ids( + self.room_id, prev_event_ids + ) + auth_event_ids = self._auth.compute_auth_events(self, state_ids) format_version = self.room_version.event_format if format_version == EventFormatVersions.V1: # The types of auth/prev events changes between event versions. auth_events = await self._store.add_event_hashes( - auth_ids + auth_event_ids ) # type: Union[List[str], List[Tuple[str, Dict[str, str]]]] prev_events = await self._store.add_event_hashes( prev_event_ids ) # type: Union[List[str], List[Tuple[str, Dict[str, str]]]] else: - auth_events = auth_ids + auth_events = auth_event_ids prev_events = prev_event_ids old_depth = await self._store.get_max_depth_of(prev_event_ids) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index b8f541425bbb..f18f8825963d 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -439,6 +439,7 @@ async def create_event( event_dict: dict, txn_id: Optional[str] = None, prev_event_ids: Optional[List[str]] = None, + auth_event_ids: Optional[List[str]] = None, require_consent: bool = True, ) -> Tuple[EventBase, EventContext]: """ @@ -458,6 +459,12 @@ async def create_event( new event. If None, they will be requested from the database. + + auth_event_ids: + The event ids to use as the auth_events for the new event. + Should normally be left as None, which will cause them to be calculated + based on the room state at the prev_events. + require_consent: Whether to check if the requester has consented to the privacy policy. Raises: @@ -516,7 +523,10 @@ async def create_event( builder.internal_metadata.txn_id = txn_id event, context = await self.create_new_client_event( - builder=builder, requester=requester, prev_event_ids=prev_event_ids, + builder=builder, + requester=requester, + prev_event_ids=prev_event_ids, + auth_event_ids=auth_event_ids, ) # In an ideal world we wouldn't need the second part of this condition. However, @@ -755,6 +765,7 @@ async def create_new_client_event( builder: EventBuilder, requester: Optional[Requester] = None, prev_event_ids: Optional[List[str]] = None, + auth_event_ids: Optional[List[str]] = None, ) -> Tuple[EventBase, EventContext]: """Create a new event for a local client @@ -767,6 +778,11 @@ async def create_new_client_event( If None, they will be requested from the database. + auth_event_ids: + The event ids to use as the auth_events for the new event. + Should normally be left as None, which will cause them to be calculated + based on the room state at the prev_events. 
+ Returns: Tuple of created event, context """ @@ -788,7 +804,9 @@ async def create_new_client_event( builder.type == EventTypes.Create or len(prev_event_ids) > 0 ), "Attempting to create an event with no prev_events" - event = await builder.build(prev_event_ids=prev_event_ids) + event = await builder.build( + prev_event_ids=prev_event_ids, auth_event_ids=auth_event_ids + ) context = await self.state.compute_event_context(event) if requester: context.app_service = requester.app_service diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index d9b98d8acd3d..ec784030e90f 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -17,12 +17,10 @@ import logging import random from http import HTTPStatus -from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union - -from unpaddedbase64 import encode_base64 +from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple from synapse import types -from synapse.api.constants import MAX_DEPTH, AccountDataTypes, EventTypes, Membership +from synapse.api.constants import AccountDataTypes, EventTypes, Membership from synapse.api.errors import ( AuthError, Codes, @@ -31,12 +29,8 @@ SynapseError, ) from synapse.api.ratelimiting import Ratelimiter -from synapse.api.room_versions import EventFormatVersions -from synapse.crypto.event_signing import compute_event_reference_hash from synapse.events import EventBase -from synapse.events.builder import create_local_event_from_event_dict from synapse.events.snapshot import EventContext -from synapse.events.validator import EventValidator from synapse.storage.roommember import RoomsForUser from synapse.types import JsonDict, Requester, RoomAlias, RoomID, StateMap, UserID from synapse.util.async_helpers import Linearizer @@ -1132,31 +1126,10 @@ async def _locally_reject_invite( room_id = invite_event.room_id target_user = invite_event.state_key - room_version = await self.store.get_room_version(room_id) content["membership"] = Membership.LEAVE - # the auth events for the new event are the same as that of the invite, plus - # the invite itself. - # - # the prev_events are just the invite. - invite_hash = invite_event.event_id # type: Union[str, Tuple] - if room_version.event_format == EventFormatVersions.V1: - alg, h = compute_event_reference_hash(invite_event) - invite_hash = (invite_event.event_id, {alg: encode_base64(h)}) - - auth_events = tuple(invite_event.auth_events) + (invite_hash,) - prev_events = (invite_hash,) - - # we cap depth of generated events, to ensure that they are not - # rejected by other servers (and so that they can be persisted in - # the db) - depth = min(invite_event.depth + 1, MAX_DEPTH) - event_dict = { - "depth": depth, - "auth_events": auth_events, - "prev_events": prev_events, "type": EventTypes.Member, "room_id": room_id, "sender": target_user, @@ -1164,24 +1137,23 @@ async def _locally_reject_invite( "state_key": target_user, } - event = create_local_event_from_event_dict( - clock=self.clock, - hostname=self.hs.hostname, - signing_key=self.hs.signing_key, - room_version=room_version, - event_dict=event_dict, + # the auth events for the new event are the same as that of the invite, plus + # the invite itself. + # + # the prev_events are just the invite. 
+ prev_event_ids = [invite_event.event_id] + auth_event_ids = invite_event.auth_event_ids() + prev_event_ids + + event, context = await self.event_creation_handler.create_event( + requester, + event_dict, + txn_id=txn_id, + prev_event_ids=prev_event_ids, + auth_event_ids=auth_event_ids, ) event.internal_metadata.outlier = True event.internal_metadata.out_of_band_membership = True - if txn_id is not None: - event.internal_metadata.txn_id = txn_id - if requester.access_token_id is not None: - event.internal_metadata.token_id = requester.access_token_id - - EventValidator().validate_new(event, self.config) - context = await self.state_handler.compute_event_context(event) - context.app_service = requester.app_service result_event = await self.event_creation_handler.handle_new_client_event( requester, event, context, extra_users=[UserID.from_string(target_user)], ) diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 914c82e7a8d0..8ed67640f8e7 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -615,7 +615,7 @@ def _add_new_user(self, room_id, user_id): self.store.get_latest_event_ids_in_room(room_id) ) - event = self.get_success(builder.build(prev_event_ids)) + event = self.get_success(builder.build(prev_event_ids, None)) self.get_success(self.federation_handler.on_receive_pdu(hostname, event)) diff --git a/tests/replication/test_federation_sender_shard.py b/tests/replication/test_federation_sender_shard.py index 9c4a9c35635b..779745ae9da3 100644 --- a/tests/replication/test_federation_sender_shard.py +++ b/tests/replication/test_federation_sender_shard.py @@ -226,7 +226,7 @@ def create_room_with_remote_server(self, user, token, remote_server="other_serve } builder = factory.for_room_version(room_version, event_dict) - join_event = self.get_success(builder.build(prev_event_ids)) + join_event = self.get_success(builder.build(prev_event_ids, None)) self.get_success(federation.on_send_join_request(remote_server, join_event)) self.replicate() diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py index 1ea35d60c11c..d4f9e809db36 100644 --- a/tests/storage/test_redaction.py +++ b/tests/storage/test_redaction.py @@ -236,9 +236,9 @@ def __init__(self, base_builder, event_id): self._event_id = event_id @defer.inlineCallbacks - def build(self, prev_event_ids): + def build(self, prev_event_ids, auth_event_ids): built_event = yield defer.ensureDeferred( - self._base_builder.build(prev_event_ids) + self._base_builder.build(prev_event_ids, auth_event_ids) ) built_event._event_id = self._event_id From d9dc6185d3596e2243dcbf257909f8cdb722e622 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 13 Oct 2020 23:19:49 +0100 Subject: [PATCH 176/245] changelog --- changelog.d/8537.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/8537.misc diff --git a/changelog.d/8537.misc b/changelog.d/8537.misc new file mode 100644 index 000000000000..26309b5b9393 --- /dev/null +++ b/changelog.d/8537.misc @@ -0,0 +1 @@ +Factor out common code between `RoomMemberHandler._locally_reject_invite` and `EventCreationHandler.create_event`. 
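As a hedged recap of the interface introduced above (names such as `event_creation_handler` stand in for the real objects; this is not code from the patch): `EventBuilder.build` now takes `auth_event_ids` explicitly, and `create_event` forwards it, which is what lets `_locally_reject_invite` pin its auth events to those of the invite.

```python
# Passing auth_event_ids=None keeps the previous behaviour of computing the
# auth events from the room state at the prev_events.
async def build_with_default_auth(builder, prev_event_ids):
    return await builder.build(prev_event_ids=prev_event_ids, auth_event_ids=None)

# An out-of-band invite rejection instead supplies them explicitly: the
# invite's own auth events, plus the invite itself.
async def reject_invite(event_creation_handler, requester, invite_event, event_dict):
    prev_event_ids = [invite_event.event_id]
    return await event_creation_handler.create_event(
        requester,
        event_dict,
        prev_event_ids=prev_event_ids,
        auth_event_ids=invite_event.auth_event_ids() + prev_event_ids,
    )
```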
From ec606ea9e334a17af30a552e4612f96e8e7695fb Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Wed, 14 Oct 2020 05:24:58 -0500 Subject: [PATCH 177/245] Add correct M_BAD_JSON error code to /profile/{userId}/displayname (#8517) Fixes #8029 --- changelog.d/8517.bugfix | 1 + synapse/rest/client/v1/profile.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8517.bugfix diff --git a/changelog.d/8517.bugfix b/changelog.d/8517.bugfix new file mode 100644 index 000000000000..1ab623c59fcf --- /dev/null +++ b/changelog.d/8517.bugfix @@ -0,0 +1 @@ +Fix error code for `/profile/{userId}/displayname` to be `M_BAD_JSON`. diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index b686cd671ffd..e7fcd2b1ffea 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -59,7 +59,9 @@ async def on_PUT(self, request, user_id): try: new_name = content["displayname"] except Exception: - return 400, "Unable to parse name" + raise SynapseError( + code=400, msg="Unable to parse name", errcode=Codes.BAD_JSON, + ) await self.profile_handler.set_displayname(user, requester, new_name, is_admin) From 3ee97a2748f64a772878b6e4d4775aed2e0534a2 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 14 Oct 2020 12:00:52 +0100 Subject: [PATCH 178/245] Make sure a retention policy is a state event (#8527) * Make sure a retention policy is a state event * Changelog --- changelog.d/8527.bugfix | 1 + synapse/events/validator.py | 3 +++ synapse/storage/databases/main/events.py | 4 ++++ 3 files changed, 8 insertions(+) create mode 100644 changelog.d/8527.bugfix diff --git a/changelog.d/8527.bugfix b/changelog.d/8527.bugfix new file mode 100644 index 000000000000..727e0ba2992a --- /dev/null +++ b/changelog.d/8527.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in v1.7.0 that could cause Synapse to insert values from non-state `m.room.retention` events into the `room_retention` database table. diff --git a/synapse/events/validator.py b/synapse/events/validator.py index 9df35b54baff..5f9af8529be0 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py @@ -83,6 +83,9 @@ def _validate_retention(self, event): Args: event (FrozenEvent): The event to validate. """ + if not event.is_state(): + raise SynapseError(code=400, msg="must be a state event") + min_lifetime = event.content.get("min_lifetime") max_lifetime = event.content.get("max_lifetime") diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index fdb17745f69a..ba3b1769b0ed 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1270,6 +1270,10 @@ def _store_room_message_txn(self, txn, event): ) def _store_retention_policy_for_room_txn(self, txn, event): + if not event.is_state(): + logger.debug("Ignoring non-state m.room.retention event") + return + if hasattr(event, "content") and ( "min_lifetime" in event.content or "max_lifetime" in event.content ): From 921a3f8a59da0f8fe706a22627f464a74b54c992 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 14 Oct 2020 13:27:51 +0100 Subject: [PATCH 179/245] Fix not sending events over federation when using sharded event persisters (#8536) * Fix outbound federaion with multiple event persisters. We incorrectly notified federation senders that the minimum persisted stream position had advanced when we got an `RDATA` from an event persister. 
Notifying of federation senders already correctly happens in the notifier, so we just delete the offending line. * Change some interfaces to use RoomStreamToken. By enforcing use of `RoomStreamTokens` we make it less likely that people pass in random ints that they got from somewhere random. --- changelog.d/8536.bugfix | 1 + synapse/app/generic_worker.py | 4 ---- synapse/federation/send_queue.py | 2 +- synapse/federation/sender/__init__.py | 9 +++++++-- synapse/handlers/appservice.py | 11 +++++++---- synapse/notifier.py | 6 +++--- synapse/push/emailpusher.py | 8 +++++++- synapse/push/httppusher.py | 8 +++++++- synapse/push/pusherpool.py | 10 ++++++++-- tests/handlers/test_appservice.py | 13 ++++++++++--- 10 files changed, 51 insertions(+), 21 deletions(-) create mode 100644 changelog.d/8536.bugfix diff --git a/changelog.d/8536.bugfix b/changelog.d/8536.bugfix new file mode 100644 index 000000000000..8d238cc00853 --- /dev/null +++ b/changelog.d/8536.bugfix @@ -0,0 +1 @@ +Fix not sending events over federation when using sharded event writers. diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index d53181deb17b..1b511890aa36 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -790,10 +790,6 @@ async def process_replication_rows(self, stream_name, token, rows): send_queue.process_rows_for_federation(self.federation_sender, rows) await self.update_token(token) - # We also need to poke the federation sender when new events happen - elif stream_name == "events": - self.federation_sender.notify_new_events(token) - # ... and when new receipts happen elif stream_name == ReceiptsStream.NAME: await self._on_new_receipts(rows) diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 8e46957d15e2..5f1bf492c1d0 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -188,7 +188,7 @@ def _clear_queue_before_pos(self, position_to_delete): for key in keys[:i]: del self.edus[key] - def notify_new_events(self, current_id): + def notify_new_events(self, max_token): """As per FederationSender""" # We don't need to replicate this as it gets sent down a different # stream. diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index e33b29a42c60..604cfd1935cc 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -40,7 +40,7 @@ events_processed_counter, ) from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.types import ReadReceipt +from synapse.types import ReadReceipt, RoomStreamToken from synapse.util.metrics import Measure, measure_func logger = logging.getLogger(__name__) @@ -154,10 +154,15 @@ def _get_per_destination_queue(self, destination: str) -> PerDestinationQueue: self._per_destination_queues[destination] = queue return queue - def notify_new_events(self, current_id: int) -> None: + def notify_new_events(self, max_token: RoomStreamToken) -> None: """This gets called when we have some new events we might want to send out to other servers. """ + # We just use the minimum stream ordering and ignore the vector clock + # component. This is safe to do as long as we *always* ignore the vector + # clock components. 
+ current_id = max_token.stream + self._last_poked_id = max(current_id, self._last_poked_id) if self._is_processing: diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 9d4e87dad6a6..c8d5e580353d 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -27,6 +27,7 @@ event_processing_loop_room_count, ) from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.types import RoomStreamToken from synapse.util.metrics import Measure logger = logging.getLogger(__name__) @@ -47,15 +48,17 @@ def __init__(self, hs): self.current_max = 0 self.is_processing = False - async def notify_interested_services(self, current_id): + async def notify_interested_services(self, max_token: RoomStreamToken): """Notifies (pushes) all application services interested in this event. Pushing is done asynchronously, so this method won't block for any prolonged length of time. - - Args: - current_id(int): The current maximum ID. """ + # We just use the minimum stream ordering and ignore the vector clock + # component. This is safe to do as long as we *always* ignore the vector + # clock components. + current_id = max_token.stream + services = self.store.get_app_services() if not services or not self.notify_appservices: return diff --git a/synapse/notifier.py b/synapse/notifier.py index 13adeed01e5e..51c830c91ea2 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -319,19 +319,19 @@ def _on_updated_room_token(self, max_room_stream_token: RoomStreamToken): ) if self.federation_sender: - self.federation_sender.notify_new_events(max_room_stream_token.stream) + self.federation_sender.notify_new_events(max_room_stream_token) async def _notify_app_services(self, max_room_stream_token: RoomStreamToken): try: await self.appservice_handler.notify_interested_services( - max_room_stream_token.stream + max_room_stream_token ) except Exception: logger.exception("Error notifying application services of event") async def _notify_pusher_pool(self, max_room_stream_token: RoomStreamToken): try: - await self._pusher_pool.on_new_notifications(max_room_stream_token.stream) + await self._pusher_pool.on_new_notifications(max_room_stream_token) except Exception: logger.exception("Error pusher pool of event") diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index 28bd8ab7481e..c6763971ee37 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -18,6 +18,7 @@ from twisted.internet.error import AlreadyCalled, AlreadyCancelled from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.types import RoomStreamToken logger = logging.getLogger(__name__) @@ -91,7 +92,12 @@ def on_stop(self): pass self.timed_call = None - def on_new_notifications(self, max_stream_ordering): + def on_new_notifications(self, max_token: RoomStreamToken): + # We just use the minimum stream ordering and ignore the vector clock + # component. This is safe to do as long as we *always* ignore the vector + # clock components. 
+ max_stream_ordering = max_token.stream + if self.max_stream_ordering: self.max_stream_ordering = max( max_stream_ordering, self.max_stream_ordering diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 26706bf3e1ee..793d0db2d9c5 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -23,6 +23,7 @@ from synapse.logging import opentracing from synapse.metrics.background_process_metrics import run_as_background_process from synapse.push import PusherConfigException +from synapse.types import RoomStreamToken from . import push_rule_evaluator, push_tools @@ -114,7 +115,12 @@ def on_started(self, should_check_for_notifs): if should_check_for_notifs: self._start_processing() - def on_new_notifications(self, max_stream_ordering): + def on_new_notifications(self, max_token: RoomStreamToken): + # We just use the minimum stream ordering and ignore the vector clock + # component. This is safe to do as long as we *always* ignore the vector + # clock components. + max_stream_ordering = max_token.stream + self.max_stream_ordering = max( max_stream_ordering, self.max_stream_ordering or 0 ) diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 76150e117b2b..0080c68ce28e 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -24,6 +24,7 @@ from synapse.push.emailpusher import EmailPusher from synapse.push.httppusher import HttpPusher from synapse.push.pusher import PusherFactory +from synapse.types import RoomStreamToken from synapse.util.async_helpers import concurrently_execute if TYPE_CHECKING: @@ -186,11 +187,16 @@ async def remove_pushers_by_access_token(self, user_id, access_tokens): ) await self.remove_pusher(p["app_id"], p["pushkey"], p["user_name"]) - async def on_new_notifications(self, max_stream_id: int): + async def on_new_notifications(self, max_token: RoomStreamToken): if not self.pushers: # nothing to do here. return + # We just use the minimum stream ordering and ignore the vector clock + # component. This is safe to do as long as we *always* ignore the vector + # clock components. 
+ max_stream_id = max_token.stream + if max_stream_id < self._last_room_stream_id_seen: # Nothing to do return @@ -214,7 +220,7 @@ async def on_new_notifications(self, max_stream_id: int): if u in self.pushers: for p in self.pushers[u].values(): - p.on_new_notifications(max_stream_id) + p.on_new_notifications(max_token) except Exception: logger.exception("Exception in pusher on_new_notifications") diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index 2a0b7c1b56ec..ee4f3da31c83 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -18,6 +18,7 @@ from twisted.internet import defer from synapse.handlers.appservice import ApplicationServicesHandler +from synapse.types import RoomStreamToken from tests.test_utils import make_awaitable from tests.utils import MockClock @@ -61,7 +62,9 @@ def test_notify_interested_services(self): defer.succeed((0, [event])), defer.succeed((0, [])), ] - yield defer.ensureDeferred(self.handler.notify_interested_services(0)) + yield defer.ensureDeferred( + self.handler.notify_interested_services(RoomStreamToken(None, 0)) + ) self.mock_scheduler.submit_event_for_as.assert_called_once_with( interested_service, event ) @@ -80,7 +83,9 @@ def test_query_user_exists_unknown_user(self): defer.succeed((0, [event])), defer.succeed((0, [])), ] - yield defer.ensureDeferred(self.handler.notify_interested_services(0)) + yield defer.ensureDeferred( + self.handler.notify_interested_services(RoomStreamToken(None, 0)) + ) self.mock_as_api.query_user.assert_called_once_with(services[0], user_id) @defer.inlineCallbacks @@ -97,7 +102,9 @@ def test_query_user_exists_known_user(self): defer.succeed((0, [event])), defer.succeed((0, [])), ] - yield defer.ensureDeferred(self.handler.notify_interested_services(0)) + yield defer.ensureDeferred( + self.handler.notify_interested_services(RoomStreamToken(None, 0)) + ) self.assertFalse( self.mock_as_api.query_user.called, "query_user called when it shouldn't have been.", From 1264c8ac89e5bfa757acee8d26437f14621206a4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 14 Oct 2020 13:53:20 +0100 Subject: [PATCH 180/245] Add basic tests for sync/pagination with vector clock tokens. (#8488) These are tests for #8439 --- changelog.d/8488.misc | 1 + .../test_sharded_event_persister.py | 217 ++++++++++++++++++ tests/unittest.py | 32 ++- 3 files changed, 249 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8488.misc diff --git a/changelog.d/8488.misc b/changelog.d/8488.misc new file mode 100644 index 000000000000..237cb3b31135 --- /dev/null +++ b/changelog.d/8488.misc @@ -0,0 +1 @@ +Allow events to be sent to clients sooner when using sharded event persisters. diff --git a/tests/replication/test_sharded_event_persister.py b/tests/replication/test_sharded_event_persister.py index 6068d1490538..82cf033d4eca 100644 --- a/tests/replication/test_sharded_event_persister.py +++ b/tests/replication/test_sharded_event_persister.py @@ -14,8 +14,12 @@ # limitations under the License. 
import logging +from mock import patch + +from synapse.api.room_versions import RoomVersion from synapse.rest import admin from synapse.rest.client.v1 import login, room +from synapse.rest.client.v2_alpha import sync from tests.replication._base import BaseMultiWorkerStreamTestCase from tests.utils import USE_POSTGRES_FOR_TESTS @@ -36,6 +40,7 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase): admin.register_servlets_for_client_rest_resource, room.register_servlets, login.register_servlets, + sync.register_servlets, ] def prepare(self, reactor, clock, hs): @@ -43,6 +48,9 @@ def prepare(self, reactor, clock, hs): self.other_user_id = self.register_user("otheruser", "pass") self.other_access_token = self.login("otheruser", "pass") + self.room_creator = self.hs.get_room_creation_handler() + self.store = hs.get_datastore() + def default_config(self): conf = super().default_config() conf["redis"] = {"enabled": "true"} @@ -53,6 +61,29 @@ def default_config(self): } return conf + def _create_room(self, room_id: str, user_id: str, tok: str): + """Create a room with given room_id + """ + + # We control the room ID generation by patching out the + # `_generate_room_id` method + async def generate_room( + creator_id: str, is_public: bool, room_version: RoomVersion + ): + await self.store.store_room( + room_id=room_id, + room_creator_user_id=creator_id, + is_public=is_public, + room_version=room_version, + ) + return room_id + + with patch( + "synapse.handlers.room.RoomCreationHandler._generate_room_id" + ) as mock: + mock.side_effect = generate_room + self.helper.create_room_as(user_id, tok=tok) + def test_basic(self): """Simple test to ensure that multiple rooms can be created and joined, and that different rooms get handled by different instances. @@ -100,3 +131,189 @@ def test_basic(self): self.assertTrue(persisted_on_1) self.assertTrue(persisted_on_2) + + def test_vector_clock_token(self): + """Tests that using a stream token with a vector clock component works + correctly with basic /sync and /messages usage. + """ + + self.make_worker_hs( + "synapse.app.generic_worker", {"worker_name": "worker1"}, + ) + + worker_hs2 = self.make_worker_hs( + "synapse.app.generic_worker", {"worker_name": "worker2"}, + ) + + sync_hs = self.make_worker_hs( + "synapse.app.generic_worker", {"worker_name": "sync"}, + ) + + # Specially selected room IDs that get persisted on different workers. + room_id1 = "!foo:test" + room_id2 = "!baz:test" + + self.assertEqual( + self.hs.config.worker.events_shard_config.get_instance(room_id1), "worker1" + ) + self.assertEqual( + self.hs.config.worker.events_shard_config.get_instance(room_id2), "worker2" + ) + + user_id = self.register_user("user", "pass") + access_token = self.login("user", "pass") + + store = self.hs.get_datastore() + + # Create two room on the different workers. + self._create_room(room_id1, user_id, access_token) + self._create_room(room_id2, user_id, access_token) + + # The other user joins + self.helper.join( + room=room_id1, user=self.other_user_id, tok=self.other_access_token + ) + self.helper.join( + room=room_id2, user=self.other_user_id, tok=self.other_access_token + ) + + # Do an initial sync so that we're up to date. + request, channel = self.make_request("GET", "/sync", access_token=access_token) + self.render_on_worker(sync_hs, request) + next_batch = channel.json_body["next_batch"] + + # We now gut wrench into the events stream MultiWriterIdGenerator on + # worker2 to mimic it getting stuck persisting an event. 
This ensures + # that when we send an event on worker1 we end up in a state where + # worker2 events stream position lags that on worker1, resulting in a + # RoomStreamToken with a non-empty instance map component. + # + # Worker2's event stream position will not advance until we call + # __aexit__ again. + actx = worker_hs2.get_datastore()._stream_id_gen.get_next() + self.get_success(actx.__aenter__()) + + response = self.helper.send(room_id1, body="Hi!", tok=self.other_access_token) + first_event_in_room1 = response["event_id"] + + # Assert that the current stream token has an instance map component, as + # we are trying to test vector clock tokens. + room_stream_token = store.get_room_max_token() + self.assertNotEqual(len(room_stream_token.instance_map), 0) + + # Check that syncing still gets the new event, despite the gap in the + # stream IDs. + request, channel = self.make_request( + "GET", "/sync?since={}".format(next_batch), access_token=access_token + ) + self.render_on_worker(sync_hs, request) + + # We should only see the new event and nothing else + self.assertIn(room_id1, channel.json_body["rooms"]["join"]) + self.assertNotIn(room_id2, channel.json_body["rooms"]["join"]) + + events = channel.json_body["rooms"]["join"][room_id1]["timeline"]["events"] + self.assertListEqual( + [first_event_in_room1], [event["event_id"] for event in events] + ) + + # Get the next batch and makes sure its a vector clock style token. + vector_clock_token = channel.json_body["next_batch"] + self.assertTrue(vector_clock_token.startswith("m")) + + # Now that we've got a vector clock token we finish the fake persisting + # an event we started above. + self.get_success(actx.__aexit__(None, None, None)) + + # Now try and send an event to the other rooom so that we can test that + # the vector clock style token works as a `since` token. + response = self.helper.send(room_id2, body="Hi!", tok=self.other_access_token) + first_event_in_room2 = response["event_id"] + + request, channel = self.make_request( + "GET", + "/sync?since={}".format(vector_clock_token), + access_token=access_token, + ) + self.render_on_worker(sync_hs, request) + + self.assertNotIn(room_id1, channel.json_body["rooms"]["join"]) + self.assertIn(room_id2, channel.json_body["rooms"]["join"]) + + events = channel.json_body["rooms"]["join"][room_id2]["timeline"]["events"] + self.assertListEqual( + [first_event_in_room2], [event["event_id"] for event in events] + ) + + next_batch = channel.json_body["next_batch"] + + # We also want to test that the vector clock style token works with + # pagination. We do this by sending a couple of new events into the room + # and syncing again to get a prev_batch token for each room, then + # paginating from there back to the vector clock token. + self.helper.send(room_id1, body="Hi again!", tok=self.other_access_token) + self.helper.send(room_id2, body="Hi again!", tok=self.other_access_token) + + request, channel = self.make_request( + "GET", "/sync?since={}".format(next_batch), access_token=access_token + ) + self.render_on_worker(sync_hs, request) + + prev_batch1 = channel.json_body["rooms"]["join"][room_id1]["timeline"][ + "prev_batch" + ] + prev_batch2 = channel.json_body["rooms"]["join"][room_id2]["timeline"][ + "prev_batch" + ] + + # Paginating back in the first room should not produce any results, as + # no events have happened in it. This tests that we are correctly + # filtering results based on the vector clock portion. 
+ request, channel = self.make_request( + "GET", + "/rooms/{}/messages?from={}&to={}&dir=b".format( + room_id1, prev_batch1, vector_clock_token + ), + access_token=access_token, + ) + self.render_on_worker(sync_hs, request) + self.assertListEqual([], channel.json_body["chunk"]) + + # Paginating back on the second room should produce the first event + # again. This tests that pagination isn't completely broken. + request, channel = self.make_request( + "GET", + "/rooms/{}/messages?from={}&to={}&dir=b".format( + room_id2, prev_batch2, vector_clock_token + ), + access_token=access_token, + ) + self.render_on_worker(sync_hs, request) + self.assertEqual(len(channel.json_body["chunk"]), 1) + self.assertEqual( + channel.json_body["chunk"][0]["event_id"], first_event_in_room2 + ) + + # Paginating forwards should give the same results + request, channel = self.make_request( + "GET", + "/rooms/{}/messages?from={}&to={}&dir=f".format( + room_id1, vector_clock_token, prev_batch1 + ), + access_token=access_token, + ) + self.render_on_worker(sync_hs, request) + self.assertListEqual([], channel.json_body["chunk"]) + + request, channel = self.make_request( + "GET", + "/rooms/{}/messages?from={}&to={}&dir=f".format( + room_id2, vector_clock_token, prev_batch2, + ), + access_token=access_token, + ) + self.render_on_worker(sync_hs, request) + self.assertEqual(len(channel.json_body["chunk"]), 1) + self.assertEqual( + channel.json_body["chunk"][0]["event_id"], first_event_in_room2 + ) diff --git a/tests/unittest.py b/tests/unittest.py index 6c1661c92c14..040b126a27bc 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -20,7 +20,7 @@ import inspect import logging import time -from typing import Optional, Tuple, Type, TypeVar, Union +from typing import Optional, Tuple, Type, TypeVar, Union, overload from mock import Mock, patch @@ -364,6 +364,36 @@ def prepare(self, reactor, clock, homeserver): Function to optionally be overridden in subclasses. """ + # Annoyingly mypy doesn't seem to pick up the fact that T is SynapseRequest + # when the `request` arg isn't given, so we define an explicit override to + # cover that case. + @overload + def make_request( + self, + method: Union[bytes, str], + path: Union[bytes, str], + content: Union[bytes, dict] = b"", + access_token: Optional[str] = None, + shorthand: bool = True, + federation_auth_origin: str = None, + content_is_form: bool = False, + ) -> Tuple[SynapseRequest, FakeChannel]: + ... + + @overload + def make_request( + self, + method: Union[bytes, str], + path: Union[bytes, str], + content: Union[bytes, dict] = b"", + access_token: Optional[str] = None, + request: Type[T] = SynapseRequest, + shorthand: bool = True, + federation_auth_origin: str = None, + content_is_form: bool = False, + ) -> Tuple[T, FakeChannel]: + ... 
+ def make_request( self, method: Union[bytes, str], From 9e66f3761cdfe8adcd45be37466c86bdbfc57a35 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 14 Oct 2020 15:00:49 +0100 Subject: [PATCH 181/245] Update documentation on retention policies limits (#8529) * Update documentation on retention policies limits Document the changes from https://github.com/matrix-org/synapse/pull/8104 --- changelog.d/8529.doc | 1 + docs/message_retention_policies.md | 34 +++++++++++++++++++----------- 2 files changed, 23 insertions(+), 12 deletions(-) create mode 100644 changelog.d/8529.doc diff --git a/changelog.d/8529.doc b/changelog.d/8529.doc new file mode 100644 index 000000000000..6e710e6527bb --- /dev/null +++ b/changelog.d/8529.doc @@ -0,0 +1 @@ +Document the new behaviour of the `allowed_lifetime_min` and `allowed_lifetime_max` settings in the room retention configuration. diff --git a/docs/message_retention_policies.md b/docs/message_retention_policies.md index 1dd60bdad952..75d2028e1740 100644 --- a/docs/message_retention_policies.md +++ b/docs/message_retention_policies.md @@ -136,24 +136,34 @@ the server's database. ### Lifetime limits -**Note: this feature is mainly useful within a closed federation or on -servers that don't federate, because there currently is no way to -enforce these limits in an open federation.** - -Server admins can restrict the values their local users are allowed to -use for both `min_lifetime` and `max_lifetime`. These limits can be -defined as such in the `retention` section of the configuration file: +Server admins can set limits on the values of `max_lifetime` to use when +purging old events in a room. These limits can be defined as such in the +`retention` section of the configuration file: ```yaml allowed_lifetime_min: 1d allowed_lifetime_max: 1y ``` -Here, `allowed_lifetime_min` is the lowest value a local user can set -for both `min_lifetime` and `max_lifetime`, and `allowed_lifetime_max` -is the highest value. Both parameters are optional (e.g. setting -`allowed_lifetime_min` but not `allowed_lifetime_max` only enforces a -minimum and no maximum). +The limits are considered when running purge jobs. If necessary, the +effective value of `max_lifetime` will be brought between +`allowed_lifetime_min` and `allowed_lifetime_max` (inclusive). +This means that, if the value of `max_lifetime` defined in the room's state +is lower than `allowed_lifetime_min`, the value of `allowed_lifetime_min` +will be used instead. Likewise, if the value of `max_lifetime` is higher +than `allowed_lifetime_max`, the value of `allowed_lifetime_max` will be +used instead. + +In the example above, we ensure Synapse never deletes events that are less +than one day old, and that it always deletes events that are over a year +old. + +If a default policy is set, and its `max_lifetime` value is lower than +`allowed_lifetime_min` or higher than `allowed_lifetime_max`, the same +process applies. + +Both parameters are optional; if one is omitted Synapse won't use it to +adjust the effective value of `max_lifetime`. Like other settings in this section, these parameters can be expressed either as a duration or as a number of milliseconds. 
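The clamping described above can be summarised in a short sketch (illustrative only; the helper name and constants below are not part of Synapse):

```python
# Effective max_lifetime used by a purge job after applying the configured
# bounds; either bound may be omitted (None), in which case it is ignored.
def effective_max_lifetime(max_lifetime, allowed_min=None, allowed_max=None):
    if allowed_min is not None and max_lifetime < allowed_min:
        max_lifetime = allowed_min
    if allowed_max is not None and max_lifetime > allowed_max:
        max_lifetime = allowed_max
    return max_lifetime

ONE_DAY_MS = 24 * 60 * 60 * 1000
ONE_YEAR_MS = 365 * ONE_DAY_MS

# With allowed_lifetime_min=1d and allowed_lifetime_max=1y: a one-hour room
# policy is raised to one day, and a two-year policy is lowered to one year.
assert effective_max_lifetime(60 * 60 * 1000, ONE_DAY_MS, ONE_YEAR_MS) == ONE_DAY_MS
assert effective_max_lifetime(2 * ONE_YEAR_MS, ONE_DAY_MS, ONE_YEAR_MS) == ONE_YEAR_MS
```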
From 1cf4a68108a77607c8aff1ee8f6216df251c4e7e Mon Sep 17 00:00:00 2001 From: Christopher May-Townsend Date: Wed, 14 Oct 2020 15:28:59 +0100 Subject: [PATCH 182/245] Add note to manhole.md about bind_address when using with docker (#8526) Signed-off-by: Christopher May-Townsend --- changelog.d/8526.doc | 1 + docs/manhole.md | 46 +++++++++++++++++++++++++++++++++++++------- 2 files changed, 40 insertions(+), 7 deletions(-) create mode 100644 changelog.d/8526.doc diff --git a/changelog.d/8526.doc b/changelog.d/8526.doc new file mode 100644 index 000000000000..cbf48680c12f --- /dev/null +++ b/changelog.d/8526.doc @@ -0,0 +1 @@ +Added note about docker in manhole.md regarding which ip address to bind to. Contributed by @Maquis196. diff --git a/docs/manhole.md b/docs/manhole.md index 75b6ae40e0cb..37d1d7823c00 100644 --- a/docs/manhole.md +++ b/docs/manhole.md @@ -5,22 +5,54 @@ The "manhole" allows server administrators to access a Python shell on a running Synapse installation. This is a very powerful mechanism for administration and debugging. +**_Security Warning_** + +Note that this will give administrative access to synapse to **all users** with +shell access to the server. It should therefore **not** be enabled in +environments where untrusted users have shell access. + +*** + To enable it, first uncomment the `manhole` listener configuration in -`homeserver.yaml`: +`homeserver.yaml`. The configuration is slightly different if you're using docker. + +#### Docker config + +If you are using Docker, set `bind_addresses` to `['0.0.0.0']` as shown: ```yaml listeners: - port: 9000 - bind_addresses: ['::1', '127.0.0.1'] + bind_addresses: ['0.0.0.0'] type: manhole ``` -(`bind_addresses` in the above is important: it ensures that access to the -manhole is only possible for local users). +When using `docker run` to start the server, you will then need to change the command to the following to include the +`manhole` port forwarding. The `-p 127.0.0.1:9000:9000` below is important: it +ensures that access to the `manhole` is only possible for local users. -Note that this will give administrative access to synapse to **all users** with -shell access to the server. It should therefore **not** be enabled in -environments where untrusted users have shell access. +```bash +docker run -d --name synapse \ + --mount type=volume,src=synapse-data,dst=/data \ + -p 8008:8008 \ + -p 127.0.0.1:9000:9000 \ + matrixdotorg/synapse:latest +``` + +#### Native config + +If you are not using docker, set `bind_addresses` to `['::1', '127.0.0.1']` as shown. +The `bind_addresses` in the example below is important: it ensures that access to the +`manhole` is only possible for local users). + +```yaml +listeners: + - port: 9000 + bind_addresses: ['::1', '127.0.0.1'] + type: manhole +``` + +#### Accessing synapse manhole Then restart synapse, and point an ssh client at port 9000 on localhost, using the username `matrix`: From 618d405a322590d022d839b6d72ba51e992a71c3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 14 Oct 2020 15:40:06 +0100 Subject: [PATCH 183/245] Remove racey assertion in MultiWriterIDGenerator (#8530) We asserted that the IDs returned by postgres sequence was greater than any we had seen, however this is technically racey as we may update the current positions out of order. We now assert that the sequences are correct on startup, so the assertion is no longer really required, so we remove them. 
--- changelog.d/8530.bugfix | 1 + synapse/storage/util/id_generators.py | 7 ------- 2 files changed, 1 insertion(+), 7 deletions(-) create mode 100644 changelog.d/8530.bugfix diff --git a/changelog.d/8530.bugfix b/changelog.d/8530.bugfix new file mode 100644 index 000000000000..443d88424ead --- /dev/null +++ b/changelog.d/8530.bugfix @@ -0,0 +1 @@ +Fix rare bug where sending an event would fail due to a racey assertion. diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 3d8da48f2d65..02d71302ea5e 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -618,14 +618,7 @@ async def __aenter__(self) -> Union[int, List[int]]: db_autocommit=True, ) - # Assert the fetched ID is actually greater than any ID we've already - # seen. If not, then the sequence and table have got out of sync - # somehow. with self.id_gen._lock: - assert max(self.id_gen._current_positions.values(), default=0) < min( - self.stream_ids - ) - self.id_gen._unfinished_ids.update(self.stream_ids) if self.multiple_ids is None: From 19b15d63e80dd1d925ff78158f7a191427d6403f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 14 Oct 2020 15:50:59 +0100 Subject: [PATCH 184/245] Use autocommit mode for single statement DB functions. (#8542) Autocommit means that we don't wrap the functions in transactions, and instead get executed directly. Introduced in #8456. This will help: 1. reduce the number of `could not serialize access due to concurrent delete` errors that we see (though there are a few functions that often cause serialization errors that we don't fix here); 2. improve the DB performance, as it no longer needs to deal with the overhead of `REPEATABLE READ` isolation levels; and 3. improve wall clock speed of these functions, as we no longer need to send `BEGIN` and `COMMIT` to the DB. Some notes about the differences between autocommit mode and our default `REPEATABLE READ` transactions: 1. Currently `autocommit` only applies when using PostgreSQL, and is ignored when using SQLite (due to silliness with [Twisted DB classes](https://twistedmatrix.com/trac/ticket/9998)). 2. Autocommit functions may get retried on error, which means they can get applied *twice* (or more) to the DB (since they are not in a transaction the previous call would not get rolled back). This means that the functions need to be idempotent (or otherwise not care about being called multiple times). Read queries, simple deletes, and updates/upserts that replace rows (rather than generating new values from existing rows) are all idempotent. 3. Autocommit functions no longer get executed in [`REPEATABLE READ`](https://www.postgresql.org/docs/current/transaction-iso.html) isolation level, and so data can change queries, which is fine for single statement queries. --- changelog.d/8542.misc | 1 + synapse/storage/database.py | 99 +++++++++++++++++-- synapse/storage/databases/main/keys.py | 5 +- .../storage/databases/main/transactions.py | 76 ++++++++------ .../storage/databases/main/user_directory.py | 45 ++++----- 5 files changed, 156 insertions(+), 70 deletions(-) create mode 100644 changelog.d/8542.misc diff --git a/changelog.d/8542.misc b/changelog.d/8542.misc new file mode 100644 index 000000000000..63149fd9b982 --- /dev/null +++ b/changelog.d/8542.misc @@ -0,0 +1 @@ +Improve database performance by executing more queries without starting transactions. 
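As a hedged illustration of the pattern applied below (the table and column names are examples chosen for clarity, not something this patch touches): an idempotent single-statement helper opts into autocommit by passing `db_autocommit=True` to `runInteraction`, which consumes that keyword itself rather than forwarding it to the transaction function.

```python
# A plain read is idempotent, so it is safe to retry even though autocommit
# means there is no transaction to roll back if it fails part-way.
async def get_displayname(db_pool, user_id):
    return await db_pool.runInteraction(
        "get_displayname",
        db_pool.simple_select_one_onecol_txn,
        "profiles",            # table
        {"user_id": user_id},  # keyvalues
        "displayname",         # retcol
        allow_none=True,
        db_autocommit=True,    # handled by runInteraction, not the txn function
    )
```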
diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 0ba3a025cf1c..763722d6bce6 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -893,6 +893,12 @@ async def simple_upsert( attempts = 0 while True: try: + # We can autocommit if we are going to use native upserts + autocommit = ( + self.engine.can_native_upsert + and table not in self._unsafe_to_upsert_tables + ) + return await self.runInteraction( desc, self.simple_upsert_txn, @@ -901,6 +907,7 @@ async def simple_upsert( values, insertion_values, lock=lock, + db_autocommit=autocommit, ) except self.engine.module.IntegrityError as e: attempts += 1 @@ -1063,6 +1070,43 @@ def simple_upsert_txn_native_upsert( ) txn.execute(sql, list(allvalues.values())) + async def simple_upsert_many( + self, + table: str, + key_names: Collection[str], + key_values: Collection[Iterable[Any]], + value_names: Collection[str], + value_values: Iterable[Iterable[Any]], + desc: str, + ) -> None: + """ + Upsert, many times. + + Args: + table: The table to upsert into + key_names: The key column names. + key_values: A list of each row's key column values. + value_names: The value column names + value_values: A list of each row's value column values. + Ignored if value_names is empty. + """ + + # We can autocommit if we are going to use native upserts + autocommit = ( + self.engine.can_native_upsert and table not in self._unsafe_to_upsert_tables + ) + + return await self.runInteraction( + desc, + self.simple_upsert_many_txn, + table, + key_names, + key_values, + value_names, + value_values, + db_autocommit=autocommit, + ) + def simple_upsert_many_txn( self, txn: LoggingTransaction, @@ -1214,7 +1258,13 @@ async def simple_select_one( desc: description of the transaction, for logging and metrics """ return await self.runInteraction( - desc, self.simple_select_one_txn, table, keyvalues, retcols, allow_none + desc, + self.simple_select_one_txn, + table, + keyvalues, + retcols, + allow_none, + db_autocommit=True, ) @overload @@ -1265,6 +1315,7 @@ async def simple_select_one_onecol( keyvalues, retcol, allow_none=allow_none, + db_autocommit=True, ) @overload @@ -1346,7 +1397,12 @@ async def simple_select_onecol( Results in a list """ return await self.runInteraction( - desc, self.simple_select_onecol_txn, table, keyvalues, retcol + desc, + self.simple_select_onecol_txn, + table, + keyvalues, + retcol, + db_autocommit=True, ) async def simple_select_list( @@ -1371,7 +1427,12 @@ async def simple_select_list( A list of dictionaries. 
""" return await self.runInteraction( - desc, self.simple_select_list_txn, table, keyvalues, retcols + desc, + self.simple_select_list_txn, + table, + keyvalues, + retcols, + db_autocommit=True, ) @classmethod @@ -1450,6 +1511,7 @@ async def simple_select_many_batch( chunk, keyvalues, retcols, + db_autocommit=True, ) results.extend(rows) @@ -1548,7 +1610,12 @@ async def simple_update_one( desc: description of the transaction, for logging and metrics """ await self.runInteraction( - desc, self.simple_update_one_txn, table, keyvalues, updatevalues + desc, + self.simple_update_one_txn, + table, + keyvalues, + updatevalues, + db_autocommit=True, ) @classmethod @@ -1607,7 +1674,9 @@ async def simple_delete_one( keyvalues: dict of column names and values to select the row with desc: description of the transaction, for logging and metrics """ - await self.runInteraction(desc, self.simple_delete_one_txn, table, keyvalues) + await self.runInteraction( + desc, self.simple_delete_one_txn, table, keyvalues, db_autocommit=True, + ) @staticmethod def simple_delete_one_txn( @@ -1646,7 +1715,9 @@ async def simple_delete( Returns: The number of deleted rows. """ - return await self.runInteraction(desc, self.simple_delete_txn, table, keyvalues) + return await self.runInteraction( + desc, self.simple_delete_txn, table, keyvalues, db_autocommit=True + ) @staticmethod def simple_delete_txn( @@ -1694,7 +1765,13 @@ async def simple_delete_many( Number rows deleted """ return await self.runInteraction( - desc, self.simple_delete_many_txn, table, column, iterable, keyvalues + desc, + self.simple_delete_many_txn, + table, + column, + iterable, + keyvalues, + db_autocommit=True, ) @staticmethod @@ -1860,7 +1937,13 @@ async def simple_search_list( """ return await self.runInteraction( - desc, self.simple_search_list_txn, table, term, col, retcols + desc, + self.simple_search_list_txn, + table, + term, + col, + retcols, + db_autocommit=True, ) @classmethod diff --git a/synapse/storage/databases/main/keys.py b/synapse/storage/databases/main/keys.py index ad43bb05abb5..f8f4bb9b3fb9 100644 --- a/synapse/storage/databases/main/keys.py +++ b/synapse/storage/databases/main/keys.py @@ -122,9 +122,7 @@ async def store_server_verify_keys( # param, which is itself the 2-tuple (server_name, key_id). 
invalidations.append((server_name, key_id)) - await self.db_pool.runInteraction( - "store_server_verify_keys", - self.db_pool.simple_upsert_many_txn, + await self.db_pool.simple_upsert_many( table="server_signature_keys", key_names=("server_name", "key_id"), key_values=key_values, @@ -135,6 +133,7 @@ async def store_server_verify_keys( "verify_key", ), value_values=value_values, + desc="store_server_verify_keys", ) invalidate = self._get_server_verify_key.invalidate diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index 7d460902675b..59207cadd429 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -208,42 +208,56 @@ async def set_destination_retry_timings( """ self._destination_retry_cache.pop(destination, None) - return await self.db_pool.runInteraction( - "set_destination_retry_timings", - self._set_destination_retry_timings, - destination, - failure_ts, - retry_last_ts, - retry_interval, - ) + if self.database_engine.can_native_upsert: + return await self.db_pool.runInteraction( + "set_destination_retry_timings", + self._set_destination_retry_timings_native, + destination, + failure_ts, + retry_last_ts, + retry_interval, + db_autocommit=True, # Safe as its a single upsert + ) + else: + return await self.db_pool.runInteraction( + "set_destination_retry_timings", + self._set_destination_retry_timings_emulated, + destination, + failure_ts, + retry_last_ts, + retry_interval, + ) - def _set_destination_retry_timings( + def _set_destination_retry_timings_native( self, txn, destination, failure_ts, retry_last_ts, retry_interval ): + assert self.database_engine.can_native_upsert + + # Upsert retry time interval if retry_interval is zero (i.e. we're + # resetting it) or greater than the existing retry interval. + # + # WARNING: This is executed in autocommit, so we shouldn't add any more + # SQL calls in here (without being very careful). + sql = """ + INSERT INTO destinations ( + destination, failure_ts, retry_last_ts, retry_interval + ) + VALUES (?, ?, ?, ?) + ON CONFLICT (destination) DO UPDATE SET + failure_ts = EXCLUDED.failure_ts, + retry_last_ts = EXCLUDED.retry_last_ts, + retry_interval = EXCLUDED.retry_interval + WHERE + EXCLUDED.retry_interval = 0 + OR destinations.retry_interval IS NULL + OR destinations.retry_interval < EXCLUDED.retry_interval + """ - if self.database_engine.can_native_upsert: - # Upsert retry time interval if retry_interval is zero (i.e. we're - # resetting it) or greater than the existing retry interval. - - sql = """ - INSERT INTO destinations ( - destination, failure_ts, retry_last_ts, retry_interval - ) - VALUES (?, ?, ?, ?) 
- ON CONFLICT (destination) DO UPDATE SET - failure_ts = EXCLUDED.failure_ts, - retry_last_ts = EXCLUDED.retry_last_ts, - retry_interval = EXCLUDED.retry_interval - WHERE - EXCLUDED.retry_interval = 0 - OR destinations.retry_interval IS NULL - OR destinations.retry_interval < EXCLUDED.retry_interval - """ - - txn.execute(sql, (destination, failure_ts, retry_last_ts, retry_interval)) - - return + txn.execute(sql, (destination, failure_ts, retry_last_ts, retry_interval)) + def _set_destination_retry_timings_emulated( + self, txn, destination, failure_ts, retry_last_ts, retry_interval + ): self.database_engine.lock_table(txn, "destinations") # We need to be careful here as the data may have changed from under us diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 5a390ff2f612..d87ceec6da86 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -480,21 +480,16 @@ async def add_users_who_share_private_room( user_id_tuples: iterable of 2-tuple of user IDs. """ - def _add_users_who_share_room_txn(txn): - self.db_pool.simple_upsert_many_txn( - txn, - table="users_who_share_private_rooms", - key_names=["user_id", "other_user_id", "room_id"], - key_values=[ - (user_id, other_user_id, room_id) - for user_id, other_user_id in user_id_tuples - ], - value_names=(), - value_values=None, - ) - - await self.db_pool.runInteraction( - "add_users_who_share_room", _add_users_who_share_room_txn + await self.db_pool.simple_upsert_many( + table="users_who_share_private_rooms", + key_names=["user_id", "other_user_id", "room_id"], + key_values=[ + (user_id, other_user_id, room_id) + for user_id, other_user_id in user_id_tuples + ], + value_names=(), + value_values=None, + desc="add_users_who_share_room", ) async def add_users_in_public_rooms( @@ -508,19 +503,13 @@ async def add_users_in_public_rooms( user_ids """ - def _add_users_in_public_rooms_txn(txn): - - self.db_pool.simple_upsert_many_txn( - txn, - table="users_in_public_rooms", - key_names=["user_id", "room_id"], - key_values=[(user_id, room_id) for user_id in user_ids], - value_names=(), - value_values=None, - ) - - await self.db_pool.runInteraction( - "add_users_in_public_rooms", _add_users_in_public_rooms_txn + await self.db_pool.simple_upsert_many( + table="users_in_public_rooms", + key_names=["user_id", "room_id"], + key_values=[(user_id, room_id) for user_id in user_ids], + value_names=(), + value_values=None, + desc="add_users_in_public_rooms", ) async def delete_all_from_user_dir(self) -> None: From 7eff59ec91e59140c375b43a6dac05b833ab0051 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 14 Oct 2020 19:40:53 +0100 Subject: [PATCH 185/245] Add some more type annotations to Cache --- .../replication/slave/storage/client_ips.py | 2 +- synapse/util/caches/descriptors.py | 81 ++++++++++++++----- synapse/util/caches/lrucache.py | 3 +- 3 files changed, 62 insertions(+), 24 deletions(-) diff --git a/synapse/replication/slave/storage/client_ips.py b/synapse/replication/slave/storage/client_ips.py index 1f8dafe7ea40..273d627fad18 100644 --- a/synapse/replication/slave/storage/client_ips.py +++ b/synapse/replication/slave/storage/client_ips.py @@ -26,7 +26,7 @@ def __init__(self, database: DatabasePool, db_conn, hs): self.client_ip_last_seen = Cache( name="client_ip_last_seen", keylen=4, max_entries=50000 - ) + ) # type: Cache[tuple, int] async def insert_client_ip(self, user_id, access_token, ip, user_agent, 
device_id): now = int(self._clock.time_msec()) diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 98b34f222318..14458bc20fd1 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -13,12 +13,23 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import enum import functools import inspect import logging import threading -from typing import Any, Callable, Generic, Optional, Tuple, TypeVar, Union, cast +from typing import ( + Any, + Callable, + Generic, + Iterable, + MutableMapping, + Optional, + Tuple, + TypeVar, + Union, + cast, +) from weakref import WeakValueDictionary from prometheus_client import Gauge @@ -38,6 +49,8 @@ CacheKey = Union[Tuple, Any] F = TypeVar("F", bound=Callable[..., Any]) +KT = TypeVar("KT") +VT = TypeVar("VT") class _CachedFunction(Generic[F]): @@ -61,13 +74,19 @@ class _CachedFunction(Generic[F]): ["name"], ) -_CacheSentinel = object() + +class _Sentinel(enum.Enum): + # defining a sentinel in this way allows mypy to correctly handle the + # type of a dictionary lookup. + sentinel = object() class CacheEntry: __slots__ = ["deferred", "callbacks", "invalidated"] - def __init__(self, deferred, callbacks): + def __init__( + self, deferred: ObservableDeferred, callbacks: Iterable[Callable[[], None]] + ): self.deferred = deferred self.callbacks = set(callbacks) self.invalidated = False @@ -80,7 +99,13 @@ def invalidate(self): self.callbacks.clear() -class Cache: +class Cache(Generic[KT, VT]): + """Wraps an LruCache, adding support for Deferred results. + + It expects that each entry added with set() will be a Deferred; likewise get() + may return an ObservableDeferred. + """ + __slots__ = ( "cache", "name", @@ -103,19 +128,23 @@ def __init__( Args: name: The name of the cache max_entries: Maximum amount of entries that the cache will hold - keylen: The length of the tuple used as the cache key + keylen: The length of the tuple used as the cache key. Ignored unless + `tree` is True. tree: Use a TreeCache instead of a dict as the underlying cache type iterable: If True, count each item in the cached object as an entry, rather than each cached object apply_cache_factor_from_config: Whether cache factors specified in the config file affect `max_entries` - - Returns: - Cache """ cache_type = TreeCache if tree else dict - self._pending_deferred_cache = cache_type() + # _pending_deferred_cache maps from the key value to a `CacheEntry` object. + self._pending_deferred_cache = ( + cache_type() + ) # type: MutableMapping[KT, CacheEntry] + + # cache is used for completed results and maps to the result itself, rather than + # a Deferred. self.cache = LruCache( max_size=max_entries, keylen=keylen, @@ -155,7 +184,13 @@ def check_thread(self): "Cache objects can only be accessed from the main thread" ) - def get(self, key, default=_CacheSentinel, callback=None, update_metrics=True): + def get( + self, + key: KT, + default=_Sentinel.sentinel, + callback: Optional[Callable[[], None]] = None, + update_metrics: bool = True, + ): """Looks the key up in the caches. 
Args: @@ -166,30 +201,32 @@ def get(self, key, default=_CacheSentinel, callback=None, update_metrics=True): update_metrics (bool): whether to update the cache hit rate metrics Returns: - Either an ObservableDeferred or the raw result + Either an ObservableDeferred or the result itself """ callbacks = [callback] if callback else [] - val = self._pending_deferred_cache.get(key, _CacheSentinel) - if val is not _CacheSentinel: + val = self._pending_deferred_cache.get(key, _Sentinel.sentinel) + if val is not _Sentinel.sentinel: val.callbacks.update(callbacks) if update_metrics: self.metrics.inc_hits() return val.deferred - val = self.cache.get(key, _CacheSentinel, callbacks=callbacks) - if val is not _CacheSentinel: + val = self.cache.get(key, _Sentinel.sentinel, callbacks=callbacks) + if val is not _Sentinel.sentinel: self.metrics.inc_hits() return val if update_metrics: self.metrics.inc_misses() - if default is _CacheSentinel: + if default is _Sentinel.sentinel: raise KeyError() else: return default - def set(self, key, value, callback=None): + def set( + self, key: KT, value: defer.Deferred, callback: Optional[Callable[[], None]] = None + ) -> ObservableDeferred: if not isinstance(value, defer.Deferred): raise TypeError("not a Deferred") @@ -248,7 +285,7 @@ def eb(_fail): observer.addCallbacks(cb, eb) return observable - def prefill(self, key, value, callback=None): + def prefill(self, key: KT, value: VT, callback: Callable[[], None] = None): callbacks = [callback] if callback else [] self.cache.set(key, value, callbacks=callbacks) @@ -267,7 +304,7 @@ def invalidate(self, key): if entry: entry.invalidate() - def invalidate_many(self, key): + def invalidate_many(self, key: KT): self.check_thread() if not isinstance(key, tuple): raise TypeError("The cache key must be a tuple not %r" % (type(key),)) @@ -275,7 +312,7 @@ def invalidate_many(self, key): # if we have a pending lookup for this key, remove it from the # _pending_deferred_cache, as above - entry_dict = self._pending_deferred_cache.pop(key, None) + entry_dict = self._pending_deferred_cache.pop(cast(KT, key), None) if entry_dict is not None: for entry in iterate_tree_cache_entry(entry_dict): entry.invalidate() @@ -396,7 +433,7 @@ def __get__(self, obj, owner): keylen=self.num_args, tree=self.tree, iterable=self.iterable, - ) + ) # type: Cache[Tuple, Any] def get_cache_key_gen(args, kwargs): """Given some args/kwargs return a generator that resolves into diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 4bc1a67b58a2..33eae2b7c4fe 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -64,7 +64,8 @@ def __init__( Args: max_size: The maximum amount of entries the cache can hold - keylen: The length of the tuple used as the cache key + keylen: The length of the tuple used as the cache key. Ignored unless + cache_type is `TreeCache`. cache_type (type): type of underlying cache to be used. 
Typically one of dict From 9f87da0a84f93096e228f01f1139c9b5db8ea3d4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 14 Oct 2020 19:43:37 +0100 Subject: [PATCH 186/245] Rename Cache->DeferredCache --- .../replication/slave/storage/client_ips.py | 6 +++--- synapse/storage/databases/main/client_ips.py | 4 ++-- synapse/storage/databases/main/devices.py | 4 ++-- .../storage/databases/main/events_worker.py | 4 ++-- synapse/util/caches/descriptors.py | 19 ++++++++++++------- tests/storage/test__base.py | 10 +++++----- tests/test_metrics.py | 4 ++-- tests/util/caches/test_descriptors.py | 4 ++-- 8 files changed, 30 insertions(+), 25 deletions(-) diff --git a/synapse/replication/slave/storage/client_ips.py b/synapse/replication/slave/storage/client_ips.py index 273d627fad18..40ea78a35317 100644 --- a/synapse/replication/slave/storage/client_ips.py +++ b/synapse/replication/slave/storage/client_ips.py @@ -15,7 +15,7 @@ from synapse.storage.database import DatabasePool from synapse.storage.databases.main.client_ips import LAST_SEEN_GRANULARITY -from synapse.util.caches.descriptors import Cache +from synapse.util.caches.descriptors import DeferredCache from ._base import BaseSlavedStore @@ -24,9 +24,9 @@ class SlavedClientIpStore(BaseSlavedStore): def __init__(self, database: DatabasePool, db_conn, hs): super().__init__(database, db_conn, hs) - self.client_ip_last_seen = Cache( + self.client_ip_last_seen = DeferredCache( name="client_ip_last_seen", keylen=4, max_entries=50000 - ) # type: Cache[tuple, int] + ) # type: DeferredCache[tuple, int] async def insert_client_ip(self, user_id, access_token, ip, user_agent, device_id): now = int(self._clock.time_msec()) diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index a25a88844384..ad32701d3601 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -19,7 +19,7 @@ from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore from synapse.storage.database import DatabasePool, make_tuple_comparison_clause -from synapse.util.caches.descriptors import Cache +from synapse.util.caches.descriptors import DeferredCache logger = logging.getLogger(__name__) @@ -410,7 +410,7 @@ def _prune_old_user_ips_txn(txn): class ClientIpStore(ClientIpWorkerStore): def __init__(self, database: DatabasePool, db_conn, hs): - self.client_ip_last_seen = Cache( + self.client_ip_last_seen = DeferredCache( name="client_ip_last_seen", keylen=4, max_entries=50000 ) diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 88fd97e1df51..d903155e8965 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -34,7 +34,7 @@ ) from synapse.types import Collection, JsonDict, get_verify_key_from_cross_signing_key from synapse.util import json_decoder, json_encoder -from synapse.util.caches.descriptors import Cache, cached, cachedList +from synapse.util.caches.descriptors import DeferredCache, cached, cachedList from synapse.util.iterutils import batch_iter from synapse.util.stringutils import shortstr @@ -1004,7 +1004,7 @@ def __init__(self, database: DatabasePool, db_conn, hs): # Map of (user_id, device_id) -> bool. If there is an entry that implies # the device exists. 
- self.device_id_exists_cache = Cache( + self.device_id_exists_cache = DeferredCache( name="device_id_exists", keylen=2, max_entries=10000 ) diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 3ec4d1d9c2ff..be7f60f2e896 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -42,7 +42,7 @@ from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator from synapse.types import Collection, get_domain_from_id -from synapse.util.caches.descriptors import Cache, cached +from synapse.util.caches.descriptors import DeferredCache, cached from synapse.util.iterutils import batch_iter from synapse.util.metrics import Measure @@ -145,7 +145,7 @@ def __init__(self, database: DatabasePool, db_conn, hs): self._cleanup_old_transaction_ids, ) - self._get_event_cache = Cache( + self._get_event_cache = DeferredCache( "*getEvent*", keylen=3, max_entries=hs.config.caches.event_cache_size, diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 14458bc20fd1..7c9fe199bf7c 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -99,7 +99,7 @@ def invalidate(self): self.callbacks.clear() -class Cache(Generic[KT, VT]): +class DeferredCache(Generic[KT, VT]): """Wraps an LruCache, adding support for Deferred results. It expects that each entry added with set() will be a Deferred; likewise get() @@ -225,7 +225,10 @@ def get( return default def set( - self, key: KT, value: defer.Deferred, callback: Optional[Callable[[], None]] = None + self, + key: KT, + value: defer.Deferred, + callback: Optional[Callable[[], None]] = None, ) -> ObservableDeferred: if not isinstance(value, defer.Deferred): raise TypeError("not a Deferred") @@ -427,13 +430,13 @@ def __init__( self.iterable = iterable def __get__(self, obj, owner): - cache = Cache( + cache = DeferredCache( name=self.orig.__name__, max_entries=self.max_entries, keylen=self.num_args, tree=self.tree, iterable=self.iterable, - ) # type: Cache[Tuple, Any] + ) # type: DeferredCache[Tuple, Any] def get_cache_key_gen(args, kwargs): """Given some args/kwargs return a generator that resolves into @@ -677,9 +680,9 @@ class _CacheContext: _cache_context_objects = ( WeakValueDictionary() - ) # type: WeakValueDictionary[Tuple[Cache, CacheKey], _CacheContext] + ) # type: WeakValueDictionary[Tuple[DeferredCache, CacheKey], _CacheContext] - def __init__(self, cache, cache_key): # type: (Cache, CacheKey) -> None + def __init__(self, cache, cache_key): # type: (DeferredCache, CacheKey) -> None self._cache = cache self._cache_key = cache_key @@ -688,7 +691,9 @@ def invalidate(self): # type: () -> None self._cache.invalidate(self._cache_key) @classmethod - def get_instance(cls, cache, cache_key): # type: (Cache, CacheKey) -> _CacheContext + def get_instance( + cls, cache, cache_key + ): # type: (DeferredCache, CacheKey) -> _CacheContext """Returns an instance constructed with the given arguments. A new instance is only created if none already exists. 
diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py index f5afed017c67..00adcab7b99d 100644 --- a/tests/storage/test__base.py +++ b/tests/storage/test__base.py @@ -20,14 +20,14 @@ from twisted.internet import defer from synapse.util.async_helpers import ObservableDeferred -from synapse.util.caches.descriptors import Cache, cached +from synapse.util.caches.descriptors import DeferredCache, cached from tests import unittest -class CacheTestCase(unittest.HomeserverTestCase): +class DeferredCacheTestCase(unittest.HomeserverTestCase): def prepare(self, reactor, clock, homeserver): - self.cache = Cache("test") + self.cache = DeferredCache("test") def test_empty(self): failed = False @@ -56,7 +56,7 @@ def test_invalidate(self): self.assertTrue(failed) def test_eviction(self): - cache = Cache("test", max_entries=2) + cache = DeferredCache("test", max_entries=2) cache.prefill(1, "one") cache.prefill(2, "two") @@ -74,7 +74,7 @@ def test_eviction(self): cache.get(3) def test_eviction_lru(self): - cache = Cache("test", max_entries=2) + cache = DeferredCache("test", max_entries=2) cache.prefill(1, "one") cache.prefill(2, "two") diff --git a/tests/test_metrics.py b/tests/test_metrics.py index f5f63d8ed699..1c03a52f7c0f 100644 --- a/tests/test_metrics.py +++ b/tests/test_metrics.py @@ -15,7 +15,7 @@ # limitations under the License. from synapse.metrics import REGISTRY, InFlightGauge, generate_latest -from synapse.util.caches.descriptors import Cache +from synapse.util.caches.descriptors import DeferredCache from tests import unittest @@ -138,7 +138,7 @@ def test_cache_metric(self): Caches produce metrics reflecting their state when scraped. """ CACHE_NAME = "cache_metrics_test_fgjkbdfg" - cache = Cache(CACHE_NAME, max_entries=777) + cache = DeferredCache(CACHE_NAME, max_entries=777) items = { x.split(b"{")[0].decode("ascii"): x.split(b" ")[1].decode("ascii") diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 677e925477e0..bd870b4a33cf 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -42,9 +42,9 @@ def run_on_reactor(): return make_deferred_yieldable(d) -class CacheTestCase(unittest.TestCase): +class DeferredCacheTestCase(unittest.TestCase): def test_invalidate_all(self): - cache = descriptors.Cache("testcache") + cache = descriptors.DeferredCache("testcache") callback_record = [False, False] From 4182bb812f21d7231ff0efc5e93e5f2e88f6605e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 14 Oct 2020 23:25:23 +0100 Subject: [PATCH 187/245] move DeferredCache into its own module --- .../replication/slave/storage/client_ips.py | 2 +- synapse/storage/databases/main/client_ips.py | 2 +- synapse/storage/databases/main/devices.py | 3 +- .../storage/databases/main/events_worker.py | 3 +- synapse/util/caches/deferred_cache.py | 292 ++++++++++++++++++ synapse/util/caches/descriptors.py | 284 +---------------- tests/storage/test__base.py | 3 +- tests/test_metrics.py | 2 +- tests/util/caches/test_deferred_cache.py | 64 ++++ tests/util/caches/test_descriptors.py | 44 --- 10 files changed, 367 insertions(+), 332 deletions(-) create mode 100644 synapse/util/caches/deferred_cache.py create mode 100644 tests/util/caches/test_deferred_cache.py diff --git a/synapse/replication/slave/storage/client_ips.py b/synapse/replication/slave/storage/client_ips.py index 40ea78a35317..4b0ea0cc01cd 100644 --- a/synapse/replication/slave/storage/client_ips.py +++ b/synapse/replication/slave/storage/client_ips.py 
@@ -15,7 +15,7 @@ from synapse.storage.database import DatabasePool from synapse.storage.databases.main.client_ips import LAST_SEEN_GRANULARITY -from synapse.util.caches.descriptors import DeferredCache +from synapse.util.caches.deferred_cache import DeferredCache from ._base import BaseSlavedStore diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index ad32701d3601..9e66e6648a19 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -19,7 +19,7 @@ from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore from synapse.storage.database import DatabasePool, make_tuple_comparison_clause -from synapse.util.caches.descriptors import DeferredCache +from synapse.util.caches.deferred_cache import DeferredCache logger = logging.getLogger(__name__) diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index d903155e8965..e662a20d24a4 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -34,7 +34,8 @@ ) from synapse.types import Collection, JsonDict, get_verify_key_from_cross_signing_key from synapse.util import json_decoder, json_encoder -from synapse.util.caches.descriptors import DeferredCache, cached, cachedList +from synapse.util.caches.deferred_cache import DeferredCache +from synapse.util.caches.descriptors import cached, cachedList from synapse.util.iterutils import batch_iter from synapse.util.stringutils import shortstr diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index be7f60f2e896..ff150f0be7cc 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -42,7 +42,8 @@ from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator from synapse.types import Collection, get_domain_from_id -from synapse.util.caches.descriptors import DeferredCache, cached +from synapse.util.caches.deferred_cache import DeferredCache +from synapse.util.caches.descriptors import cached from synapse.util.iterutils import batch_iter from synapse.util.metrics import Measure diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py new file mode 100644 index 000000000000..f728cd2cf27a --- /dev/null +++ b/synapse/util/caches/deferred_cache.py @@ -0,0 +1,292 @@ +# -*- coding: utf-8 -*- +# Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import enum +import threading +from typing import Callable, Generic, Iterable, MutableMapping, Optional, TypeVar, cast + +from prometheus_client import Gauge + +from twisted.internet import defer + +from synapse.util.async_helpers import ObservableDeferred +from synapse.util.caches import register_cache +from synapse.util.caches.lrucache import LruCache +from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry + +cache_pending_metric = Gauge( + "synapse_util_caches_cache_pending", + "Number of lookups currently pending for this cache", + ["name"], +) + + +KT = TypeVar("KT") +VT = TypeVar("VT") + + +class _Sentinel(enum.Enum): + # defining a sentinel in this way allows mypy to correctly handle the + # type of a dictionary lookup. + sentinel = object() + + +class DeferredCache(Generic[KT, VT]): + """Wraps an LruCache, adding support for Deferred results. + + It expects that each entry added with set() will be a Deferred; likewise get() + may return an ObservableDeferred. + """ + + __slots__ = ( + "cache", + "name", + "keylen", + "thread", + "metrics", + "_pending_deferred_cache", + ) + + def __init__( + self, + name: str, + max_entries: int = 1000, + keylen: int = 1, + tree: bool = False, + iterable: bool = False, + apply_cache_factor_from_config: bool = True, + ): + """ + Args: + name: The name of the cache + max_entries: Maximum amount of entries that the cache will hold + keylen: The length of the tuple used as the cache key. Ignored unless + `tree` is True. + tree: Use a TreeCache instead of a dict as the underlying cache type + iterable: If True, count each item in the cached object as an entry, + rather than each cached object + apply_cache_factor_from_config: Whether cache factors specified in the + config file affect `max_entries` + """ + cache_type = TreeCache if tree else dict + + # _pending_deferred_cache maps from the key value to a `CacheEntry` object. + self._pending_deferred_cache = ( + cache_type() + ) # type: MutableMapping[KT, CacheEntry] + + # cache is used for completed results and maps to the result itself, rather than + # a Deferred. + self.cache = LruCache( + max_size=max_entries, + keylen=keylen, + cache_type=cache_type, + size_callback=(lambda d: len(d)) if iterable else None, + evicted_callback=self._on_evicted, + apply_cache_factor_from_config=apply_cache_factor_from_config, + ) + + self.name = name + self.keylen = keylen + self.thread = None # type: Optional[threading.Thread] + self.metrics = register_cache( + "cache", + name, + self.cache, + collect_callback=self._metrics_collection_callback, + ) + + @property + def max_entries(self): + return self.cache.max_size + + def _on_evicted(self, evicted_count): + self.metrics.inc_evictions(evicted_count) + + def _metrics_collection_callback(self): + cache_pending_metric.labels(self.name).set(len(self._pending_deferred_cache)) + + def check_thread(self): + expected_thread = self.thread + if expected_thread is None: + self.thread = threading.current_thread() + else: + if expected_thread is not threading.current_thread(): + raise ValueError( + "Cache objects can only be accessed from the main thread" + ) + + def get( + self, + key: KT, + default=_Sentinel.sentinel, + callback: Optional[Callable[[], None]] = None, + update_metrics: bool = True, + ): + """Looks the key up in the caches. + + Args: + key(tuple) + default: What is returned if key is not in the caches. 
If not + specified then function throws KeyError instead + callback(fn): Gets called when the entry in the cache is invalidated + update_metrics (bool): whether to update the cache hit rate metrics + + Returns: + Either an ObservableDeferred or the result itself + """ + callbacks = [callback] if callback else [] + val = self._pending_deferred_cache.get(key, _Sentinel.sentinel) + if val is not _Sentinel.sentinel: + val.callbacks.update(callbacks) + if update_metrics: + self.metrics.inc_hits() + return val.deferred + + val = self.cache.get(key, _Sentinel.sentinel, callbacks=callbacks) + if val is not _Sentinel.sentinel: + self.metrics.inc_hits() + return val + + if update_metrics: + self.metrics.inc_misses() + + if default is _Sentinel.sentinel: + raise KeyError() + else: + return default + + def set( + self, + key: KT, + value: defer.Deferred, + callback: Optional[Callable[[], None]] = None, + ) -> ObservableDeferred: + if not isinstance(value, defer.Deferred): + raise TypeError("not a Deferred") + + callbacks = [callback] if callback else [] + self.check_thread() + observable = ObservableDeferred(value, consumeErrors=True) + observer = observable.observe() + entry = CacheEntry(deferred=observable, callbacks=callbacks) + + existing_entry = self._pending_deferred_cache.pop(key, None) + if existing_entry: + existing_entry.invalidate() + + self._pending_deferred_cache[key] = entry + + def compare_and_pop(): + """Check if our entry is still the one in _pending_deferred_cache, and + if so, pop it. + + Returns true if the entries matched. + """ + existing_entry = self._pending_deferred_cache.pop(key, None) + if existing_entry is entry: + return True + + # oops, the _pending_deferred_cache has been updated since + # we started our query, so we are out of date. + # + # Better put back whatever we took out. (We do it this way + # round, rather than peeking into the _pending_deferred_cache + # and then removing on a match, to make the common case faster) + if existing_entry is not None: + self._pending_deferred_cache[key] = existing_entry + + return False + + def cb(result): + if compare_and_pop(): + self.cache.set(key, result, entry.callbacks) + else: + # we're not going to put this entry into the cache, so need + # to make sure that the invalidation callbacks are called. + # That was probably done when _pending_deferred_cache was + # updated, but it's possible that `set` was called without + # `invalidate` being previously called, in which case it may + # not have been. Either way, let's double-check now. + entry.invalidate() + + def eb(_fail): + compare_and_pop() + entry.invalidate() + + # once the deferred completes, we can move the entry from the + # _pending_deferred_cache to the real cache. + # + observer.addCallbacks(cb, eb) + return observable + + def prefill(self, key: KT, value: VT, callback: Callable[[], None] = None): + callbacks = [callback] if callback else [] + self.cache.set(key, value, callbacks=callbacks) + + def invalidate(self, key): + self.check_thread() + self.cache.pop(key, None) + + # if we have a pending lookup for this key, remove it from the + # _pending_deferred_cache, which will (a) stop it being returned + # for future queries and (b) stop it being persisted as a proper entry + # in self.cache. + entry = self._pending_deferred_cache.pop(key, None) + + # run the invalidation callbacks now, rather than waiting for the + # deferred to resolve. 
+ if entry: + entry.invalidate() + + def invalidate_many(self, key: KT): + self.check_thread() + if not isinstance(key, tuple): + raise TypeError("The cache key must be a tuple not %r" % (type(key),)) + self.cache.del_multi(key) + + # if we have a pending lookup for this key, remove it from the + # _pending_deferred_cache, as above + entry_dict = self._pending_deferred_cache.pop(cast(KT, key), None) + if entry_dict is not None: + for entry in iterate_tree_cache_entry(entry_dict): + entry.invalidate() + + def invalidate_all(self): + self.check_thread() + self.cache.clear() + for entry in self._pending_deferred_cache.values(): + entry.invalidate() + self._pending_deferred_cache.clear() + + +class CacheEntry: + __slots__ = ["deferred", "callbacks", "invalidated"] + + def __init__( + self, deferred: ObservableDeferred, callbacks: Iterable[Callable[[], None]] + ): + self.deferred = deferred + self.callbacks = set(callbacks) + self.invalidated = False + + def invalidate(self): + if not self.invalidated: + self.invalidated = True + for callback in self.callbacks: + callback() + self.callbacks.clear() diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 7c9fe199bf7c..1f438868047b 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -13,44 +13,24 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import enum import functools import inspect import logging -import threading -from typing import ( - Any, - Callable, - Generic, - Iterable, - MutableMapping, - Optional, - Tuple, - TypeVar, - Union, - cast, -) +from typing import Any, Callable, Generic, Optional, Tuple, TypeVar, Union, cast from weakref import WeakValueDictionary -from prometheus_client import Gauge - from twisted.internet import defer from synapse.logging.context import make_deferred_yieldable, preserve_fn from synapse.util import unwrapFirstError from synapse.util.async_helpers import ObservableDeferred -from synapse.util.caches.lrucache import LruCache -from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry - -from . import register_cache +from synapse.util.caches.deferred_cache import DeferredCache logger = logging.getLogger(__name__) CacheKey = Union[Tuple, Any] F = TypeVar("F", bound=Callable[..., Any]) -KT = TypeVar("KT") -VT = TypeVar("VT") class _CachedFunction(Generic[F]): @@ -68,266 +48,6 @@ class _CachedFunction(Generic[F]): __call__ = None # type: F -cache_pending_metric = Gauge( - "synapse_util_caches_cache_pending", - "Number of lookups currently pending for this cache", - ["name"], -) - - -class _Sentinel(enum.Enum): - # defining a sentinel in this way allows mypy to correctly handle the - # type of a dictionary lookup. - sentinel = object() - - -class CacheEntry: - __slots__ = ["deferred", "callbacks", "invalidated"] - - def __init__( - self, deferred: ObservableDeferred, callbacks: Iterable[Callable[[], None]] - ): - self.deferred = deferred - self.callbacks = set(callbacks) - self.invalidated = False - - def invalidate(self): - if not self.invalidated: - self.invalidated = True - for callback in self.callbacks: - callback() - self.callbacks.clear() - - -class DeferredCache(Generic[KT, VT]): - """Wraps an LruCache, adding support for Deferred results. - - It expects that each entry added with set() will be a Deferred; likewise get() - may return an ObservableDeferred. 
- """ - - __slots__ = ( - "cache", - "name", - "keylen", - "thread", - "metrics", - "_pending_deferred_cache", - ) - - def __init__( - self, - name: str, - max_entries: int = 1000, - keylen: int = 1, - tree: bool = False, - iterable: bool = False, - apply_cache_factor_from_config: bool = True, - ): - """ - Args: - name: The name of the cache - max_entries: Maximum amount of entries that the cache will hold - keylen: The length of the tuple used as the cache key. Ignored unless - `tree` is True. - tree: Use a TreeCache instead of a dict as the underlying cache type - iterable: If True, count each item in the cached object as an entry, - rather than each cached object - apply_cache_factor_from_config: Whether cache factors specified in the - config file affect `max_entries` - """ - cache_type = TreeCache if tree else dict - - # _pending_deferred_cache maps from the key value to a `CacheEntry` object. - self._pending_deferred_cache = ( - cache_type() - ) # type: MutableMapping[KT, CacheEntry] - - # cache is used for completed results and maps to the result itself, rather than - # a Deferred. - self.cache = LruCache( - max_size=max_entries, - keylen=keylen, - cache_type=cache_type, - size_callback=(lambda d: len(d)) if iterable else None, - evicted_callback=self._on_evicted, - apply_cache_factor_from_config=apply_cache_factor_from_config, - ) - - self.name = name - self.keylen = keylen - self.thread = None # type: Optional[threading.Thread] - self.metrics = register_cache( - "cache", - name, - self.cache, - collect_callback=self._metrics_collection_callback, - ) - - @property - def max_entries(self): - return self.cache.max_size - - def _on_evicted(self, evicted_count): - self.metrics.inc_evictions(evicted_count) - - def _metrics_collection_callback(self): - cache_pending_metric.labels(self.name).set(len(self._pending_deferred_cache)) - - def check_thread(self): - expected_thread = self.thread - if expected_thread is None: - self.thread = threading.current_thread() - else: - if expected_thread is not threading.current_thread(): - raise ValueError( - "Cache objects can only be accessed from the main thread" - ) - - def get( - self, - key: KT, - default=_Sentinel.sentinel, - callback: Optional[Callable[[], None]] = None, - update_metrics: bool = True, - ): - """Looks the key up in the caches. - - Args: - key(tuple) - default: What is returned if key is not in the caches. 
If not - specified then function throws KeyError instead - callback(fn): Gets called when the entry in the cache is invalidated - update_metrics (bool): whether to update the cache hit rate metrics - - Returns: - Either an ObservableDeferred or the result itself - """ - callbacks = [callback] if callback else [] - val = self._pending_deferred_cache.get(key, _Sentinel.sentinel) - if val is not _Sentinel.sentinel: - val.callbacks.update(callbacks) - if update_metrics: - self.metrics.inc_hits() - return val.deferred - - val = self.cache.get(key, _Sentinel.sentinel, callbacks=callbacks) - if val is not _Sentinel.sentinel: - self.metrics.inc_hits() - return val - - if update_metrics: - self.metrics.inc_misses() - - if default is _Sentinel.sentinel: - raise KeyError() - else: - return default - - def set( - self, - key: KT, - value: defer.Deferred, - callback: Optional[Callable[[], None]] = None, - ) -> ObservableDeferred: - if not isinstance(value, defer.Deferred): - raise TypeError("not a Deferred") - - callbacks = [callback] if callback else [] - self.check_thread() - observable = ObservableDeferred(value, consumeErrors=True) - observer = observable.observe() - entry = CacheEntry(deferred=observable, callbacks=callbacks) - - existing_entry = self._pending_deferred_cache.pop(key, None) - if existing_entry: - existing_entry.invalidate() - - self._pending_deferred_cache[key] = entry - - def compare_and_pop(): - """Check if our entry is still the one in _pending_deferred_cache, and - if so, pop it. - - Returns true if the entries matched. - """ - existing_entry = self._pending_deferred_cache.pop(key, None) - if existing_entry is entry: - return True - - # oops, the _pending_deferred_cache has been updated since - # we started our query, so we are out of date. - # - # Better put back whatever we took out. (We do it this way - # round, rather than peeking into the _pending_deferred_cache - # and then removing on a match, to make the common case faster) - if existing_entry is not None: - self._pending_deferred_cache[key] = existing_entry - - return False - - def cb(result): - if compare_and_pop(): - self.cache.set(key, result, entry.callbacks) - else: - # we're not going to put this entry into the cache, so need - # to make sure that the invalidation callbacks are called. - # That was probably done when _pending_deferred_cache was - # updated, but it's possible that `set` was called without - # `invalidate` being previously called, in which case it may - # not have been. Either way, let's double-check now. - entry.invalidate() - - def eb(_fail): - compare_and_pop() - entry.invalidate() - - # once the deferred completes, we can move the entry from the - # _pending_deferred_cache to the real cache. - # - observer.addCallbacks(cb, eb) - return observable - - def prefill(self, key: KT, value: VT, callback: Callable[[], None] = None): - callbacks = [callback] if callback else [] - self.cache.set(key, value, callbacks=callbacks) - - def invalidate(self, key): - self.check_thread() - self.cache.pop(key, None) - - # if we have a pending lookup for this key, remove it from the - # _pending_deferred_cache, which will (a) stop it being returned - # for future queries and (b) stop it being persisted as a proper entry - # in self.cache. - entry = self._pending_deferred_cache.pop(key, None) - - # run the invalidation callbacks now, rather than waiting for the - # deferred to resolve. 
- if entry: - entry.invalidate() - - def invalidate_many(self, key: KT): - self.check_thread() - if not isinstance(key, tuple): - raise TypeError("The cache key must be a tuple not %r" % (type(key),)) - self.cache.del_multi(key) - - # if we have a pending lookup for this key, remove it from the - # _pending_deferred_cache, as above - entry_dict = self._pending_deferred_cache.pop(cast(KT, key), None) - if entry_dict is not None: - for entry in iterate_tree_cache_entry(entry_dict): - entry.invalidate() - - def invalidate_all(self): - self.check_thread() - self.cache.clear() - for entry in self._pending_deferred_cache.values(): - entry.invalidate() - self._pending_deferred_cache.clear() - - class _CacheDescriptorBase: def __init__(self, orig: _CachedFunction, num_args, cache_context=False): self.orig = orig diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py index 00adcab7b99d..2598dbe0a7b6 100644 --- a/tests/storage/test__base.py +++ b/tests/storage/test__base.py @@ -20,7 +20,8 @@ from twisted.internet import defer from synapse.util.async_helpers import ObservableDeferred -from synapse.util.caches.descriptors import DeferredCache, cached +from synapse.util.caches.deferred_cache import DeferredCache +from synapse.util.caches.descriptors import cached from tests import unittest diff --git a/tests/test_metrics.py b/tests/test_metrics.py index 1c03a52f7c0f..759e4cd0480f 100644 --- a/tests/test_metrics.py +++ b/tests/test_metrics.py @@ -15,7 +15,7 @@ # limitations under the License. from synapse.metrics import REGISTRY, InFlightGauge, generate_latest -from synapse.util.caches.descriptors import DeferredCache +from synapse.util.caches.deferred_cache import DeferredCache from tests import unittest diff --git a/tests/util/caches/test_deferred_cache.py b/tests/util/caches/test_deferred_cache.py new file mode 100644 index 000000000000..9b6acdfc433d --- /dev/null +++ b/tests/util/caches/test_deferred_cache.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest +from functools import partial + +from twisted.internet import defer + +import synapse.util.caches.deferred_cache + + +class DeferredCacheTestCase(unittest.TestCase): + def test_invalidate_all(self): + cache = synapse.util.caches.deferred_cache.DeferredCache("testcache") + + callback_record = [False, False] + + def record_callback(idx): + callback_record[idx] = True + + # add a couple of pending entries + d1 = defer.Deferred() + cache.set("key1", d1, partial(record_callback, 0)) + + d2 = defer.Deferred() + cache.set("key2", d2, partial(record_callback, 1)) + + # lookup should return observable deferreds + self.assertFalse(cache.get("key1").has_called()) + self.assertFalse(cache.get("key2").has_called()) + + # let one of the lookups complete + d2.callback("result2") + + # for now at least, the cache will return real results rather than an + # observabledeferred + self.assertEqual(cache.get("key2"), "result2") + + # now do the invalidation + cache.invalidate_all() + + # lookup should return none + self.assertIsNone(cache.get("key1", None)) + self.assertIsNone(cache.get("key2", None)) + + # both callbacks should have been callbacked + self.assertTrue(callback_record[0], "Invalidation callback for key1 not called") + self.assertTrue(callback_record[1], "Invalidation callback for key2 not called") + + # letting the other lookup complete should do nothing + d1.callback("result1") + self.assertIsNone(cache.get("key1", None)) diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index bd870b4a33cf..3d1f960869da 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -from functools import partial import mock @@ -42,49 +41,6 @@ def run_on_reactor(): return make_deferred_yieldable(d) -class DeferredCacheTestCase(unittest.TestCase): - def test_invalidate_all(self): - cache = descriptors.DeferredCache("testcache") - - callback_record = [False, False] - - def record_callback(idx): - callback_record[idx] = True - - # add a couple of pending entries - d1 = defer.Deferred() - cache.set("key1", d1, partial(record_callback, 0)) - - d2 = defer.Deferred() - cache.set("key2", d2, partial(record_callback, 1)) - - # lookup should return observable deferreds - self.assertFalse(cache.get("key1").has_called()) - self.assertFalse(cache.get("key2").has_called()) - - # let one of the lookups complete - d2.callback("result2") - - # for now at least, the cache will return real results rather than an - # observabledeferred - self.assertEqual(cache.get("key2"), "result2") - - # now do the invalidation - cache.invalidate_all() - - # lookup should return none - self.assertIsNone(cache.get("key1", None)) - self.assertIsNone(cache.get("key2", None)) - - # both callbacks should have been callbacked - self.assertTrue(callback_record[0], "Invalidation callback for key1 not called") - self.assertTrue(callback_record[1], "Invalidation callback for key2 not called") - - # letting the other lookup complete should do nothing - d1.callback("result1") - self.assertIsNone(cache.get("key1", None)) - - class DescriptorTestCase(unittest.TestCase): @defer.inlineCallbacks def test_cache(self): From 470dedd2662536c309407d05085d04a7d61c5de8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 14 Oct 2020 23:37:23 +0100 Subject: [PATCH 188/245] Combine the two sets of DeferredCache tests --- tests/storage/test__base.py | 72 ---------------------- tests/util/caches/test_deferred_cache.py | 77 +++++++++++++++++++++++- 2 files changed, 75 insertions(+), 74 deletions(-) diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py index 2598dbe0a7b6..8e69b1e9cc91 100644 --- a/tests/storage/test__base.py +++ b/tests/storage/test__base.py @@ -20,83 +20,11 @@ from twisted.internet import defer from synapse.util.async_helpers import ObservableDeferred -from synapse.util.caches.deferred_cache import DeferredCache from synapse.util.caches.descriptors import cached from tests import unittest -class DeferredCacheTestCase(unittest.HomeserverTestCase): - def prepare(self, reactor, clock, homeserver): - self.cache = DeferredCache("test") - - def test_empty(self): - failed = False - try: - self.cache.get("foo") - except KeyError: - failed = True - - self.assertTrue(failed) - - def test_hit(self): - self.cache.prefill("foo", 123) - - self.assertEquals(self.cache.get("foo"), 123) - - def test_invalidate(self): - self.cache.prefill(("foo",), 123) - self.cache.invalidate(("foo",)) - - failed = False - try: - self.cache.get(("foo",)) - except KeyError: - failed = True - - self.assertTrue(failed) - - def test_eviction(self): - cache = DeferredCache("test", max_entries=2) - - cache.prefill(1, "one") - cache.prefill(2, "two") - cache.prefill(3, "three") # 1 will be evicted - - failed = False - try: - cache.get(1) - except KeyError: - failed = True - - self.assertTrue(failed) - - cache.get(2) - cache.get(3) - - def test_eviction_lru(self): - cache = DeferredCache("test", max_entries=2) - - cache.prefill(1, "one") - cache.prefill(2, "two") - - # Now access 1 again, thus causing 2 to be least-recently used - cache.get(1) - - cache.prefill(3, "three") - - failed = False - try: - cache.get(2) 
- except KeyError: - failed = True - - self.assertTrue(failed) - - cache.get(1) - cache.get(3) - - class CacheDecoratorTestCase(unittest.HomeserverTestCase): @defer.inlineCallbacks def test_passthrough(self): diff --git a/tests/util/caches/test_deferred_cache.py b/tests/util/caches/test_deferred_cache.py index 9b6acdfc433d..9717be56b6cd 100644 --- a/tests/util/caches/test_deferred_cache.py +++ b/tests/util/caches/test_deferred_cache.py @@ -18,12 +18,41 @@ from twisted.internet import defer -import synapse.util.caches.deferred_cache +from synapse.util.caches.deferred_cache import DeferredCache class DeferredCacheTestCase(unittest.TestCase): + def test_empty(self): + cache = DeferredCache("test") + failed = False + try: + cache.get("foo") + except KeyError: + failed = True + + self.assertTrue(failed) + + def test_hit(self): + cache = DeferredCache("test") + cache.prefill("foo", 123) + + self.assertEquals(cache.get("foo"), 123) + + def test_invalidate(self): + cache = DeferredCache("test") + cache.prefill(("foo",), 123) + cache.invalidate(("foo",)) + + failed = False + try: + cache.get(("foo",)) + except KeyError: + failed = True + + self.assertTrue(failed) + def test_invalidate_all(self): - cache = synapse.util.caches.deferred_cache.DeferredCache("testcache") + cache = DeferredCache("testcache") callback_record = [False, False] @@ -62,3 +91,47 @@ def record_callback(idx): # letting the other lookup complete should do nothing d1.callback("result1") self.assertIsNone(cache.get("key1", None)) + + def test_eviction(self): + cache = DeferredCache( + "test", max_entries=2, apply_cache_factor_from_config=False + ) + + cache.prefill(1, "one") + cache.prefill(2, "two") + cache.prefill(3, "three") # 1 will be evicted + + failed = False + try: + cache.get(1) + except KeyError: + failed = True + + self.assertTrue(failed) + + cache.get(2) + cache.get(3) + + def test_eviction_lru(self): + cache = DeferredCache( + "test", max_entries=2, apply_cache_factor_from_config=False + ) + + cache.prefill(1, "one") + cache.prefill(2, "two") + + # Now access 1 again, thus causing 2 to be least-recently used + cache.get(1) + + cache.prefill(3, "three") + + failed = False + try: + cache.get(2) + except KeyError: + failed = True + + self.assertTrue(failed) + + cache.get(1) + cache.get(3) From 27cfd712b38bd4e925e17f01f19b746d0d060cff Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 14 Oct 2020 23:38:47 +0100 Subject: [PATCH 189/245] changelog --- changelog.d/8548.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/8548.misc diff --git a/changelog.d/8548.misc b/changelog.d/8548.misc new file mode 100644 index 000000000000..fba10bd731ca --- /dev/null +++ b/changelog.d/8548.misc @@ -0,0 +1 @@ +Rename `Cache` to `DeferredCache`, to better reflect its purpose. From 1f3915507160a0eb64ed50931f80a94155e1b491 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 15 Oct 2020 10:36:40 +0100 Subject: [PATCH 190/245] Include user agent in user daily visits table (#8503) Include user agent in user daily visits table. 
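As a rough illustration (not part of this change), the extended table could be
read back with the existing `simple_select_list` helper. The
`get_user_daily_visits` name below is hypothetical, and the sketch assumes it
lives on a store class with access to `self.db_pool`:

    async def get_user_daily_visits(self, user_id: str):
        # Hypothetical read-side helper: fetch the recorded daily visits for a
        # user, including the new user_agent column added by this change.
        return await self.db_pool.simple_select_list(
            table="user_daily_visits",
            keyvalues={"user_id": user_id},
            retcols=("device_id", "timestamp", "user_agent"),
            desc="get_user_daily_visits",
        )

Where a device has reported more than one user agent, the update query picks
MAX(), as explained in the comment added to `_generate_user_daily_visits`.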
--- changelog.d/8503.misc | 1 + synapse/storage/databases/main/metrics.py | 11 ++++++++--- .../schema/delta/58/20user_daily_visits.sql | 18 ++++++++++++++++++ 3 files changed, 27 insertions(+), 3 deletions(-) create mode 100644 changelog.d/8503.misc create mode 100644 synapse/storage/databases/main/schema/delta/58/20user_daily_visits.sql diff --git a/changelog.d/8503.misc b/changelog.d/8503.misc new file mode 100644 index 000000000000..edb1be8aa8e9 --- /dev/null +++ b/changelog.d/8503.misc @@ -0,0 +1 @@ +Add user agent to user_daily_visits table. diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py index 0acf0617ca5a..79b01d16f9d5 100644 --- a/synapse/storage/databases/main/metrics.py +++ b/synapse/storage/databases/main/metrics.py @@ -281,9 +281,14 @@ def _generate_user_daily_visits(txn): a_day_in_milliseconds = 24 * 60 * 60 * 1000 now = self._clock.time_msec() + # A note on user_agent. Technically a given device can have multiple + # user agents, so we need to decide which one to pick. We could have handled this + # in number of ways, but given that we don't _that_ much have gone for MAX() + # For more details of the other options considered see + # https://github.com/matrix-org/synapse/pull/8503#discussion_r502306111 sql = """ - INSERT INTO user_daily_visits (user_id, device_id, timestamp) - SELECT u.user_id, u.device_id, ? + INSERT INTO user_daily_visits (user_id, device_id, timestamp, user_agent) + SELECT u.user_id, u.device_id, ?, MAX(u.user_agent) FROM user_ips AS u LEFT JOIN ( SELECT user_id, device_id, timestamp FROM user_daily_visits @@ -294,7 +299,7 @@ def _generate_user_daily_visits(txn): WHERE last_seen > ? AND last_seen <= ? AND udv.timestamp IS NULL AND users.is_guest=0 AND users.appservice_id IS NULL - GROUP BY u.user_id, u.device_id + GROUP BY u.user_id, u.device_id, u.user_agent """ # This means that the day has rolled over but there could still diff --git a/synapse/storage/databases/main/schema/delta/58/20user_daily_visits.sql b/synapse/storage/databases/main/schema/delta/58/20user_daily_visits.sql new file mode 100644 index 000000000000..b0b5dcddceef --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/58/20user_daily_visits.sql @@ -0,0 +1,18 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + -- Add new column to user_daily_visits to track user agent +ALTER TABLE user_daily_visits + ADD COLUMN user_agent TEXT; From 8075504a600b47ac93faf9b605ade691ae0fbcd3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 15 Oct 2020 11:44:39 +0100 Subject: [PATCH 191/245] Enable mypy for synapse.util.caches (#8547) This seemed to entail dragging in a type stub for SortedList. 
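As a minimal sketch (not taken from this change) of what the stub provides,
assuming the `stubs/` directory is on mypy's search path: with
`sortedcontainers.SortedList` typed as a generic container, comment-style
annotations like the one added to `TTLCache` can be checked, and mypy will
reject insertions of the wrong element type. `_CacheEntry` here is a
simplified stand-in, not the real entry type:

    from typing import NamedTuple

    from sortedcontainers import SortedList


    class _CacheEntry(NamedTuple):
        # simplified stand-in for the real TTLCache entry type
        expiry_time: float
        key: str


    expiry_list = SortedList()  # type: SortedList[_CacheEntry]
    expiry_list.add(_CacheEntry(expiry_time=100.0, key="foo"))
    # expiry_list.add("not an entry")  # would be flagged by mypy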
--- changelog.d/8547.misc | 1 + mypy.ini | 4 +- stubs/sortedcontainers/__init__.pyi | 11 +- stubs/sortedcontainers/sortedlist.pyi | 177 ++++++++++++++++++++++++++ synapse/util/caches/ttlcache.py | 2 +- 5 files changed, 185 insertions(+), 10 deletions(-) create mode 100644 changelog.d/8547.misc create mode 100644 stubs/sortedcontainers/sortedlist.pyi diff --git a/changelog.d/8547.misc b/changelog.d/8547.misc new file mode 100644 index 000000000000..fafb1c8347b2 --- /dev/null +++ b/changelog.d/8547.misc @@ -0,0 +1 @@ +Enable mypy type checking for `synapse.util.caches`. diff --git a/mypy.ini b/mypy.ini index f08fe992a4d5..9748f6258cf2 100644 --- a/mypy.ini +++ b/mypy.ini @@ -64,9 +64,7 @@ files = synapse/streams, synapse/types.py, synapse/util/async_helpers.py, - synapse/util/caches/descriptors.py, - synapse/util/caches/response_cache.py, - synapse/util/caches/stream_change_cache.py, + synapse/util/caches, synapse/util/metrics.py, tests/replication, tests/test_utils, diff --git a/stubs/sortedcontainers/__init__.pyi b/stubs/sortedcontainers/__init__.pyi index 073b806d3c98..fa307483febe 100644 --- a/stubs/sortedcontainers/__init__.pyi +++ b/stubs/sortedcontainers/__init__.pyi @@ -1,13 +1,12 @@ -from .sorteddict import ( - SortedDict, - SortedKeysView, - SortedItemsView, - SortedValuesView, -) +from .sorteddict import SortedDict, SortedItemsView, SortedKeysView, SortedValuesView +from .sortedlist import SortedKeyList, SortedList, SortedListWithKey __all__ = [ "SortedDict", "SortedKeysView", "SortedItemsView", "SortedValuesView", + "SortedKeyList", + "SortedList", + "SortedListWithKey", ] diff --git a/stubs/sortedcontainers/sortedlist.pyi b/stubs/sortedcontainers/sortedlist.pyi new file mode 100644 index 000000000000..8f6086b3ff38 --- /dev/null +++ b/stubs/sortedcontainers/sortedlist.pyi @@ -0,0 +1,177 @@ +# stub for SortedList. This is an exact copy of +# https://github.com/grantjenks/python-sortedcontainers/blob/a419ffbd2b1c935b09f11f0971696e537fd0c510/sortedcontainers/sortedlist.pyi +# (from https://github.com/grantjenks/python-sortedcontainers/pull/107) + +from typing import ( + Any, + Callable, + Generic, + Iterable, + Iterator, + List, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + TypeVar, + Union, + overload, +) + +_T = TypeVar("_T") +_SL = TypeVar("_SL", bound=SortedList) +_SKL = TypeVar("_SKL", bound=SortedKeyList) +_Key = Callable[[_T], Any] +_Repr = Callable[[], str] + +def recursive_repr(fillvalue: str = ...) -> Callable[[_Repr], _Repr]: ... + +class SortedList(MutableSequence[_T]): + + DEFAULT_LOAD_FACTOR: int = ... + def __init__( + self, iterable: Optional[Iterable[_T]] = ..., key: Optional[_Key[_T]] = ..., + ): ... + # NB: currently mypy does not honour return type, see mypy #3307 + @overload + def __new__(cls: Type[_SL], iterable: None, key: None) -> _SL: ... + @overload + def __new__(cls: Type[_SL], iterable: None, key: _Key[_T]) -> SortedKeyList[_T]: ... + @overload + def __new__(cls: Type[_SL], iterable: Iterable[_T], key: None) -> _SL: ... + @overload + def __new__(cls, iterable: Iterable[_T], key: _Key[_T]) -> SortedKeyList[_T]: ... + @property + def key(self) -> Optional[Callable[[_T], Any]]: ... + def _reset(self, load: int) -> None: ... + def clear(self) -> None: ... + def _clear(self) -> None: ... + def add(self, value: _T) -> None: ... + def _expand(self, pos: int) -> None: ... + def update(self, iterable: Iterable[_T]) -> None: ... + def _update(self, iterable: Iterable[_T]) -> None: ... + def discard(self, value: _T) -> None: ... 
+ def remove(self, value: _T) -> None: ... + def _delete(self, pos: int, idx: int) -> None: ... + def _loc(self, pos: int, idx: int) -> int: ... + def _pos(self, idx: int) -> int: ... + def _build_index(self) -> None: ... + def __contains__(self, value: Any) -> bool: ... + def __delitem__(self, index: Union[int, slice]) -> None: ... + @overload + def __getitem__(self, index: int) -> _T: ... + @overload + def __getitem__(self, index: slice) -> List[_T]: ... + @overload + def _getitem(self, index: int) -> _T: ... + @overload + def _getitem(self, index: slice) -> List[_T]: ... + @overload + def __setitem__(self, index: int, value: _T) -> None: ... + @overload + def __setitem__(self, index: slice, value: Iterable[_T]) -> None: ... + def __iter__(self) -> Iterator[_T]: ... + def __reversed__(self) -> Iterator[_T]: ... + def __len__(self) -> int: ... + def reverse(self) -> None: ... + def islice( + self, start: Optional[int] = ..., stop: Optional[int] = ..., reverse=bool, + ) -> Iterator[_T]: ... + def _islice( + self, min_pos: int, min_idx: int, max_pos: int, max_idx: int, reverse: bool, + ) -> Iterator[_T]: ... + def irange( + self, + minimum: Optional[int] = ..., + maximum: Optional[int] = ..., + inclusive: Tuple[bool, bool] = ..., + reverse: bool = ..., + ) -> Iterator[_T]: ... + def bisect_left(self, value: _T) -> int: ... + def bisect_right(self, value: _T) -> int: ... + def bisect(self, value: _T) -> int: ... + def _bisect_right(self, value: _T) -> int: ... + def count(self, value: _T) -> int: ... + def copy(self: _SL) -> _SL: ... + def __copy__(self: _SL) -> _SL: ... + def append(self, value: _T) -> None: ... + def extend(self, values: Iterable[_T]) -> None: ... + def insert(self, index: int, value: _T) -> None: ... + def pop(self, index: int = ...) -> _T: ... + def index( + self, value: _T, start: Optional[int] = ..., stop: Optional[int] = ... + ) -> int: ... + def __add__(self: _SL, other: Iterable[_T]) -> _SL: ... + def __radd__(self: _SL, other: Iterable[_T]) -> _SL: ... + def __iadd__(self: _SL, other: Iterable[_T]) -> _SL: ... + def __mul__(self: _SL, num: int) -> _SL: ... + def __rmul__(self: _SL, num: int) -> _SL: ... + def __imul__(self: _SL, num: int) -> _SL: ... + def __eq__(self, other: Any) -> bool: ... + def __ne__(self, other: Any) -> bool: ... + def __lt__(self, other: Sequence[_T]) -> bool: ... + def __gt__(self, other: Sequence[_T]) -> bool: ... + def __le__(self, other: Sequence[_T]) -> bool: ... + def __ge__(self, other: Sequence[_T]) -> bool: ... + def __repr__(self) -> str: ... + def _check(self) -> None: ... + +class SortedKeyList(SortedList[_T]): + def __init__( + self, iterable: Optional[Iterable[_T]] = ..., key: _Key[_T] = ... + ) -> None: ... + def __new__( + cls, iterable: Optional[Iterable[_T]] = ..., key: _Key[_T] = ... + ) -> SortedKeyList[_T]: ... + @property + def key(self) -> Callable[[_T], Any]: ... + def clear(self) -> None: ... + def _clear(self) -> None: ... + def add(self, value: _T) -> None: ... + def _expand(self, pos: int) -> None: ... + def update(self, iterable: Iterable[_T]) -> None: ... + def _update(self, iterable: Iterable[_T]) -> None: ... + # NB: Must be T to be safely passed to self.func, yet base class imposes Any + def __contains__(self, value: _T) -> bool: ... # type: ignore + def discard(self, value: _T) -> None: ... + def remove(self, value: _T) -> None: ... + def _delete(self, pos: int, idx: int) -> None: ... 
+ def irange( + self, + minimum: Optional[int] = ..., + maximum: Optional[int] = ..., + inclusive: Tuple[bool, bool] = ..., + reverse: bool = ..., + ): ... + def irange_key( + self, + min_key: Optional[Any] = ..., + max_key: Optional[Any] = ..., + inclusive: Tuple[bool, bool] = ..., + reserve: bool = ..., + ): ... + def bisect_left(self, value: _T) -> int: ... + def bisect_right(self, value: _T) -> int: ... + def bisect(self, value: _T) -> int: ... + def bisect_key_left(self, key: Any) -> int: ... + def _bisect_key_left(self, key: Any) -> int: ... + def bisect_key_right(self, key: Any) -> int: ... + def _bisect_key_right(self, key: Any) -> int: ... + def bisect_key(self, key: Any) -> int: ... + def count(self, value: _T) -> int: ... + def copy(self: _SKL) -> _SKL: ... + def __copy__(self: _SKL) -> _SKL: ... + def index( + self, value: _T, start: Optional[int] = ..., stop: Optional[int] = ... + ) -> int: ... + def __add__(self: _SKL, other: Iterable[_T]) -> _SKL: ... + def __radd__(self: _SKL, other: Iterable[_T]) -> _SKL: ... + def __iadd__(self: _SKL, other: Iterable[_T]) -> _SKL: ... + def __mul__(self: _SKL, num: int) -> _SKL: ... + def __rmul__(self: _SKL, num: int) -> _SKL: ... + def __imul__(self: _SKL, num: int) -> _SKL: ... + def __repr__(self) -> str: ... + def _check(self) -> None: ... + +SortedListWithKey = SortedKeyList diff --git a/synapse/util/caches/ttlcache.py b/synapse/util/caches/ttlcache.py index 3e180cafd3a0..6ce2a3d12b63 100644 --- a/synapse/util/caches/ttlcache.py +++ b/synapse/util/caches/ttlcache.py @@ -34,7 +34,7 @@ def __init__(self, cache_name, timer=time.time): self._data = {} # the _CacheEntries, sorted by expiry time - self._expiry_list = SortedList() + self._expiry_list = SortedList() # type: SortedList[_CacheEntry] self._timer = timer From 20fa83f3744b25e513fdc904261c87c324bbc87e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 14 Oct 2020 15:40:06 +0100 Subject: [PATCH 192/245] Remove racey assertion in MultiWriterIDGenerator (#8530) We asserted that the IDs returned by postgres sequence was greater than any we had seen, however this is technically racey as we may update the current positions out of order. We now assert that the sequences are correct on startup, so the assertion is no longer really required, so we remove them. --- changelog.d/8530.bugfix | 1 + synapse/storage/util/id_generators.py | 7 ------- 2 files changed, 1 insertion(+), 7 deletions(-) create mode 100644 changelog.d/8530.bugfix diff --git a/changelog.d/8530.bugfix b/changelog.d/8530.bugfix new file mode 100644 index 000000000000..443d88424ead --- /dev/null +++ b/changelog.d/8530.bugfix @@ -0,0 +1 @@ +Fix rare bug where sending an event would fail due to a racey assertion. diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index ad017207aae5..eccd2d5b7bfb 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -612,14 +612,7 @@ async def __aenter__(self) -> Union[int, List[int]]: db_autocommit=True, ) - # Assert the fetched ID is actually greater than any ID we've already - # seen. If not, then the sequence and table have got out of sync - # somehow. 
with self.id_gen._lock: - assert max(self.id_gen._current_positions.values(), default=0) < min( - self.stream_ids - ) - self.id_gen._unfinished_ids.update(self.stream_ids) if self.multiple_ids is None: From 9991aaa49c7c044c16c37e4a75ee2a9b8c2376b9 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 15 Oct 2020 09:24:10 -0400 Subject: [PATCH 193/245] 1.21.2 --- CHANGES.md | 9 +++++++++ changelog.d/8530.bugfix | 1 - debian/changelog | 7 +++++++ synapse/__init__.py | 2 +- 4 files changed, 17 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/8530.bugfix diff --git a/CHANGES.md b/CHANGES.md index 75dc5fa89395..6ef499bd9e6f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.21.2 (2020-10-15) +=========================== + +Bugfixes +-------- + +- Fix rare bug where sending an event would fail due to a racey assertion. ([\#8530](https://github.com/matrix-org/synapse/issues/8530)) + + Synapse 1.21.1 (2020-10-13) =========================== diff --git a/changelog.d/8530.bugfix b/changelog.d/8530.bugfix deleted file mode 100644 index 443d88424ead..000000000000 --- a/changelog.d/8530.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix rare bug where sending an event would fail due to a racey assertion. diff --git a/debian/changelog b/debian/changelog index eeafd4f50abe..8d873a4845c8 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,10 @@ +matrix-synapse-py3 (1.21.2) stable; urgency=medium + + [ Synapse Packaging team ] + * New synapse release 1.21.2. + + -- Synapse Packaging team Thu, 15 Oct 2020 09:23:27 -0400 + matrix-synapse-py3 (1.21.1) stable; urgency=medium [ Synapse Packaging team ] diff --git a/synapse/__init__.py b/synapse/__init__.py index 722b53a67de0..83b8e4897f3c 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -48,7 +48,7 @@ except ImportError: pass -__version__ = "1.21.1" +__version__ = "1.21.2" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From f49708dee3c46be87a23a934ecba17e7e58d4b16 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 15 Oct 2020 10:18:02 -0400 Subject: [PATCH 194/245] Add additional release notes. --- CHANGES.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 6ef499bd9e6f..af5a9bafb8a5 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,10 +1,23 @@ Synapse 1.21.2 (2020-10-15) =========================== +Security advisory +----------------- + +* HTML pages served via Synapse were vulernable to cross-site scripting (XSS) + attacks. All server administrators are encouraged to upgrade. + ([34ff8da8](https://github.com/matrix-org/synapse/commit/34ff8da83b54024289f515c6d73e6b486574d699)) + ([CVE-2020-26891](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-26891)) + + This fix was originally included in v1.21.0 but was missing a security advisory. + + This was reported by [Denis Kasak](https://github.com/dkasak). + Bugfixes -------- - Fix rare bug where sending an event would fail due to a racey assertion. ([\#8530](https://github.com/matrix-org/synapse/issues/8530)) +- Fix issues introduced in the packaging of v1.21.1 when using OpenID Connect with the Docker or Debian packages by including an updated version of the authlib dependency. Synapse 1.21.1 (2020-10-13) From f30f12a839a231eb9e57582b4a48a6ae38979de4 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 15 Oct 2020 10:28:27 -0400 Subject: [PATCH 195/245] Fix typo. 
--- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index af5a9bafb8a5..696f6bc6cce9 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,7 +4,7 @@ Synapse 1.21.2 (2020-10-15) Security advisory ----------------- -* HTML pages served via Synapse were vulernable to cross-site scripting (XSS) +* HTML pages served via Synapse were vulnerable to cross-site scripting (XSS) attacks. All server administrators are encouraged to upgrade. ([34ff8da8](https://github.com/matrix-org/synapse/commit/34ff8da83b54024289f515c6d73e6b486574d699)) ([CVE-2020-26891](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-26891)) From a7d4985a6b0a3c9e22c4f376a62e3d8664e779b8 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 15 Oct 2020 10:28:53 -0400 Subject: [PATCH 196/245] Clarify authlib changes. --- CHANGES.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 696f6bc6cce9..e9ff374e4d39 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,8 @@ Synapse 1.21.2 (2020-10-15) =========================== +Debian packages and Docker images are rebuilt using the latest versions of dependency libraries, including authlib 0.15.1. Please see bugfixes below. + Security advisory ----------------- @@ -17,7 +19,7 @@ Bugfixes -------- - Fix rare bug where sending an event would fail due to a racey assertion. ([\#8530](https://github.com/matrix-org/synapse/issues/8530)) -- Fix issues introduced in the packaging of v1.21.1 when using OpenID Connect with the Docker or Debian packages by including an updated version of the authlib dependency. +- An updated version of the authlib dependency is included in the Docker and Debian release to fix an issue using OpenID Connect. Synapse 1.21.1 (2020-10-13) From 9b8a53c7b9e1a3ca5f46e417b9fa705f8bacb494 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 15 Oct 2020 10:33:43 -0400 Subject: [PATCH 197/245] Additional tweaks. --- CHANGES.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index e9ff374e4d39..38a0814bbf53 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,14 +1,14 @@ Synapse 1.21.2 (2020-10-15) =========================== -Debian packages and Docker images are rebuilt using the latest versions of dependency libraries, including authlib 0.15.1. Please see bugfixes below. +Debian packages and Docker images have been rebuilt using the latest versions of dependency libraries, including authlib 0.15.1. Please see bugfixes below. Security advisory ----------------- * HTML pages served via Synapse were vulnerable to cross-site scripting (XSS) attacks. All server administrators are encouraged to upgrade. - ([34ff8da8](https://github.com/matrix-org/synapse/commit/34ff8da83b54024289f515c6d73e6b486574d699)) + ([\#8444](https://github.com/matrix-org/synapse/pull/8444)) ([CVE-2020-26891](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-26891)) This fix was originally included in v1.21.0 but was missing a security advisory. @@ -19,7 +19,7 @@ Bugfixes -------- - Fix rare bug where sending an event would fail due to a racey assertion. ([\#8530](https://github.com/matrix-org/synapse/issues/8530)) -- An updated version of the authlib dependency is included in the Docker and Debian release to fix an issue using OpenID Connect. +- An updated version of the authlib dependency is included in the Docker and Debian images to fix an issue using OpenID Connect. See [\#8534](https://github.com/matrix-org/synapse/issues/8534) for details. 
Synapse 1.21.1 (2020-10-13) From 654e239b25e5ed49dd0340132a74e4617aa185c8 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 15 Oct 2020 15:45:13 +0100 Subject: [PATCH 198/245] Add option to scripts-dev/lint.sh to only lint files changed since the last git commit (#8472) This PR makes several changes to the `./scripts-dev/lint.sh` script, which lints the codebase with a number of tools: * Adds usage information, with `-h` flag to show it. Otherwise it will show when providing an unknown flag. * Adds option `-d` which will check both staged and unstaged files that have changed since the last commit and add them to the list of files to lint. - Note that only files without an extension, or with a `.py` extension will be allowed. This prevents editing bash scripts causing the linters to break on non-python files. * Improves the print-out of which files/directories are being linted. --- CONTRIBUTING.md | 4 ++ changelog.d/8472.misc | 1 + scripts-dev/lint.sh | 93 ++++++++++++++++++++++++++++++++++++++----- setup.py | 4 +- 4 files changed, 90 insertions(+), 12 deletions(-) create mode 100644 changelog.d/8472.misc diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 524f82433dba..c17e3b23995a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -63,6 +63,10 @@ run-time: ./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder ``` +You can also provided the `-d` option, which will lint the files that have been +changed since the last git commit. This will often be significantly faster than +linting the whole codebase. + Before pushing new changes, ensure they don't produce linting errors. Commit any files that were corrected. diff --git a/changelog.d/8472.misc b/changelog.d/8472.misc new file mode 100644 index 000000000000..880f3f5e14fa --- /dev/null +++ b/changelog.d/8472.misc @@ -0,0 +1 @@ +Add `-d` option to `./scripts-dev/lint.sh` to lint files that have changed since the last git commit. \ No newline at end of file diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh index 064799365832..f2b65a210533 100755 --- a/scripts-dev/lint.sh +++ b/scripts-dev/lint.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # # Runs linting scripts over the local Synapse checkout # isort - sorts import statements @@ -7,15 +7,90 @@ set -e -if [ $# -ge 1 ] -then - files=$* +usage() { + echo + echo "Usage: $0 [-h] [-d] [paths...]" + echo + echo "-d" + echo " Lint files that have changed since the last git commit." + echo + echo " If paths are provided and this option is set, both provided paths and those" + echo " that have changed since the last commit will be linted." + echo + echo " If no paths are provided and this option is not set, all files will be linted." + echo + echo " Note that paths with a file extension that is not '.py' will be excluded." + echo "-h" + echo " Display this help text." +} + +USING_DIFF=0 +files=() + +while getopts ":dh" opt; do + case $opt in + d) + USING_DIFF=1 + ;; + h) + usage + exit + ;; + \?) 
+ echo "ERROR: Invalid option: -$OPTARG" >&2 + usage + exit + ;; + esac +done + +# Strip any options from the command line arguments now that +# we've finished processing them +shift "$((OPTIND-1))" + +if [ $USING_DIFF -eq 1 ]; then + # Check both staged and non-staged changes + for path in $(git diff HEAD --name-only); do + filename=$(basename "$path") + file_extension="${filename##*.}" + + # If an extension is present, and it's something other than 'py', + # then ignore this file + if [[ -n ${file_extension+x} && $file_extension != "py" ]]; then + continue + fi + + # Append this path to our list of files to lint + files+=("$path") + done +fi + +# Append any remaining arguments as files to lint +files+=("$@") + +if [[ $USING_DIFF -eq 1 ]]; then + # If we were asked to lint changed files, and no paths were found as a result... + if [ ${#files[@]} -eq 0 ]; then + # Then print and exit + echo "No files found to lint." + exit 0 + fi else - files="synapse tests scripts-dev scripts contrib synctl" + # If we were not asked to lint changed files, and no paths were found as a result, + # then lint everything! + if [[ -z ${files+x} ]]; then + # Lint all source code files and directories + files=("synapse" "tests" "scripts-dev" "scripts" "contrib" "synctl" "setup.py") + fi fi -echo "Linting these locations: $files" -isort $files -python3 -m black $files +echo "Linting these paths: ${files[*]}" +echo + +# Print out the commands being run +set -x + +isort "${files[@]}" +python3 -m black "${files[@]}" ./scripts-dev/config-lint.sh -flake8 $files +flake8 "${files[@]}" diff --git a/setup.py b/setup.py index 926b1bc86fa8..08843fe2a3e4 100755 --- a/setup.py +++ b/setup.py @@ -15,12 +15,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import glob import os -from setuptools import setup, find_packages, Command -import sys +from setuptools import Command, find_packages, setup here = os.path.abspath(os.path.dirname(__file__)) From c276bd996916adce899410b9c4c891892f51b992 Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Thu, 15 Oct 2020 17:33:28 +0100 Subject: [PATCH 199/245] Send some ephemeral events to appservices (#8437) Optionally sends typing, presence, and read receipt information to appservices. --- changelog.d/8437.feature | 1 + mypy.ini | 1 + synapse/appservice/__init__.py | 180 ++++++++++++------ synapse/appservice/api.py | 27 ++- synapse/appservice/scheduler.py | 48 +++-- synapse/config/appservice.py | 3 + synapse/handlers/appservice.py | 109 ++++++++++- synapse/handlers/receipts.py | 35 +++- synapse/handlers/sync.py | 1 - synapse/handlers/typing.py | 31 ++- synapse/notifier.py | 25 +++ synapse/storage/databases/main/appservice.py | 66 ++++++- synapse/storage/databases/main/receipts.py | 55 ++++++ .../schema/delta/59/19as_device_stream.sql | 18 ++ tests/appservice/test_scheduler.py | 77 ++++++-- tests/storage/test_appservice.py | 8 +- 16 files changed, 563 insertions(+), 122 deletions(-) create mode 100644 changelog.d/8437.feature create mode 100644 synapse/storage/databases/main/schema/delta/59/19as_device_stream.sql diff --git a/changelog.d/8437.feature b/changelog.d/8437.feature new file mode 100644 index 000000000000..4abcccb326e0 --- /dev/null +++ b/changelog.d/8437.feature @@ -0,0 +1 @@ +Implement [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409) to send typing, read receipts, and presence events to appservices. 
diff --git a/mypy.ini b/mypy.ini index 9748f6258cf2..b5db54ee3b93 100644 --- a/mypy.ini +++ b/mypy.ini @@ -15,6 +15,7 @@ files = synapse/events/builder.py, synapse/events/spamcheck.py, synapse/federation, + synapse/handlers/appservice.py, synapse/handlers/account_data.py, synapse/handlers/auth.py, synapse/handlers/cas_handler.py, diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index 13ec1f71a64c..3862d9c08f38 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -14,14 +14,15 @@ # limitations under the License. import logging import re -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Iterable, List, Match, Optional from synapse.api.constants import EventTypes -from synapse.appservice.api import ApplicationServiceApi -from synapse.types import GroupID, get_domain_from_id +from synapse.events import EventBase +from synapse.types import GroupID, JsonDict, UserID, get_domain_from_id from synapse.util.caches.descriptors import cached if TYPE_CHECKING: + from synapse.appservice.api import ApplicationServiceApi from synapse.storage.databases.main import DataStore logger = logging.getLogger(__name__) @@ -32,38 +33,6 @@ class ApplicationServiceState: UP = "up" -class AppServiceTransaction: - """Represents an application service transaction.""" - - def __init__(self, service, id, events): - self.service = service - self.id = id - self.events = events - - async def send(self, as_api: ApplicationServiceApi) -> bool: - """Sends this transaction using the provided AS API interface. - - Args: - as_api: The API to use to send. - Returns: - True if the transaction was sent. - """ - return await as_api.push_bulk( - service=self.service, events=self.events, txn_id=self.id - ) - - async def complete(self, store: "DataStore") -> None: - """Completes this transaction as successful. - - Marks this transaction ID on the application service and removes the - transaction contents from the database. - - Args: - store: The database store to operate on. - """ - await store.complete_appservice_txn(service=self.service, txn_id=self.id) - - class ApplicationService: """Defines an application service. This definition is mostly what is provided to the /register AS API. 
@@ -91,6 +60,7 @@ def __init__( protocols=None, rate_limited=True, ip_range_whitelist=None, + supports_ephemeral=False, ): self.token = token self.url = ( @@ -102,6 +72,7 @@ def __init__( self.namespaces = self._check_namespaces(namespaces) self.id = id self.ip_range_whitelist = ip_range_whitelist + self.supports_ephemeral = supports_ephemeral if "|" in self.id: raise Exception("application service ID cannot contain '|' character") @@ -161,19 +132,21 @@ def _check_namespaces(self, namespaces): raise ValueError("Expected string for 'regex' in ns '%s'" % ns) return namespaces - def _matches_regex(self, test_string, namespace_key): + def _matches_regex(self, test_string: str, namespace_key: str) -> Optional[Match]: for regex_obj in self.namespaces[namespace_key]: if regex_obj["regex"].match(test_string): return regex_obj return None - def _is_exclusive(self, ns_key, test_string): + def _is_exclusive(self, ns_key: str, test_string: str) -> bool: regex_obj = self._matches_regex(test_string, ns_key) if regex_obj: return regex_obj["exclusive"] return False - async def _matches_user(self, event, store): + async def _matches_user( + self, event: Optional[EventBase], store: Optional["DataStore"] = None + ) -> bool: if not event: return False @@ -188,14 +161,23 @@ async def _matches_user(self, event, store): if not store: return False - does_match = await self._matches_user_in_member_list(event.room_id, store) + does_match = await self.matches_user_in_member_list(event.room_id, store) return does_match - @cached(num_args=1, cache_context=True) - async def _matches_user_in_member_list(self, room_id, store, cache_context): - member_list = await store.get_users_in_room( - room_id, on_invalidate=cache_context.invalidate - ) + @cached(num_args=1) + async def matches_user_in_member_list( + self, room_id: str, store: "DataStore" + ) -> bool: + """Check if this service is interested a room based upon it's membership + + Args: + room_id: The room to check. + store: The datastore to query. + + Returns: + True if this service would like to know about this room. + """ + member_list = await store.get_users_in_room(room_id) # check joined member events for user_id in member_list: @@ -203,12 +185,14 @@ async def _matches_user_in_member_list(self, room_id, store, cache_context): return True return False - def _matches_room_id(self, event): + def _matches_room_id(self, event: EventBase) -> bool: if hasattr(event, "room_id"): return self.is_interested_in_room(event.room_id) return False - async def _matches_aliases(self, event, store): + async def _matches_aliases( + self, event: EventBase, store: Optional["DataStore"] = None + ) -> bool: if not store or not event: return False @@ -218,12 +202,15 @@ async def _matches_aliases(self, event, store): return True return False - async def is_interested(self, event, store=None) -> bool: + async def is_interested( + self, event: EventBase, store: Optional["DataStore"] = None + ) -> bool: """Check if this service is interested in this event. Args: - event(Event): The event to check. - store(DataStore) + event: The event to check. + store: The datastore to query. + Returns: True if this service would like to know about this event. 
""" @@ -231,39 +218,66 @@ async def is_interested(self, event, store=None) -> bool: if self._matches_room_id(event): return True + # This will check the namespaces first before + # checking the store, so should be run before _matches_aliases + if await self._matches_user(event, store): + return True + + # This will check the store, so should be run last if await self._matches_aliases(event, store): return True - if await self._matches_user(event, store): + return False + + @cached(num_args=1) + async def is_interested_in_presence( + self, user_id: UserID, store: "DataStore" + ) -> bool: + """Check if this service is interested a user's presence + + Args: + user_id: The user to check. + store: The datastore to query. + + Returns: + True if this service would like to know about presence for this user. + """ + # Find all the rooms the sender is in + if self.is_interested_in_user(user_id.to_string()): return True + room_ids = await store.get_rooms_for_user(user_id.to_string()) + # Then find out if the appservice is interested in any of those rooms + for room_id in room_ids: + if await self.matches_user_in_member_list(room_id, store): + return True return False - def is_interested_in_user(self, user_id): + def is_interested_in_user(self, user_id: str) -> bool: return ( - self._matches_regex(user_id, ApplicationService.NS_USERS) + bool(self._matches_regex(user_id, ApplicationService.NS_USERS)) or user_id == self.sender ) - def is_interested_in_alias(self, alias): + def is_interested_in_alias(self, alias: str) -> bool: return bool(self._matches_regex(alias, ApplicationService.NS_ALIASES)) - def is_interested_in_room(self, room_id): + def is_interested_in_room(self, room_id: str) -> bool: return bool(self._matches_regex(room_id, ApplicationService.NS_ROOMS)) - def is_exclusive_user(self, user_id): + def is_exclusive_user(self, user_id: str) -> bool: return ( self._is_exclusive(ApplicationService.NS_USERS, user_id) or user_id == self.sender ) - def is_interested_in_protocol(self, protocol): + def is_interested_in_protocol(self, protocol: str) -> bool: return protocol in self.protocols - def is_exclusive_alias(self, alias): + def is_exclusive_alias(self, alias: str) -> bool: return self._is_exclusive(ApplicationService.NS_ALIASES, alias) - def is_exclusive_room(self, room_id): + def is_exclusive_room(self, room_id: str) -> bool: return self._is_exclusive(ApplicationService.NS_ROOMS, room_id) def get_exclusive_user_regexes(self): @@ -276,14 +290,14 @@ def get_exclusive_user_regexes(self): if regex_obj["exclusive"] ] - def get_groups_for_user(self, user_id): + def get_groups_for_user(self, user_id: str) -> Iterable[str]: """Get the groups that this user is associated with by this AS Args: - user_id (str): The ID of the user. + user_id: The ID of the user. Returns: - iterable[str]: an iterable that yields group_id strings. + An iterable that yields group_id strings. 
""" return ( regex_obj["group_id"] @@ -291,7 +305,7 @@ def get_groups_for_user(self, user_id): if "group_id" in regex_obj and regex_obj["regex"].match(user_id) ) - def is_rate_limited(self): + def is_rate_limited(self) -> bool: return self.rate_limited def __str__(self): @@ -300,3 +314,45 @@ def __str__(self): dict_copy["token"] = "" dict_copy["hs_token"] = "" return "ApplicationService: %s" % (dict_copy,) + + +class AppServiceTransaction: + """Represents an application service transaction.""" + + def __init__( + self, + service: ApplicationService, + id: int, + events: List[EventBase], + ephemeral: List[JsonDict], + ): + self.service = service + self.id = id + self.events = events + self.ephemeral = ephemeral + + async def send(self, as_api: "ApplicationServiceApi") -> bool: + """Sends this transaction using the provided AS API interface. + + Args: + as_api: The API to use to send. + Returns: + True if the transaction was sent. + """ + return await as_api.push_bulk( + service=self.service, + events=self.events, + ephemeral=self.ephemeral, + txn_id=self.id, + ) + + async def complete(self, store: "DataStore") -> None: + """Completes this transaction as successful. + + Marks this transaction ID on the application service and removes the + transaction contents from the database. + + Args: + store: The database store to operate on. + """ + await store.complete_appservice_txn(service=self.service, txn_id=self.id) diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index e8f07937952b..e366a982b801 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -14,12 +14,13 @@ # limitations under the License. import logging import urllib -from typing import TYPE_CHECKING, Optional, Tuple +from typing import TYPE_CHECKING, List, Optional, Tuple from prometheus_client import Counter from synapse.api.constants import EventTypes, ThirdPartyEntityKind from synapse.api.errors import CodeMessageException +from synapse.events import EventBase from synapse.events.utils import serialize_event from synapse.http.client import SimpleHttpClient from synapse.types import JsonDict, ThirdPartyInstanceID @@ -201,7 +202,13 @@ async def _get() -> Optional[JsonDict]: key = (service.id, protocol) return await self.protocol_meta_cache.wrap(key, _get) - async def push_bulk(self, service, events, txn_id=None): + async def push_bulk( + self, + service: "ApplicationService", + events: List[EventBase], + ephemeral: List[JsonDict], + txn_id: Optional[int] = None, + ): if service.url is None: return True @@ -211,15 +218,19 @@ async def push_bulk(self, service, events, txn_id=None): logger.warning( "push_bulk: Missing txn ID sending events to %s", service.url ) - txn_id = str(0) - txn_id = str(txn_id) + txn_id = 0 + + uri = service.url + ("/transactions/%s" % urllib.parse.quote(str(txn_id))) + + # Never send ephemeral events to appservices that do not support it + if service.supports_ephemeral: + body = {"events": events, "de.sorunome.msc2409.ephemeral": ephemeral} + else: + body = {"events": events} - uri = service.url + ("/transactions/%s" % urllib.parse.quote(txn_id)) try: await self.put_json( - uri=uri, - json_body={"events": events}, - args={"access_token": service.hs_token}, + uri=uri, json_body=body, args={"access_token": service.hs_token}, ) sent_transactions_counter.labels(service.id).inc() sent_events_counter.labels(service.id).inc(len(events)) diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index 8eb8c6f51cf9..ad3c408519ee 100644 --- 
a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -49,10 +49,13 @@ components. """ import logging +from typing import List -from synapse.appservice import ApplicationServiceState +from synapse.appservice import ApplicationService, ApplicationServiceState +from synapse.events import EventBase from synapse.logging.context import run_in_background from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.types import JsonDict logger = logging.getLogger(__name__) @@ -82,8 +85,13 @@ async def start(self): for service in services: self.txn_ctrl.start_recoverer(service) - def submit_event_for_as(self, service, event): - self.queuer.enqueue(service, event) + def submit_event_for_as(self, service: ApplicationService, event: EventBase): + self.queuer.enqueue_event(service, event) + + def submit_ephemeral_events_for_as( + self, service: ApplicationService, events: List[JsonDict] + ): + self.queuer.enqueue_ephemeral(service, events) class _ServiceQueuer: @@ -96,17 +104,15 @@ class _ServiceQueuer: def __init__(self, txn_ctrl, clock): self.queued_events = {} # dict of {service_id: [events]} + self.queued_ephemeral = {} # dict of {service_id: [events]} # the appservices which currently have a transaction in flight self.requests_in_flight = set() self.txn_ctrl = txn_ctrl self.clock = clock - def enqueue(self, service, event): - self.queued_events.setdefault(service.id, []).append(event) - + def _start_background_request(self, service): # start a sender for this appservice if we don't already have one - if service.id in self.requests_in_flight: return @@ -114,7 +120,15 @@ def enqueue(self, service, event): "as-sender-%s" % (service.id,), self._send_request, service ) - async def _send_request(self, service): + def enqueue_event(self, service: ApplicationService, event: EventBase): + self.queued_events.setdefault(service.id, []).append(event) + self._start_background_request(service) + + def enqueue_ephemeral(self, service: ApplicationService, events: List[JsonDict]): + self.queued_ephemeral.setdefault(service.id, []).extend(events) + self._start_background_request(service) + + async def _send_request(self, service: ApplicationService): # sanity-check: we shouldn't get here if this service already has a sender # running. 
assert service.id not in self.requests_in_flight @@ -123,10 +137,11 @@ async def _send_request(self, service): try: while True: events = self.queued_events.pop(service.id, []) - if not events: + ephemeral = self.queued_ephemeral.pop(service.id, []) + if not events and not ephemeral: return try: - await self.txn_ctrl.send(service, events) + await self.txn_ctrl.send(service, events, ephemeral) except Exception: logger.exception("AS request failed") finally: @@ -158,9 +173,16 @@ def __init__(self, clock, store, as_api): # for UTs self.RECOVERER_CLASS = _Recoverer - async def send(self, service, events): + async def send( + self, + service: ApplicationService, + events: List[EventBase], + ephemeral: List[JsonDict] = [], + ): try: - txn = await self.store.create_appservice_txn(service=service, events=events) + txn = await self.store.create_appservice_txn( + service=service, events=events, ephemeral=ephemeral + ) service_is_up = await self._is_service_up(service) if service_is_up: sent = await txn.send(self.as_api) @@ -204,7 +226,7 @@ def start_recoverer(self, service): recoverer.recover() logger.info("Now %i active recoverers", len(self.recoverers)) - async def _is_service_up(self, service): + async def _is_service_up(self, service: ApplicationService) -> bool: state = await self.store.get_appservice_state(service) return state == ApplicationServiceState.UP or state is None diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py index 8ed3e2425843..746fc3cc02f6 100644 --- a/synapse/config/appservice.py +++ b/synapse/config/appservice.py @@ -160,6 +160,8 @@ def _load_appservice(hostname, as_info, config_filename): if as_info.get("ip_range_whitelist"): ip_range_whitelist = IPSet(as_info.get("ip_range_whitelist")) + supports_ephemeral = as_info.get("de.sorunome.msc2409.push_ephemeral", False) + return ApplicationService( token=as_info["as_token"], hostname=hostname, @@ -168,6 +170,7 @@ def _load_appservice(hostname, as_info, config_filename): hs_token=as_info["hs_token"], sender=user_id, id=as_info["id"], + supports_ephemeral=supports_ephemeral, protocols=protocols, rate_limited=rate_limited, ip_range_whitelist=ip_range_whitelist, diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index c8d5e580353d..07240d3a14ba 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -14,6 +14,7 @@ # limitations under the License. 
import logging +from typing import Dict, List, Optional from prometheus_client import Counter @@ -21,13 +22,16 @@ import synapse from synapse.api.constants import EventTypes +from synapse.appservice import ApplicationService +from synapse.events import EventBase +from synapse.handlers.presence import format_user_presence_state from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.metrics import ( event_processing_loop_counter, event_processing_loop_room_count, ) from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.types import RoomStreamToken +from synapse.types import Collection, JsonDict, RoomStreamToken, UserID from synapse.util.metrics import Measure logger = logging.getLogger(__name__) @@ -44,6 +48,7 @@ def __init__(self, hs): self.started_scheduler = False self.clock = hs.get_clock() self.notify_appservices = hs.config.notify_appservices + self.event_sources = hs.get_event_sources() self.current_max = 0 self.is_processing = False @@ -82,7 +87,7 @@ async def notify_interested_services(self, max_token: RoomStreamToken): if not events: break - events_by_room = {} + events_by_room = {} # type: Dict[str, List[EventBase]] for event in events: events_by_room.setdefault(event.room_id, []).append(event) @@ -161,6 +166,104 @@ async def handle_room_events(events): finally: self.is_processing = False + async def notify_interested_services_ephemeral( + self, stream_key: str, new_token: Optional[int], users: Collection[UserID] = [], + ): + """This is called by the notifier in the background + when a ephemeral event handled by the homeserver. + + This will determine which appservices + are interested in the event, and submit them. + + Events will only be pushed to appservices + that have opted into ephemeral events + + Args: + stream_key: The stream the event came from. + new_token: The latest stream token + users: The user(s) involved with the event. + """ + services = [ + service + for service in self.store.get_app_services() + if service.supports_ephemeral + ] + if not services or not self.notify_appservices: + return + logger.info("Checking interested services for %s" % (stream_key)) + with Measure(self.clock, "notify_interested_services_ephemeral"): + for service in services: + # Only handle typing if we have the latest token + if stream_key == "typing_key" and new_token is not None: + events = await self._handle_typing(service, new_token) + if events: + self.scheduler.submit_ephemeral_events_for_as(service, events) + # We don't persist the token for typing_key for performance reasons + elif stream_key == "receipt_key": + events = await self._handle_receipts(service) + if events: + self.scheduler.submit_ephemeral_events_for_as(service, events) + await self.store.set_type_stream_id_for_appservice( + service, "read_receipt", new_token + ) + elif stream_key == "presence_key": + events = await self._handle_presence(service, users) + if events: + self.scheduler.submit_ephemeral_events_for_as(service, events) + await self.store.set_type_stream_id_for_appservice( + service, "presence", new_token + ) + + async def _handle_typing(self, service: ApplicationService, new_token: int): + typing_source = self.event_sources.sources["typing"] + # Get the typing events from just before current + typing, _ = await typing_source.get_new_events_as( + service=service, + # For performance reasons, we don't persist the previous + # token in the DB and instead fetch the latest typing information + # for appservices. 
+ from_key=new_token - 1, + ) + return typing + + async def _handle_receipts(self, service: ApplicationService): + from_key = await self.store.get_type_stream_id_for_appservice( + service, "read_receipt" + ) + receipts_source = self.event_sources.sources["receipt"] + receipts, _ = await receipts_source.get_new_events_as( + service=service, from_key=from_key + ) + return receipts + + async def _handle_presence( + self, service: ApplicationService, users: Collection[UserID] + ): + events = [] # type: List[JsonDict] + presence_source = self.event_sources.sources["presence"] + from_key = await self.store.get_type_stream_id_for_appservice( + service, "presence" + ) + for user in users: + interested = await service.is_interested_in_presence(user, self.store) + if not interested: + continue + presence_events, _ = await presence_source.get_new_events( + user=user, service=service, from_key=from_key, + ) + time_now = self.clock.time_msec() + presence_events = [ + { + "type": "m.presence", + "sender": event.user_id, + "content": format_user_presence_state( + event, time_now, include_user_id=False + ), + } + for event in presence_events + ] + events = events + presence_events + async def query_user_exists(self, user_id): """Check if any application service knows this user_id exists. @@ -223,7 +326,7 @@ async def query_3pe(self, kind, protocol, fields): async def get_3pe_protocols(self, only_protocol=None): services = self.store.get_app_services() - protocols = {} + protocols = {} # type: Dict[str, List[JsonDict]] # Collect up all the individual protocol responses out of the ASes for s in services: diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index 722592375796..c242c409cf26 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -13,9 +13,11 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +from typing import List, Tuple +from synapse.appservice import ApplicationService from synapse.handlers._base import BaseHandler -from synapse.types import ReadReceipt, get_domain_from_id +from synapse.types import JsonDict, ReadReceipt, get_domain_from_id from synapse.util.async_helpers import maybe_awaitable logger = logging.getLogger(__name__) @@ -140,5 +142,36 @@ async def get_new_events(self, from_key, room_ids, **kwargs): return (events, to_key) + async def get_new_events_as( + self, from_key: int, service: ApplicationService + ) -> Tuple[List[JsonDict], int]: + """Returns a set of new receipt events that an appservice + may be interested in. 
+ + Args: + from_key: the stream position at which events should be fetched from + service: The appservice which may be interested + """ + from_key = int(from_key) + to_key = self.get_current_key() + + if from_key == to_key: + return [], to_key + + # We first need to fetch all new receipts + rooms_to_events = await self.store.get_linearized_receipts_for_all_rooms( + from_key=from_key, to_key=to_key + ) + + # Then filter down to rooms that the AS can read + events = [] + for room_id, event in rooms_to_events.items(): + if not await service.matches_user_in_member_list(room_id, self.store): + continue + + events.append(event) + + return (events, to_key) + def get_current_key(self, direction="f"): return self.store.get_max_receipt_stream_id() diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index a3066310942e..b527724bc492 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -13,7 +13,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import itertools import logging from typing import TYPE_CHECKING, Any, Dict, FrozenSet, List, Optional, Set, Tuple diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 3cbfc2d780a7..d3692842e3b6 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -12,16 +12,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import logging import random from collections import namedtuple from typing import TYPE_CHECKING, List, Set, Tuple from synapse.api.errors import AuthError, ShadowBanError, SynapseError +from synapse.appservice import ApplicationService from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.tcp.streams import TypingStream -from synapse.types import UserID, get_domain_from_id +from synapse.types import JsonDict, UserID, get_domain_from_id from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.metrics import Measure from synapse.util.wheel_timer import WheelTimer @@ -430,6 +430,33 @@ def _make_event_for(self, room_id): "content": {"user_ids": list(typing)}, } + async def get_new_events_as( + self, from_key: int, service: ApplicationService + ) -> Tuple[List[JsonDict], int]: + """Returns a set of new typing events that an appservice + may be interested in. 
+ + Args: + from_key: the stream position at which events should be fetched from + service: The appservice which may be interested + """ + with Measure(self.clock, "typing.get_new_events_as"): + from_key = int(from_key) + handler = self.get_typing_handler() + + events = [] + for room_id in handler._room_serials.keys(): + if handler._room_serials[room_id] <= from_key: + continue + if not await service.matches_user_in_member_list( + room_id, handler.store + ): + continue + + events.append(self._make_event_for(room_id)) + + return (events, handler._latest_room_serial) + async def get_new_events(self, from_key, room_ids, **kwargs): with Measure(self.clock, "typing.get_new_events"): from_key = int(from_key) diff --git a/synapse/notifier.py b/synapse/notifier.py index 51c830c91ea2..2e993411b9ec 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -329,6 +329,22 @@ async def _notify_app_services(self, max_room_stream_token: RoomStreamToken): except Exception: logger.exception("Error notifying application services of event") + async def _notify_app_services_ephemeral( + self, + stream_key: str, + new_token: Union[int, RoomStreamToken], + users: Collection[UserID] = [], + ): + try: + stream_token = None + if isinstance(new_token, int): + stream_token = new_token + await self.appservice_handler.notify_interested_services_ephemeral( + stream_key, stream_token, users + ) + except Exception: + logger.exception("Error notifying application services of event") + async def _notify_pusher_pool(self, max_room_stream_token: RoomStreamToken): try: await self._pusher_pool.on_new_notifications(max_room_stream_token) @@ -367,6 +383,15 @@ def on_new_event( self.notify_replication() + # Notify appservices + run_as_background_process( + "_notify_app_services_ephemeral", + self._notify_app_services_ephemeral, + stream_key, + new_token, + users, + ) + def on_new_replication_data(self) -> None: """Used to inform replication listeners that something has happend without waking up any of the normal user event streams""" diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py index 85f6b1e3fdf7..43bf0f649abf 100644 --- a/synapse/storage/databases/main/appservice.py +++ b/synapse/storage/databases/main/appservice.py @@ -15,12 +15,15 @@ # limitations under the License. import logging import re +from typing import List -from synapse.appservice import AppServiceTransaction +from synapse.appservice import ApplicationService, AppServiceTransaction from synapse.config.appservice import load_appservices +from synapse.events import EventBase from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import DatabasePool from synapse.storage.databases.main.events_worker import EventsWorkerStore +from synapse.types import JsonDict from synapse.util import json_encoder logger = logging.getLogger(__name__) @@ -172,15 +175,23 @@ async def set_appservice_state(self, service, state) -> None: "application_services_state", {"as_id": service.id}, {"state": state} ) - async def create_appservice_txn(self, service, events): + async def create_appservice_txn( + self, + service: ApplicationService, + events: List[EventBase], + ephemeral: List[JsonDict], + ) -> AppServiceTransaction: """Atomically creates a new transaction for this application service - with the given list of events. + with the given list of events. Ephemeral events are NOT persisted to the + database and are not resent if a transaction is retried. 
Args: - service(ApplicationService): The service who the transaction is for. - events(list): A list of events to put in the transaction. + service: The service who the transaction is for. + events: A list of persistent events to put in the transaction. + ephemeral: A list of ephemeral events to put in the transaction. + Returns: - AppServiceTransaction: A new transaction. + A new transaction. """ def _create_appservice_txn(txn): @@ -207,7 +218,9 @@ def _create_appservice_txn(txn): "VALUES(?,?,?)", (service.id, new_txn_id, event_ids), ) - return AppServiceTransaction(service=service, id=new_txn_id, events=events) + return AppServiceTransaction( + service=service, id=new_txn_id, events=events, ephemeral=ephemeral + ) return await self.db_pool.runInteraction( "create_appservice_txn", _create_appservice_txn @@ -296,7 +309,9 @@ def _get_oldest_unsent_txn(txn): events = await self.get_events_as_list(event_ids) - return AppServiceTransaction(service=service, id=entry["txn_id"], events=events) + return AppServiceTransaction( + service=service, id=entry["txn_id"], events=events, ephemeral=[] + ) def _get_last_txn(self, txn, service_id): txn.execute( @@ -320,7 +335,7 @@ def set_appservice_last_pos_txn(txn): ) async def get_new_events_for_appservice(self, current_id, limit): - """Get all new evnets""" + """Get all new events for an appservice""" def get_new_events_for_appservice_txn(txn): sql = ( @@ -351,6 +366,39 @@ def get_new_events_for_appservice_txn(txn): return upper_bound, events + async def get_type_stream_id_for_appservice( + self, service: ApplicationService, type: str + ) -> int: + def get_type_stream_id_for_appservice_txn(txn): + stream_id_type = "%s_stream_id" % type + txn.execute( + "SELECT ? FROM application_services_state WHERE as_id=?", + (stream_id_type, service.id,), + ) + last_txn_id = txn.fetchone() + if last_txn_id is None or last_txn_id[0] is None: # no row exists + return 0 + else: + return int(last_txn_id[0]) + + return await self.db_pool.runInteraction( + "get_type_stream_id_for_appservice", get_type_stream_id_for_appservice_txn + ) + + async def set_type_stream_id_for_appservice( + self, service: ApplicationService, type: str, pos: int + ) -> None: + def set_type_stream_id_for_appservice_txn(txn): + stream_id_type = "%s_stream_id" % type + txn.execute( + "UPDATE ? SET device_list_stream_id = ? 
WHERE as_id=?", + (stream_id_type, pos, service.id), + ) + + await self.db_pool.runInteraction( + "set_type_stream_id_for_appservice", set_type_stream_id_for_appservice_txn + ) + class ApplicationServiceTransactionStore(ApplicationServiceTransactionWorkerStore): # This is currently empty due to there not being any AS storage functions diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index c79ddff6806f..5cdf16521c3f 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -23,6 +23,7 @@ from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause from synapse.storage.database import DatabasePool from synapse.storage.util.id_generators import StreamIdGenerator +from synapse.types import JsonDict from synapse.util import json_encoder from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches.descriptors import cached, cachedList @@ -274,6 +275,60 @@ def f(txn): } return results + @cached(num_args=2,) + async def get_linearized_receipts_for_all_rooms( + self, to_key: int, from_key: Optional[int] = None + ) -> Dict[str, JsonDict]: + """Get receipts for all rooms between two stream_ids. + + Args: + to_key: Max stream id to fetch receipts upto. + from_key: Min stream id to fetch receipts from. None fetches + from the start. + + Returns: + A dictionary of roomids to a list of receipts. + """ + + def f(txn): + if from_key: + sql = """ + SELECT * FROM receipts_linearized WHERE + stream_id > ? AND stream_id <= ? + """ + txn.execute(sql, [from_key, to_key]) + else: + sql = """ + SELECT * FROM receipts_linearized WHERE + stream_id <= ? + """ + + txn.execute(sql, [to_key]) + + return self.db_pool.cursor_to_dict(txn) + + txn_results = await self.db_pool.runInteraction( + "get_linearized_receipts_for_all_rooms", f + ) + + results = {} + for row in txn_results: + # We want a single event per room, since we want to batch the + # receipts by room, event and type. + room_event = results.setdefault( + row["room_id"], + {"type": "m.receipt", "room_id": row["room_id"], "content": {}}, + ) + + # The content is of the form: + # {"$foo:bar": { "read": { "@user:host": }, .. }, .. } + event_entry = room_event["content"].setdefault(row["event_id"], {}) + receipt_type = event_entry.setdefault(row["receipt_type"], {}) + + receipt_type[row["user_id"]] = db_to_json(row["data"]) + + return results + async def get_users_sent_receipts_between( self, last_id: int, current_id: int ) -> List[str]: diff --git a/synapse/storage/databases/main/schema/delta/59/19as_device_stream.sql b/synapse/storage/databases/main/schema/delta/59/19as_device_stream.sql new file mode 100644 index 000000000000..20f5a95a24f8 --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/59/19as_device_stream.sql @@ -0,0 +1,18 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +ALTER TABLE application_services_state + ADD COLUMN read_receipt_stream_id INT, + ADD COLUMN presence_stream_id INT; \ No newline at end of file diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py index 68a4caabbfdf..2acb8b7603b0 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py @@ -60,7 +60,7 @@ def test_single_service_up_txn_sent(self): self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events))) self.store.create_appservice_txn.assert_called_once_with( - service=service, events=events # txn made and saved + service=service, events=events, ephemeral=[] # txn made and saved ) self.assertEquals(0, len(self.txnctrl.recoverers)) # no recoverer made txn.complete.assert_called_once_with(self.store) # txn completed @@ -81,7 +81,7 @@ def test_single_service_down(self): self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events))) self.store.create_appservice_txn.assert_called_once_with( - service=service, events=events # txn made and saved + service=service, events=events, ephemeral=[] # txn made and saved ) self.assertEquals(0, txn.send.call_count) # txn not sent though self.assertEquals(0, txn.complete.call_count) # or completed @@ -106,7 +106,7 @@ def test_single_service_up_txn_not_sent(self): self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events))) self.store.create_appservice_txn.assert_called_once_with( - service=service, events=events + service=service, events=events, ephemeral=[] ) self.assertEquals(1, self.recoverer_fn.call_count) # recoverer made self.assertEquals(1, self.recoverer.recover.call_count) # and invoked @@ -202,26 +202,28 @@ def test_send_single_event_no_queue(self): # Expect the event to be sent immediately. service = Mock(id=4) event = Mock() - self.queuer.enqueue(service, event) - self.txn_ctrl.send.assert_called_once_with(service, [event]) + self.queuer.enqueue_event(service, event) + self.txn_ctrl.send.assert_called_once_with(service, [event], []) def test_send_single_event_with_queue(self): d = defer.Deferred() - self.txn_ctrl.send = Mock(side_effect=lambda x, y: make_deferred_yieldable(d)) + self.txn_ctrl.send = Mock( + side_effect=lambda x, y, z: make_deferred_yieldable(d) + ) service = Mock(id=4) event = Mock(event_id="first") event2 = Mock(event_id="second") event3 = Mock(event_id="third") # Send an event and don't resolve it just yet. - self.queuer.enqueue(service, event) + self.queuer.enqueue_event(service, event) # Send more events: expect send() to NOT be called multiple times. 
- self.queuer.enqueue(service, event2) - self.queuer.enqueue(service, event3) - self.txn_ctrl.send.assert_called_with(service, [event]) + self.queuer.enqueue_event(service, event2) + self.queuer.enqueue_event(service, event3) + self.txn_ctrl.send.assert_called_with(service, [event], []) self.assertEquals(1, self.txn_ctrl.send.call_count) # Resolve the send event: expect the queued events to be sent d.callback(service) - self.txn_ctrl.send.assert_called_with(service, [event2, event3]) + self.txn_ctrl.send.assert_called_with(service, [event2, event3], []) self.assertEquals(2, self.txn_ctrl.send.call_count) def test_multiple_service_queues(self): @@ -239,21 +241,58 @@ def test_multiple_service_queues(self): send_return_list = [srv_1_defer, srv_2_defer] - def do_send(x, y): + def do_send(x, y, z): return make_deferred_yieldable(send_return_list.pop(0)) self.txn_ctrl.send = Mock(side_effect=do_send) # send events for different ASes and make sure they are sent - self.queuer.enqueue(srv1, srv_1_event) - self.queuer.enqueue(srv1, srv_1_event2) - self.txn_ctrl.send.assert_called_with(srv1, [srv_1_event]) - self.queuer.enqueue(srv2, srv_2_event) - self.queuer.enqueue(srv2, srv_2_event2) - self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event]) + self.queuer.enqueue_event(srv1, srv_1_event) + self.queuer.enqueue_event(srv1, srv_1_event2) + self.txn_ctrl.send.assert_called_with(srv1, [srv_1_event], []) + self.queuer.enqueue_event(srv2, srv_2_event) + self.queuer.enqueue_event(srv2, srv_2_event2) + self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event], []) # make sure callbacks for a service only send queued events for THAT # service srv_2_defer.callback(srv2) - self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event2]) + self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event2], []) self.assertEquals(3, self.txn_ctrl.send.call_count) + + def test_send_single_ephemeral_no_queue(self): + # Expect the event to be sent immediately. + service = Mock(id=4, name="service") + event_list = [Mock(name="event")] + self.queuer.enqueue_ephemeral(service, event_list) + self.txn_ctrl.send.assert_called_once_with(service, [], event_list) + + def test_send_multiple_ephemeral_no_queue(self): + # Expect the event to be sent immediately. + service = Mock(id=4, name="service") + event_list = [Mock(name="event1"), Mock(name="event2"), Mock(name="event3")] + self.queuer.enqueue_ephemeral(service, event_list) + self.txn_ctrl.send.assert_called_once_with(service, [], event_list) + + def test_send_single_ephemeral_with_queue(self): + d = defer.Deferred() + self.txn_ctrl.send = Mock( + side_effect=lambda x, y, z: make_deferred_yieldable(d) + ) + service = Mock(id=4) + event_list_1 = [Mock(event_id="event1"), Mock(event_id="event2")] + event_list_2 = [Mock(event_id="event3"), Mock(event_id="event4")] + event_list_3 = [Mock(event_id="event5"), Mock(event_id="event6")] + + # Send an event and don't resolve it just yet. + self.queuer.enqueue_ephemeral(service, event_list_1) + # Send more events: expect send() to NOT be called multiple times. 
+ self.queuer.enqueue_ephemeral(service, event_list_2) + self.queuer.enqueue_ephemeral(service, event_list_3) + self.txn_ctrl.send.assert_called_with(service, [], event_list_1) + self.assertEquals(1, self.txn_ctrl.send.call_count) + # Resolve txn_ctrl.send + d.callback(service) + # Expect the queued events to be sent + self.txn_ctrl.send.assert_called_with(service, [], event_list_2 + event_list_3) + self.assertEquals(2, self.txn_ctrl.send.call_count) diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index c905a3893075..c5c79873495d 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -244,7 +244,7 @@ def test_create_appservice_txn_first(self): service = Mock(id=self.as_list[0]["id"]) events = [Mock(event_id="e1"), Mock(event_id="e2")] txn = yield defer.ensureDeferred( - self.store.create_appservice_txn(service, events) + self.store.create_appservice_txn(service, events, []) ) self.assertEquals(txn.id, 1) self.assertEquals(txn.events, events) @@ -258,7 +258,7 @@ def test_create_appservice_txn_older_last_txn(self): yield self._insert_txn(service.id, 9644, events) yield self._insert_txn(service.id, 9645, events) txn = yield defer.ensureDeferred( - self.store.create_appservice_txn(service, events) + self.store.create_appservice_txn(service, events, []) ) self.assertEquals(txn.id, 9646) self.assertEquals(txn.events, events) @@ -270,7 +270,7 @@ def test_create_appservice_txn_up_to_date_last_txn(self): events = [Mock(event_id="e1"), Mock(event_id="e2")] yield self._set_last_txn(service.id, 9643) txn = yield defer.ensureDeferred( - self.store.create_appservice_txn(service, events) + self.store.create_appservice_txn(service, events, []) ) self.assertEquals(txn.id, 9644) self.assertEquals(txn.events, events) @@ -293,7 +293,7 @@ def test_create_appservice_txn_up_fuzzing(self): yield self._insert_txn(self.as_list[3]["id"], 9643, events) txn = yield defer.ensureDeferred( - self.store.create_appservice_txn(service, events) + self.store.create_appservice_txn(service, events, []) ) self.assertEquals(txn.id, 9644) self.assertEquals(txn.events, events) From 6b5a115c0a0f9036444cd8686b32afbdf5334915 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Thu, 15 Oct 2020 21:29:13 +0200 Subject: [PATCH 200/245] Solidify the HomeServer constructor. (#8515) This implements a more standard API for instantiating a homeserver and moves some of the dependency injection into the test suite. More concretely this stops using `setattr` on all `kwargs` passed to `HomeServer`. --- changelog.d/8515.misc | 1 + synapse/server.py | 14 +++++++---- tests/app/test_frontend_proxy.py | 2 +- tests/app/test_openid_listener.py | 4 +-- tests/replication/_base.py | 4 +-- tests/replication/test_federation_ack.py | 2 +- tests/utils.py | 31 +++++++++++++----------- 7 files changed, 33 insertions(+), 25 deletions(-) create mode 100644 changelog.d/8515.misc diff --git a/changelog.d/8515.misc b/changelog.d/8515.misc new file mode 100644 index 000000000000..1f8aa292d81d --- /dev/null +++ b/changelog.d/8515.misc @@ -0,0 +1 @@ +Apply some internal fixes to the `HomeServer` class to make its code more idiomatic and statically-verifiable. 
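In practice the constructor surface shrinks to the explicitly named arguments, and anything a test wants to override is attached to the instance afterwards instead of being passed through `**kwargs`. A rough sketch of the new pattern (the hostname, `config`/`reactor` objects and the stubbed `http_client` below are illustrative, not taken from the patch):

    from synapse.app.homeserver import SynapseHomeServer

    # `config` is assumed to be a fully parsed HomeServerConfig and `reactor`
    # a Twisted reactor supplied by the test harness.
    hs = SynapseHomeServer(
        "test",
        config=config,
        reactor=reactor,
        version_string="Synapse/tests",  # now a named parameter rather than a kwarg grab-bag
    )

    # Test-only dependency injection happens after construction: the helper
    # simply sets attributes on the instance.
    for name, value in {"http_client": None}.items():
        setattr(hs, name, value)

    hs.setup()

This keeps `HomeServer.__init__` statically checkable while leaving the tests free to stub out whatever they need.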
diff --git a/synapse/server.py b/synapse/server.py index f921ee4b53af..21a232bbd964 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -205,7 +205,13 @@ class HomeServer(metaclass=abc.ABCMeta): # instantiated during setup() for future return by get_datastore() DATASTORE_CLASS = abc.abstractproperty() - def __init__(self, hostname: str, config: HomeServerConfig, reactor=None, **kwargs): + def __init__( + self, + hostname: str, + config: HomeServerConfig, + reactor=None, + version_string="Synapse", + ): """ Args: hostname : The hostname for the server. @@ -236,11 +242,9 @@ def __init__(self, hostname: str, config: HomeServerConfig, reactor=None, **kwar burst_count=config.rc_registration.burst_count, ) - self.datastores = None # type: Optional[Databases] + self.version_string = version_string - # Other kwargs are explicit dependencies - for depname in kwargs: - setattr(self, depname, kwargs[depname]) + self.datastores = None # type: Optional[Databases] def get_instance_id(self) -> str: """A unique ID for this synapse process instance. diff --git a/tests/app/test_frontend_proxy.py b/tests/app/test_frontend_proxy.py index 641093d34988..4a301b84e129 100644 --- a/tests/app/test_frontend_proxy.py +++ b/tests/app/test_frontend_proxy.py @@ -22,7 +22,7 @@ class FrontendProxyTests(HomeserverTestCase): def make_homeserver(self, reactor, clock): hs = self.setup_test_homeserver( - http_client=None, homeserverToUse=GenericWorkerServer + http_client=None, homeserver_to_use=GenericWorkerServer ) return hs diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py index 0f016c32ebc0..c2b10d2c704b 100644 --- a/tests/app/test_openid_listener.py +++ b/tests/app/test_openid_listener.py @@ -26,7 +26,7 @@ class FederationReaderOpenIDListenerTests(HomeserverTestCase): def make_homeserver(self, reactor, clock): hs = self.setup_test_homeserver( - http_client=None, homeserverToUse=GenericWorkerServer + http_client=None, homeserver_to_use=GenericWorkerServer ) return hs @@ -84,7 +84,7 @@ def test_openid_listener(self, names, expectation): class SynapseHomeserverOpenIDListenerTests(HomeserverTestCase): def make_homeserver(self, reactor, clock): hs = self.setup_test_homeserver( - http_client=None, homeserverToUse=SynapseHomeServer + http_client=None, homeserver_to_use=SynapseHomeServer ) return hs diff --git a/tests/replication/_base.py b/tests/replication/_base.py index 81ea985b9f43..093e2faac7bf 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -59,7 +59,7 @@ def prepare(self, reactor, clock, hs): self.reactor.lookups["testserv"] = "1.2.3.4" self.worker_hs = self.setup_test_homeserver( http_client=None, - homeserverToUse=GenericWorkerServer, + homeserver_to_use=GenericWorkerServer, config=self._get_worker_hs_config(), reactor=self.reactor, ) @@ -266,7 +266,7 @@ def make_worker_hs( config.update(extra_config) worker_hs = self.setup_test_homeserver( - homeserverToUse=GenericWorkerServer, + homeserver_to_use=GenericWorkerServer, config=config, reactor=self.reactor, **kwargs diff --git a/tests/replication/test_federation_ack.py b/tests/replication/test_federation_ack.py index 23be1167a362..18536675588e 100644 --- a/tests/replication/test_federation_ack.py +++ b/tests/replication/test_federation_ack.py @@ -31,7 +31,7 @@ def default_config(self) -> dict: return config def make_homeserver(self, reactor, clock): - hs = self.setup_test_homeserver(homeserverToUse=GenericWorkerServer) + hs = self.setup_test_homeserver(homeserver_to_use=GenericWorkerServer) return hs diff 
--git a/tests/utils.py b/tests/utils.py index 0c09f5457fb0..acec74e9e916 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -21,6 +21,7 @@ import uuid import warnings from inspect import getcallargs +from typing import Type from urllib import parse as urlparse from mock import Mock, patch @@ -194,8 +195,8 @@ def setup_test_homeserver( name="test", config=None, reactor=None, - homeserverToUse=TestHomeServer, - **kargs + homeserver_to_use: Type[HomeServer] = TestHomeServer, + **kwargs ): """ Setup a homeserver suitable for running tests against. Keyword arguments @@ -218,8 +219,8 @@ def setup_test_homeserver( config.ldap_enabled = False - if "clock" not in kargs: - kargs["clock"] = MockClock() + if "clock" not in kwargs: + kwargs["clock"] = MockClock() if USE_POSTGRES_FOR_TESTS: test_db = "synapse_test_%s" % uuid.uuid4().hex @@ -264,18 +265,20 @@ def setup_test_homeserver( cur.close() db_conn.close() - hs = homeserverToUse( - name, - config=config, - version_string="Synapse/tests", - tls_server_context_factory=Mock(), - tls_client_options_factory=Mock(), - reactor=reactor, - **kargs + hs = homeserver_to_use( + name, config=config, version_string="Synapse/tests", reactor=reactor, ) + # Install @cache_in_self attributes + for key, val in kwargs.items(): + setattr(hs, key, val) + + # Mock TLS + hs.tls_server_context_factory = Mock() + hs.tls_client_options_factory = Mock() + hs.setup() - if homeserverToUse.__name__ == "TestHomeServer": + if homeserver_to_use == TestHomeServer: hs.setup_background_tasks() if isinstance(db_engine, PostgresEngine): @@ -339,7 +342,7 @@ async def validate_hash(p, h): hs.get_auth_handler().validate_hash = validate_hash - fed = kargs.get("resource_for_federation", None) + fed = kwargs.get("resource_for_federation", None) if fed: register_federation_servlets(hs, fed) From da0090fdff65f9f3fecad039f35b8e3615f8d100 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 16 Oct 2020 13:39:46 +0100 Subject: [PATCH 201/245] Fix modifying events in `ThirdPartyRules` modules (#8564) EventBuilder.build wants auth events these days --- changelog.d/8564.feature | 1 + synapse/events/builder.py | 2 +- synapse/handlers/message.py | 7 ++++++- 3 files changed, 8 insertions(+), 2 deletions(-) create mode 100644 changelog.d/8564.feature diff --git a/changelog.d/8564.feature b/changelog.d/8564.feature new file mode 100644 index 000000000000..45342e66ad7d --- /dev/null +++ b/changelog.d/8564.feature @@ -0,0 +1 @@ +Support modifying event content in `ThirdPartyRules` modules. 
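For callers that rebuild an existing event, the upshot is that `build()` should now be given the auth events explicitly. A minimal sketch of that call, assuming `builder` and `original_event` come from the surrounding handler code: since the event type has not changed, the original event's auth events are still valid and can simply be reused rather than recalculated.

    event = await builder.build(
        prev_event_ids=original_event.prev_event_ids(),
        auth_event_ids=original_event.auth_event_ids(),
    )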
diff --git a/synapse/events/builder.py b/synapse/events/builder.py index df4f950fec86..07df258e6eed 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -98,7 +98,7 @@ def is_state(self): return self._state_key is not None async def build( - self, prev_event_ids: List[str], auth_event_ids: Optional[List[str]] + self, prev_event_ids: List[str], auth_event_ids: Optional[List[str]], ) -> EventBase: """Transform into a fully signed and hashed event diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 7f00805a91ef..d6855c60ea72 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1364,7 +1364,12 @@ async def _rebuild_event_after_third_party_rules( for k, v in original_event.internal_metadata.get_dict().items(): setattr(builder.internal_metadata, k, v) - event = await builder.build(prev_event_ids=original_event.prev_event_ids()) + # the event type hasn't changed, so there's no point in re-calculating the + # auth events. + event = await builder.build( + prev_event_ids=original_event.prev_event_ids(), + auth_event_ids=original_event.auth_event_ids(), + ) # we rebuild the event context, to be on the safe side. If nothing else, # delta_ids might need an update. From 3ee17585cd095e590096683395cfb9a017eac15e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 16 Oct 2020 15:51:57 +0100 Subject: [PATCH 202/245] Make LruCache register its own metrics (#8561) rather than have everything that instantiates an LruCache manage metrics separately, have LruCache do it itself. --- changelog.d/8561.misc | 1 + synapse/api/auth.py | 4 +-- synapse/push/push_rule_evaluator.py | 4 +-- synapse/util/caches/__init__.py | 13 ++++--- synapse/util/caches/deferred_cache.py | 43 +++++++---------------- synapse/util/caches/dictionary_cache.py | 9 +---- synapse/util/caches/lrucache.py | 46 +++++++++++++++++++------ tests/util/test_lrucache.py | 4 +-- 8 files changed, 62 insertions(+), 62 deletions(-) create mode 100644 changelog.d/8561.misc diff --git a/changelog.d/8561.misc b/changelog.d/8561.misc new file mode 100644 index 000000000000..a40dedfa8e6b --- /dev/null +++ b/changelog.d/8561.misc @@ -0,0 +1 @@ +Move metric registration code down into `LruCache`. 
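After this change, constructing a cache with a name is all that is needed for metrics; hits, misses and evictions are counted inside `LruCache` itself. A small usage sketch (the cache name and keys here are illustrative):

    from synapse.util.caches.lrucache import LruCache

    # Passing a cache_name registers the cache with prometheus; leaving it
    # unset gives an unmetered cache. The metric object is also exposed as
    # `token_cache.metrics` for callers that need it directly.
    token_cache = LruCache(10000, "token_cache")

    token_cache.set("syt_some_token", ("@alice:example.org", False))
    token_cache.get("syt_some_token")  # counted as a hit
    token_cache.get("missing_token")   # counted as a miss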
diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 1071a0576e5e..eb6f418b13e2 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -34,7 +34,6 @@ from synapse.events import EventBase from synapse.logging import opentracing as opentracing from synapse.types import StateMap, UserID -from synapse.util.caches import register_cache from synapse.util.caches.lrucache import LruCache from synapse.util.metrics import Measure @@ -70,8 +69,7 @@ def __init__(self, hs): self.store = hs.get_datastore() self.state = hs.get_state_handler() - self.token_cache = LruCache(10000) - register_cache("cache", "token_cache", self.token_cache) + self.token_cache = LruCache(10000, "token_cache") self._auth_blocking = AuthBlocking(self.hs) diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index 3a68ce636fac..4c95b149c51c 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -20,7 +20,6 @@ from synapse.events import EventBase from synapse.types import UserID -from synapse.util.caches import register_cache from synapse.util.caches.lrucache import LruCache logger = logging.getLogger(__name__) @@ -186,8 +185,7 @@ def _get_value(self, dotted_key: str) -> Optional[str]: # Caches (string, is_glob, word_boundary) -> regex for push. See _glob_matches -regex_cache = LruCache(50000) -register_cache("cache", "regex_push_cache", regex_cache) +regex_cache = LruCache(50000, "regex_push_cache") def _glob_matches(glob: str, value: str, word_boundary: bool = False) -> bool: diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 8fc05be278fa..89f0b385357d 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -16,7 +16,7 @@ import logging from sys import intern -from typing import Callable, Dict, Optional +from typing import Callable, Dict, Optional, Sized import attr from prometheus_client.core import Gauge @@ -92,7 +92,7 @@ def collect(self): def register_cache( cache_type: str, cache_name: str, - cache, + cache: Sized, collect_callback: Optional[Callable] = None, resizable: bool = True, resize_callback: Optional[Callable] = None, @@ -100,12 +100,15 @@ def register_cache( """Register a cache object for metric collection and resizing. Args: - cache_type + cache_type: a string indicating the "type" of the cache. This is used + only for deduplication so isn't too important provided it's constant. cache_name: name of the cache - cache: cache itself + cache: cache itself, which must implement __len__(), and may optionally implement + a max_size property collect_callback: If given, a function which is called during metric collection to update additional metrics. - resizable: Whether this cache supports being resized. + resizable: Whether this cache supports being resized, in which case either + resize_callback must be provided, or the cache must support set_max_size(). resize_callback: A function which can be called to resize the cache. 
Returns: diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py index f728cd2cf27a..91fdc8142d92 100644 --- a/synapse/util/caches/deferred_cache.py +++ b/synapse/util/caches/deferred_cache.py @@ -24,7 +24,6 @@ from twisted.internet import defer from synapse.util.async_helpers import ObservableDeferred -from synapse.util.caches import register_cache from synapse.util.caches.lrucache import LruCache from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry @@ -54,10 +53,7 @@ class DeferredCache(Generic[KT, VT]): __slots__ = ( "cache", - "name", - "keylen", "thread", - "metrics", "_pending_deferred_cache", ) @@ -89,37 +85,27 @@ def __init__( cache_type() ) # type: MutableMapping[KT, CacheEntry] + def metrics_cb(): + cache_pending_metric.labels(name).set(len(self._pending_deferred_cache)) + # cache is used for completed results and maps to the result itself, rather than # a Deferred. self.cache = LruCache( max_size=max_entries, keylen=keylen, + cache_name=name, cache_type=cache_type, size_callback=(lambda d: len(d)) if iterable else None, - evicted_callback=self._on_evicted, + metrics_collection_callback=metrics_cb, apply_cache_factor_from_config=apply_cache_factor_from_config, ) - self.name = name - self.keylen = keylen self.thread = None # type: Optional[threading.Thread] - self.metrics = register_cache( - "cache", - name, - self.cache, - collect_callback=self._metrics_collection_callback, - ) @property def max_entries(self): return self.cache.max_size - def _on_evicted(self, evicted_count): - self.metrics.inc_evictions(evicted_count) - - def _metrics_collection_callback(self): - cache_pending_metric.labels(self.name).set(len(self._pending_deferred_cache)) - def check_thread(self): expected_thread = self.thread if expected_thread is None: @@ -154,21 +140,18 @@ def get( if val is not _Sentinel.sentinel: val.callbacks.update(callbacks) if update_metrics: - self.metrics.inc_hits() + m = self.cache.metrics + assert m # we always have a name, so should always have metrics + m.inc_hits() return val.deferred - val = self.cache.get(key, _Sentinel.sentinel, callbacks=callbacks) - if val is not _Sentinel.sentinel: - self.metrics.inc_hits() - return val - - if update_metrics: - self.metrics.inc_misses() - - if default is _Sentinel.sentinel: + val = self.cache.get( + key, default, callbacks=callbacks, update_metrics=update_metrics + ) + if val is _Sentinel.sentinel: raise KeyError() else: - return default + return val def set( self, diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py index 8592b93689bd..8b426c005b33 100644 --- a/synapse/util/caches/dictionary_cache.py +++ b/synapse/util/caches/dictionary_cache.py @@ -19,8 +19,6 @@ from synapse.util.caches.lrucache import LruCache -from . 
import register_cache - logger = logging.getLogger(__name__) @@ -46,18 +44,16 @@ class DictionaryCache: """ def __init__(self, name, max_entries=1000): - self.cache = LruCache(max_size=max_entries, size_callback=len) + self.cache = LruCache(max_size=max_entries, cache_name=name, size_callback=len) self.name = name self.sequence = 0 self.thread = None - # caches_by_name[name] = self.cache class Sentinel: __slots__ = [] self.sentinel = Sentinel() - self.metrics = register_cache("dictionary", name, self.cache) def check_thread(self): expected_thread = self.thread @@ -82,8 +78,6 @@ def get(self, key, dict_keys=None): """ entry = self.cache.get(key, self.sentinel) if entry is not self.sentinel: - self.metrics.inc_hits() - if dict_keys is None: return DictionaryEntry( entry.full, entry.known_absent, dict(entry.value) @@ -95,7 +89,6 @@ def get(self, key, dict_keys=None): {k: entry.value[k] for k in dict_keys if k in entry.value}, ) - self.metrics.inc_misses() return DictionaryEntry(False, set(), {}) def invalidate(self, key): diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 33eae2b7c4fe..e4804f79e07d 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -18,6 +18,7 @@ from typing import Callable, Optional, Type, Union from synapse.config import cache as cache_config +from synapse.util.caches import CacheMetric, register_cache from synapse.util.caches.treecache import TreeCache @@ -43,27 +44,29 @@ def __init__(self, prev_node, next_node, key, value, callbacks=set()): class LruCache: """ - Least-recently-used cache. + Least-recently-used cache, supporting prometheus metrics and invalidation callbacks. + Supports del_multi only if cache_type=TreeCache If cache_type=TreeCache, all keys must be tuples. - - Can also set callbacks on objects when getting/setting which are fired - when that key gets invalidated/evicted. """ def __init__( self, max_size: int, + cache_name: Optional[str] = None, keylen: int = 1, cache_type: Type[Union[dict, TreeCache]] = dict, size_callback: Optional[Callable] = None, - evicted_callback: Optional[Callable] = None, + metrics_collection_callback: Optional[Callable[[], None]] = None, apply_cache_factor_from_config: bool = True, ): """ Args: max_size: The maximum amount of entries the cache can hold + cache_name: The name of this cache, for the prometheus metrics. If unset, + no metrics will be reported on this cache. + keylen: The length of the tuple used as the cache key. Ignored unless cache_type is `TreeCache`. @@ -73,9 +76,13 @@ def __init__( size_callback (func(V) -> int | None): - evicted_callback (func(int)|None): - if not None, called on eviction with the size of the evicted - entry + metrics_collection_callback: + metrics collection callback. This is called early in the metrics + collection process, before any of the metrics registered with the + prometheus Registry are collected, so can be used to update any dynamic + metrics. + + Ignored if cache_name is None. 
apply_cache_factor_from_config (bool): If true, `max_size` will be multiplied by a cache factor derived from the homeserver config @@ -94,6 +101,19 @@ def __init__( else: self.max_size = int(max_size) + if cache_name is not None: + metrics = register_cache( + "lru_cache", + cache_name, + self, + collect_callback=metrics_collection_callback, + ) # type: Optional[CacheMetric] + else: + metrics = None + + # this is exposed for access from outside this class + self.metrics = metrics + list_root = _Node(None, None, None, None) list_root.next_node = list_root list_root.prev_node = list_root @@ -105,8 +125,8 @@ def evict(): todelete = list_root.prev_node evicted_len = delete_node(todelete) cache.pop(todelete.key, None) - if evicted_callback: - evicted_callback(evicted_len) + if metrics: + metrics.inc_evictions(evicted_len) def synchronized(f): @wraps(f) @@ -169,13 +189,17 @@ def delete_node(node): return deleted_len @synchronized - def cache_get(key, default=None, callbacks=[]): + def cache_get(key, default=None, callbacks=[], update_metrics=True): node = cache.get(key, None) if node is not None: move_node_to_front(node) node.callbacks.update(callbacks) + if update_metrics and metrics: + metrics.inc_hits() return node.value else: + if update_metrics and metrics: + metrics.inc_misses() return default @synchronized diff --git a/tests/util/test_lrucache.py b/tests/util/test_lrucache.py index 0adb2174af98..f12834edab2c 100644 --- a/tests/util/test_lrucache.py +++ b/tests/util/test_lrucache.py @@ -59,7 +59,7 @@ def test_pop(self): self.assertEquals(cache.pop("key"), None) def test_del_multi(self): - cache = LruCache(4, 2, cache_type=TreeCache) + cache = LruCache(4, keylen=2, cache_type=TreeCache) cache[("animal", "cat")] = "mew" cache[("animal", "dog")] = "woof" cache[("vehicles", "car")] = "vroom" @@ -160,7 +160,7 @@ def test_del_multi(self): m2 = Mock() m3 = Mock() m4 = Mock() - cache = LruCache(4, 2, cache_type=TreeCache) + cache = LruCache(4, keylen=2, cache_type=TreeCache) cache.set(("a", "1"), "value", callbacks=[m1]) cache.set(("a", "2"), "value", callbacks=[m2]) From 0ec0bc3886bd72bdf2f64d455a7d777f4573a4f1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 16 Oct 2020 15:56:39 +0100 Subject: [PATCH 203/245] type annotations for LruCache --- synapse/api/auth.py | 4 +- synapse/push/push_rule_evaluator.py | 16 +++--- synapse/util/caches/deferred_cache.py | 5 +- synapse/util/caches/dictionary_cache.py | 22 +++++--- synapse/util/caches/lrucache.py | 73 +++++++++++++++++++++---- 5 files changed, 89 insertions(+), 31 deletions(-) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index eb6f418b13e2..bff87fabde75 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -69,7 +69,9 @@ def __init__(self, hs): self.store = hs.get_datastore() self.state = hs.get_state_handler() - self.token_cache = LruCache(10000, "token_cache") + self.token_cache = LruCache( + 10000, "token_cache" + ) # type: LruCache[str, Tuple[str, bool]] self._auth_blocking = AuthBlocking(self.hs) diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index 4c95b149c51c..854ffd625e60 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -16,7 +16,7 @@ import logging import re -from typing import Any, Dict, List, Optional, Pattern, Union +from typing import Any, Dict, List, Optional, Pattern, Tuple, Union from synapse.events import EventBase from synapse.types import UserID @@ -173,19 +173,21 @@ def _contains_display_name(self, display_name: 
str) -> bool: # Similar to _glob_matches, but do not treat display_name as a glob. r = regex_cache.get((display_name, False, True), None) if not r: - r = re.escape(display_name) - r = _re_word_boundary(r) - r = re.compile(r, flags=re.IGNORECASE) + r1 = re.escape(display_name) + r1 = _re_word_boundary(r1) + r = re.compile(r1, flags=re.IGNORECASE) regex_cache[(display_name, False, True)] = r - return r.search(body) + return bool(r.search(body)) def _get_value(self, dotted_key: str) -> Optional[str]: return self._value_cache.get(dotted_key, None) # Caches (string, is_glob, word_boundary) -> regex for push. See _glob_matches -regex_cache = LruCache(50000, "regex_push_cache") +regex_cache = LruCache( + 50000, "regex_push_cache" +) # type: LruCache[Tuple[str, bool, bool],Pattern] def _glob_matches(glob: str, value: str, word_boundary: bool = False) -> bool: @@ -203,7 +205,7 @@ def _glob_matches(glob: str, value: str, word_boundary: bool = False) -> bool: if not r: r = _glob_to_re(glob, word_boundary) regex_cache[(glob, True, word_boundary)] = r - return r.search(value) + return bool(r.search(value)) except re.error: logger.warning("Failed to parse glob to regex: %r", glob) return False diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py index 91fdc8142d92..4026e1f8fadc 100644 --- a/synapse/util/caches/deferred_cache.py +++ b/synapse/util/caches/deferred_cache.py @@ -98,7 +98,7 @@ def metrics_cb(): size_callback=(lambda d: len(d)) if iterable else None, metrics_collection_callback=metrics_cb, apply_cache_factor_from_config=apply_cache_factor_from_config, - ) + ) # type: LruCache[KT, VT] self.thread = None # type: Optional[threading.Thread] @@ -240,11 +240,12 @@ def invalidate_many(self, key: KT): self.check_thread() if not isinstance(key, tuple): raise TypeError("The cache key must be a tuple not %r" % (type(key),)) + key = cast(KT, key) self.cache.del_multi(key) # if we have a pending lookup for this key, remove it from the # _pending_deferred_cache, as above - entry_dict = self._pending_deferred_cache.pop(cast(KT, key), None) + entry_dict = self._pending_deferred_cache.pop(key, None) if entry_dict is not None: for entry in iterate_tree_cache_entry(entry_dict): entry.invalidate() diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py index 8b426c005b33..588d2d49f221 100644 --- a/synapse/util/caches/dictionary_cache.py +++ b/synapse/util/caches/dictionary_cache.py @@ -12,10 +12,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import enum import logging import threading from collections import namedtuple +from typing import Any from synapse.util.caches.lrucache import LruCache @@ -38,23 +39,26 @@ def __len__(self): return len(self.value) +class _Sentinel(enum.Enum): + # defining a sentinel in this way allows mypy to correctly handle the + # type of a dictionary lookup. + sentinel = object() + + class DictionaryCache: """Caches key -> dictionary lookups, supporting caching partial dicts, i.e. fetching a subset of dictionary keys for a particular key. 
""" def __init__(self, name, max_entries=1000): - self.cache = LruCache(max_size=max_entries, cache_name=name, size_callback=len) + self.cache = LruCache( + max_size=max_entries, cache_name=name, size_callback=len + ) # type: LruCache[Any, DictionaryEntry] self.name = name self.sequence = 0 self.thread = None - class Sentinel: - __slots__ = [] - - self.sentinel = Sentinel() - def check_thread(self): expected_thread = self.thread if expected_thread is None: @@ -76,8 +80,8 @@ def get(self, key, dict_keys=None): Returns: DictionaryEntry """ - entry = self.cache.get(key, self.sentinel) - if entry is not self.sentinel: + entry = self.cache.get(key, _Sentinel.sentinel) + if entry is not _Sentinel.sentinel: if dict_keys is None: return DictionaryEntry( entry.full, entry.known_absent, dict(entry.value) diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index e4804f79e07d..0eed53d3f495 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -15,12 +15,30 @@ import threading from functools import wraps -from typing import Callable, Optional, Type, Union +from typing import ( + Any, + Callable, + Generic, + Iterable, + Optional, + Type, + TypeVar, + Union, + cast, + overload, +) + +from typing_extensions import Literal from synapse.config import cache as cache_config from synapse.util.caches import CacheMetric, register_cache from synapse.util.caches.treecache import TreeCache +T = TypeVar("T") +FT = TypeVar("FT", bound=Callable[..., Any]) +KT = TypeVar("KT") +VT = TypeVar("VT") + def enumerate_leaves(node, depth): if depth == 0: @@ -42,7 +60,7 @@ def __init__(self, prev_node, next_node, key, value, callbacks=set()): self.callbacks = callbacks -class LruCache: +class LruCache(Generic[KT, VT]): """ Least-recently-used cache, supporting prometheus metrics and invalidation callbacks. @@ -128,13 +146,13 @@ def evict(): if metrics: metrics.inc_evictions(evicted_len) - def synchronized(f): + def synchronized(f: FT) -> FT: @wraps(f) def inner(*args, **kwargs): with lock: return f(*args, **kwargs) - return inner + return cast(FT, inner) cached_cache_len = [0] if size_callback is not None: @@ -188,8 +206,31 @@ def delete_node(node): node.callbacks.clear() return deleted_len + @overload + def cache_get( + key: KT, + default: Literal[None] = None, + callbacks: Iterable[Callable[[], None]] = ..., + update_metrics: bool = ..., + ) -> Optional[VT]: + ... + + @overload + def cache_get( + key: KT, + default: T, + callbacks: Iterable[Callable[[], None]] = ..., + update_metrics: bool = ..., + ) -> Union[T, VT]: + ... + @synchronized - def cache_get(key, default=None, callbacks=[], update_metrics=True): + def cache_get( + key: KT, + default=None, + callbacks: Iterable[Callable[[], None]] = [], + update_metrics: bool = True, + ): node = cache.get(key, None) if node is not None: move_node_to_front(node) @@ -203,7 +244,7 @@ def cache_get(key, default=None, callbacks=[], update_metrics=True): return default @synchronized - def cache_set(key, value, callbacks=[]): + def cache_set(key: KT, value: VT, callbacks: Iterable[Callable[[], None]] = []): node = cache.get(key, None) if node is not None: # We sometimes store large objects, e.g. 
dicts, which cause @@ -232,7 +273,7 @@ def cache_set(key, value, callbacks=[]): evict() @synchronized - def cache_set_default(key, value): + def cache_set_default(key: KT, value: VT) -> VT: node = cache.get(key, None) if node is not None: return node.value @@ -241,8 +282,16 @@ def cache_set_default(key, value): evict() return value + @overload + def cache_pop(key: KT, default: Literal[None] = None) -> Union[None, VT]: + ... + + @overload + def cache_pop(key: KT, default: T) -> Union[T, VT]: + ... + @synchronized - def cache_pop(key, default=None): + def cache_pop(key: KT, default=None): node = cache.get(key, None) if node: delete_node(node) @@ -252,18 +301,18 @@ def cache_pop(key, default=None): return default @synchronized - def cache_del_multi(key): + def cache_del_multi(key: KT) -> None: """ This will only work if constructed with cache_type=TreeCache """ popped = cache.pop(key) if popped is None: return - for leaf in enumerate_leaves(popped, keylen - len(key)): + for leaf in enumerate_leaves(popped, keylen - len(cast(tuple, key))): delete_node(leaf) @synchronized - def cache_clear(): + def cache_clear() -> None: list_root.next_node = list_root list_root.prev_node = list_root for node in cache.values(): @@ -274,7 +323,7 @@ def cache_clear(): cached_cache_len[0] = 0 @synchronized - def cache_contains(key): + def cache_contains(key: KT) -> bool: return key in cache self.sentinel = object() From 402213bf416d63dedfca21e646e630349952ba58 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 16 Oct 2020 11:21:04 +0100 Subject: [PATCH 204/245] changelog --- changelog.d/8562.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/8562.misc diff --git a/changelog.d/8562.misc b/changelog.d/8562.misc new file mode 100644 index 000000000000..ebdbddb50048 --- /dev/null +++ b/changelog.d/8562.misc @@ -0,0 +1 @@ +Add type annotations for `LruCache`. From 995cc615a01bb11b70dbf8fdd0eb7f8b3d1fdc1e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 16 Oct 2020 16:14:42 +0100 Subject: [PATCH 205/245] Apply suggestions from code review Co-authored-by: Patrick Cloke --- synapse/push/push_rule_evaluator.py | 2 +- synapse/util/caches/lrucache.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index 854ffd625e60..2ce9e444abda 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -187,7 +187,7 @@ def _get_value(self, dotted_key: str) -> Optional[str]: # Caches (string, is_glob, word_boundary) -> regex for push. See _glob_matches regex_cache = LruCache( 50000, "regex_push_cache" -) # type: LruCache[Tuple[str, bool, bool],Pattern] +) # type: LruCache[Tuple[str, bool, bool], Pattern] def _glob_matches(glob: str, value: str, word_boundary: bool = False) -> bool: diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 0eed53d3f495..1a2c2d4c0b09 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -283,7 +283,7 @@ def cache_set_default(key: KT, value: VT) -> VT: return value @overload - def cache_pop(key: KT, default: Literal[None] = None) -> Union[None, VT]: + def cache_pop(key: KT, default: Literal[None] = None) -> Optional[VT]: ... 
@overload From 6d7b22041ddf5ecceaf230404ef00c4d0b432727 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 16 Oct 2020 16:21:43 +0100 Subject: [PATCH 206/245] review comments --- synapse/util/caches/lrucache.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 1a2c2d4c0b09..4e95dd9bf391 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -34,11 +34,16 @@ from synapse.util.caches import CacheMetric, register_cache from synapse.util.caches.treecache import TreeCache -T = TypeVar("T") +# Function type: the type used for invalidation callbacks FT = TypeVar("FT", bound=Callable[..., Any]) + +# Key and Value type for the cache KT = TypeVar("KT") VT = TypeVar("VT") +# a general type var, distinct from either KT or VT +T = TypeVar("T") + def enumerate_leaves(node, depth): if depth == 0: @@ -227,7 +232,7 @@ def cache_get( @synchronized def cache_get( key: KT, - default=None, + default: Optional[T] = None, callbacks: Iterable[Callable[[], None]] = [], update_metrics: bool = True, ): @@ -291,7 +296,7 @@ def cache_pop(key: KT, default: T) -> Union[T, VT]: ... @synchronized - def cache_pop(key: KT, default=None): + def cache_pop(key: KT, default: Optional[T] = None): node = cache.get(key, None) if node: delete_node(node) From c8e9dc4cf47522fae8a8939a8717e11eab88c9e6 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Fri, 16 Oct 2020 18:03:38 +0200 Subject: [PATCH 207/245] Add .venv* to .gitignore (#8566) Signed-off-by: Jonathan de Jong --- .gitignore | 1 + changelog.d/8566.misc | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/8566.misc diff --git a/.gitignore b/.gitignore index af36c00cfaa6..9bb5bdd647b9 100644 --- a/.gitignore +++ b/.gitignore @@ -21,6 +21,7 @@ _trial_temp*/ /.python-version /*.signing.key /env/ +/.venv*/ /homeserver*.yaml /logs /media_store/ diff --git a/changelog.d/8566.misc b/changelog.d/8566.misc new file mode 100644 index 000000000000..453cf48ffa9c --- /dev/null +++ b/changelog.d/8566.misc @@ -0,0 +1 @@ +Add virtualenv-generated folders to `.gitignore`. \ No newline at end of file From 1b70662be902e153759a26b3e97f1081d9f8c4bc Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 16 Oct 2020 12:06:17 -0400 Subject: [PATCH 208/245] Clean-up old transaction IDs on the background worker. (#8544) --- changelog.d/8544.feature | 1 + synapse/storage/databases/main/events_worker.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8544.feature diff --git a/changelog.d/8544.feature b/changelog.d/8544.feature new file mode 100644 index 000000000000..542993110bc8 --- /dev/null +++ b/changelog.d/8544.feature @@ -0,0 +1 @@ +Allow running background tasks in a separate worker process. 
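The pattern this converges on is that periodic maintenance jobs are gated on `hs.config.run_background_tasks` rather than on "am I the main process", so they run on whichever worker has been designated for background work. A sketch of the scheduling call, with the interval and description strings chosen for illustration only:

    if hs.config.run_background_tasks:
        # Schedule the cleanup on the process that owns background tasks.
        self._clock.looping_call(
            run_as_background_process,
            5 * 60 * 1000,                     # illustrative interval, in ms
            "cleanup_transactions",            # illustrative description
            self._cleanup_old_transaction_ids,
        )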
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index ff150f0be7cc..0ad9a19b3d6f 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -137,7 +137,7 @@ def __init__(self, database: DatabasePool, db_conn, hs): db_conn, "events", "stream_ordering", step=-1 ) - if not hs.config.worker.worker_app: + if hs.config.run_background_tasks: # We periodically clean out old transaction ID mappings self._clock.looping_call( run_as_background_process, From 0afd83584bcaccc98b020900a98b3fbe9f825811 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 16 Oct 2020 21:45:31 +0100 Subject: [PATCH 209/245] Fix synmark (#8571) This seems to have been broken since #6513. --- changelog.d/8571.misc | 1 + synmark/__init__.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8571.misc diff --git a/changelog.d/8571.misc b/changelog.d/8571.misc new file mode 100644 index 000000000000..f6a65057e0d9 --- /dev/null +++ b/changelog.d/8571.misc @@ -0,0 +1 @@ +Fix `synmark` benchmark runner. diff --git a/synmark/__init__.py b/synmark/__init__.py index 53698bd5ab5a..9ec72c19730b 100644 --- a/synmark/__init__.py +++ b/synmark/__init__.py @@ -41,7 +41,7 @@ async def make_homeserver(reactor, config=None): config_obj = HomeServerConfig() config_obj.parse_config_dict(config, "", "") - hs = await setup_test_homeserver( + hs = setup_test_homeserver( cleanup_tasks.append, config=config_obj, reactor=reactor, clock=clock ) stor = hs.get_datastore() From 79c1f973cee73f2d24c1b9140500e1999b25a479 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Sat, 17 Oct 2020 10:51:38 +0200 Subject: [PATCH 210/245] Pre-emptively fix synapse.storage.types.Connection for future mypy release (#8577) Fix the Connection protocol according to typeshed's assertions about sqlite3.Connection --- changelog.d/8577.misc | 1 + synapse/storage/database.py | 2 +- synapse/storage/types.py | 4 ++-- 3 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/8577.misc diff --git a/changelog.d/8577.misc b/changelog.d/8577.misc new file mode 100644 index 000000000000..75fe563a0258 --- /dev/null +++ b/changelog.d/8577.misc @@ -0,0 +1 @@ +Adjust a protocol-type definition to fit `sqlite3` assertions. \ No newline at end of file diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 763722d6bce6..0217e631085a 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -160,7 +160,7 @@ def __enter__(self) -> "Connection": self.conn.__enter__() return self - def __exit__(self, exc_type, exc_value, traceback) -> bool: + def __exit__(self, exc_type, exc_value, traceback) -> Optional[bool]: return self.conn.__exit__(exc_type, exc_value, traceback) # Proxy through any unknown lookups to the DB conn class. diff --git a/synapse/storage/types.py b/synapse/storage/types.py index 970bb1b9da35..9cadcba18fc0 100644 --- a/synapse/storage/types.py +++ b/synapse/storage/types.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any, Iterable, Iterator, List, Tuple +from typing import Any, Iterable, Iterator, List, Optional, Tuple from typing_extensions import Protocol @@ -65,5 +65,5 @@ def rollback(self, *args, **kwargs) -> None: def __enter__(self) -> "Connection": ... - def __exit__(self, exc_type, exc_value, traceback) -> bool: + def __exit__(self, exc_type, exc_value, traceback) -> Optional[bool]: ... From 97647b33c248f25571bae617365d95434e6a3d5f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 19 Oct 2020 12:20:29 +0100 Subject: [PATCH 211/245] Replace DeferredCache with LruCache where possible (#8563) Most of these uses don't need a full-blown DeferredCache; LruCache is lighter and more appropriate. --- changelog.d/8563.misc | 1 + synapse/replication/slave/storage/client_ips.py | 10 +++++----- synapse/storage/_base.py | 12 +++++++----- synapse/storage/databases/main/client_ips.py | 8 ++++---- synapse/storage/databases/main/devices.py | 8 ++++---- synapse/storage/databases/main/events.py | 4 +--- synapse/storage/databases/main/events_worker.py | 11 +++++------ synapse/util/caches/lrucache.py | 3 +++ 8 files changed, 30 insertions(+), 27 deletions(-) create mode 100644 changelog.d/8563.misc diff --git a/changelog.d/8563.misc b/changelog.d/8563.misc new file mode 100644 index 000000000000..eeba8e5fee53 --- /dev/null +++ b/changelog.d/8563.misc @@ -0,0 +1 @@ +Replace `DeferredCache` with the lighter-weight `LruCache` where possible. diff --git a/synapse/replication/slave/storage/client_ips.py b/synapse/replication/slave/storage/client_ips.py index 4b0ea0cc01cd..0f5b7adef781 100644 --- a/synapse/replication/slave/storage/client_ips.py +++ b/synapse/replication/slave/storage/client_ips.py @@ -15,7 +15,7 @@ from synapse.storage.database import DatabasePool from synapse.storage.databases.main.client_ips import LAST_SEEN_GRANULARITY -from synapse.util.caches.deferred_cache import DeferredCache +from synapse.util.caches.lrucache import LruCache from ._base import BaseSlavedStore @@ -24,9 +24,9 @@ class SlavedClientIpStore(BaseSlavedStore): def __init__(self, database: DatabasePool, db_conn, hs): super().__init__(database, db_conn, hs) - self.client_ip_last_seen = DeferredCache( - name="client_ip_last_seen", keylen=4, max_entries=50000 - ) # type: DeferredCache[tuple, int] + self.client_ip_last_seen = LruCache( + cache_name="client_ip_last_seen", keylen=4, max_size=50000 + ) # type: LruCache[tuple, int] async def insert_client_ip(self, user_id, access_token, ip, user_agent, device_id): now = int(self._clock.time_msec()) @@ -41,7 +41,7 @@ async def insert_client_ip(self, user_id, access_token, ip, user_agent, device_i if last_seen is not None and (now - last_seen) < LAST_SEEN_GRANULARITY: return - self.client_ip_last_seen.prefill(key, now) + self.client_ip_last_seen.set(key, now) self.hs.get_tcp_replication().send_user_ip( user_id, access_token, ip, user_agent, device_id, now diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index ab49d227de1c..2b196ded1bd0 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -76,14 +76,16 @@ def _attempt_to_invalidate_cache( """ try: - if key is None: - getattr(self, cache_name).invalidate_all() - else: - getattr(self, cache_name).invalidate(tuple(key)) + cache = getattr(self, cache_name) except AttributeError: # We probably haven't pulled in the cache in this worker, # which is fine. 
- pass + return + + if key is None: + cache.invalidate_all() + else: + cache.invalidate(tuple(key)) def db_to_json(db_content): diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index 9e66e6648a19..339bd691a4c1 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -19,7 +19,7 @@ from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore from synapse.storage.database import DatabasePool, make_tuple_comparison_clause -from synapse.util.caches.deferred_cache import DeferredCache +from synapse.util.caches.lrucache import LruCache logger = logging.getLogger(__name__) @@ -410,8 +410,8 @@ def _prune_old_user_ips_txn(txn): class ClientIpStore(ClientIpWorkerStore): def __init__(self, database: DatabasePool, db_conn, hs): - self.client_ip_last_seen = DeferredCache( - name="client_ip_last_seen", keylen=4, max_entries=50000 + self.client_ip_last_seen = LruCache( + cache_name="client_ip_last_seen", keylen=4, max_size=50000 ) super().__init__(database, db_conn, hs) @@ -442,7 +442,7 @@ async def insert_client_ip( if last_seen is not None and (now - last_seen) < LAST_SEEN_GRANULARITY: return - self.client_ip_last_seen.prefill(key, now) + self.client_ip_last_seen.set(key, now) self._batch_row_update[key] = (user_agent, device_id, now) diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index e662a20d24a4..dfb4f87b8f3d 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -34,8 +34,8 @@ ) from synapse.types import Collection, JsonDict, get_verify_key_from_cross_signing_key from synapse.util import json_decoder, json_encoder -from synapse.util.caches.deferred_cache import DeferredCache from synapse.util.caches.descriptors import cached, cachedList +from synapse.util.caches.lrucache import LruCache from synapse.util.iterutils import batch_iter from synapse.util.stringutils import shortstr @@ -1005,8 +1005,8 @@ def __init__(self, database: DatabasePool, db_conn, hs): # Map of (user_id, device_id) -> bool. If there is an entry that implies # the device exists. 
- self.device_id_exists_cache = DeferredCache( - name="device_id_exists", keylen=2, max_entries=10000 + self.device_id_exists_cache = LruCache( + cache_name="device_id_exists", keylen=2, max_size=10000 ) async def store_device( @@ -1052,7 +1052,7 @@ async def store_device( ) if hidden: raise StoreError(400, "The device ID is in use", Codes.FORBIDDEN) - self.device_id_exists_cache.prefill(key, True) + self.device_id_exists_cache.set(key, True) return inserted except StoreError: raise diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index ba3b1769b0ed..87808c148334 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1051,9 +1051,7 @@ def _add_to_cache(self, txn, events_and_contexts): def prefill(): for cache_entry in to_prefill: - self.store._get_event_cache.prefill( - (cache_entry[0].event_id,), cache_entry - ) + self.store._get_event_cache.set((cache_entry[0].event_id,), cache_entry) txn.call_after(prefill) diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 0ad9a19b3d6f..c342df2a8b30 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -42,8 +42,8 @@ from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator from synapse.types import Collection, get_domain_from_id -from synapse.util.caches.deferred_cache import DeferredCache from synapse.util.caches.descriptors import cached +from synapse.util.caches.lrucache import LruCache from synapse.util.iterutils import batch_iter from synapse.util.metrics import Measure @@ -146,11 +146,10 @@ def __init__(self, database: DatabasePool, db_conn, hs): self._cleanup_old_transaction_ids, ) - self._get_event_cache = DeferredCache( - "*getEvent*", + self._get_event_cache = LruCache( + cache_name="*getEvent*", keylen=3, - max_entries=hs.config.caches.event_cache_size, - apply_cache_factor_from_config=False, + max_size=hs.config.caches.event_cache_size, ) self._event_fetch_lock = threading.Condition() @@ -749,7 +748,7 @@ async def _get_events_from_db(self, event_ids, allow_rejected=False): event=original_ev, redacted_event=redacted_event ) - self._get_event_cache.prefill((event_id,), cache_entry) + self._get_event_cache.set((event_id,), cache_entry) result_map[event_id] = cache_entry return result_map diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 4e95dd9bf391..3b471d8fd350 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -337,6 +337,9 @@ def cache_contains(key: KT) -> bool: self.set = cache_set self.setdefault = cache_set_default self.pop = cache_pop + # `invalidate` is exposed for consistency with DeferredCache, so that it can be + # invalidated by the cache invalidation replication stream. + self.invalidate = cache_pop if cache_type is TreeCache: self.del_multi = cache_del_multi self.len = synchronized(cache_len) From 1fcdbeb3ab66c20fd559f3a8e169b8185b19d067 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 19 Oct 2020 12:26:26 +0100 Subject: [PATCH 212/245] Start an opentracing span for background processes. (#8567) This should reduce the number of `There was no active span` errors we see. Fixes #8510. 
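Concretely, any task scheduled through `run_as_background_process` now executes inside an active span, so tracing helpers called from it have something to attach to. A minimal sketch, where the task name and tag values are illustrative:

    from synapse.logging import opentracing
    from synapse.metrics.background_process_metrics import run_as_background_process

    async def _example_cleanup():
        # With this change these calls find the span opened by
        # run_as_background_process, rather than logging
        # "There was no active span...".
        opentracing.set_tag("job", "example_cleanup")
        opentracing.log_kv({"rows_deleted": 0})

    run_as_background_process("example_cleanup", _example_cleanup)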
--- changelog.d/8567.bugfix | 1 + synapse/metrics/background_process_metrics.py | 11 ++++++----- 2 files changed, 7 insertions(+), 5 deletions(-) create mode 100644 changelog.d/8567.bugfix diff --git a/changelog.d/8567.bugfix b/changelog.d/8567.bugfix new file mode 100644 index 000000000000..4d835df6fd43 --- /dev/null +++ b/changelog.d/8567.bugfix @@ -0,0 +1 @@ +Fix increase in the number of `There was no active span...` errors logged when using OpenTracing. diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index 5b73463504be..ea5f1c7b6213 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -24,6 +24,7 @@ from twisted.internet import defer from synapse.logging.context import LoggingContext, PreserveLoggingContext +from synapse.logging.opentracing import start_active_span if TYPE_CHECKING: import resource @@ -197,14 +198,14 @@ async def run(): with BackgroundProcessLoggingContext(desc) as context: context.request = "%s-%i" % (desc, count) - try: - result = func(*args, **kwargs) + with start_active_span(desc, tags={"request_id": context.request}): + result = func(*args, **kwargs) - if inspect.isawaitable(result): - result = await result + if inspect.isawaitable(result): + result = await result - return result + return result except Exception: logger.exception( "Background process '%s' threw an exception", desc, From 85c56445fb1e708a773c34b8b69a53f8b020bbc2 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 19 Oct 2020 07:27:46 -0400 Subject: [PATCH 213/245] Support running synmark on macOS. (#8578) By using the "poll" reactor since macOS doesn't support epoll. --- changelog.d/8578.misc | 1 + synmark/__init__.py | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 changelog.d/8578.misc diff --git a/changelog.d/8578.misc b/changelog.d/8578.misc new file mode 100644 index 000000000000..e93462255b39 --- /dev/null +++ b/changelog.d/8578.misc @@ -0,0 +1 @@ +Support macOS on the `synmark` benchmark runner. diff --git a/synmark/__init__.py b/synmark/__init__.py index 9ec72c19730b..09bc7e7927e9 100644 --- a/synmark/__init__.py +++ b/synmark/__init__.py @@ -15,7 +15,10 @@ import sys -from twisted.internet import epollreactor +try: + from twisted.internet.epollreactor import EPollReactor as Reactor +except ImportError: + from twisted.internet.pollreactor import PollReactor as Reactor from twisted.internet.main import installReactor from synapse.config.homeserver import HomeServerConfig @@ -63,7 +66,7 @@ def make_reactor(): Instantiate and install a Twisted reactor suitable for testing (i.e. not the default global one). 
""" - reactor = epollreactor.EPollReactor() + reactor = Reactor() if "twisted.internet.reactor" in sys.modules: del sys.modules["twisted.internet.reactor"] From c356b4bf422430cd5769c9bf90756fca2efd8451 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 19 Oct 2020 09:12:39 -0400 Subject: [PATCH 214/245] Include a simple message in email notifications that include encrypted content (#8545) --- changelog.d/8545.bugfix | 1 + synapse/push/mailer.py | 16 +++++--- synapse/res/templates/notif.html | 56 +++++++++++++++------------ synapse/res/templates/notif.txt | 24 +++++++----- synapse/res/templates/notif_mail.html | 26 ++++++------- synapse/res/templates/notif_mail.txt | 6 +-- synapse/res/templates/room.html | 26 ++++++------- synapse/res/templates/room.txt | 12 +++--- tests/push/test_email.py | 15 ++++++- 9 files changed, 107 insertions(+), 75 deletions(-) create mode 100644 changelog.d/8545.bugfix diff --git a/changelog.d/8545.bugfix b/changelog.d/8545.bugfix new file mode 100644 index 000000000000..64ba307df069 --- /dev/null +++ b/changelog.d/8545.bugfix @@ -0,0 +1 @@ +Fix a long standing bug where email notifications for encrypted messages were blank. diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 455a1acb46a8..155791b75495 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -387,8 +387,8 @@ async def get_notif_vars(self, notif, user_id, notif_event, room_state_ids): return ret async def get_message_vars(self, notif, event, room_state_ids): - if event.type != EventTypes.Message: - return + if event.type != EventTypes.Message and event.type != EventTypes.Encrypted: + return None sender_state_event_id = room_state_ids[("m.room.member", event.sender)] sender_state_event = await self.store.get_event(sender_state_event_id) @@ -399,10 +399,8 @@ async def get_message_vars(self, notif, event, room_state_ids): # sender_hash % the number of default images to choose from sender_hash = string_ordinal_total(event.sender) - msgtype = event.content.get("msgtype") - ret = { - "msgtype": msgtype, + "event_type": event.type, "is_historical": event.event_id != notif["event_id"], "id": event.event_id, "ts": event.origin_server_ts, @@ -411,6 +409,14 @@ async def get_message_vars(self, notif, event, room_state_ids): "sender_hash": sender_hash, } + # Encrypted messages don't have any additional useful information. 
+ if event.type == EventTypes.Encrypted: + return ret + + msgtype = event.content.get("msgtype") + + ret["msgtype"] = msgtype + if msgtype == "m.text": self.add_text_message_vars(ret, event) elif msgtype == "m.image": diff --git a/synapse/res/templates/notif.html b/synapse/res/templates/notif.html index 1a6c70b5624d..6d76064d132f 100644 --- a/synapse/res/templates/notif.html +++ b/synapse/res/templates/notif.html @@ -1,41 +1,47 @@ -{% for message in notif.messages %} +{%- for message in notif.messages %} - {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %} - {% if message.sender_avatar_url %} + {%- if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %} + {%- if message.sender_avatar_url %} - {% else %} - {% if message.sender_hash % 3 == 0 %} + {%- else %} + {%- if message.sender_hash % 3 == 0 %} - {% elif message.sender_hash % 3 == 1 %} + {%- elif message.sender_hash % 3 == 1 %} - {% else %} + {%- else %} - {% endif %} - {% endif %} - {% endif %} + {%- endif %} + {%- endif %} + {%- endif %} - {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %} -
{% if message.msgtype == "m.emote" %}*{% endif %} {{ message.sender_name }}
- {% endif %} + {%- if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %} +
{%- if message.msgtype == "m.emote" %}*{%- endif %} {{ message.sender_name }}
+ {%- endif %}
- {% if message.msgtype == "m.text" %} - {{ message.body_text_html }} - {% elif message.msgtype == "m.emote" %} - {{ message.body_text_html }} - {% elif message.msgtype == "m.notice" %} - {{ message.body_text_html }} - {% elif message.msgtype == "m.image" %} - - {% elif message.msgtype == "m.file" %} - {{ message.body_text_plain }} - {% endif %} + {%- if message.event_type == "m.room.encrypted" %} + An encrypted message. + {%- elif message.event_type == "m.room.message" %} + {%- if message.msgtype == "m.text" %} + {{ message.body_text_html }} + {%- elif message.msgtype == "m.emote" %} + {{ message.body_text_html }} + {%- elif message.msgtype == "m.notice" %} + {{ message.body_text_html }} + {%- elif message.msgtype == "m.image" %} + + {%- elif message.msgtype == "m.file" %} + {{ message.body_text_plain }} + {%- else %} + A message with unrecognised content. + {%- endif %} + {%- endif %}
{{ message.ts|format_ts("%H:%M") }} -{% endfor %} +{%- endfor %} diff --git a/synapse/res/templates/notif.txt b/synapse/res/templates/notif.txt index a37bee98332c..1ee7da3c50ef 100644 --- a/synapse/res/templates/notif.txt +++ b/synapse/res/templates/notif.txt @@ -1,16 +1,22 @@ -{% for message in notif.messages %} -{% if message.msgtype == "m.emote" %}* {% endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }}) -{% if message.msgtype == "m.text" %} +{%- for message in notif.messages %} +{%- if message.event_type == "m.room.encrypted" %} +An encrypted message. +{%- elif message.event_type == "m.room.message" %} +{%- if message.msgtype == "m.emote" %}* {%- endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }}) +{%- if message.msgtype == "m.text" %} {{ message.body_text_plain }} -{% elif message.msgtype == "m.emote" %} +{%- elif message.msgtype == "m.emote" %} {{ message.body_text_plain }} -{% elif message.msgtype == "m.notice" %} +{%- elif message.msgtype == "m.notice" %} {{ message.body_text_plain }} -{% elif message.msgtype == "m.image" %} +{%- elif message.msgtype == "m.image" %} {{ message.body_text_plain }} -{% elif message.msgtype == "m.file" %} +{%- elif message.msgtype == "m.file" %} {{ message.body_text_plain }} -{% endif %} -{% endfor %} +{%- else %} +A message with unrecognised content. +{%- endif %} +{%- endif %} +{%- endfor %} View {{ room.title }} at {{ notif.link }} diff --git a/synapse/res/templates/notif_mail.html b/synapse/res/templates/notif_mail.html index a2dfeb9e9f78..27d41827907f 100644 --- a/synapse/res/templates/notif_mail.html +++ b/synapse/res/templates/notif_mail.html @@ -2,8 +2,8 @@ @@ -18,21 +18,21 @@
{{ summary_text }}
- {% if app_name == "Riot" %} + {%- if app_name == "Riot" %} [Riot] - {% elif app_name == "Vector" %} + {%- elif app_name == "Vector" %} [Vector] - {% elif app_name == "Element" %} + {%- elif app_name == "Element" %} [Element] - {% else %} + {%- else %} [matrix] - {% endif %} + {%- endif %} - {% for room in rooms %} - {% include 'room.html' with context %} - {% endfor %} + {%- for room in rooms %} + {%- include 'room.html' with context %} + {%- endfor %} diff --git a/synapse/res/templates/notif_mail.txt b/synapse/res/templates/notif_mail.txt index 24843042a540..df3c253979ca 100644 --- a/synapse/res/templates/notif_mail.txt +++ b/synapse/res/templates/notif_mail.txt @@ -2,9 +2,9 @@ Hi {{ user_display_name }}, {{ summary_text }} -{% for room in rooms %} -{% include 'room.txt' with context %} -{% endfor %} +{%- for room in rooms %} +{%- include 'room.txt' with context %} +{%- endfor %} You can disable these notifications at {{ unsubscribe_link }} diff --git a/synapse/res/templates/room.html b/synapse/res/templates/room.html index b8525fef888c..4fc6f6ac9b31 100644 --- a/synapse/res/templates/room.html +++ b/synapse/res/templates/room.html @@ -1,23 +1,23 @@ - {% if room.invite %} + {%- if room.invite %} - {% else %} - {% for notif in room.notifs %} - {% include 'notif.html' with context %} - {% endfor %} - {% endif %} + {%- else %} + {%- for notif in room.notifs %} + {%- include 'notif.html' with context %} + {%- endfor %} + {%- endif %}
- {% if room.avatar_url %} + {%- if room.avatar_url %} - {% else %} - {% if room.hash % 3 == 0 %} + {%- else %} + {%- if room.hash % 3 == 0 %} - {% elif room.hash % 3 == 1 %} + {%- elif room.hash % 3 == 1 %} - {% else %} + {%- else %} - {% endif %} - {% endif %} + {%- endif %} + {%- endif %} {{ room.title }}
@@ -25,9 +25,9 @@
diff --git a/synapse/res/templates/room.txt b/synapse/res/templates/room.txt index 84648c710ece..df841e9e6f00 100644 --- a/synapse/res/templates/room.txt +++ b/synapse/res/templates/room.txt @@ -1,9 +1,9 @@ {{ room.title }} -{% if room.invite %} +{%- if room.invite %} You've been invited, join at {{ room.link }} -{% else %} - {% for notif in room.notifs %} - {% include 'notif.txt' with context %} - {% endfor %} -{% endif %} +{%- else %} + {%- for notif in room.notifs %} + {%- include 'notif.txt' with context %} + {%- endfor %} +{%- endif %} diff --git a/tests/push/test_email.py b/tests/push/test_email.py index 322456864065..55545d93410d 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -158,8 +158,21 @@ def test_multiple_members_email(self): # We should get emailed about those messages self._check_for_mail() + def test_encrypted_message(self): + room = self.helper.create_room_as(self.user_id, tok=self.access_token) + self.helper.invite( + room=room, src=self.user_id, tok=self.access_token, targ=self.others[0].id + ) + self.helper.join(room=room, user=self.others[0].id, tok=self.others[0].token) + + # The other user sends some messages + self.helper.send_event(room, "m.room.encrypted", {}, tok=self.others[0].token) + + # We should get emailed about that message + self._check_for_mail() + def _check_for_mail(self): - "Check that the user receives an email notification" + """Check that the user receives an email notification""" # Get the stream ordering before it gets sent pushers = self.get_success( From 903d11c43a5df9f704e5dad4d14506a6470524fc Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 19 Oct 2020 15:00:12 +0100 Subject: [PATCH 215/245] Add `DeferredCache.get_immediate` method (#8568) * Add `DeferredCache.get_immediate` method A bunch of things that are currently calling `DeferredCache.get` are only really interested in the result if it's completed. We can optimise and simplify this case. * Remove unused 'default' parameter to DeferredCache.get() * another get_immediate instance --- changelog.d/8568.misc | 1 + synapse/push/bulk_push_rule_evaluator.py | 2 +- synapse/storage/databases/main/pusher.py | 2 +- synapse/storage/databases/main/receipts.py | 11 +----- synapse/storage/databases/main/roommember.py | 2 +- synapse/util/caches/deferred_cache.py | 35 ++++++++++++++------ tests/util/caches/test_deferred_cache.py | 27 ++++++++++++--- 7 files changed, 53 insertions(+), 27 deletions(-) create mode 100644 changelog.d/8568.misc diff --git a/changelog.d/8568.misc b/changelog.d/8568.misc new file mode 100644 index 000000000000..0ed7db92d355 --- /dev/null +++ b/changelog.d/8568.misc @@ -0,0 +1 @@ +Add `get_immediate` method to `DeferredCache`. diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index c440f2545c5e..a701defcdda8 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -496,6 +496,6 @@ class _Invalidation(namedtuple("_Invalidation", ("cache", "room_id"))): # dedupe when we add callbacks to lru cache nodes, otherwise the number # of callbacks would grow. 
def __call__(self): - rules = self.cache.get(self.room_id, None, update_metrics=False) + rules = self.cache.get_immediate(self.room_id, None, update_metrics=False) if rules: rules.invalidate_all() diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index df8609b97bea..7997242d90b6 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -303,7 +303,7 @@ async def add_pusher( lock=False, ) - user_has_pusher = self.get_if_user_has_pusher.cache.get( + user_has_pusher = self.get_if_user_has_pusher.cache.get_immediate( (user_id,), None, update_metrics=False ) diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 5cdf16521c3f..ca7917c9895b 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -25,7 +25,6 @@ from synapse.storage.util.id_generators import StreamIdGenerator from synapse.types import JsonDict from synapse.util import json_encoder -from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.stream_change_cache import StreamChangeCache @@ -413,18 +412,10 @@ def _invalidate_get_users_with_receipts_in_room( if receipt_type != "m.read": return - # Returns either an ObservableDeferred or the raw result - res = self.get_users_with_read_receipts_in_room.cache.get( + res = self.get_users_with_read_receipts_in_room.cache.get_immediate( room_id, None, update_metrics=False ) - # first handle the ObservableDeferred case - if isinstance(res, ObservableDeferred): - if res.has_called(): - res = res.get_result() - else: - res = None - if res and user_id in res: # We'd only be adding to the set, so no point invalidating if the # user is already there diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 20fcdaa529ca..9b08b498625c 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -531,7 +531,7 @@ async def _get_joined_users_from_context( # If we do then we can reuse that result and simply update it with # any membership changes in `delta_ids` if context.prev_group and context.delta_ids: - prev_res = self._get_joined_users_from_context.cache.get( + prev_res = self._get_joined_users_from_context.cache.get_immediate( (room_id, context.prev_group), None ) if prev_res and isinstance(prev_res, dict): diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py index 4026e1f8fadc..faeef75506ba 100644 --- a/synapse/util/caches/deferred_cache.py +++ b/synapse/util/caches/deferred_cache.py @@ -17,7 +17,16 @@ import enum import threading -from typing import Callable, Generic, Iterable, MutableMapping, Optional, TypeVar, cast +from typing import ( + Callable, + Generic, + Iterable, + MutableMapping, + Optional, + TypeVar, + Union, + cast, +) from prometheus_client import Gauge @@ -33,7 +42,7 @@ ["name"], ) - +T = TypeVar("T") KT = TypeVar("KT") VT = TypeVar("VT") @@ -119,21 +128,21 @@ def check_thread(self): def get( self, key: KT, - default=_Sentinel.sentinel, callback: Optional[Callable[[], None]] = None, update_metrics: bool = True, - ): + ) -> Union[ObservableDeferred, VT]: """Looks the key up in the caches. Args: key(tuple) - default: What is returned if key is not in the caches. 
If not - specified then function throws KeyError instead callback(fn): Gets called when the entry in the cache is invalidated update_metrics (bool): whether to update the cache hit rate metrics Returns: Either an ObservableDeferred or the result itself + + Raises: + KeyError if the key is not found in the cache """ callbacks = [callback] if callback else [] val = self._pending_deferred_cache.get(key, _Sentinel.sentinel) @@ -145,13 +154,19 @@ def get( m.inc_hits() return val.deferred - val = self.cache.get( - key, default, callbacks=callbacks, update_metrics=update_metrics + val2 = self.cache.get( + key, _Sentinel.sentinel, callbacks=callbacks, update_metrics=update_metrics ) - if val is _Sentinel.sentinel: + if val2 is _Sentinel.sentinel: raise KeyError() else: - return val + return val2 + + def get_immediate( + self, key: KT, default: T, update_metrics: bool = True + ) -> Union[VT, T]: + """If we have a *completed* cached value, return it.""" + return self.cache.get(key, default, update_metrics=update_metrics) def set( self, diff --git a/tests/util/caches/test_deferred_cache.py b/tests/util/caches/test_deferred_cache.py index 9717be56b6cd..8a08ab6661d8 100644 --- a/tests/util/caches/test_deferred_cache.py +++ b/tests/util/caches/test_deferred_cache.py @@ -38,6 +38,22 @@ def test_hit(self): self.assertEquals(cache.get("foo"), 123) + def test_get_immediate(self): + cache = DeferredCache("test") + d1 = defer.Deferred() + cache.set("key1", d1) + + # get_immediate should return default + v = cache.get_immediate("key1", 1) + self.assertEqual(v, 1) + + # now complete the set + d1.callback(2) + + # get_immediate should return result + v = cache.get_immediate("key1", 1) + self.assertEqual(v, 2) + def test_invalidate(self): cache = DeferredCache("test") cache.prefill(("foo",), 123) @@ -80,9 +96,11 @@ def record_callback(idx): # now do the invalidation cache.invalidate_all() - # lookup should return none - self.assertIsNone(cache.get("key1", None)) - self.assertIsNone(cache.get("key2", None)) + # lookup should fail + with self.assertRaises(KeyError): + cache.get("key1") + with self.assertRaises(KeyError): + cache.get("key2") # both callbacks should have been callbacked self.assertTrue(callback_record[0], "Invalidation callback for key1 not called") @@ -90,7 +108,8 @@ def record_callback(idx): # letting the other lookup complete should do nothing d1.callback("result1") - self.assertIsNone(cache.get("key1", None)) + with self.assertRaises(KeyError): + cache.get("key1", None) def test_eviction(self): cache = DeferredCache( From 8f27b7fde12978a7e5b3833a2d989a9b0456d857 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 19 Oct 2020 13:03:55 -0400 Subject: [PATCH 216/245] Expose the experimental appservice login flow to clients. (#8504) --- changelog.d/8504.bugfix | 1 + synapse/rest/client/v1/login.py | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog.d/8504.bugfix diff --git a/changelog.d/8504.bugfix b/changelog.d/8504.bugfix new file mode 100644 index 000000000000..2bd0dbb8b464 --- /dev/null +++ b/changelog.d/8504.bugfix @@ -0,0 +1 @@ +Expose the `uk.half-shot.msc2778.login.application_service` to clients from the login API. This feature was added in v1.21.0, but was not exposed as a potential login flow. 
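(Editorial aside, not part of the patch: a minimal sketch of how a client could confirm that a homeserver now advertises the appservice login flow once this change is deployed. It assumes the third-party `requests` library and a reachable homeserver base URL; the endpoint and response shape follow the standard Matrix `GET /_matrix/client/r0/login` API, which lists the supported login flows.)

    import requests

    MSC2778_FLOW = "uk.half-shot.msc2778.login.application_service"

    def server_advertises_appservice_login(homeserver_url: str) -> bool:
        # GET /login returns {"flows": [{"type": ...}, ...]}; with this change the
        # appservice flow is included in that list instead of being hidden.
        resp = requests.get(f"{homeserver_url}/_matrix/client/r0/login")
        resp.raise_for_status()
        flows = resp.json().get("flows", [])
        return any(flow.get("type") == MSC2778_FLOW for flow in flows)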
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index d7deb9300d71..b82a4e978a47 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -110,6 +110,8 @@ def on_GET(self, request: SynapseRequest): ({"type": t} for t in self.auth_handler.get_supported_login_types()) ) + flows.append({"type": LoginRestServlet.APPSERVICE_TYPE}) + return 200, {"flows": flows} def on_OPTIONS(self, request: SynapseRequest): From 21bb50ca3fd4c414405b03dbbe9124128d0f2613 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Mon, 19 Oct 2020 19:32:24 +0200 Subject: [PATCH 217/245] Fix mypy error: auth handler "checkpw" internal function type mismatch (#8569) --- changelog.d/8569.misc | 1 + synapse/handlers/auth.py | 8 +++++--- tox.ini | 1 - 3 files changed, 6 insertions(+), 4 deletions(-) create mode 100644 changelog.d/8569.misc diff --git a/changelog.d/8569.misc b/changelog.d/8569.misc new file mode 100644 index 000000000000..3b6e0625e57e --- /dev/null +++ b/changelog.d/8569.misc @@ -0,0 +1 @@ +Fix mypy not properly checking across the codebase, additionally, fix a typing assertion error in `handlers/auth.py`. \ No newline at end of file diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 1d1ddc22454b..8619fbb982f6 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -1122,20 +1122,22 @@ async def validate_hash( Whether self.hash(password) == stored_hash. """ - def _do_validate_hash(): + def _do_validate_hash(checked_hash: bytes): # Normalise the Unicode in the password pw = unicodedata.normalize("NFKC", password) return bcrypt.checkpw( pw.encode("utf8") + self.hs.config.password_pepper.encode("utf8"), - stored_hash, + checked_hash, ) if stored_hash: if not isinstance(stored_hash, bytes): stored_hash = stored_hash.encode("ascii") - return await defer_to_thread(self.hs.get_reactor(), _do_validate_hash) + return await defer_to_thread( + self.hs.get_reactor(), _do_validate_hash, stored_hash + ) else: return False diff --git a/tox.ini b/tox.ini index 4d132eff4cab..6d08153782aa 100644 --- a/tox.ini +++ b/tox.ini @@ -158,7 +158,6 @@ commands= coverage html [testenv:mypy] -skip_install = True deps = {[base]deps} mypy==0.782 From 34c20493b998313ae9379f83e49cda08029f8c5c Mon Sep 17 00:00:00 2001 From: Vasilis Gerakaris Date: Mon, 19 Oct 2020 21:06:54 +0300 Subject: [PATCH 218/245] Drop unused `device_max_stream_id` table (#8589) Signed-off-by: Vasilis Gerakaris --- changelog.d/8589.removal | 1 + .../main/schema/delta/58/21drop_device_max_stream_id.sql | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/8589.removal create mode 100644 synapse/storage/databases/main/schema/delta/58/21drop_device_max_stream_id.sql diff --git a/changelog.d/8589.removal b/changelog.d/8589.removal new file mode 100644 index 000000000000..b80f29d6bbd9 --- /dev/null +++ b/changelog.d/8589.removal @@ -0,0 +1 @@ +Drop unused `device_max_stream_id` table. 
diff --git a/synapse/storage/databases/main/schema/delta/58/21drop_device_max_stream_id.sql b/synapse/storage/databases/main/schema/delta/58/21drop_device_max_stream_id.sql new file mode 100644 index 000000000000..01ea6eddcf49 --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/58/21drop_device_max_stream_id.sql @@ -0,0 +1 @@ +DROP TABLE device_max_stream_id; From 96e7d3c4a0feec6d19b873fd550bcfffd485d910 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 19 Oct 2020 21:13:50 +0100 Subject: [PATCH 219/245] Fix 'LruCache' object has no attribute '_on_resize' (#8591) We need to make sure we are ready for the `set_cache_factor` callback. --- changelog.d/8591.misc | 1 + synapse/util/caches/lrucache.py | 10 +++++++++- tests/util/test_lrucache.py | 8 +++++++- 3 files changed, 17 insertions(+), 2 deletions(-) create mode 100644 changelog.d/8591.misc diff --git a/changelog.d/8591.misc b/changelog.d/8591.misc new file mode 100644 index 000000000000..8f16bc3e7e22 --- /dev/null +++ b/changelog.d/8591.misc @@ -0,0 +1 @@ + Move metric registration code down into `LruCache`. diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 3b471d8fd350..60bb6ff642f2 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -124,6 +124,10 @@ def __init__( else: self.max_size = int(max_size) + # register_cache might call our "set_cache_factor" callback; there's nothing to + # do yet when we get resized. + self._on_resize = None # type: Optional[Callable[[],None]] + if cache_name is not None: metrics = register_cache( "lru_cache", @@ -332,7 +336,10 @@ def cache_contains(key: KT) -> bool: return key in cache self.sentinel = object() + + # make sure that we clear out any excess entries after we get resized. self._on_resize = evict + self.get = cache_get self.set = cache_set self.setdefault = cache_set_default @@ -383,6 +390,7 @@ def set_cache_factor(self, factor: float) -> bool: new_size = int(self._original_max_size * factor) if new_size != self.max_size: self.max_size = new_size - self._on_resize() + if self._on_resize: + self._on_resize() return True return False diff --git a/tests/util/test_lrucache.py b/tests/util/test_lrucache.py index f12834edab2c..a739a6aaaf8c 100644 --- a/tests/util/test_lrucache.py +++ b/tests/util/test_lrucache.py @@ -19,7 +19,8 @@ from synapse.util.caches.lrucache import LruCache from synapse.util.caches.treecache import TreeCache -from .. import unittest +from tests import unittest +from tests.unittest import override_config class LruCacheTestCase(unittest.HomeserverTestCase): @@ -83,6 +84,11 @@ def test_clear(self): cache.clear() self.assertEquals(len(cache), 0) + @override_config({"caches": {"per_cache_factors": {"mycache": 10}}}) + def test_special_size(self): + cache = LruCache(10, "mycache") + self.assertEqual(cache.max_size, 100) + class LruCacheCallbacksTestCase(unittest.HomeserverTestCase): def test_get(self): From 626b8f0846816c56cf8b3f0f3cf6a5ce12dde67b Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Tue, 20 Oct 2020 10:18:55 +0100 Subject: [PATCH 220/245] Move schema file for as_device_stream (#8590) * Move schema file * Add a .
* Add matching changelog entry * Fix sqlite --- changelog.d/8590.misc | 1 + .../{59/19as_device_stream.sql => 58/21as_device_stream.sql} | 5 ++--- 2 files changed, 3 insertions(+), 3 deletions(-) create mode 100644 changelog.d/8590.misc rename synapse/storage/databases/main/schema/delta/{59/19as_device_stream.sql => 58/21as_device_stream.sql} (80%) diff --git a/changelog.d/8590.misc b/changelog.d/8590.misc new file mode 100644 index 000000000000..4abcccb326e0 --- /dev/null +++ b/changelog.d/8590.misc @@ -0,0 +1 @@ +Implement [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409) to send typing, read receipts, and presence events to appservices. diff --git a/synapse/storage/databases/main/schema/delta/59/19as_device_stream.sql b/synapse/storage/databases/main/schema/delta/58/21as_device_stream.sql similarity index 80% rename from synapse/storage/databases/main/schema/delta/59/19as_device_stream.sql rename to synapse/storage/databases/main/schema/delta/58/21as_device_stream.sql index 20f5a95a24f8..7b84a207fd82 100644 --- a/synapse/storage/databases/main/schema/delta/59/19as_device_stream.sql +++ b/synapse/storage/databases/main/schema/delta/58/21as_device_stream.sql @@ -13,6 +13,5 @@ * limitations under the License. */ -ALTER TABLE application_services_state - ADD COLUMN read_receipt_stream_id INT, - ADD COLUMN presence_stream_id INT; \ No newline at end of file +ALTER TABLE application_services_state ADD COLUMN read_receipt_stream_id INT; +ALTER TABLE application_services_state ADD COLUMN presence_stream_id INT; \ No newline at end of file From a312e890f5b5746f991b07970dd92d680e08dd4c Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 20 Oct 2020 11:47:24 +0100 Subject: [PATCH 221/245] Cast errors generated during synapse_port_db to str (#8585) I noticed in https://github.com/matrix-org/synapse/issues/8575 that the `end_error` variable in `synapse_port_db` is set to an `Exception`, even though later we expect it to be a `str`. This PR simply casts an exception raised to a string. I'm doing this instead of having `end_error` be of type exception as we explicitly set `end_error` to a str here: https://github.com/matrix-org/synapse/blob/d25eb8f3709965d0face01a041d5292490bf0139/scripts/synapse_port_db#L542-L547 This whole file could probably use some heavy refactoring, but until then at least this fix will prevent exception contents from being hidden from us and users. --- changelog.d/8585.bugfix | 1 + scripts/synapse_port_db | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 changelog.d/8585.bugfix diff --git a/changelog.d/8585.bugfix b/changelog.d/8585.bugfix new file mode 100644 index 000000000000..e97e6ac1d8cb --- /dev/null +++ b/changelog.d/8585.bugfix @@ -0,0 +1 @@ +Fix a bug that prevented errors encountered during execution of the `synapse_port_db` from being correctly printed. \ No newline at end of file diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index 2d0b59ab534e..6c7664ad4a42 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -22,6 +22,7 @@ import logging import sys import time import traceback +from typing import Optional import yaml @@ -152,7 +153,7 @@ IGNORED_TABLES = { # Error returned by the run function. Used at the top-level part of the script to # handle errors and return codes. -end_error = None +end_error = None # type: Optional[str] # The exec_info for the error, if any. 
If error is defined but not exec_info the script # will show only the error message without the stacktrace, if exec_info is defined but # not the error then the script will show nothing outside of what's printed in the run @@ -635,7 +636,7 @@ class Porter(object): self.progress.done() except Exception as e: global end_error_exec_info - end_error = e + end_error = str(e) end_error_exec_info = sys.exc_info() logger.exception("") finally: From 74f29284aa4311f6cd798b21b0a401a667163a35 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 20 Oct 2020 11:49:15 +0100 Subject: [PATCH 222/245] Remove some extraneous @unittest.INFOs on unit tests (#8592) --- changelog.d/8592.misc | 1 + tests/rest/client/v2_alpha/test_account.py | 1 - tests/rest/client/v2_alpha/test_auth.py | 1 - 3 files changed, 1 insertion(+), 2 deletions(-) create mode 100644 changelog.d/8592.misc diff --git a/changelog.d/8592.misc b/changelog.d/8592.misc new file mode 100644 index 000000000000..099e8fb7bb2c --- /dev/null +++ b/changelog.d/8592.misc @@ -0,0 +1 @@ +Remove extraneous unittest logging decorators from unit tests. \ No newline at end of file diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py index ae2cd67f35de..66ac4dbe858d 100644 --- a/tests/rest/client/v2_alpha/test_account.py +++ b/tests/rest/client/v2_alpha/test_account.py @@ -352,7 +352,6 @@ def test_deactivate_account(self): self.render(request) self.assertEqual(request.code, 401) - @unittest.INFO def test_pending_invites(self): """Tests that deactivating a user rejects every pending invite for them.""" store = self.hs.get_datastore() diff --git a/tests/rest/client/v2_alpha/test_auth.py b/tests/rest/client/v2_alpha/test_auth.py index 293ccfba2bb1..86184f0d2ec7 100644 --- a/tests/rest/client/v2_alpha/test_auth.py +++ b/tests/rest/client/v2_alpha/test_auth.py @@ -104,7 +104,6 @@ def recaptcha( self.assertEqual(len(attempts), 1) self.assertEqual(attempts[0][0]["response"], "a") - @unittest.INFO def test_fallback_captcha(self): """Ensure that fallback auth via a captcha works.""" # Returns a 401 as per the spec From 84c0e46cced7b1fe0e3fd27eed5111884959cb36 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Tue, 20 Oct 2020 13:55:21 +0200 Subject: [PATCH 223/245] Update mypy to 0.790, and move dependencies to extras (#8583) --- changelog.d/8583.misc | 1 + setup.py | 2 ++ tox.ini | 4 +--- 3 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/8583.misc diff --git a/changelog.d/8583.misc b/changelog.d/8583.misc new file mode 100644 index 000000000000..d24973f09af1 --- /dev/null +++ b/changelog.d/8583.misc @@ -0,0 +1 @@ +Update `mypy` static type checker to 0.790. \ No newline at end of file diff --git a/setup.py b/setup.py index 08843fe2a3e4..494f50239f30 100755 --- a/setup.py +++ b/setup.py @@ -102,6 +102,8 @@ def exec_file(path_segments): "flake8", ] +CONDITIONAL_REQUIREMENTS["mypy"] = ["mypy==0.790", "mypy-zope"] + # Dependencies which are exclusively required by unit test code. This is # NOT a list of all modules that are necessary to run the unit tests. # Tests assume that all optional dependencies are installed. 
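(Editorial aside, not part of the patch: a rough sketch of the setuptools pattern this change relies on, namely declaring an optional dependency group via `extras_require` so that it can be installed with `pip install "package[mypy]"`. The package name and version below are placeholders, not Synapse's real metadata.)

    from setuptools import setup

    # Optional dependency groups; the "mypy" entry mirrors the pin added above.
    CONDITIONAL_REQUIREMENTS = {
        "mypy": ["mypy==0.790", "mypy-zope"],
    }

    setup(
        name="example-package",  # placeholder, not matrix-synapse
        version="0.0.1",
        extras_require=CONDITIONAL_REQUIREMENTS,
    )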
diff --git a/tox.ini b/tox.ini index 6d08153782aa..6dcc439a4038 100644 --- a/tox.ini +++ b/tox.ini @@ -160,9 +160,7 @@ commands= [testenv:mypy] deps = {[base]deps} - mypy==0.782 - mypy-zope -extras = all +extras = all,mypy commands = mypy # To find all folders that pass mypy you run: From 9e0f22874f7ec338895d85645e91db8674c383c0 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 20 Oct 2020 11:29:38 -0400 Subject: [PATCH 224/245] Consistently use wrap_as_background_task in more places (#8599) --- changelog.d/8599.feature | 1 + synapse/handlers/account_validity.py | 12 +++--------- synapse/handlers/profile.py | 10 +++------- synapse/storage/databases/main/events_worker.py | 11 ++++++----- synapse/storage/databases/main/roommember.py | 16 +++++++--------- 5 files changed, 20 insertions(+), 30 deletions(-) create mode 100644 changelog.d/8599.feature diff --git a/changelog.d/8599.feature b/changelog.d/8599.feature new file mode 100644 index 000000000000..542993110bc8 --- /dev/null +++ b/changelog.d/8599.feature @@ -0,0 +1 @@ +Allow running background tasks in a separate worker process. diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index f33044e97aa6..fd4f762f333d 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -22,7 +22,7 @@ from synapse.api.errors import StoreError from synapse.logging.context import make_deferred_yieldable -from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.types import UserID from synapse.util import stringutils @@ -63,16 +63,10 @@ def __init__(self, hs): self._raw_from = email.utils.parseaddr(self._from_string)[1] # Check the renewal emails to send and send them every 30min. 
- def send_emails(): - # run as a background process to make sure that the database transactions - # have a logcontext to report to - return run_as_background_process( - "send_renewals", self._send_renewal_emails - ) - if hs.config.run_background_tasks: - self.clock.looping_call(send_emails, 30 * 60 * 1000) + self.clock.looping_call(self._send_renewal_emails, 30 * 60 * 1000) + @wrap_as_background_process("send_renewals") async def _send_renewal_emails(self): """Gets the list of users whose account is expiring in the amount of time configured in the ``renew_at`` parameter from the ``account_validity`` diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index b78493875522..b78a12ad015a 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -24,7 +24,7 @@ StoreError, SynapseError, ) -from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.types import UserID, create_requester, get_domain_from_id from ._base import BaseHandler @@ -57,7 +57,7 @@ def __init__(self, hs): if hs.config.run_background_tasks: self.clock.looping_call( - self._start_update_remote_profile_cache, self.PROFILE_UPDATE_MS + self._update_remote_profile_cache, self.PROFILE_UPDATE_MS ) async def get_profile(self, user_id): @@ -370,11 +370,7 @@ async def check_profile_query_allowed(self, target_user, requester=None): raise SynapseError(403, "Profile isn't available", Codes.FORBIDDEN) raise - def _start_update_remote_profile_cache(self): - return run_as_background_process( - "Update remote profile", self._update_remote_profile_cache - ) - + @wrap_as_background_process("Update remote profile") async def _update_remote_profile_cache(self): """Called periodically to check profiles of remote users we haven't checked in a while. diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index c342df2a8b30..6e7f16f39c05 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -33,7 +33,10 @@ from synapse.events import EventBase, make_event_from_dict from synapse.events.utils import prune_event from synapse.logging.context import PreserveLoggingContext, current_context -from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.metrics.background_process_metrics import ( + run_as_background_process, + wrap_as_background_process, +) from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.replication.tcp.streams import BackfillStream from synapse.replication.tcp.streams.events import EventsStream @@ -140,10 +143,7 @@ def __init__(self, database: DatabasePool, db_conn, hs): if hs.config.run_background_tasks: # We periodically clean out old transaction ID mappings self._clock.looping_call( - run_as_background_process, - 5 * 60 * 1000, - "_cleanup_old_transaction_ids", - self._cleanup_old_transaction_ids, + self._cleanup_old_transaction_ids, 5 * 60 * 1000, ) self._get_event_cache = LruCache( @@ -1374,6 +1374,7 @@ async def get_already_persisted_events( return mapping + @wrap_as_background_process("_cleanup_old_transaction_ids") async def _cleanup_old_transaction_ids(self): """Cleans out transaction id mappings older than 24hrs. 
""" diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 9b08b498625c..01d9dbb36f44 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -20,7 +20,10 @@ from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.metrics import LaterGauge -from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.metrics.background_process_metrics import ( + run_as_background_process, + wrap_as_background_process, +) from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause from synapse.storage.database import DatabasePool from synapse.storage.databases.main.events_worker import EventsWorkerStore @@ -67,16 +70,10 @@ def __init__(self, database: DatabasePool, db_conn, hs): ): self._known_servers_count = 1 self.hs.get_clock().looping_call( - run_as_background_process, - 60 * 1000, - "_count_known_servers", - self._count_known_servers, + self._count_known_servers, 60 * 1000, ) self.hs.get_clock().call_later( - 1000, - run_as_background_process, - "_count_known_servers", - self._count_known_servers, + 1000, self._count_known_servers, ) LaterGauge( "synapse_federation_known_servers", @@ -85,6 +82,7 @@ def __init__(self, database: DatabasePool, db_conn, hs): lambda: self._known_servers_count, ) + @wrap_as_background_process("_count_known_servers") async def _count_known_servers(self): """ Count the servers that this server knows about. From de5cafe980391ae6e2de1d38ac4e42dea182a304 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 21 Oct 2020 06:44:31 -0400 Subject: [PATCH 225/245] Add type hints to profile and base handlers. (#8609) --- changelog.d/8609.misc | 1 + mypy.ini | 4 +- synapse/handlers/_base.py | 20 +++--- synapse/handlers/initial_sync.py | 8 ++- synapse/handlers/profile.py | 74 +++++++++++++++-------- synapse/storage/databases/main/profile.py | 6 +- 6 files changed, 72 insertions(+), 41 deletions(-) create mode 100644 changelog.d/8609.misc diff --git a/changelog.d/8609.misc b/changelog.d/8609.misc new file mode 100644 index 000000000000..5e3f3c199397 --- /dev/null +++ b/changelog.d/8609.misc @@ -0,0 +1 @@ +Add type hints to profile and base handler. diff --git a/mypy.ini b/mypy.ini index b5db54ee3b93..5e9f7b1259e4 100644 --- a/mypy.ini +++ b/mypy.ini @@ -15,8 +15,9 @@ files = synapse/events/builder.py, synapse/events/spamcheck.py, synapse/federation, - synapse/handlers/appservice.py, + synapse/handlers/_base.py, synapse/handlers/account_data.py, + synapse/handlers/appservice.py, synapse/handlers/auth.py, synapse/handlers/cas_handler.py, synapse/handlers/deactivate_account.py, @@ -32,6 +33,7 @@ files = synapse/handlers/pagination.py, synapse/handlers/password_policy.py, synapse/handlers/presence.py, + synapse/handlers/profile.py, synapse/handlers/read_marker.py, synapse/handlers/room.py, synapse/handlers/room_member.py, diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index 0206320e9692..bd8e71ae56a3 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -14,6 +14,7 @@ # limitations under the License. 
import logging +from typing import TYPE_CHECKING, Optional import synapse.state import synapse.storage @@ -22,6 +23,9 @@ from synapse.api.ratelimiting import Ratelimiter from synapse.types import UserID +if TYPE_CHECKING: + from synapse.app.homeserver import HomeServer + logger = logging.getLogger(__name__) @@ -30,11 +34,7 @@ class BaseHandler: Common base class for the event handlers. """ - def __init__(self, hs): - """ - Args: - hs (synapse.server.HomeServer): - """ + def __init__(self, hs: "HomeServer"): self.store = hs.get_datastore() # type: synapse.storage.DataStore self.auth = hs.get_auth() self.notifier = hs.get_notifier() @@ -56,7 +56,7 @@ def __init__(self, hs): clock=self.clock, rate_hz=self.hs.config.rc_admin_redaction.per_second, burst_count=self.hs.config.rc_admin_redaction.burst_count, - ) + ) # type: Optional[Ratelimiter] else: self.admin_redaction_ratelimiter = None @@ -127,15 +127,15 @@ async def maybe_kick_guest_users(self, event, context=None): if guest_access != "can_join": if context: current_state_ids = await context.get_current_state_ids() - current_state = await self.store.get_events( + current_state_dict = await self.store.get_events( list(current_state_ids.values()) ) + current_state = list(current_state_dict.values()) else: - current_state = await self.state_handler.get_current_state( + current_state_map = await self.state_handler.get_current_state( event.room_id ) - - current_state = list(current_state.values()) + current_state = list(current_state_map.values()) logger.info("maybe_kick_guest_users %r", current_state) await self.kick_guest_users(current_state) diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 98075f48d2b3..cb11754bf878 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -293,6 +293,10 @@ async def room_initial_sync( user_id, room_id, pagin_config, membership, is_peeking ) elif membership == Membership.LEAVE: + # The member_event_id will always be available if membership is set + # to leave. + assert member_event_id + result = await self._room_initial_sync_parted( user_id, room_id, pagin_config, membership, member_event_id, is_peeking ) @@ -315,7 +319,7 @@ async def _room_initial_sync_parted( user_id: str, room_id: str, pagin_config: PaginationConfig, - membership: Membership, + membership: str, member_event_id: str, is_peeking: bool, ) -> JsonDict: @@ -367,7 +371,7 @@ async def _room_initial_sync_joined( user_id: str, room_id: str, pagin_config: PaginationConfig, - membership: Membership, + membership: str, is_peeking: bool, ) -> JsonDict: current_state = await self.state.get_current_state(room_id=room_id) diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index b78a12ad015a..92700b589c82 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -12,9 +12,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- import logging import random +from typing import TYPE_CHECKING, Optional from synapse.api.errors import ( AuthError, @@ -25,10 +25,19 @@ SynapseError, ) from synapse.metrics.background_process_metrics import wrap_as_background_process -from synapse.types import UserID, create_requester, get_domain_from_id +from synapse.types import ( + JsonDict, + Requester, + UserID, + create_requester, + get_domain_from_id, +) from ._base import BaseHandler +if TYPE_CHECKING: + from synapse.app.homeserver import HomeServer + logger = logging.getLogger(__name__) MAX_DISPLAYNAME_LEN = 256 @@ -45,7 +54,7 @@ class ProfileHandler(BaseHandler): PROFILE_UPDATE_MS = 60 * 1000 PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000 - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): super().__init__(hs) self.federation = hs.get_federation_client() @@ -60,7 +69,7 @@ def __init__(self, hs): self._update_remote_profile_cache, self.PROFILE_UPDATE_MS ) - async def get_profile(self, user_id): + async def get_profile(self, user_id: str) -> JsonDict: target_user = UserID.from_string(user_id) if self.hs.is_mine(target_user): @@ -91,7 +100,7 @@ async def get_profile(self, user_id): except HttpResponseException as e: raise e.to_synapse_error() - async def get_profile_from_cache(self, user_id): + async def get_profile_from_cache(self, user_id: str) -> JsonDict: """Get the profile information from our local cache. If the user is ours then the profile information will always be corect. Otherwise, it may be out of date/missing. @@ -115,7 +124,7 @@ async def get_profile_from_cache(self, user_id): profile = await self.store.get_from_remote_profile_cache(user_id) return profile or {} - async def get_displayname(self, target_user): + async def get_displayname(self, target_user: UserID) -> str: if self.hs.is_mine(target_user): try: displayname = await self.store.get_profile_displayname( @@ -143,15 +152,19 @@ async def get_displayname(self, target_user): return result["displayname"] async def set_displayname( - self, target_user, requester, new_displayname, by_admin=False - ): + self, + target_user: UserID, + requester: Requester, + new_displayname: str, + by_admin: bool = False, + ) -> None: """Set the displayname of a user Args: - target_user (UserID): the user whose displayname is to be changed. - requester (Requester): The user attempting to make this change. - new_displayname (str): The displayname to give this user. - by_admin (bool): Whether this change was made by an administrator. + target_user: the user whose displayname is to be changed. + requester: The user attempting to make this change. + new_displayname: The displayname to give this user. + by_admin: Whether this change was made by an administrator. """ if not self.hs.is_mine(target_user): raise SynapseError(400, "User is not hosted on this homeserver") @@ -176,8 +189,9 @@ async def set_displayname( 400, "Displayname is too long (max %i)" % (MAX_DISPLAYNAME_LEN,) ) + displayname_to_set = new_displayname # type: Optional[str] if new_displayname == "": - new_displayname = None + displayname_to_set = None # If the admin changes the display name of a user, the requesting user cannot send # the join event to update the displayname in the rooms. 
@@ -185,7 +199,9 @@ async def set_displayname( if by_admin: requester = create_requester(target_user) - await self.store.set_profile_displayname(target_user.localpart, new_displayname) + await self.store.set_profile_displayname( + target_user.localpart, displayname_to_set + ) if self.hs.config.user_directory_search_all_users: profile = await self.store.get_profileinfo(target_user.localpart) @@ -195,7 +211,7 @@ async def set_displayname( await self._update_join_states(requester, target_user) - async def get_avatar_url(self, target_user): + async def get_avatar_url(self, target_user: UserID) -> str: if self.hs.is_mine(target_user): try: avatar_url = await self.store.get_profile_avatar_url( @@ -222,15 +238,19 @@ async def get_avatar_url(self, target_user): return result["avatar_url"] async def set_avatar_url( - self, target_user, requester, new_avatar_url, by_admin=False + self, + target_user: UserID, + requester: Requester, + new_avatar_url: str, + by_admin: bool = False, ): """Set a new avatar URL for a user. Args: - target_user (UserID): the user whose avatar URL is to be changed. - requester (Requester): The user attempting to make this change. - new_avatar_url (str): The avatar URL to give this user. - by_admin (bool): Whether this change was made by an administrator. + target_user: the user whose avatar URL is to be changed. + requester: The user attempting to make this change. + new_avatar_url: The avatar URL to give this user. + by_admin: Whether this change was made by an administrator. """ if not self.hs.is_mine(target_user): raise SynapseError(400, "User is not hosted on this homeserver") @@ -267,7 +287,7 @@ async def set_avatar_url( await self._update_join_states(requester, target_user) - async def on_profile_query(self, args): + async def on_profile_query(self, args: JsonDict) -> JsonDict: user = UserID.from_string(args["user_id"]) if not self.hs.is_mine(user): raise SynapseError(400, "User is not hosted on this homeserver") @@ -292,7 +312,9 @@ async def on_profile_query(self, args): return response - async def _update_join_states(self, requester, target_user): + async def _update_join_states( + self, requester: Requester, target_user: UserID + ) -> None: if not self.hs.is_mine(target_user): return @@ -323,15 +345,17 @@ async def _update_join_states(self, requester, target_user): "Failed to update join event for room %s - %s", room_id, str(e) ) - async def check_profile_query_allowed(self, target_user, requester=None): + async def check_profile_query_allowed( + self, target_user: UserID, requester: Optional[UserID] = None + ) -> None: """Checks whether a profile query is allowed. If the 'require_auth_for_profile_requests' config flag is set to True and a 'requester' is provided, the query is only allowed if the two users share a room. Args: - target_user (UserID): The owner of the queried profile. - requester (None|UserID): The user querying for the profile. + target_user: The owner of the queried profile. + requester: The user querying for the profile. Raises: SynapseError(403): The two users share no room, or ne user couldn't diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py index 1681caa1f031..a6d1eb908a5f 100644 --- a/synapse/storage/databases/main/profile.py +++ b/synapse/storage/databases/main/profile.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any, Dict, Optional +from typing import Any, Dict, List, Optional from synapse.api.errors import StoreError from synapse.storage._base import SQLBaseStore @@ -72,7 +72,7 @@ async def create_profile(self, user_localpart: str) -> None: ) async def set_profile_displayname( - self, user_localpart: str, new_displayname: str + self, user_localpart: str, new_displayname: Optional[str] ) -> None: await self.db_pool.simple_update_one( table="profiles", @@ -144,7 +144,7 @@ async def is_subscribed_remote_profile_for_user(self, user_id): async def get_remote_profile_cache_entries_that_expire( self, last_checked: int - ) -> Dict[str, str]: + ) -> List[Dict[str, str]]: """Get all users who haven't been checked since `last_checked` """ From 654cc9470eef8a6371f8ab6c8b4eae467068e329 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 21 Oct 2020 06:45:01 -0400 Subject: [PATCH 226/245] Pin mypy-zope for compatibility with mypy. (#8600) --- changelog.d/8600.misc | 1 + setup.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8600.misc diff --git a/changelog.d/8600.misc b/changelog.d/8600.misc new file mode 100644 index 000000000000..a5a922e641dc --- /dev/null +++ b/changelog.d/8600.misc @@ -0,0 +1 @@ +Update `mypy` static type checker to 0.790. diff --git a/setup.py b/setup.py index 494f50239f30..2f4a3170d268 100755 --- a/setup.py +++ b/setup.py @@ -102,7 +102,7 @@ def exec_file(path_segments): "flake8", ] -CONDITIONAL_REQUIREMENTS["mypy"] = ["mypy==0.790", "mypy-zope"] +CONDITIONAL_REQUIREMENTS["mypy"] = ["mypy==0.790", "mypy-zope==0.2.8"] # Dependencies which are exclusively required by unit test code. This is # NOT a list of all modules that are necessary to run the unit tests. From 20a67aa70da0e8ac49c724cdbc144004b03e5a74 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 21 Oct 2020 06:59:54 -0400 Subject: [PATCH 227/245] Separate the TCP and terse JSON formatting code. (#8587) This should (theoretically) allow for using the TCP code with a different output type and make it easier to use the JSON code with files / console. --- changelog.d/8587.misc | 1 + synapse/logging/_remote.py | 225 +++++++++++++++++++++++++++++++ synapse/logging/_terse_json.py | 199 ++------------------------- tests/logging/test_terse_json.py | 2 +- 4 files changed, 240 insertions(+), 187 deletions(-) create mode 100644 changelog.d/8587.misc create mode 100644 synapse/logging/_remote.py diff --git a/changelog.d/8587.misc b/changelog.d/8587.misc new file mode 100644 index 000000000000..9e56551a34bf --- /dev/null +++ b/changelog.d/8587.misc @@ -0,0 +1 @@ +Re-organize the structured logging code to separate the TCP transport handling from the JSON formatting. diff --git a/synapse/logging/_remote.py b/synapse/logging/_remote.py new file mode 100644 index 000000000000..0caf32591623 --- /dev/null +++ b/synapse/logging/_remote.py @@ -0,0 +1,225 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import traceback +from collections import deque +from ipaddress import IPv4Address, IPv6Address, ip_address +from math import floor +from typing import Callable, Optional + +import attr +from zope.interface import implementer + +from twisted.application.internet import ClientService +from twisted.internet.defer import Deferred +from twisted.internet.endpoints import ( + HostnameEndpoint, + TCP4ClientEndpoint, + TCP6ClientEndpoint, +) +from twisted.internet.interfaces import IPushProducer, ITransport +from twisted.internet.protocol import Factory, Protocol +from twisted.logger import ILogObserver, Logger, LogLevel + + +@attr.s +@implementer(IPushProducer) +class LogProducer: + """ + An IPushProducer that writes logs from its buffer to its transport when it + is resumed. + + Args: + buffer: Log buffer to read logs from. + transport: Transport to write to. + format_event: A callable to format the log entry to a string. + """ + + transport = attr.ib(type=ITransport) + format_event = attr.ib(type=Callable[[dict], str]) + _buffer = attr.ib(type=deque) + _paused = attr.ib(default=False, type=bool, init=False) + + def pauseProducing(self): + self._paused = True + + def stopProducing(self): + self._paused = True + self._buffer = deque() + + def resumeProducing(self): + self._paused = False + + while self._paused is False and (self._buffer and self.transport.connected): + try: + # Request the next event and format it. + event = self._buffer.popleft() + msg = self.format_event(event) + + # Send it as a new line over the transport. + self.transport.write(msg.encode("utf8")) + except Exception: + # Something has gone wrong writing to the transport -- log it + # and break out of the while. + traceback.print_exc(file=sys.__stderr__) + break + + +@attr.s +@implementer(ILogObserver) +class TCPLogObserver: + """ + An IObserver that writes JSON logs to a TCP target. + + Args: + hs (HomeServer): The homeserver that is being logged for. + host: The host of the logging target. + port: The logging target's port. + format_event: A callable to format the log entry to a string. + maximum_buffer: The maximum buffer size. + """ + + hs = attr.ib() + host = attr.ib(type=str) + port = attr.ib(type=int) + format_event = attr.ib(type=Callable[[dict], str]) + maximum_buffer = attr.ib(type=int) + _buffer = attr.ib(default=attr.Factory(deque), type=deque) + _connection_waiter = attr.ib(default=None, type=Optional[Deferred]) + _logger = attr.ib(default=attr.Factory(Logger)) + _producer = attr.ib(default=None, type=Optional[LogProducer]) + + def start(self) -> None: + + # Connect without DNS lookups if it's a direct IP. + try: + ip = ip_address(self.host) + if isinstance(ip, IPv4Address): + endpoint = TCP4ClientEndpoint( + self.hs.get_reactor(), self.host, self.port + ) + elif isinstance(ip, IPv6Address): + endpoint = TCP6ClientEndpoint( + self.hs.get_reactor(), self.host, self.port + ) + else: + raise ValueError("Unknown IP address provided: %s" % (self.host,)) + except ValueError: + endpoint = HostnameEndpoint(self.hs.get_reactor(), self.host, self.port) + + factory = Factory.forProtocol(Protocol) + self._service = ClientService(endpoint, factory, clock=self.hs.get_reactor()) + self._service.startService() + self._connect() + + def stop(self): + self._service.stopService() + + def _connect(self) -> None: + """ + Triggers an attempt to connect then write to the remote if not already writing. 
+ """ + if self._connection_waiter: + return + + self._connection_waiter = self._service.whenConnected(failAfterFailures=1) + + @self._connection_waiter.addErrback + def fail(r): + r.printTraceback(file=sys.__stderr__) + self._connection_waiter = None + self._connect() + + @self._connection_waiter.addCallback + def writer(r): + # We have a connection. If we already have a producer, and its + # transport is the same, just trigger a resumeProducing. + if self._producer and r.transport is self._producer.transport: + self._producer.resumeProducing() + self._connection_waiter = None + return + + # If the producer is still producing, stop it. + if self._producer: + self._producer.stopProducing() + + # Make a new producer and start it. + self._producer = LogProducer( + buffer=self._buffer, + transport=r.transport, + format_event=self.format_event, + ) + r.transport.registerProducer(self._producer, True) + self._producer.resumeProducing() + self._connection_waiter = None + + def _handle_pressure(self) -> None: + """ + Handle backpressure by shedding events. + + The buffer will, in this order, until the buffer is below the maximum: + - Shed DEBUG events + - Shed INFO events + - Shed the middle 50% of the events. + """ + if len(self._buffer) <= self.maximum_buffer: + return + + # Strip out DEBUGs + self._buffer = deque( + filter(lambda event: event["log_level"] != LogLevel.debug, self._buffer) + ) + + if len(self._buffer) <= self.maximum_buffer: + return + + # Strip out INFOs + self._buffer = deque( + filter(lambda event: event["log_level"] != LogLevel.info, self._buffer) + ) + + if len(self._buffer) <= self.maximum_buffer: + return + + # Cut the middle entries out + buffer_split = floor(self.maximum_buffer / 2) + + old_buffer = self._buffer + self._buffer = deque() + + for i in range(buffer_split): + self._buffer.append(old_buffer.popleft()) + + end_buffer = [] + for i in range(buffer_split): + end_buffer.append(old_buffer.pop()) + + self._buffer.extend(reversed(end_buffer)) + + def __call__(self, event: dict) -> None: + self._buffer.append(event) + + # Handle backpressure, if it exists. + try: + self._handle_pressure() + except Exception: + # If handling backpressure fails,clear the buffer and log the + # exception. + self._buffer.clear() + self._logger.failure("Failed clearing backpressure") + + # Try and write immediately. 
+ self._connect() diff --git a/synapse/logging/_terse_json.py b/synapse/logging/_terse_json.py index 1b8916cfa281..9b46956ca96b 100644 --- a/synapse/logging/_terse_json.py +++ b/synapse/logging/_terse_json.py @@ -18,26 +18,11 @@ """ import json -import sys -import traceback -from collections import deque -from ipaddress import IPv4Address, IPv6Address, ip_address -from math import floor -from typing import IO, Optional +from typing import IO -import attr -from zope.interface import implementer +from twisted.logger import FileLogObserver -from twisted.application.internet import ClientService -from twisted.internet.defer import Deferred -from twisted.internet.endpoints import ( - HostnameEndpoint, - TCP4ClientEndpoint, - TCP6ClientEndpoint, -) -from twisted.internet.interfaces import IPushProducer, ITransport -from twisted.internet.protocol import Factory, Protocol -from twisted.logger import FileLogObserver, ILogObserver, Logger +from synapse.logging._remote import TCPLogObserver _encoder = json.JSONEncoder(ensure_ascii=False, separators=(",", ":")) @@ -150,180 +135,22 @@ def formatEvent(_event: dict) -> str: return FileLogObserver(outFile, formatEvent) -@attr.s -@implementer(IPushProducer) -class LogProducer: +def TerseJSONToTCPLogObserver( + hs, host: str, port: int, metadata: dict, maximum_buffer: int +) -> FileLogObserver: """ - An IPushProducer that writes logs from its buffer to its transport when it - is resumed. - - Args: - buffer: Log buffer to read logs from. - transport: Transport to write to. - """ - - transport = attr.ib(type=ITransport) - _buffer = attr.ib(type=deque) - _paused = attr.ib(default=False, type=bool, init=False) - - def pauseProducing(self): - self._paused = True - - def stopProducing(self): - self._paused = True - self._buffer = deque() - - def resumeProducing(self): - self._paused = False - - while self._paused is False and (self._buffer and self.transport.connected): - try: - event = self._buffer.popleft() - self.transport.write(_encoder.encode(event).encode("utf8")) - self.transport.write(b"\n") - except Exception: - # Something has gone wrong writing to the transport -- log it - # and break out of the while. - traceback.print_exc(file=sys.__stderr__) - break - - -@attr.s -@implementer(ILogObserver) -class TerseJSONToTCPLogObserver: - """ - An IObserver that writes JSON logs to a TCP target. + A log observer that formats events to a flattened JSON representation. Args: hs (HomeServer): The homeserver that is being logged for. host: The host of the logging target. port: The logging target's port. - metadata: Metadata to be added to each log entry. + metadata: Metadata to be added to each log object. + maximum_buffer: The maximum buffer size. """ - hs = attr.ib() - host = attr.ib(type=str) - port = attr.ib(type=int) - metadata = attr.ib(type=dict) - maximum_buffer = attr.ib(type=int) - _buffer = attr.ib(default=attr.Factory(deque), type=deque) - _connection_waiter = attr.ib(default=None, type=Optional[Deferred]) - _logger = attr.ib(default=attr.Factory(Logger)) - _producer = attr.ib(default=None, type=Optional[LogProducer]) - - def start(self) -> None: - - # Connect without DNS lookups if it's a direct IP. 
- try: - ip = ip_address(self.host) - if isinstance(ip, IPv4Address): - endpoint = TCP4ClientEndpoint( - self.hs.get_reactor(), self.host, self.port - ) - elif isinstance(ip, IPv6Address): - endpoint = TCP6ClientEndpoint( - self.hs.get_reactor(), self.host, self.port - ) - except ValueError: - endpoint = HostnameEndpoint(self.hs.get_reactor(), self.host, self.port) - - factory = Factory.forProtocol(Protocol) - self._service = ClientService(endpoint, factory, clock=self.hs.get_reactor()) - self._service.startService() - self._connect() - - def stop(self): - self._service.stopService() - - def _connect(self) -> None: - """ - Triggers an attempt to connect then write to the remote if not already writing. - """ - if self._connection_waiter: - return - - self._connection_waiter = self._service.whenConnected(failAfterFailures=1) - - @self._connection_waiter.addErrback - def fail(r): - r.printTraceback(file=sys.__stderr__) - self._connection_waiter = None - self._connect() - - @self._connection_waiter.addCallback - def writer(r): - # We have a connection. If we already have a producer, and its - # transport is the same, just trigger a resumeProducing. - if self._producer and r.transport is self._producer.transport: - self._producer.resumeProducing() - self._connection_waiter = None - return - - # If the producer is still producing, stop it. - if self._producer: - self._producer.stopProducing() - - # Make a new producer and start it. - self._producer = LogProducer(buffer=self._buffer, transport=r.transport) - r.transport.registerProducer(self._producer, True) - self._producer.resumeProducing() - self._connection_waiter = None - - def _handle_pressure(self) -> None: - """ - Handle backpressure by shedding events. - - The buffer will, in this order, until the buffer is below the maximum: - - Shed DEBUG events - - Shed INFO events - - Shed the middle 50% of the events. - """ - if len(self._buffer) <= self.maximum_buffer: - return - - # Strip out DEBUGs - self._buffer = deque( - filter(lambda event: event["level"] != "DEBUG", self._buffer) - ) - - if len(self._buffer) <= self.maximum_buffer: - return - - # Strip out INFOs - self._buffer = deque( - filter(lambda event: event["level"] != "INFO", self._buffer) - ) - - if len(self._buffer) <= self.maximum_buffer: - return - - # Cut the middle entries out - buffer_split = floor(self.maximum_buffer / 2) - - old_buffer = self._buffer - self._buffer = deque() - - for i in range(buffer_split): - self._buffer.append(old_buffer.popleft()) - - end_buffer = [] - for i in range(buffer_split): - end_buffer.append(old_buffer.pop()) - - self._buffer.extend(reversed(end_buffer)) - - def __call__(self, event: dict) -> None: - flattened = flatten_event(event, self.metadata, include_time=True) - self._buffer.append(flattened) - - # Handle backpressure, if it exists. - try: - self._handle_pressure() - except Exception: - # If handling backpressure fails,clear the buffer and log the - # exception. - self._buffer.clear() - self._logger.failure("Failed clearing backpressure") + def formatEvent(_event: dict) -> str: + flattened = flatten_event(_event, metadata, include_time=True) + return _encoder.encode(flattened) + "\n" - # Try and write immediately. 
- self._connect() + return TCPLogObserver(hs, host, port, formatEvent, maximum_buffer) diff --git a/tests/logging/test_terse_json.py b/tests/logging/test_terse_json.py index 4cf81f71284b..fd128b88e0c0 100644 --- a/tests/logging/test_terse_json.py +++ b/tests/logging/test_terse_json.py @@ -78,7 +78,7 @@ def test_log_output(self): "server_name", "name", ] - self.assertEqual(set(log.keys()), set(expected_log_keys)) + self.assertCountEqual(log.keys(), expected_log_keys) # It contains the data we expect. self.assertEqual(log["name"], "wally") From 70259d8c8c0be71d3588a16211ccb42af87235da Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Wed, 21 Oct 2020 15:36:53 +0100 Subject: [PATCH 228/245] Limit AS transactions to 100 events (#8606) * Limit AS transactions to 100 events * Update changelog.d/8606.feature Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> * Add tests * Update synapse/appservice/scheduler.py Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> --- changelog.d/8606.feature | 1 + synapse/appservice/scheduler.py | 18 +++++++++++-- tests/appservice/test_scheduler.py | 41 ++++++++++++++++++++++++++++++ 3 files changed, 58 insertions(+), 2 deletions(-) create mode 100644 changelog.d/8606.feature diff --git a/changelog.d/8606.feature b/changelog.d/8606.feature new file mode 100644 index 000000000000..fad723c10859 --- /dev/null +++ b/changelog.d/8606.feature @@ -0,0 +1 @@ +Limit appservice transactions to 100 persistent and 100 ephemeral events. diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index ad3c408519ee..58291afc2231 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -60,6 +60,13 @@ logger = logging.getLogger(__name__) +# Maximum number of events to provide in an AS transaction. +MAX_PERSISTENT_EVENTS_PER_TRANSACTION = 100 + +# Maximum number of ephemeral events to provide in an AS transaction. +MAX_EPHEMERAL_EVENTS_PER_TRANSACTION = 100 + + class ApplicationServiceScheduler: """ Public facing API for this module. Does the required DI to tie the components together. 
This also serves as the "event_pool", which in this @@ -136,10 +143,17 @@ async def _send_request(self, service: ApplicationService): self.requests_in_flight.add(service.id) try: while True: - events = self.queued_events.pop(service.id, []) - ephemeral = self.queued_ephemeral.pop(service.id, []) + all_events = self.queued_events.get(service.id, []) + events = all_events[:MAX_PERSISTENT_EVENTS_PER_TRANSACTION] + del all_events[:MAX_PERSISTENT_EVENTS_PER_TRANSACTION] + + all_events_ephemeral = self.queued_ephemeral.get(service.id, []) + ephemeral = all_events_ephemeral[:MAX_EPHEMERAL_EVENTS_PER_TRANSACTION] + del all_events_ephemeral[:MAX_EPHEMERAL_EVENTS_PER_TRANSACTION] + if not events and not ephemeral: return + try: await self.txn_ctrl.send(service, events, ephemeral) except Exception: diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py index 2acb8b7603b0..97f8cad0ddd4 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py @@ -260,6 +260,31 @@ def do_send(x, y, z): self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event2], []) self.assertEquals(3, self.txn_ctrl.send.call_count) + def test_send_large_txns(self): + srv_1_defer = defer.Deferred() + srv_2_defer = defer.Deferred() + send_return_list = [srv_1_defer, srv_2_defer] + + def do_send(x, y, z): + return make_deferred_yieldable(send_return_list.pop(0)) + + self.txn_ctrl.send = Mock(side_effect=do_send) + + service = Mock(id=4, name="service") + event_list = [Mock(name="event%i" % (i + 1)) for i in range(200)] + for event in event_list: + self.queuer.enqueue_event(service, event) + + # Expect the first event to be sent immediately. + self.txn_ctrl.send.assert_called_with(service, [event_list[0]], []) + srv_1_defer.callback(service) + # Then send the next 100 events + self.txn_ctrl.send.assert_called_with(service, event_list[1:101], []) + srv_2_defer.callback(service) + # Then the final 99 events + self.txn_ctrl.send.assert_called_with(service, event_list[101:], []) + self.assertEquals(3, self.txn_ctrl.send.call_count) + def test_send_single_ephemeral_no_queue(self): # Expect the event to be sent immediately. service = Mock(id=4, name="service") @@ -296,3 +321,19 @@ def test_send_single_ephemeral_with_queue(self): # Expect the queued events to be sent self.txn_ctrl.send.assert_called_with(service, [], event_list_2 + event_list_3) self.assertEquals(2, self.txn_ctrl.send.call_count) + + def test_send_large_txns_ephemeral(self): + d = defer.Deferred() + self.txn_ctrl.send = Mock( + side_effect=lambda x, y, z: make_deferred_yieldable(d) + ) + # Expect the event to be sent immediately. 
+ service = Mock(id=4, name="service") + first_chunk = [Mock(name="event%i" % (i + 1)) for i in range(100)] + second_chunk = [Mock(name="event%i" % (i + 101)) for i in range(50)] + event_list = first_chunk + second_chunk + self.queuer.enqueue_ephemeral(service, event_list) + self.txn_ctrl.send.assert_called_once_with(service, [], first_chunk) + d.callback(service) + self.txn_ctrl.send.assert_called_with(service, [], second_chunk) + self.assertEquals(2, self.txn_ctrl.send.call_count) From 7b71695388fa2edd7ea5fd946b3d2afb68f4ef9d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 16 Oct 2020 22:31:16 +0100 Subject: [PATCH 229/245] Combine the two sets of tests for CacheDescriptor --- tests/storage/test__base.py | 228 ------------------------- tests/util/caches/test_descriptors.py | 230 ++++++++++++++++++++++++++ 2 files changed, 230 insertions(+), 228 deletions(-) diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py index 8e69b1e9cc91..1ac4ebc61d4c 100644 --- a/tests/storage/test__base.py +++ b/tests/storage/test__base.py @@ -15,237 +15,9 @@ # limitations under the License. -from mock import Mock - -from twisted.internet import defer - -from synapse.util.async_helpers import ObservableDeferred -from synapse.util.caches.descriptors import cached - from tests import unittest -class CacheDecoratorTestCase(unittest.HomeserverTestCase): - @defer.inlineCallbacks - def test_passthrough(self): - class A: - @cached() - def func(self, key): - return key - - a = A() - - self.assertEquals((yield a.func("foo")), "foo") - self.assertEquals((yield a.func("bar")), "bar") - - @defer.inlineCallbacks - def test_hit(self): - callcount = [0] - - class A: - @cached() - def func(self, key): - callcount[0] += 1 - return key - - a = A() - yield a.func("foo") - - self.assertEquals(callcount[0], 1) - - self.assertEquals((yield a.func("foo")), "foo") - self.assertEquals(callcount[0], 1) - - @defer.inlineCallbacks - def test_invalidate(self): - callcount = [0] - - class A: - @cached() - def func(self, key): - callcount[0] += 1 - return key - - a = A() - yield a.func("foo") - - self.assertEquals(callcount[0], 1) - - a.func.invalidate(("foo",)) - - yield a.func("foo") - - self.assertEquals(callcount[0], 2) - - def test_invalidate_missing(self): - class A: - @cached() - def func(self, key): - return key - - A().func.invalidate(("what",)) - - @defer.inlineCallbacks - def test_max_entries(self): - callcount = [0] - - class A: - @cached(max_entries=10) - def func(self, key): - callcount[0] += 1 - return key - - a = A() - - for k in range(0, 12): - yield a.func(k) - - self.assertEquals(callcount[0], 12) - - # There must have been at least 2 evictions, meaning if we calculate - # all 12 values again, we must get called at least 2 more times - for k in range(0, 12): - yield a.func(k) - - self.assertTrue( - callcount[0] >= 14, msg="Expected callcount >= 14, got %d" % (callcount[0]) - ) - - def test_prefill(self): - callcount = [0] - - d = defer.succeed(123) - - class A: - @cached() - def func(self, key): - callcount[0] += 1 - return d - - a = A() - - a.func.prefill(("foo",), ObservableDeferred(d)) - - self.assertEquals(a.func("foo").result, d.result) - self.assertEquals(callcount[0], 0) - - @defer.inlineCallbacks - def test_invalidate_context(self): - callcount = [0] - callcount2 = [0] - - class A: - @cached() - def func(self, key): - callcount[0] += 1 - return key - - @cached(cache_context=True) - def func2(self, key, cache_context): - callcount2[0] += 1 - return self.func(key, 
on_invalidate=cache_context.invalidate) - - a = A() - yield a.func2("foo") - - self.assertEquals(callcount[0], 1) - self.assertEquals(callcount2[0], 1) - - a.func.invalidate(("foo",)) - yield a.func("foo") - - self.assertEquals(callcount[0], 2) - self.assertEquals(callcount2[0], 1) - - yield a.func2("foo") - - self.assertEquals(callcount[0], 2) - self.assertEquals(callcount2[0], 2) - - @defer.inlineCallbacks - def test_eviction_context(self): - callcount = [0] - callcount2 = [0] - - class A: - @cached(max_entries=2) - def func(self, key): - callcount[0] += 1 - return key - - @cached(cache_context=True) - def func2(self, key, cache_context): - callcount2[0] += 1 - return self.func(key, on_invalidate=cache_context.invalidate) - - a = A() - yield a.func2("foo") - yield a.func2("foo2") - - self.assertEquals(callcount[0], 2) - self.assertEquals(callcount2[0], 2) - - yield a.func2("foo") - self.assertEquals(callcount[0], 2) - self.assertEquals(callcount2[0], 2) - - yield a.func("foo3") - - self.assertEquals(callcount[0], 3) - self.assertEquals(callcount2[0], 2) - - yield a.func2("foo") - - self.assertEquals(callcount[0], 4) - self.assertEquals(callcount2[0], 3) - - @defer.inlineCallbacks - def test_double_get(self): - callcount = [0] - callcount2 = [0] - - class A: - @cached() - def func(self, key): - callcount[0] += 1 - return key - - @cached(cache_context=True) - def func2(self, key, cache_context): - callcount2[0] += 1 - return self.func(key, on_invalidate=cache_context.invalidate) - - a = A() - a.func2.cache.cache = Mock(wraps=a.func2.cache.cache) - - yield a.func2("foo") - - self.assertEquals(callcount[0], 1) - self.assertEquals(callcount2[0], 1) - - a.func2.invalidate(("foo",)) - self.assertEquals(a.func2.cache.cache.pop.call_count, 1) - - yield a.func2("foo") - a.func2.invalidate(("foo",)) - self.assertEquals(a.func2.cache.cache.pop.call_count, 2) - - self.assertEquals(callcount[0], 1) - self.assertEquals(callcount2[0], 2) - - a.func.invalidate(("foo",)) - self.assertEquals(a.func2.cache.cache.pop.call_count, 3) - yield a.func("foo") - - self.assertEquals(callcount[0], 2) - self.assertEquals(callcount2[0], 2) - - yield a.func2("foo") - - self.assertEquals(callcount[0], 2) - self.assertEquals(callcount2[0], 3) - - class UpsertManyTests(unittest.HomeserverTestCase): def prepare(self, reactor, clock, hs): self.storage = hs.get_datastore() diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 3d1f960869da..3d738afa7f6f 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -27,6 +27,7 @@ current_context, make_deferred_yieldable, ) +from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches import descriptors from synapse.util.caches.descriptors import cached @@ -311,6 +312,235 @@ def fn(self, arg1): self.failureResultOf(d, SynapseError) +class CacheDecoratorTestCase(unittest.HomeserverTestCase): + """More tests for @cached + + The following is a set of tests that got lost in a different file for a while. + + There are probably duplicates of the tests in DescriptorTestCase. Ideally the + duplicates would be removed and the two sets of classes combined. 
+ """ + + @defer.inlineCallbacks + def test_passthrough(self): + class A: + @cached() + def func(self, key): + return key + + a = A() + + self.assertEquals((yield a.func("foo")), "foo") + self.assertEquals((yield a.func("bar")), "bar") + + @defer.inlineCallbacks + def test_hit(self): + callcount = [0] + + class A: + @cached() + def func(self, key): + callcount[0] += 1 + return key + + a = A() + yield a.func("foo") + + self.assertEquals(callcount[0], 1) + + self.assertEquals((yield a.func("foo")), "foo") + self.assertEquals(callcount[0], 1) + + @defer.inlineCallbacks + def test_invalidate(self): + callcount = [0] + + class A: + @cached() + def func(self, key): + callcount[0] += 1 + return key + + a = A() + yield a.func("foo") + + self.assertEquals(callcount[0], 1) + + a.func.invalidate(("foo",)) + + yield a.func("foo") + + self.assertEquals(callcount[0], 2) + + def test_invalidate_missing(self): + class A: + @cached() + def func(self, key): + return key + + A().func.invalidate(("what",)) + + @defer.inlineCallbacks + def test_max_entries(self): + callcount = [0] + + class A: + @cached(max_entries=10) + def func(self, key): + callcount[0] += 1 + return key + + a = A() + + for k in range(0, 12): + yield a.func(k) + + self.assertEquals(callcount[0], 12) + + # There must have been at least 2 evictions, meaning if we calculate + # all 12 values again, we must get called at least 2 more times + for k in range(0, 12): + yield a.func(k) + + self.assertTrue( + callcount[0] >= 14, msg="Expected callcount >= 14, got %d" % (callcount[0]) + ) + + def test_prefill(self): + callcount = [0] + + d = defer.succeed(123) + + class A: + @cached() + def func(self, key): + callcount[0] += 1 + return d + + a = A() + + a.func.prefill(("foo",), ObservableDeferred(d)) + + self.assertEquals(a.func("foo").result, d.result) + self.assertEquals(callcount[0], 0) + + @defer.inlineCallbacks + def test_invalidate_context(self): + callcount = [0] + callcount2 = [0] + + class A: + @cached() + def func(self, key): + callcount[0] += 1 + return key + + @cached(cache_context=True) + def func2(self, key, cache_context): + callcount2[0] += 1 + return self.func(key, on_invalidate=cache_context.invalidate) + + a = A() + yield a.func2("foo") + + self.assertEquals(callcount[0], 1) + self.assertEquals(callcount2[0], 1) + + a.func.invalidate(("foo",)) + yield a.func("foo") + + self.assertEquals(callcount[0], 2) + self.assertEquals(callcount2[0], 1) + + yield a.func2("foo") + + self.assertEquals(callcount[0], 2) + self.assertEquals(callcount2[0], 2) + + @defer.inlineCallbacks + def test_eviction_context(self): + callcount = [0] + callcount2 = [0] + + class A: + @cached(max_entries=2) + def func(self, key): + callcount[0] += 1 + return key + + @cached(cache_context=True) + def func2(self, key, cache_context): + callcount2[0] += 1 + return self.func(key, on_invalidate=cache_context.invalidate) + + a = A() + yield a.func2("foo") + yield a.func2("foo2") + + self.assertEquals(callcount[0], 2) + self.assertEquals(callcount2[0], 2) + + yield a.func2("foo") + self.assertEquals(callcount[0], 2) + self.assertEquals(callcount2[0], 2) + + yield a.func("foo3") + + self.assertEquals(callcount[0], 3) + self.assertEquals(callcount2[0], 2) + + yield a.func2("foo") + + self.assertEquals(callcount[0], 4) + self.assertEquals(callcount2[0], 3) + + @defer.inlineCallbacks + def test_double_get(self): + callcount = [0] + callcount2 = [0] + + class A: + @cached() + def func(self, key): + callcount[0] += 1 + return key + + @cached(cache_context=True) + def 
func2(self, key, cache_context): + callcount2[0] += 1 + return self.func(key, on_invalidate=cache_context.invalidate) + + a = A() + a.func2.cache.cache = mock.Mock(wraps=a.func2.cache.cache) + + yield a.func2("foo") + + self.assertEquals(callcount[0], 1) + self.assertEquals(callcount2[0], 1) + + a.func2.invalidate(("foo",)) + self.assertEquals(a.func2.cache.cache.pop.call_count, 1) + + yield a.func2("foo") + a.func2.invalidate(("foo",)) + self.assertEquals(a.func2.cache.cache.pop.call_count, 2) + + self.assertEquals(callcount[0], 1) + self.assertEquals(callcount2[0], 2) + + a.func.invalidate(("foo",)) + self.assertEquals(a.func2.cache.cache.pop.call_count, 3) + yield a.func("foo") + + self.assertEquals(callcount[0], 2) + self.assertEquals(callcount2[0], 2) + + yield a.func2("foo") + + self.assertEquals(callcount[0], 2) + self.assertEquals(callcount2[0], 3) + + class CachedListDescriptorTestCase(unittest.TestCase): @defer.inlineCallbacks def test_cache(self): From 1f4269700c2353263a605856f28ded28501368e1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 16 Oct 2020 12:34:55 +0100 Subject: [PATCH 230/245] Push some deferred wrangling down into DeferredCache --- changelog.d/8572.misc | 1 + synapse/util/caches/deferred_cache.py | 57 ++++++++++++++++++++---- synapse/util/caches/descriptors.py | 32 +++---------- tests/util/caches/test_deferred_cache.py | 18 ++++---- tests/util/caches/test_descriptors.py | 5 +-- 5 files changed, 67 insertions(+), 46 deletions(-) create mode 100644 changelog.d/8572.misc diff --git a/changelog.d/8572.misc b/changelog.d/8572.misc new file mode 100644 index 000000000000..ea2a6d340d71 --- /dev/null +++ b/changelog.d/8572.misc @@ -0,0 +1 @@ +Modify `DeferredCache.get()` to return `Deferred`s instead of `ObservableDeferred`s. diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py index faeef75506ba..6c162e9f3444 100644 --- a/synapse/util/caches/deferred_cache.py +++ b/synapse/util/caches/deferred_cache.py @@ -57,7 +57,7 @@ class DeferredCache(Generic[KT, VT]): """Wraps an LruCache, adding support for Deferred results. It expects that each entry added with set() will be a Deferred; likewise get() - may return an ObservableDeferred. + will return a Deferred. """ __slots__ = ( @@ -130,16 +130,22 @@ def get( key: KT, callback: Optional[Callable[[], None]] = None, update_metrics: bool = True, - ) -> Union[ObservableDeferred, VT]: + ) -> defer.Deferred: """Looks the key up in the caches. + For symmetry with set(), this method does *not* follow the synapse logcontext + rules: the logcontext will not be cleared on return, and the Deferred will run + its callbacks in the sentinel context. In other words: wrap the result with + make_deferred_yieldable() before `await`ing it. + Args: - key(tuple) - callback(fn): Gets called when the entry in the cache is invalidated + key: + callback: Gets called when the entry in the cache is invalidated update_metrics (bool): whether to update the cache hit rate metrics Returns: - Either an ObservableDeferred or the result itself + A Deferred which completes with the result. Note that this may later fail + if there is an ongoing set() operation which later completes with a failure. 
Raises: KeyError if the key is not found in the cache @@ -152,7 +158,7 @@ def get( m = self.cache.metrics assert m # we always have a name, so should always have metrics m.inc_hits() - return val.deferred + return val.deferred.observe() val2 = self.cache.get( key, _Sentinel.sentinel, callbacks=callbacks, update_metrics=update_metrics @@ -160,7 +166,7 @@ def get( if val2 is _Sentinel.sentinel: raise KeyError() else: - return val2 + return defer.succeed(val2) def get_immediate( self, key: KT, default: T, update_metrics: bool = True @@ -173,7 +179,36 @@ def set( key: KT, value: defer.Deferred, callback: Optional[Callable[[], None]] = None, - ) -> ObservableDeferred: + ) -> defer.Deferred: + """Adds a new entry to the cache (or updates an existing one). + + The given `value` *must* be a Deferred. + + First any existing entry for the same key is invalidated. Then a new entry + is added to the cache for the given key. + + Until the `value` completes, calls to `get()` for the key will also result in an + incomplete Deferred, which will ultimately complete with the same result as + `value`. + + If `value` completes successfully, subsequent calls to `get()` will then return + a completed deferred with the same result. If it *fails*, the cache is + invalidated and subequent calls to `get()` will raise a KeyError. + + If another call to `set()` happens before `value` completes, then (a) any + invalidation callbacks registered in the interim will be called, (b) any + `get()`s in the interim will continue to complete with the result from the + *original* `value`, (c) any future calls to `get()` will complete with the + result from the *new* `value`. + + It is expected that `value` does *not* follow the synapse logcontext rules - ie, + if it is incomplete, it runs its callbacks in the sentinel context. + + Args: + key: Key to be set + value: a deferred which will complete with a result to add to the cache + callback: An optional callback to be called when the entry is invalidated + """ if not isinstance(value, defer.Deferred): raise TypeError("not a Deferred") @@ -187,6 +222,8 @@ def set( if existing_entry: existing_entry.invalidate() + # XXX: why don't we invalidate the entry in `self.cache` yet? + self._pending_deferred_cache[key] = entry def compare_and_pop(): @@ -230,7 +267,9 @@ def eb(_fail): # _pending_deferred_cache to the real cache. # observer.addCallbacks(cb, eb) - return observable + + # we return a new Deferred which will be called before any subsequent observers. 
+ return observable.observe() def prefill(self, key: KT, value: VT, callback: Callable[[], None] = None): callbacks = [callback] if callback else [] diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 1f438868047b..a4172345ef55 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -23,7 +23,6 @@ from synapse.logging.context import make_deferred_yieldable, preserve_fn from synapse.util import unwrapFirstError -from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches.deferred_cache import DeferredCache logger = logging.getLogger(__name__) @@ -156,7 +155,7 @@ def __get__(self, obj, owner): keylen=self.num_args, tree=self.tree, iterable=self.iterable, - ) # type: DeferredCache[Tuple, Any] + ) # type: DeferredCache[CacheKey, Any] def get_cache_key_gen(args, kwargs): """Given some args/kwargs return a generator that resolves into @@ -208,26 +207,12 @@ def _wrapped(*args, **kwargs): kwargs["cache_context"] = _CacheContext.get_instance(cache, cache_key) try: - cached_result_d = cache.get(cache_key, callback=invalidate_callback) - - if isinstance(cached_result_d, ObservableDeferred): - observer = cached_result_d.observe() - else: - observer = defer.succeed(cached_result_d) - + ret = cache.get(cache_key, callback=invalidate_callback) except KeyError: ret = defer.maybeDeferred(preserve_fn(self.orig), obj, *args, **kwargs) + ret = cache.set(cache_key, ret, callback=invalidate_callback) - def onErr(f): - cache.invalidate(cache_key) - return f - - ret.addErrback(onErr) - - result_d = cache.set(cache_key, ret, callback=invalidate_callback) - observer = result_d.observe() - - return make_deferred_yieldable(observer) + return make_deferred_yieldable(ret) wrapped = cast(_CachedFunction, _wrapped) @@ -286,7 +271,7 @@ def __init__(self, orig, cached_method_name, list_name, num_args=None): def __get__(self, obj, objtype=None): cached_method = getattr(obj, self.cached_method_name) - cache = cached_method.cache + cache = cached_method.cache # type: DeferredCache[CacheKey, Any] num_args = cached_method.num_args @functools.wraps(self.orig) @@ -326,14 +311,11 @@ def arg_to_cache_key(arg): for arg in list_args: try: res = cache.get(arg_to_cache_key(arg), callback=invalidate_callback) - if not isinstance(res, ObservableDeferred): - results[arg] = res - elif not res.has_succeeded(): - res = res.observe() + if not res.called: res.addCallback(update_results_dict, arg) cached_defers.append(res) else: - results[arg] = res.get_result() + results[arg] = res.result except KeyError: missing.add(arg) diff --git a/tests/util/caches/test_deferred_cache.py b/tests/util/caches/test_deferred_cache.py index 8a08ab6661d8..68d26128c103 100644 --- a/tests/util/caches/test_deferred_cache.py +++ b/tests/util/caches/test_deferred_cache.py @@ -13,15 +13,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
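As a rough illustration of the `get()`/`set()` contract described in the docstrings above (this sketch is not part of the patch: the keys and values are invented, but `DeferredCache`, `prefill()`, `set()` and `get()` are the methods exercised by the tests that follow):

    from twisted.internet import defer

    from synapse.util.caches.deferred_cache import DeferredCache

    cache = DeferredCache("example")  # type: DeferredCache[str, int]

    # A hit on a prefilled key now comes back as an already-completed Deferred.
    cache.prefill("hit", 1)
    assert cache.get("hit").result == 1

    # While a set() is in flight, get() returns a pending Deferred which
    # completes with the same result as the original value.
    pending = defer.Deferred()
    set_d = cache.set("slow", pending)
    get_d = cache.get("slow")
    pending.callback(2)
    assert get_d.result == 2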
-import unittest from functools import partial from twisted.internet import defer from synapse.util.caches.deferred_cache import DeferredCache +from tests.unittest import TestCase -class DeferredCacheTestCase(unittest.TestCase): + +class DeferredCacheTestCase(TestCase): def test_empty(self): cache = DeferredCache("test") failed = False @@ -36,7 +37,7 @@ def test_hit(self): cache = DeferredCache("test") cache.prefill("foo", 123) - self.assertEquals(cache.get("foo"), 123) + self.assertEquals(self.successResultOf(cache.get("foo")), 123) def test_get_immediate(self): cache = DeferredCache("test") @@ -82,16 +83,15 @@ def record_callback(idx): d2 = defer.Deferred() cache.set("key2", d2, partial(record_callback, 1)) - # lookup should return observable deferreds - self.assertFalse(cache.get("key1").has_called()) - self.assertFalse(cache.get("key2").has_called()) + # lookup should return pending deferreds + self.assertFalse(cache.get("key1").called) + self.assertFalse(cache.get("key2").called) # let one of the lookups complete d2.callback("result2") - # for now at least, the cache will return real results rather than an - # observabledeferred - self.assertEqual(cache.get("key2"), "result2") + # now the cache will return a completed deferred + self.assertEqual(self.successResultOf(cache.get("key2")), "result2") # now do the invalidation cache.invalidate_all() diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 3d738afa7f6f..fc2663c02d65 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -27,7 +27,6 @@ current_context, make_deferred_yieldable, ) -from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches import descriptors from synapse.util.caches.descriptors import cached @@ -419,9 +418,9 @@ def func(self, key): a = A() - a.func.prefill(("foo",), ObservableDeferred(d)) + a.func.prefill(("foo",), 456) - self.assertEquals(a.func("foo").result, d.result) + self.assertEquals(a.func("foo").result, 456) self.assertEquals(callcount[0], 0) @defer.inlineCallbacks From 6d3905c7c7a53eed7a856aa013f6a9bf9292eb7a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 16 Oct 2020 21:32:52 +0100 Subject: [PATCH 231/245] Add some more tests --- tests/util/caches/test_deferred_cache.py | 95 ++++++++++++++++++++++++ tests/util/caches/test_descriptors.py | 52 +++++++++++++ 2 files changed, 147 insertions(+) diff --git a/tests/util/caches/test_deferred_cache.py b/tests/util/caches/test_deferred_cache.py index 68d26128c103..dadfabd46d1d 100644 --- a/tests/util/caches/test_deferred_cache.py +++ b/tests/util/caches/test_deferred_cache.py @@ -39,6 +39,101 @@ def test_hit(self): self.assertEquals(self.successResultOf(cache.get("foo")), 123) + def test_hit_deferred(self): + cache = DeferredCache("test") + origin_d = defer.Deferred() + set_d = cache.set("k1", origin_d) + + # get should return an incomplete deferred + get_d = cache.get("k1") + self.assertFalse(get_d.called) + + # add a callback that will make sure that the set_d gets called before the get_d + def check1(r): + self.assertTrue(set_d.called) + return r + + # TODO: Actually ObservableDeferred *doesn't* run its tests in order on py3.8. + # maybe we should fix that? 
+ # get_d.addCallback(check1) + + # now fire off all the deferreds + origin_d.callback(99) + self.assertEqual(self.successResultOf(origin_d), 99) + self.assertEqual(self.successResultOf(set_d), 99) + self.assertEqual(self.successResultOf(get_d), 99) + + def test_callbacks(self): + """Invalidation callbacks are called at the right time""" + cache = DeferredCache("test") + callbacks = set() + + # start with an entry, with a callback + cache.prefill("k1", 10, callback=lambda: callbacks.add("prefill")) + + # now replace that entry with a pending result + origin_d = defer.Deferred() + set_d = cache.set("k1", origin_d, callback=lambda: callbacks.add("set")) + + # ... and also make a get request + get_d = cache.get("k1", callback=lambda: callbacks.add("get")) + + # we don't expect the invalidation callback for the original value to have + # been called yet, even though get() will now return a different result. + # I'm not sure if that is by design or not. + self.assertEqual(callbacks, set()) + + # now fire off all the deferreds + origin_d.callback(20) + self.assertEqual(self.successResultOf(set_d), 20) + self.assertEqual(self.successResultOf(get_d), 20) + + # now the original invalidation callback should have been called, but none of + # the others + self.assertEqual(callbacks, {"prefill"}) + callbacks.clear() + + # another update should invalidate both the previous results + cache.prefill("k1", 30) + self.assertEqual(callbacks, {"set", "get"}) + + def test_set_fail(self): + cache = DeferredCache("test") + callbacks = set() + + # start with an entry, with a callback + cache.prefill("k1", 10, callback=lambda: callbacks.add("prefill")) + + # now replace that entry with a pending result + origin_d = defer.Deferred() + set_d = cache.set("k1", origin_d, callback=lambda: callbacks.add("set")) + + # ... and also make a get request + get_d = cache.get("k1", callback=lambda: callbacks.add("get")) + + # none of the callbacks should have been called yet + self.assertEqual(callbacks, set()) + + # oh noes! fails! + e = Exception("oops") + origin_d.errback(e) + self.assertIs(self.failureResultOf(set_d, Exception).value, e) + self.assertIs(self.failureResultOf(get_d, Exception).value, e) + + # the callbacks for the failed requests should have been called. + # I'm not sure if this is deliberate or not. + self.assertEqual(callbacks, {"get", "set"}) + callbacks.clear() + + # the old value should still be returned now? + get_d2 = cache.get("k1", callback=lambda: callbacks.add("get2")) + self.assertEqual(self.successResultOf(get_d2), 10) + + # replacing the value now should run the callbacks for those requests + # which got the original result + cache.prefill("k1", 30) + self.assertEqual(callbacks, {"prefill", "get2"}) + def test_get_immediate(self): cache = DeferredCache("test") d1 = defer.Deferred() diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index fc2663c02d65..2ad08f541bb9 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging +from typing import Set import mock @@ -130,6 +131,57 @@ def fn(self, arg1): d = obj.fn(1) self.failureResultOf(d, SynapseError) + def test_cache_with_async_exception(self): + """The wrapped function returns a failure + """ + + class Cls: + result = None + call_count = 0 + + @cached() + def fn(self, arg1): + self.call_count += 1 + return self.result + + obj = Cls() + callbacks = set() # type: Set[str] + + # set off an asynchronous request + obj.result = origin_d = defer.Deferred() + + d1 = obj.fn(1, on_invalidate=lambda: callbacks.add("d1")) + self.assertFalse(d1.called) + + # a second request should also return a deferred, but should not call the + # function itself. + d2 = obj.fn(1, on_invalidate=lambda: callbacks.add("d2")) + self.assertFalse(d2.called) + self.assertEqual(obj.call_count, 1) + + # no callbacks yet + self.assertEqual(callbacks, set()) + + # the original request fails + e = Exception("bzz") + origin_d.errback(e) + + # ... which should cause the lookups to fail similarly + self.assertIs(self.failureResultOf(d1, Exception).value, e) + self.assertIs(self.failureResultOf(d2, Exception).value, e) + + # ... and the callbacks to have been, uh, called. + self.assertEqual(callbacks, {"d1", "d2"}) + + # ... leaving the cache empty + self.assertEqual(len(obj.fn.cache.cache), 0) + + # and a second call should work as normal + obj.result = defer.succeed(100) + d3 = obj.fn(1) + self.assertEqual(self.successResultOf(d3), 100) + self.assertEqual(obj.call_count, 2) + def test_cache_logcontexts(self): """Check that logcontexts are set and restored correctly when using the cache.""" From 2b3af01791120de5d0b829395109b42870d9e465 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 16 Oct 2020 13:16:02 +0100 Subject: [PATCH 232/245] optimise DeferredCache.set --- changelog.d/8593.misc | 1 + synapse/util/caches/deferred_cache.py | 15 ++++++++++++--- 2 files changed, 13 insertions(+), 3 deletions(-) create mode 100644 changelog.d/8593.misc diff --git a/changelog.d/8593.misc b/changelog.d/8593.misc new file mode 100644 index 000000000000..d266ba19a4dc --- /dev/null +++ b/changelog.d/8593.misc @@ -0,0 +1 @@ +Minor optimisations in caching code. diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py index 6c162e9f3444..fc0102628546 100644 --- a/synapse/util/caches/deferred_cache.py +++ b/synapse/util/caches/deferred_cache.py @@ -214,9 +214,6 @@ def set( callbacks = [callback] if callback else [] self.check_thread() - observable = ObservableDeferred(value, consumeErrors=True) - observer = observable.observe() - entry = CacheEntry(deferred=observable, callbacks=callbacks) existing_entry = self._pending_deferred_cache.pop(key, None) if existing_entry: @@ -224,6 +221,18 @@ def set( # XXX: why don't we invalidate the entry in `self.cache` yet? + # we can save a whole load of effort if the deferred is ready. + if value.called: + self.cache.set(key, value.result, callbacks) + return value + + # otherwise, we'll add an entry to the _pending_deferred_cache for now, + # and add callbacks to add it to the cache properly later. 
+
+        observable = ObservableDeferred(value, consumeErrors=True)
+        observer = observable.observe()
+        entry = CacheEntry(deferred=observable, callbacks=callbacks)
+
         self._pending_deferred_cache[key] = entry
 
         def compare_and_pop():
From c13820bcee5f23119b65f9386b7c03bd4a33acbe Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Wed, 21 Oct 2020 18:54:53 +0100
Subject: [PATCH 233/245] fix failure case

---
 synapse/util/caches/deferred_cache.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py
index fc0102628546..601305487c55 100644
--- a/synapse/util/caches/deferred_cache.py
+++ b/synapse/util/caches/deferred_cache.py
@@ -31,6 +31,7 @@
 from prometheus_client import Gauge
 
 from twisted.internet import defer
+from twisted.python import failure
 
 from synapse.util.async_helpers import ObservableDeferred
 from synapse.util.caches.lrucache import LruCache
@@ -223,7 +224,9 @@ def set(
 
         # we can save a whole load of effort if the deferred is ready.
         if value.called:
-            self.cache.set(key, value.result, callbacks)
+            result = value.result
+            if not isinstance(result, failure.Failure):
+                self.cache.set(key, result, callbacks)
             return value
 
         # otherwise, we'll add an entry to the _pending_deferred_cache for now,
From b28aaeb3a567ce1bbfd41796362b8bb0813ed0e3 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 21 Oct 2020 22:57:45 +0100
Subject: [PATCH 234/245] Optimise CacheDescriptor (#8594)

don't bother constructing a CacheContext unless we need one.
---
 changelog.d/8594.misc              |  1 +
 synapse/util/caches/descriptors.py | 12 +++++++-----
 2 files changed, 8 insertions(+), 5 deletions(-)
 create mode 100644 changelog.d/8594.misc

diff --git a/changelog.d/8594.misc b/changelog.d/8594.misc
new file mode 100644
index 000000000000..d266ba19a4dc
--- /dev/null
+++ b/changelog.d/8594.misc
@@ -0,0 +1 @@
+Minor optimisations in caching code.
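For context, a rough sketch of the pattern this commit optimises (the store class and method names here are invented; `@cached(cache_context=True)` and `on_invalidate` are the real API, as exercised by the tests earlier in this series). After this change the `_CacheContext` is only constructed in the cache-miss path, i.e. when the wrapped function actually has to run:

    from synapse.util.caches.descriptors import cached

    class ExampleStore:
        @cached()
        def get_thing(self, key):
            return key  # stand-in for a real database lookup

        @cached(cache_context=True)
        def get_derived_thing(self, key, cache_context):
            # Chained invalidation: evicting get_thing's entry also drops this one.
            return self.get_thing(key, on_invalidate=cache_context.invalidate)

On a hit, `get_derived_thing` is answered straight from its cache and the `cache_context` argument is never needed.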
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index a4172345ef55..5d7fffee66ea 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -201,14 +201,16 @@ def _wrapped(*args, **kwargs): cache_key = get_cache_key(args, kwargs) - # Add our own `cache_context` to argument list if the wrapped function - # has asked for one - if self.add_cache_context: - kwargs["cache_context"] = _CacheContext.get_instance(cache, cache_key) - try: ret = cache.get(cache_key, callback=invalidate_callback) except KeyError: + # Add our own `cache_context` to argument list if the wrapped function + # has asked for one + if self.add_cache_context: + kwargs["cache_context"] = _CacheContext.get_instance( + cache, cache_key + ) + ret = defer.maybeDeferred(preserve_fn(self.orig), obj, *args, **kwargs) ret = cache.set(cache_key, ret, callback=invalidate_callback) From ec0e9c4695cbfb2180751c0b6c0453ed5c035e63 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 22 Oct 2020 13:08:42 +0100 Subject: [PATCH 235/245] 1.22.0rc1 --- CHANGES.md | 105 +++++++++++++++++++++++++++++++++++++++ changelog.d/7658.feature | 1 - changelog.d/7921.docker | 1 - changelog.d/8292.feature | 1 - changelog.d/8312.feature | 1 - changelog.d/8369.feature | 1 - changelog.d/8380.feature | 1 - changelog.d/8390.docker | 1 - changelog.d/8407.misc | 1 - changelog.d/8432.misc | 1 - changelog.d/8433.misc | 1 - changelog.d/8437.feature | 1 - changelog.d/8439.misc | 1 - changelog.d/8443.misc | 1 - changelog.d/8448.misc | 1 - changelog.d/8450.misc | 1 - changelog.d/8452.misc | 1 - changelog.d/8454.bugfix | 1 - changelog.d/8457.bugfix | 1 - changelog.d/8458.feature | 1 - changelog.d/8461.feature | 1 - changelog.d/8462.doc | 1 - changelog.d/8463.misc | 1 - changelog.d/8464.misc | 1 - changelog.d/8465.bugfix | 1 - changelog.d/8467.feature | 1 - changelog.d/8468.misc | 1 - changelog.d/8472.misc | 1 - changelog.d/8474.misc | 1 - changelog.d/8476.bugfix | 1 - changelog.d/8477.misc | 1 - changelog.d/8479.feature | 1 - changelog.d/8480.misc | 1 - changelog.d/8486.bugfix | 1 - changelog.d/8488.misc | 1 - changelog.d/8489.feature | 1 - changelog.d/8492.misc | 1 - changelog.d/8493.doc | 1 - changelog.d/8494.misc | 1 - changelog.d/8496.misc | 1 - changelog.d/8497.misc | 1 - changelog.d/8499.misc | 1 - changelog.d/8501.feature | 1 - changelog.d/8502.feature | 1 - changelog.d/8503.misc | 1 - changelog.d/8504.bugfix | 1 - changelog.d/8505.misc | 1 - changelog.d/8507.misc | 1 - changelog.d/8513.feature | 1 - changelog.d/8514.misc | 1 - changelog.d/8515.misc | 1 - changelog.d/8517.bugfix | 1 - changelog.d/8526.doc | 1 - changelog.d/8527.bugfix | 1 - changelog.d/8529.doc | 1 - changelog.d/8535.feature | 1 - changelog.d/8536.bugfix | 1 - changelog.d/8537.misc | 1 - changelog.d/8542.misc | 1 - changelog.d/8544.feature | 1 - changelog.d/8545.bugfix | 1 - changelog.d/8547.misc | 1 - changelog.d/8548.misc | 1 - changelog.d/8561.misc | 1 - changelog.d/8562.misc | 1 - changelog.d/8563.misc | 1 - changelog.d/8564.feature | 1 - changelog.d/8566.misc | 1 - changelog.d/8567.bugfix | 1 - changelog.d/8568.misc | 1 - changelog.d/8569.misc | 1 - changelog.d/8571.misc | 1 - changelog.d/8572.misc | 1 - changelog.d/8577.misc | 1 - changelog.d/8578.misc | 1 - changelog.d/8583.misc | 1 - changelog.d/8585.bugfix | 1 - changelog.d/8587.misc | 1 - changelog.d/8589.removal | 1 - changelog.d/8590.misc | 1 - changelog.d/8591.misc | 1 - changelog.d/8592.misc | 1 - changelog.d/8593.misc | 1 - changelog.d/8594.misc | 1 - 
changelog.d/8599.feature | 1 - changelog.d/8600.misc | 1 - changelog.d/8606.feature | 1 - changelog.d/8609.misc | 1 - synapse/__init__.py | 2 +- 89 files changed, 106 insertions(+), 88 deletions(-) delete mode 100644 changelog.d/7658.feature delete mode 100644 changelog.d/7921.docker delete mode 100644 changelog.d/8292.feature delete mode 100644 changelog.d/8312.feature delete mode 100644 changelog.d/8369.feature delete mode 100644 changelog.d/8380.feature delete mode 100644 changelog.d/8390.docker delete mode 100644 changelog.d/8407.misc delete mode 100644 changelog.d/8432.misc delete mode 100644 changelog.d/8433.misc delete mode 100644 changelog.d/8437.feature delete mode 100644 changelog.d/8439.misc delete mode 100644 changelog.d/8443.misc delete mode 100644 changelog.d/8448.misc delete mode 100644 changelog.d/8450.misc delete mode 100644 changelog.d/8452.misc delete mode 100644 changelog.d/8454.bugfix delete mode 100644 changelog.d/8457.bugfix delete mode 100644 changelog.d/8458.feature delete mode 100644 changelog.d/8461.feature delete mode 100644 changelog.d/8462.doc delete mode 100644 changelog.d/8463.misc delete mode 100644 changelog.d/8464.misc delete mode 100644 changelog.d/8465.bugfix delete mode 100644 changelog.d/8467.feature delete mode 100644 changelog.d/8468.misc delete mode 100644 changelog.d/8472.misc delete mode 100644 changelog.d/8474.misc delete mode 100644 changelog.d/8476.bugfix delete mode 100644 changelog.d/8477.misc delete mode 100644 changelog.d/8479.feature delete mode 100644 changelog.d/8480.misc delete mode 100644 changelog.d/8486.bugfix delete mode 100644 changelog.d/8488.misc delete mode 100644 changelog.d/8489.feature delete mode 100644 changelog.d/8492.misc delete mode 100644 changelog.d/8493.doc delete mode 100644 changelog.d/8494.misc delete mode 100644 changelog.d/8496.misc delete mode 100644 changelog.d/8497.misc delete mode 100644 changelog.d/8499.misc delete mode 100644 changelog.d/8501.feature delete mode 100644 changelog.d/8502.feature delete mode 100644 changelog.d/8503.misc delete mode 100644 changelog.d/8504.bugfix delete mode 100644 changelog.d/8505.misc delete mode 100644 changelog.d/8507.misc delete mode 100644 changelog.d/8513.feature delete mode 100644 changelog.d/8514.misc delete mode 100644 changelog.d/8515.misc delete mode 100644 changelog.d/8517.bugfix delete mode 100644 changelog.d/8526.doc delete mode 100644 changelog.d/8527.bugfix delete mode 100644 changelog.d/8529.doc delete mode 100644 changelog.d/8535.feature delete mode 100644 changelog.d/8536.bugfix delete mode 100644 changelog.d/8537.misc delete mode 100644 changelog.d/8542.misc delete mode 100644 changelog.d/8544.feature delete mode 100644 changelog.d/8545.bugfix delete mode 100644 changelog.d/8547.misc delete mode 100644 changelog.d/8548.misc delete mode 100644 changelog.d/8561.misc delete mode 100644 changelog.d/8562.misc delete mode 100644 changelog.d/8563.misc delete mode 100644 changelog.d/8564.feature delete mode 100644 changelog.d/8566.misc delete mode 100644 changelog.d/8567.bugfix delete mode 100644 changelog.d/8568.misc delete mode 100644 changelog.d/8569.misc delete mode 100644 changelog.d/8571.misc delete mode 100644 changelog.d/8572.misc delete mode 100644 changelog.d/8577.misc delete mode 100644 changelog.d/8578.misc delete mode 100644 changelog.d/8583.misc delete mode 100644 changelog.d/8585.bugfix delete mode 100644 changelog.d/8587.misc delete mode 100644 changelog.d/8589.removal delete mode 100644 changelog.d/8590.misc delete mode 100644 
changelog.d/8591.misc delete mode 100644 changelog.d/8592.misc delete mode 100644 changelog.d/8593.misc delete mode 100644 changelog.d/8594.misc delete mode 100644 changelog.d/8599.feature delete mode 100644 changelog.d/8600.misc delete mode 100644 changelog.d/8606.feature delete mode 100644 changelog.d/8609.misc diff --git a/CHANGES.md b/CHANGES.md index 38a0814bbf53..2015680bc04d 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,108 @@ +Synapse 1.22.0rc1 (2020-10-22) +============================== + +Features +-------- + +- Add a configuration option for always using the "userinfo endpoint" for OpenID Connect. This fixes support for some identity providers, e.g. GitLab. Contributed by Benjamin Koch. ([\#7658](https://github.com/matrix-org/synapse/issues/7658)) +- Allow `ThirdPartyEventRules` modules to query and manipulate whether a room is in the public rooms directory. ([\#8292](https://github.com/matrix-org/synapse/issues/8292), [\#8467](https://github.com/matrix-org/synapse/issues/8467)) +- Add support for olm fallback keys ([MSC2732](https://github.com/matrix-org/matrix-doc/pull/2732)). ([\#8312](https://github.com/matrix-org/synapse/issues/8312), [\#8501](https://github.com/matrix-org/synapse/issues/8501)) +- Allow running background tasks in a separate worker process. ([\#8369](https://github.com/matrix-org/synapse/issues/8369), [\#8458](https://github.com/matrix-org/synapse/issues/8458), [\#8489](https://github.com/matrix-org/synapse/issues/8489), [\#8513](https://github.com/matrix-org/synapse/issues/8513), [\#8544](https://github.com/matrix-org/synapse/issues/8544), [\#8599](https://github.com/matrix-org/synapse/issues/8599)) +- Add support for device dehydration ([MSC2697](https://github.com/matrix-org/matrix-doc/pull/2697)). ([\#8380](https://github.com/matrix-org/synapse/issues/8380)) +- Implement [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409) to send typing, read receipts, and presence events to appservices. ([\#8437](https://github.com/matrix-org/synapse/issues/8437)) +- Change default room version to "6", per [MSC2788](https://github.com/matrix-org/matrix-doc/pull/2788). ([\#8461](https://github.com/matrix-org/synapse/issues/8461)) +- Add the ability to send non-membership events into a room via the `ModuleApi`. ([\#8479](https://github.com/matrix-org/synapse/issues/8479)) +- Increase default upload size limit from 10M to 50M. Contributed by @Akkowicz. ([\#8502](https://github.com/matrix-org/synapse/issues/8502)) +- Support modifying event content in `ThirdPartyRules` modules. ([\#8535](https://github.com/matrix-org/synapse/issues/8535), [\#8564](https://github.com/matrix-org/synapse/issues/8564)) +- Limit appservice transactions to 100 persistent and 100 ephemeral events. ([\#8606](https://github.com/matrix-org/synapse/issues/8606)) + + +Bugfixes +-------- + +- Fix a longstanding bug where invalid ignored users in account data could break clients. ([\#8454](https://github.com/matrix-org/synapse/issues/8454)) +- Fix a bug where backfilling a room with an event that was missing the `redacts` field would break. ([\#8457](https://github.com/matrix-org/synapse/issues/8457)) +- Don't attempt to respond to some requests if the client has already disconnected. ([\#8465](https://github.com/matrix-org/synapse/issues/8465)) +- Fix message duplication if something goes wrong after persisting the event. 
([\#8476](https://github.com/matrix-org/synapse/issues/8476)) +- Fix incremental sync returning an incorrect `prev_batch` token in timeline section, which when used to paginate returned events that were included in the incremental sync. Broken since v0.16.0. ([\#8486](https://github.com/matrix-org/synapse/issues/8486)) +- Expose the `uk.half-shot.msc2778.login.application_service` to clients from the login API. This feature was added in v1.21.0, but was not exposed as a potential login flow. ([\#8504](https://github.com/matrix-org/synapse/issues/8504)) +- Fix error code for `/profile/{userId}/displayname` to be `M_BAD_JSON`. ([\#8517](https://github.com/matrix-org/synapse/issues/8517)) +- Fix a bug introduced in v1.7.0 that could cause Synapse to insert values from non-state `m.room.retention` events into the `room_retention` database table. ([\#8527](https://github.com/matrix-org/synapse/issues/8527)) +- Fix not sending events over federation when using sharded event writers. ([\#8536](https://github.com/matrix-org/synapse/issues/8536)) +- Fix a long standing bug where email notifications for encrypted messages were blank. ([\#8545](https://github.com/matrix-org/synapse/issues/8545)) +- Fix increase in the number of `There was no active span...` errors logged when using OpenTracing. ([\#8567](https://github.com/matrix-org/synapse/issues/8567)) +- Fix a bug that prevented errors encountered during execution of the `synapse_port_db` from being correctly printed. ([\#8585](https://github.com/matrix-org/synapse/issues/8585)) + + +Updates to the Docker image +--------------------------- + +- Added multi-arch support (arm64,arm/v7) for the docker images. Contributed by @maquis196. ([\#7921](https://github.com/matrix-org/synapse/issues/7921)) +- Add support for passing commandline args to the synapse process. Contributed by @samuel-p. ([\#8390](https://github.com/matrix-org/synapse/issues/8390)) + + +Improved Documentation +---------------------- + +- Update the directions for using the manhole with coroutines. ([\#8462](https://github.com/matrix-org/synapse/issues/8462)) +- Improve readme by adding new shield.io badges. ([\#8493](https://github.com/matrix-org/synapse/issues/8493)) +- Added note about docker in manhole.md regarding which ip address to bind to. Contributed by @Maquis196. ([\#8526](https://github.com/matrix-org/synapse/issues/8526)) +- Document the new behaviour of the `allowed_lifetime_min` and `allowed_lifetime_max` settings in the room retention configuration. ([\#8529](https://github.com/matrix-org/synapse/issues/8529)) + + +Deprecations and Removals +------------------------- + +- Drop unused `device_max_stream_id` table. ([\#8589](https://github.com/matrix-org/synapse/issues/8589)) + + +Internal Changes +---------------- + +- Add typing information to the device handler. ([\#8407](https://github.com/matrix-org/synapse/issues/8407)) +- Check for unreachable code with mypy. ([\#8432](https://github.com/matrix-org/synapse/issues/8432)) +- Add unit test for event persister sharding. ([\#8433](https://github.com/matrix-org/synapse/issues/8433)) +- Allow events to be sent to clients sooner when using sharded event persisters. ([\#8439](https://github.com/matrix-org/synapse/issues/8439), [\#8488](https://github.com/matrix-org/synapse/issues/8488), [\#8496](https://github.com/matrix-org/synapse/issues/8496), [\#8499](https://github.com/matrix-org/synapse/issues/8499)) +- Configure `public_baseurl` when using demo scripts. 
([\#8443](https://github.com/matrix-org/synapse/issues/8443))
+- Add SQL logging on queries that happen during startup. ([\#8448](https://github.com/matrix-org/synapse/issues/8448))
+- Speed up unit tests when using PostgreSQL. ([\#8450](https://github.com/matrix-org/synapse/issues/8450))
+- Remove redundant database loads of stream_ordering for events we already have. ([\#8452](https://github.com/matrix-org/synapse/issues/8452))
+- Reduce inconsistencies between codepaths for membership and non-membership events. ([\#8463](https://github.com/matrix-org/synapse/issues/8463))
+- Combine `SpamCheckerApi` with the more generic `ModuleApi`. ([\#8464](https://github.com/matrix-org/synapse/issues/8464))
+- Additional testing for `ThirdPartyEventRules`. ([\#8468](https://github.com/matrix-org/synapse/issues/8468))
+- Add `-d` option to `./scripts-dev/lint.sh` to lint files that have changed since the last git commit. ([\#8472](https://github.com/matrix-org/synapse/issues/8472))
+- Unblacklist some sytests. ([\#8474](https://github.com/matrix-org/synapse/issues/8474))
+- Include the log level in the phone home stats. ([\#8477](https://github.com/matrix-org/synapse/issues/8477))
+- Remove outdated sphinx documentation, scripts and configuration. ([\#8480](https://github.com/matrix-org/synapse/issues/8480))
+- Clarify error message when plugin config parsers raise an error. ([\#8492](https://github.com/matrix-org/synapse/issues/8492))
+- Remove the deprecated `Handlers` object. ([\#8494](https://github.com/matrix-org/synapse/issues/8494))
+- Fix a threadsafety bug in unit tests. ([\#8497](https://github.com/matrix-org/synapse/issues/8497))
+- Add user agent to user_daily_visits table. ([\#8503](https://github.com/matrix-org/synapse/issues/8503))
+- Add type hints to various parts of the code base. ([\#8505](https://github.com/matrix-org/synapse/issues/8505), [\#8507](https://github.com/matrix-org/synapse/issues/8507))
+- Remove unused code from the test framework. ([\#8514](https://github.com/matrix-org/synapse/issues/8514))
+- Apply some internal fixes to the `HomeServer` class to make its code more idiomatic and statically-verifiable. ([\#8515](https://github.com/matrix-org/synapse/issues/8515))
+- Factor out common code between `RoomMemberHandler._locally_reject_invite` and `EventCreationHandler.create_event`. ([\#8537](https://github.com/matrix-org/synapse/issues/8537))
+- Improve database performance by executing more queries without starting transactions. ([\#8542](https://github.com/matrix-org/synapse/issues/8542))
+- Enable mypy type checking for `synapse.util.caches`. ([\#8547](https://github.com/matrix-org/synapse/issues/8547))
+- Rename `Cache` to `DeferredCache`, to better reflect its purpose. ([\#8548](https://github.com/matrix-org/synapse/issues/8548))
+- Move metric registration code down into `LruCache`. ([\#8561](https://github.com/matrix-org/synapse/issues/8561), [\#8591](https://github.com/matrix-org/synapse/issues/8591))
+- Add type annotations for `LruCache`. ([\#8562](https://github.com/matrix-org/synapse/issues/8562))
+- Replace `DeferredCache` with the lighter-weight `LruCache` where possible. ([\#8563](https://github.com/matrix-org/synapse/issues/8563))
+- Add virtualenv-generated folders to `.gitignore`. ([\#8566](https://github.com/matrix-org/synapse/issues/8566))
+- Add `get_immediate` method to `DeferredCache`.
([\#8568](https://github.com/matrix-org/synapse/issues/8568)) +- Fix mypy not properly checking across the codebase, additionally, fix a typing assertion error in `handlers/auth.py`. ([\#8569](https://github.com/matrix-org/synapse/issues/8569)) +- Fix `synmark` benchmark runner. ([\#8571](https://github.com/matrix-org/synapse/issues/8571)) +- Modify `DeferredCache.get()` to return `Deferred`s instead of `ObservableDeferred`s. ([\#8572](https://github.com/matrix-org/synapse/issues/8572)) +- Adjust a protocol-type definition to fit `sqlite3` assertions. ([\#8577](https://github.com/matrix-org/synapse/issues/8577)) +- Support macOS on the `synmark` benchmark runner. ([\#8578](https://github.com/matrix-org/synapse/issues/8578)) +- Update `mypy` static type checker to 0.790. ([\#8583](https://github.com/matrix-org/synapse/issues/8583), [\#8600](https://github.com/matrix-org/synapse/issues/8600)) +- Re-organize the structured logging code to separate the TCP transport handling from the JSON formatting. ([\#8587](https://github.com/matrix-org/synapse/issues/8587)) +- Implement [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409) to send typing, read receipts, and presence events to appservices. ([\#8590](https://github.com/matrix-org/synapse/issues/8590)) +- Remove extraneous unittest logging decorators from unit tests. ([\#8592](https://github.com/matrix-org/synapse/issues/8592)) +- Minor optimisations in caching code. ([\#8593](https://github.com/matrix-org/synapse/issues/8593), [\#8594](https://github.com/matrix-org/synapse/issues/8594)) +- Add type hints to profile and base handler. ([\#8609](https://github.com/matrix-org/synapse/issues/8609)) + + Synapse 1.21.2 (2020-10-15) =========================== diff --git a/changelog.d/7658.feature b/changelog.d/7658.feature deleted file mode 100644 index fbf345988d35..000000000000 --- a/changelog.d/7658.feature +++ /dev/null @@ -1 +0,0 @@ -Add a configuration option for always using the "userinfo endpoint" for OpenID Connect. This fixes support for some identity providers, e.g. GitLab. Contributed by Benjamin Koch. diff --git a/changelog.d/7921.docker b/changelog.d/7921.docker deleted file mode 100644 index 7cecd67c6a26..000000000000 --- a/changelog.d/7921.docker +++ /dev/null @@ -1 +0,0 @@ -Added multi-arch support (arm64,arm/v7) for the docker images. Contributed by @maquis196. diff --git a/changelog.d/8292.feature b/changelog.d/8292.feature deleted file mode 100644 index 6d0335e2c827..000000000000 --- a/changelog.d/8292.feature +++ /dev/null @@ -1 +0,0 @@ -Allow `ThirdPartyEventRules` modules to query and manipulate whether a room is in the public rooms directory. \ No newline at end of file diff --git a/changelog.d/8312.feature b/changelog.d/8312.feature deleted file mode 100644 index 222a1b032a4d..000000000000 --- a/changelog.d/8312.feature +++ /dev/null @@ -1 +0,0 @@ -Add support for olm fallback keys ([MSC2732](https://github.com/matrix-org/matrix-doc/pull/2732)). \ No newline at end of file diff --git a/changelog.d/8369.feature b/changelog.d/8369.feature deleted file mode 100644 index 542993110bc8..000000000000 --- a/changelog.d/8369.feature +++ /dev/null @@ -1 +0,0 @@ -Allow running background tasks in a separate worker process. diff --git a/changelog.d/8380.feature b/changelog.d/8380.feature deleted file mode 100644 index 05ccea19dce4..000000000000 --- a/changelog.d/8380.feature +++ /dev/null @@ -1 +0,0 @@ -Add support for device dehydration ([MSC2697](https://github.com/matrix-org/matrix-doc/pull/2697)). 
diff --git a/changelog.d/8390.docker b/changelog.d/8390.docker deleted file mode 100644 index f71b8e4bbf2d..000000000000 --- a/changelog.d/8390.docker +++ /dev/null @@ -1 +0,0 @@ -Add support for passing commandline args to the synapse process. Contributed by @samuel-p. diff --git a/changelog.d/8407.misc b/changelog.d/8407.misc deleted file mode 100644 index d37002d75bf1..000000000000 --- a/changelog.d/8407.misc +++ /dev/null @@ -1 +0,0 @@ -Add typing information to the device handler. diff --git a/changelog.d/8432.misc b/changelog.d/8432.misc deleted file mode 100644 index 01fdad4caf6a..000000000000 --- a/changelog.d/8432.misc +++ /dev/null @@ -1 +0,0 @@ -Check for unreachable code with mypy. diff --git a/changelog.d/8433.misc b/changelog.d/8433.misc deleted file mode 100644 index 05f8b5bbf41e..000000000000 --- a/changelog.d/8433.misc +++ /dev/null @@ -1 +0,0 @@ -Add unit test for event persister sharding. diff --git a/changelog.d/8437.feature b/changelog.d/8437.feature deleted file mode 100644 index 4abcccb326e0..000000000000 --- a/changelog.d/8437.feature +++ /dev/null @@ -1 +0,0 @@ -Implement [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409) to send typing, read receipts, and presence events to appservices. diff --git a/changelog.d/8439.misc b/changelog.d/8439.misc deleted file mode 100644 index 237cb3b31135..000000000000 --- a/changelog.d/8439.misc +++ /dev/null @@ -1 +0,0 @@ -Allow events to be sent to clients sooner when using sharded event persisters. diff --git a/changelog.d/8443.misc b/changelog.d/8443.misc deleted file mode 100644 index 633598e6b302..000000000000 --- a/changelog.d/8443.misc +++ /dev/null @@ -1 +0,0 @@ -Configure `public_baseurl` when using demo scripts. diff --git a/changelog.d/8448.misc b/changelog.d/8448.misc deleted file mode 100644 index 5ddda1803b9b..000000000000 --- a/changelog.d/8448.misc +++ /dev/null @@ -1 +0,0 @@ -Add SQL logging on queries that happen during startup. diff --git a/changelog.d/8450.misc b/changelog.d/8450.misc deleted file mode 100644 index 4e04c523abef..000000000000 --- a/changelog.d/8450.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up unit tests when using PostgreSQL. diff --git a/changelog.d/8452.misc b/changelog.d/8452.misc deleted file mode 100644 index 8288d91c78b1..000000000000 --- a/changelog.d/8452.misc +++ /dev/null @@ -1 +0,0 @@ -Remove redundant databae loads of stream_ordering for events we already have. diff --git a/changelog.d/8454.bugfix b/changelog.d/8454.bugfix deleted file mode 100644 index c06d490b6f15..000000000000 --- a/changelog.d/8454.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a longstanding bug where invalid ignored users in account data could break clients. diff --git a/changelog.d/8457.bugfix b/changelog.d/8457.bugfix deleted file mode 100644 index 545b06d180c1..000000000000 --- a/changelog.d/8457.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug where backfilling a room with an event that was missing the `redacts` field would break. diff --git a/changelog.d/8458.feature b/changelog.d/8458.feature deleted file mode 100644 index 542993110bc8..000000000000 --- a/changelog.d/8458.feature +++ /dev/null @@ -1 +0,0 @@ -Allow running background tasks in a separate worker process. diff --git a/changelog.d/8461.feature b/changelog.d/8461.feature deleted file mode 100644 index 3665d670e117..000000000000 --- a/changelog.d/8461.feature +++ /dev/null @@ -1 +0,0 @@ -Change default room version to "6", per [MSC2788](https://github.com/matrix-org/matrix-doc/pull/2788). 
diff --git a/changelog.d/8462.doc b/changelog.d/8462.doc deleted file mode 100644 index cf84db6db7f2..000000000000 --- a/changelog.d/8462.doc +++ /dev/null @@ -1 +0,0 @@ -Update the directions for using the manhole with coroutines. diff --git a/changelog.d/8463.misc b/changelog.d/8463.misc deleted file mode 100644 index 040c9bb90f12..000000000000 --- a/changelog.d/8463.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce inconsistencies between codepaths for membership and non-membership events. diff --git a/changelog.d/8464.misc b/changelog.d/8464.misc deleted file mode 100644 index a552e88f9fc8..000000000000 --- a/changelog.d/8464.misc +++ /dev/null @@ -1 +0,0 @@ -Combine `SpamCheckerApi` with the more generic `ModuleApi`. diff --git a/changelog.d/8465.bugfix b/changelog.d/8465.bugfix deleted file mode 100644 index 73f895b26879..000000000000 --- a/changelog.d/8465.bugfix +++ /dev/null @@ -1 +0,0 @@ -Don't attempt to respond to some requests if the client has already disconnected. \ No newline at end of file diff --git a/changelog.d/8467.feature b/changelog.d/8467.feature deleted file mode 100644 index 6d0335e2c827..000000000000 --- a/changelog.d/8467.feature +++ /dev/null @@ -1 +0,0 @@ -Allow `ThirdPartyEventRules` modules to query and manipulate whether a room is in the public rooms directory. \ No newline at end of file diff --git a/changelog.d/8468.misc b/changelog.d/8468.misc deleted file mode 100644 index 32ba991e6421..000000000000 --- a/changelog.d/8468.misc +++ /dev/null @@ -1 +0,0 @@ -Additional testing for `ThirdPartyEventRules`. diff --git a/changelog.d/8472.misc b/changelog.d/8472.misc deleted file mode 100644 index 880f3f5e14fa..000000000000 --- a/changelog.d/8472.misc +++ /dev/null @@ -1 +0,0 @@ -Add `-d` option to `./scripts-dev/lint.sh` to lint files that have changed since the last git commit. \ No newline at end of file diff --git a/changelog.d/8474.misc b/changelog.d/8474.misc deleted file mode 100644 index 65e329a6e3e3..000000000000 --- a/changelog.d/8474.misc +++ /dev/null @@ -1 +0,0 @@ -Unblacklist some sytests. diff --git a/changelog.d/8476.bugfix b/changelog.d/8476.bugfix deleted file mode 100644 index 993a269979af..000000000000 --- a/changelog.d/8476.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix message duplication if something goes wrong after persisting the event. diff --git a/changelog.d/8477.misc b/changelog.d/8477.misc deleted file mode 100644 index 2ee1606b6e32..000000000000 --- a/changelog.d/8477.misc +++ /dev/null @@ -1 +0,0 @@ -Include the log level in the phone home stats. diff --git a/changelog.d/8479.feature b/changelog.d/8479.feature deleted file mode 100644 index 11adeec8a987..000000000000 --- a/changelog.d/8479.feature +++ /dev/null @@ -1 +0,0 @@ -Add the ability to send non-membership events into a room via the `ModuleApi`. \ No newline at end of file diff --git a/changelog.d/8480.misc b/changelog.d/8480.misc deleted file mode 100644 index 81633af2965e..000000000000 --- a/changelog.d/8480.misc +++ /dev/null @@ -1 +0,0 @@ -Remove outdated sphinx documentation, scripts and configuration. \ No newline at end of file diff --git a/changelog.d/8486.bugfix b/changelog.d/8486.bugfix deleted file mode 100644 index 63fc091ba674..000000000000 --- a/changelog.d/8486.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix incremental sync returning an incorrect `prev_batch` token in timeline section, which when used to paginate returned events that were included in the incremental sync. Broken since v0.16.0. 
diff --git a/changelog.d/8488.misc b/changelog.d/8488.misc deleted file mode 100644 index 237cb3b31135..000000000000 --- a/changelog.d/8488.misc +++ /dev/null @@ -1 +0,0 @@ -Allow events to be sent to clients sooner when using sharded event persisters. diff --git a/changelog.d/8489.feature b/changelog.d/8489.feature deleted file mode 100644 index 22591870a49f..000000000000 --- a/changelog.d/8489.feature +++ /dev/null @@ -1 +0,0 @@ - Allow running background tasks in a separate worker process. diff --git a/changelog.d/8492.misc b/changelog.d/8492.misc deleted file mode 100644 index a344aee791b2..000000000000 --- a/changelog.d/8492.misc +++ /dev/null @@ -1 +0,0 @@ -Clarify error message when plugin config parsers raise an error. diff --git a/changelog.d/8493.doc b/changelog.d/8493.doc deleted file mode 100644 index 26797cd99e3b..000000000000 --- a/changelog.d/8493.doc +++ /dev/null @@ -1 +0,0 @@ -Improve readme by adding new shield.io badges. diff --git a/changelog.d/8494.misc b/changelog.d/8494.misc deleted file mode 100644 index 6e56c6b8548c..000000000000 --- a/changelog.d/8494.misc +++ /dev/null @@ -1 +0,0 @@ -Remove the deprecated `Handlers` object. diff --git a/changelog.d/8496.misc b/changelog.d/8496.misc deleted file mode 100644 index 237cb3b31135..000000000000 --- a/changelog.d/8496.misc +++ /dev/null @@ -1 +0,0 @@ -Allow events to be sent to clients sooner when using sharded event persisters. diff --git a/changelog.d/8497.misc b/changelog.d/8497.misc deleted file mode 100644 index 8bc05e8df63b..000000000000 --- a/changelog.d/8497.misc +++ /dev/null @@ -1 +0,0 @@ -Fix a threadsafety bug in unit tests. diff --git a/changelog.d/8499.misc b/changelog.d/8499.misc deleted file mode 100644 index 237cb3b31135..000000000000 --- a/changelog.d/8499.misc +++ /dev/null @@ -1 +0,0 @@ -Allow events to be sent to clients sooner when using sharded event persisters. diff --git a/changelog.d/8501.feature b/changelog.d/8501.feature deleted file mode 100644 index 5220ddd48252..000000000000 --- a/changelog.d/8501.feature +++ /dev/null @@ -1 +0,0 @@ -Add support for olm fallback keys ([MSC2732](https://github.com/matrix-org/matrix-doc/pull/2732)). diff --git a/changelog.d/8502.feature b/changelog.d/8502.feature deleted file mode 100644 index faab8d30422d..000000000000 --- a/changelog.d/8502.feature +++ /dev/null @@ -1 +0,0 @@ -Increase default upload size limit from 10M to 50M. Contributed by @Akkowicz. diff --git a/changelog.d/8503.misc b/changelog.d/8503.misc deleted file mode 100644 index edb1be8aa8e9..000000000000 --- a/changelog.d/8503.misc +++ /dev/null @@ -1 +0,0 @@ -Add user agent to user_daily_visits table. diff --git a/changelog.d/8504.bugfix b/changelog.d/8504.bugfix deleted file mode 100644 index 2bd0dbb8b464..000000000000 --- a/changelog.d/8504.bugfix +++ /dev/null @@ -1 +0,0 @@ -Expose the `uk.half-shot.msc2778.login.application_service` to clients from the login API. This feature was added in v1.21.0, but was not exposed as a potential login flow. diff --git a/changelog.d/8505.misc b/changelog.d/8505.misc deleted file mode 100644 index 5aa5c113bd78..000000000000 --- a/changelog.d/8505.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to various parts of the code base. diff --git a/changelog.d/8507.misc b/changelog.d/8507.misc deleted file mode 100644 index 724da8a9960e..000000000000 --- a/changelog.d/8507.misc +++ /dev/null @@ -1 +0,0 @@ - Add type hints to various parts of the code base. 
diff --git a/changelog.d/8513.feature b/changelog.d/8513.feature deleted file mode 100644 index 542993110bc8..000000000000 --- a/changelog.d/8513.feature +++ /dev/null @@ -1 +0,0 @@ -Allow running background tasks in a separate worker process. diff --git a/changelog.d/8514.misc b/changelog.d/8514.misc deleted file mode 100644 index 0e7ac4f2207d..000000000000 --- a/changelog.d/8514.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unused code from the test framework. diff --git a/changelog.d/8515.misc b/changelog.d/8515.misc deleted file mode 100644 index 1f8aa292d81d..000000000000 --- a/changelog.d/8515.misc +++ /dev/null @@ -1 +0,0 @@ -Apply some internal fixes to the `HomeServer` class to make its code more idiomatic and statically-verifiable. diff --git a/changelog.d/8517.bugfix b/changelog.d/8517.bugfix deleted file mode 100644 index 1ab623c59fcf..000000000000 --- a/changelog.d/8517.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix error code for `/profile/{userId}/displayname` to be `M_BAD_JSON`. diff --git a/changelog.d/8526.doc b/changelog.d/8526.doc deleted file mode 100644 index cbf48680c12f..000000000000 --- a/changelog.d/8526.doc +++ /dev/null @@ -1 +0,0 @@ -Added note about docker in manhole.md regarding which ip address to bind to. Contributed by @Maquis196. diff --git a/changelog.d/8527.bugfix b/changelog.d/8527.bugfix deleted file mode 100644 index 727e0ba2992a..000000000000 --- a/changelog.d/8527.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in v1.7.0 that could cause Synapse to insert values from non-state `m.room.retention` events into the `room_retention` database table. diff --git a/changelog.d/8529.doc b/changelog.d/8529.doc deleted file mode 100644 index 6e710e6527bb..000000000000 --- a/changelog.d/8529.doc +++ /dev/null @@ -1 +0,0 @@ -Document the new behaviour of the `allowed_lifetime_min` and `allowed_lifetime_max` settings in the room retention configuration. diff --git a/changelog.d/8535.feature b/changelog.d/8535.feature deleted file mode 100644 index 45342e66ad7d..000000000000 --- a/changelog.d/8535.feature +++ /dev/null @@ -1 +0,0 @@ -Support modifying event content in `ThirdPartyRules` modules. diff --git a/changelog.d/8536.bugfix b/changelog.d/8536.bugfix deleted file mode 100644 index 8d238cc00853..000000000000 --- a/changelog.d/8536.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix not sending events over federation when using sharded event writers. diff --git a/changelog.d/8537.misc b/changelog.d/8537.misc deleted file mode 100644 index 26309b5b9393..000000000000 --- a/changelog.d/8537.misc +++ /dev/null @@ -1 +0,0 @@ -Factor out common code between `RoomMemberHandler._locally_reject_invite` and `EventCreationHandler.create_event`. diff --git a/changelog.d/8542.misc b/changelog.d/8542.misc deleted file mode 100644 index 63149fd9b982..000000000000 --- a/changelog.d/8542.misc +++ /dev/null @@ -1 +0,0 @@ -Improve database performance by executing more queries without starting transactions. diff --git a/changelog.d/8544.feature b/changelog.d/8544.feature deleted file mode 100644 index 542993110bc8..000000000000 --- a/changelog.d/8544.feature +++ /dev/null @@ -1 +0,0 @@ -Allow running background tasks in a separate worker process. diff --git a/changelog.d/8545.bugfix b/changelog.d/8545.bugfix deleted file mode 100644 index 64ba307df069..000000000000 --- a/changelog.d/8545.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long standing bug where email notifications for encrypted messages were blank. 
diff --git a/changelog.d/8547.misc b/changelog.d/8547.misc deleted file mode 100644 index fafb1c8347b2..000000000000 --- a/changelog.d/8547.misc +++ /dev/null @@ -1 +0,0 @@ -Enable mypy type checking for `synapse.util.caches`. diff --git a/changelog.d/8548.misc b/changelog.d/8548.misc deleted file mode 100644 index fba10bd731ca..000000000000 --- a/changelog.d/8548.misc +++ /dev/null @@ -1 +0,0 @@ -Rename `Cache` to `DeferredCache`, to better reflect its purpose. diff --git a/changelog.d/8561.misc b/changelog.d/8561.misc deleted file mode 100644 index a40dedfa8e6b..000000000000 --- a/changelog.d/8561.misc +++ /dev/null @@ -1 +0,0 @@ -Move metric registration code down into `LruCache`. diff --git a/changelog.d/8562.misc b/changelog.d/8562.misc deleted file mode 100644 index ebdbddb50048..000000000000 --- a/changelog.d/8562.misc +++ /dev/null @@ -1 +0,0 @@ -Add type annotations for `LruCache`. diff --git a/changelog.d/8563.misc b/changelog.d/8563.misc deleted file mode 100644 index eeba8e5fee53..000000000000 --- a/changelog.d/8563.misc +++ /dev/null @@ -1 +0,0 @@ -Replace `DeferredCache` with the lighter-weight `LruCache` where possible. diff --git a/changelog.d/8564.feature b/changelog.d/8564.feature deleted file mode 100644 index 45342e66ad7d..000000000000 --- a/changelog.d/8564.feature +++ /dev/null @@ -1 +0,0 @@ -Support modifying event content in `ThirdPartyRules` modules. diff --git a/changelog.d/8566.misc b/changelog.d/8566.misc deleted file mode 100644 index 453cf48ffa9c..000000000000 --- a/changelog.d/8566.misc +++ /dev/null @@ -1 +0,0 @@ -Add virtualenv-generated folders to `.gitignore`. \ No newline at end of file diff --git a/changelog.d/8567.bugfix b/changelog.d/8567.bugfix deleted file mode 100644 index 4d835df6fd43..000000000000 --- a/changelog.d/8567.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix increase in the number of `There was no active span...` errors logged when using OpenTracing. diff --git a/changelog.d/8568.misc b/changelog.d/8568.misc deleted file mode 100644 index 0ed7db92d355..000000000000 --- a/changelog.d/8568.misc +++ /dev/null @@ -1 +0,0 @@ -Add `get_immediate` method to `DeferredCache`. diff --git a/changelog.d/8569.misc b/changelog.d/8569.misc deleted file mode 100644 index 3b6e0625e57e..000000000000 --- a/changelog.d/8569.misc +++ /dev/null @@ -1 +0,0 @@ -Fix mypy not properly checking across the codebase, additionally, fix a typing assertion error in `handlers/auth.py`. \ No newline at end of file diff --git a/changelog.d/8571.misc b/changelog.d/8571.misc deleted file mode 100644 index f6a65057e0d9..000000000000 --- a/changelog.d/8571.misc +++ /dev/null @@ -1 +0,0 @@ -Fix `synmark` benchmark runner. diff --git a/changelog.d/8572.misc b/changelog.d/8572.misc deleted file mode 100644 index ea2a6d340d71..000000000000 --- a/changelog.d/8572.misc +++ /dev/null @@ -1 +0,0 @@ -Modify `DeferredCache.get()` to return `Deferred`s instead of `ObservableDeferred`s. diff --git a/changelog.d/8577.misc b/changelog.d/8577.misc deleted file mode 100644 index 75fe563a0258..000000000000 --- a/changelog.d/8577.misc +++ /dev/null @@ -1 +0,0 @@ -Adjust a protocol-type definition to fit `sqlite3` assertions. \ No newline at end of file diff --git a/changelog.d/8578.misc b/changelog.d/8578.misc deleted file mode 100644 index e93462255b39..000000000000 --- a/changelog.d/8578.misc +++ /dev/null @@ -1 +0,0 @@ -Support macOS on the `synmark` benchmark runner. 
diff --git a/changelog.d/8583.misc b/changelog.d/8583.misc deleted file mode 100644 index d24973f09af1..000000000000 --- a/changelog.d/8583.misc +++ /dev/null @@ -1 +0,0 @@ -Update `mypy` static type checker to 0.790. \ No newline at end of file diff --git a/changelog.d/8585.bugfix b/changelog.d/8585.bugfix deleted file mode 100644 index e97e6ac1d8cb..000000000000 --- a/changelog.d/8585.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug that prevented errors encountered during execution of the `synapse_port_db` from being correctly printed. \ No newline at end of file diff --git a/changelog.d/8587.misc b/changelog.d/8587.misc deleted file mode 100644 index 9e56551a34bf..000000000000 --- a/changelog.d/8587.misc +++ /dev/null @@ -1 +0,0 @@ -Re-organize the structured logging code to separate the TCP transport handling from the JSON formatting. diff --git a/changelog.d/8589.removal b/changelog.d/8589.removal deleted file mode 100644 index b80f29d6bbd9..000000000000 --- a/changelog.d/8589.removal +++ /dev/null @@ -1 +0,0 @@ -Drop unused `device_max_stream_id` table. diff --git a/changelog.d/8590.misc b/changelog.d/8590.misc deleted file mode 100644 index 4abcccb326e0..000000000000 --- a/changelog.d/8590.misc +++ /dev/null @@ -1 +0,0 @@ -Implement [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409) to send typing, read receipts, and presence events to appservices. diff --git a/changelog.d/8591.misc b/changelog.d/8591.misc deleted file mode 100644 index 8f16bc3e7e22..000000000000 --- a/changelog.d/8591.misc +++ /dev/null @@ -1 +0,0 @@ - Move metric registration code down into `LruCache`. diff --git a/changelog.d/8592.misc b/changelog.d/8592.misc deleted file mode 100644 index 099e8fb7bb2c..000000000000 --- a/changelog.d/8592.misc +++ /dev/null @@ -1 +0,0 @@ -Remove extraneous unittest logging decorators from unit tests. \ No newline at end of file diff --git a/changelog.d/8593.misc b/changelog.d/8593.misc deleted file mode 100644 index d266ba19a4dc..000000000000 --- a/changelog.d/8593.misc +++ /dev/null @@ -1 +0,0 @@ -Minor optimisations in caching code. diff --git a/changelog.d/8594.misc b/changelog.d/8594.misc deleted file mode 100644 index d266ba19a4dc..000000000000 --- a/changelog.d/8594.misc +++ /dev/null @@ -1 +0,0 @@ -Minor optimisations in caching code. diff --git a/changelog.d/8599.feature b/changelog.d/8599.feature deleted file mode 100644 index 542993110bc8..000000000000 --- a/changelog.d/8599.feature +++ /dev/null @@ -1 +0,0 @@ -Allow running background tasks in a separate worker process. diff --git a/changelog.d/8600.misc b/changelog.d/8600.misc deleted file mode 100644 index a5a922e641dc..000000000000 --- a/changelog.d/8600.misc +++ /dev/null @@ -1 +0,0 @@ -Update `mypy` static type checker to 0.790. diff --git a/changelog.d/8606.feature b/changelog.d/8606.feature deleted file mode 100644 index fad723c10859..000000000000 --- a/changelog.d/8606.feature +++ /dev/null @@ -1 +0,0 @@ -Limit appservice transactions to 100 persistent and 100 ephemeral events. diff --git a/changelog.d/8609.misc b/changelog.d/8609.misc deleted file mode 100644 index 5e3f3c199397..000000000000 --- a/changelog.d/8609.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to profile and base handler. 
diff --git a/synapse/__init__.py b/synapse/__init__.py index 83b8e4897f3c..c9d53e767a1b 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -48,7 +48,7 @@ except ImportError: pass -__version__ = "1.21.2" +__version__ = "1.22.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From a622e1ed9fe0a8753447dad1f1d4c637123c9184 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 22 Oct 2020 13:12:22 +0100 Subject: [PATCH 236/245] Fixup changelog --- CHANGES.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 2015680bc04d..11e3539b4255 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,15 +5,15 @@ Features -------- - Add a configuration option for always using the "userinfo endpoint" for OpenID Connect. This fixes support for some identity providers, e.g. GitLab. Contributed by Benjamin Koch. ([\#7658](https://github.com/matrix-org/synapse/issues/7658)) -- Allow `ThirdPartyEventRules` modules to query and manipulate whether a room is in the public rooms directory. ([\#8292](https://github.com/matrix-org/synapse/issues/8292), [\#8467](https://github.com/matrix-org/synapse/issues/8467)) +- Add ability for `ThirdPartyEventRules` modules to query and manipulate whether a room is in the public rooms directory. ([\#8292](https://github.com/matrix-org/synapse/issues/8292), [\#8467](https://github.com/matrix-org/synapse/issues/8467)) - Add support for olm fallback keys ([MSC2732](https://github.com/matrix-org/matrix-doc/pull/2732)). ([\#8312](https://github.com/matrix-org/synapse/issues/8312), [\#8501](https://github.com/matrix-org/synapse/issues/8501)) -- Allow running background tasks in a separate worker process. ([\#8369](https://github.com/matrix-org/synapse/issues/8369), [\#8458](https://github.com/matrix-org/synapse/issues/8458), [\#8489](https://github.com/matrix-org/synapse/issues/8489), [\#8513](https://github.com/matrix-org/synapse/issues/8513), [\#8544](https://github.com/matrix-org/synapse/issues/8544), [\#8599](https://github.com/matrix-org/synapse/issues/8599)) +- Add support for running background tasks in a separate worker process. ([\#8369](https://github.com/matrix-org/synapse/issues/8369), [\#8458](https://github.com/matrix-org/synapse/issues/8458), [\#8489](https://github.com/matrix-org/synapse/issues/8489), [\#8513](https://github.com/matrix-org/synapse/issues/8513), [\#8544](https://github.com/matrix-org/synapse/issues/8544), [\#8599](https://github.com/matrix-org/synapse/issues/8599)) - Add support for device dehydration ([MSC2697](https://github.com/matrix-org/matrix-doc/pull/2697)). ([\#8380](https://github.com/matrix-org/synapse/issues/8380)) -- Implement [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409) to send typing, read receipts, and presence events to appservices. ([\#8437](https://github.com/matrix-org/synapse/issues/8437)) +- Add support for [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409), which allows sending typing, read receipts, and presence events to appservices. ([\#8437](https://github.com/matrix-org/synapse/issues/8437)) - Change default room version to "6", per [MSC2788](https://github.com/matrix-org/matrix-doc/pull/2788). ([\#8461](https://github.com/matrix-org/synapse/issues/8461)) - Add the ability to send non-membership events into a room via the `ModuleApi`. ([\#8479](https://github.com/matrix-org/synapse/issues/8479)) - Increase default upload size limit from 10M to 50M. 
Contributed by @Akkowicz. ([\#8502](https://github.com/matrix-org/synapse/issues/8502)) -- Support modifying event content in `ThirdPartyRules` modules. ([\#8535](https://github.com/matrix-org/synapse/issues/8535), [\#8564](https://github.com/matrix-org/synapse/issues/8564)) +- Add support for modifying event content in `ThirdPartyRules` modules. ([\#8535](https://github.com/matrix-org/synapse/issues/8535), [\#8564](https://github.com/matrix-org/synapse/issues/8564)) - Limit appservice transactions to 100 persistent and 100 ephemeral events. ([\#8606](https://github.com/matrix-org/synapse/issues/8606)) From 88b8b8403c9ee7145cbcc212d10a1f28ac15e624 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 22 Oct 2020 13:19:37 +0100 Subject: [PATCH 237/245] Fixup changelog some more --- CHANGES.md | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 11e3539b4255..70f822d4df93 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -14,7 +14,6 @@ Features - Add the ability to send non-membership events into a room via the `ModuleApi`. ([\#8479](https://github.com/matrix-org/synapse/issues/8479)) - Increase default upload size limit from 10M to 50M. Contributed by @Akkowicz. ([\#8502](https://github.com/matrix-org/synapse/issues/8502)) - Add support for modifying event content in `ThirdPartyRules` modules. ([\#8535](https://github.com/matrix-org/synapse/issues/8535), [\#8564](https://github.com/matrix-org/synapse/issues/8564)) -- Limit appservice transactions to 100 persistent and 100 ephemeral events. ([\#8606](https://github.com/matrix-org/synapse/issues/8606)) Bugfixes @@ -32,6 +31,7 @@ Bugfixes - Fix a long standing bug where email notifications for encrypted messages were blank. ([\#8545](https://github.com/matrix-org/synapse/issues/8545)) - Fix increase in the number of `There was no active span...` errors logged when using OpenTracing. ([\#8567](https://github.com/matrix-org/synapse/issues/8567)) - Fix a bug that prevented errors encountered during execution of the `synapse_port_db` from being correctly printed. ([\#8585](https://github.com/matrix-org/synapse/issues/8585)) +- Fix appservice transactions to only include a maximum of 100 persistent and 100 ephemeral events. ([\#8606](https://github.com/matrix-org/synapse/issues/8606)) Updates to the Docker image @@ -59,14 +59,13 @@ Deprecations and Removals Internal Changes ---------------- -- Add typing information to the device handler. ([\#8407](https://github.com/matrix-org/synapse/issues/8407)) - Check for unreachable code with mypy. ([\#8432](https://github.com/matrix-org/synapse/issues/8432)) - Add unit test for event persister sharding. ([\#8433](https://github.com/matrix-org/synapse/issues/8433)) - Allow events to be sent to clients sooner when using sharded event persisters. ([\#8439](https://github.com/matrix-org/synapse/issues/8439), [\#8488](https://github.com/matrix-org/synapse/issues/8488), [\#8496](https://github.com/matrix-org/synapse/issues/8496), [\#8499](https://github.com/matrix-org/synapse/issues/8499)) - Configure `public_baseurl` when using demo scripts. ([\#8443](https://github.com/matrix-org/synapse/issues/8443)) - Add SQL logging on queries that happen during startup. ([\#8448](https://github.com/matrix-org/synapse/issues/8448)) - Speed up unit tests when using PostgreSQL. ([\#8450](https://github.com/matrix-org/synapse/issues/8450)) -- Remove redundant databae loads of stream_ordering for events we already have. 
([\#8452](https://github.com/matrix-org/synapse/issues/8452)) +- Remove redundant database loads of stream_ordering for events we already have. ([\#8452](https://github.com/matrix-org/synapse/issues/8452)) - Reduce inconsistencies between codepaths for membership and non-membership events. ([\#8463](https://github.com/matrix-org/synapse/issues/8463)) - Combine `SpamCheckerApi` with the more generic `ModuleApi`. ([\#8464](https://github.com/matrix-org/synapse/issues/8464)) - Additional testing for `ThirdPartyEventRules`. ([\#8468](https://github.com/matrix-org/synapse/issues/8468)) @@ -78,15 +77,13 @@ Internal Changes - Remove the deprecated `Handlers` object. ([\#8494](https://github.com/matrix-org/synapse/issues/8494)) - Fix a threadsafety bug in unit tests. ([\#8497](https://github.com/matrix-org/synapse/issues/8497)) - Add user agent to user_daily_visits table. ([\#8503](https://github.com/matrix-org/synapse/issues/8503)) -- Add type hints to various parts of the code base. ([\#8505](https://github.com/matrix-org/synapse/issues/8505), [\#8507](https://github.com/matrix-org/synapse/issues/8507)) +- Add type hints to various parts of the code base. ([\#8407](https://github.com/matrix-org/synapse/issues/8407), [\#8505](https://github.com/matrix-org/synapse/issues/8505), [\#8507](https://github.com/matrix-org/synapse/issues/8507), [\#8547](https://github.com/matrix-org/synapse/issues/8547), [\#8562](https://github.com/matrix-org/synapse/issues/8562), [\#8609](https://github.com/matrix-org/synapse/issues/8609)) - Remove unused code from the test framework. ([\#8514](https://github.com/matrix-org/synapse/issues/8514)) - Apply some internal fixes to the `HomeServer` class to make its code more idiomatic and statically-verifiable. ([\#8515](https://github.com/matrix-org/synapse/issues/8515)) - Factor out common code between `RoomMemberHandler._locally_reject_invite` and `EventCreationHandler.create_event`. ([\#8537](https://github.com/matrix-org/synapse/issues/8537)) - Improve database performance by executing more queries without starting transactions. ([\#8542](https://github.com/matrix-org/synapse/issues/8542)) -- Enable mypy type checking for `synapse.util.caches`. ([\#8547](https://github.com/matrix-org/synapse/issues/8547)) - Rename `Cache` to `DeferredCache`, to better reflect its purpose. ([\#8548](https://github.com/matrix-org/synapse/issues/8548)) - Move metric registration code down into `LruCache`. ([\#8561](https://github.com/matrix-org/synapse/issues/8561), [\#8591](https://github.com/matrix-org/synapse/issues/8591)) -- Add type annotations for `LruCache`. ([\#8562](https://github.com/matrix-org/synapse/issues/8562)) - Replace `DeferredCache` with the lighter-weight `LruCache` where possible. ([\#8563](https://github.com/matrix-org/synapse/issues/8563)) - Add virtualenv-generated folders to `.gitignore`. ([\#8566](https://github.com/matrix-org/synapse/issues/8566)) - Add `get_immediate` method to `DeferredCache`. ([\#8568](https://github.com/matrix-org/synapse/issues/8568)) @@ -100,7 +97,6 @@ Internal Changes - Implement [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409) to send typing, read receipts, and presence events to appservices. ([\#8590](https://github.com/matrix-org/synapse/issues/8590)) - Remove extraneous unittest logging decorators from unit tests. ([\#8592](https://github.com/matrix-org/synapse/issues/8592)) - Minor optimisations in caching code. 
([\#8593](https://github.com/matrix-org/synapse/issues/8593), [\#8594](https://github.com/matrix-org/synapse/issues/8594)) -- Add type hints to profile and base handler. ([\#8609](https://github.com/matrix-org/synapse/issues/8609)) Synapse 1.21.2 (2020-10-15) From 5065048110cc7f425575a2696648fb0b194236df Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 22 Oct 2020 13:25:22 +0100 Subject: [PATCH 238/245] Fixup changelog even more --- CHANGES.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 70f822d4df93..6066a38c5a7b 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -9,7 +9,7 @@ Features - Add support for olm fallback keys ([MSC2732](https://github.com/matrix-org/matrix-doc/pull/2732)). ([\#8312](https://github.com/matrix-org/synapse/issues/8312), [\#8501](https://github.com/matrix-org/synapse/issues/8501)) - Add support for running background tasks in a separate worker process. ([\#8369](https://github.com/matrix-org/synapse/issues/8369), [\#8458](https://github.com/matrix-org/synapse/issues/8458), [\#8489](https://github.com/matrix-org/synapse/issues/8489), [\#8513](https://github.com/matrix-org/synapse/issues/8513), [\#8544](https://github.com/matrix-org/synapse/issues/8544), [\#8599](https://github.com/matrix-org/synapse/issues/8599)) - Add support for device dehydration ([MSC2697](https://github.com/matrix-org/matrix-doc/pull/2697)). ([\#8380](https://github.com/matrix-org/synapse/issues/8380)) -- Add support for [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409), which allows sending typing, read receipts, and presence events to appservices. ([\#8437](https://github.com/matrix-org/synapse/issues/8437)) +- Add support for [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409), which allows sending typing, read receipts, and presence events to appservices. ([\#8437](https://github.com/matrix-org/synapse/issues/8437), [\#8590](https://github.com/matrix-org/synapse/issues/8590)) - Change default room version to "6", per [MSC2788](https://github.com/matrix-org/matrix-doc/pull/2788). ([\#8461](https://github.com/matrix-org/synapse/issues/8461)) - Add the ability to send non-membership events into a room via the `ModuleApi`. ([\#8479](https://github.com/matrix-org/synapse/issues/8479)) - Increase default upload size limit from 10M to 50M. Contributed by @Akkowicz. ([\#8502](https://github.com/matrix-org/synapse/issues/8502)) @@ -94,7 +94,6 @@ Internal Changes - Support macOS on the `synmark` benchmark runner. ([\#8578](https://github.com/matrix-org/synapse/issues/8578)) - Update `mypy` static type checker to 0.790. ([\#8583](https://github.com/matrix-org/synapse/issues/8583), [\#8600](https://github.com/matrix-org/synapse/issues/8600)) - Re-organize the structured logging code to separate the TCP transport handling from the JSON formatting. ([\#8587](https://github.com/matrix-org/synapse/issues/8587)) -- Implement [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409) to send typing, read receipts, and presence events to appservices. ([\#8590](https://github.com/matrix-org/synapse/issues/8590)) - Remove extraneous unittest logging decorators from unit tests. ([\#8592](https://github.com/matrix-org/synapse/issues/8592)) - Minor optimisations in caching code. 
([\#8593](https://github.com/matrix-org/synapse/issues/8593), [\#8594](https://github.com/matrix-org/synapse/issues/8594)) From 437a99fb99f42e64a0488ffe0a394cbc60921254 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 26 Oct 2020 13:16:32 +0000 Subject: [PATCH 239/245] Fix user_daily_visits to not have duplicate rows for UA. (#8654) * Fix user_daily_visits to not have duplicate rows for UA. Fixes #8641. * Newsfile * Fix typo. Co-authored-by: Patrick Cloke --- changelog.d/8654.bugfix | 1 + synapse/storage/databases/main/metrics.py | 9 +++++---- 2 files changed, 6 insertions(+), 4 deletions(-) create mode 100644 changelog.d/8654.bugfix diff --git a/changelog.d/8654.bugfix b/changelog.d/8654.bugfix new file mode 100644 index 000000000000..91d3265b7f82 --- /dev/null +++ b/changelog.d/8654.bugfix @@ -0,0 +1 @@ +Fix `user_daily_visits` to not have duplicate rows for UA. Broke in v1.22.0rc1. diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py index 79b01d16f9d5..ab18cc4d79be 100644 --- a/synapse/storage/databases/main/metrics.py +++ b/synapse/storage/databases/main/metrics.py @@ -282,9 +282,10 @@ def _generate_user_daily_visits(txn): now = self._clock.time_msec() # A note on user_agent. Technically a given device can have multiple - # user agents, so we need to decide which one to pick. We could have handled this - # in number of ways, but given that we don't _that_ much have gone for MAX() - # For more details of the other options considered see + # user agents, so we need to decide which one to pick. We could have + # handled this in number of ways, but given that we don't care + # _that_ much we have gone for MAX(). For more details of the other + # options considered see # https://github.com/matrix-org/synapse/pull/8503#discussion_r502306111 sql = """ INSERT INTO user_daily_visits (user_id, device_id, timestamp, user_agent) @@ -299,7 +300,7 @@ def _generate_user_daily_visits(txn): WHERE last_seen > ? AND last_seen <= ? AND udv.timestamp IS NULL AND users.is_guest=0 AND users.appservice_id IS NULL - GROUP BY u.user_id, u.device_id, u.user_agent + GROUP BY u.user_id, u.device_id """ # This means that the day has rolled over but there could still From 5eda0185612be2e2c15eaba0a607442febd4a5a8 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 26 Oct 2020 09:19:07 -0400 Subject: [PATCH 240/245] Properly handle presence events for application services. (#8656) --- changelog.d/8656.bugfix | 1 + synapse/handlers/appservice.py | 9 +++++---- 2 files changed, 6 insertions(+), 4 deletions(-) create mode 100644 changelog.d/8656.bugfix diff --git a/changelog.d/8656.bugfix b/changelog.d/8656.bugfix new file mode 100644 index 000000000000..d6415e8282af --- /dev/null +++ b/changelog.d/8656.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in v1.22.0rc1 where presence events were not properly passed to application services. 
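To make the `GROUP BY` change in #8654 above concrete: the duplicate rows arose because the insert into `user_daily_visits` also grouped on `user_agent`, so a device seen with two user agents produced two rows per day; grouping on `(user_id, device_id)` and taking `MAX(user_agent)` keeps a single row per device. Below is a minimal sketch of that idea, assuming a simplified `user_ips` table and made-up rows rather than Synapse's real schema and data:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE user_ips (user_id TEXT, device_id TEXT, user_agent TEXT)"
    )
    # One device seen with two different user agents (hypothetical sample data).
    conn.executemany(
        "INSERT INTO user_ips VALUES (?, ?, ?)",
        [
            ("@alice:example.com", "DEV1", "Element/1.7.8"),
            ("@alice:example.com", "DEV1", "Element/1.7.9"),
        ],
    )

    # Grouping on user_agent as well yields two rows for the same device ...
    dupes = conn.execute(
        "SELECT user_id, device_id, user_agent FROM user_ips "
        "GROUP BY user_id, device_id, user_agent"
    ).fetchall()
    print(dupes)   # two rows, one per user agent

    # ... whereas grouping on (user_id, device_id) and taking MAX(user_agent)
    # collapses them to a single row, picking one agent deterministically.
    deduped = conn.execute(
        "SELECT user_id, device_id, MAX(user_agent) FROM user_ips "
        "GROUP BY user_id, device_id"
    ).fetchall()
    print(deduped)  # [('@alice:example.com', 'DEV1', 'Element/1.7.9')]

The real statement is an `INSERT INTO user_daily_visits ... SELECT` with joins, but the grouping behaviour it relies on is the same as in this sketch.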
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 07240d3a14ba..fe8cfc9b18a1 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -238,7 +238,7 @@ async def _handle_receipts(self, service: ApplicationService): async def _handle_presence( self, service: ApplicationService, users: Collection[UserID] - ): + ) -> List[JsonDict]: events = [] # type: List[JsonDict] presence_source = self.event_sources.sources["presence"] from_key = await self.store.get_type_stream_id_for_appservice( @@ -252,7 +252,7 @@ async def _handle_presence( user=user, service=service, from_key=from_key, ) time_now = self.clock.time_msec() - presence_events = [ + events.extend( { "type": "m.presence", "sender": event.user_id, @@ -261,8 +261,9 @@ async def _handle_presence( ), } for event in presence_events - ] - events = events + presence_events + ) + + return events async def query_user_exists(self, user_id): """Check if any application service knows this user_id exists. From 9e0f5a0ac44678cd8a1dcb82ba7e116f8b554c4d Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Mon, 26 Oct 2020 14:51:33 +0000 Subject: [PATCH 241/245] Fix get|set_type_stream_id_for_appservice store functions (#8648) --- changelog.d/8648.bugfix | 1 + synapse/handlers/appservice.py | 12 ++--- synapse/storage/databases/main/appservice.py | 29 +++++++--- tests/storage/test_appservice.py | 56 ++++++++++++++++++++ 4 files changed, 85 insertions(+), 13 deletions(-) create mode 100644 changelog.d/8648.bugfix diff --git a/changelog.d/8648.bugfix b/changelog.d/8648.bugfix new file mode 100644 index 000000000000..aa71ad0ff2e8 --- /dev/null +++ b/changelog.d/8648.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in v1.22.0rc1 which would cause ephemeral events to not be sent to appservices. \ No newline at end of file diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index fe8cfc9b18a1..64dea23fc5ee 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -203,16 +203,16 @@ async def notify_interested_services_ephemeral( events = await self._handle_receipts(service) if events: self.scheduler.submit_ephemeral_events_for_as(service, events) - await self.store.set_type_stream_id_for_appservice( - service, "read_receipt", new_token - ) + await self.store.set_type_stream_id_for_appservice( + service, "read_receipt", new_token + ) elif stream_key == "presence_key": events = await self._handle_presence(service, users) if events: self.scheduler.submit_ephemeral_events_for_as(service, events) - await self.store.set_type_stream_id_for_appservice( - service, "presence", new_token - ) + await self.store.set_type_stream_id_for_appservice( + service, "presence", new_token + ) async def _handle_typing(self, service: ApplicationService, new_token: int): typing_source = self.event_sources.sources["typing"] diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py index 43bf0f649abf..637a938bacaa 100644 --- a/synapse/storage/databases/main/appservice.py +++ b/synapse/storage/databases/main/appservice.py @@ -369,17 +369,25 @@ def get_new_events_for_appservice_txn(txn): async def get_type_stream_id_for_appservice( self, service: ApplicationService, type: str ) -> int: + if type not in ("read_receipt", "presence"): + raise ValueError( + "Expected type to be a valid application stream id type, got %s" + % (type,) + ) + def get_type_stream_id_for_appservice_txn(txn): stream_id_type = "%s_stream_id" % type txn.execute( - "SELECT ? 
FROM application_services_state WHERE as_id=?", - (stream_id_type, service.id,), + # We do NOT want to escape `stream_id_type`. + "SELECT %s FROM application_services_state WHERE as_id=?" + % stream_id_type, + (service.id,), ) - last_txn_id = txn.fetchone() - if last_txn_id is None or last_txn_id[0] is None: # no row exists + last_stream_id = txn.fetchone() + if last_stream_id is None or last_stream_id[0] is None: # no row exists return 0 else: - return int(last_txn_id[0]) + return int(last_stream_id[0]) return await self.db_pool.runInteraction( "get_type_stream_id_for_appservice", get_type_stream_id_for_appservice_txn @@ -388,11 +396,18 @@ def get_type_stream_id_for_appservice_txn(txn): async def set_type_stream_id_for_appservice( self, service: ApplicationService, type: str, pos: int ) -> None: + if type not in ("read_receipt", "presence"): + raise ValueError( + "Expected type to be a valid application stream id type, got %s" + % (type,) + ) + def set_type_stream_id_for_appservice_txn(txn): stream_id_type = "%s_stream_id" % type txn.execute( - "UPDATE ? SET device_list_stream_id = ? WHERE as_id=?", - (stream_id_type, pos, service.id), + "UPDATE application_services_state SET %s = ? WHERE as_id=?" + % stream_id_type, + (pos, service.id), ) await self.db_pool.runInteraction( diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index c5c79873495d..1ce29af5fd9d 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -410,6 +410,62 @@ def test_get_appservices_by_state_multiple(self): ) +class ApplicationServiceStoreTypeStreamIds(unittest.HomeserverTestCase): + def make_homeserver(self, reactor, clock): + hs = self.setup_test_homeserver() + return hs + + def prepare(self, hs, reactor, clock): + self.service = Mock(id="foo") + self.store = self.hs.get_datastore() + self.get_success(self.store.set_appservice_state(self.service, "up")) + + def test_get_type_stream_id_for_appservice_no_value(self): + value = self.get_success( + self.store.get_type_stream_id_for_appservice(self.service, "read_receipt") + ) + self.assertEquals(value, 0) + + value = self.get_success( + self.store.get_type_stream_id_for_appservice(self.service, "presence") + ) + self.assertEquals(value, 0) + + def test_get_type_stream_id_for_appservice_invalid_type(self): + self.get_failure( + self.store.get_type_stream_id_for_appservice(self.service, "foobar"), + ValueError, + ) + + def test_set_type_stream_id_for_appservice(self): + read_receipt_value = 1024 + self.get_success( + self.store.set_type_stream_id_for_appservice( + self.service, "read_receipt", read_receipt_value + ) + ) + result = self.get_success( + self.store.get_type_stream_id_for_appservice(self.service, "read_receipt") + ) + self.assertEqual(result, read_receipt_value) + + self.get_success( + self.store.set_type_stream_id_for_appservice( + self.service, "presence", read_receipt_value + ) + ) + result = self.get_success( + self.store.get_type_stream_id_for_appservice(self.service, "presence") + ) + self.assertEqual(result, read_receipt_value) + + def test_set_type_stream_id_for_appservice_invalid_type(self): + self.get_failure( + self.store.set_type_stream_id_for_appservice(self.service, "foobar", 1024), + ValueError, + ) + + # required for ApplicationServiceTransactionStoreTestCase tests class TestTransactionStore(ApplicationServiceTransactionStore, ApplicationServiceStore): def __init__(self, database: DatabasePool, db_conn, hs): From 7a3adbd7af320e076d480071f3870b277a6f9ff8 Mon Sep 17 00:00:00 2001 From: 
Erik Johnston Date: Mon, 26 Oct 2020 15:11:03 +0000 Subject: [PATCH 242/245] 1.22.0rc2 --- CHANGES.md | 11 +++++++++++ changelog.d/8648.bugfix | 1 - changelog.d/8654.bugfix | 1 - changelog.d/8656.bugfix | 1 - synapse/__init__.py | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) delete mode 100644 changelog.d/8648.bugfix delete mode 100644 changelog.d/8654.bugfix delete mode 100644 changelog.d/8656.bugfix diff --git a/CHANGES.md b/CHANGES.md index 6066a38c5a7b..406d7de7e3b6 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,14 @@ +Synapse 1.22.0rc2 (2020-10-26) +============================== + +Bugfixes +-------- + +- Fix a bug introduced in v1.22.0rc1 which would cause ephemeral events to not be sent to appservices. ([\#8648](https://github.com/matrix-org/synapse/issues/8648)) +- Fix `user_daily_visits` to not have duplicate rows for UA. Broke in v1.22.0rc1. ([\#8654](https://github.com/matrix-org/synapse/issues/8654)) +- Fix a bug introduced in v1.22.0rc1 where presence events were not properly passed to application services. ([\#8656](https://github.com/matrix-org/synapse/issues/8656)) + + Synapse 1.22.0rc1 (2020-10-22) ============================== diff --git a/changelog.d/8648.bugfix b/changelog.d/8648.bugfix deleted file mode 100644 index aa71ad0ff2e8..000000000000 --- a/changelog.d/8648.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in v1.22.0rc1 which would cause ephemeral events to not be sent to appservices. \ No newline at end of file diff --git a/changelog.d/8654.bugfix b/changelog.d/8654.bugfix deleted file mode 100644 index 91d3265b7f82..000000000000 --- a/changelog.d/8654.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix `user_daily_visits` to not have duplicate rows for UA. Broke in v1.22.0rc1. diff --git a/changelog.d/8656.bugfix b/changelog.d/8656.bugfix deleted file mode 100644 index d6415e8282af..000000000000 --- a/changelog.d/8656.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in v1.22.0rc1 where presence events were not properly passed to application services. diff --git a/synapse/__init__.py b/synapse/__init__.py index c9d53e767a1b..24a5815447db 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -48,7 +48,7 @@ except ImportError: pass -__version__ = "1.22.0rc1" +__version__ = "1.22.0rc2" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From f40a4ba08e92960431e068ef7b5cb0907415b6b4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 26 Oct 2020 15:15:49 +0000 Subject: [PATCH 243/245] Expand changelog entry --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 406d7de7e3b6..9da1d95002e0 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,7 +5,7 @@ Bugfixes -------- - Fix a bug introduced in v1.22.0rc1 which would cause ephemeral events to not be sent to appservices. ([\#8648](https://github.com/matrix-org/synapse/issues/8648)) -- Fix `user_daily_visits` to not have duplicate rows for UA. Broke in v1.22.0rc1. ([\#8654](https://github.com/matrix-org/synapse/issues/8654)) +- Fix `user_daily_visits` table to not have duplicate rows per user/device due to multiple user agents. Broke in v1.22.0rc1. ([\#8654](https://github.com/matrix-org/synapse/issues/8654)) - Fix a bug introduced in v1.22.0rc1 where presence events were not properly passed to application services. 
([\#8656](https://github.com/matrix-org/synapse/issues/8656)) From 191f2e5d5d12cde1145f29f9d3a9aa0f993f4306 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 26 Oct 2020 15:17:31 +0000 Subject: [PATCH 244/245] Fixup changelog --- CHANGES.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 9da1d95002e0..54a642e11a17 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,10 +4,8 @@ Synapse 1.22.0rc2 (2020-10-26) Bugfixes -------- -- Fix a bug introduced in v1.22.0rc1 which would cause ephemeral events to not be sent to appservices. ([\#8648](https://github.com/matrix-org/synapse/issues/8648)) +- Fix bugs where ephemeral events were not sent to appservices. Broke in v1.22.0rc1. ([\#8648](https://github.com/matrix-org/synapse/issues/8648), [\#8656](https://github.com/matrix-org/synapse/issues/8656)) - Fix `user_daily_visits` table to not have duplicate rows per user/device due to multiple user agents. Broke in v1.22.0rc1. ([\#8654](https://github.com/matrix-org/synapse/issues/8654)) -- Fix a bug introduced in v1.22.0rc1 where presence events were not properly passed to application services. ([\#8656](https://github.com/matrix-org/synapse/issues/8656)) - Synapse 1.22.0rc1 (2020-10-22) ============================== From fedfdfd7502e63d28054d644d63fef18b57befd7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 27 Oct 2020 12:07:19 +0000 Subject: [PATCH 245/245] 1.22.0 --- CHANGES.md | 6 ++++++ debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 54a642e11a17..3a522f8518f8 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 1.22.0 (2020-10-27) +=========================== + +No significant changes. + + Synapse 1.22.0rc2 (2020-10-26) ============================== diff --git a/debian/changelog b/debian/changelog index 8d873a4845c8..40f02443f96d 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.22.0) stable; urgency=medium + + * New synapse release 1.22.0. + + -- Synapse Packaging team Tue, 27 Oct 2020 12:07:12 +0000 + matrix-synapse-py3 (1.21.2) stable; urgency=medium [ Synapse Packaging team ] diff --git a/synapse/__init__.py b/synapse/__init__.py index 24a5815447db..35a82c0bfeb5 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -48,7 +48,7 @@ except ImportError: pass -__version__ = "1.22.0rc2" +__version__ = "1.22.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when
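A closing note on the `get_type_stream_id_for_appservice` / `set_type_stream_id_for_appservice` fix in #8648 (PATCH 241 above): SQL placeholders bind values, not identifiers, so the old `SELECT ? FROM application_services_state WHERE as_id=?` returned the bound column-name string as a constant rather than reading the column, and the old `UPDATE ? SET device_list_stream_id = ?` tried to bind the table name and touched the wrong column. That is why the fix validates the stream type against a fixed whitelist and interpolates the column name, keeping `?` only for values. A minimal sketch of the difference, using an in-memory SQLite database and made-up data (the real table lives in Synapse's main database):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE application_services_state "
        "(as_id TEXT, read_receipt_stream_id INTEGER, presence_stream_id INTEGER)"
    )
    conn.execute("INSERT INTO application_services_state VALUES ('foo', 1024, 7)")

    # Binding the column name as a parameter selects the literal string,
    # not the column's value:
    broken = conn.execute(
        "SELECT ? FROM application_services_state WHERE as_id=?",
        ("read_receipt_stream_id", "foo"),
    ).fetchone()
    print(broken)  # ('read_receipt_stream_id',) rather than (1024,)

    # Interpolating a whitelisted identifier, as the fix does, reads the column:
    stream_type = "read_receipt"
    if stream_type not in ("read_receipt", "presence"):
        raise ValueError("unexpected stream id type: %s" % stream_type)
    fixed = conn.execute(
        "SELECT %s_stream_id FROM application_services_state WHERE as_id=?"
        % stream_type,
        ("foo",),
    ).fetchone()
    print(fixed)  # (1024,)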