Skip to content
This repository has been archived by the owner on Apr 26, 2024. It is now read-only.

Fix MultiWriteIdGenerator's handling of restarts. #8374

Merged
merged 7 commits into from
Sep 24, 2020
Merged
Show file tree
Hide file tree
Changes from 6 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions changelog.d/8374.bugfix
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Fix theoretical race condition where events are not sent down `/sync` if the synchrotron worker is restarted without restarting other workers.
2 changes: 2 additions & 0 deletions synapse/replication/slave/storage/_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,11 +31,13 @@ def __init__(self, database: DatabasePool, db_conn, hs):
self._cache_id_gen = MultiWriterIdGenerator(
db_conn,
database,
stream_name="caches",
instance_name=hs.get_instance_name(),
table="cache_invalidation_stream_by_instance",
instance_column="instance_name",
id_column="stream_id",
sequence_name="cache_invalidation_stream_seq",
writers=[],
) # type: Optional[MultiWriterIdGenerator]
else:
self._cache_id_gen = None
Expand Down
8 changes: 7 additions & 1 deletion synapse/storage/databases/main/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -160,14 +160,20 @@ def __init__(self, database: DatabasePool, db_conn, hs):
)

if isinstance(self.database_engine, PostgresEngine):
# We set the `writers` to an empty list here as we don't care about
# missing updates over restarts, as we'll not have anything in our
# caches to invalidate. (This reduces the amount of writes to the DB
# that happen).
self._cache_id_gen = MultiWriterIdGenerator(
db_conn,
database,
instance_name="master",
stream_name="caches",
instance_name=hs.get_instance_name(),
table="cache_invalidation_stream_by_instance",
instance_column="instance_name",
id_column="stream_id",
sequence_name="cache_invalidation_stream_seq",
writers=[],
)
else:
self._cache_id_gen = None
Expand Down
4 changes: 4 additions & 0 deletions synapse/storage/databases/main/events_worker.py
Original file line number Diff line number Diff line change
Expand Up @@ -83,21 +83,25 @@ def __init__(self, database: DatabasePool, db_conn, hs):
self._stream_id_gen = MultiWriterIdGenerator(
db_conn=db_conn,
db=database,
stream_name="events",
instance_name=hs.get_instance_name(),
table="events",
instance_column="instance_name",
id_column="stream_ordering",
sequence_name="events_stream_seq",
writers=hs.config.worker.writers.events,
)
self._backfill_id_gen = MultiWriterIdGenerator(
db_conn=db_conn,
db=database,
stream_name="backfill",
instance_name=hs.get_instance_name(),
table="events",
instance_column="instance_name",
id_column="stream_ordering",
sequence_name="events_backfill_stream_seq",
positive=False,
writers=hs.config.worker.writers.events,
)
else:
# We shouldn't be running in worker mode with SQLite, but it's useful
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
/* Copyright 2020 The Matrix.org Foundation C.I.C
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

-- Records the last stream ID each writer instance has persisted for a given
-- stream, so that `MultiWriterIdGenerator` can restore each writer's current
-- position after a restart instead of deriving it from the stream table alone.
CREATE TABLE stream_positions (
stream_name TEXT NOT NULL,
instance_name TEXT NOT NULL,
stream_id BIGINT NOT NULL
);

-- Exactly one position row per (stream, writer); the generator upserts on
-- this pair, keeping only the furthest-advanced position.
CREATE UNIQUE INDEX stream_positions_idx ON stream_positions(stream_name, instance_name);
150 changes: 129 additions & 21 deletions synapse/storage/util/id_generators.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
import attr
from typing_extensions import Deque

from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.database import DatabasePool, LoggingTransaction
from synapse.storage.util.sequence import PostgresSequenceGenerator

Expand Down Expand Up @@ -184,12 +185,16 @@ class MultiWriterIdGenerator:
Args:
db_conn
db
stream_name: A name for the stream.
instance_name: The name of this instance.
table: Database table associated with stream.
instance_column: Column that stores the row's writer's instance name
id_column: Column that stores the stream ID.
sequence_name: The name of the postgres sequence used to generate new
IDs.
writers: A list of known writers to use to populate current positions
on startup. Can be empty if nothing uses `get_current_token` or
`get_positions` (e.g. caches stream).
positive: Whether the IDs are positive (true) or negative (false).
When using negative IDs we go backwards from -1 to -2, -3, etc.
"""
Expand All @@ -198,16 +203,20 @@ def __init__(
self,
db_conn,
db: DatabasePool,
stream_name: str,
instance_name: str,
table: str,
instance_column: str,
id_column: str,
sequence_name: str,
writers: List[str],
positive: bool = True,
):
self._db = db
self._stream_name = stream_name
self._instance_name = instance_name
self._positive = positive
self._writers = writers
self._return_factor = 1 if positive else -1

# We lock as some functions may be called from DB threads.
Expand All @@ -216,9 +225,7 @@ def __init__(
# Note: If we are a negative stream then we still store all the IDs as
# positive to make life easier for us, and simply negate the IDs when we
# return them.
self._current_positions = self._load_current_ids(
db_conn, table, instance_column, id_column
)
self._current_positions = {} # type: Dict[str, int]

# Set of local IDs that we're still processing. The current position
# should be less than the minimum of this set (if not empty).
Expand Down Expand Up @@ -251,30 +258,82 @@ def __init__(

self._sequence_gen = PostgresSequenceGenerator(sequence_name)

# This goes and fills out the above state from the database.
self._load_current_ids(db_conn, table, instance_column, id_column)

def _load_current_ids(
self, db_conn, table: str, instance_column: str, id_column: str
) -> Dict[str, int]:
# If positive stream aggregate via MAX. For negative stream use MIN
# *and* negate the result to get a positive number.
sql = """
SELECT %(instance)s, %(agg)s(%(id)s) FROM %(table)s
GROUP BY %(instance)s
""" % {
"instance": instance_column,
"id": id_column,
"table": table,
"agg": "MAX" if self._positive else "-MIN",
}

):
cur = db_conn.cursor()
cur.execute(sql)

# `cur` is an iterable over returned rows, which are 2-tuples.
current_positions = dict(cur)
# Load the current positions of all writers for the stream.
if self._writers:
sql = """
SELECT instance_name, stream_id FROM stream_positions
WHERE stream_name = ?
"""
sql = self._db.engine.convert_param_style(sql)

cur.close()
cur.execute(sql, (self._stream_name,))

self._current_positions = {
instance: stream_id * self._return_factor
for instance, stream_id in cur
if instance in self._writers
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

By excluding only the known writers here could there be a dataloss situation when removing a writer, if that writer is the minimum position?

If you have writers:

  • A at pos 15
  • B at pos 17
  • C at pos 12

And then remove "C", _current_positions will be {A: 15, B: 17} and min_stream_id will be 15. I'm not sure if this is OK or not?

It seems you also don't want to include writers that you no longer care about or else you can end up in a situation where you have an old writer always saying it is far behind, which I'm guessing this if-statement checks against?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

As per discussion elsewhere: this is OK because if C has been removed from the list it means it's been turned off, and so we know it's up to date.

It's worth noting that these positions aren't really "where C has gotten up to in the rooms", but instead is more "where C is currently persisting events". I.e. we're not worried that if we remove C from the deployment then A or B will start persisting at position 12, as either C finished persisting a row at 12 or the request gets retried and A or B will persist it with a new position.

}
else:
self._current_positions = {}
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It looks like _current_positions is initiated to an empty dict. We probably don't need this.


# We set the `_persisted_upto_position` to be the minimum of all current
# positions. If empty we use the max stream ID from the DB table.
min_stream_id = min(self._current_positions.values(), default=None)

if min_stream_id is None:
sql = """
SELECT COALESCE(%(agg)s(%(id)s), 1) FROM %(table)s
""" % {
"id": id_column,
"table": table,
"agg": "MAX" if self._positive else "-MIN",
}
cur.execute(sql)
(stream_id,) = cur.fetchone()
self._persisted_upto_position = stream_id
else:
# If we have a min_stream_id then we pull out everything greater
# than it from the DB so that we can prefill
# `_known_persisted_positions` and get a more accurate
# `_persisted_upto_position`.
#
# We also check if any of the later rows are from this instance, in
# which case we use that for this instance's current position. This
# is to handle the case where we didn't finish persisting to the
# stream positions table before restart (or the stream position
# table otherwise got out of date).
Comment on lines +306 to +310
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Isn't this true of any instance in _current_positions? I'm having trouble following why this is only true of the current instance.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We know that if the current instance has just been restarted then we don't have any rows that are currently being persisted, so its safe to set the current position of itself to the max stream ID. Other instances may not have been restarted so may still be persisting things.

(We don't just set the current position of the instance to the max stream ID as in future we want every entry in current_positions to have matching instance in the DB, i.e. if we have {A: 5, B: 6} then we want the row 5 in the DB to have an instance of A and row 6 to have an instance of B. This will allow us to serialise the current position to (5, 6), as we can then just look the rows up in the DB to get back to {A: 5, B: 6})


sql = """
SELECT %(instance)s, %(id)s FROM %(table)s
WHERE ? %(cmp)s %(id)s
""" % {
"id": id_column,
"table": table,
"instance": instance_column,
"cmp": "<=" if self._positive else ">=",
}
sql = self._db.engine.convert_param_style(sql)
cur.execute(sql, (min_stream_id,))

self._persisted_upto_position = min_stream_id

with self._lock:
for (instance, stream_id,) in cur:
stream_id = self._return_factor * stream_id
self._add_persisted_position(stream_id)

return current_positions
if instance == self._instance_name:
self._current_positions[instance] = stream_id

cur.close()

def _load_next_id_txn(self, txn) -> int:
return self._sequence_gen.get_next_id_txn(txn)
Expand Down Expand Up @@ -316,6 +375,21 @@ def get_next_txn(self, txn: LoggingTransaction):
txn.call_after(self._mark_id_as_finished, next_id)
txn.call_on_exception(self._mark_id_as_finished, next_id)

# Update the `stream_positions` table with newly updated stream
# ID (unless self._writers is not set in which case we don't
# bother, as nothing will read it).
#
# We only do this on the success path so that the persisted current
# position points to a persisted row with the correct instance name.
if self._writers:
txn.call_after(
run_as_background_process,
"MultiWriterIdGenerator._update_table",
self._db.runInteraction,
"MultiWriterIdGenerator._update_table",
self._update_stream_positions_table_txn,
)
Comment on lines +383 to +389
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Are there any issues with this being a different transaction? (What if this transaction fails?) I suspect it is OK since we always update to the largest position anyway.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We want to do it after we've marked the associated ID as "persisted", which happens after the transaction has already finished.

Though a follow up question of: can we mark the ID as persisted before we finish the transaction? The answer to that is probably yes if we faff around a bit.

One thing to note is that the only stream that actually uses get_next_txn is the cache invalidation stream, which we explicitly don't record the stream positions for. The only reason I've added updating the stream positions table here is for consistency and to avoid future foot guns.


return self._return_factor * next_id

def _mark_id_as_finished(self, next_id: int):
Expand Down Expand Up @@ -447,6 +521,28 @@ def _add_persisted_position(self, new_id: int):
# do.
break

def _update_stream_positions_table_txn(self, txn):
    """Update the `stream_positions` table with the newly persisted position
    for this instance.

    No-op when `self._writers` is empty: nothing reads the table for such
    streams, so we skip the write entirely. (Callers also guard on this; the
    check here is defensive in case a future caller forgets.)

    Args:
        txn: The database transaction to execute the upsert in.
    """

    if not self._writers:
        return

    # We upsert the value, ensuring on conflict that we always increase the
    # value (or decrease if stream goes backwards).
    sql = """
        INSERT INTO stream_positions (stream_name, instance_name, stream_id)
        VALUES (?, ?, ?)
        ON CONFLICT (stream_name, instance_name)
        DO UPDATE SET
            stream_id = %(agg)s(stream_positions.stream_id, EXCLUDED.stream_id)
    """ % {
        # GREATEST keeps the furthest-forward position for positive streams;
        # LEAST does the equivalent for negative (backwards-counting) streams.
        "agg": "GREATEST" if self._positive else "LEAST",
    }

    # BUGFIX: the original wrote `pos = (...,)`, i.e. a 1-tuple, and then
    # bound that tuple as the `stream_id` parameter. Bind the integer itself.
    pos = self.get_current_token_for_writer(self._instance_name)
    txn.execute(sql, (self._stream_name, self._instance_name, pos))


@attr.s(slots=True)
class _AsyncCtxManagerWrapper:
Expand Down Expand Up @@ -503,4 +599,16 @@ async def __aexit__(self, exc_type, exc, tb):
if exc_type is not None:
return False

# Update the `stream_positions` table with newly updated stream
# ID (unless self._writers is not set in which case we don't
# bother, as nothing will read it).
#
# We only do this on the success path so that the persisted current
# position points to a persisted row with the correct instance name.
if self.id_gen._writers:
await self.id_gen._db.runInteraction(
"MultiWriterIdGenerator._update_table",
self.id_gen._update_stream_positions_table_txn,
)

return False
Loading