Require SQLite >= 3.27.0 (#13760)

pull/13769/head
David Robertson 2022-09-09 11:14:10 +01:00 committed by GitHub
parent 69fa29700e
commit f2d2481e56
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 103 additions and 205 deletions

View File

@ -0,0 +1 @@
Synapse will now refuse to start if configured to use SQLite < 3.27.

View File

@ -533,15 +533,14 @@ class DatabasePool:
if isinstance(self.engine, Sqlite3Engine): if isinstance(self.engine, Sqlite3Engine):
self._unsafe_to_upsert_tables.add("user_directory_search") self._unsafe_to_upsert_tables.add("user_directory_search")
if self.engine.can_native_upsert: # Check ASAP (and then later, every 1s) to see if we have finished
# Check ASAP (and then later, every 1s) to see if we have finished # background updates of tables that aren't safe to update.
# background updates of tables that aren't safe to update. self._clock.call_later(
self._clock.call_later( 0.0,
0.0, run_as_background_process,
run_as_background_process, "upsert_safety_check",
"upsert_safety_check", self._check_safe_to_upsert,
self._check_safe_to_upsert, )
)
def name(self) -> str: def name(self) -> str:
"Return the name of this database" "Return the name of this database"
@ -1160,11 +1159,8 @@ class DatabasePool:
attempts = 0 attempts = 0
while True: while True:
try: try:
# We can autocommit if we are going to use native upserts # We can autocommit if it is safe to upsert
autocommit = ( autocommit = table not in self._unsafe_to_upsert_tables
self.engine.can_native_upsert
and table not in self._unsafe_to_upsert_tables
)
return await self.runInteraction( return await self.runInteraction(
desc, desc,
@ -1199,7 +1195,7 @@ class DatabasePool:
) -> bool: ) -> bool:
""" """
Pick the UPSERT method which works best on the platform. Either the Pick the UPSERT method which works best on the platform. Either the
native one (Pg9.5+, recent SQLites), or fall back to an emulated method. native one (Pg9.5+, SQLite >= 3.24), or fall back to an emulated method.
Args: Args:
txn: The transaction to use. txn: The transaction to use.
@ -1207,14 +1203,15 @@ class DatabasePool:
keyvalues: The unique key tables and their new values keyvalues: The unique key tables and their new values
values: The nonunique columns and their new values values: The nonunique columns and their new values
insertion_values: additional key/values to use only when inserting insertion_values: additional key/values to use only when inserting
lock: True to lock the table when doing the upsert. lock: True to lock the table when doing the upsert. Unused when performing
a native upsert.
Returns: Returns:
Returns True if a row was inserted or updated (i.e. if `values` is Returns True if a row was inserted or updated (i.e. if `values` is
not empty then this always returns True) not empty then this always returns True)
""" """
insertion_values = insertion_values or {} insertion_values = insertion_values or {}
if self.engine.can_native_upsert and table not in self._unsafe_to_upsert_tables: if table not in self._unsafe_to_upsert_tables:
return self.simple_upsert_txn_native_upsert( return self.simple_upsert_txn_native_upsert(
txn, table, keyvalues, values, insertion_values=insertion_values txn, table, keyvalues, values, insertion_values=insertion_values
) )
@ -1365,14 +1362,12 @@ class DatabasePool:
value_names: The value column names value_names: The value column names
value_values: A list of each row's value column values. value_values: A list of each row's value column values.
Ignored if value_names is empty. Ignored if value_names is empty.
lock: True to lock the table when doing the upsert. Unused if the database engine lock: True to lock the table when doing the upsert. Unused when performing
supports native upserts. a native upsert.
""" """
# We can autocommit if we are going to use native upserts # We can autocommit if it is safe to upsert
autocommit = ( autocommit = table not in self._unsafe_to_upsert_tables
self.engine.can_native_upsert and table not in self._unsafe_to_upsert_tables
)
await self.runInteraction( await self.runInteraction(
desc, desc,
@ -1406,10 +1401,10 @@ class DatabasePool:
value_names: The value column names value_names: The value column names
value_values: A list of each row's value column values. value_values: A list of each row's value column values.
Ignored if value_names is empty. Ignored if value_names is empty.
lock: True to lock the table when doing the upsert. Unused if the database engine lock: True to lock the table when doing the upsert. Unused when performing
supports native upserts. a native upsert.
""" """
if self.engine.can_native_upsert and table not in self._unsafe_to_upsert_tables: if table not in self._unsafe_to_upsert_tables:
return self.simple_upsert_many_txn_native_upsert( return self.simple_upsert_many_txn_native_upsert(
txn, table, key_names, key_values, value_names, value_values txn, table, key_names, key_values, value_names, value_values
) )

View File

@ -129,91 +129,48 @@ class LockStore(SQLBaseStore):
now = self._clock.time_msec() now = self._clock.time_msec()
token = random_string(6) token = random_string(6)
if self.db_pool.engine.can_native_upsert: def _try_acquire_lock_txn(txn: LoggingTransaction) -> bool:
# We take out the lock if either a) there is no row for the lock
def _try_acquire_lock_txn(txn: LoggingTransaction) -> bool: # already, b) the existing row has timed out, or c) the row is
# We take out the lock if either a) there is no row for the lock # for this instance (which means the process got killed and
# already, b) the existing row has timed out, or c) the row is # restarted)
# for this instance (which means the process got killed and sql = """
# restarted) INSERT INTO worker_locks (lock_name, lock_key, instance_name, token, last_renewed_ts)
sql = """ VALUES (?, ?, ?, ?, ?)
INSERT INTO worker_locks (lock_name, lock_key, instance_name, token, last_renewed_ts) ON CONFLICT (lock_name, lock_key)
VALUES (?, ?, ?, ?, ?) DO UPDATE
ON CONFLICT (lock_name, lock_key) SET
DO UPDATE token = EXCLUDED.token,
SET instance_name = EXCLUDED.instance_name,
token = EXCLUDED.token, last_renewed_ts = EXCLUDED.last_renewed_ts
instance_name = EXCLUDED.instance_name, WHERE
last_renewed_ts = EXCLUDED.last_renewed_ts worker_locks.last_renewed_ts < ?
WHERE OR worker_locks.instance_name = EXCLUDED.instance_name
worker_locks.last_renewed_ts < ? """
OR worker_locks.instance_name = EXCLUDED.instance_name txn.execute(
""" sql,
txn.execute( (
sql, lock_name,
( lock_key,
lock_name, self._instance_name,
lock_key, token,
self._instance_name, now,
token, now - _LOCK_TIMEOUT_MS,
now, ),
now - _LOCK_TIMEOUT_MS,
),
)
# We only acquired the lock if we inserted or updated the table.
return bool(txn.rowcount)
did_lock = await self.db_pool.runInteraction(
"try_acquire_lock",
_try_acquire_lock_txn,
# We can autocommit here as we're executing a single query, this
# will avoid serialization errors.
db_autocommit=True,
)
if not did_lock:
return None
else:
# If we're on an old SQLite we emulate the above logic by first
# clearing out any existing stale locks and then upserting.
def _try_acquire_lock_emulated_txn(txn: LoggingTransaction) -> bool:
sql = """
DELETE FROM worker_locks
WHERE
lock_name = ?
AND lock_key = ?
AND (last_renewed_ts < ? OR instance_name = ?)
"""
txn.execute(
sql,
(lock_name, lock_key, now - _LOCK_TIMEOUT_MS, self._instance_name),
)
inserted = self.db_pool.simple_upsert_txn_emulated(
txn,
table="worker_locks",
keyvalues={
"lock_name": lock_name,
"lock_key": lock_key,
},
values={},
insertion_values={
"token": token,
"last_renewed_ts": self._clock.time_msec(),
"instance_name": self._instance_name,
},
)
return inserted
did_lock = await self.db_pool.runInteraction(
"try_acquire_lock_emulated", _try_acquire_lock_emulated_txn
) )
if not did_lock: # We only acquired the lock if we inserted or updated the table.
return None return bool(txn.rowcount)
did_lock = await self.db_pool.runInteraction(
"try_acquire_lock",
_try_acquire_lock_txn,
# We can autocommit here as we're executing a single query, this
# will avoid serialization errors.
db_autocommit=True,
)
if not did_lock:
return None
lock = Lock( lock = Lock(
self._reactor, self._reactor,

View File

@ -446,59 +446,41 @@ class StatsStore(StateDeltasStore):
absolutes: Absolute (set) fields absolutes: Absolute (set) fields
additive_relatives: Fields that will be added onto if existing row present. additive_relatives: Fields that will be added onto if existing row present.
""" """
if self.database_engine.can_native_upsert: absolute_updates = [
absolute_updates = [ "%(field)s = EXCLUDED.%(field)s" % {"field": field}
"%(field)s = EXCLUDED.%(field)s" % {"field": field} for field in absolutes.keys()
for field in absolutes.keys() ]
]
relative_updates = [ relative_updates = [
"%(field)s = EXCLUDED.%(field)s + COALESCE(%(table)s.%(field)s, 0)" "%(field)s = EXCLUDED.%(field)s + COALESCE(%(table)s.%(field)s, 0)"
% {"table": table, "field": field} % {"table": table, "field": field}
for field in additive_relatives.keys() for field in additive_relatives.keys()
] ]
insert_cols = [] insert_cols = []
qargs = [] qargs = []
for (key, val) in chain( for (key, val) in chain(
keyvalues.items(), absolutes.items(), additive_relatives.items() keyvalues.items(), absolutes.items(), additive_relatives.items()
): ):
insert_cols.append(key) insert_cols.append(key)
qargs.append(val) qargs.append(val)
sql = """ sql = """
INSERT INTO %(table)s (%(insert_cols_cs)s) INSERT INTO %(table)s (%(insert_cols_cs)s)
VALUES (%(insert_vals_qs)s) VALUES (%(insert_vals_qs)s)
ON CONFLICT (%(key_columns)s) DO UPDATE SET %(updates)s ON CONFLICT (%(key_columns)s) DO UPDATE SET %(updates)s
""" % { """ % {
"table": table, "table": table,
"insert_cols_cs": ", ".join(insert_cols), "insert_cols_cs": ", ".join(insert_cols),
"insert_vals_qs": ", ".join( "insert_vals_qs": ", ".join(
["?"] * (len(keyvalues) + len(absolutes) + len(additive_relatives)) ["?"] * (len(keyvalues) + len(absolutes) + len(additive_relatives))
), ),
"key_columns": ", ".join(keyvalues), "key_columns": ", ".join(keyvalues),
"updates": ", ".join(chain(absolute_updates, relative_updates)), "updates": ", ".join(chain(absolute_updates, relative_updates)),
} }
txn.execute(sql, qargs) txn.execute(sql, qargs)
else:
self.database_engine.lock_table(txn, table)
retcols = list(chain(absolutes.keys(), additive_relatives.keys()))
current_row = self.db_pool.simple_select_one_txn(
txn, table, keyvalues, retcols, allow_none=True
)
if current_row is None:
merged_dict = {**keyvalues, **absolutes, **additive_relatives}
self.db_pool.simple_insert_txn(txn, table, merged_dict)
else:
for (key, val) in additive_relatives.items():
if current_row[key] is None:
current_row[key] = val
else:
current_row[key] += val
current_row.update(absolutes)
self.db_pool.simple_update_one_txn(txn, table, keyvalues, current_row)
async def _calculate_and_set_initial_state_for_room(self, room_id: str) -> None: async def _calculate_and_set_initial_state_for_room(self, room_id: str) -> None:
"""Calculate and insert an entry into room_stats_current. """Calculate and insert an entry into room_stats_current.

View File

@ -221,25 +221,15 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore):
retry_interval: how long until next retry in ms retry_interval: how long until next retry in ms
""" """
if self.database_engine.can_native_upsert: await self.db_pool.runInteraction(
await self.db_pool.runInteraction( "set_destination_retry_timings",
"set_destination_retry_timings", self._set_destination_retry_timings_native,
self._set_destination_retry_timings_native, destination,
destination, failure_ts,
failure_ts, retry_last_ts,
retry_last_ts, retry_interval,
retry_interval, db_autocommit=True, # Safe as it's a single upsert
db_autocommit=True, # Safe as its a single upsert )
)
else:
await self.db_pool.runInteraction(
"set_destination_retry_timings",
self._set_destination_retry_timings_emulated,
destination,
failure_ts,
retry_last_ts,
retry_interval,
)
def _set_destination_retry_timings_native( def _set_destination_retry_timings_native(
self, self,
@ -249,8 +239,6 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore):
retry_last_ts: int, retry_last_ts: int,
retry_interval: int, retry_interval: int,
) -> None: ) -> None:
assert self.database_engine.can_native_upsert
# Upsert retry time interval if retry_interval is zero (i.e. we're # Upsert retry time interval if retry_interval is zero (i.e. we're
# resetting it) or greater than the existing retry interval. # resetting it) or greater than the existing retry interval.
# #

View File

@ -43,14 +43,6 @@ class BaseDatabaseEngine(Generic[ConnectionType], metaclass=abc.ABCMeta):
def single_threaded(self) -> bool: def single_threaded(self) -> bool:
... ...
@property
@abc.abstractmethod
def can_native_upsert(self) -> bool:
"""
Do we support native UPSERTs?
"""
...
@property @property
@abc.abstractmethod @abc.abstractmethod
def supports_using_any_list(self) -> bool: def supports_using_any_list(self) -> bool:

View File

@ -158,13 +158,6 @@ class PostgresEngine(BaseDatabaseEngine[psycopg2.extensions.connection]):
cursor.close() cursor.close()
db_conn.commit() db_conn.commit()
@property
def can_native_upsert(self) -> bool:
"""
Can we use native UPSERTs?
"""
return True
@property @property
def supports_using_any_list(self) -> bool: def supports_using_any_list(self) -> bool:
"""Do we support using `a = ANY(?)` and passing a list""" """Do we support using `a = ANY(?)` and passing a list"""

View File

@ -48,14 +48,6 @@ class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection]):
def single_threaded(self) -> bool: def single_threaded(self) -> bool:
return True return True
@property
def can_native_upsert(self) -> bool:
"""
Do we support native UPSERTs? This requires SQLite3 3.24+, plus some
more work we haven't done yet to tell what was inserted vs updated.
"""
return sqlite3.sqlite_version_info >= (3, 24, 0)
@property @property
def supports_using_any_list(self) -> bool: def supports_using_any_list(self) -> bool:
"""Do we support using `a = ANY(?)` and passing a list""" """Do we support using `a = ANY(?)` and passing a list"""
@ -70,12 +62,11 @@ class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection]):
self, db_conn: sqlite3.Connection, allow_outdated_version: bool = False self, db_conn: sqlite3.Connection, allow_outdated_version: bool = False
) -> None: ) -> None:
if not allow_outdated_version: if not allow_outdated_version:
version = sqlite3.sqlite_version_info
# Synapse is untested against older SQLite versions, and we don't want # Synapse is untested against older SQLite versions, and we don't want
# to let users upgrade to a version of Synapse with broken support for their # to let users upgrade to a version of Synapse with broken support for their
# sqlite version, because it risks leaving them with a half-upgraded db. # sqlite version, because it risks leaving them with a half-upgraded db.
if version < (3, 22, 0): if sqlite3.sqlite_version_info < (3, 27, 0):
raise RuntimeError("Synapse requires sqlite 3.22 or above.") raise RuntimeError("Synapse requires sqlite 3.27 or above.")
def check_new_database(self, txn: Cursor) -> None: def check_new_database(self, txn: Cursor) -> None:
"""Gets called when setting up a brand new database. This allows us to """Gets called when setting up a brand new database. This allows us to

View File

@ -54,7 +54,6 @@ class SQLBaseStoreTestCase(unittest.TestCase):
sqlite_config = {"name": "sqlite3"} sqlite_config = {"name": "sqlite3"}
engine = create_engine(sqlite_config) engine = create_engine(sqlite_config)
fake_engine = Mock(wraps=engine) fake_engine = Mock(wraps=engine)
fake_engine.can_native_upsert = False
fake_engine.in_transaction.return_value = False fake_engine.in_transaction.return_value = False
db = DatabasePool(Mock(), Mock(config=sqlite_config), fake_engine) db = DatabasePool(Mock(), Mock(config=sqlite_config), fake_engine)