2021-06-29 20:15:47 +02:00
|
|
|
# Copyright 2021 Matrix.org Foundation C.I.C.
|
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
import logging
|
2023-07-31 11:58:03 +02:00
|
|
|
from contextlib import AsyncExitStack
|
2021-06-29 20:15:47 +02:00
|
|
|
from types import TracebackType
|
2023-07-31 11:58:03 +02:00
|
|
|
from typing import TYPE_CHECKING, Collection, Optional, Set, Tuple, Type
|
2021-11-08 10:54:47 +01:00
|
|
|
from weakref import WeakValueDictionary
|
2021-06-29 20:15:47 +02:00
|
|
|
|
|
|
|
from twisted.internet.interfaces import IReactorCore
|
|
|
|
|
|
|
|
from synapse.metrics.background_process_metrics import wrap_as_background_process
|
|
|
|
from synapse.storage._base import SQLBaseStore
|
2021-12-13 18:05:00 +01:00
|
|
|
from synapse.storage.database import (
|
|
|
|
DatabasePool,
|
|
|
|
LoggingDatabaseConnection,
|
|
|
|
LoggingTransaction,
|
|
|
|
)
|
2023-07-05 18:25:00 +02:00
|
|
|
from synapse.storage.engines import PostgresEngine
|
2021-06-29 20:15:47 +02:00
|
|
|
from synapse.util import Clock
|
|
|
|
from synapse.util.stringutils import random_string
|
|
|
|
|
|
|
|
if TYPE_CHECKING:
|
|
|
|
from synapse.server import HomeServer
|
|
|
|
|
|
|
|
|
|
|
|
logger = logging.getLogger(__name__)


# How often to renew an acquired lock by updating the `last_renewed_ts` time in
# the lock table.
_RENEWAL_INTERVAL_MS = 30 * 1000

# How long before an acquired lock times out.
#
# Note this is deliberately several renewal intervals (4x), so a single
# missed/slow renewal does not cause the lock to be considered expired.
_LOCK_TIMEOUT_MS = 2 * 60 * 1000
|
|
|
|
|
|
|
|
|
|
|
|
class LockStore(SQLBaseStore):
    """Provides a best effort distributed lock between worker instances.

    Locks are identified by a name and key. A lock is acquired by inserting into
    the `worker_locks` table if a) there is no existing row for the name/key or
    b) the existing row has a `last_renewed_ts` older than `_LOCK_TIMEOUT_MS`.

    When a lock is taken out the instance inserts a random `token`, the instance
    that holds that token holds the lock until it drops (or times out).

    The instance that holds the lock should regularly update the
    `last_renewed_ts` column with the current time.
    """

    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ) -> None:
        super().__init__(database, db_conn, hs)

        self._reactor = hs.get_reactor()
        self._instance_name = hs.get_instance_id()

        # A map from `(lock_name, lock_key)` to lock that we think we
        # currently hold. Values are held weakly, so a `Lock` that is garbage
        # collected drops out of this map automatically.
        self._live_lock_tokens: WeakValueDictionary[
            Tuple[str, str], Lock
        ] = WeakValueDictionary()

        # A map from `(lock_name, lock_key, token)` to read/write lock that we
        # think we currently hold. For a given lock_name/lock_key, there can be
        # multiple read locks at a time but only one write lock (no mixing read
        # and write locks at the same time).
        self._live_read_write_lock_tokens: WeakValueDictionary[
            Tuple[str, str, str], Lock
        ] = WeakValueDictionary()

        # When we shut down we want to remove the locks. Technically this can
        # lead to a race, as we may drop the lock while we are still processing.
        # However, a) it should be a small window, b) the lock is best effort
        # anyway and c) we want to really avoid leaking locks when we restart.
        hs.get_reactor().addSystemEventTrigger(
            "before",
            "shutdown",
            self._on_shutdown,
        )

        # Name/key pairs for which an acquisition attempt is currently in
        # flight on this process; used to short-circuit concurrent attempts
        # at the same lock.
        self._acquiring_locks: Set[Tuple[str, str]] = set()

    @wrap_as_background_process("LockStore._on_shutdown")
    async def _on_shutdown(self) -> None:
        """Called when the server is shutting down: release all held locks."""
        logger.info("Dropping held locks due to shutdown")

        # We need to take a copy of the locks as dropping the locks will cause
        # the dictionary to change.
        locks = list(self._live_lock_tokens.values()) + list(
            self._live_read_write_lock_tokens.values()
        )

        for lock in locks:
            await lock.release()

        logger.info("Dropped locks due to shutdown")

    async def try_acquire_lock(self, lock_name: str, lock_key: str) -> Optional["Lock"]:
        """Try to acquire a lock for the given name/key. Will return an async
        context manager if the lock is successfully acquired, which *must* be
        used (otherwise the lock will leak).

        Returns:
            The acquired lock, or None if it could not be acquired (including
            when another acquisition attempt for the same name/key is already
            in flight on this process).
        """
        # Bail out early if this process is already mid-acquisition for the
        # same lock, rather than racing against ourselves.
        if (lock_name, lock_key) in self._acquiring_locks:
            return None
        try:
            self._acquiring_locks.add((lock_name, lock_key))
            return await self._try_acquire_lock(lock_name, lock_key)
        finally:
            self._acquiring_locks.discard((lock_name, lock_key))

    async def _try_acquire_lock(
        self, lock_name: str, lock_key: str
    ) -> Optional["Lock"]:
        """Try to acquire a lock for the given name/key. Will return an async
        context manager if the lock is successfully acquired, which *must* be
        used (otherwise the lock will leak).
        """

        # Check if this process has taken out a lock and if it's still valid.
        lock = self._live_lock_tokens.get((lock_name, lock_key))
        if lock and await lock.is_still_valid():
            return None

        now = self._clock.time_msec()
        token = random_string(6)

        def _try_acquire_lock_txn(txn: LoggingTransaction) -> bool:
            # We take out the lock if either a) there is no row for the lock
            # already, b) the existing row has timed out, or c) the row is
            # for this instance (which means the process got killed and
            # restarted)
            sql = """
                INSERT INTO worker_locks (lock_name, lock_key, instance_name, token, last_renewed_ts)
                VALUES (?, ?, ?, ?, ?)
                ON CONFLICT (lock_name, lock_key)
                DO UPDATE
                    SET
                        token = EXCLUDED.token,
                        instance_name = EXCLUDED.instance_name,
                        last_renewed_ts = EXCLUDED.last_renewed_ts
                    WHERE
                        worker_locks.last_renewed_ts < ?
                        OR worker_locks.instance_name = EXCLUDED.instance_name
            """
            txn.execute(
                sql,
                (
                    lock_name,
                    lock_key,
                    self._instance_name,
                    token,
                    now,
                    now - _LOCK_TIMEOUT_MS,
                ),
            )

            # We only acquired the lock if we inserted or updated the table.
            return bool(txn.rowcount)

        did_lock = await self.db_pool.runInteraction(
            "try_acquire_lock",
            _try_acquire_lock_txn,
            # We can autocommit here as we're executing a single query, this
            # will avoid serialization errors.
            db_autocommit=True,
        )
        if not did_lock:
            return None

        lock = Lock(
            self._reactor,
            self._clock,
            self,
            read_write=False,
            lock_name=lock_name,
            lock_key=lock_key,
            token=token,
        )

        self._live_lock_tokens[(lock_name, lock_key)] = lock

        return lock

    async def try_acquire_read_write_lock(
        self,
        lock_name: str,
        lock_key: str,
        write: bool,
    ) -> Optional["Lock"]:
        """Try to acquire a lock for the given name/key. Will return an async
        context manager if the lock is successfully acquired, which *must* be
        used (otherwise the lock will leak).

        Args:
            lock_name: name of the lock
            lock_key: key within the lock's namespace
            write: True to take a write lock, False for a read lock

        Returns:
            The acquired lock, or None if a conflicting lock is already held.
        """

        try:
            lock = await self.db_pool.runInteraction(
                "try_acquire_read_write_lock",
                self._try_acquire_read_write_lock_txn,
                lock_name,
                lock_key,
                write,
            )
        except self.database_engine.module.IntegrityError:
            # The insert violated the table's constraints, i.e. a conflicting
            # lock is already held, so we did not acquire it.
            return None

        return lock

    def _try_acquire_read_write_lock_txn(
        self,
        txn: LoggingTransaction,
        lock_name: str,
        lock_key: str,
        write: bool,
    ) -> "Lock":
        # We attempt to acquire the lock by inserting into
        # `worker_read_write_locks` and seeing if that fails any
        # constraints. If it doesn't then we have acquired the lock,
        # otherwise we haven't.
        #
        # Before that though we clear the table of any stale locks.

        now = self._clock.time_msec()
        token = random_string(6)

        delete_sql = """
            DELETE FROM worker_read_write_locks
                WHERE last_renewed_ts < ? AND lock_name = ? AND lock_key = ?;
        """

        insert_sql = """
            INSERT INTO worker_read_write_locks (lock_name, lock_key, write_lock, instance_name, token, last_renewed_ts)
            VALUES (?, ?, ?, ?, ?, ?)
        """

        if isinstance(self.database_engine, PostgresEngine):
            # For Postgres we can send these queries at the same time.
            txn.execute(
                delete_sql + ";" + insert_sql,
                (
                    # DELETE args
                    now - _LOCK_TIMEOUT_MS,
                    lock_name,
                    lock_key,
                    # UPSERT args
                    lock_name,
                    lock_key,
                    write,
                    self._instance_name,
                    token,
                    now,
                ),
            )
        else:
            # For SQLite these need to be two queries.
            txn.execute(
                delete_sql,
                (
                    now - _LOCK_TIMEOUT_MS,
                    lock_name,
                    lock_key,
                ),
            )
            txn.execute(
                insert_sql,
                (
                    lock_name,
                    lock_key,
                    write,
                    self._instance_name,
                    token,
                    now,
                ),
            )

        lock = Lock(
            self._reactor,
            self._clock,
            self,
            read_write=True,
            lock_name=lock_name,
            lock_key=lock_key,
            token=token,
        )

        def set_lock() -> None:
            self._live_read_write_lock_tokens[(lock_name, lock_key, token)] = lock

        # Only record the lock as held once the transaction has committed
        # (`call_after` runs on successful commit).
        txn.call_after(set_lock)

        return lock

    async def try_acquire_multi_read_write_lock(
        self,
        lock_names: Collection[Tuple[str, str]],
        write: bool,
    ) -> Optional[AsyncExitStack]:
        """Try to acquire multiple locks for the given names/keys. Will return
        an async context manager if the locks are successfully acquired, which
        *must* be used (otherwise the lock will leak).

        If only a subset of the locks can be acquired then it will immediately
        drop them and return `None`.
        """
        try:
            locks = await self.db_pool.runInteraction(
                "try_acquire_multi_read_write_lock",
                self._try_acquire_multi_read_write_lock_txn,
                lock_names,
                write,
            )
        except self.database_engine.module.IntegrityError:
            # At least one insert hit a constraint, and the transaction (and
            # hence every insert within it) is rolled back: all-or-nothing.
            return None

        # Bundle the individual locks into a single context manager so that
        # exiting it releases them all.
        stack = AsyncExitStack()

        for lock in locks:
            await stack.enter_async_context(lock)

        return stack

    def _try_acquire_multi_read_write_lock_txn(
        self,
        txn: LoggingTransaction,
        lock_names: Collection[Tuple[str, str]],
        write: bool,
    ) -> Collection["Lock"]:
        """Acquire every requested read/write lock within one transaction.

        Raises an IntegrityError (rolling back all inserts) if any single
        lock cannot be acquired.
        """
        locks = []

        for lock_name, lock_key in lock_names:
            lock = self._try_acquire_read_write_lock_txn(
                txn, lock_name, lock_key, write
            )
            locks.append(lock)

        return locks
|
|
|
|
|
2021-06-29 20:15:47 +02:00
|
|
|
|
|
|
|
class Lock:
    """An async context manager that manages an acquired lock, ensuring it is
    regularly renewed and dropping it when the context manager exits.

    The lock object has an `is_still_valid` method which can be used to
    double-check the lock is still valid, if e.g. processing work in a loop.

    For example:

        lock = await self.store.try_acquire_lock(...)
        if not lock:
            return

        async with lock:
            for item in work:
                await process(item)

                if not await lock.is_still_valid():
                    break
    """

    def __init__(
        self,
        reactor: IReactorCore,
        clock: Clock,
        store: LockStore,
        read_write: bool,
        lock_name: str,
        lock_key: str,
        token: str,
    ) -> None:
        self._reactor = reactor
        self._clock = clock
        self._store = store
        self._read_write = read_write
        self._lock_name = lock_name
        self._lock_key = lock_key

        self._token = token

        # Which table the lock row lives in depends on whether this is a
        # read/write lock or a plain lock.
        self._table = "worker_read_write_locks" if read_write else "worker_locks"

        # Periodically bump `last_renewed_ts` so the lock does not time out
        # while we still hold it. Note: `_renew` is a static method and only
        # plain values are passed, so this looping call holds no reference to
        # `self` (see the note on `_renew`).
        self._looping_call = clock.looping_call(
            self._renew,
            _RENEWAL_INTERVAL_MS,
            store,
            clock,
            read_write,
            lock_name,
            lock_key,
            token,
        )

        # Set once the lock has been released; guards against double-release
        # and against reusing the context manager.
        self._dropped = False

    @staticmethod
    @wrap_as_background_process("Lock._renew")
    async def _renew(
        store: LockStore,
        clock: Clock,
        read_write: bool,
        lock_name: str,
        lock_key: str,
        token: str,
    ) -> None:
        """Renew the lock.

        Note: this is a static method, rather than using self.*, so that we
        don't end up with a reference to `self` in the reactor, which would stop
        this from being cleaned up if we dropped the context manager.
        """
        table = "worker_read_write_locks" if read_write else "worker_locks"
        # Matching on the token means we only renew the lock if we still hold
        # it; if another instance has taken it over this update is a no-op.
        await store.db_pool.simple_update(
            table=table,
            keyvalues={
                "lock_name": lock_name,
                "lock_key": lock_key,
                "token": token,
            },
            updatevalues={"last_renewed_ts": clock.time_msec()},
            desc="renew_lock",
        )

    async def is_still_valid(self) -> bool:
        """Check if the lock is still held by us"""
        last_renewed_ts = await self._store.db_pool.simple_select_one_onecol(
            table=self._table,
            keyvalues={
                "lock_name": self._lock_name,
                "lock_key": self._lock_key,
                "token": self._token,
            },
            retcol="last_renewed_ts",
            allow_none=True,
            desc="is_lock_still_valid",
        )
        # We still hold the lock if our row exists and was renewed within the
        # timeout window.
        return (
            last_renewed_ts is not None
            and self._clock.time_msec() - _LOCK_TIMEOUT_MS < last_renewed_ts
        )

    async def __aenter__(self) -> None:
        if self._dropped:
            raise Exception("Cannot reuse a Lock object")

    async def __aexit__(
        self,
        _exctype: Optional[Type[BaseException]],
        _excinst: Optional[BaseException],
        _exctb: Optional[TracebackType],
    ) -> bool:
        await self.release()

        # Returning False means any exception raised in the `async with` body
        # is propagated.
        return False

    async def release(self) -> None:
        """Release the lock.

        This is automatically called when using the lock as a context manager.
        """

        if self._dropped:
            return

        # Stop renewing first so we don't resurrect the row after deleting it.
        if self._looping_call.running:
            self._looping_call.stop()

        await self._store.db_pool.simple_delete(
            table=self._table,
            keyvalues={
                "lock_name": self._lock_name,
                "lock_key": self._lock_key,
                "token": self._token,
            },
            desc="drop_lock",
        )

        # Remove ourselves from the store's map of live locks.
        if self._read_write:
            self._store._live_read_write_lock_tokens.pop(
                (self._lock_name, self._lock_key, self._token), None
            )
        else:
            self._store._live_lock_tokens.pop((self._lock_name, self._lock_key), None)

        self._dropped = True

    def __del__(self) -> None:
        if not self._dropped:
            # We should not be dropped without the lock being released (unless
            # we're shutting down), but if we are then let's at least stop
            # renewing the lock.
            if self._looping_call.running:
                self._looping_call.stop()

            if self._reactor.running:
                logger.error(
                    "Lock for (%s, %s) dropped without being released",
                    self._lock_name,
                    self._lock_key,
                )
|