Fix error on sqlite 3.7
Create the url_cache index on local_media_repository as a background update, so that we can detect whether we are on sqlite or not and create a partial or complete index accordingly. To avoid running the cleanup job before we have built the index, add a bailout which will defer the cleanup if the bg updates are still running.

Fixes https://github.com/matrix-org/synapse/issues/2572.
parent 2145ee1976
commit 7098b65cb8
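In outline, the change works like the sketch below. This is an illustrative toy only, not the real Synapse code: SimpleStore and expire_url_cache_data are made-up stand-ins, and only the shape of the has_completed_background_updates() guard mirrors the actual patch.

# Rough sketch of the bailout described above (illustrative names throughout).


class SimpleStore(object):
    def __init__(self):
        # flips to True once every registered background update has run
        self._all_done = False

    def has_completed_background_updates(self):
        return self._all_done


def expire_url_cache_data(store):
    # The periodic cleanup defers itself while background updates (such as
    # building the local_media_repository_url_idx index) are still running,
    # so it never runs against an index that doesn't exist yet.
    if not store.has_completed_background_updates():
        print("Still running DB updates; skipping expiry")
        return
    print("Running url preview cache expiry")
    # ... delete expired url cache entries, media and thumbnails ...


store = SimpleStore()
expire_url_cache_data(store)  # skipped: the index may not have been built
store._all_done = True        # background updates finished
expire_url_cache_data(store)  # now the expiry runs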
@@ -348,11 +348,16 @@ class PreviewUrlResource(Resource):
     def _expire_url_cache_data(self):
         """Clean up expired url cache content, media and thumbnails.
         """

         # TODO: Delete from backup media store

         now = self.clock.time_msec()

+        logger.info("Running url preview cache expiry")
+
+        if not self.store.has_completed_background_updates():
+            logger.info("Still running DB updates; skipping expiry")
+            return
+
         # First we delete expired url cache entries
         media_ids = yield self.store.get_expired_url_cache(now)

@@ -426,8 +431,7 @@ class PreviewUrlResource(Resource):
         yield self.store.delete_url_cache_media(removed_media)

-        if removed_media:
-            logger.info("Deleted %d media from url cache", len(removed_media))
+        logger.info("Deleted %d media from url cache", len(removed_media))


 def decode_and_calc_og(body, media_uri, request_encoding=None):

@@ -85,6 +85,7 @@ class BackgroundUpdateStore(SQLBaseStore):
         self._background_update_performance = {}
         self._background_update_queue = []
         self._background_update_handlers = {}
+        self._all_done = False

     @defer.inlineCallbacks
     def start_doing_background_updates(self):

@@ -106,8 +107,17 @@ class BackgroundUpdateStore(SQLBaseStore):
                     "No more background updates to do."
                     " Unscheduling background update task."
                 )
+                self._all_done = True
                 defer.returnValue(None)

+    def has_completed_background_updates(self):
+        """Check if all the background updates have completed
+
+        Returns:
+            bool: True if all background updates have completed
+        """
+        return self._all_done
+
     @defer.inlineCallbacks
     def do_next_background_update(self, desired_duration_ms):
         """Does some amount of work on the next queued background update

@@ -269,7 +279,7 @@ class BackgroundUpdateStore(SQLBaseStore):
         # Sqlite doesn't support concurrent creation of indexes.
         #
         # We don't use partial indices on SQLite as it wasn't introduced
-        # until 3.8, and wheezy has 3.7
+        # until 3.8, and wheezy and CentOS 7 have 3.7
         #
         # We assume that sqlite doesn't give us invalid indices; however
         # we may still end up with the index existing but the

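For context on the comment above, the engine-dependent behaviour boils down to roughly the following SQL (assumed for illustration; the exact statements Synapse generates may differ): Postgres gets a partial index built concurrently, while SQLite, where partial indexes only arrived in 3.8, gets a complete index on created_ts.

# Sketch only: roughly what each engine ends up running for this update.

def url_cache_index_sql(is_postgres):
    if is_postgres:
        # partial index, created without blocking writers
        return (
            "CREATE INDEX CONCURRENTLY local_media_repository_url_idx"
            " ON local_media_repository(created_ts)"
            " WHERE url_cache IS NOT NULL"
        )
    # SQLite on wheezy / CentOS 7 is 3.7, which has neither concurrent index
    # creation nor partial indexes, so fall back to a complete index.
    return (
        "CREATE INDEX local_media_repository_url_idx"
        " ON local_media_repository(created_ts)"
    )


print(url_cache_index_sql(is_postgres=True))
print(url_cache_index_sql(is_postgres=False))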
@@ -12,13 +12,23 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from ._base import SQLBaseStore
+from synapse.storage.background_updates import BackgroundUpdateStore


-class MediaRepositoryStore(SQLBaseStore):
+class MediaRepositoryStore(BackgroundUpdateStore):
     """Persistence for attachments and avatars"""

+    def __init__(self, db_conn, hs):
+        super(MediaRepositoryStore, self).__init__(db_conn, hs)
+
+        self.register_background_index_update(
+            update_name='local_media_repository_url_idx',
+            index_name='local_media_repository_url_idx',
+            table='local_media_repository',
+            columns=['created_ts'],
+            where_clause='url_cache IS NOT NULL',
+        )
+
     def get_default_thumbnails(self, top_level_type, sub_type):
         return []

@@ -13,7 +13,10 @@
  * limitations under the License.
  */

-CREATE INDEX local_media_repository_url_idx ON local_media_repository(created_ts) WHERE url_cache IS NOT NULL;
+-- this didn't work on SQLite 3.7 (because of lack of partial indexes), so was
+-- removed and replaced with 46/local_media_repository_url_idx.sql.
+--
+-- CREATE INDEX local_media_repository_url_idx ON local_media_repository(created_ts) WHERE url_cache IS NOT NULL;

 -- we need to change `expires` to `expires_ts` so that we can index on it. SQLite doesn't support
 -- indices on expressions until 3.9.

@@ -0,0 +1,24 @@
+/* Copyright 2017 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- register a background update which will recreate the
+-- local_media_repository_url_idx index.
+--
+-- We do this as a bg update not because it is a particularly onerous
+-- operation, but because we'd like it to be a partial index if possible, and
+-- the background_index_update code will understand whether we are on
+-- postgres or sqlite and behave accordingly.
+INSERT INTO background_updates (update_name, progress_json) VALUES
+    ('local_media_repository_url_idx', '{}');
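Putting the schema delta and the Python registration together, a minimal in-memory model of the flow might look as follows. This is illustrative only: MiniBackgroundUpdater is not a real Synapse class, and the real BackgroundUpdateStore tracks its queue in the background_updates table rather than a Python list.

# Minimal, self-contained model of the registration/dispatch flow.

class MiniBackgroundUpdater(object):
    def __init__(self):
        self._handlers = {}
        # stands in for rows in the background_updates table, e.g. the
        # ('local_media_repository_url_idx', '{}') row inserted above
        self._queue = []
        self._all_done = False

    def register_background_index_update(self, update_name, **kwargs):
        # In Synapse the handler builds the index (partial on Postgres,
        # complete on SQLite); here it just records what it was asked to do.
        self._handlers[update_name] = lambda: kwargs

    def add_update_row(self, update_name):
        self._queue.append(update_name)

    def do_next_background_update(self):
        if not self._queue:
            self._all_done = True
            return None
        name = self._queue.pop(0)
        return self._handlers[name]()

    def has_completed_background_updates(self):
        return self._all_done


updater = MiniBackgroundUpdater()
updater.register_background_index_update(
    'local_media_repository_url_idx',
    table='local_media_repository',
    columns=['created_ts'],
    where_clause='url_cache IS NOT NULL',
)
updater.add_update_row('local_media_repository_url_idx')

while updater.do_next_background_update() is not None:
    pass
print(updater.has_completed_background_updates())  # True: expiry may now run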