bump purge logging to info

Purging history takes ages and the only sign of any progress is the logs, so
having some logs at info level is useful.
pull/2858/head
Richard van der Hoff 2018-02-07 17:34:35 +00:00
parent 671540dccf
commit 61ffaa8137
1 changed file with 14 additions and 13 deletions


@@ -2081,7 +2081,7 @@ class EventsStore(SQLBaseStore):
                 400, "topological_ordering is greater than forward extremeties"
             )
 
-        logger.debug("[purge] looking for events to delete")
+        logger.info("[purge] looking for events to delete")
 
         txn.execute(
             "SELECT event_id, state_key FROM events"
@@ -2102,7 +2102,7 @@ class EventsStore(SQLBaseStore):
         for event_id, state_key in event_rows:
             txn.call_after(self._get_state_group_for_event.invalidate, (event_id,))
 
-        logger.debug("[purge] Finding new backward extremities")
+        logger.info("[purge] Finding new backward extremities")
 
         # We calculate the new entries for the backward extremeties by finding
         # all events that point to events that are to be purged
@@ -2116,7 +2116,7 @@ class EventsStore(SQLBaseStore):
         )
         new_backwards_extrems = txn.fetchall()
 
-        logger.debug("[purge] replacing backward extremities: %r", new_backwards_extrems)
+        logger.info("[purge] replacing backward extremities: %r", new_backwards_extrems)
 
         txn.execute(
             "DELETE FROM event_backward_extremities WHERE room_id = ?",
@@ -2132,7 +2132,7 @@ class EventsStore(SQLBaseStore):
             ]
         )
 
-        logger.debug("[purge] finding redundant state groups")
+        logger.info("[purge] finding redundant state groups")
 
         # Get all state groups that are only referenced by events that are
         # to be deleted.
@@ -2149,15 +2149,15 @@ class EventsStore(SQLBaseStore):
         )
 
         state_rows = txn.fetchall()
-        logger.debug("[purge] found %i redundant state groups", len(state_rows))
+        logger.info("[purge] found %i redundant state groups", len(state_rows))
 
         # make a set of the redundant state groups, so that we can look them up
         # efficiently
         state_groups_to_delete = set([sg for sg, in state_rows])
 
         # Now we get all the state groups that rely on these state groups
-        logger.debug("[purge] finding state groups which depend on redundant"
-                     " state groups")
+        logger.info("[purge] finding state groups which depend on redundant"
+                    " state groups")
         remaining_state_groups = []
         for i in xrange(0, len(state_rows), 100):
             chunk = [sg for sg, in state_rows[i:i + 100]]
@@ -2182,7 +2182,7 @@ class EventsStore(SQLBaseStore):
         # Now we turn the state groups that reference to-be-deleted state
         # groups to non delta versions.
         for sg in remaining_state_groups:
-            logger.debug("[purge] de-delta-ing remaining state group %s", sg)
+            logger.info("[purge] de-delta-ing remaining state group %s", sg)
             curr_state = self._get_state_groups_from_groups_txn(
                 txn, [sg], types=None
             )
@@ -2219,7 +2219,7 @@ class EventsStore(SQLBaseStore):
                 ],
             )
 
-        logger.debug("[purge] removing redundant state groups")
+        logger.info("[purge] removing redundant state groups")
         txn.executemany(
             "DELETE FROM state_groups_state WHERE state_group = ?",
             state_rows
@@ -2230,13 +2230,13 @@ class EventsStore(SQLBaseStore):
         )
 
         # Delete all non-state
-        logger.debug("[purge] removing events from event_to_state_groups")
+        logger.info("[purge] removing events from event_to_state_groups")
         txn.executemany(
             "DELETE FROM event_to_state_groups WHERE event_id = ?",
             [(event_id,) for event_id, _ in event_rows]
         )
 
-        logger.debug("[purge] updating room_depth")
+        logger.info("[purge] updating room_depth")
         txn.execute(
             "UPDATE room_depth SET min_depth = ? WHERE room_id = ?",
             (topological_ordering, room_id,)
@@ -2258,7 +2258,8 @@ class EventsStore(SQLBaseStore):
             "event_signatures",
             "rejections",
         ):
-            logger.debug("[purge] removing remote non-state events from %s", table)
+            logger.info("[purge] removing remote non-state events from %s",
+                        table)
 
             txn.executemany(
                 "DELETE FROM %s WHERE event_id = ?" % (table,),
@@ -2266,7 +2267,7 @@ class EventsStore(SQLBaseStore):
             )
 
         # Mark all state and own events as outliers
-        logger.debug("[purge] marking remaining events as outliers")
+        logger.info("[purge] marking remaining events as outliers")
         txn.executemany(
             "UPDATE events SET outlier = ?"
             " WHERE event_id = ?",