Fix bug where we failed to delete old push actions (#13194)
This happened if we encountered a stream ordering in `event_push_actions` that had more rows than the batch size of the delete: if we don't delete any rows in an iteration, then the next time round we get the exact same stream ordering and get stuck.
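The failure is easy to reproduce outside Synapse. Below is a minimal sketch, not the actual storage code: it uses an in-memory SQLite table with an illustrative batch size and cutoff (`delete_one_batch`, `batch_size`, and `max_stream_ordering_to_delete` are names made up for the example) to show that the old exclusive bound deletes nothing when a single stream ordering holds more rows than the batch, while the inclusive bound always makes progress.

```python
# Minimal sketch of the batched-delete logic, assuming an in-memory SQLite
# table; names and sizes here are illustrative, not Synapse's real code.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE event_push_actions (stream_ordering INTEGER, highlight INTEGER)"
)
# Five rows sharing one stream ordering, with a batch size smaller than that.
conn.executemany("INSERT INTO event_push_actions VALUES (?, 0)", [(10,)] * 5)

batch_size = 2
max_stream_ordering_to_delete = 100


def delete_one_batch(inclusive: bool) -> int:
    """Delete one batch of old rows; return how many rows were removed."""
    op = "<=" if inclusive else "<"
    # Find the stream ordering of the row just past the batch boundary.
    row = conn.execute(
        f"SELECT stream_ordering FROM event_push_actions"
        f" WHERE stream_ordering {op} ? AND highlight = 0"
        f" ORDER BY stream_ordering ASC LIMIT 1 OFFSET ?",
        (max_stream_ordering_to_delete, batch_size),
    ).fetchone()
    stream_ordering = row[0] if row else max_stream_ordering_to_delete
    cur = conn.execute(
        f"DELETE FROM event_push_actions"
        f" WHERE stream_ordering {op} ? AND highlight = 0",
        (stream_ordering,),
    )
    return cur.rowcount


print(delete_one_batch(inclusive=False))  # 0: nothing deleted, we are stuck
print(delete_one_batch(inclusive=True))   # 5: progress, even past batch_size
```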
parent 68db233f0c
commit a0f51b059c
@@ -0,0 +1 @@
+Fix bug where rows were not deleted from `event_push_actions` table on large servers. Introduced in v1.62.0.
@@ -1114,7 +1114,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
             txn.execute(
                 """
                 SELECT stream_ordering FROM event_push_actions
-                WHERE stream_ordering < ? AND highlight = 0
+                WHERE stream_ordering <= ? AND highlight = 0
                 ORDER BY stream_ordering ASC LIMIT 1 OFFSET ?
                 """,
                 (
@@ -1129,10 +1129,12 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
             else:
                 stream_ordering = max_stream_ordering_to_delete

+            # We need to use a inclusive bound here to handle the case where a
+            # single stream ordering has more than `batch_size` rows.
             txn.execute(
                 """
                 DELETE FROM event_push_actions
-                WHERE stream_ordering < ? AND highlight = 0
+                WHERE stream_ordering <= ? AND highlight = 0
                 """,
                 (stream_ordering,),
             )
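For context, a cleanup like this is typically driven by an outer loop that repeats until a pass deletes nothing. A hypothetical driver (the real Synapse caller may be structured differently) shows why the exclusive bound left rows behind: a pass that deletes zero rows ends the loop, so an affected server stopped at the oversized stream ordering every time the job ran and the old rows were never removed.

```python
# Hypothetical driver loop, reusing delete_one_batch from the sketch above.
# With the exclusive bound a pass can delete zero rows while matching rows
# remain, so the loop exits early and the table keeps growing; with the
# inclusive bound any pass that finds rows deletes at least one, so the
# cleanup always finishes.
def remove_old_push_actions() -> None:
    while True:
        if delete_one_batch(inclusive=True) == 0:
            break
```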