Use OrderedDict for @cached backing store, so we can evict the oldest key unbiased

pull/90/head
Paul "LeoNerd" Evans 2015-02-23 18:41:58 +00:00
parent f53fcbce97
commit 9640510de2
1 changed file with 4 additions and 7 deletions

View File

@ -23,7 +23,7 @@ from synapse.util.lrucache import LruCache
 from twisted.internet import defer

-import collections
+from collections import namedtuple, OrderedDict
 import simplejson as json
 import sys
 import time
@ -54,14 +54,11 @@ def cached(max_entries=1000):
    calling the calculation function.
    """
    def wrap(orig):
-        cache = {}
+        cache = OrderedDict()

        def prefill(key, value):
            while len(cache) > max_entries:
-                # TODO(paul): This feels too biased. However, a random index
-                # would be a bit inefficient, walking the list of keys just
-                # to ignore most of them?
-                del cache[cache.keys()[0]]
+                cache.popitem(last=False)

            cache[key] = value
@ -836,7 +833,7 @@ class JoinHelper(object):
        for table in self.tables:
            res += [f for f in table.fields if f not in res]

-        self.EntryType = collections.namedtuple("JoinHelperEntry", res)
+        self.EntryType = namedtuple("JoinHelperEntry", res)

    def get_fields(self, **prefixes):
        """Get a string representing a list of fields for use in SELECT