2014-08-12 16:10:52 +02:00
|
|
|
# -*- coding: utf-8 -*-
|
2015-01-06 14:21:39 +01:00
|
|
|
# Copyright 2014, 2015 OpenMarket Ltd
|
2014-08-12 16:10:52 +02:00
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
import logging
|
|
|
|
|
|
|
|
from synapse.api.errors import StoreError
|
2014-09-12 18:11:00 +02:00
|
|
|
from synapse.util.logutils import log_function
|
2015-05-08 17:32:18 +02:00
|
|
|
from synapse.util.logcontext import preserve_context_over_fn, LoggingContext
|
2015-02-11 16:01:15 +01:00
|
|
|
from synapse.util.lrucache import LruCache
|
2015-03-04 17:04:46 +01:00
|
|
|
import synapse.metrics
|
2014-08-12 16:10:52 +02:00
|
|
|
|
2015-04-09 12:41:36 +02:00
|
|
|
from util.id_generators import IdGenerator, StreamIdGenerator
|
|
|
|
|
2014-11-14 12:16:50 +01:00
|
|
|
from twisted.internet import defer
|
2014-08-12 16:10:52 +02:00
|
|
|
|
2015-02-23 19:41:58 +01:00
|
|
|
from collections import namedtuple, OrderedDict
|
2015-05-14 17:54:35 +02:00
|
|
|
|
2015-03-17 18:19:22 +01:00
|
|
|
import functools
|
2015-07-27 14:57:29 +02:00
|
|
|
import inspect
|
2014-10-29 17:59:24 +01:00
|
|
|
import sys
|
2014-10-28 11:34:05 +01:00
|
|
|
import time
|
2015-05-05 15:08:03 +02:00
|
|
|
import threading
|
2014-08-13 17:27:14 +02:00
|
|
|
|
2015-05-05 17:24:04 +02:00
|
|
|
# When True, every cache hit in CacheDescriptor is cross-checked against a
# freshly computed value.  Very slow -- only for debugging cache coherency.
DEBUG_CACHES = False

logger = logging.getLogger(__name__)

# Dedicated loggers so SQL text, transaction lifecycle and timing summaries
# can be filtered and levelled independently of the module logger.
sql_logger = logging.getLogger("synapse.storage.SQL")
transaction_logger = logging.getLogger("synapse.storage.txn")
perf_logger = logging.getLogger("synapse.storage.TIME")


metrics = synapse.metrics.get_metrics_for("synapse.storage")

# Time spent between scheduling a database interaction and it actually
# starting to run on a pooled connection.
sql_scheduling_timer = metrics.register_distribution("schedule_time")

# Per-SQL-verb query timings and per-description transaction timings.
sql_query_timer = metrics.register_distribution("query_time", labels=["verb"])
sql_txn_timer = metrics.register_distribution("transaction_time", labels=["desc"])

# Registry of every Cache instance's backing store, keyed by cache name,
# exported as a per-name size metric via the lambda below.
caches_by_name = {}
cache_counter = metrics.register_cache(
    "cache",
    lambda: {(name,): len(caches_by_name[name]) for name in caches_by_name.keys()},
    labels=["name"],
)
|
2015-03-04 20:45:16 +01:00
|
|
|
|
|
|
|
|
2015-03-20 19:13:49 +01:00
|
|
|
class Cache(object):
    """A named cache keyed on tuples of exactly `keylen` items.

    Backed either by an LruCache (lru=True) or by an OrderedDict with FIFO
    eviction once `max_entries` is reached.  A sequence number is bumped on
    every invalidation so that a SELECT which raced with the invalidation
    cannot repopulate the cache with stale data (SYN-369).

    Not thread-safe: check_thread() pins all access to the first thread that
    touches the cache.
    """

    def __init__(self, name, max_entries=1000, keylen=1, lru=False):
        if lru:
            # LruCache enforces its own size bound, so no manual eviction.
            self.cache = LruCache(max_size=max_entries)
            self.max_entries = None
        else:
            self.cache = OrderedDict()
            self.max_entries = max_entries

        self.name = name
        self.keylen = keylen
        # Bumped by invalidate()/invalidate_all(); checked by update().
        self.sequence = 0
        # The thread this cache is pinned to; set on first access.
        self.thread = None
        caches_by_name[name] = self.cache

    def check_thread(self):
        """Raise ValueError if called from a different thread than the one
        that first accessed this cache."""
        expected_thread = self.thread
        if expected_thread is None:
            self.thread = threading.current_thread()
        else:
            if expected_thread is not threading.current_thread():
                raise ValueError(
                    "Cache objects can only be accessed from the main thread"
                )

    def get(self, *keyargs):
        """Return the cached value for `keyargs`; raises KeyError on a miss.

        Raises:
            ValueError: if the wrong number of key items is supplied.
        """
        if len(keyargs) != self.keylen:
            # Fix: the count was previously passed as a second positional
            # exception arg, so the %d was never interpolated.
            raise ValueError("Expected a key to have %d items" % (self.keylen,))

        if keyargs in self.cache:
            cache_counter.inc_hits(self.name)
            return self.cache[keyargs]

        cache_counter.inc_misses(self.name)
        raise KeyError()

    def update(self, sequence, *args):
        """Insert args[:-1] -> args[-1], but only if `sequence` still matches
        the cache's current sequence number (i.e. no invalidation happened
        since the caller read it -- SYN-369)."""
        self.check_thread()
        if self.sequence == sequence:
            # Only update the cache if the caches sequence number matches the
            # number that the cache had before the SELECT was started (SYN-369)
            self.prefill(*args)

    def prefill(self, *args):  # because I can't *keyargs, value
        """Unconditionally insert args[:-1] -> args[-1], evicting oldest
        entries first when over max_entries (OrderedDict backend only)."""
        keyargs = args[:-1]
        value = args[-1]

        if len(keyargs) != self.keylen:
            # Fix: %-format the message (was passed as a second exception arg).
            raise ValueError("Expected a key to have %d items" % (self.keylen,))

        if self.max_entries is not None:
            while len(self.cache) >= self.max_entries:
                self.cache.popitem(last=False)

        self.cache[keyargs] = value

    def invalidate(self, *keyargs):
        """Remove a single entry and bump the sequence number."""
        self.check_thread()
        if len(keyargs) != self.keylen:
            # Fix: %-format the message (was passed as a second exception arg).
            raise ValueError("Expected a key to have %d items" % (self.keylen,))

        # Increment the sequence number so that any SELECT statements that
        # raced with the INSERT don't update the cache (SYN-369)
        self.sequence += 1
        self.cache.pop(keyargs, None)

    def invalidate_all(self):
        """Remove every entry and bump the sequence number."""
        self.check_thread()
        self.sequence += 1
        self.cache.clear()
|
|
|
|
|
2015-03-20 19:13:49 +01:00
|
|
|
|
2015-06-03 15:45:17 +02:00
|
|
|
class CacheDescriptor(object):
    """ A method decorator that applies a memoizing cache around the function.

    The function is presumed to take zero or more arguments, which are used in
    a tuple as the key for the cache. Hits are served directly from the cache;
    misses use the function body to generate the value.

    The wrapped function has an additional member, a callable called
    "invalidate". This can be used to remove individual entries from the cache.

    The wrapped function has another additional callable, called "prefill",
    which can be used to insert values into the cache specifically, without
    calling the calculation function.
    """
    def __init__(self, orig, max_entries=1000, num_args=1, lru=False,
                 inlineCallbacks=False):
        # orig: the undecorated method; num_args: how many leading explicit
        # positional args (after self) form the cache key.
        self.orig = orig

        if inlineCallbacks:
            self.function_to_call = defer.inlineCallbacks(orig)
        else:
            self.function_to_call = orig

        self.max_entries = max_entries
        self.num_args = num_args
        self.lru = lru

        # Names of the key-forming parameters (skipping "self").
        self.arg_names = inspect.getargspec(orig).args[1:num_args+1]

        # getargspec only lists explicit positional parameters, so this
        # catches attempts to key off of *args/**kwargs.
        if len(self.arg_names) < self.num_args:
            raise Exception(
                "Not enough explicit positional arguments to key off of for %r."
                " (@cached cannot key off of *args or **kwars)"
                % (orig.__name__,)
            )

    def __get__(self, obj, objtype=None):
        # Build one Cache per *instance*: the wrapped function is stored in
        # obj.__dict__ below, so this descriptor only fires on first access.
        cache = Cache(
            name=self.orig.__name__,
            max_entries=self.max_entries,
            keylen=self.num_args,
            lru=self.lru,
        )

        @functools.wraps(self.orig)
        @defer.inlineCallbacks
        def wrapped(*args, **kwargs):
            # Normalise positional/keyword calling conventions so the key is
            # stable regardless of how the method was invoked.
            arg_dict = inspect.getcallargs(self.orig, obj, *args, **kwargs)
            keyargs = [arg_dict[arg_nm] for arg_nm in self.arg_names]
            try:
                cached_result = cache.get(*keyargs)
                if DEBUG_CACHES:
                    # Cross-check the hit against a fresh computation.
                    actual_result = yield self.function_to_call(obj, *args, **kwargs)
                    if actual_result != cached_result:
                        logger.error(
                            "Stale cache entry %s%r: cached: %r, actual %r",
                            self.orig.__name__, keyargs,
                            cached_result, actual_result,
                        )
                        raise ValueError("Stale cache entry")
                defer.returnValue(cached_result)
            except KeyError:
                # Get the sequence number of the cache before reading from the
                # database so that we can tell if the cache is invalidated
                # while the SELECT is executing (SYN-369)
                sequence = cache.sequence

                ret = yield self.function_to_call(obj, *args, **kwargs)

                # Only stored if no invalidation raced us (see Cache.update).
                cache.update(sequence, *(keyargs + [ret]))

                defer.returnValue(ret)

        wrapped.invalidate = cache.invalidate
        wrapped.invalidate_all = cache.invalidate_all
        wrapped.prefill = cache.prefill

        # Cache the bound wrapper on the instance so subsequent attribute
        # lookups bypass this descriptor entirely.
        obj.__dict__[self.orig.__name__] = wrapped

        return wrapped
|
|
|
|
|
2015-06-03 15:45:17 +02:00
|
|
|
|
|
|
|
def cached(max_entries=1000, num_args=1, lru=False):
    """Return a decorator that memoizes a method via CacheDescriptor.

    The decorated method gains .invalidate/.invalidate_all/.prefill
    callables (see CacheDescriptor).
    """
    def decorate(orig):
        return CacheDescriptor(
            orig,
            max_entries=max_entries,
            num_args=num_args,
            lru=lru,
        )
    return decorate
|
2015-02-19 18:29:39 +01:00
|
|
|
|
|
|
|
|
2015-07-27 14:57:29 +02:00
|
|
|
def cachedInlineCallbacks(max_entries=1000, num_args=1, lru=False):
    """Like `cached`, but additionally wraps the method in
    defer.inlineCallbacks before caching (so generators become Deferreds).
    """
    def decorate(orig):
        return CacheDescriptor(
            orig,
            max_entries=max_entries,
            num_args=num_args,
            lru=lru,
            inlineCallbacks=True,
        )
    return decorate
|
|
|
|
|
|
|
|
|
2014-09-12 15:37:55 +02:00
|
|
|
class LoggingTransaction(object):
    """An object that almost-transparently proxies for the 'txn' object
    passed to the constructor. Adds logging and metrics to the .execute()
    method."""
    __slots__ = ["txn", "name", "database_engine", "after_callbacks"]

    def __init__(self, txn, name, database_engine, after_callbacks):
        # object.__setattr__ is required because __setattr__ below forwards
        # all attribute writes to the wrapped cursor.
        object.__setattr__(self, "txn", txn)
        object.__setattr__(self, "name", name)
        object.__setattr__(self, "database_engine", database_engine)
        object.__setattr__(self, "after_callbacks", after_callbacks)

    def call_after(self, callback, *args):
        """Call the given callback on the main twisted thread after the
        transaction has finished. Used to invalidate the caches on the
        correct thread.
        """
        self.after_callbacks.append((callback, args))

    def __getattr__(self, name):
        # Anything not defined on this wrapper is delegated to the cursor.
        return getattr(self.txn, name)

    def __setattr__(self, name, value):
        setattr(self.txn, name, value)

    def execute(self, sql, *args):
        self._do_execute(self.txn.execute, sql, *args)

    def executemany(self, sql, *args):
        self._do_execute(self.txn.executemany, sql, *args)

    def _do_execute(self, func, sql, *args):
        """Run func(sql, *args) with debug logging and per-verb timing."""
        # TODO(paul): Maybe use 'info' and 'debug' for values?
        sql_logger.debug("[SQL] {%s} %s", self.name, sql)

        # Translate "?" placeholders to the engine's paramstyle.
        sql = self.database_engine.convert_param_style(sql)

        if args:
            try:
                sql_logger.debug(
                    "[SQL values] {%s} %r",
                    self.name, args[0]
                )
            except Exception:
                # Don't let logging failures stop SQL from working.
                # (Was a bare `except:`, which would also have swallowed
                # KeyboardInterrupt/SystemExit.)
                pass

        start = time.time() * 1000

        try:
            return func(
                sql, *args
            )
        except Exception as e:
            logger.debug("[SQL FAIL] {%s} %s", self.name, e)
            raise
        finally:
            msecs = (time.time() * 1000) - start
            sql_logger.debug("[SQL time] {%s} %f", self.name, msecs)
            # Label the timing by SQL verb (first word of the statement).
            sql_query_timer.inc_by(msecs, sql.split()[0])
|
2014-09-12 15:37:55 +02:00
|
|
|
|
2014-08-12 16:10:52 +02:00
|
|
|
|
2015-02-10 15:50:53 +01:00
|
|
|
class PerformanceCounters(object):
    """Accumulates per-key call counts and cumulative durations, and reports
    the keys that consumed the largest share of each interval."""

    def __init__(self):
        # key -> (call count, cumulative duration in ms), since creation.
        self.current_counters = {}
        # Snapshot of current_counters taken at the end of the last interval.
        self.previous_counters = {}

    def update(self, key, start_time, end_time=None):
        """Record one call to `key` lasting end_time - start_time ms
        (end_time defaults to now).  Returns the end time so callers can
        chain consecutive measurements."""
        if end_time is None:
            end_time = time.time() * 1000
        count, cum_time = self.current_counters.get(key, (0, 0))
        self.current_counters[key] = (
            count + 1,
            cum_time + (end_time - start_time),
        )
        return end_time

    def interval(self, interval_duration, limit=3):
        """Return a formatted summary of the `limit` keys with the largest
        share of `interval_duration` ms since the previous call."""
        deltas = []
        for name, (count, cum_time) in self.current_counters.items():
            prev_count, prev_time = self.previous_counters.get(name, (0, 0))
            deltas.append((
                (cum_time - prev_time) / interval_duration,
                count - prev_count,
                name,
            ))

        # Next interval is measured relative to this snapshot.
        self.previous_counters = dict(self.current_counters)

        deltas.sort(reverse=True)

        return ", ".join(
            "%s(%d): %.3f%%" % (name, count, 100 * ratio)
            for ratio, count, name in deltas[:limit]
        )
|
|
|
|
|
|
|
|
|
2014-08-12 16:10:52 +02:00
|
|
|
class SQLBaseStore(object):
|
2014-10-28 12:18:04 +01:00
|
|
|
_TXN_ID = 0
|
2014-08-12 16:10:52 +02:00
|
|
|
|
|
|
|
def __init__(self, hs):
    """Set up the store around the homeserver's DB pool, clock and engine.

    Args:
        hs: the HomeServer object; supplies get_db_pool(), get_clock(),
            config.event_cache_size and database_engine.
    """
    self.hs = hs
    self._db_pool = hs.get_db_pool()
    self._clock = hs.get_clock()

    # Wall-clock ms spent inside DB transactions; sampled periodically by
    # start_profiling() to compute a "time in DB" ratio.
    self._previous_txn_total_time = 0
    self._current_txn_total_time = 0
    self._previous_loop_ts = 0

    # TODO(paul): These can eventually be removed once the metrics code
    # is running in mainline, and we have some nice monitoring frontends
    # to watch it
    self._txn_perf_counters = PerformanceCounters()
    self._get_event_counters = PerformanceCounters()

    # keylen=3: events are keyed on (event_id, check_redacted,
    # get_prev_content) -- presumably; confirm against the events store.
    self._get_event_cache = Cache("*getEvent*", keylen=3, lru=True,
                                  max_entries=hs.config.event_cache_size)

    # State shared by the background event-fetching machinery.
    self._event_fetch_lock = threading.Condition()
    self._event_fetch_list = []
    self._event_fetch_ongoing = 0

    self._pending_ds = []

    self.database_engine = hs.database_engine

    # ID generators for the various tables that need monotonic IDs.
    self._stream_id_gen = StreamIdGenerator("events", "stream_ordering")
    self._transaction_id_gen = IdGenerator("sent_transactions", "id", self)
    self._state_groups_id_gen = IdGenerator("state_groups", "id", self)
    self._access_tokens_id_gen = IdGenerator("access_tokens", "id", self)
    self._pushers_id_gen = IdGenerator("pushers", "id", self)
    self._push_rule_id_gen = IdGenerator("push_rules", "id", self)
    self._push_rules_enable_id_gen = IdGenerator("push_rules_enable", "id", self)
    self._receipts_id_gen = StreamIdGenerator("receipts_linearized", "stream_id")
|
2015-04-01 15:12:33 +02:00
|
|
|
|
2015-02-09 15:22:52 +01:00
|
|
|
def start_profiling(self):
    """Start a 10-second loop that logs the fraction of wall-clock time
    spent inside database transactions, plus the top-3 transaction and
    event-fetch counters for the interval."""
    self._previous_loop_ts = self._clock.time_msec()

    def loop():
        # Delta of transaction time over delta of wall-clock time.
        curr_txn_time = self._current_txn_total_time
        prev_txn_time = self._previous_txn_total_time
        self._previous_txn_total_time = curr_txn_time

        now = self._clock.time_msec()
        then = self._previous_loop_ts
        self._previous_loop_ts = now

        ratio = (curr_txn_time - prev_txn_time) / (now - then)

        top_three_counters = self._txn_perf_counters.interval(
            now - then, limit=3
        )

        top_3_event_counters = self._get_event_counters.interval(
            now - then, limit=3
        )

        perf_logger.info(
            "Total database time: %.3f%% {%s} {%s}",
            ratio * 100, top_three_counters, top_3_event_counters
        )

    self._clock.looping_call(loop, 10000)
|
2015-02-09 15:22:52 +01:00
|
|
|
|
2015-05-15 11:54:04 +02:00
|
|
|
def _new_transaction(self, conn, desc, after_callbacks, func, *args, **kwargs):
    """Run `func(txn, ...)` in a transaction on `conn`, retrying up to five
    times on operational errors and deadlocks, recording timing metrics.

    Args:
        conn: an open database connection.
        desc (str): short description used for logging and metric labels.
        after_callbacks (list): collects (callback, args) pairs registered
            via LoggingTransaction.call_after; the *caller* must run them
            after the transaction has committed.
        func: callable taking a LoggingTransaction as its first argument.
    Returns:
        Whatever `func` returns.
    """
    start = time.time() * 1000
    txn_id = self._TXN_ID

    # We don't really need these to be unique, so lets stop it from
    # growing really large.
    self._TXN_ID = (self._TXN_ID + 1) % (sys.maxint - 1)

    name = "%s-%x" % (desc, txn_id, )

    transaction_logger.debug("[TXN START] {%s}", name)

    try:
        # Up to N retries for transient failures (OperationalError or a
        # detected deadlock); any other failure propagates immediately.
        i = 0
        N = 5
        while True:
            try:
                txn = conn.cursor()
                txn = LoggingTransaction(
                    txn, name, self.database_engine, after_callbacks
                )
                r = func(txn, *args, **kwargs)
                conn.commit()
                return r
            except self.database_engine.module.OperationalError as e:
                # This can happen if the database disappears mid
                # transaction.
                logger.warn(
                    "[TXN OPERROR] {%s} %s %d/%d",
                    name, e, i, N
                )
                if i < N:
                    i += 1
                    try:
                        conn.rollback()
                    except self.database_engine.module.Error as e1:
                        logger.warn(
                            "[TXN EROLL] {%s} %s",
                            name, e1,
                        )
                    continue
                raise
            except self.database_engine.module.DatabaseError as e:
                if self.database_engine.is_deadlock(e):
                    logger.warn("[TXN DEADLOCK] {%s} %d/%d", name, i, N)
                    if i < N:
                        i += 1
                        try:
                            conn.rollback()
                        except self.database_engine.module.Error as e1:
                            logger.warn(
                                "[TXN EROLL] {%s} %s",
                                name, e1,
                            )
                        continue
                raise
    except Exception as e:
        logger.debug("[TXN FAIL] {%s} %s", name, e)
        raise
    finally:
        # Always record the transaction's duration, success or failure.
        end = time.time() * 1000
        duration = end - start

        transaction_logger.debug("[TXN END] {%s} %f", name, duration)

        self._current_txn_total_time += duration
        self._txn_perf_counters.update(desc, start, end)
        sql_txn_timer.inc_by(duration, desc)
|
|
|
|
|
2014-10-30 02:21:33 +01:00
|
|
|
@defer.inlineCallbacks
def runInteraction(self, desc, func, *args, **kwargs):
    """Wraps the .runInteraction() method on the underlying db_pool.

    Runs `func(txn, ...)` in a new transaction (with retry/metrics via
    _new_transaction), then fires any call_after callbacks registered on
    the transaction -- on the reactor thread, after commit.
    """
    current_context = LoggingContext.current_context()

    start_time = time.time() * 1000

    # (callback, args) pairs registered via LoggingTransaction.call_after;
    # deliberately invoked here, outside the DB thread.
    after_callbacks = []

    def inner_func(conn, *args, **kwargs):
        # Runs on a db_pool worker thread.
        with LoggingContext("runInteraction") as context:
            sql_scheduling_timer.inc_by(time.time() * 1000 - start_time)

            if self.database_engine.is_connection_closed(conn):
                logger.debug("Reconnecting closed database connection")
                conn.reconnect()

            current_context.copy_to(context)
            return self._new_transaction(
                conn, desc, after_callbacks, func, *args, **kwargs
            )

    result = yield preserve_context_over_fn(
        self._db_pool.runWithConnection,
        inner_func, *args, **kwargs
    )

    for after_callback, after_args in after_callbacks:
        after_callback(*after_args)
    defer.returnValue(result)
|
2014-11-14 12:16:50 +01:00
|
|
|
|
2015-05-14 17:54:35 +02:00
|
|
|
@defer.inlineCallbacks
def runWithConnection(self, func, *args, **kwargs):
    """Wraps the .runWithConnection() method on the underlying db_pool.

    Unlike runInteraction this hands `func` the raw connection (no
    LoggingTransaction, no retries, no after-callbacks).
    """
    current_context = LoggingContext.current_context()

    start_time = time.time() * 1000

    def inner_func(conn, *args, **kwargs):
        # Runs on a db_pool worker thread.
        with LoggingContext("runWithConnection") as context:
            sql_scheduling_timer.inc_by(time.time() * 1000 - start_time)

            if self.database_engine.is_connection_closed(conn):
                logger.debug("Reconnecting closed database connection")
                conn.reconnect()

            current_context.copy_to(context)

            return func(conn, *args, **kwargs)

    result = yield preserve_context_over_fn(
        self._db_pool.runWithConnection,
        inner_func, *args, **kwargs
    )

    defer.returnValue(result)
|
2014-09-12 14:57:24 +02:00
|
|
|
|
2014-08-12 16:10:52 +02:00
|
|
|
def cursor_to_dict(self, cursor):
    """Converts a SQL cursor into an list of dicts.

    Args:
        cursor : The DBAPI cursor which has executed a query.
    Returns:
        A list of dicts where the key is the column header.
    """
    headers = [column[0] for column in cursor.description]
    return [dict(zip(headers, row)) for row in cursor.fetchall()]
|
|
|
|
|
2015-03-11 18:19:17 +01:00
|
|
|
def _execute(self, desc, decoder, query, *args):
    """Runs a single query for a result set.

    Args:
        desc - description for logging/metrics.
        decoder - The function which can resolve the cursor results to
            something meaningful; falsy to return raw fetchall() rows.
        query - The query string to execute
        *args - Query args.
    Returns:
        The result of decoder(txn) (or txn.fetchall() with no decoder),
        wrapped in a Deferred.
    """
    def interaction(txn):
        txn.execute(query, args)
        return decoder(txn) if decoder else txn.fetchall()

    return self.runInteraction(desc, interaction)
|
2014-08-12 16:10:52 +02:00
|
|
|
|
2015-03-11 18:08:57 +01:00
|
|
|
def _execute_and_decode(self, desc, query, *args):
    """Run `query` and decode every returned row into a dict keyed by
    column name (see cursor_to_dict)."""
    return self._execute(desc, self.cursor_to_dict, query, *args)
|
2014-08-13 17:27:14 +02:00
|
|
|
|
2014-08-12 16:10:52 +02:00
|
|
|
# "Simple" SQL API methods that operate on a single table with no JOINs,
|
|
|
|
# no complex WHERE clauses, just a dict of values for columns.
|
|
|
|
|
2015-04-15 15:51:21 +02:00
|
|
|
@defer.inlineCallbacks
def _simple_insert(self, table, values, or_ignore=False,
                   desc="_simple_insert"):
    """Executes an INSERT query on the named table.

    Args:
        table : string giving the table name
        values : dict of new column names and values for them
        or_ignore : if True, swallow IntegrityError so inserting a
            duplicate row is a no-op rather than a failure
        desc : description for logging/metrics
    """
    try:
        yield self.runInteraction(
            desc,
            self._simple_insert_txn, table, values,
        )
    except self.database_engine.module.IntegrityError:
        # We have to do or_ignore flag at this layer, since we can't reuse
        # a cursor after we receive an error from the db.
        if not or_ignore:
            raise
|
2014-08-26 15:31:48 +02:00
|
|
|
|
2014-09-12 18:11:00 +02:00
|
|
|
@log_function
def _simple_insert_txn(self, txn, table, values):
    """Insert a single row built from the `values` dict into `table`.

    Uses "?" placeholders; LoggingTransaction converts them to the
    engine's paramstyle.
    """
    columns, params = zip(*values.items())

    stmt = "INSERT INTO %s (%s) VALUES(%s)" % (
        table,
        ", ".join(columns),
        ", ".join("?" for _ in columns),
    )

    txn.execute(stmt, params)
|
|
|
|
|
|
|
|
def _simple_insert_many_txn(self, txn, table, values):
    """Insert many rows at once via executemany.

    Args:
        txn: the transaction to execute on.
        table (str): table name.
        values (list of dict): one dict per row; every dict must have
            exactly the same keys (RuntimeError otherwise).
    """
    if not values:
        return

    # This is a *slight* abomination to get a list of tuples of key names
    # and a list of tuples of value names.
    #
    # i.e. [{"a": 1, "b": 2}, {"c": 3, "d": 4}]
    # => [("a", "b",), ("c", "d",)] and [(1, 2,), (3, 4,)]
    #
    # The sort is to ensure that we don't rely on dictionary iteration
    # order.
    keys, vals = zip(*[
        zip(
            *(sorted(i.items(), key=lambda kv: kv[0]))
        )
        for i in values
        if i
    ])

    # All rows must share one column set or the single SQL statement below
    # would silently misalign values.
    for k in keys:
        if k != keys[0]:
            raise RuntimeError(
                "All items must have the same keys"
            )

    sql = "INSERT INTO %s (%s) VALUES(%s)" % (
        table,
        ", ".join(k for k in keys[0]),
        ", ".join("?" for _ in keys[0])
    )

    txn.executemany(sql, vals)
|
2014-08-12 16:10:52 +02:00
|
|
|
|
2015-04-15 17:24:14 +02:00
|
|
|
def _simple_upsert(self, table, keyvalues, values,
                   insertion_values={}, desc="_simple_upsert", lock=True):
    """
    Args:
        table (str): The table to upsert into
        keyvalues (dict): The unique key tables and their new values
        values (dict): The nonunique columns and their new values
        insertion_values (dict): key/values to use when inserting
            (never mutated here, so the shared default dict is safe)
        desc (str): description for logging/metrics
        lock (bool): True to lock the table first; the UPDATE-then-INSERT
            approach is racy without it
    Returns: A deferred
    """
    return self.runInteraction(
        desc,
        self._simple_upsert_txn, table, keyvalues, values, insertion_values,
        lock
    )
|
|
|
|
|
2015-05-01 11:46:48 +02:00
|
|
|
def _simple_upsert_txn(self, txn, table, keyvalues, values, insertion_values={},
                       lock=True):
    """Transaction version of _simple_upsert: UPDATE the row matching
    `keyvalues`, and INSERT a fresh row (keyvalues + values +
    insertion_values) if nothing matched."""
    # We need to lock the table :(, unless we're *really* careful
    if lock:
        self.database_engine.lock_table(txn, table)

    # Try to update
    sql = "UPDATE %s SET %s WHERE %s" % (
        table,
        ", ".join("%s = ?" % (k,) for k in values),
        " AND ".join("%s = ?" % (k,) for k in keyvalues)
    )
    # NOTE: list concatenation of dict .values() -- Python 2 semantics.
    sqlargs = values.values() + keyvalues.values()
    logger.debug(
        "[SQL] %s Args=%s",
        sql, sqlargs,
    )

    txn.execute(sql, sqlargs)
    if txn.rowcount == 0:
        # We didn't update and rows so insert a new one
        allvalues = {}
        allvalues.update(keyvalues)
        allvalues.update(values)
        allvalues.update(insertion_values)

        sql = "INSERT INTO %s (%s) VALUES (%s)" % (
            table,
            ", ".join(k for k in allvalues),
            ", ".join("?" for _ in allvalues)
        )
        logger.debug(
            "[SQL] %s Args=%s",
            sql, keyvalues.values(),
        )
        txn.execute(sql, allvalues.values())
|
|
|
|
|
2014-08-12 16:10:52 +02:00
|
|
|
def _simple_select_one(self, table, keyvalues, retcols,
                       allow_none=False, desc="_simple_select_one"):
    """Fetch the single row of `table` matching `keyvalues`, returning the
    columns named in `retcols`.

    Args:
        table : string giving the table name
        keyvalues : dict of column names and values to select the row with
        retcols : list of strings giving the names of the columns to return
        allow_none : If true, return None instead of failing if the SELECT
            statement returns no rows
    """
    return self.runInteraction(
        desc, self._simple_select_one_txn,
        table, keyvalues, retcols, allow_none,
    )
|
|
|
|
|
|
|
|
def _simple_select_one_onecol(self, table, keyvalues, retcol,
                              allow_none=False,
                              desc="_simple_select_one_onecol"):
    """Fetch a single column from the single row matching `keyvalues`.

    Args:
        table : string giving the table name
        keyvalues : dict of column names and values to select the row with
        retcol : string giving the name of the column to return
        allow_none : if True, return None rather than raising when no row
            matches
    """
    return self.runInteraction(
        desc, self._simple_select_one_onecol_txn,
        table, keyvalues, retcol, allow_none=allow_none,
    )
|
|
|
|
|
|
|
|
def _simple_select_one_onecol_txn(self, txn, table, keyvalues, retcol,
|
|
|
|
allow_none=False):
|
|
|
|
ret = self._simple_select_onecol_txn(
|
|
|
|
txn,
|
2014-08-12 16:10:52 +02:00
|
|
|
table=table,
|
|
|
|
keyvalues=keyvalues,
|
2014-10-29 17:59:24 +01:00
|
|
|
retcol=retcol,
|
2014-08-12 16:10:52 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
if ret:
|
2014-10-29 17:59:24 +01:00
|
|
|
return ret[0]
|
2014-08-12 16:10:52 +02:00
|
|
|
else:
|
2014-10-28 17:42:35 +01:00
|
|
|
if allow_none:
|
|
|
|
return None
|
|
|
|
else:
|
|
|
|
raise StoreError(404, "No row found")
|
|
|
|
|
|
|
|
def _simple_select_onecol_txn(self, txn, table, keyvalues, retcol):
|
2014-11-24 11:50:28 +01:00
|
|
|
sql = (
|
2015-03-19 16:59:48 +01:00
|
|
|
"SELECT %(retcol)s FROM %(table)s WHERE %(where)s"
|
2014-11-24 11:50:28 +01:00
|
|
|
) % {
|
2014-10-28 17:42:35 +01:00
|
|
|
"retcol": retcol,
|
|
|
|
"table": table,
|
|
|
|
"where": " AND ".join("%s = ?" % k for k in keyvalues.keys()),
|
|
|
|
}
|
|
|
|
|
|
|
|
txn.execute(sql, keyvalues.values())
|
|
|
|
|
|
|
|
return [r[0] for r in txn.fetchall()]
|
2014-08-12 16:10:52 +02:00
|
|
|
|
2015-03-20 16:59:18 +01:00
|
|
|
def _simple_select_onecol(self, table, keyvalues, retcol,
|
|
|
|
desc="_simple_select_onecol"):
|
2014-08-12 16:10:52 +02:00
|
|
|
"""Executes a SELECT query on the named table, which returns a list
|
|
|
|
comprising of the values of the named column from the selected rows.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
table (str): table name
|
|
|
|
keyvalues (dict): column names and values to select the rows with
|
|
|
|
retcol (str): column whos value we wish to retrieve.
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
Deferred: Results in a list
|
|
|
|
"""
|
2014-10-28 17:42:35 +01:00
|
|
|
return self.runInteraction(
|
2015-03-20 16:59:18 +01:00
|
|
|
desc,
|
2014-10-28 17:42:35 +01:00
|
|
|
self._simple_select_onecol_txn,
|
|
|
|
table, keyvalues, retcol
|
|
|
|
)
|
2014-08-12 16:10:52 +02:00
|
|
|
|
2015-03-20 16:59:18 +01:00
|
|
|
def _simple_select_list(self, table, keyvalues, retcols,
|
|
|
|
desc="_simple_select_list"):
|
2014-08-12 16:10:52 +02:00
|
|
|
"""Executes a SELECT query on the named table, which may return zero or
|
|
|
|
more rows, returning the result as a list of dicts.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
table : string giving the table name
|
2015-03-02 11:16:24 +01:00
|
|
|
keyvalues : dict of column names and values to select the rows with,
|
|
|
|
or None to not apply a WHERE clause.
|
2014-08-12 16:10:52 +02:00
|
|
|
retcols : list of strings giving the names of the columns to return
|
|
|
|
"""
|
2014-11-06 16:10:55 +01:00
|
|
|
return self.runInteraction(
|
2015-03-20 16:59:18 +01:00
|
|
|
desc,
|
2014-11-06 16:10:55 +01:00
|
|
|
self._simple_select_list_txn,
|
|
|
|
table, keyvalues, retcols
|
|
|
|
)
|
2014-08-12 16:10:52 +02:00
|
|
|
|
2014-11-06 16:10:55 +01:00
|
|
|
def _simple_select_list_txn(self, txn, table, keyvalues, retcols):
|
2014-08-12 16:10:52 +02:00
|
|
|
"""Executes a SELECT query on the named table, which may return zero or
|
|
|
|
more rows, returning the result as a list of dicts.
|
|
|
|
|
|
|
|
Args:
|
2014-11-06 16:10:55 +01:00
|
|
|
txn : Transaction object
|
2014-08-12 16:10:52 +02:00
|
|
|
table : string giving the table name
|
|
|
|
keyvalues : dict of column names and values to select the rows with
|
|
|
|
retcols : list of strings giving the names of the columns to return
|
|
|
|
"""
|
2015-03-02 11:16:24 +01:00
|
|
|
if keyvalues:
|
2015-03-19 16:59:48 +01:00
|
|
|
sql = "SELECT %s FROM %s WHERE %s" % (
|
2015-03-02 11:16:24 +01:00
|
|
|
", ".join(retcols),
|
|
|
|
table,
|
|
|
|
" AND ".join("%s = ?" % (k, ) for k in keyvalues)
|
|
|
|
)
|
|
|
|
txn.execute(sql, keyvalues.values())
|
|
|
|
else:
|
2015-03-19 16:59:48 +01:00
|
|
|
sql = "SELECT %s FROM %s" % (
|
2015-03-02 11:16:24 +01:00
|
|
|
", ".join(retcols),
|
|
|
|
table
|
|
|
|
)
|
|
|
|
txn.execute(sql)
|
2014-08-12 16:10:52 +02:00
|
|
|
|
2014-11-06 16:10:55 +01:00
|
|
|
return self.cursor_to_dict(txn)
|
2014-08-12 16:10:52 +02:00
|
|
|
|
|
|
|
def _simple_update_one(self, table, keyvalues, updatevalues,
|
2015-03-20 15:59:48 +01:00
|
|
|
desc="_simple_update_one"):
|
2014-08-12 16:10:52 +02:00
|
|
|
"""Executes an UPDATE query on the named table, setting new values for
|
|
|
|
columns in a row matching the key values.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
table : string giving the table name
|
|
|
|
keyvalues : dict of column names and values to select the row with
|
|
|
|
updatevalues : dict giving column names and values to update
|
|
|
|
retcols : optional list of column names to return
|
|
|
|
|
|
|
|
If present, retcols gives a list of column names on which to perform
|
|
|
|
a SELECT statement *before* performing the UPDATE statement. The values
|
|
|
|
of these will be returned in a dict.
|
|
|
|
|
|
|
|
These are performed within the same transaction, allowing an atomic
|
|
|
|
get-and-set. This can be used to implement compare-and-set by putting
|
|
|
|
the update column in the 'keyvalues' dict as well.
|
|
|
|
"""
|
2015-03-20 15:59:48 +01:00
|
|
|
return self.runInteraction(
|
|
|
|
desc,
|
|
|
|
self._simple_update_one_txn,
|
|
|
|
table, keyvalues, updatevalues,
|
|
|
|
)
|
2014-08-12 16:10:52 +02:00
|
|
|
|
2015-03-20 15:59:48 +01:00
|
|
|
def _simple_update_one_txn(self, txn, table, keyvalues, updatevalues):
|
|
|
|
update_sql = "UPDATE %s SET %s WHERE %s" % (
|
|
|
|
table,
|
|
|
|
", ".join("%s = ?" % (k,) for k in updatevalues),
|
|
|
|
" AND ".join("%s = ?" % (k,) for k in keyvalues)
|
|
|
|
)
|
2014-08-12 16:10:52 +02:00
|
|
|
|
2015-03-20 15:59:48 +01:00
|
|
|
txn.execute(
|
|
|
|
update_sql,
|
|
|
|
updatevalues.values() + keyvalues.values()
|
|
|
|
)
|
|
|
|
|
|
|
|
if txn.rowcount == 0:
|
|
|
|
raise StoreError(404, "No row found")
|
|
|
|
if txn.rowcount > 1:
|
|
|
|
raise StoreError(500, "More than one row matched")
|
|
|
|
|
|
|
|
def _simple_select_one_txn(self, txn, table, keyvalues, retcols,
|
2015-03-20 17:03:25 +01:00
|
|
|
allow_none=False):
|
2015-03-24 17:19:01 +01:00
|
|
|
select_sql = "SELECT %s FROM %s WHERE %s" % (
|
2015-03-20 15:59:48 +01:00
|
|
|
", ".join(retcols),
|
|
|
|
table,
|
2015-04-01 15:12:33 +02:00
|
|
|
" AND ".join("%s = ?" % (k,) for k in keyvalues)
|
2015-03-20 15:59:48 +01:00
|
|
|
)
|
|
|
|
|
|
|
|
txn.execute(select_sql, keyvalues.values())
|
|
|
|
|
|
|
|
row = txn.fetchone()
|
|
|
|
if not row:
|
|
|
|
if allow_none:
|
|
|
|
return None
|
|
|
|
raise StoreError(404, "No row found")
|
|
|
|
if txn.rowcount > 1:
|
|
|
|
raise StoreError(500, "More than one row matched")
|
|
|
|
|
|
|
|
return dict(zip(retcols, row))
|
2014-08-12 16:10:52 +02:00
|
|
|
|
|
|
|
def _simple_selectupdate_one(self, table, keyvalues, updatevalues=None,
|
2015-03-20 16:59:18 +01:00
|
|
|
retcols=None, allow_none=False,
|
|
|
|
desc="_simple_selectupdate_one"):
|
2014-08-12 16:10:52 +02:00
|
|
|
""" Combined SELECT then UPDATE."""
|
|
|
|
def func(txn):
|
|
|
|
ret = None
|
|
|
|
if retcols:
|
2015-03-20 15:59:48 +01:00
|
|
|
ret = self._simple_select_one_txn(
|
|
|
|
txn,
|
|
|
|
table=table,
|
|
|
|
keyvalues=keyvalues,
|
|
|
|
retcols=retcols,
|
|
|
|
allow_none=allow_none,
|
|
|
|
)
|
2014-08-12 16:10:52 +02:00
|
|
|
|
|
|
|
if updatevalues:
|
2015-03-20 15:59:48 +01:00
|
|
|
self._simple_update_one_txn(
|
|
|
|
txn,
|
|
|
|
table=table,
|
|
|
|
keyvalues=keyvalues,
|
|
|
|
updatevalues=updatevalues,
|
2014-08-12 16:10:52 +02:00
|
|
|
)
|
|
|
|
|
2015-03-19 16:59:48 +01:00
|
|
|
# if txn.rowcount == 0:
|
|
|
|
# raise StoreError(404, "No row found")
|
2014-08-12 16:10:52 +02:00
|
|
|
if txn.rowcount > 1:
|
|
|
|
raise StoreError(500, "More than one row matched")
|
|
|
|
|
|
|
|
return ret
|
2015-03-20 16:59:18 +01:00
|
|
|
return self.runInteraction(desc, func)
|
2014-08-12 16:10:52 +02:00
|
|
|
|
2015-03-20 16:59:18 +01:00
|
|
|
def _simple_delete_one(self, table, keyvalues, desc="_simple_delete_one"):
|
2014-08-12 16:10:52 +02:00
|
|
|
"""Executes a DELETE query on the named table, expecting to delete a
|
|
|
|
single row.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
table : string giving the table name
|
|
|
|
keyvalues : dict of column names and values to select the row with
|
|
|
|
"""
|
|
|
|
sql = "DELETE FROM %s WHERE %s" % (
|
|
|
|
table,
|
2014-10-28 12:18:04 +01:00
|
|
|
" AND ".join("%s = ?" % (k, ) for k in keyvalues)
|
2014-08-12 16:10:52 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
def func(txn):
|
|
|
|
txn.execute(sql, keyvalues.values())
|
|
|
|
if txn.rowcount == 0:
|
|
|
|
raise StoreError(404, "No row found")
|
|
|
|
if txn.rowcount > 1:
|
|
|
|
raise StoreError(500, "more than one row matched")
|
2015-03-20 16:59:18 +01:00
|
|
|
return self.runInteraction(desc, func)
|
2014-10-28 12:18:04 +01:00
|
|
|
|
2015-03-20 16:59:18 +01:00
|
|
|
def _simple_delete(self, table, keyvalues, desc="_simple_delete"):
|
2014-10-28 12:18:04 +01:00
|
|
|
"""Executes a DELETE query on the named table.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
table : string giving the table name
|
|
|
|
keyvalues : dict of column names and values to select the row with
|
|
|
|
"""
|
|
|
|
|
2015-03-20 16:59:18 +01:00
|
|
|
return self.runInteraction(desc, self._simple_delete_txn)
|
2014-10-28 12:18:04 +01:00
|
|
|
|
|
|
|
def _simple_delete_txn(self, txn, table, keyvalues):
|
|
|
|
sql = "DELETE FROM %s WHERE %s" % (
|
|
|
|
table,
|
|
|
|
" AND ".join("%s = ?" % (k, ) for k in keyvalues)
|
|
|
|
)
|
|
|
|
|
|
|
|
return txn.execute(sql, keyvalues.values())
|
2014-08-12 16:10:52 +02:00
|
|
|
|
|
|
|
def _simple_max_id(self, table):
|
|
|
|
"""Executes a SELECT query on the named table, expecting to return the
|
|
|
|
max value for the column "id".
|
|
|
|
|
|
|
|
Args:
|
|
|
|
table : string giving the table name
|
|
|
|
"""
|
|
|
|
sql = "SELECT MAX(id) AS id FROM %s" % table
|
|
|
|
|
|
|
|
def func(txn):
|
|
|
|
txn.execute(sql)
|
|
|
|
max_id = self.cursor_to_dict(txn)[0]["id"]
|
|
|
|
if max_id is None:
|
|
|
|
return 0
|
|
|
|
return max_id
|
|
|
|
|
2014-10-28 12:18:04 +01:00
|
|
|
return self.runInteraction("_simple_max_id", func)
|
2014-08-12 16:10:52 +02:00
|
|
|
|
2015-04-01 15:12:33 +02:00
|
|
|
def get_next_stream_id(self):
    """Hand out the next unused stream id, incrementing the shared counter
    under its lock so concurrent callers never receive the same id.
    """
    with self._next_stream_id_lock:
        next_id = self._next_stream_id
        self._next_stream_id += 1
    return next_id
|
|
|
|
|
2014-09-23 16:28:32 +02:00
|
|
|
|
2015-03-20 14:52:56 +01:00
|
|
|
class _RollbackButIsFineException(Exception):
|
|
|
|
""" This exception is used to rollback a transaction without implying
|
|
|
|
something went wrong.
|
|
|
|
"""
|
|
|
|
pass
|
|
|
|
|
|
|
|
|
2014-08-12 16:10:52 +02:00
|
|
|
class Table(object):
    """Base class describing a particular database table: its name, its
    column names, and a tuple type used to decode result rows.
    """

    # str: The name of the table
    table_name = None

    # list: The field names
    fields = None

    # Type: A tuple type used to decode the results
    EntryType = None

    _select_where_clause = "SELECT %s FROM %s WHERE %s"
    _select_clause = "SELECT %s FROM %s"
    _insert_clause = "REPLACE INTO %s (%s) VALUES (%s)"

    @classmethod
    def select_statement(cls, where_clause=None):
        """
        Args:
            where_clause (str): The WHERE clause to use.

        Returns:
            str: An SQL statement selecting all of this table's fields,
            restricted by `where_clause` when one is given.
        """
        columns = ", ".join(cls.fields)
        if where_clause:
            return cls._select_where_clause % (
                columns,
                cls.table_name,
                where_clause,
            )
        return cls._select_clause % (
            columns,
            cls.table_name,
        )

    @classmethod
    def insert_statement(cls):
        """Return a REPLACE INTO statement with one "?" placeholder per
        field of this table.
        """
        return cls._insert_clause % (
            cls.table_name,
            ", ".join(cls.fields),
            ", ".join(["?"] * len(cls.fields)),
        )

    @classmethod
    def decode_single_result(cls, results):
        """Decode the first row of `results` into an `EntryType`, or
        return None when `results` is empty.

        Args:
            results (list): The results list to convert to `EntryType`
        Returns:
            EntryType: An instance of `EntryType`, or None
        """
        rows = list(results)
        return cls.EntryType(*rows[0]) if rows else None

    @classmethod
    def decode_results(cls, results):
        """Decode every row of `results` into an `EntryType`.

        Args:
            results (list): The results list to convert to `EntryType`

        Returns:
            list: A list of `EntryType`
        """
        return [cls.EntryType(*row) for row in results]

    @classmethod
    def get_fields_string(cls, prefix=None):
        """Return the table's fields as a comma-separated SQL column list,
        each optionally qualified as "prefix.field".
        """
        if prefix:
            return ", ".join("%s.%s" % (prefix, f) for f in cls.fields)
        return ", ".join(cls.fields)
|
|
|
|
|
|
|
|
|
|
|
|
class JoinHelper(object):
    """Helps join tables together by computing the deduplicated union of
    their fields (for use in SELECTs) and a namedtuple to hold the joined
    results.

    Attributes:
        tables (list): List of `Table` classes
        EntryType (type)
    """

    def __init__(self, *tables):
        self.tables = tables

        # Collect each field once, preserving first-seen order.
        unique_fields = []
        for table in self.tables:
            for field in table.fields:
                if field not in unique_fields:
                    unique_fields.append(field)

        self.EntryType = namedtuple("JoinHelperEntry", unique_fields)

    def get_fields(self, **prefixes):
        """Get a string representing a list of fields for use in SELECT
        statements with the given prefixes applied to each.

        For example::

            JoinHelper(PdusTable, StateTable).get_fields(
                PdusTable="pdus",
                StateTable="state"
            )
        """
        qualified = []
        for field in self.EntryType._fields:
            # Qualify each field with the prefix of the first table that
            # declares it.
            for table in self.tables:
                if field in table.fields:
                    qualified.append(
                        "%s.%s" % (prefixes[table.__name__], field)
                    )
                    break

        return ", ".join(qualified)

    def decode_results(self, rows):
        """Decode each row into this helper's `EntryType`."""
        return [self.EntryType(*row) for row in rows]
|