# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import functools
import gc
import logging
import os
import platform
import threading
import time
from typing import Callable, Dict, Iterable, Optional, Tuple, Union

import attr
from prometheus_client import Counter, Gauge, Histogram
from prometheus_client.core import (
    REGISTRY,
    CounterMetricFamily,
    GaugeMetricFamily,
    HistogramMetricFamily,
)

from twisted.internet import reactor

import synapse
from synapse.metrics._exposition import (
    MetricsResource,
    generate_latest,
    start_http_server,
)
from synapse.util.versionstring import get_version_string

logger = logging.getLogger(__name__)

METRICS_PREFIX = "/_synapse/metrics"

running_on_pypy = platform.python_implementation() == "PyPy"
all_gauges = {}  # type: Dict[str, Union[LaterGauge, InFlightGauge, BucketCollector]]

HAVE_PROC_SELF_STAT = os.path.exists("/proc/self/stat")


class RegistryProxy:
    @staticmethod
    def collect():
        for metric in REGISTRY.collect():
            if not metric.name.startswith("__"):
                yield metric
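
# A minimal wiring sketch for RegistryProxy (hedged; the actual mounting of the
# metrics endpoint lives in the application code, not in this module). Since
# RegistryProxy only needs to provide collect(), it can be handed to the
# exposition resource in place of a real registry:
#
#   resource = MetricsResource(RegistryProxy)
#
# which then serves every registered metric except those prefixed with "__".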


@attr.s(slots=True, hash=True)
class LaterGauge:

    name = attr.ib(type=str)
    desc = attr.ib(type=str)
    labels = attr.ib(hash=False, type=Optional[Iterable[str]])
    # callback: should either return a value (if there are no labels for this
    # metric), or a dict mapping from a label tuple to a value
    caller = attr.ib(type=Callable[[], Union[Dict[Tuple[str, ...], float], float]])

    def collect(self):

        g = GaugeMetricFamily(self.name, self.desc, labels=self.labels)

        try:
            calls = self.caller()
        except Exception:
            logger.exception("Exception running callback for LaterGauge(%s)", self.name)
            yield g
            return

        if isinstance(calls, dict):
            for k, v in calls.items():
                g.add_metric(k, v)
        else:
            g.add_metric([], calls)

        yield g

    def __attrs_post_init__(self):
        self._register()

    def _register(self):
        if self.name in all_gauges.keys():
            logger.warning("%s already registered, reregistering", self.name)
            REGISTRY.unregister(all_gauges.pop(self.name))

        REGISTRY.register(self)
        all_gauges[self.name] = self
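
# A minimal LaterGauge usage sketch (the metric name and callback here are
# illustrative, not part of this module). With labels=None the callback returns
# a single value; with labels set, it returns a dict of {label_tuple: value}:
#
#   LaterGauge(
#       "synapse_example_cache_size",    # hypothetical metric name
#       "Number of entries in an example cache",
#       None,
#       lambda: len(example_cache),      # hypothetical data source
#   )
#
# Construction is enough: __attrs_post_init__ registers the gauge with
# REGISTRY, and the callback runs lazily on each scrape.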


class InFlightGauge:
    """Tracks number of things (e.g. requests, Measure blocks, etc) in flight
    at any given time.

    Each InFlightGauge will create a metric called `<name>_total` that counts
    the number of in-flight blocks, as well as a metric for each item in the
    given `sub_metrics` as `<name>_<sub_metric>`, which will get updated by the
    callbacks.

    Args:
        name (str)
        desc (str)
        labels (list[str])
        sub_metrics (list[str]): A list of sub metrics that the callbacks
            will update.
    """

    def __init__(self, name, desc, labels, sub_metrics):
        self.name = name
        self.desc = desc
        self.labels = labels
        self.sub_metrics = sub_metrics

        # Create a class which has the sub_metrics values as attributes, which
        # default to 0 on initialization. Used to pass to registered callbacks.
        self._metrics_class = attr.make_class(
            "_MetricsEntry", attrs={x: attr.ib(0) for x in sub_metrics}, slots=True
        )

        # Counts the number of in-flight blocks for a given set of label values
        self._registrations = {}  # type: Dict

        # Protects access to _registrations
        self._lock = threading.Lock()

        self._register_with_collector()

    def register(self, key, callback):
        """Registers that we've entered a new block with labels `key`.

        `callback` gets called each time the metrics are collected. The same
        value must also be given to `unregister`.

        `callback` gets called with an object that has an attribute per
        sub_metric, which should be updated with the necessary values. Note that
        the metrics object is shared between all callbacks registered with the
        same key.

        Note that `callback` may be called on a separate thread.
        """
        with self._lock:
            self._registrations.setdefault(key, set()).add(callback)

    def unregister(self, key, callback):
        """Records that we've exited a block with labels `key`."""

        with self._lock:
            self._registrations.setdefault(key, set()).discard(callback)

    def collect(self):
        """Called by prometheus client when it reads metrics.

        Note: may be called by a separate thread.
        """
        in_flight = GaugeMetricFamily(
            self.name + "_total", self.desc, labels=self.labels
        )

        metrics_by_key = {}

        # We copy so that we don't mutate the list while iterating
        with self._lock:
            keys = list(self._registrations)

        for key in keys:
            with self._lock:
                callbacks = set(self._registrations[key])

            in_flight.add_metric(key, len(callbacks))

            metrics = self._metrics_class()
            metrics_by_key[key] = metrics
            for callback in callbacks:
                callback(metrics)

        yield in_flight

        for name in self.sub_metrics:
            gauge = GaugeMetricFamily(
                "_".join([self.name, name]), "", labels=self.labels
            )
            for key, metrics in metrics_by_key.items():
                gauge.add_metric(key, getattr(metrics, name))
            yield gauge

    def _register_with_collector(self):
        if self.name in all_gauges.keys():
            logger.warning("%s already registered, reregistering", self.name)
            REGISTRY.unregister(all_gauges.pop(self.name))

        REGISTRY.register(self)
        all_gauges[self.name] = self
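
# An InFlightGauge usage sketch (all names here are illustrative). Callers
# register a callback on entering a block and unregister it on exit; at scrape
# time each callback fills in the shared sub_metric attributes:
#
#   in_flight = InFlightGauge(
#       "synapse_example_in_flight",      # hypothetical name
#       "Blocks currently executing",
#       labels=["block_name"],
#       sub_metrics=["real_time_sum"],
#   )
#
#   def _cb(metrics):
#       metrics.real_time_sum += elapsed  # hypothetical value
#
#   in_flight.register(("main",), _cb)    # entering the block
#   ...
#   in_flight.unregister(("main",), _cb)  # exiting the block
#
# This exports synapse_example_in_flight_total and
# synapse_example_in_flight_real_time_sum, both labelled by block_name.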


@attr.s(slots=True, hash=True)
class BucketCollector:
    """
    Like a Histogram, but allows buckets to be point-in-time instead of
    incrementally added to.

    Args:
        name (str): Base name of metric to be exported to Prometheus.
        data_collector (callable -> dict): A synchronous callable that
            returns a dict mapping bucket to number of items in the
            bucket. If these buckets are not the same as the buckets
            given to this class, they will be remapped into them.
        buckets (list[float]): List of floats/ints of the buckets to
            give to Prometheus. +Inf is ignored, if given.

    """

    name = attr.ib()
    data_collector = attr.ib()
    buckets = attr.ib()

    def collect(self):

        # Fetch the data -- this must be synchronous!
        data = self.data_collector()

        buckets = {}  # type: Dict[float, int]

        # Remap the observed values onto the configured buckets. There is
        # deliberately no `break` here: each value is counted in every bucket
        # whose upper bound it fits under, which yields the cumulative counts
        # that Prometheus histograms expect.
        res = []
        for x in data.keys():
            for bound in self.buckets:
                if x <= bound:
                    buckets[bound] = buckets.get(bound, 0) + data[x]

        for i in self.buckets:
            res.append([str(i), buckets.get(i, 0)])

        res.append(["+Inf", sum(data.values())])

        metric = HistogramMetricFamily(
            self.name, "", buckets=res, sum_value=sum(x * y for x, y in data.items())
        )
        yield metric

    def __attrs_post_init__(self):
        self.buckets = [float(x) for x in self.buckets if x != "+Inf"]
        if self.buckets != sorted(self.buckets):
            raise ValueError("Buckets not sorted")

        self.buckets = tuple(self.buckets)

        if self.name in all_gauges.keys():
            logger.warning("%s already registered, reregistering", self.name)
            REGISTRY.unregister(all_gauges.pop(self.name))

        REGISTRY.register(self)
        all_gauges[self.name] = self
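
# A BucketCollector usage sketch (hypothetical names and data source). The
# data_collector returns a point-in-time {value: count} dict on each scrape;
# collect() remaps it cumulatively onto the configured buckets and derives the
# "+Inf" bucket from the total:
#
#   BucketCollector(
#       "synapse_example_queue_size",            # hypothetical metric name
#       lambda: {len(q): 1 for q in queues},     # hypothetical data source
#       buckets=[1, 5, 10, 50, "+Inf"],
#   )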


#
# Detailed CPU metrics
#


class CPUMetrics:
    def __init__(self):
        ticks_per_sec = 100
        try:
            # Try and get the system config
            ticks_per_sec = os.sysconf("SC_CLK_TCK")
        except (ValueError, TypeError, AttributeError):
            pass

        self.ticks_per_sec = ticks_per_sec

    def collect(self):
        if not HAVE_PROC_SELF_STAT:
            return

        with open("/proc/self/stat") as s:
            line = s.read()
            # Strip the "pid (comm) " prefix: after the split, raw_stats[0] is
            # the process state (field 3 of proc(5) stat), so raw_stats[11] is
            # utime (field 14) and raw_stats[12] is stime (field 15), both in
            # clock ticks.
            raw_stats = line.split(") ", 1)[1].split(" ")

            user = GaugeMetricFamily("process_cpu_user_seconds_total", "")
            user.add_metric([], float(raw_stats[11]) / self.ticks_per_sec)
            yield user

            sys = GaugeMetricFamily("process_cpu_system_seconds_total", "")
            sys.add_metric([], float(raw_stats[12]) / self.ticks_per_sec)
            yield sys


REGISTRY.register(CPUMetrics())


#
# Python GC metrics
#

gc_unreachable = Gauge("python_gc_unreachable_total", "Unreachable GC objects", ["gen"])
gc_time = Histogram(
    "python_gc_time",
    "Time taken to GC (sec)",
    ["gen"],
    buckets=[
        0.0025,
        0.005,
        0.01,
        0.025,
        0.05,
        0.10,
        0.25,
        0.50,
        1.00,
        2.50,
        5.00,
        7.50,
        15.00,
        30.00,
        45.00,
        60.00,
    ],
)


class GCCounts:
    def collect(self):
        cm = GaugeMetricFamily("python_gc_counts", "GC object counts", labels=["gen"])
        for n, m in enumerate(gc.get_count()):
            cm.add_metric([str(n)], m)

        yield cm


if not running_on_pypy:
    REGISTRY.register(GCCounts())


#
# PyPy GC / memory metrics
#


class PyPyGCStats:
    def collect(self):

        # @stats is a pretty-printer object with __str__() returning a nice table,
        # plus some fields that contain data from that table.
        # Unfortunately, the fields are themselves pretty-printed (i.e. '4.5MB').
        stats = gc.get_stats(memory_pressure=False)  # type: ignore
        # @s contains the same fields as @stats, but as actual integers.
        s = stats._s  # type: ignore

        # Also note that the field naming only vaguely correlates with the
        # pretty-printed table:
        # >>>> gc.get_stats(False)
        # Total memory consumed:
        #     GC used:            8.7MB (peak: 39.0MB)   # s.total_gc_memory, s.peak_memory
        #        in arenas:            3.0MB             # s.total_arena_memory
        #        rawmalloced:          1.7MB             # s.total_rawmalloced_memory
        #        nursery:              4.0MB             # s.nursery_size
        #     raw assembler used: 31.0kB                 # s.jit_backend_used
        #     -----------------------------
        #     Total:              8.8MB                  # stats.memory_used_sum
        #
        # Total memory allocated:
        #     GC allocated:            38.7MB (peak: 41.1MB)   # s.total_allocated_memory, s.peak_allocated_memory
        #        in arenas:            30.9MB            # s.peak_arena_memory
        #        rawmalloced:          4.1MB             # s.peak_rawmalloced_memory
        #        nursery:              4.0MB             # s.nursery_size
        #     raw assembler allocated: 1.0MB             # s.jit_backend_allocated
        #     -----------------------------
        #     Total:                   39.7MB            # stats.memory_allocated_sum
        #
        # Total time spent in GC:  0.073                 # s.total_gc_time

        pypy_gc_time = CounterMetricFamily(
            "pypy_gc_time_seconds_total", "Total time spent in PyPy GC", labels=[],
        )
        # s.total_gc_time is reported in milliseconds; convert to seconds.
        pypy_gc_time.add_metric([], s.total_gc_time / 1000)
        yield pypy_gc_time

        pypy_mem = GaugeMetricFamily(
            "pypy_memory_bytes",
            "Memory tracked by PyPy allocator",
            labels=["state", "class", "kind"],
        )
        # memory used by JIT assembler
        pypy_mem.add_metric(["used", "", "jit"], s.jit_backend_used)
        pypy_mem.add_metric(["allocated", "", "jit"], s.jit_backend_allocated)
        # memory used by GCed objects
        pypy_mem.add_metric(["used", "", "arenas"], s.total_arena_memory)
        pypy_mem.add_metric(["allocated", "", "arenas"], s.peak_arena_memory)
        pypy_mem.add_metric(["used", "", "rawmalloced"], s.total_rawmalloced_memory)
        pypy_mem.add_metric(["allocated", "", "rawmalloced"], s.peak_rawmalloced_memory)
        pypy_mem.add_metric(["used", "", "nursery"], s.nursery_size)
        pypy_mem.add_metric(["allocated", "", "nursery"], s.nursery_size)
        # totals
        pypy_mem.add_metric(["used", "totals", "gc"], s.total_gc_memory)
        pypy_mem.add_metric(["allocated", "totals", "gc"], s.total_allocated_memory)
        pypy_mem.add_metric(["used", "totals", "gc_peak"], s.peak_memory)
        pypy_mem.add_metric(["allocated", "totals", "gc_peak"], s.peak_allocated_memory)
        yield pypy_mem


if running_on_pypy:
    REGISTRY.register(PyPyGCStats())


#
# Twisted reactor metrics
#

tick_time = Histogram(
    "python_twisted_reactor_tick_time",
    "Tick time of the Twisted reactor (sec)",
    buckets=[0.001, 0.002, 0.005, 0.01, 0.025, 0.05, 0.1, 0.2, 0.5, 1, 2, 5],
)
pending_calls_metric = Histogram(
    "python_twisted_reactor_pending_calls",
    "Pending calls",
    buckets=[1, 2, 5, 10, 25, 50, 100, 250, 500, 1000],
)

#
# Federation Metrics
#

sent_transactions_counter = Counter("synapse_federation_client_sent_transactions", "")

events_processed_counter = Counter("synapse_federation_client_events_processed", "")

event_processing_loop_counter = Counter(
    "synapse_event_processing_loop_count", "Event processing loop iterations", ["name"]
)

event_processing_loop_room_count = Counter(
    "synapse_event_processing_loop_room_count",
    "Rooms seen per event processing loop iteration",
    ["name"],
)


# Used to track where various components have processed in the event stream,
# e.g. federation sending, appservice sending, etc.
event_processing_positions = Gauge("synapse_event_processing_positions", "", ["name"])

# Used to track the current max events stream position
event_persisted_position = Gauge("synapse_event_persisted_position", "")

# Used to track the received_ts of the last event processed by various
# components
event_processing_last_ts = Gauge("synapse_event_processing_last_ts", "", ["name"])

# Used to track the lag in processing events. This is the time difference
# between the last processed event's received_ts and the time it was
# finished being processed.
event_processing_lag = Gauge("synapse_event_processing_lag", "", ["name"])

event_processing_lag_by_event = Histogram(
    "synapse_event_processing_lag_by_event",
    "Time between an event being persisted and it being queued up to be sent to the relevant remote servers",
    ["name"],
)
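
# A hedged sketch of how a component updates these gauges ("federation_sender"
# is just an example label value, and the variables are hypothetical):
#
#   event_processing_positions.labels("federation_sender").set(stream_token)
#   event_processing_last_ts.labels("federation_sender").set(received_ts)
#   event_processing_lag.labels("federation_sender").set(now_ms - received_ts)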

# Build info of the running server.
build_info = Gauge(
    "synapse_build_info", "Build information", ["pythonversion", "version", "osversion"]
)
build_info.labels(
    " ".join([platform.python_implementation(), platform.python_version()]),
    get_version_string(synapse),
    " ".join([platform.system(), platform.release()]),
).set(1)

last_ticked = time.time()


class ReactorLastSeenMetric:
    def collect(self):
        cm = GaugeMetricFamily(
            "python_twisted_reactor_last_seen",
            "Seconds since the Twisted reactor was last seen",
        )
        cm.add_metric([], time.time() - last_ticked)
        yield cm


REGISTRY.register(ReactorLastSeenMetric())


def runUntilCurrentTimer(func):
    @functools.wraps(func)
    def f(*args, **kwargs):
        now = reactor.seconds()
        num_pending = 0

        # _newTimedCalls is one long list of *all* pending calls. The loop
        # below is based on the implementation of reactor.runUntilCurrent.
        for delayed_call in reactor._newTimedCalls:
            if delayed_call.time > now:
                break

            if delayed_call.delayed_time > 0:
                continue

            num_pending += 1

        num_pending += len(reactor.threadCallQueue)
        start = time.time()
        ret = func(*args, **kwargs)
        end = time.time()

        # Record the amount of wallclock time spent running pending calls.
        # This is a proxy for the actual amount of time between reactor polls,
        # since about 25% of time is actually spent running things triggered by
        # I/O events, but that is harder to capture without rewriting half the
        # reactor.
        tick_time.observe(end - start)
        pending_calls_metric.observe(num_pending)

        # Update the time we last ticked, for the metric to test whether
        # Synapse's reactor has frozen
        global last_ticked
        last_ticked = end

        if running_on_pypy:
            return ret

        # Check if we need to do a manual GC (since it has been disabled), and
        # do one if necessary.
        threshold = gc.get_threshold()
        counts = gc.get_count()
        for i in (2, 1, 0):
            if threshold[i] < counts[i]:
                if i == 0:
                    logger.debug("Collecting gc %d", i)
                else:
                    logger.info("Collecting gc %d", i)

                start = time.time()
                unreachable = gc.collect(i)
                end = time.time()

                gc_time.labels(i).observe(end - start)
                gc_unreachable.labels(i).set(unreachable)

        return ret

    return f


try:
    # Ensure the reactor has all the attributes we expect
    reactor.runUntilCurrent
    reactor._newTimedCalls
    reactor.threadCallQueue

    # runUntilCurrent is called when we have pending calls. It is called once
    # per iteration after fd polling.
    reactor.runUntilCurrent = runUntilCurrentTimer(reactor.runUntilCurrent)

    # We manually run the GC on each reactor tick so that we can get some
    # metrics about time spent doing GC.
    if not running_on_pypy:
        gc.disable()
except AttributeError:
    # A non-standard reactor may not have these attributes, in which case we
    # simply skip instrumenting it.
    pass


__all__ = [
    "MetricsResource",
    "generate_latest",
    "start_http_server",
    "LaterGauge",
    "InFlightGauge",
    "BucketCollector",
]