2016-01-07 05:26:29 +01:00
|
|
|
# Copyright 2015, 2016 OpenMarket Ltd
|
2022-04-06 14:59:04 +02:00
|
|
|
# Copyright 2022 The Matrix.org Foundation C.I.C.
|
2015-02-24 19:03:39 +01:00
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
|
2020-09-29 23:26:28 +02:00
|
|
|
import itertools
|
2018-07-09 08:09:20 +02:00
|
|
|
import logging
|
2018-05-23 20:03:56 +02:00
|
|
|
import os
|
2018-01-08 01:53:32 +01:00
|
|
|
import platform
|
2018-09-14 15:39:59 +02:00
|
|
|
import threading
|
2021-11-17 20:07:02 +01:00
|
|
|
from typing import (
|
|
|
|
Callable,
|
|
|
|
Dict,
|
|
|
|
Generic,
|
|
|
|
Iterable,
|
|
|
|
Mapping,
|
|
|
|
Optional,
|
|
|
|
Sequence,
|
|
|
|
Set,
|
|
|
|
Tuple,
|
|
|
|
Type,
|
|
|
|
TypeVar,
|
|
|
|
Union,
|
2022-02-02 17:51:00 +01:00
|
|
|
cast,
|
2021-11-17 20:07:02 +01:00
|
|
|
)
|
2015-08-13 12:38:59 +02:00
|
|
|
|
2018-07-09 08:09:20 +02:00
|
|
|
import attr
|
2021-11-17 20:07:02 +01:00
|
|
|
from prometheus_client import CollectorRegistry, Counter, Gauge, Histogram, Metric
|
2020-05-22 12:08:41 +02:00
|
|
|
from prometheus_client.core import (
|
|
|
|
REGISTRY,
|
2020-09-29 23:26:28 +02:00
|
|
|
GaugeHistogramMetricFamily,
|
2020-05-22 12:08:41 +02:00
|
|
|
GaugeMetricFamily,
|
|
|
|
)
|
2015-03-04 18:13:09 +01:00
|
|
|
|
2021-11-01 12:21:36 +01:00
|
|
|
from twisted.python.threadpool import ThreadPool
|
2015-02-24 19:03:39 +01:00
|
|
|
|
2022-02-14 14:12:22 +01:00
|
|
|
# This module is imported for its side effects; flake8 needn't warn that it's unused.
|
|
|
|
import synapse.metrics._reactor_metrics # noqa: F401
|
2022-08-24 13:35:54 +02:00
|
|
|
from synapse.metrics._gc import MIN_TIME_BETWEEN_GCS, install_gc_manager
|
|
|
|
from synapse.metrics._legacy_exposition import (
|
2019-07-18 15:57:15 +02:00
|
|
|
MetricsResource,
|
|
|
|
generate_latest,
|
|
|
|
start_http_server,
|
|
|
|
)
|
2022-04-06 14:59:04 +02:00
|
|
|
from synapse.metrics._types import Collector
|
2022-06-07 16:24:11 +02:00
|
|
|
from synapse.util import SYNAPSE_VERSION
|
2019-07-18 15:57:15 +02:00
|
|
|
|
2015-03-04 18:13:09 +01:00
|
|
|
logger = logging.getLogger(__name__)

# HTTP path under which the metrics resource is exposed.
METRICS_PREFIX = "/_synapse/metrics"

# Every gauge-style collector defined in this module (`LaterGauge`,
# `InFlightGauge`) registers itself here, keyed by metric name, so that a
# re-registration under the same name can unregister the old collector first.
all_gauges: Dict[str, Collector] = {}

# Whether /proc/self/stat is available (i.e. we're on Linux); gates the
# detailed CPU metrics collected by `CPUMetrics` below.
HAVE_PROC_SELF_STAT = os.path.exists("/proc/self/stat")
|
2018-05-22 23:28:23 +02:00
|
|
|
|
2018-05-29 03:32:15 +02:00
|
|
|
|
2022-02-02 17:51:00 +01:00
|
|
|
class _RegistryProxy:
    """A stand-in for the global `REGISTRY` that hides private metrics.

    Collection is delegated to `REGISTRY`, but any metric whose name starts
    with a double underscore is filtered out of the results.
    """

    @staticmethod
    def collect() -> Iterable[Metric]:
        yield from (
            metric for metric in REGISTRY.collect() if not metric.name.startswith("__")
        )
|
|
|
|
|
|
|
|
|
2022-02-02 17:51:00 +01:00
|
|
|
# A little bit nasty, but collect() above is static so a Protocol doesn't work.
# _RegistryProxy matches the signature of a CollectorRegistry instance enough
# for it to be usable in the contexts in which we use it.
# TODO Do something nicer about this.
RegistryProxy = cast(CollectorRegistry, _RegistryProxy)
|
|
|
|
|
|
|
|
|
2022-01-13 14:49:28 +01:00
|
|
|
@attr.s(slots=True, hash=True, auto_attribs=True)
class LaterGauge(Collector):
    """A gauge whose value is computed on demand, at collection time.

    Instead of being set explicitly, the value is obtained by invoking
    `caller` each time Prometheus scrapes the metrics. The gauge registers
    itself with the global `REGISTRY` on construction, replacing any
    previously registered collector with the same name.
    """

    name: str
    desc: str
    labels: Optional[Sequence[str]] = attr.ib(hash=False)
    # callback: should either return a value (if there are no labels for this metric),
    # or dict mapping from a label tuple to a value
    caller: Callable[
        [], Union[Mapping[Tuple[str, ...], Union[int, float]], Union[int, float]]
    ]

    def collect(self) -> Iterable[Metric]:
        g = GaugeMetricFamily(self.name, self.desc, labels=self.labels)

        try:
            calls = self.caller()
        except Exception:
            logger.exception("Exception running callback for LaterGauge(%s)", self.name)
            # Report the (empty) gauge anyway so one bad callback doesn't
            # break the whole collection.
            yield g
            return

        if isinstance(calls, (int, float)):
            # Unlabelled metric: a single sample with no label values.
            g.add_metric([], calls)
        else:
            for k, v in calls.items():
                g.add_metric(k, v)

        yield g

    def __attrs_post_init__(self) -> None:
        self._register()

    def _register(self) -> None:
        if self.name in all_gauges:
            # Pass lazy %-args to the logger rather than pre-formatting the
            # message eagerly.
            logger.warning("%s already registered, reregistering", self.name)
            REGISTRY.unregister(all_gauges.pop(self.name))

        REGISTRY.register(self)
        all_gauges[self.name] = self
|
|
|
|
|
|
|
|
|
2021-11-17 20:07:02 +01:00
|
|
|
# Type of the per-key metrics object handed to `InFlightGauge` callbacks.
# `MetricsEntry` only makes sense when it is a `Protocol`,
# but `Protocol` can't be used as a `TypeVar` bound.
MetricsEntry = TypeVar("MetricsEntry")
|
|
|
|
|
|
|
|
|
2022-04-06 14:59:04 +02:00
|
|
|
class InFlightGauge(Generic[MetricsEntry], Collector):
    """Tracks number of things (e.g. requests, Measure blocks, etc) in flight
    at any given time.

    Each InFlightGauge will create a metric called `<name>_total` that counts
    the number of in flight blocks, as well as a metrics for each item in the
    given `sub_metrics` as `<name>_<sub_metric>` which will get updated by the
    callbacks.

    Args:
        name
        desc
        labels
        sub_metrics: A list of sub metrics that the callbacks will update.
    """

    def __init__(
        self,
        name: str,
        desc: str,
        labels: Sequence[str],
        sub_metrics: Sequence[str],
    ):
        self.name = name
        self.desc = desc
        self.labels = labels
        self.sub_metrics = sub_metrics

        # Create a class which have the sub_metrics values as attributes, which
        # default to 0 on initialization. Used to pass to registered callbacks.
        self._metrics_class: Type[MetricsEntry] = attr.make_class(
            "_MetricsEntry",
            attrs={x: attr.ib(default=0) for x in sub_metrics},
            slots=True,
        )

        # Counts number of in flight blocks for a given set of label values
        self._registrations: Dict[
            Tuple[str, ...], Set[Callable[[MetricsEntry], None]]
        ] = {}

        # Protects access to _registrations
        self._lock = threading.Lock()

        self._register_with_collector()

    def register(
        self,
        key: Tuple[str, ...],
        callback: Callable[[MetricsEntry], None],
    ) -> None:
        """Registers that we've entered a new block with labels `key`.

        `callback` gets called each time the metrics are collected. The same
        value must also be given to `unregister`.

        `callback` gets called with an object that has an attribute per
        sub_metric, which should be updated with the necessary values. Note that
        the metrics object is shared between all callbacks registered with the
        same key.

        Note that `callback` may be called on a separate thread.
        """
        with self._lock:
            self._registrations.setdefault(key, set()).add(callback)

    def unregister(
        self,
        key: Tuple[str, ...],
        callback: Callable[[MetricsEntry], None],
    ) -> None:
        """Registers that we've exited a block with labels `key`."""

        with self._lock:
            self._registrations.setdefault(key, set()).discard(callback)

    def collect(self) -> Iterable[Metric]:
        """Called by prometheus client when it reads metrics.

        Note: may be called by a separate thread.
        """
        in_flight = GaugeMetricFamily(
            self.name + "_total", self.desc, labels=self.labels
        )

        metrics_by_key: Dict[Tuple[str, ...], MetricsEntry] = {}

        # We copy so that we don't mutate the list while iterating
        with self._lock:
            keys = list(self._registrations)

        for key in keys:
            with self._lock:
                callbacks = set(self._registrations[key])

            in_flight.add_metric(key, len(callbacks))

            metrics = self._metrics_class()
            metrics_by_key[key] = metrics
            for callback in callbacks:
                callback(metrics)

        yield in_flight

        for name in self.sub_metrics:
            gauge = GaugeMetricFamily(
                "_".join([self.name, name]), "", labels=self.labels
            )
            for key, metrics in metrics_by_key.items():
                gauge.add_metric(key, getattr(metrics, name))
            yield gauge

    def _register_with_collector(self) -> None:
        if self.name in all_gauges:
            # Pass lazy %-args to the logger rather than pre-formatting the
            # message eagerly.
            logger.warning("%s already registered, reregistering", self.name)
            REGISTRY.unregister(all_gauges.pop(self.name))

        REGISTRY.register(self)
        all_gauges[self.name] = self
|
2015-02-24 19:03:39 +01:00
|
|
|
|
|
|
|
|
2022-04-06 14:59:04 +02:00
|
|
|
class GaugeBucketCollector(Collector):
    """Like a Histogram, but the buckets are Gauges which are updated atomically.

    The data is updated by calling `update_data` with an iterable of measurements.

    We assume that the data is updated less frequently than it is reported to
    Prometheus, and optimise for that case.
    """

    __slots__ = ("_name", "_documentation", "_bucket_bounds", "_metric")

    def __init__(
        self,
        name: str,
        documentation: str,
        buckets: Iterable[float],
        registry: CollectorRegistry = REGISTRY,
    ):
        """
        Args:
            name: base name of metric to be exported to Prometheus. (a _bucket suffix
               will be added.)
            documentation: help text for the metric
            buckets: The top bounds of the buckets to report
            registry: metric registry to register with
        """
        self._name = name
        self._documentation = documentation

        # Upper bounds of each bucket; they must arrive in ascending order.
        bounds = [float(b) for b in buckets]
        if bounds != sorted(bounds):
            raise ValueError("Buckets not in sorted order")

        # Ensure there is always a final catch-all bucket.
        if bounds[-1] != float("inf"):
            bounds.append(float("inf"))
        self._bucket_bounds = bounds

        # No metrics are reported until the first successful `update_data`
        # call populates this.
        self._metric: Optional[GaugeHistogramMetricFamily] = None

        registry.register(self)

    def collect(self) -> Iterable[Metric]:
        # Nothing to report until data has been supplied at least once.
        if self._metric is not None:
            yield self._metric

    def update_data(self, values: Iterable[float]) -> None:
        """Update the data to be reported by the metric

        The existing data is cleared, and each measurement in the input is assigned
        to the relevant bucket.
        """
        self._metric = self._values_to_metric(values)

    def _values_to_metric(self, values: Iterable[float]) -> GaugeHistogramMetricFamily:
        running_sum = 0.0
        per_bucket_counts = [0] * len(self._bucket_bounds)

        for value in values:
            # Each measurement lands in the first bucket whose bound covers
            # it (the trailing +inf bound guarantees a match) ...
            for idx, bound in enumerate(self._bucket_bounds):
                if value <= bound:
                    per_bucket_counts[idx] += 1
                    break

            # ... and contributes to the overall sum.
            running_sum += value

        # Gauge-histogram buckets are cumulative: each bucket counts all
        # entries at or below its bound.
        cumulative_counts = itertools.accumulate(per_bucket_counts)

        return GaugeHistogramMetricFamily(
            self._name,
            self._documentation,
            buckets=list(
                zip((str(b) for b in self._bucket_bounds), cumulative_counts)
            ),
            gsum_value=running_sum,
        )
|
|
|
|
|
|
|
|
|
2018-05-23 20:03:56 +02:00
|
|
|
#
|
|
|
|
# Detailed CPU metrics
|
|
|
|
#
|
|
|
|
|
|
|
|
|
2022-04-06 14:59:04 +02:00
|
|
|
class CPUMetrics(Collector):
    """Reports per-process CPU usage, read from /proc/self/stat."""

    def __init__(self) -> None:
        # Ask the OS how many clock ticks make up a second; fall back to the
        # common default of 100 if that isn't available.
        try:
            ticks = os.sysconf("SC_CLK_TCK")
        except (ValueError, TypeError, AttributeError):
            ticks = 100

        self.ticks_per_sec = ticks

    def collect(self) -> Iterable[Metric]:
        if not HAVE_PROC_SELF_STAT:
            return

        with open("/proc/self/stat") as stat_file:
            contents = stat_file.read()
            # Split on ") " to skip past the bracketed command name, then
            # take the space-separated fields that follow.
            fields = contents.split(") ", 1)[1].split(" ")

        user = GaugeMetricFamily("process_cpu_user_seconds_total", "")
        user.add_metric([], float(fields[11]) / self.ticks_per_sec)
        yield user

        sys = GaugeMetricFamily("process_cpu_system_seconds_total", "")
        sys.add_metric([], float(fields[12]) / self.ticks_per_sec)
        yield sys
|
|
|
|
|
2018-05-23 20:08:59 +02:00
|
|
|
|
# Register the CPU metrics collector with the global registry at import time.
REGISTRY.register(CPUMetrics())
|
|
|
|
|
2020-05-22 12:08:41 +02:00
|
|
|
|
2018-05-22 02:47:37 +02:00
|
|
|
#
# Federation Metrics
#

# Number of transactions sent by the federation client.
sent_transactions_counter = Counter("synapse_federation_client_sent_transactions", "")

# Number of events processed by the federation client.
events_processed_counter = Counter("synapse_federation_client_events_processed", "")

event_processing_loop_counter = Counter(
    "synapse_event_processing_loop_count", "Event processing loop iterations", ["name"]
)

event_processing_loop_room_count = Counter(
    "synapse_event_processing_loop_room_count",
    "Rooms seen per event processing loop iteration",
    ["name"],
)

# Used to track where various components have processed in the event stream,
# e.g. federation sending, appservice sending, etc.
event_processing_positions = Gauge("synapse_event_processing_positions", "", ["name"])

# Used to track the current max events stream position
event_persisted_position = Gauge("synapse_event_persisted_position", "")

# Used to track the received_ts of the last event processed by various
# components
event_processing_last_ts = Gauge("synapse_event_processing_last_ts", "", ["name"])

# Used to track the lag processing events. This is the time difference
# between the last processed event's received_ts and the time it was
# finished being processed.
event_processing_lag = Gauge("synapse_event_processing_lag", "", ["name"])

event_processing_lag_by_event = Histogram(
    "synapse_event_processing_lag_by_event",
    "Time between an event being persisted and it being queued up to be sent to the relevant remote servers",
    ["name"],
)

# Build info of the running server.
build_info = Gauge(
    "synapse_build_info", "Build information", ["pythonversion", "version", "osversion"]
)
# Build info is exposed as a constant-value gauge whose information lives in
# its labels (Python implementation/version, Synapse version, OS).
build_info.labels(
    " ".join([platform.python_implementation(), platform.python_version()]),
    SYNAPSE_VERSION,
    " ".join([platform.system(), platform.release()]),
).set(1)

# 3PID send info
threepid_send_requests = Histogram(
    "synapse_threepid_send_requests_with_tries",
    documentation="Number of requests for a 3pid token by try count. Note if"
    " there is a request with try count of 4, then there would have been one"
    " each for 1, 2 and 3",
    buckets=(1, 2, 3, 4, 5, 10),
    labelnames=("type", "reason"),
)

# Threadpool metrics, populated per-pool by `register_threadpool` below.
threadpool_total_threads = Gauge(
    "synapse_threadpool_total_threads",
    "Total number of threads currently in the threadpool",
    ["name"],
)

threadpool_total_working_threads = Gauge(
    "synapse_threadpool_working_threads",
    "Number of threads currently working in the threadpool",
    ["name"],
)

threadpool_total_min_threads = Gauge(
    "synapse_threadpool_min_threads",
    "Minimum number of threads configured in the threadpool",
    ["name"],
)

threadpool_total_max_threads = Gauge(
    "synapse_threadpool_max_threads",
    "Maximum number of threads configured in the threadpool",
    ["name"],
)
|
|
|
|
|
|
|
|
|
|
|
|
def register_threadpool(name: str, threadpool: ThreadPool) -> None:
    """Add metrics for the threadpool."""

    # The configured min/max sizes are fixed, so set them once up front.
    threadpool_total_min_threads.labels(name).set(threadpool.min)
    threadpool_total_max_threads.labels(name).set(threadpool.max)

    # The live thread counts change over time, so sample them lazily at
    # collection time via set_function.
    total_gauge = threadpool_total_threads.labels(name)
    total_gauge.set_function(lambda: len(threadpool.threads))

    working_gauge = threadpool_total_working_threads.labels(name)
    working_gauge.set_function(lambda: len(threadpool.working))
|
|
|
|
|
2018-06-14 12:26:59 +02:00
|
|
|
|
2019-07-18 15:57:15 +02:00
|
|
|
# Names that form the public API of `synapse.metrics`.
__all__ = [
    "Collector",
    "MetricsResource",
    "generate_latest",
    "start_http_server",
    "LaterGauge",
    "InFlightGauge",
    "GaugeBucketCollector",
    "MIN_TIME_BETWEEN_GCS",
    "install_gc_manager",
]
|