Fix some typos.

pull/9407/head
Patrick Cloke 2021-02-12 11:01:48 -05:00
parent 2c9b4a5f16
commit 7950aa8a27
23 changed files with 34 additions and 34 deletions

View File

@@ -98,7 +98,7 @@ class AuthConfig(Config):
# session to be active.
#
# This defaults to 0, meaning the user is queried for their credentials
-# before every action, but this can be overridden to alow a single
+# before every action, but this can be overridden to allow a single
# validation to be re-used. This weakens the protections afforded by
# the user-interactive authentication process, by allowing for multiple
# (and potentially different) operations to use the same validation session.

View File

@@ -123,7 +123,7 @@ class RoomDirectoryConfig(Config):
alias (str)
Returns:
-boolean: True if user is allowed to crate the alias
+boolean: True if user is allowed to create the alias
"""
for rule in self._alias_creation_rules:
if rule.matches(user_id, room_id, [alias]):
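For illustration, the rule loop shown above plausibly concludes along these lines; the `action` attribute and the deny-by-default fallback are assumptions for the sketch, not part of this diff:

def is_alias_creation_allowed(rules, user_id: str, room_id: str, alias: str) -> bool:
    # each rule is assumed to expose matches(...) and an "allow"/"deny"
    # action; the first matching rule wins (fallback behaviour is assumed)
    for rule in rules:
        if rule.matches(user_id, room_id, [alias]):
            return rule.action == "allow"
    return False  # assumed default when no rule matches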

View File

@@ -829,7 +829,7 @@ class RoomCreationHandler(BaseHandler):
if room_alias:
result["room_alias"] = room_alias.to_string()
-# Always wait for room creation to progate before returning
+# Always wait for room creation to propagate before returning
await self._replication.wait_for_stream_position(
self.hs.config.worker.events_shard_config.get_instance(room_id),
"events",

View File

@@ -524,7 +524,7 @@ class RulesForRoom:
class _Invalidation:
# _Invalidation is passed as an `on_invalidate` callback to bulk_get_push_rules,
# which means that it is stored on the bulk_get_push_rules cache entry. In order
-# to ensure that we don't accumulate lots of redunant callbacks on the cache entry,
+# to ensure that we don't accumulate lots of redundant callbacks on the cache entry,
# we need to ensure that two _Invalidation objects are "equal" if they refer to the
# same `cache` and `room_id`.
#
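The equality requirement described here is the standard value-equality trick; a minimal sketch assuming an attrs-style frozen class (only `cache` and `room_id` appear in this diff, the rest is illustrative):

import attr

@attr.s(frozen=True, auto_attribs=True)
class _Invalidation:
    # frozen attrs classes derive value-based __eq__ and __hash__, so two
    # callbacks for the same (cache, room_id) pair compare equal and
    # deduplicate on the bulk_get_push_rules cache entry
    cache: object
    room_id: str

    def __call__(self) -> None:
        self.cache.invalidate((self.room_id,))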

View File

@@ -752,7 +752,7 @@ class PushersRestServlet(RestServlet):
Returns:
pushers: Dictionary containing pushers information.
-total: Number of pushers in dictonary `pushers`.
+total: Number of pushers in dictionary `pushers`.
"""
PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/pushers$")

View File

@@ -30,7 +30,7 @@ logger = logging.getLogger(__name__)
class RoomUpgradeRestServlet(RestServlet):
"""Handler for room uprade requests.
"""Handler for room upgrade requests.
Handles requests of the form:

View File

@@ -137,7 +137,7 @@ def add_file_headers(
# section 3.6 [2] to be a `token` or a `quoted-string`, where a `token`
# is (essentially) a single US-ASCII word, and a `quoted-string` is a
# US-ASCII string surrounded by double-quotes, using backslash as an
-# escape charater. Note that %-encoding is *not* permitted.
+# escape character. Note that %-encoding is *not* permitted.
#
# `filename*` is defined to be an `ext-value`, which is defined in
# RFC5987 section 3.2.1 [3] to be `charset "'" [ language ] "'" value-chars`,
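A hedged illustration of the two encodings the comment distinguishes (the helper name is hypothetical, and dropping embedded quotes is a simplification of the backslash-escaping rule):

from urllib.parse import quote

def _content_disposition(upload_name: str) -> str:
    # ASCII names can travel as a quoted-string `filename`; anything else
    # must use the RFC5987 `filename*` ext-value, which percent-encodes the
    # UTF-8 bytes (%-encoding is *not* allowed in the plain form)
    try:
        upload_name.encode("ascii")
        return 'inline; filename="%s"' % (upload_name.replace('"', ""),)
    except UnicodeEncodeError:
        return "inline; filename*=utf-8''%s" % (quote(upload_name.encode("utf-8")),)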

View File

@@ -184,7 +184,7 @@ class MediaRepository:
async def get_local_media(
self, request: Request, media_id: str, name: Optional[str]
) -> None:
"""Responds to reqests for local media, if exists, or returns 404.
"""Responds to requests for local media, if exists, or returns 404.
Args:
request: The incoming request.
@@ -306,7 +306,7 @@ class MediaRepository:
media_info = await self.store.get_cached_remote_media(server_name, media_id)
# file_id is the ID we use to track the file locally. If we've already
-# seen the file then reuse the existing ID, otherwise genereate a new
+# seen the file then reuse the existing ID, otherwise generate a new
# one.
# If we have an entry in the DB, try and look for it
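The reuse-or-generate step described in this comment might look like the following sketch; `token_urlsafe` stands in for whatever ID generator the repository actually uses:

from secrets import token_urlsafe

if media_info:
    file_id = media_info["filesystem_id"]  # reuse the locally tracked ID
else:
    file_id = token_urlsafe(24)  # stand-in for the real ID generator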
@@ -927,10 +927,10 @@ class MediaRepositoryResource(Resource):
<thumbnail>
-The thumbnail methods are "crop" and "scale". "scale" trys to return an
+The thumbnail methods are "crop" and "scale". "scale" tries to return an
image where either the width or the height is smaller than the requested
size. The client should then scale and letterbox the image if it needs to
-fit within a given rectangle. "crop" trys to return an image where the
+fit within a given rectangle. "crop" tries to return an image where the
width and height are close to the requested size and the aspect matches
the requested size. The client should scale the image if it needs to fit
within a given rectangle.
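As a worked illustration of the "scale" behaviour described above (a sketch, not the repository's actual thumbnailer):

def scaled_dimensions(orig_w: int, orig_h: int, req_w: int, req_h: int):
    # "scale": fit the whole image inside the requested box while keeping
    # the aspect ratio, so either width or height ends up no larger than
    # requested; the client letterboxes the remainder
    ratio = min(req_w / orig_w, req_h / orig_h)
    if ratio >= 1:
        return orig_w, orig_h  # never upscale
    return max(1, int(orig_w * ratio)), max(1, int(orig_h * ratio))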

View File

@@ -615,7 +615,7 @@ class StateResolutionHandler:
event_map:
a dict from event_id to event, for any events that we happen to
have in flight (eg, those currently being persisted). This will be
-used as a starting point fof finding the state we need; any missing
+used as a starting point for finding the state we need; any missing
events will be requested via state_map_factory.
If None, all events will be fetched via state_res_store.
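The lookup order the docstring implies — prefer events already in flight, fetch the rest — can be sketched as follows (the store's `get_events` method name is an assumption here):

async def _load_events(event_ids, event_map, state_res_store):
    # events already in flight short-circuit the fetch; anything missing is
    # requested from the store (method name assumed for illustration)
    event_map = dict(event_map or {})
    missing = [e for e in event_ids if e not in event_map]
    if missing:
        event_map.update(await state_res_store.get_events(missing))
    return event_map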

View File

@@ -450,7 +450,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
},
)
-# Add the messages to the approriate local device inboxes so that
+# Add the messages to the appropriate local device inboxes so that
# they'll be sent to the devices when they next sync.
self._add_messages_to_local_device_inbox_txn(
txn, stream_id, local_messages_by_user_then_device

View File

@@ -371,7 +371,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
# and state sets {A} and {B} then walking the auth chains of A and B
# would immediately show that C is reachable by both. However, if we
# stopped at C then we'd only reach E via the auth chain of B and so E
-# would errornously get included in the returned difference.
+# would erroneously get included in the returned difference.
#
# The other thing that we do is limit the number of auth chains we walk
# at once, due to practical limits (i.e. we can only query the database
@@ -497,7 +497,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
a_ids = new_aids
-# Mark that the auth event is reachable by the approriate sets.
+# Mark that the auth event is reachable by the appropriate sets.
sets.intersection_update(event_to_missing_sets[event_id])
search.sort()

View File

@@ -1050,7 +1050,7 @@ class PersistEventsStore:
# Figure out the changes of membership to invalidate the
# `get_rooms_for_user` cache.
# We find out which membership events we may have deleted
-# and which we have added, then we invlidate the caches for all
+# and which we have added, then we invalidate the caches for all
# those users.
members_changed = {
state_key

View File

@@ -155,7 +155,7 @@ class KeyStore(SQLBaseStore):
(server_name, key_id, from_server) triplet if one already existed.
Args:
server_name: The name of the server.
-key_id: The identifer of the key this JSON is for.
+key_id: The identifier of the key this JSON is for.
from_server: The server this JSON was fetched from.
ts_now_ms: The time now in milliseconds.
ts_valid_until_ms: The time when this json stops being valid.
@@ -182,7 +182,7 @@ class KeyStore(SQLBaseStore):
async def get_server_keys_json(
self, server_keys: Iterable[Tuple[str, Optional[str], Optional[str]]]
) -> Dict[Tuple[str, Optional[str], Optional[str]], List[dict]]:
"""Retrive the key json for a list of server_keys and key ids.
"""Retrieve the key json for a list of server_keys and key ids.
If no keys are found for a given server, key_id and source then
that server, key_id, and source triplet entry will be an empty list.
The JSON is returned as a byte array so that it can be efficiently

View File

@@ -111,7 +111,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
async def count_daily_sent_e2ee_messages(self):
def _count_messages(txn):
# This is good enough as if you have silly characters in your own
-# hostname then thats your own fault.
+# hostname then that's your own fault.
like_clause = "%:" + self.hs.hostname
sql = """
@@ -167,7 +167,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
async def count_daily_sent_messages(self):
def _count_messages(txn):
# This is good enough as if you have silly characters in your own
-# hostname then thats your own fault.
+# hostname then that's your own fault.
like_clause = "%:" + self.hs.hostname
sql = """

View File

@@ -160,7 +160,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
Args:
room_id: List of room_ids.
-to_key: Max stream id to fetch receipts upto.
+to_key: Max stream id to fetch receipts up to.
from_key: Min stream id to fetch receipts from. None fetches
from the start.
@@ -189,7 +189,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
Args:
room_ids: The room id.
-to_key: Max stream id to fetch receipts upto.
+to_key: Max stream id to fetch receipts up to.
from_key: Min stream id to fetch receipts from. None fetches
from the start.
@@ -312,7 +312,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
to a limit of the latest 100 read receipts.
Args:
-to_key: Max stream id to fetch receipts upto.
+to_key: Max stream id to fetch receipts up to.
from_key: Min stream id to fetch receipts from. None fetches
from the start.

View File

@@ -1044,7 +1044,7 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
async def _background_add_rooms_room_version_column(
self, progress: dict, batch_size: int
):
"""Background update to go and add room version inforamtion to `rooms`
"""Background update to go and add room version information to `rooms`
table from `current_state_events` table.
"""

View File

@@ -64,7 +64,7 @@ class StateDeltasStore(SQLBaseStore):
def get_current_state_deltas_txn(txn):
# First we calculate the max stream id that will give us less than
# N results.
-# We arbitarily limit to 100 stream_id entries to ensure we don't
+# We arbitrarily limit to 100 stream_id entries to ensure we don't
# select toooo many.
sql = """
SELECT stream_id, count(*)
@@ -81,7 +81,7 @@ class StateDeltasStore(SQLBaseStore):
for stream_id, count in txn:
total += count
if total > 100:
-# We arbitarily limit to 100 entries to ensure we don't
+# We arbitrarily limit to 100 entries to ensure we don't
# select toooo many.
logger.debug(
"Clipping current_state_delta_stream rows to stream_id %i",

View File

@@ -198,7 +198,7 @@ class TransactionStore(TransactionWorkerStore):
retry_interval: int,
) -> None:
"""Sets the current retry timings for a given destination.
-Both timings should be zero if retrying is no longer occuring.
+Both timings should be zero if retrying is no longer occurring.
Args:
destination

View File

@@ -27,7 +27,7 @@ MAX_STATE_DELTA_HOPS = 100
class StateGroupBackgroundUpdateStore(SQLBaseStore):
"""Defines functions related to state groups needed to run the state backgroud
"""Defines functions related to state groups needed to run the state background
updates.
"""

View File

@@ -113,7 +113,7 @@ def prepare_database(
# which should be empty.
if config is None:
raise ValueError(
"config==None in prepare_database, but databse is not empty"
"config==None in prepare_database, but database is not empty"
)
# if it's a worker app, refuse to upgrade the database, to avoid multiple

View File

@@ -245,7 +245,7 @@ class MultiWriterIdGenerator:
# and b) noting that if we have seen a run of persisted positions
# without gaps (e.g. 5, 6, 7) then we can skip forward (e.g. to 7).
#
-# Note: There is no guarentee that the IDs generated by the sequence
+# Note: There is no guarantee that the IDs generated by the sequence
# will be gapless; gaps can form when e.g. a transaction was rolled
# back. This means that sometimes we won't be able to skip forward the
# position even though everything has been persisted. However, since
@@ -418,7 +418,7 @@ class MultiWriterIdGenerator:
# bother, as nothing will read it).
#
# We only do this on the success path so that the persisted current
-# position points to a persited row with the correct instance name.
+# position points to a persisted row with the correct instance name.
if self._writers:
txn.call_after(
run_as_background_process,
@@ -509,7 +509,7 @@ class MultiWriterIdGenerator:
}
def advance(self, instance_name: str, new_id: int):
"""Advance the postion of the named writer to the given ID, if greater
"""Advance the position of the named writer to the given ID, if greater
than existing entry.
"""

View File

@@ -675,7 +675,7 @@ class PersistedEventPosition:
persisted in the same room after this position will be after the
returned `RoomStreamToken`.
-Note: no guarentees are made about ordering w.r.t. events in other
+Note: no guarantees are made about ordering w.r.t. events in other
rooms.
"""
# Doing the naive thing satisfies the desired properties described in

View File

@@ -497,7 +497,7 @@ def timeout_deferred(
delayed_call = reactor.callLater(timeout, time_it_out)
def convert_cancelled(value: failure.Failure):
-# if the orgininal deferred was cancelled, and our timeout has fired, then
+# if the original deferred was cancelled, and our timeout has fired, then
# the reason it was cancelled was due to our timeout. Turn the CancelledError
# into a TimeoutError.
if timed_out[0] and value.check(CancelledError):
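Assuming the usual Twisted pattern, the errback plausibly concludes along these lines (a sketch; in the source, `timed_out` and `timeout` come from the enclosing `timeout_deferred`):

from twisted.internet import defer
from twisted.python import failure

def make_convert_cancelled(timed_out: list, timeout: float):
    # a cancellation observed after our own timeout fired was self-inflicted,
    # so surface it as a TimeoutError; any other failure passes through
    def convert_cancelled(value: failure.Failure):
        if timed_out[0] and value.check(defer.CancelledError):
            raise defer.TimeoutError("Timed out after %gs" % (timeout,))
        return value
    return convert_cancelled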