Validate that a given string matches the client_secret defined by the spec
def assert_valid_client_secret(client_secret: str) -> None:
    """Validate that a given string matches the client_secret defined by the spec"""
    if (
        len(client_secret) <= 0
        or len(client_secret) > 255
        or CLIENT_SECRET_REGEX.match(client_secret) is None
    ):
        raise SynapseError(
            400, "Invalid client_secret parameter", errcode=Codes.INVALID_PARAM
        )
Split a server name into host/port parts. Args: server_name: server name to parse Returns: host/port parts. Raises: ValueError if the server name could not be parsed.
def parse_server_name(server_name: str) -> Tuple[str, Optional[int]]:
    """Split a server name into host/port parts.

    Args:
        server_name: server name to parse

    Returns:
        host/port parts.

    Raises:
        ValueError if the server name could not be parsed.
    """
    try:
        if server_name and server_name[-1] == "]":
            # ipv6 literal, hopefully
            return server_name, None

        domain_port = server_name.rsplit(":", 1)
        domain = domain_port[0]
        port = int(domain_port[1]) if domain_port[1:] else None
        return domain, port
    except Exception:
        raise ValueError("Invalid server name '%s'" % server_name)
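A quick usage sketch (not part of the original entry), assuming parse_server_name is in scope as defined above:

# Illustrative only.
assert parse_server_name("matrix.org") == ("matrix.org", None)
assert parse_server_name("matrix.org:8448") == ("matrix.org", 8448)
# An IPv6 literal (ending in "]") is returned whole, with no port split off.
assert parse_server_name("[2001:db8::1]") == ("[2001:db8::1]", None)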
Split a server name into host/port parts and do some basic validation. Args: server_name: server name to parse Returns: host/port parts. Raises: ValueError if the server name could not be parsed.
def parse_and_validate_server_name(server_name: str) -> Tuple[str, Optional[int]]:
    """Split a server name into host/port parts and do some basic validation.

    Args:
        server_name: server name to parse

    Returns:
        host/port parts.

    Raises:
        ValueError if the server name could not be parsed.
    """
    host, port = parse_server_name(server_name)

    # these tests don't need to be bulletproof as we'll find out soon enough
    # if somebody is giving us invalid data. What we *do* need is to be sure
    # that nobody is sneaking IP literals in that look like hostnames, etc.

    # look for ipv6 literals
    if host and host[0] == "[":
        if host[-1] != "]":
            raise ValueError("Mismatched [...] in server name '%s'" % (server_name,))

        # valid_ipv6 raises when given an empty string
        ipv6_address = host[1:-1]
        if not ipv6_address or not valid_ipv6(ipv6_address):
            raise ValueError(
                "Server name '%s' is not a valid IPv6 address" % (server_name,)
            )
    elif not VALID_HOST_REGEX.match(host):
        raise ValueError("Server name '%s' has an invalid format" % (server_name,))

    return host, port
Check whether an identity server location, such as the one passed as the `id_server` parameter to `/_matrix/client/r0/account/3pid/bind`, is valid. A valid identity server location consists of a valid hostname and optional port number, optionally followed by any number of `/` delimited path components, without any fragment or query string parts. Args: id_server: identity server location string to validate Returns: True if valid, False otherwise.
def valid_id_server_location(id_server: str) -> bool:
    """Check whether an identity server location, such as the one passed as the
    `id_server` parameter to `/_matrix/client/r0/account/3pid/bind`, is valid.

    A valid identity server location consists of a valid hostname and optional
    port number, optionally followed by any number of `/` delimited path components,
    without any fragment or query string parts.

    Args:
        id_server: identity server location string to validate

    Returns:
        True if valid, False otherwise.
    """
    components = id_server.split("/", 1)

    host = components[0]

    try:
        parse_and_validate_server_name(host)
    except ValueError:
        return False

    if len(components) < 2:
        # no path
        return True

    path = components[1]
    return "#" not in path and "?" not in path
Parse the given string as an MXC URI Checks that the "server name" part is a valid server name Args: mxc: the (alleged) MXC URI to be checked Returns: hostname, port, media id Raises: ValueError if the URI cannot be parsed
def parse_and_validate_mxc_uri(mxc: str) -> Tuple[str, Optional[int], str]:
    """Parse the given string as an MXC URI

    Checks that the "server name" part is a valid server name

    Args:
        mxc: the (alleged) MXC URI to be checked

    Returns:
        hostname, port, media id

    Raises:
        ValueError if the URI cannot be parsed
    """
    m = MXC_REGEX.match(mxc)
    if not m:
        raise ValueError("mxc URI %r did not match expected format" % (mxc,))
    server_name = m.group(1)
    media_id = m.group(2)
    host, port = parse_and_validate_server_name(server_name)
    return host, port, media_id
If iterable has maxitems or fewer, return the stringification of a list containing those items. Otherwise, return the stringification of a list with the first maxitems items, followed by "...". Args: iterable: iterable to truncate maxitems: number of items to return before truncating
def shortstr(iterable: Iterable, maxitems: int = 5) -> str:
    """If iterable has maxitems or fewer, return the stringification of a list
    containing those items.

    Otherwise, return the stringification of a list with the first maxitems items,
    followed by "...".

    Args:
        iterable: iterable to truncate
        maxitems: number of items to return before truncating
    """

    items = list(itertools.islice(iterable, maxitems + 1))
    if len(items) <= maxitems:
        return str(items)
    return "[" + ", ".join(repr(r) for r in items[:maxitems]) + ", ...]"
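A brief illustrative example (assuming shortstr is in scope as defined above):

# Illustrative only.
assert shortstr([1, 2, 3]) == "[1, 2, 3]"                   # short enough: stringified as-is
assert shortstr(range(10), maxitems=3) == "[0, 1, 2, ...]"  # truncated with "..."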
Convert a string representation of truth to True or False True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if 'val' is anything else. This is lifted from distutils.util.strtobool, with the exception that it actually returns a bool, rather than an int.
def strtobool(val: str) -> bool:
    """Convert a string representation of truth to True or False

    True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
    are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
    'val' is anything else.

    This is lifted from distutils.util.strtobool, with the exception that it actually
    returns a bool, rather than an int.
    """
    val = val.lower()
    if val in ("y", "yes", "t", "true", "on", "1"):
        return True
    elif val in ("n", "no", "f", "false", "off", "0"):
        return False
    else:
        raise ValueError("invalid truth value %r" % (val,))
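For illustration, a few example values (assuming strtobool is in scope as defined above):

# Illustrative only.
assert strtobool("Yes") is True    # comparison is case-insensitive
assert strtobool("off") is False
try:
    strtobool("maybe")             # anything outside the known truthy/falsy strings raises
except ValueError:
    pass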
Encode a number using base62 Args: num: number to be encoded minwidth: width to pad to, if the number is small
def base62_encode(num: int, minwidth: int = 1) -> str:
    """Encode a number using base62

    Args:
        num: number to be encoded
        minwidth: width to pad to, if the number is small
    """
    res = ""
    while num:
        num, rem = divmod(num, 62)
        res = _BASE62[rem] + res

    # pad to minimum width
    pad = "0" * (minwidth - len(res))
    return pad + res
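An illustrative sketch of the expected output, assuming the (unshown) _BASE62 alphabet begins with the decimal digits:

# Illustrative only; assumes _BASE62 starts "0123456789...".
assert base62_encode(0) == "0"
assert base62_encode(62) == "10"              # 1 * 62 + 0
assert base62_encode(7, minwidth=3) == "007"  # zero-padded to the requested width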
Check that the arg is a string containing no null (U+0000) codepoints. If so, returns the given string unmodified; otherwise, returns None.
def non_null_str_or_none(val: Any) -> Optional[str]:
    """Check that the arg is a string containing no null (U+0000) codepoints.

    If so, returns the given string unmodified; otherwise, returns None.
    """
    return val if isinstance(val, str) and "\u0000" not in val else None
Set up a Jinja2 environment to load templates from the given search path The returned environment defines the following filters: - format_ts: formats timestamps as strings in the server's local timezone (XXX: why is that useful??) - mxc_to_http: converts mxc: uris to http URIs. Args are: (uri, width, height, resize_method="crop") and the following global variables: - server_name: matrix server name Args: template_search_directories: directories to search for templates config: homeserver config, for things like `server_name` and `public_baseurl` autoescape: whether template variables should be autoescaped. bool, or a function mapping from template name to bool. Defaults to escaping templates whose names end in .html, .xml or .htm. Returns: jinja environment
def build_jinja_env( template_search_directories: Sequence[str], config: "HomeServerConfig", autoescape: Union[bool, Callable[[Optional[str]], bool], None] = None, ) -> jinja2.Environment: """Set up a Jinja2 environment to load templates from the given search path The returned environment defines the following filters: - format_ts: formats timestamps as strings in the server's local timezone (XXX: why is that useful??) - mxc_to_http: converts mxc: uris to http URIs. Args are: (uri, width, height, resize_method="crop") and the following global variables: - server_name: matrix server name Args: template_search_directories: directories to search for templates config: homeserver config, for things like `server_name` and `public_baseurl` autoescape: whether template variables should be autoescaped. bool, or a function mapping from template name to bool. Defaults to escaping templates whose names end in .html, .xml or .htm. Returns: jinja environment """ if autoescape is None: autoescape = jinja2.select_autoescape() loader = jinja2.FileSystemLoader(template_search_directories) env = jinja2.Environment(loader=loader, autoescape=autoescape) # Update the environment with our custom filters env.filters.update( { "format_ts": _format_ts_filter, "mxc_to_http": _create_mxc_to_http_filter(config.server.public_baseurl), "localpart_from_email": _localpart_from_email_filter, } ) # common variables for all templates env.globals.update({"server_name": config.server.server_name}) return env
Create and return a jinja2 filter that converts MXC urls to HTTP Args: public_baseurl: The public, accessible base URL of the homeserver
def _create_mxc_to_http_filter( public_baseurl: Optional[str], ) -> Callable[[str, int, int, str], str]: """Create and return a jinja2 filter that converts MXC urls to HTTP Args: public_baseurl: The public, accessible base URL of the homeserver """ def mxc_to_http_filter( value: str, width: int, height: int, resize_method: str = "crop" ) -> str: if not public_baseurl: raise RuntimeError( "public_baseurl must be set in the homeserver config to convert MXC URLs to HTTP URLs." ) if value[0:6] != "mxc://": return "" server_and_media_id = value[6:] fragment = None if "#" in server_and_media_id: server_and_media_id, fragment = server_and_media_id.split("#", 1) fragment = "#" + fragment params = {"width": width, "height": height, "method": resize_method} return "%s_matrix/media/v1/thumbnail/%s?%s%s" % ( public_baseurl, server_and_media_id, urllib.parse.urlencode(params), fragment or "", ) return mxc_to_http_filter
'Canonicalise' email address Case folding of local part of email address and lowercase domain part See MSC2265, https://github.com/matrix-org/matrix-doc/pull/2265 Args: address: email address to be canonicalised Returns: The canonical form of the email address Raises: ValueError if the address could not be parsed.
def canonicalise_email(address: str) -> str:
    """'Canonicalise' email address
    Case folding of local part of email address and lowercase domain part
    See MSC2265, https://github.com/matrix-org/matrix-doc/pull/2265

    Args:
        address: email address to be canonicalised
    Returns:
        The canonical form of the email address
    Raises:
        ValueError if the address could not be parsed.
    """

    address = address.strip()

    parts = address.split("@")
    if len(parts) != 2:
        logger.debug("Couldn't parse email address %s", address)
        raise ValueError("Unable to parse email address")

    return parts[0].casefold() + "@" + parts[1].lower()
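A short usage sketch (assuming canonicalise_email and its module-level logger are in scope):

# Illustrative only.
assert canonicalise_email("  Alice@Example.COM ") == "alice@example.com"
try:
    canonicalise_email("not-an-email")  # no single "@" separator
except ValueError:
    pass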
Does some basic validation on an email address. Returns the canonicalised email, as returned by `canonicalise_email`. Raises a ValueError if the email is invalid.
def validate_email(address: str) -> str:
    """Does some basic validation on an email address.

    Returns the canonicalised email, as returned by `canonicalise_email`.

    Raises a ValueError if the email is invalid.
    """
    # First we try canonicalising in case that fails
    address = canonicalise_email(address)

    # Email addresses have to be at least 3 characters.
    if len(address) < 3:
        raise ValueError("Unable to parse email address")

    if len(address) > MAX_EMAIL_ADDRESS_LENGTH:
        raise ValueError("Unable to parse email address")

    return address
Do not allow Infinity, -Infinity, or NaN values in JSON.
def _reject_invalid_json(val: Any) -> None:
    """Do not allow Infinity, -Infinity, or NaN values in JSON."""
    raise ValueError("Invalid JSON value: '%s'" % val)
Helper for json_encoder. Makes immutabledicts serializable by returning the underlying dict
def _handle_immutabledict(obj: Any) -> Dict[Any, Any]:
    """Helper for json_encoder. Makes immutabledicts serializable by returning
    the underlying dict
    """
    if type(obj) is immutabledict:
        # fishing the protected dict out of the object is a bit nasty,
        # but we don't really want the overhead of copying the dict.
        try:
            # Safety: we catch the AttributeError immediately below.
            return obj._dict
        except AttributeError:
            # If all else fails, resort to making a copy of the immutabledict
            return dict(obj)
    raise TypeError(
        "Object of type %s is not JSON serializable" % obj.__class__.__name__
    )
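For context, a minimal sketch of how these two helpers might be wired into a module-level encoder/decoder; the names json_encoder and json_decoder are assumptions here, not taken from the entries above:

# Sketch only: plausible wiring for the helpers above.
import json

json_encoder = json.JSONEncoder(
    allow_nan=False,                # refuse NaN/Infinity when encoding
    separators=(",", ":"),          # compact output
    default=_handle_immutabledict,  # serialise immutabledicts via their backing dict
)
json_decoder = json.JSONDecoder(parse_constant=_reject_invalid_json)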
Creates a function suitable for passing to `Deferred.addErrback` that logs any failures that occur. Args: failure: The Failure to log msg: Message to log consumeErrors: If true consumes the failure, otherwise passes on down the callback chain Returns: The Failure if consumeErrors is false. None, otherwise.
def log_failure(
    failure: Failure, msg: str, consumeErrors: bool = True
) -> Optional[Failure]:
    """Creates a function suitable for passing to `Deferred.addErrback` that
    logs any failures that occur.

    Args:
        failure: The Failure to log
        msg: Message to log
        consumeErrors: If true consumes the failure, otherwise passes on down
            the callback chain

    Returns:
        The Failure if consumeErrors is false. None, otherwise.
    """

    logger.error(
        msg, exc_info=(failure.type, failure.value, failure.getTracebackObject())
    )

    if not consumeErrors:
        return failure
    return None
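A hedged usage sketch: the helper can be attached directly with Deferred.addErrback, which forwards extra positional arguments after the Failure:

# Illustrative only.
from twisted.internet import defer

d = defer.Deferred()
d.addErrback(log_failure, "Failed to frobnicate")  # logs (and, by default, consumes) any failure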
Creates a descriptor that wraps a function in a `DeferredCacheListDescriptor`. Used to do batch lookups for an already created cache. One of the arguments is specified as a list that is iterated through to look up keys in the original cache. A new tuple consisting of the (deduplicated) keys that weren't in the cache gets passed to the original function, which is expected to result in a map of key to value for each passed value. The new results are stored in the original cache. Note that any missing values are cached as None. Args: cached_method_name: The name of the single-item lookup method. This is only used to find the cache to use. list_name: The name of the argument that is the iterable to use to do batch lookups in the cache. num_args: Number of arguments to use as the key in the cache (including list_name). Defaults to all named parameters. Example: class Example: @cached() def do_something(self, first_arg, second_arg): ... @cachedList(cached_method_name="do_something", list_name="second_args") def batch_do_something(self, first_arg, second_args): ...
def cachedList( *, cached_method_name: str, list_name: str, num_args: Optional[int] = None, name: Optional[str] = None, ) -> _CachedListFunctionDescriptor: """Creates a descriptor that wraps a function in a `DeferredCacheListDescriptor`. Used to do batch lookups for an already created cache. One of the arguments is specified as a list that is iterated through to lookup keys in the original cache. A new tuple consisting of the (deduplicated) keys that weren't in the cache gets passed to the original function, which is expected to results in a map of key to value for each passed value. The new results are stored in the original cache. Note that any missing values are cached as None. Args: cached_method_name: The name of the single-item lookup method. This is only used to find the cache to use. list_name: The name of the argument that is the iterable to use to do batch lookups in the cache. num_args: Number of arguments to use as the key in the cache (including list_name). Defaults to all named parameters. Example: class Example: @cached() def do_something(self, first_arg, second_arg): ... @cachedList(cached_method_name="do_something", list_name="second_args") def batch_do_something(self, first_arg, second_args): ... """ return _CachedListFunctionDescriptor( cached_method_name=cached_method_name, list_name=list_name, num_args=num_args, name=name, )
Construct a function which will build cache keys suitable for a cached function Args: param_names: list of formal parameter names for the cached function include_params: list of bools of whether to include the parameter name in the cache key param_defaults: a mapping from parameter name to default value for that param Returns: A function which will take an (args, kwargs) pair and return a cache key
def _get_cache_key_builder(
    param_names: Sequence[str],
    include_params: Sequence[bool],
    param_defaults: Mapping[str, Any],
) -> Callable[[Sequence[Any], Mapping[str, Any]], CacheKey]:
    """Construct a function which will build cache keys suitable for a cached function

    Args:
        param_names: list of formal parameter names for the cached function
        include_params: list of bools of whether to include the parameter name in the
            cache key
        param_defaults: a mapping from parameter name to default value for that param

    Returns:
        A function which will take an (args, kwargs) pair and return a cache key
    """

    # By default our cache key is a tuple, but if there is only one item
    # then don't bother wrapping in a tuple. This is to save memory.

    if len(param_names) == 1:
        nm = param_names[0]
        assert include_params[0] is True

        def get_cache_key(args: Sequence[Any], kwargs: Mapping[str, Any]) -> CacheKey:
            if nm in kwargs:
                return kwargs[nm]
            elif len(args):
                return args[0]
            else:
                return param_defaults[nm]

    else:

        def get_cache_key(args: Sequence[Any], kwargs: Mapping[str, Any]) -> CacheKey:
            return tuple(
                _get_cache_key_gen(
                    param_names, include_params, param_defaults, args, kwargs
                )
            )

    return get_cache_key
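An illustrative sketch of the generated key builder for a two-parameter cached function (the parameter names here are made up for the example):

# Illustrative only: keys for a hypothetical cached function f(user_id, limit=10).
key_builder = _get_cache_key_builder(
    param_names=["user_id", "limit"],
    include_params=[True, True],
    param_defaults={"limit": 10},
)
assert key_builder(("@alice:test",), {}) == ("@alice:test", 10)           # default used
assert key_builder(("@alice:test",), {"limit": 5}) == ("@alice:test", 5)  # kwarg wins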
Given some args/kwargs return a generator that resolves into the cache_key. This is essentially the same operation as `inspect.getcallargs`, but optimised so that we don't need to inspect the target function for each call.
def _get_cache_key_gen(
    param_names: Iterable[str],
    include_params: Iterable[bool],
    param_defaults: Mapping[str, Any],
    args: Sequence[Any],
    kwargs: Mapping[str, Any],
) -> Iterable[Any]:
    """Given some args/kwargs return a generator that resolves into
    the cache_key.

    This is essentially the same operation as `inspect.getcallargs`, but optimised so
    that we don't need to inspect the target function for each call.
    """

    # We loop through each arg name, looking up if it's in the `kwargs`,
    # otherwise using the next argument in `args`. If there are no more
    # args then we try looking the arg name up in the defaults.
    pos = 0
    for nm, inc in zip(param_names, include_params):
        if nm in kwargs:
            if inc:
                yield kwargs[nm]
        elif pos < len(args):
            if inc:
                yield args[pos]
            pos += 1
        else:
            if inc:
                yield param_defaults[nm]
Start a background job that expires all cache entries if they have not been accessed for the given number of seconds, or if a given memory usage threshold has been breached.
def setup_expire_lru_cache_entries(hs: "HomeServer") -> None:
    """Start a background job that expires all cache entries if they have not
    been accessed for the given number of seconds, or if a given memory usage threshold
    has been breached.
    """
    if not hs.config.caches.expiry_time_msec and not hs.config.caches.cache_autotuning:
        return

    if hs.config.caches.expiry_time_msec:
        expiry_time = hs.config.caches.expiry_time_msec / 1000
        logger.info("Expiring LRU caches after %d seconds", expiry_time)
    else:
        expiry_time = math.inf

    global USE_GLOBAL_LIST
    USE_GLOBAL_LIST = True

    clock = hs.get_clock()
    clock.looping_call(
        _expire_old_entries,
        30 * 1000,
        clock,
        expiry_time,
        hs.config.caches.cache_autotuning,
    )
Helper function to iterate over the leaves of a tree, i.e. a dict that can contain dicts.
def iterate_tree_cache_entry(d):
    """Helper function to iterate over the leaves of a tree, i.e. a dict that
    can contain dicts.
    """
    if isinstance(d, TreeCacheNode):
        for value_d in d.values():
            yield from iterate_tree_cache_entry(value_d)
    else:
        yield d
Helper function to iterate over the leaves of a tree, i.e. a dict that can contain dicts. The provided key is a tuple that will get prepended to the returned keys. Example: cache = TreeCache() cache[(1, 1)] = "a" cache[(1, 2)] = "b" cache[(2, 1)] = "c" tree_node = cache.get((1,)) items = iterate_tree_cache_items((1,), tree_node) assert list(items) == [((1, 1), "a"), ((1, 2), "b")] Returns: A generator yielding key/value pairs.
def iterate_tree_cache_items(key, value):
    """Helper function to iterate over the leaves of a tree, i.e. a dict that
    can contain dicts.

    The provided key is a tuple that will get prepended to the returned keys.

    Example:

        cache = TreeCache()
        cache[(1, 1)] = "a"
        cache[(1, 2)] = "b"
        cache[(2, 1)] = "c"

        tree_node = cache.get((1,))

        items = iterate_tree_cache_items((1,), tree_node)
        assert list(items) == [((1, 1), "a"), ((1, 2), "b")]

    Returns:
        A generator yielding key/value pairs.
    """
    if isinstance(value, TreeCacheNode):
        for sub_key, sub_value in value.items():
            yield from iterate_tree_cache_items((*key, sub_key), sub_value)
    else:
        # we've reached a leaf of the tree.
        yield key, value
Register a cache object for metric collection and resizing. Args: cache_type: a string indicating the "type" of the cache. This is used only for deduplication so isn't too important provided it's constant. cache_name: name of the cache cache: cache itself, which must implement __len__(), and may optionally implement a max_size property collect_callback: If given, a function which is called during metric collection to update additional metrics. resizable: Whether this cache supports being resized, in which case either resize_callback must be provided, or the cache must support set_max_size(). resize_callback: A function which can be called to resize the cache. Returns: an object which provides inc_{hits,misses,evictions} methods
def register_cache(
    cache_type: str,
    cache_name: str,
    cache: Sized,
    collect_callback: Optional[Callable] = None,
    resizable: bool = True,
    resize_callback: Optional[Callable] = None,
) -> CacheMetric:
    """Register a cache object for metric collection and resizing.

    Args:
        cache_type: a string indicating the "type" of the cache. This is used
            only for deduplication so isn't too important provided it's constant.
        cache_name: name of the cache
        cache: cache itself, which must implement __len__(), and may optionally implement
            a max_size property
        collect_callback: If given, a function which is called during metric
            collection to update additional metrics.
        resizable: Whether this cache supports being resized, in which case either
            resize_callback must be provided, or the cache must support set_max_size().
        resize_callback: A function which can be called to resize the cache.

    Returns:
        an object which provides inc_{hits,misses,evictions} methods
    """
    if resizable:
        if not resize_callback:
            resize_callback = cache.set_cache_factor  # type: ignore
        add_resizable_cache(cache_name, resize_callback)

    metric = CacheMetric(cache, cache_type, cache_name, collect_callback)
    metric_name = "cache_%s_%s" % (cache_type, cache_name)
    caches_by_name[cache_name] = cache
    CACHE_METRIC_REGISTRY.register_hook(metric_name, metric.collect)
    return metric
Takes a (potentially) unicode string and interns it if it's ascii
def intern_string(string: T) -> T:
    """Takes a (potentially) unicode string and interns it if it's ascii"""
    if string is None:
        return None

    try:
        return intern(string)
    except UnicodeEncodeError:
        return string
Takes a dictionary and interns well known keys and their values
def intern_dict(dictionary: Dict[str, Any]) -> Dict[str, Any]:
    """Takes a dictionary and interns well known keys and their values"""
    return {
        KNOWN_KEYS.get(key, key): _intern_known_values(key, value)
        for key, value in dictionary.items()
    }
Given a homeserver, get all registered endpoints and their descriptions.
def get_registered_paths_for_hs(
    hs: HomeServer,
) -> Dict[Tuple[str, str], EndpointDescription]:
    """
    Given a homeserver, get all registered endpoints and their descriptions.
    """

    enumerator = EnumerationResource(is_worker=hs.config.worker.worker_app is not None)
    ClientRestResource.register_servlets(enumerator, hs)
    federation_server = TransportLayerServer(hs)

    # we can't use `federation_server.register_servlets` but this line does the
    # same thing, only it uses this enumerator
    register_federation_servlets(
        federation_server.hs,
        resource=enumerator,
        ratelimiter=federation_server.ratelimiter,
        authenticator=federation_server.authenticator,
        servlet_groups=federation_server.servlet_groups,
    )

    # the key server endpoints are separate again
    RemoteKey(hs).register(enumerator)

    return enumerator.registrations
Given the name of a worker application and a base homeserver configuration, returns: Dict from (method, path) to EndpointDescription TODO Don't require passing in a config
def get_registered_paths_for_default(
    worker_app: Optional[str], base_config: HomeServerConfig
) -> Dict[Tuple[str, str], EndpointDescription]:
    """
    Given the name of a worker application and a base homeserver configuration,
    returns:

        Dict from (method, path) to EndpointDescription

    TODO Don't require passing in a config
    """

    hs = MockHomeserver(base_config, worker_app)
    # TODO We only do this to avoid an error, but don't need the database etc
    hs.setup()
    return get_registered_paths_for_hs(hs)
Elides HTTP methods (by replacing them with `*`) if all possible registered methods can be handled by the worker whose registration map is `registrations`. i.e. the only endpoints left with methods (other than `*`) should be the ones where the worker can't handle all possible methods for that path.
def elide_http_methods_if_unconflicting(
    registrations: Dict[Tuple[str, str], EndpointDescription],
    all_possible_registrations: Dict[Tuple[str, str], EndpointDescription],
) -> Dict[Tuple[str, str], EndpointDescription]:
    """
    Elides HTTP methods (by replacing them with `*`) if all possible registered methods
    can be handled by the worker whose registration map is `registrations`.

    i.e. the only endpoints left with methods (other than `*`) should be the ones where
    the worker can't handle all possible methods for that path.
    """

    def paths_to_methods_dict(
        methods_and_paths: Iterable[Tuple[str, str]]
    ) -> Dict[str, Set[str]]:
        """
        Given (method, path) pairs, produces a dict from path to set of methods
        available at that path.
        """
        result: Dict[str, Set[str]] = {}
        for method, path in methods_and_paths:
            result.setdefault(path, set()).add(method)
        return result

    all_possible_reg_methods = paths_to_methods_dict(all_possible_registrations)
    reg_methods = paths_to_methods_dict(registrations)

    output = {}

    for path, handleable_methods in reg_methods.items():
        if handleable_methods == all_possible_reg_methods[path]:
            any_method = next(iter(handleable_methods))
            # TODO This assumes that all methods have the same servlet.
            #      I suppose that's possibly dubious?
            output[("*", path)] = registrations[(any_method, path)]
        else:
            for method in handleable_methods:
                output[(method, path)] = registrations[(method, path)]

    return output
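A small illustrative example, using plain strings as stand-ins for EndpointDescription values:

# Illustrative only.
everything = {
    ("GET", "/path/a"): "servlet-a",
    ("PUT", "/path/a"): "servlet-a",
    ("GET", "/path/b"): "servlet-b",
}
worker = {("GET", "/path/a"): "servlet-a", ("PUT", "/path/a"): "servlet-a"}
# The worker handles every method registered for /path/a, so they collapse to "*".
assert elide_http_methods_if_unconflicting(worker, everything) == {
    ("*", "/path/a"): "servlet-a"
}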
Simplify all the path regexes for the dict of endpoint descriptions, so that we don't use the Python-specific regex extensions (and also to remove needlessly specific detail).
def simplify_path_regexes(
    registrations: Dict[Tuple[str, str], EndpointDescription]
) -> Dict[Tuple[str, str], EndpointDescription]:
    """
    Simplify all the path regexes for the dict of endpoint descriptions,
    so that we don't use the Python-specific regex extensions
    (and also to remove needlessly specific detail).
    """

    def simplify_path_regex(path: str) -> str:
        """
        Given a regex pattern, replaces all named capturing groups (e.g. `(?P<blah>xyz)`)
        with a simpler version available in more common regex dialects (e.g. `.*`).
        """

        # TODO it's hard to choose between these two;
        #      `.*` is a vague simplification
        # return GROUP_PATTERN.sub(r"\1", path)
        return GROUP_PATTERN.sub(r".*", path)

    return {(m, simplify_path_regex(p)): v for (m, p), v in registrations.items()}
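For illustration only, and assuming the (unshown) GROUP_PATTERN matches named capturing groups such as `(?P<room_id>[^/]*)`:

# Illustrative only.
simplified = simplify_path_regexes(
    {("GET", "^/_matrix/client/v3/rooms/(?P<room_id>[^/]*)/state$"): "state-servlet"}
)
assert simplified == {("GET", "^/_matrix/client/v3/rooms/.*/state$"): "state-servlet"}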
Prints out a category, in documentation page style. Example: ``` # Category name /path/xyz GET /path/abc ```
def print_category(
    category_name: Optional[str],
    elided_worker_paths: Dict[Tuple[str, str], EndpointDescription],
) -> None:
    """
    Prints out a category, in documentation page style.

    Example:
    ```
    # Category name
    /path/xyz

    GET /path/abc
    ```
    """

    if category_name:
        print(f"# {category_name}")
    else:
        print("# (Uncategorised requests)")

    for ln in sorted(
        p for m, p in simplify_path_regexes(elided_worker_paths) if m == "*"
    ):
        print(ln)
    print()
    for ln in sorted(
        f"{m:6} {p}" for m, p in simplify_path_regexes(elided_worker_paths) if m != "*"
    ):
        print(ln)
    print()
Move the given file, and any thumbnails, to the dest repo Args: origin_server: file_id: src_paths: dest_paths:
def move_media( origin_server: str, file_id: str, src_paths: MediaFilePaths, dest_paths: MediaFilePaths, ) -> None: """Move the given file, and any thumbnails, to the dest repo Args: origin_server: file_id: src_paths: dest_paths: """ logger.info("%s/%s", origin_server, file_id) # check that the original exists original_file = src_paths.remote_media_filepath(origin_server, file_id) if not os.path.exists(original_file): logger.warning( "Original for %s/%s (%s) does not exist", origin_server, file_id, original_file, ) else: mkdir_and_move( original_file, dest_paths.remote_media_filepath(origin_server, file_id) ) # now look for thumbnails original_thumb_dir = src_paths.remote_media_thumbnail_dir(origin_server, file_id) if not os.path.exists(original_thumb_dir): return mkdir_and_move( original_thumb_dir, dest_paths.remote_media_thumbnail_dir(origin_server, file_id), )
Check the given file exists, and read it into a string If it does not, exit with an error indicating the problem Args: file_path: the file to be read config_path: where in the configuration file_path came from, so that a useful error can be emitted if it does not exist. Returns: content of the file.
def _read_file(file_path: Any, config_path: str) -> str:
    """Check the given file exists, and read it into a string

    If it does not, exit with an error indicating the problem

    Args:
        file_path: the file to be read
        config_path: where in the configuration file_path came from, so that a useful
            error can be emitted if it does not exist.

    Returns:
        content of the file.
    """
    if not isinstance(file_path, str):
        print(f"{config_path} setting is not a string", file=sys.stderr)
        sys.exit(1)

    try:
        with open(file_path) as file_stream:
            return file_stream.read()
    except OSError as e:
        print(f"Error accessing file {file_path}: {e}", file=sys.stderr)
        sys.exit(1)
Fetches recently registered users and some info on them.
def get_recent_users( txn: LoggingTransaction, since_ms: int, exclude_app_service: bool ) -> List[UserInfo]: """Fetches recently registered users and some info on them.""" sql = """ SELECT name, creation_ts FROM users WHERE ? <= creation_ts AND deactivated = 0 """ if exclude_app_service: sql += " AND appservice_id IS NULL" txn.execute(sql, (since_ms / 1000,)) user_infos = [UserInfo(user_id, creation_ts) for user_id, creation_ts in txn] for user_info in user_infos: user_info.emails = DatabasePool.simple_select_onecol_txn( txn, table="user_threepids", keyvalues={"user_id": user_info.user_id, "medium": "email"}, retcol="address", ) sql = """ SELECT room_id, canonical_alias, name, join_rules FROM local_current_membership INNER JOIN room_stats_state USING (room_id) WHERE user_id = ? AND membership = 'join' """ txn.execute(sql, (user_info.user_id,)) for room_id, canonical_alias, name, join_rules in txn: if join_rules == "public": user_info.public_rooms.append(canonical_alias or name or room_id) else: user_info.private_rooms.append(canonical_alias or name or room_id) user_info.ips = DatabasePool.simple_select_onecol_txn( txn, table="user_ips", keyvalues={"user_id": user_info.user_id}, retcol="ip", ) return user_infos
Attempts to start a synapse main or worker process. Args: pidfile: the pidfile we expect the process to create app: the python module to run config_files: config files to pass to synapse daemonize: if True, will include a --daemonize argument to synapse Returns: True if the process started successfully or was already running False if there was an error starting the process
def start(pidfile: str, app: str, config_files: Iterable[str], daemonize: bool) -> bool: """Attempts to start a synapse main or worker process. Args: pidfile: the pidfile we expect the process to create app: the python module to run config_files: config files to pass to synapse daemonize: if True, will include a --daemonize argument to synapse Returns: True if the process started successfully or was already running False if there was an error starting the process """ if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())): print(app + " already running") return True args = [sys.executable, "-m", app] for c in config_files: args += ["-c", c] if daemonize: args.append("--daemonize") try: subprocess.check_call(args) write("started %s(%s)" % (app, ",".join(config_files)), colour=GREEN) return True except subprocess.CalledProcessError as e: err = "%s(%s) failed to start (exit code: %d). Check the Synapse logfile" % ( app, ",".join(config_files), e.returncode, ) if daemonize: err += ", or run synctl with --no-daemonize" err += "." write(err, colour=RED, stream=sys.stderr) return False
Attempts to kill a synapse worker from the pidfile. Args: pidfile: path to file containing worker's pid app: name of the worker's appservice Returns: process id, or None if the process was not running
def stop(pidfile: str, app: str) -> Optional[int]: """Attempts to kill a synapse worker from the pidfile. Args: pidfile: path to file containing worker's pid app: name of the worker's appservice Returns: process id, or None if the process was not running """ if os.path.exists(pidfile): pid = int(open(pidfile).read()) try: os.kill(pid, signal.SIGTERM) write("stopped %s" % (app,), colour=GREEN) return pid except OSError as err: if err.errno == errno.ESRCH: write("%s not running" % (app,), colour=YELLOW) elif err.errno == errno.EPERM: abort("Cannot stop %s: Operation not permitted" % (app,)) else: abort("Cannot stop %s: Unknown error" % (app,)) else: write( "No running worker of %s found (from %s)\nThe process might be managed by another controller (e.g. systemd)" % (app, pidfile), colour=YELLOW, ) return None
Instantiate and install a Twisted reactor suitable for testing (i.e. not the default global one).
def make_reactor() -> ISynapseReactor:
    """
    Instantiate and install a Twisted reactor suitable for testing (i.e. not the
    default global one).
    """
    reactor = Reactor()

    if "twisted.internet.reactor" in sys.modules:
        del sys.modules["twisted.internet.reactor"]
    installReactor(reactor)

    return cast(ISynapseReactor, reactor)
Take a benchmark function and wrap it in a reactor start and stop.
def make_test( main: Callable[[ISynapseReactor, int], Coroutine[Any, Any, float]] ) -> Callable[[int], float]: """ Take a benchmark function and wrap it in a reactor start and stop. """ def _main(loops: int) -> float: reactor = make_reactor() file_out = StringIO() with redirect_stderr(file_out): d: "Deferred[float]" = Deferred() d.addCallback(lambda _: ensureDeferred(main(reactor, loops))) def on_done(res: T) -> T: if isinstance(res, Failure): res.printTraceback() print(file_out.getvalue()) reactor.stop() return res d.addBoth(on_done) reactor.callWhenRunning(lambda: d.callback(True)) reactor.run() # mypy thinks this is an object for some reason. return d.result # type: ignore[return-value] return _main
Make a web request using the given method, path and content, and render it. Returns the fake Channel object which records the response to the request. Args: reactor: site: The twisted Site to use to render the request method: The HTTP request method ("verb"). path: The HTTP path, suitably URL encoded (e.g. escaped UTF-8 & spaces and such). content: The body of the request. Dicts are JSON-encoded; strs are UTF-8 encoded. access_token: The access token to add as authorization for the request. request: The request class to create. shorthand: Whether to try and be helpful and prefix the given URL with the usual REST API path, if it doesn't contain it. federation_auth_origin: if set to not-None, we will add a fake Authorization header pretending to be the given server name. content_is_form: Whether the content is URL encoded form data. Adds the 'Content-Type': 'application/x-www-form-urlencoded' header. await_result: whether to wait for the request to complete rendering. If true, will pump the reactor until the renderer tells the channel the request is finished. custom_headers: (name, value) pairs to add as request headers client_ip: The IP to use as the requesting IP. Useful for testing ratelimiting. Returns: channel
def make_request( reactor: MemoryReactorClock, site: Union[Site, FakeSite], method: Union[bytes, str], path: Union[bytes, str], content: Union[bytes, str, JsonDict] = b"", access_token: Optional[str] = None, request: Type[Request] = SynapseRequest, shorthand: bool = True, federation_auth_origin: Optional[bytes] = None, content_is_form: bool = False, await_result: bool = True, custom_headers: Optional[Iterable[CustomHeaderType]] = None, client_ip: str = "127.0.0.1", ) -> FakeChannel: """ Make a web request using the given method, path and content, and render it Returns the fake Channel object which records the response to the request. Args: reactor: site: The twisted Site to use to render the request method: The HTTP request method ("verb"). path: The HTTP path, suitably URL encoded (e.g. escaped UTF-8 & spaces and such). content: The body of the request. JSON-encoded, if a str of bytes. access_token: The access token to add as authorization for the request. request: The request class to create. shorthand: Whether to try and be helpful and prefix the given URL with the usual REST API path, if it doesn't contain it. federation_auth_origin: if set to not-None, we will add a fake Authorization header pretenting to be the given server name. content_is_form: Whether the content is URL encoded form data. Adds the 'Content-Type': 'application/x-www-form-urlencoded' header. await_result: whether to wait for the request to complete rendering. If true, will pump the reactor until the the renderer tells the channel the request is finished. custom_headers: (name, value) pairs to add as request headers client_ip: The IP to use as the requesting IP. Useful for testing ratelimiting. Returns: channel """ if not isinstance(method, bytes): method = method.encode("ascii") if not isinstance(path, bytes): path = path.encode("ascii") # Decorate it to be the full path, if we're using shorthand if ( shorthand and not path.startswith(b"/_matrix") and not path.startswith(b"/_synapse") ): if path.startswith(b"/"): path = path[1:] path = b"/_matrix/client/r0/" + path if not path.startswith(b"/"): path = b"/" + path if isinstance(content, dict): content = json.dumps(content).encode("utf8") if isinstance(content, str): content = content.encode("utf8") channel = FakeChannel(site, reactor, ip=client_ip) req = request(channel, site) channel.request = req req.content = BytesIO(content) # Twisted expects to be at the end of the content when parsing the request. req.content.seek(0, SEEK_END) # Old version of Twisted (<20.3.0) have issues with parsing x-www-form-urlencoded # bodies if the Content-Length header is missing req.requestHeaders.addRawHeader( b"Content-Length", str(len(content)).encode("ascii") ) if access_token: req.requestHeaders.addRawHeader( b"Authorization", b"Bearer " + access_token.encode("ascii") ) if federation_auth_origin is not None: req.requestHeaders.addRawHeader( b"Authorization", b"X-Matrix origin=%s,key=,sig=" % (federation_auth_origin,), ) if content: if content_is_form: req.requestHeaders.addRawHeader( b"Content-Type", b"application/x-www-form-urlencoded" ) else: # Assume the body is JSON req.requestHeaders.addRawHeader(b"Content-Type", b"application/json") if custom_headers: for k, v in custom_headers: req.requestHeaders.addRawHeader(k, v) req.parseCookies() req.requestReceived(method, path, b"1.1") if await_result: channel.await_result() return channel
Try to validate the obtained connector as it would happen when synapse is running and the connection will be established. This method will raise a useful exception when necessary, else it will just do nothing. This is in order to help catch quirks related to reactor.connectTCP, since when called directly, the connector's destination will be of type IPv4Address, with the hostname as the literal host that was given (which could be an IPv6-only host or an IPv6 literal). But when called from reactor.connectTCP *through* e.g. an Endpoint, the connector's destination will contain the specific IP address with the correct network stack class. Note that testing code paths that use connectTCP directly should not be affected by this check, unless they specifically add a test with a matching reactor.lookups[HOSTNAME] = "IPv6Literal", where reactor is of type ThreadedMemoryReactorClock. For an example of implementing such tests, see test/handlers/send_email.py.
def validate_connector(connector: tcp.Connector, expected_ip: str) -> None:
    """Try to validate the obtained connector as it would happen when
    synapse is running and the connection will be established.

    This method will raise a useful exception when necessary, else it will just
    do nothing.

    This is in order to help catch quirks related to reactor.connectTCP,
    since when called directly, the connector's destination will be of type
    IPv4Address, with the hostname as the literal host that was given (which
    could be an IPv6-only host or an IPv6 literal).

    But when called from reactor.connectTCP *through* e.g. an Endpoint, the
    connector's destination will contain the specific IP address with the
    correct network stack class.

    Note that testing code paths that use connectTCP directly should not be
    affected by this check, unless they specifically add a test with a
    matching reactor.lookups[HOSTNAME] = "IPv6Literal", where reactor is of
    type ThreadedMemoryReactorClock.

    For an example of implementing such tests, see test/handlers/send_email.py.
    """
    destination = connector.getDestination()

    # We use address.IPv{4,6}Address to check what the reactor thinks it is
    # sending but check for validity with ipaddress.IPv{4,6}Address
    # because they fail with IPs on the wrong network stack.
    cls_mapping = {
        address.IPv4Address: ipaddress.IPv4Address,
        address.IPv6Address: ipaddress.IPv6Address,
    }

    cls = cls_mapping.get(destination.__class__)

    if cls is not None:
        try:
            cls(expected_ip)
        except Exception as exc:
            raise ValueError(
                "Invalid IP type and resolution for %s. Expected %s to be %s"
                % (destination, expected_ip, cls.__name__)
            ) from exc
    else:
        raise ValueError(
            "Unknown address type %s for %s"
            % (destination.__class__.__name__, destination)
        )
Make the given test homeserver's database interactions synchronous.
def _make_test_homeserver_synchronous(server: HomeServer) -> None: """ Make the given test homeserver's database interactions synchronous. """ clock = server.get_clock() for database in server.get_datastores().databases: pool = database._db_pool def runWithConnection( func: Callable[..., R], *args: Any, **kwargs: Any ) -> Awaitable[R]: return threads.deferToThreadPool( pool._reactor, pool.threadpool, pool._runWithConnection, func, *args, **kwargs, ) def runInteraction( desc: str, func: Callable[..., R], *args: Any, **kwargs: Any ) -> Awaitable[R]: return threads.deferToThreadPool( pool._reactor, pool.threadpool, pool._runInteraction, desc, func, *args, **kwargs, ) pool.runWithConnection = runWithConnection # type: ignore[method-assign] pool.runInteraction = runInteraction # type: ignore[assignment] # Replace the thread pool with a threadless 'thread' pool pool.threadpool = ThreadPool(clock._reactor) pool.running = True # We've just changed the Databases to run DB transactions on the same # thread, so we need to disable the dedicated thread behaviour. server.get_datastores().main.USE_DEDICATED_DB_THREADS_FOR_EVENT_FETCHING = False
Connect a client to a fake TCP transport. Args: reactor client_id: The index (in reactor.tcpClients) of the connecting factory to use.
def connect_client(
    reactor: ThreadedMemoryReactorClock, client_id: int
) -> Tuple[IProtocol, AccumulatingProtocol]:
    """
    Connect a client to a fake TCP transport.

    Args:
        reactor
        client_id: The index (in `reactor.tcpClients`) of the connecting factory to use.
    """
    factory = reactor.tcpClients.pop(client_id)[2]
    client = factory.buildProtocol(None)
    server = AccumulatingProtocol()
    server.makeConnection(FakeTransport(client, reactor))
    client.makeConnection(FakeTransport(server, reactor))

    return client, server
Setup a homeserver suitable for running tests against. Keyword arguments are passed to the Homeserver constructor. If no datastore is supplied, one is created and given to the homeserver. Args: cleanup_func : The function used to register a cleanup routine for after the test. Calling this method directly is deprecated: you should instead derive from HomeserverTestCase.
def setup_test_homeserver( cleanup_func: Callable[[Callable[[], None]], None], name: str = "test", config: Optional[HomeServerConfig] = None, reactor: Optional[ISynapseReactor] = None, homeserver_to_use: Type[HomeServer] = TestHomeServer, **kwargs: Any, ) -> HomeServer: """ Setup a homeserver suitable for running tests against. Keyword arguments are passed to the Homeserver constructor. If no datastore is supplied, one is created and given to the homeserver. Args: cleanup_func : The function used to register a cleanup routine for after the test. Calling this method directly is deprecated: you should instead derive from HomeserverTestCase. """ if reactor is None: from twisted.internet import reactor as _reactor reactor = cast(ISynapseReactor, _reactor) if config is None: config = default_config(name, parse=True) config.caches.resize_all_caches() if "clock" not in kwargs: kwargs["clock"] = MockClock() if USE_POSTGRES_FOR_TESTS: test_db = "synapse_test_%s" % uuid.uuid4().hex database_config = { "name": "psycopg2", "args": { "dbname": test_db, "host": POSTGRES_HOST, "password": POSTGRES_PASSWORD, "user": POSTGRES_USER, "port": POSTGRES_PORT, "cp_min": 1, "cp_max": 5, }, } else: if SQLITE_PERSIST_DB: # The current working directory is in _trial_temp, so this gets created within that directory. test_db_location = os.path.abspath("test.db") logger.debug("Will persist db to %s", test_db_location) # Ensure each test gets a clean database. try: os.remove(test_db_location) except FileNotFoundError: pass else: logger.debug("Removed existing DB at %s", test_db_location) else: test_db_location = ":memory:" database_config = { "name": "sqlite3", "args": {"database": test_db_location, "cp_min": 1, "cp_max": 1}, } # Check if we have set up a DB that we can use as a template. 
global PREPPED_SQLITE_DB_CONN if PREPPED_SQLITE_DB_CONN is None: temp_engine = create_engine(database_config) PREPPED_SQLITE_DB_CONN = LoggingDatabaseConnection( sqlite3.connect(":memory:"), temp_engine, "PREPPED_CONN" ) database = DatabaseConnectionConfig("master", database_config) config.database.databases = [database] prepare_database( PREPPED_SQLITE_DB_CONN, create_engine(database_config), config ) database_config["_TEST_PREPPED_CONN"] = PREPPED_SQLITE_DB_CONN if "db_txn_limit" in kwargs: database_config["txn_limit"] = kwargs["db_txn_limit"] database = DatabaseConnectionConfig("master", database_config) config.database.databases = [database] db_engine = create_engine(database.config) # Create the database before we actually try and connect to it, based off # the template database we generate in setupdb() if USE_POSTGRES_FOR_TESTS: db_conn = db_engine.module.connect( dbname=POSTGRES_BASE_DB, user=POSTGRES_USER, host=POSTGRES_HOST, port=POSTGRES_PORT, password=POSTGRES_PASSWORD, ) db_engine.attempt_to_set_autocommit(db_conn, True) cur = db_conn.cursor() cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,)) cur.execute( "CREATE DATABASE %s WITH TEMPLATE %s;" % (test_db, POSTGRES_BASE_DB) ) cur.close() db_conn.close() hs = homeserver_to_use( name, config=config, version_string="Synapse/tests", reactor=reactor, ) # Install @cache_in_self attributes for key, val in kwargs.items(): setattr(hs, "_" + key, val) # Mock TLS hs.tls_server_context_factory = Mock() hs.setup() if USE_POSTGRES_FOR_TESTS: database_pool = hs.get_datastores().databases[0] # We need to do cleanup on PostgreSQL def cleanup() -> None: import psycopg2 # Close all the db pools database_pool._db_pool.close() dropped = False # Drop the test database db_conn = db_engine.module.connect( dbname=POSTGRES_BASE_DB, user=POSTGRES_USER, host=POSTGRES_HOST, port=POSTGRES_PORT, password=POSTGRES_PASSWORD, ) db_engine.attempt_to_set_autocommit(db_conn, True) cur = db_conn.cursor() # Try a few times to drop the DB. Some things may hold on to the # database for a few more seconds due to flakiness, preventing # us from dropping it when the test is over. If we can't drop # it, warn and move on. for _ in range(5): try: cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,)) db_conn.commit() dropped = True except psycopg2.OperationalError as e: warnings.warn( "Couldn't drop old db: " + str(e), category=UserWarning, stacklevel=2, ) time.sleep(0.5) cur.close() db_conn.close() if not dropped: warnings.warn( "Failed to drop old DB.", category=UserWarning, stacklevel=2, ) if not LEAVE_DB: # Register the cleanup hook cleanup_func(cleanup) # bcrypt is far too slow to be doing in unit tests # Need to let the HS build an auth handler and then mess with it # because AuthHandler's constructor requires the HS, so we can't make one # beforehand and pass it in to the HS's constructor (chicken / egg) async def hash(p: str) -> str: return hashlib.md5(p.encode("utf8")).hexdigest() hs.get_auth_handler().hash = hash # type: ignore[assignment] async def validate_hash(p: str, h: str) -> bool: return hashlib.md5(p.encode("utf8")).hexdigest() == h hs.get_auth_handler().validate_hash = validate_hash # type: ignore[assignment] # Make the threadpool and database transactions synchronous for testing. 
_make_test_homeserver_synchronous(hs) # Load any configured modules into the homeserver module_api = hs.get_module_api() for module, module_config in hs.config.modules.loaded_modules: module(config=module_config, api=module_api) load_legacy_spam_checkers(hs) load_legacy_third_party_event_rules(hs) load_legacy_presence_router(hs) load_legacy_password_auth_providers(hs) return hs
If this room version needs it, generate an event id
def _maybe_get_event_id_dict_for_room_version(room_version: RoomVersion) -> dict:
    """If this room version needs it, generate an event id"""
    if room_version.event_format != EventFormatVersions.ROOM_V1_V2:
        return {}

    global event_count
    c = event_count
    event_count += 1
    return {"event_id": "!%i:example.com" % (c,)}
A CLOS-style 'around' modifier, which wraps the original method of the given instance with another piece of code. @around(self) def method_name(orig, *args, **kwargs): return orig(*args, **kwargs)
def around(target: TV) -> Callable[[Callable[Concatenate[S, P], R]], None]:
    """A CLOS-style 'around' modifier, which wraps the original method of the
    given instance with another piece of code.

    @around(self)
    def method_name(orig, *args, **kwargs):
        return orig(*args, **kwargs)
    """

    def _around(code: Callable[Concatenate[S, P], R]) -> None:
        name = code.__name__
        orig = getattr(target, name)

        def new(*args: P.args, **kwargs: P.kwargs) -> R:
            return code(orig, *args, **kwargs)

        setattr(target, name, new)

    return _around
Creates a :class:`HomeServerConfig` instance with the given configuration dict. This is equivalent to:: config_obj = HomeServerConfig() config_obj.parse_config_dict(config, "", "") but it keeps a cache of `HomeServerConfig` instances and deepcopies them as needed, to avoid validating the whole configuration every time.
def make_homeserver_config_obj(config: Dict[str, Any]) -> RootConfig:
    """Creates a :class:`HomeServerConfig` instance with the given configuration dict.

    This is equivalent to::

        config_obj = HomeServerConfig()
        config_obj.parse_config_dict(config, "", "")

    but it keeps a cache of `HomeServerConfig` instances and deepcopies them as needed,
    to avoid validating the whole configuration every time.
    """
    config_obj = _parse_config_dict(json.dumps(config, sort_keys=True))
    return deepcopy_config(config_obj)
A decorator to set the .loglevel attribute to logging.DEBUG. Can apply to either a TestCase or an individual test method.
def DEBUG(target: TV) -> TV:
    """A decorator to set the .loglevel attribute to logging.DEBUG.
    Can apply to either a TestCase or an individual test method."""
    target.loglevel = logging.DEBUG  # type: ignore[attr-defined]
    return target
A decorator to set the .loglevel attribute to logging.INFO. Can apply to either a TestCase or an individual test method.
def INFO(target: TV) -> TV:
    """A decorator to set the .loglevel attribute to logging.INFO.
    Can apply to either a TestCase or an individual test method."""
    target.loglevel = logging.INFO  # type: ignore[attr-defined]
    return target
A decorator which marks the TestCase or method as 'logcontext_clean' ... ie, any logcontext errors should cause a test failure
def logcontext_clean(target: TV) -> TV:
    """A decorator which marks the TestCase or method as 'logcontext_clean'

    ... ie, any logcontext errors should cause a test failure
    """

    def logcontext_error(msg: str) -> NoReturn:
        raise AssertionError("logcontext error: %s" % (msg))

    patcher = patch("synapse.logging.context.logcontext_error", new=logcontext_error)
    return patcher(target)
Build a suitable Authorization header for an outgoing federation request
def _auth_header_for_request(
    origin: str,
    destination: str,
    signing_key: signedjson.key.SigningKey,
    method: str,
    path: str,
    content: Optional[JsonDict],
) -> str:
    """Build a suitable Authorization header for an outgoing federation request"""
    request_description: JsonDict = {
        "method": method,
        "uri": path,
        "destination": destination,
        "origin": origin,
    }
    if content is not None:
        request_description["content"] = content
    signature_base64 = unpaddedbase64.encode_base64(
        signing_key.sign(
            canonicaljson.encode_canonical_json(request_description)
        ).signature
    )
    return (
        f"X-Matrix origin={origin},"
        f"key={signing_key.alg}:{signing_key.version},"
        f"sig={signature_base64}"
    )
A decorator which can be applied to test functions to give additional HS config For example: class MyTestCase(HomeserverTestCase): @override_config({"enable_registration": False, ...}) def test_foo(self): ... Args: extra_config: Additional config settings to be merged into the default config dict before instantiating the test homeserver.
def override_config(extra_config: JsonDict) -> Callable[[TV], TV]:
    """A decorator which can be applied to test functions to give additional HS config

    For example:

        class MyTestCase(HomeserverTestCase):
            @override_config({"enable_registration": False, ...})
            def test_foo(self):
                ...

    Args:
        extra_config: Additional config settings to be merged into the default
            config dict before instantiating the test homeserver.
    """

    def decorator(func: TV) -> TV:
        # This attribute is being defined.
        func._extra_config = extra_config  # type: ignore[attr-defined]
        return func

    return decorator
A test decorator which will skip the decorated test unless a condition is set For example: class MyTestCase(TestCase): @skip_unless(HAS_FOO, "Cannot test without foo") def test_foo(self): ... Args: condition: If false, the test will be skipped reason: the reason to give for skipping the test
def skip_unless(condition: bool, reason: str) -> Callable[[TV], TV]:
    """A test decorator which will skip the decorated test unless a condition is set

    For example:

        class MyTestCase(TestCase):
            @skip_unless(HAS_FOO, "Cannot test without foo")
            def test_foo(self):
                ...

    Args:
        condition: If false, the test will be skipped
        reason: the reason to give for skipping the test
    """

    def decorator(f: TV) -> TV:
        if not condition:
            f.skip = reason  # type: ignore
        return f

    return decorator
Create a reasonable test config.
def default_config( name: str, parse: bool = False ) -> Union[Dict[str, object], HomeServerConfig]: """ Create a reasonable test config. """ config_dict = { "server_name": name, # Setting this to an empty list turns off federation sending. "federation_sender_instances": [], "media_store_path": "media", # the test signing key is just an arbitrary ed25519 key to keep the config # parser happy "signing_key": "ed25519 a_lPym qvioDNmfExFBRPgdTU+wtFYKq4JfwFRv7sYVgWvmgJg", # Disable trusted key servers, otherwise unit tests might try to actually # reach out to matrix.org. "trusted_key_servers": [], "event_cache_size": 1, "enable_registration": True, "enable_registration_captcha": False, "macaroon_secret_key": "not even a little secret", "password_providers": [], "worker_app": None, "block_non_admin_invites": False, "federation_domain_whitelist": None, "filter_timeline_limit": 5000, "user_directory_search_all_users": False, "user_consent_server_notice_content": None, "block_events_without_consent_error": None, "user_consent_at_registration": False, "user_consent_policy_name": "Privacy Policy", "media_storage_providers": [], "autocreate_auto_join_rooms": True, "auto_join_rooms": [], "limit_usage_by_mau": False, "hs_disabled": False, "hs_disabled_message": "", "max_mau_value": 50, "mau_trial_days": 0, "mau_stats_only": False, "mau_limits_reserved_threepids": [], "admin_contact": None, "rc_message": {"per_second": 10000, "burst_count": 10000}, "rc_registration": {"per_second": 10000, "burst_count": 10000}, "rc_login": { "address": {"per_second": 10000, "burst_count": 10000}, "account": {"per_second": 10000, "burst_count": 10000}, "failed_attempts": {"per_second": 10000, "burst_count": 10000}, }, "rc_joins": { "local": {"per_second": 10000, "burst_count": 10000}, "remote": {"per_second": 10000, "burst_count": 10000}, }, "rc_joins_per_room": {"per_second": 10000, "burst_count": 10000}, "rc_invites": { "per_room": {"per_second": 10000, "burst_count": 10000}, "per_user": {"per_second": 10000, "burst_count": 10000}, }, "rc_3pid_validation": {"per_second": 10000, "burst_count": 10000}, "saml2_enabled": False, "public_baseurl": None, "default_identity_server": None, "key_refresh_interval": 24 * 60 * 60 * 1000, "old_signing_keys": {}, "tls_fingerprints": [], "use_frozen_dicts": False, # We need a sane default_room_version, otherwise attempts to create # rooms will fail. "default_room_version": DEFAULT_ROOM_VERSION, # disable user directory updates, because they get done in the # background, which upsets the test runner. Setting this to an # (obviously) fake worker name disables updating the user directory. "update_user_directory_from_worker": "does_not_exist_worker_name", "caches": {"global_factor": 1, "sync_response_cache_duration": 0}, "listeners": [{"port": 0, "type": "http"}], } if parse: config = HomeServerConfig() config.parse_config_dict(config_dict, "", "") return config return config_dict
A version of typing.cast that is checked at runtime. We have our own function for this for two reasons: 1. typing.cast itself is deliberately a no-op at runtime, see https://docs.python.org/3/library/typing.html#typing.cast 2. To help work around a mypy-zope bug https://github.com/Shoobx/mypy-zope/issues/91 where mypy would erroneously consider `isinstance(x, type)` to be false in all circumstances. For this to make sense, `T` needs to be something that `isinstance` can check; see https://docs.python.org/3/library/functions.html?highlight=isinstance#isinstance https://docs.python.org/3/glossary.html#term-abstract-base-class https://docs.python.org/3/library/typing.html#typing.runtime_checkable for more details.
def checked_cast(type: Type[T], x: object) -> T:
    """A version of typing.cast that is checked at runtime.

    We have our own function for this for two reasons:

    1. typing.cast itself is deliberately a no-op at runtime, see
       https://docs.python.org/3/library/typing.html#typing.cast
    2. To help work around a mypy-zope bug
       https://github.com/Shoobx/mypy-zope/issues/91
       where mypy would erroneously consider `isinstance(x, type)` to be false in all
       circumstances.

    For this to make sense, `T` needs to be something that `isinstance` can check; see
        https://docs.python.org/3/library/functions.html?highlight=isinstance#isinstance
        https://docs.python.org/3/glossary.html#term-abstract-base-class
        https://docs.python.org/3/library/typing.html#typing.runtime_checkable
    for more details.
    """
    assert isinstance(x, type)
    return x
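A minimal sketch of how this differs from `typing.cast`: a bad cast fails loudly at runtime instead of being silently ignored.

value: object = 8080
port = checked_cast(int, value)  # passes the isinstance check; mypy now sees an int
assert port == 8080

try:
    checked_cast(str, value)  # wrong runtime type: raises AssertionError
except AssertionError:
    pass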
get the options bits from an openssl context object
def _get_ssl_context_options(ssl_context: SSL.Context) -> int: """get the options bits from an openssl context object""" # the OpenSSL.SSL.Context wrapper doesn't expose get_options, so we have to # use the low-level interface return SSL._lib.SSL_CTX_get_options(ssl_context._context)
Get the matrix ID tag for a given SigningKey or VerifyKey
def get_key_id(key: SigningKey) -> str: """Get the matrix ID tag for a given SigningKey or VerifyKey""" return "%s:%s" % (key.alg, key.version)
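For example, with a key generated via signedjson (as the surrounding helpers do), the tag is the algorithm joined to the key's version:

k = key.generate_signing_key("v1")  # ed25519 signing key with version "v1"
assert get_key_id(k) == "ed25519:v1"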
Perform a sync request for the given user and return the user presence updates they've received, as well as the next_batch token. This method assumes testcase.sync_handler points to the homeserver's sync handler. Args: testcase: The testcase that is currently being run. user_id: The ID of the user to generate a sync response for. since_token: An optional token indicating the point from which to sync. Returns: A tuple containing a list of presence updates, and the sync response's next_batch token.
def sync_presence(
    testcase: HomeserverTestCase,
    user_id: str,
    since_token: Optional[StreamToken] = None,
) -> Tuple[List[UserPresenceState], StreamToken]:
    """Perform a sync request for the given user and return the user presence updates
    they've received, as well as the next_batch token.

    This method assumes testcase.sync_handler points to the homeserver's sync handler.

    Args:
        testcase: The testcase that is currently being run.
        user_id: The ID of the user to generate a sync response for.
        since_token: An optional token indicating the point from which to sync.

    Returns:
        A tuple containing a list of presence updates, and the sync response's
        next_batch token.
    """
    requester = create_requester(user_id)
    sync_config = generate_sync_config(requester.user.to_string())
    sync_result = testcase.get_success(
        testcase.hs.get_sync_handler().wait_for_sync_for_user(
            requester, sync_config, since_token
        )
    )

    return sync_result.presence, sync_result.next_batch
generate a signing key whose version is its public key ... as used by the cross-signing-keys.
def generate_self_id_key() -> SigningKey: """generate a signing key whose version is its public key ... as used by the cross-signing-keys. """ k = key.generate_signing_key("x") k.version = encode_pubkey(k) return k
Encode the public key corresponding to the given signing key as base64
def encode_pubkey(sk: SigningKey) -> str: """Encode the public key corresponding to the given signing key as base64""" return key.encode_verify_key_base64(key.get_verify_key(sk))
Build a dict representing the given device
def build_device_dict(user_id: str, device_id: str, sk: SigningKey) -> JsonDict: """Build a dict representing the given device""" return { "user_id": user_id, "device_id": device_id, "algorithms": [ "m.olm.curve25519-aes-sha2", RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2, ], "keys": { "curve25519:" + device_id: "curve25519+key", key_id(sk): encode_pubkey(sk), }, }
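A small sketch tying these helpers together; the user and device IDs are made up, and `key_id` is assumed to be the module's existing helper for deriving an `ed25519:...` identifier from a signing key:

device_signing_key = key.generate_signing_key("ALICE_DEVICE")
device = build_device_dict("@alice:test", "ALICE_DEVICE", device_signing_key)
assert device["user_id"] == "@alice:test"
# The real ed25519 public key sits alongside the placeholder curve25519 entry.
assert encode_pubkey(device_signing_key) in device["keys"].values()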
Returns a mock which will stand in as a SynapseRequest
def _mock_request() -> Mock:
    """Returns a mock which will stand in as a SynapseRequest"""
    mock = Mock(
        spec=[
            "finish",
            "getClientAddress",
            "getHeader",
            "setHeader",
            "setResponseCode",
            "write",
        ]
    )

    # `_disconnected` mustn't be another `Mock`, otherwise it will be truthy.
    mock._disconnected = False

    return mock
path to a file containing the private half of a test key
def _key_file_path() -> str: """path to a file containing the private half of a test key""" # this key was generated with: # openssl ecparam -name prime256v1 -genkey -noout | # openssl pkcs8 -topk8 -nocrypt -out oidc_test_key.p8 # # we use PKCS8 rather than SEC-1 (which is what openssl ecparam spits out), because # that's what Apple use, and we want to be sure that we work with Apple's keys. # # (For the record: both PKCS8 and SEC-1 specify (different) ways of representing # keys using ASN.1. Both are then typically formatted using PEM, which says: use the # base64-encoded DER encoding of ASN.1, with headers and footers. But we don't # really need to care about any of that.) return os.path.join(os.path.dirname(__file__), "oidc_test_key.p8")
path to a file containing the public half of a test key
def _public_key_file_path() -> str: """path to a file containing the public half of a test key""" # this was generated with: # openssl ec -in oidc_test_key.p8 -pubout -out oidc_test_key.pub.pem # # See above about where oidc_test_key.p8 came from return os.path.join(os.path.dirname(__file__), "oidc_test_key.pub.pem")
Builds a fake SynapseRequest to mock the browser callback Returns a Mock object which looks like the SynapseRequest we get from a browser after SSO (before we return to the client) Args: code: the authorization code which would have been returned by the OIDC provider state: the "state" param which would have been passed around in the query param. Should be the same as was embedded in the session in _build_oidc_session. session: the "session" which would have been passed around in the cookie. ip_address: the IP address to pretend the request came from
def _build_callback_request( code: str, state: str, session: str, ip_address: str = "10.0.0.1", ) -> Mock: """Builds a fake SynapseRequest to mock the browser callback Returns a Mock object which looks like the SynapseRequest we get from a browser after SSO (before we return to the client) Args: code: the authorization code which would have been returned by the OIDC provider state: the "state" param which would have been passed around in the query param. Should be the same as was embedded in the session in _build_oidc_session. session: the "session" which would have been passed around in the cookie. ip_address: the IP address to pretend the request came from """ request = Mock( spec=[ "args", "getCookie", "cookies", "requestHeaders", "getClientAddress", "getHeader", ] ) request.cookies = [] request.getCookie.return_value = session request.args = {} request.args[b"code"] = [code.encode("utf-8")] request.args[b"state"] = [state.encode("utf-8")] request.getClientAddress.return_value.host = ip_address return request
Returns a config dict that will enable the given legacy password auth providers
def legacy_providers_config(*providers: Type[Any]) -> dict: """Returns a config dict that will enable the given legacy password auth providers""" return { "password_providers": [ {"module": "%s.%s" % (__name__, provider.__qualname__), "config": {}} for provider in providers ] }
Returns a config dict that will enable the given modules
def providers_config(*providers: Type[Any]) -> dict: """Returns a config dict that will enable the given modules""" return { "modules": [ {"module": "%s.%s" % (__name__, provider.__qualname__), "config": {}} for provider in providers ] }
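A sketch of how a test might consume these helpers, assuming Synapse's `override_config` decorator from `tests.unittest`; `MyAuthModule` is a made-up module class used purely for illustration:

class PasswordAuthModuleTestCase(HomeserverTestCase):
    @override_config(providers_config(MyAuthModule))
    def test_module_is_loaded(self) -> None:
        ...  # the homeserver under test now loads MyAuthModule via the modules config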
Returns a mock which will stand in as a SynapseRequest
def _mock_request() -> Mock:
    """Returns a mock which will stand in as a SynapseRequest"""
    mock = Mock(
        spec=[
            "finish",
            "getClientAddress",
            "getHeader",
            "setHeader",
            "setResponseCode",
            "write",
        ]
    )

    # `_disconnected` mustn't be another `Mock`, otherwise it will be truthy.
    mock._disconnected = False

    return mock
We use this to pass through in testing without using TLS, while saving the context information so we can check that it would have happened. Note that this is what the MemoryReactor does on connectSSL. It only saves the contextFactory, but starts the connection with the underlying Factory. See: L{twisted.internet.testing.MemoryReactor.connectSSL}
def TestingESMTPTLSClientFactory(
    contextFactory: ContextFactory,
    _connectWrapped: bool,
    wrappedProtocol: IProtocolFactory,
) -> IProtocolFactory:
    """We use this to pass through in testing without using TLS, while saving the
    context information so we can check that it would have happened.

    Note that this is what the MemoryReactor does on connectSSL.
    It only saves the contextFactory, but starts the connection with the
    underlying Factory.
    See: L{twisted.internet.testing.MemoryReactor.connectSSL}"""

    wrappedProtocol._testingContextFactory = contextFactory  # type: ignore[attr-defined]
    return wrappedProtocol
Generate a sync config (with a unique request key).
def generate_sync_config( user_id: str, device_id: Optional[str] = "device_id" ) -> SyncConfig: """Generate a sync config (with a unique request key).""" global _request_key _request_key += 1 return SyncConfig( user=UserID.from_string(user_id), filter_collection=Filtering(Mock()).DEFAULT_FILTER_COLLECTION, is_guest=False, request_key=("request_key", _request_key), device_id=device_id, )
Get a protocol Factory which will build an HTTPChannel Returns: interfaces.IProtocolFactory
def _get_test_protocol_factory() -> IProtocolFactory: """Get a protocol Factory which will build an HTTPChannel Returns: interfaces.IProtocolFactory """ server_factory = Factory.forProtocol(HTTPChannel) # Request.finish expects the factory to have a 'log' method. server_factory.log = _log_request return server_factory
Implements Factory.log, which is expected by Request.finish
def _log_request(request: str) -> None: """Implements Factory.log, which is expected by Request.finish""" logger.info(f"Completed request {request}")
Make an object that acts enough like a request.
def make_request(content: Union[bytes, JsonDict]) -> Mock: """Make an object that acts enough like a request.""" request = Mock(spec=["method", "uri", "content"]) if isinstance(content, dict): content = json.dumps(content).encode("utf8") request.method = bytes("STUB_METHOD", "ascii") request.uri = bytes("/test_stub_uri", "ascii") request.content = BytesIO(content) return request
Get a test IPolicyForHTTPS which trusts the test CA cert Returns: IPolicyForHTTPS
def get_test_https_policy() -> BrowserLikePolicyForHTTPS: """Get a test IPolicyForHTTPS which trusts the test CA cert Returns: IPolicyForHTTPS """ ca_file = get_test_ca_cert_file() with open(ca_file) as stream: content = stream.read() cert = Certificate.loadPEM(content) trust_root = trustRootFromCertificates([cert]) return BrowserLikePolicyForHTTPS(trustRoot=trust_root)
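For instance, the policy can be handed to a Twisted Agent so that connections to servers presenting certificates signed by the test CA verify successfully (`reactor` here is whatever reactor the test is running):

from twisted.web.client import Agent

agent = Agent(reactor, contextFactory=get_test_https_policy())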
Get the path to the test CA cert The keypair is generated with: openssl genrsa -out ca.key 2048 openssl req -new -x509 -key ca.key -days 3650 -out ca.crt -subj '/CN=synapse test CA'
def get_test_ca_cert_file() -> str: """Get the path to the test CA cert The keypair is generated with: openssl genrsa -out ca.key 2048 openssl req -new -x509 -key ca.key -days 3650 -out ca.crt \ -subj '/CN=synapse test CA' """ return os.path.join(os.path.dirname(__file__), "ca.crt")
get the path to the test key The key file is made with: openssl genrsa -out server.key 2048
def get_test_key_file() -> str: """get the path to the test key The key file is made with: openssl genrsa -out server.key 2048 """ return os.path.join(os.path.dirname(__file__), "server.key")
build an x509 certificate file Args: sanlist: a list of subjectAltName values for the cert Returns: The path to the file
def create_test_cert_file(sanlist: List[bytes]) -> str: """build an x509 certificate file Args: sanlist: a list of subjectAltName values for the cert Returns: The path to the file """ global cert_file_count csr_filename = "server.csr" cnf_filename = "server.%i.cnf" % (cert_file_count,) cert_filename = "server.%i.crt" % (cert_file_count,) cert_file_count += 1 # first build a CSR subprocess.check_call( [ "openssl", "req", "-new", "-key", get_test_key_file(), "-subj", "/", "-out", csr_filename, ] ) # now a config file describing the right SAN entries sanentries = b",".join(sanlist) with open(cnf_filename, "wb") as f: f.write(CONFIG_TEMPLATE % {b"sanentries": sanentries}) # finally the cert ca_key_filename = os.path.join(os.path.dirname(__file__), "ca.key") ca_cert_filename = get_test_ca_cert_file() subprocess.check_call( [ "openssl", "x509", "-req", "-in", csr_filename, "-CA", ca_cert_filename, "-CAkey", ca_key_filename, "-set_serial", "1", "-extfile", cnf_filename, "-out", cert_filename, ] ) return cert_filename
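For example, to mint a certificate valid for a test domain and its subdomains (the entries use openssl's subjectAltName syntax; this requires the openssl binary plus the ca.key/ca.crt files that live next to this module):

cert_path = create_test_cert_file([b"DNS:testserv", b"DNS:*.testserv"])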
Wrap an existing Protocol Factory with a test TLSMemoryBIOFactory The resultant factory will create a TLS server which presents a certificate signed by our test CA, valid for the domains in `sanlist` Args: factory: protocol factory to wrap sanlist: list of domains the cert should be valid for Returns: interfaces.IProtocolFactory
def wrap_server_factory_for_tls( factory: IProtocolFactory, clock: IReactorTime, sanlist: List[bytes] ) -> TLSMemoryBIOFactory: """Wrap an existing Protocol Factory with a test TLSMemoryBIOFactory The resultant factory will create a TLS server which presents a certificate signed by our test CA, valid for the domains in `sanlist` Args: factory: protocol factory to wrap sanlist: list of domains the cert should be valid for Returns: interfaces.IProtocolFactory """ connection_creator = TestServerTLSConnectionFactory(sanlist=sanlist) # Twisted > 23.8.0 has a different API that accepts a clock. if twisted.version <= Version("Twisted", 23, 8, 0): return TLSMemoryBIOFactory( connection_creator, isClient=False, wrappedFactory=factory ) else: return TLSMemoryBIOFactory( connection_creator, isClient=False, wrappedFactory=factory, clock=clock )
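A sketch of wiring this up with the plain-HTTP factory helper defined alongside it, assuming `reactor` is the test MemoryReactorClock used elsewhere in these tests:

tls_factory = wrap_server_factory_for_tls(
    _get_test_protocol_factory(), reactor, sanlist=[b"DNS:testserv"]
)
server_protocol = tls_factory.buildProtocol(None)  # TLS on the outside, HTTP inside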
Get a protocol Factory which will build an HTTPChannel Returns: interfaces.IProtocolFactory
def _get_test_protocol_factory() -> IProtocolFactory: """Get a protocol Factory which will build an HTTPChannel Returns: interfaces.IProtocolFactory """ server_factory = Factory.forProtocol(HTTPChannel) # Request.finish expects the factory to have a 'log' method. server_factory.log = _log_request return server_factory
Implements Factory.log, which is expected by Request.finish
def _log_request(request: str) -> None: """Implements Factory.log, which is expected by Request.finish""" logger.info(f"Completed request {request}")
Disconnects an in-flight request and checks the response. Args: reactor: The twisted reactor running the request handler. channel: The `FakeChannel` for the request. expect_cancellation: `True` if request processing is expected to be cancelled, `False` if the request should run to completion. expected_body: The expected response for the request. expected_code: The expected status code for the request. Defaults to `200` or `499` depending on `expect_cancellation`.
def test_disconnect(
    reactor: MemoryReactorClock,
    channel: FakeChannel,
    expect_cancellation: bool,
    expected_body: Union[bytes, JsonDict],
    expected_code: Optional[int] = None,
) -> None:
    """Disconnects an in-flight request and checks the response.

    Args:
        reactor: The twisted reactor running the request handler.
        channel: The `FakeChannel` for the request.
        expect_cancellation: `True` if request processing is expected to be cancelled,
            `False` if the request should run to completion.
        expected_body: The expected response for the request.
        expected_code: The expected status code for the request. Defaults to `200` or
            `499` depending on `expect_cancellation`.
    """
    # Determine the expected status code.
    if expected_code is None:
        if expect_cancellation:
            expected_code = HTTP_STATUS_REQUEST_CANCELLED
        else:
            expected_code = 200

    request = channel.request
    if channel.is_finished():
        raise AssertionError(
            "Request finished before we could disconnect - "
            "ensure `await_result=False` is passed to `make_request`.",
        )

    # We're about to disconnect the request. This also disconnects the channel, so we
    # have to rely on mocks to extract the response.
    respond_method: Callable[..., Any]
    if isinstance(expected_body, bytes):
        respond_method = respond_with_html_bytes
    else:
        respond_method = respond_with_json

    with mock.patch(
        f"synapse.http.server.{respond_method.__name__}", wraps=respond_method
    ) as respond_mock:
        # Disconnect the request.
        request.connectionLost(reason=ConnectionDone())

        if expect_cancellation:
            # An immediate cancellation is expected.
            respond_mock.assert_called_once()
        else:
            respond_mock.assert_not_called()

            # The handler is expected to run to completion.
            reactor.advance(1.0)
            respond_mock.assert_called_once()

    args, _kwargs = respond_mock.call_args
    code, body = args[1], args[2]

    if code != expected_code:
        raise AssertionError(
            f"{code} != {expected_code} : "
            "Request did not finish with the expected status code."
        )

    if request.code != expected_code:
        raise AssertionError(
            f"{request.code} != {expected_code} : "
            "Request did not finish with the expected status code."
        )

    if body != expected_body:
        raise AssertionError(
            f"{body!r} != {expected_body!r} : "
            "Request did not finish with the expected response body."
        )
Performs a request repeatedly, disconnecting at successive `await`s, until one completes. Fails if: * A logging context is lost during cancellation. * A logging context gets restarted after it is marked as finished, eg. if a request's logging context is used by some processing started by the request, but the request neglects to cancel that processing or wait for it to complete. Note that "Re-starting finished log context" errors get raised within the request handling code and may or may not get caught. These errors will likely manifest as a different logging context error at a later point. When debugging logging context failures, setting a breakpoint in `logcontext_error` can prove useful. * A request gets stuck, possibly due to a previous cancellation. * The request does not return a 499 when the client disconnects. This implies that a `CancelledError` was swallowed somewhere. It is up to the caller to verify that the request returns the correct data when it finally runs to completion. Note that this function can only cover a single code path and does not guarantee that an endpoint is compatible with cancellation on every code path. To allow inspection of the code path that is being tested, this function will log the stack trace at every `await` that gets cancelled. To view these log lines, `trial` can be run with the `SYNAPSE_TEST_LOG_LEVEL=INFO` environment variable, which will include the log lines in `_trial_temp/test.log`. Alternatively, `_log_for_request` can be modified to write to `sys.stdout`. Args: test_name: The name of the test, which will be logged. reactor: The twisted reactor running the request handler. site: The twisted `Site` to use to render the request. method: The HTTP request method ("verb"). path: The HTTP path, suitably URL encoded (e.g. escaped UTF-8 & spaces and such). content: The body of the request. Returns: The `FakeChannel` object which stores the result of the final request that runs to completion.
def make_request_with_cancellation_test( test_name: str, reactor: MemoryReactorClock, site: Site, method: str, path: str, content: Union[bytes, str, JsonDict] = b"", *, token: Optional[str] = None, ) -> FakeChannel: """Performs a request repeatedly, disconnecting at successive `await`s, until one completes. Fails if: * A logging context is lost during cancellation. * A logging context get restarted after it is marked as finished, eg. if a request's logging context is used by some processing started by the request, but the request neglects to cancel that processing or wait for it to complete. Note that "Re-starting finished log context" errors get raised within the request handling code and may or may not get caught. These errors will likely manifest as a different logging context error at a later point. When debugging logging context failures, setting a breakpoint in `logcontext_error` can prove useful. * A request gets stuck, possibly due to a previous cancellation. * The request does not return a 499 when the client disconnects. This implies that a `CancelledError` was swallowed somewhere. It is up to the caller to verify that the request returns the correct data when it finally runs to completion. Note that this function can only cover a single code path and does not guarantee that an endpoint is compatible with cancellation on every code path. To allow inspection of the code path that is being tested, this function will log the stack trace at every `await` that gets cancelled. To view these log lines, `trial` can be run with the `SYNAPSE_TEST_LOG_LEVEL=INFO` environment variable, which will include the log lines in `_trial_temp/test.log`. Alternatively, `_log_for_request` can be modified to write to `sys.stdout`. Args: test_name: The name of the test, which will be logged. reactor: The twisted reactor running the request handler. site: The twisted `Site` to use to render the request. method: The HTTP request method ("verb"). path: The HTTP path, suitably URL encoded (e.g. escaped UTF-8 & spaces and such). content: The body of the request. Returns: The `FakeChannel` object which stores the result of the final request that runs to completion. """ # To process a request, a coroutine run is created for the async method handling # the request. That method may then start other coroutine runs, wrapped in # `Deferred`s. # # We would like to trigger a cancellation at the first `await`, re-run the # request and cancel at the second `await`, and so on. By patching # `Deferred.__next__`, we can intercept `await`s, track which ones we have or # have not seen, and force them to block when they wouldn't have. # The set of previously seen `await`s. # Each element is a stringified stack trace. seen_awaits: Set[Tuple[str, ...]] = set() _log_for_request( 0, f"Running make_request_with_cancellation_test for {test_name}..." ) for request_number in itertools.count(1): deferred_patch = Deferred__next__Patch(seen_awaits, request_number) try: with mock.patch( "synapse.http.server.respond_with_json", wraps=respond_with_json ) as respond_mock: with deferred_patch.patch(): # Start the request. channel = make_request( reactor, site, method, path, content, await_result=False, access_token=token, ) request = channel.request # Run the request until we see a new `await` which we have not # yet cancelled at, or it completes. 
while not respond_mock.called and not deferred_patch.new_await_seen: previous_awaits_seen = deferred_patch.awaits_seen reactor.advance(0.0) if deferred_patch.awaits_seen == previous_awaits_seen: # We didn't see any progress. Try advancing the clock. reactor.advance(1.0) if deferred_patch.awaits_seen == previous_awaits_seen: # We still didn't see any progress. The request might be # stuck. raise AssertionError( "Request appears to be stuck, possibly due to a " "previous cancelled request" ) if respond_mock.called: # The request ran to completion and we are done with testing it. # `respond_with_json` writes the response asynchronously, so we # might have to give the reactor a kick before the channel gets # the response. deferred_patch.unblock_awaits() channel.await_result() return channel # Disconnect the client and wait for the response. request.connectionLost(reason=ConnectionDone()) _log_for_request(request_number, "--- disconnected ---") # Advance the reactor just enough to get a response. # We don't want to advance the reactor too far, because we can only # detect re-starts of finished logging contexts after we set the # finished flag below. for _ in range(2): # We may need to pump the reactor to allow `delay_cancellation`s to # finish. if not respond_mock.called: reactor.advance(0.0) # Try advancing the clock if that didn't work. if not respond_mock.called: reactor.advance(1.0) # `delay_cancellation`s may be waiting for processing that we've # forced to block. Try unblocking them, followed by another round of # pumping the reactor. if not respond_mock.called: deferred_patch.unblock_awaits() # Mark the request's logging context as finished. If it gets # activated again, an `AssertionError` will be raised and bubble up # through request handling code. This `AssertionError` may or may not be # caught. Eventually some other code will deactivate the logging # context which will raise a different `AssertionError` because # resource usage won't have been correctly tracked. if isinstance(request, SynapseRequest) and request.logcontext: request.logcontext.finished = True # Check that the request finished with a 499, # ie. the `CancelledError` wasn't swallowed. respond_mock.assert_called_once() if request.code != HTTP_STATUS_REQUEST_CANCELLED: raise AssertionError( f"{request.code} != {HTTP_STATUS_REQUEST_CANCELLED} : " "Cancelled request did not finish with the correct status code." ) finally: # Unblock any processing that might be shared between requests, if we # haven't already done so. deferred_patch.unblock_awaits() assert False, "unreachable"
Logs a message for an iteration of `make_request_with_cancellation_test`.
def _log_for_request(request_number: int, message: str) -> None: """Logs a message for an iteration of `make_request_with_cancellation_test`.""" # We want consistent alignment when logging stack traces, so ensure the logging # context has a fixed width name. with LoggingContext(name=f"request-{request_number:<2}"): logger.info(message)
Logs the stack for an `await` in `make_request_with_cancellation_test`. Only logs the part of the stack that has changed since the previous call. Example output looks like: ``` delay_cancellation:750 (synapse/util/async_helpers.py:750) DatabasePool._runInteraction:768 (synapse/storage/database.py:768) > *blocked on await* at DatabasePool.runWithConnection:891 (synapse/storage/database.py:891) ``` Args: stack: The stack to log, as returned by `_get_stack()`. previous_stack: The previous stack logged, with callers appearing before callees. request_number: The request number to log against. note: A note to attach to the last stack frame, eg. "blocked on await".
def _log_await_stack( stack: List[inspect.FrameInfo], previous_stack: List[inspect.FrameInfo], request_number: int, note: str, ) -> None: """Logs the stack for an `await` in `make_request_with_cancellation_test`. Only logs the part of the stack that has changed since the previous call. Example output looks like: ``` delay_cancellation:750 (synapse/util/async_helpers.py:750) DatabasePool._runInteraction:768 (synapse/storage/database.py:768) > *blocked on await* at DatabasePool.runWithConnection:891 (synapse/storage/database.py:891) ``` Args: stack: The stack to log, as returned by `_get_stack()`. previous_stack: The previous stack logged, with callers appearing before callees. request_number: The request number to log against. note: A note to attach to the last stack frame, eg. "blocked on await". """ for i, frame_info in enumerate(stack[:-1]): # Skip any frames in common with the previous logging. if i < len(previous_stack) and frame_info == previous_stack[i]: continue frame = _format_stack_frame(frame_info) message = f"{' ' * i}{frame}" _log_for_request(request_number, message) # Always print the final frame with the `await`. # If the frame with the `await` started another coroutine run, we may have already # printed a deeper stack which includes our final frame. We want to log where all # `await`s happen, so we reprint the frame in this case. i = len(stack) - 1 frame_info = stack[i] frame = _format_stack_frame(frame_info) message = f"{' ' * i}> *{note}* at {frame}" _log_for_request(request_number, message)
Returns a string representation of a stack frame. Used for debug logging. Returns: A string, formatted like "JsonResource._async_render:559 (synapse/http/server.py:559)".
def _format_stack_frame(frame_info: inspect.FrameInfo) -> str: """Returns a string representation of a stack frame. Used for debug logging. Returns: A string, formatted like "JsonResource._async_render:559 (synapse/http/server.py:559)". """ method_name = _get_stack_frame_method_name(frame_info) return ( f"{method_name}:{frame_info.lineno} ({frame_info.filename}:{frame_info.lineno})" )
Captures the stack for a request. Skips any twisted frames and stops at `JsonResource.wrapped_async_request_handler`. Used for debug logging. Returns: A list of `inspect.FrameInfo`s, with callers appearing before callees.
def _get_stack(skip_frames: int) -> List[inspect.FrameInfo]: """Captures the stack for a request. Skips any twisted frames and stops at `JsonResource.wrapped_async_request_handler`. Used for debug logging. Returns: A list of `inspect.FrameInfo`s, with callers appearing before callees. """ stack = [] skip_frames += 1 # Also skip `get_stack` itself. for frame_info in inspect.stack()[skip_frames:]: # Skip any twisted `inlineCallbacks` gunk. if "/twisted/" in frame_info.filename: continue # Exclude the reactor frame, upwards. method_name = _get_stack_frame_method_name(frame_info) if method_name == "ThreadedMemoryReactorClock.advance": break stack.append(frame_info) # Stop at `JsonResource`'s `wrapped_async_request_handler`, which is the entry # point for request handling. if frame_info.function == "wrapped_async_request_handler": break return stack[::-1]
Returns the name of a stack frame's method. eg. "JsonResource._async_render".
def _get_stack_frame_method_name(frame_info: inspect.FrameInfo) -> str: """Returns the name of a stack frame's method. eg. "JsonResource._async_render". """ method_name = frame_info.function # Prefix the class name for instance methods. frame_self = frame_info.frame.f_locals.get("self") if frame_self: method = getattr(frame_self, method_name, None) if method: method_name = method.__qualname__ else: # We couldn't find the method on `self`. # Make something up. It's useful to know which class "contains" a # function anyway. method_name = f"{type(frame_self).__name__} {method_name}" return method_name
Turns a stack into a hashable value that can be put into a set.
def _hash_stack(stack: List[inspect.FrameInfo]) -> Tuple[str, ...]: """Turns a stack into a hashable value that can be put into a set.""" return tuple(_format_stack_frame(frame) for frame in stack)
Extract the labels and values of a sample. prometheus_client 0.5 changed the sample type to a named tuple with more members than the plain tuple had in 0.4 and earlier. This function can extract the labels and value from the sample for both sample types. Args: sample: The sample to get the labels and value from. Returns: A tuple of (labels, value) from the sample.
def get_sample_labels_value(sample: Sample) -> Tuple[Dict[str, str], float]: """Extract the labels and values of a sample. prometheus_client 0.5 changed the sample type to a named tuple with more members than the plain tuple had in 0.4 and earlier. This function can extract the labels and value from the sample for both sample types. Args: sample: The sample to get the labels and value from. Returns: A tuple of (labels, value) from the sample. """ # If the sample has a labels and value attribute, use those. if hasattr(sample, "labels") and hasattr(sample, "value"): return sample.labels, sample.value # Otherwise fall back to treating it as a plain 3 tuple. else: # In older versions of prometheus_client Sample was a 3-tuple. labels: Dict[str, str] value: float _, labels, value = sample # type: ignore[misc] return labels, value
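A sketch of reading a metric's samples in a version-agnostic way; the metric name here is made up for illustration:

from prometheus_client import REGISTRY

for metric_family in REGISTRY.collect():
    if metric_family.name != "synapse_example_metric":
        continue
    for sample in metric_family.samples:
        labels, value = get_sample_labels_value(sample)
        print(labels, value)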
Tests that send_local_presence_to_users sends local online presence to local users. This simultaneously tests two different use cases: * Testing that this method works whether it is called from a worker or the main process. - We test this by calling this method from both a TestCase that runs in monolith mode, and one that runs with a main and generic_worker. * Testing that multiple devices syncing simultaneously will all receive a snapshot of local, online presence - but only once per device. Args: test_with_workers: If True, this method will call ModuleApi.send_local_online_presence_to on a worker process. The test users will still sync with the main process. The purpose of testing with a worker is to check whether a Synapse module running on a worker can inform other workers/the main process that they should include additional presence when a user next syncs. If this argument is True, `test_case` MUST be an instance of BaseMultiWorkerStreamTestCase.
def _test_sending_local_online_presence_to_local_user( test_case: BaseModuleApiTestCase, test_with_workers: bool = False ) -> None: """Tests that send_local_presence_to_users sends local online presence to local users. This simultaneously tests two different usecases: * Testing that this method works when either called from a worker or the main process. - We test this by calling this method from both a TestCase that runs in monolith mode, and one that runs with a main and generic_worker. * Testing that multiple devices syncing simultaneously will all receive a snapshot of local, online presence - but only once per device. Args: test_with_workers: If True, this method will call ModuleApi.send_local_online_presence_to on a worker process. The test users will still sync with the main process. The purpose of testing with a worker is to check whether a Synapse module running on a worker can inform other workers/ the main process that they should include additional presence when a user next syncs. If this argument is True, `test_case` MUST be an instance of BaseMultiWorkerStreamTestCase. """ if test_with_workers: assert isinstance(test_case, BaseMultiWorkerStreamTestCase) # Create a worker process to make module_api calls against worker_hs = test_case.make_worker_hs( "synapse.app.generic_worker", {"worker_name": "presence_writer"} ) # Create a user who will send presence updates test_case.presence_receiver_id = test_case.register_user( "presence_receiver1", "monkey" ) test_case.presence_receiver_tok = test_case.login("presence_receiver1", "monkey") # And another user that will send presence updates out test_case.presence_sender_id = test_case.register_user("presence_sender2", "monkey") test_case.presence_sender_tok = test_case.login("presence_sender2", "monkey") # Put them in a room together so they will receive each other's presence updates room_id = test_case.helper.create_room_as( test_case.presence_receiver_id, tok=test_case.presence_receiver_tok, ) test_case.helper.join( room_id, test_case.presence_sender_id, tok=test_case.presence_sender_tok ) # Presence sender comes online send_presence_update( test_case, test_case.presence_sender_id, test_case.presence_sender_tok, "online", "I'm online!", ) # Presence receiver should have received it presence_updates, sync_token = sync_presence( test_case, test_case.presence_receiver_id ) test_case.assertEqual(len(presence_updates), 1) presence_update: UserPresenceState = presence_updates[0] test_case.assertEqual(presence_update.user_id, test_case.presence_sender_id) test_case.assertEqual(presence_update.state, "online") if test_with_workers: # Replicate the current sync presence token from the main process to the worker process. # We need to do this so that the worker process knows the current presence stream ID to # insert into the database when we call ModuleApi.send_local_online_presence_to. assert isinstance(test_case, BaseMultiWorkerStreamTestCase) test_case.replicate() # Syncing again should result in no presence updates presence_updates, sync_token = sync_presence( test_case, test_case.presence_receiver_id, sync_token ) test_case.assertEqual(len(presence_updates), 0) # We do an (initial) sync with a second "device" now, getting a new sync token. # We'll use this in a moment. 
_, sync_token_second_device = sync_presence( test_case, test_case.presence_receiver_id ) # Determine on which process (main or worker) to call ModuleApi.send_local_online_presence_to on if test_with_workers: assert isinstance(test_case, BaseMultiWorkerStreamTestCase) module_api_to_use = worker_hs.get_module_api() else: module_api_to_use = test_case.module_api # Trigger sending local online presence. We expect this information # to be saved to the database where all processes can access it. # Note that we're syncing via the master. d = defer.ensureDeferred( module_api_to_use.send_local_online_presence_to( [test_case.presence_receiver_id], ) ) if test_with_workers: # In order for the required presence_set_state replication request to occur between the # worker and main process, we need to pump the reactor. Otherwise, the coordinator that # reads the request on the main process won't do so, and the request will time out. while not d.called: test_case.reactor.advance(0.1) test_case.get_success(d) # The presence receiver should have received online presence again. presence_updates, sync_token = sync_presence( test_case, test_case.presence_receiver_id, sync_token ) test_case.assertEqual(len(presence_updates), 1) presence_update = presence_updates[0] test_case.assertEqual(presence_update.user_id, test_case.presence_sender_id) test_case.assertEqual(presence_update.state, "online") # We attempt to sync with the second sync token we received above - just to check that # multiple syncing devices will each receive the necessary online presence. presence_updates, sync_token_second_device = sync_presence( test_case, test_case.presence_receiver_id, sync_token_second_device ) test_case.assertEqual(len(presence_updates), 1) presence_update = presence_updates[0] test_case.assertEqual(presence_update.user_id, test_case.presence_sender_id) test_case.assertEqual(presence_update.state, "online") # However, if we now sync with either "device", we won't receive another burst of online presence # until the API is called again sometime in the future presence_updates, sync_token = sync_presence( test_case, test_case.presence_receiver_id, sync_token ) # Now we check that we don't receive *offline* updates using ModuleApi.send_local_online_presence_to. # Presence sender goes offline send_presence_update( test_case, test_case.presence_sender_id, test_case.presence_sender_tok, "offline", "I slink back into the darkness.", ) # Presence receiver should have received the updated, offline state presence_updates, sync_token = sync_presence( test_case, test_case.presence_receiver_id, sync_token ) test_case.assertEqual(len(presence_updates), 1) # Now trigger sending local online presence. d = defer.ensureDeferred( module_api_to_use.send_local_online_presence_to( [ test_case.presence_receiver_id, ] ) ) if test_with_workers: # In order for the required presence_set_state replication request to occur between the # worker and main process, we need to pump the reactor. Otherwise, the coordinator that # reads the request on the main process won't do so, and the request will time out. while not d.called: test_case.reactor.advance(0.1) test_case.get_success(d) # Presence receiver should *not* have received offline state presence_updates, sync_token = sync_presence( test_case, test_case.presence_receiver_id, sync_token ) test_case.assertEqual(len(presence_updates), 0)
Implements Factory.log, which is expected by Request.finish
def _log_request(request: Request) -> None: """Implements Factory.log, which is expected by Request.finish""" logger.info("Completed request %s", request)
Sample OIDC provider config used in backchannel logout tests. Args: id: IDP ID for this provider with_localpart_template: Set to `true` to have a default localpart_template in the `user_mapping_provider` config and skip the user mapping session **kwargs: rest of the config Returns: A dict suitable for the `oidc_config` or the `oidc_providers[]` parts of the HS config
def oidc_config( id: str, with_localpart_template: bool, **kwargs: Any ) -> Dict[str, Any]: """Sample OIDC provider config used in backchannel logout tests. Args: id: IDP ID for this provider with_localpart_template: Set to `true` to have a default localpart_template in the `user_mapping_provider` config and skip the user mapping session **kwargs: rest of the config Returns: A dict suitable for the `oidc_config` or the `oidc_providers[]` parts of the HS config """ config: Dict[str, Any] = { "idp_id": id, "idp_name": id, "issuer": TEST_OIDC_ISSUER, "client_id": "test-client-id", "client_secret": "test-client-secret", "scopes": ["openid"], } if with_localpart_template: config["user_mapping_provider"] = { "config": {"localpart_template": "{{ user.sub }}"} } else: config["user_mapping_provider"] = {"config": {}} config.update(kwargs) return config
s -> (s0,s1), (s1,s2), (s2, s3), ...
def pairwise(iterable: Iterable[T]) -> Iterable[Tuple[T, T]]: "s -> (s0,s1), (s1,s2), (s2, s3), ..." a, b = itertools.tee(iterable) next(b, None) return zip(a, b)
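For example, the helper zips each element with its successor:

assert list(pairwise([1, 2, 3, 4])) == [(1, 2), (2, 3), (3, 4)]
assert list(pairwise([1])) == []  # fewer than two items yields nothing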
Given a set of nodes and a graph, return all possible topological orderings.
def get_all_topologically_sorted_orders( nodes: Iterable[T], graph: Mapping[T, Collection[T]], ) -> List[List[T]]: """Given a set of nodes and a graph, return all possible topological orderings. """ # This is implemented by Kahn's algorithm, and forking execution each time # we have a choice over which node to consider next. degree_map = {node: 0 for node in nodes} reverse_graph: Dict[T, Set[T]] = {} for node, edges in graph.items(): if node not in degree_map: continue for edge in set(edges): if edge in degree_map: degree_map[node] += 1 reverse_graph.setdefault(edge, set()).add(node) reverse_graph.setdefault(node, set()) zero_degree = [node for node, degree in degree_map.items() if degree == 0] return _get_all_topologically_sorted_orders_inner( reverse_graph, zero_degree, degree_map )
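Reading the Kahn's-algorithm bookkeeping above, `graph` maps each node to the nodes that must come before it (its prev-nodes). A small sketch of the expected behaviour, assuming the helper returns every valid ordering:

# 1 must come before both 2 and 3, so exactly two orderings should come back.
orders = get_all_topologically_sorted_orders([1, 2, 3], {1: [], 2: [1], 3: [1]})
assert sorted(map(tuple, orders)) == [(1, 2, 3), (1, 3, 2)]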
Get all subsets of the graph where if node N is in the subgraph, then all nodes that can reach that node (i.e. for all X there exists a path X -> N) are in the subgraph.
def get_all_topologically_consistent_subsets( nodes: Iterable[T], graph: Mapping[T, Collection[T]], ) -> Set[FrozenSet[T]]: """Get all subsets of the graph where if node N is in the subgraph, then all nodes that can reach that node (i.e. for all X there exists a path X -> N) are in the subgraph. """ all_topological_orderings = get_all_topologically_sorted_orders(nodes, graph) graph_subsets = set() for ordering in all_topological_orderings: ordering.reverse() for idx in range(len(ordering)): graph_subsets.add(frozenset(ordering[:idx])) return graph_subsets
Generate `count` threepids as a list.
def gen_3pids(count: int) -> List[Dict[str, Any]]: """Generate `count` threepids as a list.""" return [ {"medium": "email", "address": "user%[email protected]" % i} for i in range(count) ]
A fake implementation of os.listdir which we can use to mock out the filesystem. Args: filepath: The directory to list files for. Returns: A list of files and folders in the directory.
def fake_listdir(filepath: str) -> List[str]: """ A fake implementation of os.listdir which we can use to mock out the filesystem. Args: filepath: The directory to list files for. Returns: A list of files and folders in the directory. """ if filepath.endswith("full_schemas"): return [str(SCHEMA_VERSION)] return ["99_add_unicorn_to_database.sql"]
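A sketch of plugging the fake into a test via unittest.mock; the code under test is elided:

from unittest import mock

with mock.patch("os.listdir", fake_listdir):
    ...  # exercise the schema-loading code under test here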
Configure the python logging appropriately for the tests. (Logs will end up in _trial_temp.)
def setup_logging() -> None: """Configure the python logging appropriately for the tests. (Logs will end up in _trial_temp.) """ root_logger = logging.getLogger() # We exclude `%(asctime)s` from this format because the Twisted logger adds its own # timestamp log_format = "%(name)s - %(lineno)d - " "%(levelname)s - %(request)s - %(message)s" handler = ToTwistedHandler() formatter = logging.Formatter(log_format) handler.setFormatter(formatter) handler.addFilter(LoggingContextFilter()) root_logger.addHandler(handler) log_level = os.environ.get("SYNAPSE_TEST_LOG_LEVEL", "ERROR") root_logger.setLevel(log_level) # In order to not add noise by default (since we only log ERROR messages for trial # tests as configured above), we only enable this for developers for looking for # more INFO or DEBUG. if root_logger.isEnabledFor(logging.INFO): # Log when events are (maybe unexpectedly) filtered out of responses in tests. It's # just nice to be able to look at the CI log and figure out why an event isn't being # returned. logging.getLogger("synapse.visibility.filtered_event_debug").setLevel( logging.DEBUG ) # Blow away the pyo3-log cache so that it reloads the configuration. reset_logging_config()
Get the result from an Awaitable which should have completed Asserts that the given awaitable has a result ready, and returns its value
def get_awaitable_result(awaitable: Awaitable[TV]) -> TV: """Get the result from an Awaitable which should have completed Asserts that the given awaitable has a result ready, and returns its value """ i = awaitable.__await__() try: next(i) except StopIteration as e: # awaitable returned a result return e.value # if next didn't raise, the awaitable hasn't completed. raise Exception("awaitable has not yet completed")
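For example, an async function with no suspension points completes on the first `__next__` call, so its result can be pulled out synchronously:

async def already_done() -> int:
    return 42

assert get_awaitable_result(already_done()) == 42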
Convert warnings from non-awaited coroutines into errors.
def setup_awaitable_errors() -> Callable[[], None]:
    """
    Convert warnings from non-awaited coroutines into errors.
    """
    warnings.simplefilter("error", RuntimeWarning)

    # State shared between unraisablehook and check_for_unraisable_exceptions.
    unraisable_exceptions = []
    orig_unraisablehook = sys.unraisablehook

    def unraisablehook(unraisable: "UnraisableHookArgs") -> None:
        unraisable_exceptions.append(unraisable.exc_value)

    def cleanup() -> None:
        """
        A method to be used as a clean-up that fails a test-case if there are any new
        unraisable exceptions.
        """
        sys.unraisablehook = orig_unraisablehook
        if unraisable_exceptions:
            exc = unraisable_exceptions.pop()
            assert exc is not None
            raise exc

    sys.unraisablehook = unraisablehook

    return cleanup
advance the clock until the deferred completes. Returns the number of milliseconds it took to complete.
def _await_resolution(reactor: ThreadedMemoryReactorClock, d: Deferred) -> float: """advance the clock until the deferred completes. Returns the number of milliseconds it took to complete. """ start_time = reactor.seconds() while not d.called: reactor.advance(0.01) return (reactor.seconds() - start_time) * 1000
Compute Pi to the required precision. Adapted from https://docs.python.org/3/library/decimal.html
def pi(precision: int) -> list[int]: """Compute Pi to the required precision. Adapted from https://docs.python.org/3/library/decimal.html """ saved_precision = getcontext().prec # Save precision getcontext().prec = precision three = Decimal(3) # substitute "three=3.0" for regular floats lasts, t, s, n, na, d, da = 0, three, 3, 1, 0, 0, 24 while s != lasts: lasts = s n, na = n + na, na + 8 d, da = d + da, da + 32 t = (t * n) / d s += t # type: ignore[assignment] digits = [] while s != 0: integral = int(s) digits.append(integral) s = (s - integral) * 10 getcontext().prec = saved_precision return digits
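A quick sanity check; only the leading digits are asserted, since the final digit or two depend on how the Decimal context rounds at the requested precision:

digits = pi(20)
assert digits[:10] == [3, 1, 4, 1, 5, 9, 2, 6, 5, 3]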