Register a RequestHandler class
def register_rh(handler):
    """Register a RequestHandler class"""
    assert issubclass(handler, RequestHandler), f'{handler} must be a subclass of RequestHandler'
    assert handler.RH_KEY not in _REQUEST_HANDLERS, f'RequestHandler {handler.RH_KEY} already registered'
    _REQUEST_HANDLERS[handler.RH_KEY] = handler
    return handler
Unified proxy selector for all backends
def select_proxy(url, proxies):
    """Unified proxy selector for all backends"""
    url_components = urllib.parse.urlparse(url)
    if 'no' in proxies:
        hostport = url_components.hostname + format_field(url_components.port, None, ':%s')
        if urllib.request.proxy_bypass_environment(hostport, {'no': proxies['no']}):
            return
        elif urllib.request.proxy_bypass(hostport):  # check system settings
            return

    return traverse_obj(proxies, url_components.scheme or 'http', 'all')
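A quick usage sketch (illustrative, made-up proxy values) showing how the selector resolves the scheme-specific, 'all' and 'no' keys:

proxies = {'https': 'http://127.0.0.1:8080', 'all': 'socks5://127.0.0.1:1080', 'no': 'localhost,.internal'}
select_proxy('https://example.com/video', proxies)  # -> 'http://127.0.0.1:8080' (scheme match)
select_proxy('ftp://example.com/file', proxies)     # -> 'socks5://127.0.0.1:1080' (falls back to 'all')
select_proxy('http://localhost/status', proxies)    # -> None (host matches the 'no' bypass list)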
Unified redirect method handling
def get_redirect_method(method, status):
    """Unified redirect method handling"""
    # A 303 must either use GET or HEAD for subsequent request
    # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.4
    if status == 303 and method != 'HEAD':
        method = 'GET'
    # 301 and 302 redirects are commonly turned into a GET from a POST
    # for subsequent requests by browsers, so we'll do the same.
    # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.2
    # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.3
    if status in (301, 302) and method == 'POST':
        method = 'GET'
    return method
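Illustrative calls, derived from the rules above (not part of the source):

get_redirect_method('POST', 301)  # -> 'GET'  (browsers rewrite POST on 301/302)
get_redirect_method('POST', 303)  # -> 'GET'  (303 forces GET unless the request was HEAD)
get_redirect_method('HEAD', 303)  # -> 'HEAD'
get_redirect_method('PUT', 307)   # -> 'PUT'  (307/308 preserve the original method)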
Get corresponding item from a mapping string like 'A>B/C>D/E' @returns (target, error_message)
def resolve_mapping(source, mapping):
    """
    Get corresponding item from a mapping string like 'A>B/C>D/E'
    @returns    (target, error_message)
    """
    for pair in mapping.lower().split('/'):
        kv = pair.split('>', 1)
        if len(kv) == 1 or kv[0].strip() == source:
            target = kv[-1].strip()
            if target == source:
                return target, f'already is in target format {source}'
            return target, None
    return None, f'could not find a mapping for {source}'
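A few illustrative lookups (hypothetical format names); note that the mapping string is lower-cased, so source values are expected in lower case:

resolve_mapping('flv', 'flv>mp4')        # -> ('mp4', None)
resolve_mapping('webm', 'flv>mp4/mkv')   # -> ('mkv', None)   the last entry has no '>' and acts as a catch-all
resolve_mapping('mkv', 'flv>mp4/mkv')    # -> ('mkv', 'already is in target format mkv')
resolve_mapping('aac', 'flv>mp4')        # -> (None, 'could not find a mapping for aac')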
Escape non-ASCII characters as suggested by RFC 3986
def escape_rfc3986(s):
    """Escape non-ASCII characters as suggested by RFC 3986"""
    return urllib.parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
Normalize URL as suggested by RFC 3986
def normalize_url(url):
    """Normalize URL as suggested by RFC 3986"""
    url_parsed = urllib.parse.urlparse(url)

    return url_parsed._replace(
        netloc=url_parsed.netloc.encode('idna').decode('ascii'),
        path=escape_rfc3986(remove_dot_segments(url_parsed.path)),
        params=escape_rfc3986(url_parsed.params),
        query=escape_rfc3986(url_parsed.query),
        fragment=escape_rfc3986(url_parsed.fragment)
    ).geturl()
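A small illustration (assuming remove_dot_segments and escape_rfc3986 from this module): dot segments are collapsed and non-ASCII characters are percent-encoded, while already-escaped characters are left alone.

normalize_url('http://example.com/a/../b c?q=ä')
# -> 'http://example.com/b%20c?q=%C3%A4'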
Safely traverse nested `dict`s and `Iterable`s >>> obj = [{}, {"key": "value"}] >>> traverse_obj(obj, (1, "key")) 'value' Each of the provided `paths` is tested and the first producing a valid result will be returned. The next path will also be tested if the path branched but no results could be found. Supported values for traversal are `Mapping`, `Iterable`, `re.Match`, `xml.etree.ElementTree` (xpath) and `http.cookies.Morsel`. Unhelpful values (`{}`, `None`) are treated as the absence of a value and discarded. The paths will be wrapped in `variadic`, so that `'key'` is conveniently the same as `('key', )`. The keys in the path can be one of: - `None`: Return the current object. - `set`: Requires the only item in the set to be a type or function, like `{type}`/`{type, type, ...}/`{func}`. If a `type`, return only values of this type. If a function, returns `func(obj)`. - `str`/`int`: Return `obj[key]`. For `re.Match`, return `obj.group(key)`. - `slice`: Branch out and return all values in `obj[key]`. - `Ellipsis`: Branch out and return a list of all values. - `tuple`/`list`: Branch out and return a list of all matching values. Read as: `[traverse_obj(obj, branch) for branch in branches]`. - `function`: Branch out and return values filtered by the function. Read as: `[value for key, value in obj if function(key, value)]`. For `Iterable`s, `key` is the index of the value. For `re.Match`es, `key` is the group number (0 = full match) as well as additionally any group names, if given. - `dict`: Transform the current object and return a matching dict. Read as: `{key: traverse_obj(obj, path) for key, path in dct.items()}`. - `any`-builtin: Take the first matching object and return it, resetting branching. - `all`-builtin: Take all matching objects and return them as a list, resetting branching. `tuple`, `list`, and `dict` all support nested paths and branches. @params paths Paths which to traverse by. @param default Value to return if the paths do not match. If the last key in the path is a `dict`, it will apply to each value inside the dict instead, depth first. Try to avoid if using nested `dict` keys. @param expected_type If a `type`, only accept final values of this type. If any other callable, try to call the function on each result. If the last key in the path is a `dict`, it will apply to each value inside the dict instead, recursively. This does respect branching paths. @param get_all If `False`, return the first matching result, otherwise all matching ones. @param casesense If `False`, consider string dictionary keys as case insensitive. `traverse_string` is only meant to be used by YoutubeDL.prepare_outtmpl and is not part of the API @param traverse_string Whether to traverse into objects as strings. If `True`, any non-compatible object will first be converted into a string and then traversed into. The return value of that path will be a string instead, not respecting any further branching. @returns The result of the object traversal. If successful, `get_all=True`, and the path branches at least once, then a list of results is returned instead. If no `default` is given and the last path branches, a `list` of results is always returned. If a path ends on a `dict` that result will always be a `dict`.
def traverse_obj( obj, *paths, default=NO_DEFAULT, expected_type=None, get_all=True, casesense=True, is_user_input=NO_DEFAULT, traverse_string=False): """ Safely traverse nested `dict`s and `Iterable`s >>> obj = [{}, {"key": "value"}] >>> traverse_obj(obj, (1, "key")) 'value' Each of the provided `paths` is tested and the first producing a valid result will be returned. The next path will also be tested if the path branched but no results could be found. Supported values for traversal are `Mapping`, `Iterable`, `re.Match`, `xml.etree.ElementTree` (xpath) and `http.cookies.Morsel`. Unhelpful values (`{}`, `None`) are treated as the absence of a value and discarded. The paths will be wrapped in `variadic`, so that `'key'` is conveniently the same as `('key', )`. The keys in the path can be one of: - `None`: Return the current object. - `set`: Requires the only item in the set to be a type or function, like `{type}`/`{type, type, ...}/`{func}`. If a `type`, return only values of this type. If a function, returns `func(obj)`. - `str`/`int`: Return `obj[key]`. For `re.Match`, return `obj.group(key)`. - `slice`: Branch out and return all values in `obj[key]`. - `Ellipsis`: Branch out and return a list of all values. - `tuple`/`list`: Branch out and return a list of all matching values. Read as: `[traverse_obj(obj, branch) for branch in branches]`. - `function`: Branch out and return values filtered by the function. Read as: `[value for key, value in obj if function(key, value)]`. For `Iterable`s, `key` is the index of the value. For `re.Match`es, `key` is the group number (0 = full match) as well as additionally any group names, if given. - `dict`: Transform the current object and return a matching dict. Read as: `{key: traverse_obj(obj, path) for key, path in dct.items()}`. - `any`-builtin: Take the first matching object and return it, resetting branching. - `all`-builtin: Take all matching objects and return them as a list, resetting branching. `tuple`, `list`, and `dict` all support nested paths and branches. @params paths Paths which to traverse by. @param default Value to return if the paths do not match. If the last key in the path is a `dict`, it will apply to each value inside the dict instead, depth first. Try to avoid if using nested `dict` keys. @param expected_type If a `type`, only accept final values of this type. If any other callable, try to call the function on each result. If the last key in the path is a `dict`, it will apply to each value inside the dict instead, recursively. This does respect branching paths. @param get_all If `False`, return the first matching result, otherwise all matching ones. @param casesense If `False`, consider string dictionary keys as case insensitive. `traverse_string` is only meant to be used by YoutubeDL.prepare_outtmpl and is not part of the API @param traverse_string Whether to traverse into objects as strings. If `True`, any non-compatible object will first be converted into a string and then traversed into. The return value of that path will be a string instead, not respecting any further branching. @returns The result of the object traversal. If successful, `get_all=True`, and the path branches at least once, then a list of results is returned instead. If no `default` is given and the last path branches, a `list` of results is always returned. If a path ends on a `dict` that result will always be a `dict`. 
""" if is_user_input is not NO_DEFAULT: deprecation_warning('The is_user_input parameter is deprecated and no longer works') casefold = lambda k: k.casefold() if isinstance(k, str) else k if isinstance(expected_type, type): type_test = lambda val: val if isinstance(val, expected_type) else None else: type_test = lambda val: try_call(expected_type or IDENTITY, args=(val,)) def apply_key(key, obj, is_last): branching = False result = None if obj is None and traverse_string: if key is ... or callable(key) or isinstance(key, slice): branching = True result = () elif key is None: result = obj elif isinstance(key, set): item = next(iter(key)) if len(key) > 1 or isinstance(item, type): assert all(isinstance(item, type) for item in key) if isinstance(obj, tuple(key)): result = obj else: result = try_call(item, args=(obj,)) elif isinstance(key, (list, tuple)): branching = True result = itertools.chain.from_iterable( apply_path(obj, branch, is_last)[0] for branch in key) elif key is ...: branching = True if isinstance(obj, http.cookies.Morsel): obj = dict(obj, key=obj.key, value=obj.value) if isinstance(obj, collections.abc.Mapping): result = obj.values() elif is_iterable_like(obj) or isinstance(obj, xml.etree.ElementTree.Element): result = obj elif isinstance(obj, re.Match): result = obj.groups() elif traverse_string: branching = False result = str(obj) else: result = () elif callable(key): branching = True if isinstance(obj, http.cookies.Morsel): obj = dict(obj, key=obj.key, value=obj.value) if isinstance(obj, collections.abc.Mapping): iter_obj = obj.items() elif is_iterable_like(obj) or isinstance(obj, xml.etree.ElementTree.Element): iter_obj = enumerate(obj) elif isinstance(obj, re.Match): iter_obj = itertools.chain( enumerate((obj.group(), *obj.groups())), obj.groupdict().items()) elif traverse_string: branching = False iter_obj = enumerate(str(obj)) else: iter_obj = () result = (v for k, v in iter_obj if try_call(key, args=(k, v))) if not branching: # string traversal result = ''.join(result) elif isinstance(key, dict): iter_obj = ((k, _traverse_obj(obj, v, False, is_last)) for k, v in key.items()) result = { k: v if v is not None else default for k, v in iter_obj if v is not None or default is not NO_DEFAULT } or None elif isinstance(obj, collections.abc.Mapping): if isinstance(obj, http.cookies.Morsel): obj = dict(obj, key=obj.key, value=obj.value) result = (try_call(obj.get, args=(key,)) if casesense or try_call(obj.__contains__, args=(key,)) else next((v for k, v in obj.items() if casefold(k) == key), None)) elif isinstance(obj, re.Match): if isinstance(key, int) or casesense: with contextlib.suppress(IndexError): result = obj.group(key) elif isinstance(key, str): result = next((v for k, v in obj.groupdict().items() if casefold(k) == key), None) elif isinstance(key, (int, slice)): if is_iterable_like(obj, (collections.abc.Sequence, xml.etree.ElementTree.Element)): branching = isinstance(key, slice) with contextlib.suppress(IndexError): result = obj[key] elif traverse_string: with contextlib.suppress(IndexError): result = str(obj)[key] elif isinstance(obj, xml.etree.ElementTree.Element) and isinstance(key, str): xpath, _, special = key.rpartition('/') if not special.startswith('@') and not special.endswith('()'): xpath = key special = None # Allow abbreviations of relative paths, absolute paths error if xpath.startswith('/'): xpath = f'.{xpath}' elif xpath and not xpath.startswith('./'): xpath = f'./{xpath}' def apply_specials(element): if special is None: return element if special == '@': 
return element.attrib if special.startswith('@'): return try_call(element.attrib.get, args=(special[1:],)) if special == 'text()': return element.text raise SyntaxError(f'apply_specials is missing case for {special!r}') if xpath: result = list(map(apply_specials, obj.iterfind(xpath))) else: result = apply_specials(obj) return branching, result if branching else (result,) def lazy_last(iterable): iterator = iter(iterable) prev = next(iterator, NO_DEFAULT) if prev is NO_DEFAULT: return for item in iterator: yield False, prev prev = item yield True, prev def apply_path(start_obj, path, test_type): objs = (start_obj,) has_branched = False key = None for last, key in lazy_last(variadic(path, (str, bytes, dict, set))): if not casesense and isinstance(key, str): key = key.casefold() if key in (any, all): has_branched = False filtered_objs = (obj for obj in objs if obj not in (None, {})) if key is any: objs = (next(filtered_objs, None),) else: objs = (list(filtered_objs),) continue if __debug__ and callable(key): # Verify function signature inspect.signature(key).bind(None, None) new_objs = [] for obj in objs: branching, results = apply_key(key, obj, last) has_branched |= branching new_objs.append(results) objs = itertools.chain.from_iterable(new_objs) if test_type and not isinstance(key, (dict, list, tuple)): objs = map(type_test, objs) return objs, has_branched, isinstance(key, dict) def _traverse_obj(obj, path, allow_empty, test_type): results, has_branched, is_dict = apply_path(obj, path, test_type) results = LazyList(item for item in results if item not in (None, {})) if get_all and has_branched: if results: return results.exhaust() if allow_empty: return [] if default is NO_DEFAULT else default return None return results[0] if results else {} if allow_empty and is_dict else None for index, path in enumerate(paths, 1): result = _traverse_obj(obj, path, index == len(paths), True) if result is not None: return result return None if default is NO_DEFAULT else default
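Usage examples in the spirit of the docstring (the sample data is made up):

data = {'formats': [{'url': 'https://cdn.example.com/1.mp4', 'height': 720},
                    {'url': None, 'height': 1080}]}
traverse_obj(data, ('formats', 0, 'url'))           # -> 'https://cdn.example.com/1.mp4'
traverse_obj(data, ('formats', ..., 'height'))      # -> [720, 1080]  (branching returns a list)
traverse_obj(data, ('formats', ..., 'url', {str}))  # -> ['https://cdn.example.com/1.mp4']  (type filter drops None)
traverse_obj(data, 'missing', default='n/a')        # -> 'n/a'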
Returns the platform name as a str
def platform_name():
    """ Returns the platform name as a str """
    return platform.platform()
Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks.
def preferredencoding(): """Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks. """ try: pref = locale.getpreferredencoding() 'TEST'.encode(pref) except Exception: pref = 'UTF-8' return pref
Encode obj as JSON and write it to fn, atomically if possible
def write_json_file(obj, fn): """ Encode obj as JSON and write it to fn, atomically if possible """ tf = tempfile.NamedTemporaryFile( prefix=f'{os.path.basename(fn)}.', dir=os.path.dirname(fn), suffix='.tmp', delete=False, mode='w', encoding='utf-8') try: with tf: json.dump(obj, tf, ensure_ascii=False) if sys.platform == 'win32': # Need to remove existing file on Windows, else os.rename raises # WindowsError or FileExistsError. with contextlib.suppress(OSError): os.unlink(fn) with contextlib.suppress(OSError): mask = os.umask(0) os.umask(mask) os.chmod(tf.name, 0o666 & ~mask) os.rename(tf.name, fn) except Exception: with contextlib.suppress(OSError): os.remove(tf.name) raise
Find the xpath xpath[@key=val]
def find_xpath_attr(node, xpath, key, val=None):
    """ Find the xpath xpath[@key=val] """
    assert re.match(r'^[a-zA-Z_-]+$', key)
    expr = xpath + ('[@%s]' % key if val is None else f"[@{key}='{val}']")
    return node.find(expr)
Return the content of the tag with the specified ID in the passed HTML document
def get_element_by_id(id, html, **kwargs):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute('id', id, html, **kwargs)
Return the html of the tag with the specified ID in the passed HTML document
def get_element_html_by_id(id, html, **kwargs):
    """Return the html of the tag with the specified ID in the passed HTML document"""
    return get_element_html_by_attribute('id', id, html, **kwargs)
Return the content of the first tag with the specified class in the passed HTML document
def get_element_by_class(class_name, html):
    """Return the content of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_by_class(class_name, html)
    return retval[0] if retval else None
Return the html of the first tag with the specified class in the passed HTML document
def get_element_html_by_class(class_name, html):
    """Return the html of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_html_by_class(class_name, html)
    return retval[0] if retval else None
Return the content of all tags with the specified class in the passed HTML document as a list
def get_elements_by_class(class_name, html, **kargs):
    """Return the content of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_by_attribute(
        'class', r'[^\'"]*(?<=[\'"\s])%s(?=[\'"\s])[^\'"]*' % re.escape(class_name),
        html, escape_value=False)
Return the html of all tags with the specified class in the passed HTML document as a list
def get_elements_html_by_class(class_name, html):
    """Return the html of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_html_by_attribute(
        'class', r'[^\'"]*(?<=[\'"\s])%s(?=[\'"\s])[^\'"]*' % re.escape(class_name),
        html, escape_value=False)
Return the content of the tag with the specified attribute in the passed HTML document
def get_elements_by_attribute(*args, **kwargs):
    """Return the content of the tag with the specified attribute in the passed HTML document"""
    return [content for content, _ in get_elements_text_and_html_by_attribute(*args, **kwargs)]
Return the html of the tag with the specified attribute in the passed HTML document
def get_elements_html_by_attribute(*args, **kwargs):
    """Return the html of the tag with the specified attribute in the passed HTML document"""
    return [whole for _, whole in get_elements_text_and_html_by_attribute(*args, **kwargs)]
Return the text (content) and the html (whole) of the tag with the specified attribute in the passed HTML document
def get_elements_text_and_html_by_attribute(attribute, value, html, *, tag=r'[\w:.-]+', escape_value=True): """ Return the text (content) and the html (whole) of the tag with the specified attribute in the passed HTML document """ if not value: return quote = '' if re.match(r'''[\s"'`=<>]''', value) else '?' value = re.escape(value) if escape_value else value partial_element_re = rf'''(?x) <(?P<tag>{tag}) (?:\s(?:[^>"']|"[^"]*"|'[^']*')*)? \s{re.escape(attribute)}\s*=\s*(?P<_q>['"]{quote})(?-x:{value})(?P=_q) ''' for m in re.finditer(partial_element_re, html): content, whole = get_element_text_and_html_by_tag(m.group('tag'), html[m.start():]) yield ( unescapeHTML(re.sub(r'^(?P<q>["\'])(?P<content>.*)(?P=q)$', r'\g<content>', content, flags=re.DOTALL)), whole )
For the first element with the specified tag in the passed HTML document, return its content (text) and the whole element (html)
def get_element_text_and_html_by_tag(tag, html): """ For the first element with the specified tag in the passed HTML document return its' content (text) and the whole element (html) """ def find_or_raise(haystack, needle, exc): try: return haystack.index(needle) except ValueError: raise exc closing_tag = f'</{tag}>' whole_start = find_or_raise( html, f'<{tag}', compat_HTMLParseError(f'opening {tag} tag not found')) content_start = find_or_raise( html[whole_start:], '>', compat_HTMLParseError(f'malformed opening {tag} tag')) content_start += whole_start + 1 with HTMLBreakOnClosingTagParser() as parser: parser.feed(html[whole_start:content_start]) if not parser.tagstack or parser.tagstack[0] != tag: raise compat_HTMLParseError(f'parser did not match opening {tag} tag') offset = content_start while offset < len(html): next_closing_tag_start = find_or_raise( html[offset:], closing_tag, compat_HTMLParseError(f'closing {tag} tag not found')) next_closing_tag_end = next_closing_tag_start + len(closing_tag) try: parser.feed(html[offset:offset + next_closing_tag_end]) offset += next_closing_tag_end except HTMLBreakOnClosingTagParser.HTMLBreakOnClosingTagException: return html[content_start:offset + next_closing_tag_start], \ html[whole_start:offset + next_closing_tag_end] raise compat_HTMLParseError('unexpected end of html')
Given a string for an HTML element such as <el a="foo" B="bar" c="&98;az" d=boz empty= noval entity="&amp;" sq='"' dq="'" > Decode and return a dictionary of attributes. { 'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz', 'empty': '', 'noval': None, 'entity': '&', 'sq': '"', 'dq': ''' }.
def extract_attributes(html_element): """Given a string for an HTML element such as <el a="foo" B="bar" c="&98;az" d=boz empty= noval entity="&amp;" sq='"' dq="'" > Decode and return a dictionary of attributes. { 'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz', 'empty': '', 'noval': None, 'entity': '&', 'sq': '"', 'dq': '\'' }. """ parser = HTMLAttributeParser() with contextlib.suppress(compat_HTMLParseError): parser.feed(html_element) parser.close() return parser.attrs
Given a string for a series of HTML <li> elements, return a list of their attributes
def parse_list(webpage):
    """Given a string for a series of HTML <li> elements, return a list of their attributes"""
    parser = HTMLListAttrsParser()
    parser.feed(webpage)
    parser.close()
    return parser.items
Clean an HTML snippet into a readable string
def clean_html(html): """Clean an HTML snippet into a readable string""" if html is None: # Convenience for sanitizing descriptions etc. return html html = re.sub(r'\s+', ' ', html) html = re.sub(r'(?u)\s?<\s?br\s?/?\s?>\s?', '\n', html) html = re.sub(r'(?u)<\s?/\s?p\s?>\s?<\s?p[^>]*>', '\n', html) # Strip html tags html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) return html.strip()
Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name).
def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name). """ if filename == '-': if sys.platform == 'win32': import msvcrt # stdout may be any IO stream, e.g. when using contextlib.redirect_stdout with contextlib.suppress(io.UnsupportedOperation): msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) for attempt in range(2): try: try: if sys.platform == 'win32': # FIXME: An exclusive lock also locks the file from being read. # Since windows locks are mandatory, don't lock the file on windows (for now). # Ref: https://github.com/yt-dlp/yt-dlp/issues/3124 raise LockingUnsupportedError() stream = locked_file(filename, open_mode, block=False).__enter__() except OSError: stream = open(filename, open_mode) return stream, filename except OSError as err: if attempt or err.errno in (errno.EACCES,): raise old_filename, filename = filename, sanitize_path(filename) if old_filename == filename: raise
Convert RFC 2822 defined time string into system timestamp
def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp
Sanitizes a string so it could be used as part of a filename. @param restricted Use a stricter subset of allowed characters @param is_id Whether this is an ID that should be kept unchanged if possible. If unset, yt-dlp's new sanitization rules are in effect
def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT): """Sanitizes a string so it could be used as part of a filename. @param restricted Use a stricter subset of allowed characters @param is_id Whether this is an ID that should be kept unchanged if possible. If unset, yt-dlp's new sanitization rules are in effect """ if s == '': return '' def replace_insane(char): if restricted and char in ACCENT_CHARS: return ACCENT_CHARS[char] elif not restricted and char == '\n': return '\0 ' elif is_id is NO_DEFAULT and not restricted and char in '"*:<>?|/\\': # Replace with their full-width unicode counterparts return {'/': '\u29F8', '\\': '\u29f9'}.get(char, chr(ord(char) + 0xfee0)) elif char == '?' or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': return '' if restricted else '\'' elif char == ':': return '\0_\0-' if restricted else '\0 \0-' elif char in '\\/|*<>': return '\0_' if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace() or ord(char) > 127): return '' if unicodedata.category(char)[0] in 'CM' else '\0_' return char # Replace look-alike Unicode glyphs if restricted and (is_id is NO_DEFAULT or not is_id): s = unicodedata.normalize('NFKC', s) s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) # Handle timestamps result = ''.join(map(replace_insane, s)) if is_id is NO_DEFAULT: result = re.sub(r'(\0.)(?:(?=\1)..)+', r'\1', result) # Remove repeated substitute chars STRIP_RE = r'(?:\0.|[ _-])*' result = re.sub(f'^\0.{STRIP_RE}|{STRIP_RE}\0.$', '', result) # Remove substitute chars from start/end result = result.replace('\0', '') or '_' if not is_id: while '__' in result: result = result.replace('__', '_') result = result.strip('_') # Common case of "Foreign band name - English song title" if restricted and result.startswith('-_'): result = result[2:] if result.startswith('-'): result = '_' + result[len('-'):] result = result.lstrip('.') if not result: result = '_' return result
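Roughly, the default mode replaces forbidden characters with look-alike full-width glyphs, while restricted mode falls back to ASCII substitutes (illustrative, based on the rules above):

sanitize_filename('AC/DC: Live!')                   # -> 'AC⧸DC： Live!'
sanitize_filename('AC/DC: Live!', restricted=True)  # -> 'AC_DC_-_Live'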
Sanitizes and normalizes path on Windows
def sanitize_path(s, force=False): """Sanitizes and normalizes path on Windows""" # XXX: this handles drive relative paths (c:sth) incorrectly if sys.platform == 'win32': force = False drive_or_unc, _ = os.path.splitdrive(s) elif force: drive_or_unc = '' else: return s norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep) if drive_or_unc: norm_path.pop(0) sanitized_path = [ path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part) for path_part in norm_path] if drive_or_unc: sanitized_path.insert(0, drive_or_unc + os.path.sep) elif force and s and s[0] == os.path.sep: sanitized_path.insert(0, os.path.sep) # TODO: Fix behavioral differences <3.12 # The workaround using `normpath` only superficially passes tests # Ref: https://github.com/python/cpython/pull/100351 return os.path.normpath(os.path.join(*sanitized_path))
Expand shell variables and ~
def expand_path(s):
    """Expand shell variables and ~"""
    return os.path.expandvars(compat_expanduser(s))
Remove all duplicates from the input iterable
def orderedSet(iterable, *, lazy=False):
    """Remove all duplicates from the input iterable"""
    def _iter():
        seen = []  # Do not use set since the items can be unhashable
        for x in iterable:
            if x not in seen:
                seen.append(x)
                yield x

    return _iter() if lazy else list(_iter())
Transforms an HTML entity to a character.
def _htmlentity_transform(entity_with_semicolon): """Transforms an HTML entity to a character.""" entity = entity_with_semicolon[:-1] # Known non-numeric HTML entity if entity in html.entities.name2codepoint: return chr(html.entities.name2codepoint[entity]) # TODO: HTML5 allows entities without a semicolon. # E.g. '&Eacuteric' should be decoded as 'Éric'. if entity_with_semicolon in html.entities.html5: return html.entities.html5[entity_with_semicolon] mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity) if mobj is not None: numstr = mobj.group(1) if numstr.startswith('x'): base = 16 numstr = '0%s' % numstr else: base = 10 # See https://github.com/ytdl-org/youtube-dl/issues/7518 with contextlib.suppress(ValueError): return chr(int(numstr, base)) # Unknown entity in name, return its literal representation return '&%s;' % entity
Return a UNIX timestamp from the given date
def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None date_str = re.sub(r'\.[0-9]+', '', date_str) if timezone is None: timezone, date_str = extract_timezone(date_str) with contextlib.suppress(ValueError): date_format = f'%Y-%m-%d{delimiter}%H:%M:%S' dt_ = dt.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt_.timetuple())
Return a string with the date in the format YYYYMMDD
def unified_strdate(date_str, day_first=True): """Return a string with the date in the format YYYYMMDD""" if date_str is None: return None upload_date = None # Replace commas date_str = date_str.replace(',', ' ') # Remove AM/PM + timezone date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str) _, date_str = extract_timezone(date_str) for expression in date_formats(day_first): with contextlib.suppress(ValueError): upload_date = dt.datetime.strptime(date_str, expression).strftime('%Y%m%d') if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: with contextlib.suppress(ValueError): upload_date = dt.datetime(*timetuple[:6]).strftime('%Y%m%d') if upload_date is not None: return str(upload_date)
Return a datetime object from a string. Supported format: (now|today|yesterday|DATE)([+-]\d+(microsecond|second|minute|hour|day|week|month|year)s?)? @param format strftime format of DATE @param precision Round the datetime object: auto|microsecond|second|minute|hour|day auto: round to the unit provided in date_str (if applicable).
def datetime_from_str(date_str, precision='auto', format='%Y%m%d'): R""" Return a datetime object from a string. Supported format: (now|today|yesterday|DATE)([+-]\d+(microsecond|second|minute|hour|day|week|month|year)s?)? @param format strftime format of DATE @param precision Round the datetime object: auto|microsecond|second|minute|hour|day auto: round to the unit provided in date_str (if applicable). """ auto_precision = False if precision == 'auto': auto_precision = True precision = 'microsecond' today = datetime_round(dt.datetime.now(dt.timezone.utc), precision) if date_str in ('now', 'today'): return today if date_str == 'yesterday': return today - dt.timedelta(days=1) match = re.match( r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)s?', date_str) if match is not None: start_time = datetime_from_str(match.group('start'), precision, format) time = int(match.group('time')) * (-1 if match.group('sign') == '-' else 1) unit = match.group('unit') if unit == 'month' or unit == 'year': new_date = datetime_add_months(start_time, time * 12 if unit == 'year' else time) unit = 'day' else: if unit == 'week': unit = 'day' time *= 7 delta = dt.timedelta(**{unit + 's': time}) new_date = start_time + delta if auto_precision: return datetime_round(new_date, unit) return new_date return datetime_round(dt.datetime.strptime(date_str, format), precision)
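Some illustrative inputs (relative expressions are applied to the current UTC time):

datetime_from_str('now-3days')    # 3 days ago, rounded to day precision (auto)
datetime_from_str('today+1week')  # equivalent to 'today+7days'
datetime_from_str('20240101')     # parsed with the default '%Y%m%d' format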
Return a date object from a string using datetime_from_str @param strict Restrict allowed patterns to "YYYYMMDD" and (now|today|yesterday)(-\d+(day|week|month|year)s?)?
def date_from_str(date_str, format='%Y%m%d', strict=False): R""" Return a date object from a string using datetime_from_str @param strict Restrict allowed patterns to "YYYYMMDD" and (now|today|yesterday)(-\d+(day|week|month|year)s?)? """ if strict and not re.fullmatch(r'\d{8}|(now|today|yesterday)(-\d+(day|week|month|year)s?)?', date_str): raise ValueError(f'Invalid date format "{date_str}"') return datetime_from_str(date_str, precision='microsecond', format=format).date()
Increment/Decrement a datetime object by months.
def datetime_add_months(dt_, months):
    """Increment/Decrement a datetime object by months."""
    month = dt_.month + months - 1
    year = dt_.year + month // 12
    month = month % 12 + 1
    day = min(dt_.day, calendar.monthrange(year, month)[1])
    return dt_.replace(year, month, day)
Round a datetime object's time to a specific precision
def datetime_round(dt_, precision='day'): """ Round a datetime object's time to a specific precision """ if precision == 'microsecond': return dt_ unit_seconds = { 'day': 86400, 'hour': 3600, 'minute': 60, 'second': 1, } roundto = lambda x, n: ((x + n / 2) // n) * n timestamp = roundto(calendar.timegm(dt_.timetuple()), unit_seconds[precision]) return dt.datetime.fromtimestamp(timestamp, dt.timezone.utc)
Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format
def hyphenate_date(date_str):
    """
    Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format
    """
    match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    if match is not None:
        return '-'.join(match.groups())
    else:
        return date_str
Get Windows version. returns () if it's not running on Windows
def get_windows_version(): ''' Get Windows version. returns () if it's not running on Windows ''' if compat_os_name == 'nt': return version_tuple(platform.win32_ver()[1]) else: return ()
Pass additional data in a URL for internal use.
def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use. """
    url, idata = unsmuggle_url(url, {})
    data.update(idata)
    sdata = urllib.parse.urlencode(
        {'__youtubedl_smuggle': json.dumps(data)})
    return url + '#' + sdata
Formats numbers with decimal suffixes like K, M, etc
def format_decimal_suffix(num, fmt='%d%s', *, factor=1000):
    """ Formats numbers with decimal suffixes like K, M, etc """
    num, factor = float_or_none(num), float(factor)
    if num is None or num < 0:
        return None
    POSSIBLE_SUFFIXES = 'kMGTPEZY'
    exponent = 0 if num == 0 else min(int(math.log(num, factor)), len(POSSIBLE_SUFFIXES))
    suffix = ['', *POSSIBLE_SUFFIXES][exponent]
    if factor == 1024:
        suffix = {'k': 'Ki', '': ''}.get(suffix, f'{suffix}i')
    converted = num / (factor ** exponent)
    return fmt % (converted, suffix)
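Example values (derived from the formula above):

format_decimal_suffix(1_234_000, '%.1f%s')        # -> '1.2M'
format_decimal_suffix(5120, '%d%s', factor=1024)  # -> '5Ki'
format_decimal_suffix(999)                        # -> '999'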
Parse a string indicating a byte quantity into an integer
def parse_bytes(s):
    """Parse a string indicating a byte quantity into an integer"""
    return lookup_unit_table(
        {u: 1024**i for i, u in enumerate(['', *'KMGTPEZY'])},
        s.upper(), strict=True)
Return the number of a month by (locale-independently) English name
def month_by_name(name, lang='en'): """ Return the number of a month by (locale-independently) English name """ month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en']) try: return month_names.index(name) + 1 except ValueError: return None
Return the number of a month by (locale-independently) English abbreviations
def month_by_abbreviation(abbrev): """ Return the number of a month by (locale-independently) English abbreviations """ try: return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1 except ValueError: return None
Replace all the '&' by '&amp;' in XML
def fix_xml_ampersands(xml_str):
    """Replace all the '&' by '&amp;' in XML"""
    return re.sub(
        r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
        '&amp;',
        xml_str)
This implementation is inconsistent, but is kept for compatibility. Use this only for "webpage_url_domain"
def get_domain(url):
    """
    This implementation is inconsistent, but is kept for compatibility.
    Use this only for "webpage_url_domain"
    """
    return remove_start(urllib.parse.urlparse(url).netloc, 'www.') or None
A more relaxed version of int_or_none
def str_to_int(int_str): """ A more relaxed version of int_or_none """ if isinstance(int_str, int): return int_str elif isinstance(int_str, str): int_str = re.sub(r'[,\.\+]', '', int_str) return int_or_none(int_str)
Checks if the given binary is installed somewhere in PATH, and returns its name. args can be a list of arguments for a short output (like -version)
def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. args can be a list of arguments for a short output (like -version) """ try: Popen.run([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError: return False return exe
Returns the version of the specified executable, or False if the executable is not present
def get_exe_version(exe, args=['--version'], version_re=None, unrecognized=('present', 'broken')): """ Returns the version of the specified executable, or False if the executable is not present """ unrecognized = variadic(unrecognized) assert len(unrecognized) in (1, 2) out = _get_exe_version_output(exe, args) if out is None: return unrecognized[-1] return out and detect_exe_version(out, version_re, unrecognized[0])
Float range
def frange(start=0, stop=None, step=1): """Float range""" if stop is None: start, stop = 0, start sign = [-1, 1][step > 0] if step else 0 while sign * start < sign * stop: yield start start += step
Replace URL components specified by kwargs @param url str or parse url tuple @param query_update update query @returns str
def update_url(url, *, query_update=None, **kwargs):
    """Replace URL components specified by kwargs
       @param url           str or parse url tuple
       @param query_update  update query
       @returns             str
    """
    if isinstance(url, str):
        if not kwargs and not query_update:
            return url
        else:
            url = urllib.parse.urlparse(url)
    if query_update:
        assert 'query' not in kwargs, 'query_update and query cannot be specified at the same time'
        kwargs['query'] = urllib.parse.urlencode({
            **urllib.parse.parse_qs(url.query),
            **query_update
        }, True)
    return urllib.parse.urlunparse(url._replace(**kwargs))
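Illustrative calls with made-up URLs:

update_url('https://example.com/watch?v=abc', query_update={'t': '42'})
# -> 'https://example.com/watch?v=abc&t=42'
update_url('https://example.com/path', scheme='http', fragment='top')
# -> 'http://example.com/path#top'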
Encode a dict to RFC 7578-compliant form-data data: A dict where keys and values can be either Unicode or bytes-like objects. boundary: If specified a Unicode object, it's used as the boundary. Otherwise a random boundary is generated. Reference: https://tools.ietf.org/html/rfc7578
def multipart_encode(data, boundary=None): ''' Encode a dict to RFC 7578-compliant form-data data: A dict where keys and values can be either Unicode or bytes-like objects. boundary: If specified a Unicode object, it's used as the boundary. Otherwise a random boundary is generated. Reference: https://tools.ietf.org/html/rfc7578 ''' has_specified_boundary = boundary is not None while True: if boundary is None: boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff)) try: out, content_type = _multipart_encode_impl(data, boundary) break except ValueError: if has_specified_boundary: raise boundary = None return out, content_type
Get a numeric quality value out of a list of possible values
def qualities(quality_ids): """ Get a numeric quality value out of a list of possible values """ def q(qid): try: return quality_ids.index(qid) except ValueError: return -1 return q
Add ellipses to overly long strings
def limit_length(s, length): """ Add ellipses to overly long strings """ if s is None: return None ELLIPSES = '...' if len(s) > length: return s[:length - len(ELLIPSES)] + ELLIPSES return s
Returns if yt-dlp can be updated with -U
def ytdl_is_updateable(): """ Returns if yt-dlp can be updated with -U """ from ..update import is_non_updateable return not is_non_updateable()
Returns True iff the content should be blocked
def age_restricted(content_limit, age_limit): """ Returns True iff the content should be blocked """ if age_limit is None: # No limit set return False if content_limit is None: return False # Content available for everyone return age_limit < content_limit
Detect whether a file contains HTML by examining its first bytes.
def is_html(first_bytes): """ Detect whether a file contains HTML by examining its first bytes. """ encoding = 'utf-8' for bom, enc in BOMS: while first_bytes.startswith(bom): encoding, first_bytes = enc, first_bytes[len(bom):] return re.match(r'^\s*<', first_bytes.decode(encoding, 'replace'))
Render a list of rows, each as a list of values. Text after a \t will be right aligned
def render_table(header_row, data, delim=False, extra_gap=0, hide_empty=False): """ Render a list of rows, each as a list of values. Text after a \t will be right aligned """ def width(string): return len(remove_terminal_sequences(string).replace('\t', '')) def get_max_lens(table): return [max(width(str(v)) for v in col) for col in zip(*table)] def filter_using_list(row, filterArray): return [col for take, col in itertools.zip_longest(filterArray, row, fillvalue=True) if take] max_lens = get_max_lens(data) if hide_empty else [] header_row = filter_using_list(header_row, max_lens) data = [filter_using_list(row, max_lens) for row in data] table = [header_row] + data max_lens = get_max_lens(table) extra_gap += 1 if delim: table = [header_row, [delim * (ml + extra_gap) for ml in max_lens]] + data table[1][-1] = table[1][-1][:-extra_gap * len(delim)] # Remove extra_gap from end of delimiter for row in table: for pos, text in enumerate(map(str, row)): if '\t' in text: row[pos] = text.replace('\t', ' ' * (max_lens[pos] - width(text))) + ' ' * extra_gap else: row[pos] = text + ' ' * (max_lens[pos] - width(text) + extra_gap) ret = '\n'.join(''.join(row).rstrip() for row in table) return ret
Filter a dictionary with a simple string syntax. @returns Whether the filter passes @param incomplete Set of keys that is expected to be missing from dct. Can be True/False to indicate all/none of the keys may be missing. All conditions on incomplete keys pass if the key is missing
def match_str(filter_str, dct, incomplete=False): """ Filter a dictionary with a simple string syntax. @returns Whether the filter passes @param incomplete Set of keys that is expected to be missing from dct. Can be True/False to indicate all/none of the keys may be missing. All conditions on incomplete keys pass if the key is missing """ return all( _match_one(filter_part.replace(r'\&', '&'), dct, incomplete) for filter_part in re.split(r'(?<!\\)&', filter_str))
@param dfxp_data A bytes-like object containing DFXP data @returns A unicode object containing converted SRT data
def dfxp2srt(dfxp_data): ''' @param dfxp_data A bytes-like object containing DFXP data @returns A unicode object containing converted SRT data ''' LEGACY_NAMESPACES = ( (b'http://www.w3.org/ns/ttml', [ b'http://www.w3.org/2004/11/ttaf1', b'http://www.w3.org/2006/04/ttaf1', b'http://www.w3.org/2006/10/ttaf1', ]), (b'http://www.w3.org/ns/ttml#styling', [ b'http://www.w3.org/ns/ttml#style', ]), ) SUPPORTED_STYLING = [ 'color', 'fontFamily', 'fontSize', 'fontStyle', 'fontWeight', 'textDecoration' ] _x = functools.partial(xpath_with_ns, ns_map={ 'xml': 'http://www.w3.org/XML/1998/namespace', 'ttml': 'http://www.w3.org/ns/ttml', 'tts': 'http://www.w3.org/ns/ttml#styling', }) styles = {} default_style = {} class TTMLPElementParser: _out = '' _unclosed_elements = [] _applied_styles = [] def start(self, tag, attrib): if tag in (_x('ttml:br'), 'br'): self._out += '\n' else: unclosed_elements = [] style = {} element_style_id = attrib.get('style') if default_style: style.update(default_style) if element_style_id: style.update(styles.get(element_style_id, {})) for prop in SUPPORTED_STYLING: prop_val = attrib.get(_x('tts:' + prop)) if prop_val: style[prop] = prop_val if style: font = '' for k, v in sorted(style.items()): if self._applied_styles and self._applied_styles[-1].get(k) == v: continue if k == 'color': font += ' color="%s"' % v elif k == 'fontSize': font += ' size="%s"' % v elif k == 'fontFamily': font += ' face="%s"' % v elif k == 'fontWeight' and v == 'bold': self._out += '<b>' unclosed_elements.append('b') elif k == 'fontStyle' and v == 'italic': self._out += '<i>' unclosed_elements.append('i') elif k == 'textDecoration' and v == 'underline': self._out += '<u>' unclosed_elements.append('u') if font: self._out += '<font' + font + '>' unclosed_elements.append('font') applied_style = {} if self._applied_styles: applied_style.update(self._applied_styles[-1]) applied_style.update(style) self._applied_styles.append(applied_style) self._unclosed_elements.append(unclosed_elements) def end(self, tag): if tag not in (_x('ttml:br'), 'br'): unclosed_elements = self._unclosed_elements.pop() for element in reversed(unclosed_elements): self._out += '</%s>' % element if unclosed_elements and self._applied_styles: self._applied_styles.pop() def data(self, data): self._out += data def close(self): return self._out.strip() # Fix UTF-8 encoded file wrongly marked as UTF-16. 
See https://github.com/yt-dlp/yt-dlp/issues/6543#issuecomment-1477169870 # This will not trigger false positives since only UTF-8 text is being replaced dfxp_data = dfxp_data.replace(b'encoding=\'UTF-16\'', b'encoding=\'UTF-8\'') def parse_node(node): target = TTMLPElementParser() parser = xml.etree.ElementTree.XMLParser(target=target) parser.feed(xml.etree.ElementTree.tostring(node)) return parser.close() for k, v in LEGACY_NAMESPACES: for ns in v: dfxp_data = dfxp_data.replace(ns, k) dfxp = compat_etree_fromstring(dfxp_data) out = [] paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p') if not paras: raise ValueError('Invalid dfxp/TTML subtitle') repeat = False while True: for style in dfxp.findall(_x('.//ttml:style')): style_id = style.get('id') or style.get(_x('xml:id')) if not style_id: continue parent_style_id = style.get('style') if parent_style_id: if parent_style_id not in styles: repeat = True continue styles[style_id] = styles[parent_style_id].copy() for prop in SUPPORTED_STYLING: prop_val = style.get(_x('tts:' + prop)) if prop_val: styles.setdefault(style_id, {})[prop] = prop_val if repeat: repeat = False else: break for p in ('body', 'div'): ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p]) if ele is None: continue style = styles.get(ele.get('style')) if not style: continue default_style.update(style) for para, index in zip(paras, itertools.count(1)): begin_time = parse_dfxp_time_expr(para.attrib.get('begin')) end_time = parse_dfxp_time_expr(para.attrib.get('end')) dur = parse_dfxp_time_expr(para.attrib.get('dur')) if begin_time is None: continue if not end_time: if not dur: continue end_time = begin_time + dur out.append('%d\n%s --> %s\n%s\n\n' % ( index, srt_subtitles_timecode(begin_time), srt_subtitles_timecode(end_time), parse_node(para))) return ''.join(out)
long_to_bytes(n:long, blocksize:int) : string Convert a long integer to a byte string. If optional blocksize is given and greater than zero, pad the front of the byte string with binary zeros so that the length is a multiple of blocksize.
def long_to_bytes(n, blocksize=0): """long_to_bytes(n:long, blocksize:int) : string Convert a long integer to a byte string. If optional blocksize is given and greater than zero, pad the front of the byte string with binary zeros so that the length is a multiple of blocksize. """ # after much testing, this algorithm was deemed to be the fastest s = b'' n = int(n) while n > 0: s = struct.pack('>I', n & 0xffffffff) + s n = n >> 32 # strip off leading zeros for i in range(len(s)): if s[i] != b'\000'[0]: break else: # only happens when n == 0 s = b'\000' i = 0 s = s[i:] # add back some pad bytes. this could be done more efficiently w.r.t. the # de-padding being done above, but sigh... if blocksize > 0 and len(s) % blocksize: s = (blocksize - len(s) % blocksize) * b'\000' + s return s
bytes_to_long(string) : long Convert a byte string to a long integer. This is (essentially) the inverse of long_to_bytes().
def bytes_to_long(s):
    """bytes_to_long(string) : long
    Convert a byte string to a long integer.

    This is (essentially) the inverse of long_to_bytes().
    """
    acc = 0
    length = len(s)
    if length % 4:
        extra = (4 - length % 4)
        s = b'\000' * extra + s
        length = length + extra
    for i in range(0, length, 4):
        acc = (acc << 32) + struct.unpack('>I', s[i:i + 4])[0]
    return acc
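A round-trip example (65537 = 0x010001):

long_to_bytes(65537)            # -> b'\x01\x00\x01'
long_to_bytes(65537, 4)         # -> b'\x00\x01\x00\x01'  (front-padded to a multiple of 4)
bytes_to_long(b'\x01\x00\x01')  # -> 65537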
Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/ Input: data: data to encrypt, bytes-like object exponent, modulus: parameter e and N of RSA algorithm, both integer Output: hex string of encrypted data Limitation: supports one block encryption only
def ohdave_rsa_encrypt(data, exponent, modulus): ''' Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/ Input: data: data to encrypt, bytes-like object exponent, modulus: parameter e and N of RSA algorithm, both integer Output: hex string of encrypted data Limitation: supports one block encryption only ''' payload = int(binascii.hexlify(data[::-1]), 16) encrypted = pow(payload, exponent, modulus) return '%x' % encrypted
Padding input data with PKCS#1 scheme @param {int[]} data input data @param {int} length target length @returns {int[]} padded data
def pkcs1pad(data, length): """ Padding input data with PKCS#1 scheme @param {int[]} data input data @param {int} length target length @returns {int[]} padded data """ if len(data) > length - 11: raise ValueError('Input data too long for PKCS#1 padding') pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)] return [0, 2] + pseudo_random + [0] + data
Convert given int to a base-n string
def encode_base_n(num, n=None, table=None):
    """Convert given int to a base-n string"""
    table = _base_n_table(n, table)
    if not num:
        return table[0]

    result, base = '', len(table)
    while num:
        result = table[num % base] + result
        num = num // base
    return result
Convert given base-n string to int
def decode_base_n(string, n=None, table=None):
    """Convert given base-n string to int"""
    table = {char: index for index, char in enumerate(_base_n_table(n, table))}
    result, base = 0, len(table)
    for char in string:
        result = result * base + table[char]
    return result
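Assuming _base_n_table defaults to the digits 0-9 followed by lower- and upper-case letters, the two functions invert each other:

encode_base_n(255, 16)   # -> 'ff'
decode_base_n('ff', 16)  # -> 255
encode_base_n(0, 36)     # -> '0'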
Converts an IRI (Internationalized Resource Identifier, allowing Unicode characters) to a URI (Uniform Resource Identifier, ASCII-only). The function doesn't add an additional layer of escaping; e.g., it doesn't escape `%3C` as `%253C`. Instead, it percent-escapes characters with an underlying UTF-8 encoding *besides* those already escaped, leaving the URI intact.
def iri_to_uri(iri): """ Converts an IRI (Internationalized Resource Identifier, allowing Unicode characters) to a URI (Uniform Resource Identifier, ASCII-only). The function doesn't add an additional layer of escaping; e.g., it doesn't escape `%3C` as `%253C`. Instead, it percent-escapes characters with an underlying UTF-8 encoding *besides* those already escaped, leaving the URI intact. """ iri_parts = urllib.parse.urlparse(iri) if '[' in iri_parts.netloc: raise ValueError('IPv6 URIs are not, yet, supported.') # Querying `.netloc`, when there's only one bracket, also raises a ValueError. # The `safe` argument values, that the following code uses, contain the characters that should not be percent-encoded. Everything else but letters, digits and '_.-' will be percent-encoded with an underlying UTF-8 encoding. Everything already percent-encoded will be left as is. net_location = '' if iri_parts.username: net_location += urllib.parse.quote(iri_parts.username, safe=r"!$%&'()*+,~") if iri_parts.password is not None: net_location += ':' + urllib.parse.quote(iri_parts.password, safe=r"!$%&'()*+,~") net_location += '@' net_location += iri_parts.hostname.encode('idna').decode() # Punycode for Unicode hostnames. # The 'idna' encoding produces ASCII text. if iri_parts.port is not None and iri_parts.port != 80: net_location += ':' + str(iri_parts.port) return urllib.parse.urlunparse( (iri_parts.scheme, net_location, urllib.parse.quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"), # Unsure about the `safe` argument, since this is a legacy way of handling parameters. urllib.parse.quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"), # Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component. urllib.parse.quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"), urllib.parse.quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))
Returns TZ-aware time in seconds since the epoch (1970-01-01T00:00:00Z)
def time_seconds(**kwargs): """ Returns TZ-aware time in seconds since the epoch (1970-01-01T00:00:00Z) """ return time.time() + dt.timedelta(**kwargs).total_seconds()
Ref: https://bugs.python.org/issue30075
def windows_enable_vt_mode(): """Ref: https://bugs.python.org/issue30075 """ if get_windows_version() < (10, 0, 10586): return import ctypes import ctypes.wintypes import msvcrt ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 dll = ctypes.WinDLL('kernel32', use_last_error=False) handle = os.open('CONOUT$', os.O_RDWR) try: h_out = ctypes.wintypes.HANDLE(msvcrt.get_osfhandle(handle)) dw_original_mode = ctypes.wintypes.DWORD() success = dll.GetConsoleMode(h_out, ctypes.byref(dw_original_mode)) if not success: raise Exception('GetConsoleMode failed') success = dll.SetConsoleMode(h_out, ctypes.wintypes.DWORD( dw_original_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)) if not success: raise Exception('SetConsoleMode failed') finally: os.close(handle) global WINDOWS_VT_MODE WINDOWS_VT_MODE = True supports_terminal_sequences.cache_clear()
Find the largest format dimensions in terms of video width and, for each thumbnail: * Modify the URL: Match the width with the provided regex and replace with the former width * Update dimensions This function is useful with video services that scale the provided thumbnails on demand
def scale_thumbnails_to_max_format_width(formats, thumbnails, url_width_re): """ Find the largest format dimensions in terms of video width and, for each thumbnail: * Modify the URL: Match the width with the provided regex and replace with the former width * Update dimensions This function is useful with video services that scale the provided thumbnails on demand """ _keys = ('width', 'height') max_dimensions = max( (tuple(format.get(k) or 0 for k in _keys) for format in formats), default=(0, 0)) if not max_dimensions[0]: return thumbnails return [ merge_dicts( {'url': re.sub(url_width_re, str(max_dimensions[0]), thumbnail['url'])}, dict(zip(_keys, max_dimensions)), thumbnail) for thumbnail in thumbnails ]
Parse value of "Range" or "Content-Range" HTTP header into tuple.
def parse_http_range(range):
    """ Parse value of "Range" or "Content-Range" HTTP header into tuple. """
    if not range:
        return None, None, None
    crg = re.search(r'bytes[ =](\d+)-(\d+)?(?:/(\d+))?', range)
    if not crg:
        return None, None, None
    return int(crg.group(1)), int_or_none(crg.group(2)), int_or_none(crg.group(3))
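Example header values:

parse_http_range('bytes=500-999')     # -> (500, 999, None)
parse_http_range('bytes 0-499/1234')  # -> (0, 499, 1234)   (Content-Range includes the total size)
parse_http_range(None)                # -> (None, None, None)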
Detect the text encoding used @returns (encoding, bytes to skip)
def determine_file_encoding(data): """ Detect the text encoding used @returns (encoding, bytes to skip) """ # BOM marks are given priority over declarations for bom, enc in BOMS: if data.startswith(bom): return enc, len(bom) # Strip off all null bytes to match even when UTF-16 or UTF-32 is used. # We ignore the endianness to get a good enough match data = data.replace(b'\0', b'') mobj = re.match(rb'(?m)^#\s*coding\s*:\s*(\S+)\s*$', data) return mobj.group(1).decode() if mobj else None, 0
Merge dicts of http headers case insensitively, prioritizing the latter ones
def merge_headers(*dicts):
    """Merge dicts of http headers case insensitively, prioritizing the latter ones"""
    return {k.title(): v for k, v in itertools.chain.from_iterable(map(dict.items, dicts))}
Cache a method
def cached_method(f):
    """Cache a method"""
    signature = inspect.signature(f)

    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        bound_args = signature.bind(self, *args, **kwargs)
        bound_args.apply_defaults()
        key = tuple(bound_args.arguments.values())[1:]

        cache = vars(self).setdefault('_cached_method__cache', {}).setdefault(f.__name__, {})
        if key not in cache:
            cache[key] = f(self, *args, **kwargs)
        return cache[key]
    return wrapper
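A minimal sketch of how the decorator is used (class and method names are hypothetical); results are cached per instance and per argument tuple:

class Fetcher:
    @cached_method
    def fetch(self, url):
        print('downloading', url)  # executed only once per distinct url
        return f'<html for {url}>'

f = Fetcher()
f.fetch('https://example.com')  # computes and caches
f.fetch('https://example.com')  # returned from the per-instance cache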
@param tbr: Total bitrate in kbps (1000 bits/sec) @param duration: Duration in seconds @returns Filesize in bytes
def filesize_from_tbr(tbr, duration):
    """
    @param tbr:      Total bitrate in kbps (1000 bits/sec)
    @param duration: Duration in seconds
    @returns         Filesize in bytes
    """
    if tbr is None or duration is None:
        return None
    return int(duration * tbr * (1000 / 8))
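For example:

filesize_from_tbr(1000, 60)  # 1000 kbps for 60 s -> 7_500_000 bytes
filesize_from_tbr(None, 60)  # -> None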
Make an extension for an AdjustedArrayWindow specialization.
def window_specialization(typename):
    """Make an extension for an AdjustedArrayWindow specialization."""
    return Extension(
        'zipline.lib._{name}window'.format(name=typename),
        ['zipline/lib/_{name}window.pyx'.format(name=typename)],
        depends=['zipline/lib/_windowtemplate.pxi'],
    )
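In setup.py this would typically feed a list of Cython extensions, one per window dtype; the typenames below are illustrative rather than the authoritative set:

ext_modules = [
    window_specialization(typename)
    for typename in ('float64', 'int64', 'datetime64', 'bool', 'object')  # hypothetical list
]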
Read a requirements file, expressed as a path relative to Zipline root.
def read_requirements(path, conda_format=False, filter_names=None): """ Read a requirements file, expressed as a path relative to Zipline root. """ real_path = join(dirname(abspath(__file__)), path) with open(real_path) as f: reqs = _filter_requirements(f.readlines(), filter_names=filter_names, filter_sys_version=not conda_format) if conda_format: reqs = map(_conda_format, reqs) return list(reqs)
Generate test cases for the type of asset finder specified by asset_finder_type for test_lookup_generic.
def build_lookup_generic_cases(): """ Generate test cases for the type of asset finder specific by asset_finder_type for test_lookup_generic. """ unique_start = pd.Timestamp('2013-01-01', tz='UTC') unique_end = pd.Timestamp('2014-01-01', tz='UTC') dupe_old_start = pd.Timestamp('2013-01-01', tz='UTC') dupe_old_end = pd.Timestamp('2013-01-02', tz='UTC') dupe_new_start = pd.Timestamp('2013-01-03', tz='UTC') dupe_new_end = pd.Timestamp('2013-01-03', tz='UTC') equities = pd.DataFrame.from_records( [ # These symbols are duplicated within the US, but have different # lifetimes. { 'sid': 0, 'symbol': 'duplicated_in_us', 'start_date': dupe_old_start.value, 'end_date': dupe_old_end.value, 'exchange': 'US_EXCHANGE', }, { 'sid': 1, 'symbol': 'duplicated_in_us', 'start_date': dupe_new_start.value, 'end_date': dupe_new_end.value, 'exchange': 'US_EXCHANGE', }, # This asset is unique. { 'sid': 2, 'symbol': 'unique', 'start_date': unique_start.value, 'end_date': unique_end.value, 'exchange': 'US_EXCHANGE', }, # These assets appear with the same ticker at the same time in # different countries. { 'sid': 3, 'symbol': 'duplicated_globally', 'start_date': unique_start.value, 'end_date': unique_start.value, 'exchange': 'US_EXCHANGE', }, { 'sid': 4, 'symbol': 'duplicated_globally', 'start_date': unique_start.value, 'end_date': unique_start.value, 'exchange': 'CA_EXCHANGE', }, ], index='sid' ) fof14_sid = 10000 futures = pd.DataFrame.from_records( [ { 'sid': fof14_sid, 'symbol': 'FOF14', 'root_symbol': 'FO', 'start_date': unique_start.value, 'end_date': unique_end.value, 'auto_close_date': unique_end.value, 'exchange': 'US_FUT', }, ], index='sid' ) root_symbols = pd.DataFrame({ 'root_symbol': ['FO'], 'root_symbol_id': [1], 'exchange': ['US_FUT'], }) exchanges = pd.DataFrame.from_records([ {'exchange': 'US_EXCHANGE', 'country_code': 'US'}, {'exchange': 'CA_EXCHANGE', 'country_code': 'CA'}, {'exchange': 'US_FUT', 'country_code': 'US'}, ]) temp_db = tmp_assets_db( equities=equities, futures=futures, root_symbols=root_symbols, exchanges=exchanges, ) with temp_db as assets_db: finder = AssetFinder(assets_db) case = partial(Case, finder) equities = finder.retrieve_all(range(5)) dupe_old, dupe_new, unique, dupe_us, dupe_ca = equities fof14 = finder.retrieve_asset(fof14_sid) cf = finder.create_continuous_future( root_symbol=fof14.root_symbol, offset=0, roll_style='volume', adjustment=None, ) all_assets = list(equities) + [fof14, cf] for asset in list(equities) + [fof14, cf]: # Looking up an asset object directly should yield itself. yield case(asset, None, None, asset) # Looking up an asset by sid should yield the asset. yield case(asset.sid, None, None, asset) # Duplicated US equity symbol with resolution date. for country in ('US', None): # On or before dupe_new_start, we should get dupe_old. yield case('DUPLICATED_IN_US', dupe_old_start, country, dupe_old) yield case( 'DUPLICATED_IN_US', dupe_new_start - minute, country, dupe_old, ) # After that, we should get dupe_new. yield case('DUPLICATED_IN_US', dupe_new_start, country, dupe_new) yield case( 'DUPLICATED_IN_US', dupe_new_start + minute, country, dupe_new, ) # Unique symbol, disambiguated by country, with or without resolution # date. for asset, country in ((dupe_us, 'US'), (dupe_ca, 'CA')): yield case('DUPLICATED_GLOBALLY', unique_start, country, asset) yield case('DUPLICATED_GLOBALLY', None, country, asset) # Future symbols should be unique, but including as_of date # make sure that code path is exercised. 
yield case('FOF14', None, None, fof14) yield case('FOF14', unique_start, None, fof14) ## # Iterables # Iterables of Asset objects. yield case(all_assets, None, None, all_assets) yield case(iter(all_assets), None, None, all_assets) # Iterables of ints yield case((0, 1), None, None, equities[:2]) yield case(iter((0, 1)), None, None, equities[:2]) # Iterables of symbols. yield case( inputs=('DUPLICATED_IN_US', 'UNIQUE', 'DUPLICATED_GLOBALLY'), as_of=dupe_old_start, country_code='US', expected=[dupe_old, unique, dupe_us], ) yield case( inputs=['DUPLICATED_GLOBALLY'], as_of=dupe_new_start, country_code='CA', expected=[dupe_ca], ) # Mixed types yield case( inputs=( 'DUPLICATED_IN_US', # dupe_old b/c of as_of dupe_new, # dupe_new 2, # unique 'UNIQUE', # unique 'DUPLICATED_GLOBALLY', # dupe_us b/c of country_code dupe_ca, # dupe_ca ), as_of=dupe_old_start, country_code='US', expected=[dupe_old, dupe_new, unique, unique, dupe_us, dupe_ca], ) # Futures and Equities yield case(['FOF14', 0], None, None, [fof14, equities[0]]) yield case( inputs=['FOF14', 'DUPLICATED_IN_US', 'DUPLICATED_GLOBALLY'], as_of=dupe_new_start, country_code='US', expected=[fof14, dupe_new, dupe_us], ) # ContinuousFuture and Equity yield case([cf, 0], None, None, [cf, equities[0]]) yield case( [cf, 'DUPLICATED_IN_US', 'DUPLICATED_GLOBALLY'], as_of=dupe_new_start, country_code='US', expected=[cf, dupe_new, dupe_us], )
Rotate a list of elements. Pulls N elements off the end of the list and appends them to the front. >>> rotN(['a', 'b', 'c', 'd'], 2) ['c', 'd', 'a', 'b'] >>> rotN(['a', 'b', 'c', 'd'], 3) ['d', 'a', 'b', 'c']
def rotN(l, N): """ Rotate a list of elements. Pulls N elements off the end of the list and appends them to the front. >>> rotN(['a', 'b', 'c', 'd'], 2) ['c', 'd', 'a', 'b'] >>> rotN(['a', 'b', 'c', 'd'], 3) ['d', 'a', 'b', 'c'] """ assert len(l) >= N, "Can't rotate list by longer than its length." return l[N:] + l[:N]
500 randomly selected days. This is used to make sure our test coverage is unbiased towards any rules. We use a random sample because testing on all the trading days took around 180 seconds on my laptop, which is far too much for normal unit testing. We manually set the seed so that this will be deterministic. Results of multiple runs were compared to make sure that this is actually true. This returns a generator of tuples each wrapping a single generator. Iterating over this yields a single day, iterating over the day yields the minutes for that day.
def minutes_for_days(cal, ordered_days=False):
    """
    500 randomly selected days.
    This is used to make sure our test coverage is unbiased towards any rules.
    We use a random sample because testing on all the trading days took
    around 180 seconds on my laptop, which is far too much for normal unit
    testing.

    We manually set the seed so that this will be deterministic.
    Results of multiple runs were compared to make sure that this is actually
    true.

    This returns a generator of tuples each wrapping a single generator.
    Iterating over this yields a single day, iterating over the day yields
    the minutes for that day.
    """
    random.seed('deterministic')

    if ordered_days:
        # Get a list of 500 trading days, in order. As a performance
        # optimization in AfterOpen and BeforeClose, we rely on the fact that
        # the clock only ever moves forward in a simulation. For those cases,
        # we guarantee that the list of trading days we test is ordered.
        ordered_session_list = random.sample(list(cal.all_sessions), 500)
        ordered_session_list.sort()

        def session_picker(day):
            return ordered_session_list[day]
    else:
        # Other than AfterOpen and BeforeClose, we don't rely on the nature
        # of the clock, so we don't care.
        def session_picker(day):
            return random.choice(cal.all_sessions[:-1])

    return [cal.minutes_for_session(session_picker(cnt))
            for cnt in range(500)]
Utility method to generate fake minute-level CSV data. :param first_day: first trading day :param last_day: last trading day :param starting_open: first open value, raw value. :param starting_volume: first volume value, raw value. :param multipliers_list: ordered list of pd.Timestamp -> float, one per day in the range :param path: path to save the CSV :return: None
def generate_minute_test_data(first_day,
                              last_day,
                              starting_open,
                              starting_volume,
                              multipliers_list,
                              path):
    """
    Utility method to generate fake minute-level CSV data.
    :param first_day: first trading day
    :param last_day: last trading day
    :param starting_open: first open value, raw value.
    :param starting_volume: first volume value, raw value.
    :param multipliers_list: ordered list of pd.Timestamp -> float, one per day
           in the range
    :param path: path to save the CSV
    :return: None
    """
    full_minutes = BcolzMinuteBarWriter.full_minutes_for_days(
        first_day, last_day)
    minutes_count = len(full_minutes)

    cal = get_calendar('XNYS')
    minutes = cal.minutes_for_sessions_in_range(first_day, last_day)

    o = np.zeros(minutes_count, dtype=np.uint32)
    h = np.zeros(minutes_count, dtype=np.uint32)
    l = np.zeros(minutes_count, dtype=np.uint32)  # noqa: E741
    c = np.zeros(minutes_count, dtype=np.uint32)
    v = np.zeros(minutes_count, dtype=np.uint32)

    last_open = starting_open * 1000
    last_volume = starting_volume

    for minute in minutes:
        # ugly, but works
        idx = full_minutes.searchsorted(minute)

        new_open = last_open + round((random.random() * 5), 2)

        o[idx] = new_open
        h[idx] = new_open + round((random.random() * 10000), 2)
        l[idx] = new_open - round((random.random() * 10000), 2)
        c[idx] = (h[idx] + l[idx]) / 2
        v[idx] = int(last_volume + (random.randrange(-10, 10) * 1e4))

        last_open = o[idx]
        last_volume = v[idx]

    # now deal with multipliers
    if len(multipliers_list) > 0:
        for idx, multiplier_info in enumerate(multipliers_list):
            start_idx = idx * 390
            end_idx = start_idx + 390

            # dividing by the multiplier because we're going backwards
            # and generating the original data that will then be adjusted.
            o[start_idx:end_idx] /= multiplier_info[1]
            h[start_idx:end_idx] /= multiplier_info[1]
            l[start_idx:end_idx] /= multiplier_info[1]
            c[start_idx:end_idx] /= multiplier_info[1]

            v[start_idx:end_idx] *= multiplier_info[1]

    df = pd.DataFrame({
        "open": o,
        "high": h,
        "low": l,
        "close": c,
        "volume": v
    }, columns=[
        "open",
        "high",
        "low",
        "close",
        "volume"
    ], index=minutes)

    df.to_csv(path, index_label="minute")
Extract all of the fields from the portfolio as a new dictionary.
def portfolio_snapshot(p): """Extract all of the fields from the portfolio as a new dictionary. """ fields = ( 'cash_flow', 'starting_cash', 'portfolio_value', 'pnl', 'returns', 'cash', 'positions', 'positions_value', 'positions_exposure', ) return {field: getattr(p, field) for field in fields}
Decorator for providing dynamic default values for a method. Usages: @with_defaults(foo=lambda self: self.x + self.y) def func(self, foo): ... If a value is passed for `foo`, it will be used. Otherwise the function supplied to `with_defaults` will be called with `self` as an argument.
def with_defaults(**default_funcs):
    """
    Decorator for providing dynamic default values for a method.

    Usages:

    @with_defaults(foo=lambda self: self.x + self.y)
    def func(self, foo):
        ...

    If a value is passed for `foo`, it will be used. Otherwise the function
    supplied to `with_defaults` will be called with `self` as an argument.
    """
    def decorator(f):
        @wraps(f)
        def method(self, *args, **kwargs):
            for name, func in iteritems(default_funcs):
                if name not in kwargs:
                    kwargs[name] = func(self)
            return f(self, *args, **kwargs)
        return method
    return decorator
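A concrete sketch with a hypothetical class; note the default is only injected for keyword arguments that were not supplied:

class Example:
    def __init__(self, x, y):
        self.x, self.y = x, y

    @with_defaults(foo=lambda self: self.x + self.y)
    def func(self, foo=None):
        return foo

Example(1, 2).func()        # -> 3, default computed from the instance
Example(1, 2).func(foo=10)  # -> 10, an explicit keyword argument wins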
Simple moving window generator over a 2D numpy array.
def moving_window(array, nrows):
    """
    Simple moving window generator over a 2D numpy array.
    """
    count = num_windows_of_length_M_on_buffers_of_length_N(nrows, len(array))
    for i in range(count):
        yield array[i:i + nrows]
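For instance (a sketch assuming numpy is imported as `np` in the surrounding module), a 4x3 buffer yields three length-2 windows:

data = np.arange(12).reshape(4, 3)
[w.tolist() for w in moving_window(data, 2)]
# -> [[[0, 1, 2], [3, 4, 5]], [[3, 4, 5], [6, 7, 8]], [[6, 7, 8], [9, 10, 11]]]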
For a window of length M rolling over a buffer of length N, there are (N - M) + 1 legal windows. Example: If my array has N=4 rows, and I want windows of length M=2, there are 3 legal windows: data[0:2], data[1:3], and data[2:4].
def num_windows_of_length_M_on_buffers_of_length_N(M, N): """ For a window of length M rolling over a buffer of length N, there are (N - M) + 1 legal windows. Example: If my array has N=4 rows, and I want windows of length M=2, there are 3 legal windows: data[0:2], data[1:3], and data[2:4]. """ return N - M + 1
An iterator of all legal window lengths on a buffer of a given length. Returns values from 1 to underlying_buffer_length.
def valid_window_lengths(underlying_buffer_length): """ An iterator of all legal window lengths on a buffer of a given length. Returns values from 1 to underlying_buffer_length. """ return iter(range(1, underlying_buffer_length + 1))
Curried wrapper around array.astype for when you have the dtype before you have the data.
def as_dtype(dtype, data): """ Curried wrapper around array.astype for when you have the dtype before you have the data. """ return asarray(data).astype(dtype)
Curried wrapper around LabelArray, that round-trips the input data through `initial_dtype` first.
def as_labelarray(initial_dtype, missing_value, array): """ Curried wrapper around LabelArray, that round-trips the input data through `initial_dtype` first. """ return LabelArray( array.astype(initial_dtype), missing_value=initial_dtype.type(missing_value), )
Generate expected moving windows on a buffer with adjustments.

We proceed by constructing, at each row, the view of the array we expect in all windows anchored on that row.

In general, if we have an adjustment to be applied once we process the row at index N, we should see that adjustment applied to the underlying buffer for any window containing the row at index N.

We then build all legal windows over these buffers.
def _gen_multiplicative_adjustment_cases(dtype): """ Generate expected moving windows on a buffer with adjustments. We proceed by constructing, at each row, the view of the array we expect in in all windows anchored on that row. In general, if we have an adjustment to be applied once we process the row at index N, should see that adjustment applied to the underlying buffer for any window containing the row at index N. We then build all legal windows over these buffers. """ adjustment_type = { float64_dtype: Float64Multiply, }[dtype] nrows, ncols = 6, 3 adjustments = {} buffer_as_of = [None] * 6 baseline = buffer_as_of[0] = full((nrows, ncols), 1, dtype=dtype) # Note that row indices are inclusive! adjustments[1] = [ adjustment_type(0, 0, 0, 0, coerce_to_dtype(dtype, 2)), ] buffer_as_of[1] = array([[2, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], dtype=dtype) # No adjustment at index 2. buffer_as_of[2] = buffer_as_of[1] adjustments[3] = [ adjustment_type(1, 2, 1, 1, coerce_to_dtype(dtype, 3)), adjustment_type(0, 1, 0, 0, coerce_to_dtype(dtype, 4)), ] buffer_as_of[3] = array([[8, 1, 1], [4, 3, 1], [1, 3, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], dtype=dtype) adjustments[4] = [ adjustment_type(0, 3, 2, 2, coerce_to_dtype(dtype, 5)) ] buffer_as_of[4] = array([[8, 1, 5], [4, 3, 5], [1, 3, 5], [1, 1, 5], [1, 1, 1], [1, 1, 1]], dtype=dtype) adjustments[5] = [ adjustment_type(0, 4, 1, 1, coerce_to_dtype(dtype, 6)), adjustment_type(2, 2, 2, 2, coerce_to_dtype(dtype, 7)), ] buffer_as_of[5] = array([[8, 6, 5], [4, 18, 5], [1, 18, 35], [1, 6, 5], [1, 6, 1], [1, 1, 1]], dtype=dtype) return _gen_expectations( baseline, default_missing_value_for_dtype(dtype), adjustments, buffer_as_of, nrows, perspective_offsets=(0, 1), )
Generate test cases for overwrite adjustments. The algorithm used here is the same as the one used above for multiplicative adjustments. The only difference is the semantics of how the adjustments are expected to modify the arrays. This is parameterized on `make_input` and `make_expected_output` functions, which take 2-D lists of values and transform them into desired input/output arrays. We do this so that we can easily test both vanilla numpy ndarrays and our own LabelArray class for strings.
def _gen_overwrite_adjustment_cases(dtype): """ Generate test cases for overwrite adjustments. The algorithm used here is the same as the one used above for multiplicative adjustments. The only difference is the semantics of how the adjustments are expected to modify the arrays. This is parameterized on `make_input` and `make_expected_output` functions, which take 2-D lists of values and transform them into desired input/output arrays. We do this so that we can easily test both vanilla numpy ndarrays and our own LabelArray class for strings. """ adjustment_type = { float64_dtype: Float64Overwrite, datetime64ns_dtype: Datetime64Overwrite, int64_dtype: Int64Overwrite, bytes_dtype: ObjectOverwrite, unicode_dtype: ObjectOverwrite, object_dtype: ObjectOverwrite, bool_dtype: BooleanOverwrite, }[dtype] make_expected_dtype = as_dtype(dtype) missing_value = default_missing_value_for_dtype(datetime64ns_dtype) if dtype == object_dtype: # When we're testing object dtypes, we expect to have strings, but # coerce_to_dtype(object, 3) just gives 3 as a Python integer. def make_overwrite_value(dtype, value): return str(value) else: make_overwrite_value = coerce_to_dtype adjustments = {} buffer_as_of = [None] * 6 baseline = make_expected_dtype([[2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]]) buffer_as_of[0] = make_expected_dtype([[2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]]) # Note that row indices are inclusive! adjustments[1] = [ adjustment_type(0, 0, 0, 0, make_overwrite_value(dtype, 1)), ] buffer_as_of[1] = make_expected_dtype([[1, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]]) # No adjustment at index 2. buffer_as_of[2] = buffer_as_of[1] adjustments[3] = [ adjustment_type(1, 2, 1, 1, make_overwrite_value(dtype, 3)), adjustment_type(0, 1, 0, 0, make_overwrite_value(dtype, 4)), ] buffer_as_of[3] = make_expected_dtype([[4, 2, 2], [4, 3, 2], [2, 3, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]]) adjustments[4] = [ adjustment_type(0, 3, 2, 2, make_overwrite_value(dtype, 5)) ] buffer_as_of[4] = make_expected_dtype([[4, 2, 5], [4, 3, 5], [2, 3, 5], [2, 2, 5], [2, 2, 2], [2, 2, 2]]) adjustments[5] = [ adjustment_type(0, 4, 1, 1, make_overwrite_value(dtype, 6)), adjustment_type(2, 2, 2, 2, make_overwrite_value(dtype, 7)), ] buffer_as_of[5] = make_expected_dtype([[4, 6, 5], [4, 6, 5], [2, 6, 7], [2, 6, 5], [2, 6, 2], [2, 2, 2]]) return _gen_expectations( baseline, missing_value, adjustments, buffer_as_of, nrows=6, perspective_offsets=(0, 1), )
Generate test cases for overwrite adjustments. The algorithm used here is the same as the one used above for multiplicative adjustments. The only difference is the semantics of how the adjustments are expected to modify the arrays. This is parameterized on `make_input` and `make_expected_output` functions, which take 1-D lists of values and transform them into desired input/output arrays. We do this so that we can easily test both vanilla numpy ndarrays and our own LabelArray class for strings.
def _gen_overwrite_1d_array_adjustment_case(dtype): """ Generate test cases for overwrite adjustments. The algorithm used here is the same as the one used above for multiplicative adjustments. The only difference is the semantics of how the adjustments are expected to modify the arrays. This is parameterized on `make_input` and `make_expected_output` functions, which take 1-D lists of values and transform them into desired input/output arrays. We do this so that we can easily test both vanilla numpy ndarrays and our own LabelArray class for strings. """ adjustment_type = { bool_dtype: Boolean1DArrayOverwrite, float64_dtype: Float641DArrayOverwrite, datetime64ns_dtype: Datetime641DArrayOverwrite, }[dtype] make_expected_dtype = as_dtype(dtype) missing_value = default_missing_value_for_dtype(datetime64ns_dtype) adjustments = {} buffer_as_of = [None] * 6 baseline = make_expected_dtype([[2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]]) buffer_as_of[0] = make_expected_dtype([[2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]]) vals1 = [1] # Note that row indices are inclusive! adjustments[1] = [ adjustment_type( 0, 0, 0, 0, array([coerce_to_dtype(dtype, val) for val in vals1]) ) ] buffer_as_of[1] = make_expected_dtype([[1, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]]) # No adjustment at index 2. buffer_as_of[2] = buffer_as_of[1] vals3 = [4, 4, 1] adjustments[3] = [ adjustment_type( 0, 2, 0, 0, array([coerce_to_dtype(dtype, val) for val in vals3]) ) ] buffer_as_of[3] = make_expected_dtype([[4, 2, 2], [4, 2, 2], [1, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]]) vals4 = [5] * 4 adjustments[4] = [ adjustment_type( 0, 3, 2, 2, array([coerce_to_dtype(dtype, val) for val in vals4])) ] buffer_as_of[4] = make_expected_dtype([[4, 2, 5], [4, 2, 5], [1, 2, 5], [2, 2, 5], [2, 2, 2], [2, 2, 2]]) vals5 = range(1, 6) adjustments[5] = [ adjustment_type( 0, 4, 1, 1, array([coerce_to_dtype(dtype, val) for val in vals5])), ] buffer_as_of[5] = make_expected_dtype([[4, 1, 5], [4, 2, 5], [1, 3, 5], [2, 4, 5], [2, 5, 2], [2, 2, 2]]) return _gen_expectations( baseline, missing_value, adjustments, buffer_as_of, nrows=6, perspective_offsets=(0, 1), )
Assert that a MultiIndex contains the product of `*levels`.
def assert_multi_index_is_product(testcase, index, *levels): """Assert that a MultiIndex contains the product of `*levels`.""" testcase.assertIsInstance( index, MultiIndex, "%s is not a MultiIndex" % index ) testcase.assertEqual(set(index), set(product(*levels)))
Make an event with a null event_date for all sids. Used to test that EventsLoaders filter out null events.
def make_null_event_date_events(all_sids, timestamp): """ Make an event with a null event_date for all sids. Used to test that EventsLoaders filter out null events. """ return pd.DataFrame({ 'sid': all_sids, 'timestamp': timestamp, 'event_date': pd.Timestamp('NaT'), 'float': -9999.0, 'int': -9999, 'datetime': pd.Timestamp('1980'), 'string': 'should be ignored', })
Every event has at least three pieces of data associated with it:

1. sid : The ID of the asset associated with the event.
2. event_date : The date on which an event occurred.
3. timestamp : The date on which we learned about the event.
   This can be before the occurrence_date in the case of an announcement
   about an upcoming event.

Events for two different sids shouldn't interact in any way, so the
interesting cases are determined by the possible interleavings of
event_date and timestamp for a single sid.

Fix two events with dates e1, e2 and timestamps t1 and t2.

Without loss of generality, assume that e1 < e2.
(If two events have the same occurrence date, the behavior of next/previous
event is undefined).

The remaining possible sequences of events are given by taking all possible
4-tuples of four ascending dates. For each possible interleaving, we
generate a set of fake events with those dates and assign them to a new sid.
def make_events(add_nulls): """ Every event has at least three pieces of data associated with it: 1. sid : The ID of the asset associated with the event. 2. event_date : The date on which an event occurred. 3. timestamp : The date on which we learned about the event. This can be before the occurence_date in the case of an announcement about an upcoming event. Events for two different sids shouldn't interact in any way, so the interesting cases are determined by the possible interleavings of event_date and timestamp for a single sid. Fix two events with dates e1, e2 and timestamps t1 and t2. Without loss of generality, assume that e1 < e2. (If two events have the same occurrence date, the behavior of next/previous event is undefined). The remaining possible sequences of events are given by taking all possible 4-tuples of four ascending dates. For each possible interleaving, we generate a set of fake events with those dates and assign them to a new sid. """ def gen_date_interleavings(): for e1, e2, t1, t2 in product(*[critical_dates] * 4): if e1 < e2: yield (e1, e2, t1, t2) event_frames = [] for sid, (e1, e2, t1, t2) in enumerate(gen_date_interleavings()): event_frames.append(make_events_for_sid(sid, [e1, e2], [t1, t2])) if add_nulls: for date in critical_dates: event_frames.append( make_null_event_date_events( np.arange(sid + 1), timestamp=date, ) ) return pd.concat(event_frames, ignore_index=True)
Wrapper around scipy.stats.mstats.winsorize that handles NaNs correctly. scipy's winsorize sorts NaNs to the end of the array when calculating percentiles.
def scipy_winsorize_with_nan_handling(array, limits):
    """
    Wrapper around scipy.stats.mstats.winsorize that handles NaNs correctly.

    scipy's winsorize sorts NaNs to the end of the array when calculating
    percentiles.
    """
    # The basic idea of this function is to do the following:
    # 1. Sort the input, sorting nans to the end of the array.
    # 2. Call scipy winsorize on the non-nan portion of the input.
    # 3. Undo the sorting to put the winsorized values back in their original
    #    locations.

    nancount = np.isnan(array).sum()
    if nancount == len(array):
        return array.copy()

    sorter = array.argsort()
    unsorter = sorter.argsort()  # argsorting a permutation gives its inverse!

    if nancount:
        sorted_non_nans = array[sorter][:-nancount]
    else:
        sorted_non_nans = array[sorter]

    sorted_winsorized = np.hstack([
        scipy_winsorize(sorted_non_nans, limits).data,
        np.full(nancount, np.nan),
    ])

    return sorted_winsorized[unsorter]
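A small illustration of the key property (a sketch; the exact clipped values follow scipy.stats.mstats.winsorize semantics): NaNs keep their positions and the remaining values are winsorized as if the NaNs were absent:

arr = np.array([1.0, 2.0, np.nan, 100.0, 3.0])
scipy_winsorize_with_nan_handling(arr, limits=(0.25, 0.25))
# -> array([ 2.,  2., nan,  3.,  3.]): the extremes 1.0 and 100.0 are clipped,
#    while the NaN stays at index 2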
Take a 2D array and return the 0-indexed sorted position of each element in
the array for each row.

Examples
--------
In [5]: data
Out[5]:
array([[-0.141, -1.103, -1.0171,  0.7812,  0.07  ],
       [ 0.926,  0.235, -0.7698,  1.4552,  0.2061],
       [ 1.579,  0.929, -0.557 ,  0.7896, -1.6279],
       [-1.362, -2.411, -1.4604,  1.4468, -0.1885],
       [ 1.272,  1.199, -3.2312, -0.5511, -1.9794]])

In [7]: argsort(argsort(data))
Out[7]:
array([[2, 0, 1, 4, 3],
       [3, 2, 0, 4, 1],
       [4, 3, 1, 2, 0],
       [2, 0, 1, 4, 3],
       [4, 3, 0, 2, 1]])
def rowwise_rank(array, mask=None): """ Take a 2D array and return the 0-indexed sorted position of each element in the array for each row. Examples -------- In [5]: data Out[5]: array([[-0.141, -1.103, -1.0171, 0.7812, 0.07 ], [ 0.926, 0.235, -0.7698, 1.4552, 0.2061], [ 1.579, 0.929, -0.557 , 0.7896, -1.6279], [-1.362, -2.411, -1.4604, 1.4468, -0.1885], [ 1.272, 1.199, -3.2312, -0.5511, -1.9794]]) In [7]: argsort(argsort(data)) Out[7]: array([[2, 0, 1, 4, 3], [3, 2, 0, 4, 1], [4, 3, 1, 2, 0], [2, 0, 1, 4, 3], [4, 3, 0, 2, 1]]) """ # note that unlike scipy.stats.rankdata, the output here is 0-indexed, not # 1-indexed. return argsort(argsort(array))
Iterate over ``it``, two elements at a time. ``it`` must yield an even number of times. Examples -------- >>> list(two_at_a_time([1, 2, 3, 4])) [(1, 2), (3, 4)]
def two_at_a_time(it): """Iterate over ``it``, two elements at a time. ``it`` must yield an even number of times. Examples -------- >>> list(two_at_a_time([1, 2, 3, 4])) [(1, 2), (3, 4)] """ return toolz.partition(2, it, pad=None)
Check if an asset was alive in the range from start to end.

Parameters
----------
asset : Asset
    The asset to check
start : pd.Timestamp
    Start of the interval.
end : pd.Timestamp
    End of the interval.
include_asset_start_date : bool
    Whether to include the start date of the asset when checking liveness.

Returns
-------
was_alive : bool
    Whether or not ``asset`` was alive for any days in the range from
    ``start`` to ``end``.
def alive_in_range(asset, start, end, include_asset_start_date=False):
    """
    Check if an asset was alive in the range from start to end.

    Parameters
    ----------
    asset : Asset
        The asset to check
    start : pd.Timestamp
        Start of the interval.
    end : pd.Timestamp
        End of the interval.
    include_asset_start_date : bool
        Whether to include the start date of the asset when checking liveness.

    Returns
    -------
    was_alive : bool
        Whether or not ``asset`` was alive for any days in the range from
        ``start`` to ``end``.
    """
    if include_asset_start_date:
        asset_start = asset.start_date
    else:
        asset_start = asset.start_date + pd.Timedelta('1 day')
    return intervals_overlap((asset_start, asset.end_date), (start, end))
Check whether a pair of datetime intervals overlap.

Parameters
----------
a : (pd.Timestamp, pd.Timestamp)
b : (pd.Timestamp, pd.Timestamp)

Returns
-------
have_overlap : bool
    Bool indicating whether there is a non-empty intersection between the
    intervals.
def intervals_overlap(a, b):
    """
    Check whether a pair of datetime intervals overlap.

    Parameters
    ----------
    a : (pd.Timestamp, pd.Timestamp)
    b : (pd.Timestamp, pd.Timestamp)

    Returns
    -------
    have_overlap : bool
        Bool indicating whether there is a non-empty intersection between the
        intervals.
    """
    # If the intervals do not overlap, then either the first is strictly
    # before the second, or the second is strictly before the first.
    a_strictly_before = a[1] < b[0]
    b_strictly_before = b[1] < a[0]
    return not (a_strictly_before or b_strictly_before)
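For example, intervals that merely touch at an endpoint count as overlapping, because neither ends strictly before the other begins:

jan_jun = (pd.Timestamp('2014-01-01'), pd.Timestamp('2014-06-30'))
jun_dec = (pd.Timestamp('2014-06-30'), pd.Timestamp('2014-12-31'))
next_year = (pd.Timestamp('2015-01-01'), pd.Timestamp('2015-12-31'))

intervals_overlap(jan_jun, jun_dec)    # -> True, shared endpoint
intervals_overlap(jan_jun, next_year)  # -> False, jan_jun ends strictly before next_year starts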
Simple rolling vwap implementation for testing
def rolling_vwap(df, length):
    "Simple rolling vwap implementation for testing"
    closes = df['close'].values
    volumes = df['volume'].values
    product = closes * volumes
    out = full_like(closes, nan)
    for upper_bound in range(length, len(closes) + 1):
        bounds = slice(upper_bound - length, upper_bound)
        out[upper_bound - 1] = product[bounds].sum() / volumes[bounds].sum()

    return Series(out, index=df.index)
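A worked example (assuming pandas' DataFrame is available as in the surrounding test helpers); NaN is emitted until the first full window:

df = DataFrame({'close': [10.0, 11.0, 12.0, 13.0],
                'volume': [100.0, 200.0, 100.0, 100.0]})
rolling_vwap(df, 2)
# 0          NaN
# 1    10.666667   # (10*100 + 11*200) / 300
# 2    11.333333   # (11*200 + 12*100) / 300
# 3    12.500000   # (12*100 + 13*100) / 200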