def null():
    """Event: yield to the scheduler without doing anything special."""
    return ValueEvent(None)
def spawn(coro):
    """Event: add another coroutine to the scheduler. Both the parent
    and child coroutines run concurrently.
    """
    if not isinstance(coro, types.GeneratorType):
        raise ValueError("%s is not a coroutine" % coro)
    return SpawnEvent(coro)
def call(coro):
    """Event: delegate to another coroutine. The current coroutine is
    resumed once the sub-coroutine finishes. If the sub-coroutine
    returns a value using end(), then this event returns that value.
    """
    if not isinstance(coro, types.GeneratorType):
        raise ValueError("%s is not a coroutine" % coro)
    return DelegationEvent(coro)
def end(value=None):
    """Event: ends the coroutine and returns a value to its delegator."""
    return ReturnEvent(value)
def read(fd, bufsize=None):
    """Event: read from a file descriptor asynchronously."""
    if bufsize is None:
        # Read all.
        def reader():
            buf = []
            while True:
                data = yield read(fd, 1024)
                if not data:
                    break
                buf.append(data)
            yield ReturnEvent("".join(buf))

        return DelegationEvent(reader())
    else:
        return ReadEvent(fd, bufsize)
def write(fd, data):
    """Event: write to a file descriptor asynchronously."""
    return WriteEvent(fd, data)
def connect(host, port):
    """Event: connect to a network address and return a Connection
    object for communicating on the socket.
    """
    addr = (host, port)
    sock = socket.create_connection(addr)
    return ValueEvent(Connection(sock, addr))
def sleep(duration):
    """Event: suspend the thread for ``duration`` seconds."""
    return SleepEvent(duration)
def join(coro):
    """Suspend the thread until another, previously `spawn`ed thread
    completes.
    """
    return JoinEvent(coro)
def kill(coro):
    """Halt the execution of a different `spawn`ed thread."""
    return KillEvent(coro)
def server(host, port, func):
    """A coroutine that runs a network server. Host and port specify the
    listening address. func should be a coroutine that takes a single
    parameter, a Connection object. The coroutine is invoked for every
    incoming connection on the listening socket.
    """
    def handler(conn):
        try:
            yield func(conn)
        finally:
            conn.close()

    listener = Listener(host, port)
    try:
        while True:
            conn = yield listener.accept()
            yield spawn(handler(conn))
    except KeyboardInterrupt:
        pass
    finally:
        listener.close()
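A minimal usage sketch for the event helpers above: an echo server built on server(). This assumes the surrounding scheduler module also provides a run() entry point and Connection.recv/Connection.sendall events, which are not shown in this excerpt.

def echo(conn):
    """Echo every chunk received on the connection back to the client."""
    while True:
        data = yield conn.recv(1024)  # assumed Connection event
        if not data:
            break
        yield conn.sendall(data)  # assumed Connection event

# run(server("127.0.0.1", 4915, echo))  # `run` assumed from the scheduler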
def ex_rvalue(name):
    """A variable load (rvalue) expression."""
    return ast.Name(name, ast.Load())
def ex_literal(val):
    """An int, float, bool, string, or None literal with the given
    value.
    """
    return ast.Constant(val)
def ex_call(func, args):
    """A function-call expression with only positional parameters. The
    function may be an expression or the name of a function. Each
    argument may be an expression or a value to be used as a literal.
    """
    if isinstance(func, str):
        func = ex_rvalue(func)

    args = list(args)
    for i in range(len(args)):
        if not isinstance(args[i], ast.expr):
            args[i] = ex_literal(args[i])

    return ast.Call(func, args, [])
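A quick sketch of evaluating an ex_call result on its own, using only the helpers above and the standard library (the tuple argument is an arbitrary example value):

import ast

expr = ast.Expression(ex_call("len", [(1, 2, 3)]))
ast.fix_missing_locations(expr)
print(eval(compile(expr, "<example>", "eval")))  # prints 3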
def compile_func(arg_names, statements, name="_the_func", debug=False):
    """Compile a list of statements as the body of a function and return
    the resulting Python function. If `debug`, then print out the
    bytecode of the compiled function.
    """
    args_fields = {
        "args": [ast.arg(arg=n, annotation=None) for n in arg_names],
        "kwonlyargs": [],
        "kw_defaults": [],
        "defaults": [ex_literal(None) for _ in arg_names],
    }
    if "posonlyargs" in ast.arguments._fields:  # Added in Python 3.8.
        args_fields["posonlyargs"] = []
    args = ast.arguments(**args_fields)

    func_def = ast.FunctionDef(
        name=name,
        args=args,
        body=statements,
        decorator_list=[],
    )

    # The ast.Module signature changed in 3.8 to accept a list of types to
    # ignore.
    if sys.version_info >= (3, 8):
        mod = ast.Module([func_def], [])
    else:
        mod = ast.Module([func_def])

    ast.fix_missing_locations(mod)
    prog = compile(mod, "<generated>", "exec")

    # Debug: show bytecode.
    if debug:
        dis.dis(prog)
        for const in prog.co_consts:
            if isinstance(const, types.CodeType):
                dis.dis(const)

    the_locals = {}
    exec(prog, {}, the_locals)
    return the_locals[name]
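A hedged sketch of how the pieces fit together: build a one-statement body with the AST helpers above and compile it into a callable.

import ast

# Body: return x * 2
stmt = ast.Return(ast.BinOp(ex_rvalue("x"), ast.Mult(), ex_literal(2)))
double = compile_func(["x"], [stmt])
print(double(21))  # prints 42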
def _parse(template):
    """Parse a top-level template string Expression. Any extraneous text
    is considered literal text.
    """
    parser = Parser(template)
    parser.parse_expression()

    parts = parser.parts
    remainder = parser.string[parser.pos:]
    if remainder:
        parts.append(remainder)
    return Expression(parts)
def _is_hidden_osx(path):
    """Return whether or not a file is hidden on OS X. This uses os.lstat
    to work out if a file has the "hidden" flag.
    """
    file_stat = os.lstat(beets.util.syspath(path))

    if hasattr(file_stat, "st_flags") and hasattr(stat, "UF_HIDDEN"):
        return bool(file_stat.st_flags & stat.UF_HIDDEN)
    else:
        return False
def _is_hidden_win(path):
    """Return whether or not a file is hidden on Windows. This uses
    GetFileAttributes to work out if a file has the "hidden" flag
    (FILE_ATTRIBUTE_HIDDEN).
    """
    # FILE_ATTRIBUTE_HIDDEN = 2 (0x2) from GetFileAttributes documentation.
    hidden_mask = 2

    # Retrieve the attributes for the file.
    attrs = ctypes.windll.kernel32.GetFileAttributesW(beets.util.syspath(path))

    # Ensure we have valid attributes and compare them against the mask.
    return attrs >= 0 and attrs & hidden_mask
def _is_hidden_dot(path):
    """Return whether or not a file starts with a dot. Files starting
    with a dot are seen as "hidden" files on Unix-based OSes.
    """
    return os.path.basename(path).startswith(b".")
def is_hidden(path):
    """Return whether or not a file is hidden. `path` should be a
    bytestring filename.

    This method works differently depending on the platform it is called
    on. On OS X, it uses both the result of `is_hidden_osx` and
    `is_hidden_dot` to work out if a file is hidden. On Windows, it uses
    the result of `is_hidden_win`. On any other operating system (i.e.
    Linux), it uses `is_hidden_dot`.
    """
    # Run the platform-specific function for the current platform.
    if sys.platform == "darwin":
        return _is_hidden_osx(path) or _is_hidden_dot(path)
    elif sys.platform == "win32":
        return _is_hidden_win(path)
    else:
        return _is_hidden_dot(path)
def extract_discogs_id_regex(album_id):
    """Return the Discogs release ID as an int, or None if none is found."""
    # Discogs IDs are simple integers. To avoid confusion with other
    # metadata plugins, we only look for very specific formats of the
    # input string:
    # - plain integer, optionally wrapped in brackets and prefixed by an
    #   'r', as this is how Discogs displays the release ID on its webpage.
    # - legacy url format: discogs.com/<name of release>/release/<id>
    # - legacy url short format: discogs.com/release/<id>
    # - current url format: discogs.com/release/<id>-<name of release>
    # See #291, #4080 and #4085 for the discussions leading up to these
    # patterns.
    # Regex has been tested here https://regex101.com/r/TOu7kw/1
    for pattern in [
        r"^\[?r?(?P<id>\d+)\]?$",
        r"discogs\.com/release/(?P<id>\d+)-?",
        r"discogs\.com/[^/]+/release/(?P<id>\d+)",
    ]:
        match = re.search(pattern, album_id)
        if match:
            return int(match.group("id"))
    return None
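A few examples of inputs the patterns are meant to accept or reject:

assert extract_discogs_id_regex("123456") == 123456
assert extract_discogs_id_regex("[r123456]") == 123456
assert extract_discogs_id_regex(
    "https://www.discogs.com/release/123456-Album-Name"
) == 123456
assert extract_discogs_id_regex("not-an-id") is None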
def _invalidate_queue(q, val=None, sync=True):
    """Break a Queue such that it never blocks: it always reports a size
    of 1 and never reaches its maximum size. get()ing from the queue
    returns `val`, which defaults to None. `sync` controls whether the
    queue's lock is acquired first (because the lock is not reentrant!).
    """
    def _qsize(len=len):
        return 1

    def _put(item):
        pass

    def _get():
        return val

    if sync:
        q.mutex.acquire()

    try:
        # Originally, we set `maxsize` to 0 here, which is supposed to mean
        # an unlimited queue size. However, there is a race condition since
        # Python 3.2 when this attribute is changed while another thread is
        # waiting in put()/get() due to a full/empty queue.
        # Setting it to 2 is still hacky because Python does not give any
        # guarantee what happens if Queue methods/attributes are overwritten
        # when it is already in use. However, because of our dummy _put()
        # and _get() methods, it provides a workaround to let the queue appear
        # to be never empty or full.
        # See issue https://github.com/beetbox/beets/issues/2078
        q.maxsize = 2
        q._qsize = _qsize
        q._put = _put
        q._get = _get
        q.not_empty.notify_all()
        q.not_full.notify_all()
    finally:
        if sync:
            q.mutex.release()
def multiple(messages):
    """Yield multiple([message, ..]) from a pipeline stage to send
    multiple values to the next pipeline stage.
    """
    return MultiMessage(messages)
def stage(func):
    """Decorate a function to become a simple stage.

    >>> @stage
    ... def add(n, i):
    ...     return i + n
    >>> pipe = Pipeline([
    ...     iter([1, 2, 3]),
    ...     add(2),
    ... ])
    >>> list(pipe.pull())
    [3, 4, 5]
    """
    def coro(*args):
        task = None
        while True:
            task = yield task
            task = func(*(args + (task,)))

    return coro
def mutator_stage(func):
    """Decorate a function that manipulates items in a coroutine to
    become a simple stage.

    >>> @mutator_stage
    ... def setkey(key, item):
    ...     item[key] = True
    >>> pipe = Pipeline([
    ...     iter([{'x': False}, {'a': False}]),
    ...     setkey('x'),
    ... ])
    >>> list(pipe.pull())
    [{'x': True}, {'a': False, 'x': True}]
    """
    def coro(*args):
        task = None
        while True:
            task = yield task
            func(*(args + (task,)))

    return coro
def _allmsgs(obj):
    """Return a list of all the messages encapsulated in obj. If obj is
    a MultiMessage, return its enclosed messages. If obj is BUBBLE,
    return an empty list. Otherwise, return a list containing obj.
    """
    if isinstance(obj, MultiMessage):
        return obj.messages
    elif obj == BUBBLE:
        return []
    else:
        return [obj]
def normpath(path: bytes) -> bytes:
    """Provide the canonical form of the path suitable for storing in
    the database.
    """
    path = syspath(path, prefix=False)
    path = os.path.normpath(os.path.abspath(os.path.expanduser(path)))
    return bytestring_path(path)
def ancestry(path: bytes) -> List[bytes]:
    """Return a list consisting of path's parent directory, its
    grandparent, and so on. For instance:

    >>> ancestry(b'/a/b/c')
    [b'/', b'/a', b'/a/b']

    The argument should *not* be the result of a call to `syspath`.
    """
    out = []
    last_path = None
    while path:
        path = os.path.dirname(path)

        if path == last_path:
            break
        last_path = path

        if path:
            # Don't yield ''.
            out.insert(0, path)
    return out
def sorted_walk(
    path: AnyStr,
    ignore: Sequence = (),
    ignore_hidden: bool = False,
    logger: Optional[Logger] = None,
) -> Generator[Tuple, None, None]:
    """Like `os.walk`, but yields things in case-insensitive sorted,
    breadth-first order. Directory and file names matching any glob
    pattern in `ignore` are skipped. If `logger` is provided, then
    warning messages are logged there when a directory cannot be listed.
    """
    # Make sure the paths aren't Unicode strings.
    path = bytestring_path(path)
    ignore = [bytestring_path(i) for i in ignore]

    # Get all the directories and files at this level.
    try:
        contents = os.listdir(syspath(path))
    except OSError as exc:
        if logger:
            logger.warning(
                "could not list directory {}: {}".format(
                    displayable_path(path), exc.strerror
                )
            )
        return
    dirs = []
    files = []
    for base in contents:
        base = bytestring_path(base)

        # Skip ignored filenames.
        skip = False
        for pat in ignore:
            if fnmatch.fnmatch(base, pat):
                if logger:
                    logger.debug(
                        "ignoring {} due to ignore rule {}".format(base, pat)
                    )
                skip = True
                break
        if skip:
            continue

        # Add to output as either a file or a directory.
        cur = os.path.join(path, base)
        if (ignore_hidden and not hidden.is_hidden(cur)) or not ignore_hidden:
            if os.path.isdir(syspath(cur)):
                dirs.append(base)
            else:
                files.append(base)

    # Sort lists (case-insensitive) and yield the current level.
    dirs.sort(key=bytes.lower)
    files.sort(key=bytes.lower)
    yield (path, dirs, files)

    # Recurse into directories.
    for base in dirs:
        cur = os.path.join(path, base)
        yield from sorted_walk(cur, ignore, ignore_hidden, logger)
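A usage sketch (the music directory and glob pattern are hypothetical): print every non-ignored file, level by level, in sorted order.

for dirpath, dirs, files in sorted_walk(b"/music", ignore=(b"*.tmp",)):
    for name in files:
        print(os.path.join(dirpath, name))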
def path_as_posix(path: bytes) -> bytes:
    """Return the string representation of the path with forward (/)
    slashes.
    """
    return path.replace(b"\\", b"/")
def mkdirall(path: bytes):
    """Make all the enclosing directories of path (like mkdir -p on the
    parent).
    """
    for ancestor in ancestry(path):
        if not os.path.isdir(syspath(ancestor)):
            try:
                os.mkdir(syspath(ancestor))
            except OSError as exc:
                raise FilesystemError(
                    exc, "create", (ancestor,), traceback.format_exc()
                )
def fnmatch_all(names: Sequence[bytes], patterns: Sequence[bytes]) -> bool:
    """Determine whether all strings in `names` match at least one of
    the `patterns`, which should be shell glob expressions.
    """
    for name in names:
        matches = False
        for pattern in patterns:
            matches = fnmatch.fnmatch(name, pattern)
            if matches:
                break
        if not matches:
            return False
    return True
def prune_dirs(
    path: str,
    root: Optional[Bytes_or_String] = None,
    clutter: Sequence[str] = (".DS_Store", "Thumbs.db"),
):
    """If path is an empty directory, then remove it. Recursively remove
    path's ancestry up to root (which is never removed) where there are
    empty directories. If path is not contained in root, then nothing is
    removed. Glob patterns in clutter are ignored when determining
    emptiness. If root is not provided, then only path may be removed
    (i.e., no recursive removal).
    """
    path = normpath(path)
    if root is not None:
        root = normpath(root)

    ancestors = ancestry(path)
    if root is None:
        # Only remove the top directory.
        ancestors = []
    elif root in ancestors:
        # Only remove directories below the root.
        ancestors = ancestors[ancestors.index(root) + 1:]
    else:
        # Remove nothing.
        return

    # Traverse upward from path.
    ancestors.append(path)
    ancestors.reverse()
    for directory in ancestors:
        directory = syspath(directory)
        if not os.path.exists(directory):
            # Directory gone already.
            continue
        clutter: List[bytes] = [bytestring_path(c) for c in clutter]
        match_paths = [bytestring_path(d) for d in os.listdir(directory)]
        try:
            if fnmatch_all(match_paths, clutter):
                # Directory contains only clutter (or nothing).
                shutil.rmtree(directory)
            else:
                break
        except OSError:
            break
def components(path: AnyStr) -> MutableSequence[AnyStr]:
    """Return a list of the path components in path. For instance:

    >>> components(b'/a/b/c')
    [b'a', b'b', b'c']

    The argument should *not* be the result of a call to `syspath`.
    """
    comps = []
    ances = ancestry(path)
    for anc in ances:
        comp = os.path.basename(anc)
        if comp:
            comps.append(comp)
        else:  # root
            comps.append(anc)

    last = os.path.basename(path)
    if last:
        comps.append(last)

    return comps
def arg_encoding() -> str:
    """Get the encoding for command-line arguments (and other OS
    locale-sensitive strings).
    """
    return sys.getfilesystemencoding()
def _fsencoding() -> str:
    """Get the system's filesystem encoding. On Windows, this is always
    UTF-8 (not MBCS).
    """
    encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
    if encoding == "mbcs":
        # On Windows, a broken encoding known to Python as "MBCS" is
        # used for the filesystem. However, we only use the Unicode API
        # for Windows paths, so the encoding is actually immaterial so
        # we can avoid dealing with this nastiness. We arbitrarily
        # choose UTF-8.
        encoding = "utf-8"

    return encoding
def bytestring_path(path: Bytes_or_String) -> bytes:
    """Given a path, which is either bytes or unicode, return a bytes
    path (ensuring that we never deal with Unicode pathnames). The path
    should already be bytes, but this has safeguards to convert strings.
    """
    # Pass through bytestrings.
    if isinstance(path, bytes):
        return path

    # On Windows, remove the magic prefix added by `syspath`. This makes
    # ``bytestring_path(syspath(X)) == X``, i.e., we can safely
    # round-trip through `syspath`.
    if os.path.__name__ == "ntpath" and path.startswith(WINDOWS_MAGIC_PREFIX):
        path = path[len(WINDOWS_MAGIC_PREFIX):]

    # Try to encode with default encodings, but fall back to utf-8.
    try:
        return path.encode(_fsencoding())
    except (UnicodeError, LookupError):
        return path.encode("utf-8")
def displayable_path(
    path: Union[bytes, str, Tuple[Union[bytes, str], ...]],
    separator: str = "; ",
) -> str:
    """Attempt to decode a bytestring path to a unicode object for the
    purpose of displaying it to the user. If the `path` argument is a
    list or a tuple, the elements are joined with `separator`.
    """
    if isinstance(path, (list, tuple)):
        return separator.join(displayable_path(p) for p in path)
    elif isinstance(path, str):
        return path
    elif not isinstance(path, bytes):
        # A non-string object: just get its unicode representation.
        return str(path)

    try:
        return path.decode(_fsencoding(), "ignore")
    except (UnicodeError, LookupError):
        return path.decode("utf-8", "ignore")
def syspath(path: Bytes_or_String, prefix: bool = True) -> Bytes_or_String:
    """Convert a path for use by the operating system. In particular,
    paths on Windows must receive a magic prefix and must be converted
    to Unicode before they are sent to the OS. To disable the magic
    prefix on Windows, set `prefix` to False---but only do this if you
    *really* know what you're doing.
    """
    # Don't do anything if we're not on Windows.
    if os.path.__name__ != "ntpath":
        return path

    if not isinstance(path, str):
        # Beets currently represents Windows paths internally with UTF-8
        # arbitrarily. But earlier versions used MBCS because it is
        # reported as the FS encoding by Windows. Try both.
        try:
            path = path.decode("utf-8")
        except UnicodeError:
            # The encoding should always be MBCS, Windows' broken
            # Unicode representation.
            assert isinstance(path, bytes)
            encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
            path = path.decode(encoding, "replace")

    # Add the magic prefix if it isn't already there.
    # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx
    if prefix and not path.startswith(WINDOWS_MAGIC_PREFIX):
        if path.startswith("\\\\"):
            # UNC path. Final path should look like \\?\UNC\...
            path = "UNC" + path[1:]
        path = WINDOWS_MAGIC_PREFIX + path

    return path
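A round-trip sketch: bytestring_path strips the magic prefix that syspath adds, so the two compose safely on any platform (the path below is an arbitrary example).

p = bytestring_path("C:\\Music\\track.mp3")
assert bytestring_path(syspath(p)) == p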
def samefile(p1: bytes, p2: bytes) -> bool:
    """Safer equality for paths."""
    if p1 == p2:
        return True
    return shutil._samefile(syspath(p1), syspath(p2))
def remove(path: Optional[bytes], soft: bool = True):
    """Remove the file. If `soft`, then no error will be raised if the
    file does not exist.
    """
    path = syspath(path)
    if not path or (soft and not os.path.exists(path)):
        return
    try:
        os.remove(path)
    except OSError as exc:
        raise FilesystemError(exc, "delete", (path,), traceback.format_exc())
def copy(path: bytes, dest: bytes, replace: bool = False):
    """Copy a plain file. Permissions are not copied. If `dest` already
    exists, raises a FilesystemError unless `replace` is True. Has no
    effect if `path` is the same as `dest`. Paths are translated to
    system paths before the syscall.
    """
    if samefile(path, dest):
        return
    path = syspath(path)
    dest = syspath(dest)
    if not replace and os.path.exists(dest):
        raise FilesystemError("file exists", "copy", (path, dest))
    try:
        shutil.copyfile(path, dest)
    except OSError as exc:
        raise FilesystemError(exc, "copy", (path, dest), traceback.format_exc())
def move(path: bytes, dest: bytes, replace: bool = False):
    """Rename a file. `dest` may not be a directory. If `dest` already
    exists, raises an OSError unless `replace` is True. Has no effect if
    `path` is the same as `dest`. If the paths are on different
    filesystems (or the rename otherwise fails), a copy is attempted
    instead, in which case metadata will *not* be preserved. Paths are
    translated to system paths.
    """
    if os.path.isdir(syspath(path)):
        raise FilesystemError("source is directory", "move", (path, dest))
    if os.path.isdir(syspath(dest)):
        raise FilesystemError("destination is directory", "move", (path, dest))
    if samefile(path, dest):
        return
    if os.path.exists(syspath(dest)) and not replace:
        raise FilesystemError("file exists", "rename", (path, dest))

    # First, try renaming the file.
    try:
        os.replace(syspath(path), syspath(dest))
    except OSError:
        # Copy the file to a temporary destination.
        basename = os.path.basename(bytestring_path(dest))
        dirname = os.path.dirname(bytestring_path(dest))
        tmp = tempfile.NamedTemporaryFile(
            suffix=syspath(b".beets", prefix=False),
            prefix=syspath(b"." + basename, prefix=False),
            dir=syspath(dirname),
            delete=False,
        )
        try:
            with open(syspath(path), "rb") as f:
                shutil.copyfileobj(f, tmp)
        finally:
            tmp.close()

        # Move the copied file into place.
        try:
            os.replace(tmp.name, syspath(dest))
            tmp = None
            os.remove(syspath(path))
        except OSError as exc:
            raise FilesystemError(
                exc, "move", (path, dest), traceback.format_exc()
            )
        finally:
            if tmp is not None:
                # Clean up the temporary copy on failure. Note that
                # `os.remove` needs the file's name, not the file object.
                os.remove(tmp.name)
def link(path: bytes, dest: bytes, replace: bool = False):
    """Create a symbolic link from path to `dest`. Raises an OSError if
    `dest` already exists, unless `replace` is True. Does nothing if
    `path` == `dest`.
    """
    if samefile(path, dest):
        return

    if os.path.exists(syspath(dest)) and not replace:
        raise FilesystemError("file exists", "rename", (path, dest))
    try:
        os.symlink(syspath(path), syspath(dest))
    except NotImplementedError:
        # Raised on Python >= 3.2 and Windows versions before Vista.
        raise FilesystemError(
            "OS does not support symbolic links.",
            "link",
            (path, dest),
            traceback.format_exc(),
        )
    except OSError as exc:
        raise FilesystemError(exc, "link", (path, dest), traceback.format_exc())
def hardlink(path: bytes, dest: bytes, replace: bool = False):
    """Create a hard link from path to `dest`. Raises an OSError if
    `dest` already exists, unless `replace` is True. Does nothing if
    `path` == `dest`.
    """
    if samefile(path, dest):
        return

    if os.path.exists(syspath(dest)) and not replace:
        raise FilesystemError("file exists", "rename", (path, dest))
    try:
        os.link(syspath(path), syspath(dest))
    except NotImplementedError:
        raise FilesystemError(
            "OS does not support hard links.",
            "link",
            (path, dest),
            traceback.format_exc(),
        )
    except OSError as exc:
        if exc.errno == errno.EXDEV:
            raise FilesystemError(
                "Cannot hard link across devices.",
                "link",
                (path, dest),
                traceback.format_exc(),
            )
        else:
            raise FilesystemError(
                exc, "link", (path, dest), traceback.format_exc()
            )
def reflink(
    path: bytes,
    dest: bytes,
    replace: bool = False,
    fallback: bool = False,
):
    """Create a reflink from `dest` to `path`. Raise an `OSError` if
    `dest` already exists, unless `replace` is True. If `path` ==
    `dest`, then do nothing.

    If reflinking fails and `fallback` is enabled, try copying the file
    instead. Otherwise, raise an error without trying a plain copy. May
    raise an `ImportError` if the `reflink` module is not available.
    """
    import reflink as pyreflink

    if samefile(path, dest):
        return

    if os.path.exists(syspath(dest)) and not replace:
        raise FilesystemError("file exists", "rename", (path, dest))

    try:
        pyreflink.reflink(path, dest)
    except (NotImplementedError, pyreflink.ReflinkImpossibleError):
        if fallback:
            copy(path, dest, replace)
        else:
            raise FilesystemError(
                "OS/filesystem does not support reflinks.",
                "link",
                (path, dest),
                traceback.format_exc(),
            )
def unique_path(path: bytes) -> bytes:
    """Return a version of ``path`` that does not exist on the
    filesystem. Specifically, if ``path`` itself already exists, then
    something unique is appended to the path.
    """
    if not os.path.exists(syspath(path)):
        return path

    base, ext = os.path.splitext(path)
    match = re.search(rb"\.(\d+)$", base)
    if match:
        num = int(match.group(1))
        base = base[: match.start()]
    else:
        num = 0
    while True:
        num += 1
        suffix = f".{num}".encode() + ext
        new_path = base + suffix
        if not os.path.exists(new_path):
            return new_path
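For instance, assuming b"/tmp/demo.txt" exists but no numbered variant does (a sketch; results depend on the filesystem state):

# unique_path(b"/tmp/demo.txt")    -> b"/tmp/demo.1.txt"
# unique_path(b"/tmp/demo.1.txt")  -> b"/tmp/demo.2.txt"  (if .1 also exists)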
def sanitize_path(
    path: str,
    replacements: Optional[Sequence[Sequence[Union[Pattern, str]]]] = None,
) -> str:
    """Take a path (as a Unicode string) and make sure that it is legal.
    Return a new path. Only works with fragments; won't work reliably on
    Windows when a path begins with a drive letter. Path separators
    (including altsep!) should already be cleaned from the path
    components. If replacements is specified, it is used *instead* of
    the default set of replacements; it must be a list of (compiled
    regex, replacement string) pairs.
    """
    replacements = replacements or CHAR_REPLACE

    comps = components(path)
    if not comps:
        return ""
    for i, comp in enumerate(comps):
        for regex, repl in replacements:
            comp = regex.sub(repl, comp)
        comps[i] = comp
    return os.path.join(*comps)
def truncate_path(path: AnyStr, length: int = MAX_FILENAME_LENGTH) -> AnyStr:
    """Given a bytestring path or a Unicode path fragment, truncate the
    components to a legal length. In the last component, the extension
    is preserved.
    """
    comps = components(path)

    out = [c[:length] for c in comps]
    base, ext = os.path.splitext(comps[-1])
    if ext:
        # Last component has an extension.
        base = base[: length - len(ext)]
        out[-1] = base + ext

    return os.path.join(*out)
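For example, with a per-component limit of 8 characters, the extension survives truncation (POSIX-style separators assumed):

assert truncate_path("longdirectoryname/averylongfilename.mp3", 8) == \
    os.path.join("longdire", "aver.mp3")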
def _legalize_stage(
    path: str,
    replacements: Optional[Sequence[Sequence[Union[Pattern, str]]]],
    length: int,
    extension: str,
    fragment: bool,
) -> Tuple[Bytes_or_String, bool]:
    """Perform a single round of path legalization steps
    (sanitation/replacement, encoding from Unicode to bytes,
    extension-appending, and truncation). Return the path (Unicode if
    `fragment` is set, `bytes` otherwise) and whether truncation was
    required.
    """
    # Perform an initial sanitization including user replacements.
    path = sanitize_path(path, replacements)

    # Encode for the filesystem.
    if not fragment:
        path = bytestring_path(path)  # type: ignore

    # Preserve extension.
    path += extension.lower()

    # Truncate too-long components.
    pre_truncate_path = path
    path = truncate_path(path, length)

    return path, path != pre_truncate_path
def legalize_path(
    path: str,
    replacements: Optional[Sequence[Sequence[Union[Pattern, str]]]],
    length: int,
    extension: bytes,
    fragment: bool,
) -> Tuple[Bytes_or_String, bool]:
    """Given a path-like Unicode string, produce a legal path. Return
    the path and a flag indicating whether some replacements had to be
    ignored (see below).

    The legalization process (see `_legalize_stage`) consists of
    applying the sanitation rules in `replacements`, encoding the string
    to bytes (unless `fragment` is set), truncating components to
    `length`, and appending the `extension`.

    This function performs up to three calls to `_legalize_stage` in
    case truncation conflicts with replacements (as can happen when
    truncation creates whitespace at the end of the string, for
    example). The limited number of iterations avoids the possibility of
    an infinite loop of sanitation and truncation operations, which
    could be caused by replacement rules that make the string longer.
    The flag returned from this function indicates that the path had to
    be truncated twice (i.e., replacements made the string longer again
    after it was truncated); the application should probably log some
    sort of warning.
    """
    if fragment:
        # Outputting Unicode.
        extension = extension.decode("utf-8", "ignore")

    first_stage_path, _ = _legalize_stage(
        path, replacements, length, extension, fragment
    )

    # Convert back to Unicode with extension removed.
    first_stage_path, _ = os.path.splitext(displayable_path(first_stage_path))

    # Re-sanitize following truncation (including user replacements).
    second_stage_path, retruncated = _legalize_stage(
        first_stage_path, replacements, length, extension, fragment
    )

    # If the path was once again truncated, discard user replacements
    # and run through one last legalization stage.
    if retruncated:
        second_stage_path, _ = _legalize_stage(
            first_stage_path, None, length, extension, fragment
        )

    return second_stage_path, retruncated
def py3_path(path: Union[bytes, str]) -> str:
    """Convert a bytestring path to Unicode. This helps deal with APIs
    on Python 3 that *only* accept Unicode (i.e., `str` objects). I
    philosophically disagree with this decision, because paths are sadly
    bytes on Unix, but that's the way it is. So this function helps us
    "smuggle" the true bytes data through APIs that took Python 3's
    Unicode mandate too seriously.
    """
    if isinstance(path, str):
        return path
    assert isinstance(path, bytes)
    return os.fsdecode(path)
def str2bool(value: str) -> bool:
    """Return a boolean reflecting a human-entered string."""
    return value.lower() in ("yes", "1", "true", "t", "y")
def as_string(value: Any) -> str:
    """Convert a value to a Unicode object for matching with a query.
    None becomes the empty string. Bytestrings are silently decoded.
    """
    if value is None:
        return ""
    elif isinstance(value, memoryview):
        return bytes(value).decode("utf-8", "ignore")
    elif isinstance(value, bytes):
        return value.decode("utf-8", "ignore")
    else:
        return str(value)
def plurality(objs: Sequence[T]) -> Tuple[T, int]:
    """Given a sequence of hashable objects, return the object that is
    most common in the sequence along with its number of appearances.
    The sequence must contain at least one object.
    """
    c = Counter(objs)
    if not c:
        raise ValueError("sequence must be non-empty")
    return c.most_common(1)[0]
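A quick check of the expected return shape, a (value, count) pair:

assert plurality(["a", "b", "a", "c", "a"]) == ("a", 3)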
def cpu_count() -> int:
    """Return the number of hardware thread contexts (cores or SMT
    threads) in the system.
    """
    # Adapted from the soundconverter project:
    # https://github.com/kassoulet/soundconverter
    if sys.platform == "win32":
        try:
            num = int(os.environ["NUMBER_OF_PROCESSORS"])
        except (ValueError, KeyError):
            num = 0
    elif sys.platform == "darwin":
        try:
            num = int(
                command_output(["/usr/sbin/sysctl", "-n", "hw.ncpu"]).stdout
            )
        except (ValueError, OSError, subprocess.CalledProcessError):
            num = 0
    else:
        try:
            num = os.sysconf("SC_NPROCESSORS_ONLN")
        except (ValueError, OSError, AttributeError):
            num = 0
    if num >= 1:
        return num
    else:
        return 1
def convert_command_args(args: List[bytes]) -> List[str]:
    """Convert command arguments, which may either be `bytes` or `str`
    objects, to uniformly surrogate-escaped strings.
    """
    assert isinstance(args, list)

    def convert(arg) -> str:
        if isinstance(arg, bytes):
            return os.fsdecode(arg)
        return arg

    return [convert(a) for a in args]
def command_output(
    cmd: List[Bytes_or_String],
    shell: bool = False,
) -> CommandOutput:
    """Run the command and return its output after it has exited.

    Returns a CommandOutput. The attributes ``stdout`` and ``stderr``
    contain byte strings of the respective output streams.

    ``cmd`` is a list of arguments starting with the command name. The
    arguments are bytes on Unix and strings on Windows. If ``shell`` is
    true, ``cmd`` is assumed to be a string and passed to a shell to
    execute.

    If the process exits with a non-zero return code,
    ``subprocess.CalledProcessError`` is raised. May also raise
    ``OSError``.

    This replaces `subprocess.check_output`, which can have problems if
    lots of output is sent to stderr.
    """
    cmd = convert_command_args(cmd)

    devnull = subprocess.DEVNULL

    proc = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        stdin=devnull,
        close_fds=platform.system() != "Windows",
        shell=shell,
    )
    stdout, stderr = proc.communicate()
    if proc.returncode:
        raise subprocess.CalledProcessError(
            returncode=proc.returncode,
            cmd=" ".join(cmd),
            output=stdout + stderr,
        )
    return CommandOutput(stdout, stderr)
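A usage sketch (POSIX example; the command is arbitrary):

out = command_output([b"echo", b"hello"])
print(out.stdout)  # b'hello\n'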
def max_filename_length(path: AnyStr, limit=MAX_FILENAME_LENGTH) -> int:
    """Attempt to determine the maximum filename length for the
    filesystem containing `path`. If the value is greater than `limit`,
    then `limit` is used instead (to prevent errors when a filesystem
    misreports its capacity). If it cannot be determined (e.g., on
    Windows), return `limit`.
    """
    if hasattr(os, "statvfs"):
        try:
            res = os.statvfs(path)
        except OSError:
            return limit
        return min(res[9], limit)  # Index 9 is f_namemax.
    else:
        return limit
def open_anything() -> str:
    """Return the system command that dispatches execution to the
    correct program.
    """
    sys_name = platform.system()
    if sys_name == "Darwin":
        base_cmd = "open"
    elif sys_name == "Windows":
        base_cmd = "start"
    else:  # Assume Unix.
        base_cmd = "xdg-open"
    return base_cmd
def editor_command() -> str:
    """Get a command for opening a text file.

    First try the environment variable `VISUAL`, followed by `EDITOR`.
    As a last resort, fall back to `open_anything()`, the
    platform-specific tool for opening files in general.
    """
    return (
        os.environ.get("VISUAL") or os.environ.get("EDITOR") or open_anything()
    )
def interactive_open(targets: Sequence[str], command: str):
    """Open the files in `targets` by `exec`ing a new `command`, given
    as a Unicode string. (The new program takes over, and Python
    execution ends: this does not fork a subprocess.) Can raise
    `OSError`.
    """
    assert command

    # Split the command string into its arguments.
    try:
        args = shlex.split(command)
    except ValueError:  # Malformed shell tokens.
        args = [command]

    args.insert(0, args[0])  # for argv[0]

    args += targets

    return os.execlp(*args)
def case_sensitive(path: bytes) -> bool:
    """Check whether the filesystem at the given path is case sensitive.

    To work best, the path should point to a file or a directory. If the
    path does not exist, assume a case-sensitive file system on every
    platform except Windows.

    Currently only used for absolute paths by beets; may have a trailing
    path separator.
    """
    # Look at parent paths until we find a path that actually exists, or
    # reach the root.
    while True:
        head, tail = os.path.split(path)
        if head == path:
            # We have reached the root of the file system.
            # By default, the case sensitivity depends on the platform.
            return platform.system() != "Windows"

        # Trailing path separator, or path does not exist.
        if not tail or not os.path.exists(path):
            path = head
            continue

        upper_tail = tail.upper()
        lower_tail = tail.lower()

        # In case we can't tell from the given path name, look at the
        # parent directory.
        if upper_tail == lower_tail:
            path = head
            continue

        upper_sys = syspath(os.path.join(head, upper_tail))
        lower_sys = syspath(os.path.join(head, lower_tail))

        # If either the upper-cased or lower-cased path does not exist,
        # the filesystem must be case-sensitive.
        # (Otherwise, we have more work to do.)
        if not os.path.exists(upper_sys) or not os.path.exists(lower_sys):
            return True

        # Original and both upper- and lower-cased versions of the path
        # exist on the file system. Check whether they refer to different
        # files by their inodes (or an alternative method on Windows).
        return not os.path.samefile(lower_sys, upper_sys)
def raw_seconds_short(string: str) -> float:
    """Parse a human-readable M:SS string into a float (number of
    seconds). Raises ValueError if the conversion cannot take place
    because `string` is not in the right format.
    """
    match = re.match(r"^(\d+):([0-5]\d)$", string)
    if not match:
        raise ValueError("String not in M:SS format")
    minutes, seconds = map(int, match.groups())
    return float(minutes * 60 + seconds)
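For example:

assert raw_seconds_short("3:07") == 187.0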
def asciify_path(path: str, sep_replace: str) -> str:
    """Decode all unicode characters in a path into ASCII equivalents.

    Substitutions are provided by the unidecode module. Path separators
    in the input are preserved.

    Keyword arguments:
    path -- The path to be asciified.
    sep_replace -- the string to be used to replace extraneous path
    separators.
    """
    # If this platform has an os.altsep, change it to os.sep.
    if os.altsep:
        path = path.replace(os.altsep, os.sep)

    path_components: List[Bytes_or_String] = path.split(os.sep)
    for index, item in enumerate(path_components):
        # Replace any separators that unidecode may have introduced into
        # the component (chaining so neither replacement is discarded).
        decoded = unidecode(item).replace(os.sep, sep_replace)
        if os.altsep:
            decoded = decoded.replace(os.altsep, sep_replace)
        path_components[index] = decoded
    return os.sep.join(path_components)
def par_map(transform: Callable, items: Iterable):
    """Apply the function `transform` to all the elements in the
    iterable `items`, like `map(transform, items)` but with no return
    value.

    The parallelism uses threads (not processes), so this is only useful
    for IO-bound `transform`s.
    """
    pool = ThreadPool()
    pool.map(transform, items)
    pool.close()
    pool.join()
def lazy_property(func: Callable) -> Callable:
    """A decorator that creates a lazily evaluated property. On first
    access, the property is assigned the return value of `func`. This
    first value is stored, so that future accesses do not have to
    evaluate `func` again.

    This behaviour is useful when `func` is expensive to evaluate, and
    it is not certain that the result will be needed.
    """
    field_name = "_" + func.__name__

    @property
    @functools.wraps(func)
    def wrapper(self):
        if hasattr(self, field_name):
            return getattr(self, field_name)

        value = func(self)
        setattr(self, field_name, value)
        return value

    return wrapper
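A usage sketch: the wrapped method body runs only on the first access; later accesses hit the cached attribute.

class Album:
    @lazy_property
    def track_count(self):
        print("computing...")  # runs once
        return 12

a = Album()
a.track_count  # prints "computing..." and caches 12
a.track_count  # served from the cache; prints nothing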
def call(args):
    """Execute the command and return its output. Raise an ABSubmitError
    on failure.
    """
    try:
        return util.command_output(args).stdout
    except subprocess.CalledProcessError as e:
        raise ABSubmitError(
            "{} exited with status {}".format(args[0], e.returncode)
        )
def _generate_urls(base_url, mbid):
    """Generate AcousticBrainz endpoint URLs for the given `mbid`."""
    for level in LEVELS:
        yield base_url + mbid + level
def rewriter(field, simple_rules, advanced_rules):
    """Template field function factory.

    Create a template field function that rewrites the given field with
    the given rewriting rules. ``simple_rules`` must be a list of
    (pattern, replacement) pairs. ``advanced_rules`` must be a list of
    (query, replacement) pairs.
    """
    def fieldfunc(item):
        value = item._values_fixed[field]
        for pattern, replacement in simple_rules:
            if pattern.match(value.lower()):
                # Rewrite activated.
                return replacement
        for query, replacement in advanced_rules:
            if query.match(item):
                # Rewrite activated.
                return replacement
        # Not activated; return original value.
        return value

    return fieldfunc
def safe_filename(fn):
    """Check whether a string is a simple (non-path) filename.

    For example, `foo.txt` is safe because it is a "plain" filename. But
    `foo/bar.txt` and `../foo.txt` and `.` are all non-safe because they
    can traverse to directories other than the current one.
    """
    # Rule out any directories.
    if os.path.basename(fn) != fn:
        return False

    # In single names, rule out Unix directory traversal names.
    if fn in (".", ".."):
        return False

    return True
def server_info():
    """Respond with info about the server."""
    return {"data": {"type": "server", "id": "0", "attributes": SERVER_INFO}}
def all_tracks():
    """Respond with a list of all tracks and related information."""
    doc = TrackDocument()
    return doc.all_resources()
def single_track(track_id):
    """Respond with info about the specified track.

    Args:
        track_id: The id of the track provided in the URL (integer).
    """
    doc = TrackDocument()
    return doc.single_resource(track_id)
def audio_file(track_id):
    """Supply an audio file for the specified track.

    Args:
        track_id: The id of the track provided in the URL (integer).
    """
    track = current_app.config["lib"].get_item(track_id)
    if not track:
        return AURADocument.error(
            "404 Not Found",
            "No track with the requested id.",
            "There is no track with an id of {} in the library.".format(
                track_id
            ),
        )

    path = py3_path(track.path)
    if not isfile(path):
        return AURADocument.error(
            "404 Not Found",
            "No audio file for the requested track.",
            (
                "There is no audio file for track {} at the expected location"
            ).format(track_id),
        )

    file_mimetype = guess_type(path)[0]
    if not file_mimetype:
        return AURADocument.error(
            "500 Internal Server Error",
            "Requested audio file has an unknown mimetype.",
            (
                "The audio file for track {} has an unknown mimetype. "
                "Its file extension is {}."
            ).format(track_id, path.split(".")[-1]),
        )

    # Check that the Accept header contains the file's mimetype.
    # Takes into account */* and audio/*.
    # Adding support for the bitrate parameter would require some effort so I
    # left it out. This means the client could be sent an error even if the
    # audio doesn't need transcoding.
    if not request.accept_mimetypes.best_match([file_mimetype]):
        return AURADocument.error(
            "406 Not Acceptable",
            "Unsupported MIME type or bitrate parameter in Accept header.",
            (
                "The audio file for track {} is only available as {} and "
                "bitrate parameters are not supported."
            ).format(track_id, file_mimetype),
        )

    return send_file(
        path,
        mimetype=file_mimetype,
        # Handles filename in Content-Disposition header.
        as_attachment=True,
        # Tries to upgrade the stream to support range requests.
        conditional=True,
    )
def all_albums():
    """Respond with a list of all albums and related information."""
    doc = AlbumDocument()
    return doc.all_resources()
def single_album(album_id):
    """Respond with info about the specified album.

    Args:
        album_id: The id of the album provided in the URL (integer).
    """
    doc = AlbumDocument()
    return doc.single_resource(album_id)
def all_artists():
    """Respond with a list of all artists and related information."""
    doc = ArtistDocument()
    return doc.all_resources()
def single_artist(artist_id):
    """Respond with info about the specified artist.

    Args:
        artist_id: The id of the artist provided in the URL. A string
            which is the artist's name.
    """
    doc = ArtistDocument()
    return doc.single_resource(artist_id)
def single_image(image_id):
    """Respond with info about the specified image.

    Args:
        image_id: The id of the image provided in the URL. A string in
            the form "<parent_type>-<parent_id>-<img_filename>".
    """
    doc = ImageDocument()
    return doc.single_resource(image_id)
def image_file(image_id):
    """Supply an image file for the specified image.

    Args:
        image_id: The id of the image provided in the URL. A string in
            the form "<parent_type>-<parent_id>-<img_filename>".
    """
    img_path = ImageDocument.get_image_path(image_id)
    if not img_path:
        return AURADocument.error(
            "404 Not Found",
            "No image with the requested id.",
            "There is no image with an id of {} in the library.".format(
                image_id
            ),
        )
    return send_file(img_path)
def create_app():
    """An application factory for use by a WSGI server."""
    config["aura"].add(
        {
            "host": "127.0.0.1",
            "port": 8337,
            "cors": [],
            "cors_supports_credentials": False,
            "page_limit": 500,
        }
    )

    app = Flask(__name__)

    # Register AURA blueprint view functions under a URL prefix.
    app.register_blueprint(aura_bp, url_prefix="/aura")

    # AURA specifies the mimetype MUST be this.
    app.config["JSONIFY_MIMETYPE"] = "application/vnd.api+json"

    # Disable auto-sorting of JSON keys.
    app.config["JSON_SORT_KEYS"] = False

    # Provide a way to access the beets library.
    # The normal method of using the Library and config provided in the
    # command function is not used because create_app() could be called
    # by an external WSGI server.
    # NOTE: this uses a 'private' function from beets.ui.__init__
    app.config["lib"] = _open_library(config)

    # Enable CORS if required.
    cors = config["aura"]["cors"].as_str_seq(list)
    if cors:
        from flask_cors import CORS

        # "Accept" is the only header clients use.
        app.config["CORS_ALLOW_HEADERS"] = "Accept"
        app.config["CORS_RESOURCES"] = {r"/aura/*": {"origins": cors}}
        app.config["CORS_SUPPORTS_CREDENTIALS"] = config["aura"][
            "cors_supports_credentials"
        ].get(bool)
        CORS(app)

    return app
def bpm(max_strokes):
    """Return the average BPM (possibly of a playing song) by listening
    to Enter keystrokes.
    """
    t0 = None
    dt = []
    for i in range(max_strokes):
        # Press Enter to the rhythm...
        s = input()
        if s == "":
            t1 = time.time()
            # Only start measuring at the second stroke.
            if t0:
                dt.append(t1 - t0)
            t0 = t1
        else:
            break

    # Return the average BPM.
    # bpm = (max_strokes - 1) / sum(dt) * 60
    ave = sum(1.0 / dti * 60 for dti in dt) / len(dt)
    return ave
def pairwise(iterable):
    """s -> (s0, s1), (s1, s2), (s2, s3), ..."""
    a, b = tee(iterable)
    next(b, None)
    return zip(a, b)
Build a span dict from the span string representation.
def span_from_str(span_str):
    """Build a span dict from the span string representation."""

    def normalize_year(d, yearfrom):
        """Convert a possibly abbreviated year to four digits."""
        if yearfrom < 100:
            raise BucketError("%d must be expressed on 4 digits" % yearfrom)

        # If only two digits are given, pick the closest year ending in
        # those two digits, counting from yearfrom.
        if d < 100:
            if (d % 100) < (yearfrom % 100):
                d = (yearfrom - yearfrom % 100) + 100 + d
            else:
                d = (yearfrom - yearfrom % 100) + d
        return d

    years = [int(x) for x in re.findall(r"\d+", span_str)]
    if not years:
        raise ui.UserError(
            "invalid range defined for year bucket '%s': no "
            "year found" % span_str
        )
    try:
        years = [normalize_year(x, years[0]) for x in years]
    except BucketError as exc:
        raise ui.UserError(
            "invalid range defined for year bucket '%s': %s" % (span_str, exc)
        )

    res = {"from": years[0], "str": span_str}
    if len(years) > 1:
        res["to"] = years[-1]
    return res
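Expected behavior on two-digit end years, following the normalization rule above:

span_from_str("1990-99")  # -> {"from": 1990, "str": "1990-99", "to": 1999}
# An end year whose two digits are smaller than the start's last two
# digits rolls over into the next century:
span_from_str("1990-05")  # -> {"from": 1990, "str": "1990-05", "to": 2005}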
Set the `to` value of spans if empty and sort them chronologically.
def complete_year_spans(spans):
    """Set the `to` value of spans if empty and sort them chronologically."""
    spans.sort(key=lambda x: x["from"])
    for x, y in pairwise(spans):
        if "to" not in x:
            x["to"] = y["from"] - 1
    if spans and "to" not in spans[-1]:
        spans[-1]["to"] = datetime.now().year
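For instance, assuming the pairwise helper and the datetime import are in scope:

spans = [{"from": 1990}, {"from": 1970, "to": 1979}, {"from": 1980}]
complete_year_spans(spans)
# spans is now sorted chronologically; the 1980 span gained "to": 1989
# and the trailing 1990 span was closed at the current year.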
Add new spans to the given spans list so that every year in [start, end] belongs to a span.
def extend_year_spans(spans, spanlen, start=1900, end=2014):
    """Add new spans to the given spans list so that every year in
    [start, end] belongs to a span.
    """
    extended_spans = spans[:]
    for x, y in pairwise(spans):
        # If there is a gap between two spans, fill it with as many
        # spans of length spanlen as necessary.
        for span_from in range(x["to"] + 1, y["from"], spanlen):
            extended_spans.append({"from": span_from})
    # Create spans prior to the declared ones
    for span_from in range(spans[0]["from"] - spanlen, start, -spanlen):
        extended_spans.append({"from": span_from})
    # Create spans after the declared ones
    for span_from in range(spans[-1]["to"] + 1, end, spanlen):
        extended_spans.append({"from": span_from})

    complete_year_spans(extended_spans)
    return extended_spans
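A sketch of the gap-filling behavior, assuming the helpers above are in scope:

spans = build_year_spans(["1950-59", "1980-89"])
extended = extend_year_spans(spans, 10, start=1920, end=2000)
# Gap-fills the 1960s and 1970s, prepends the 1930s and 1940s, and
# appends a span starting in 1990; complete_year_spans() then closes
# each new span's `to` year (the last span ends at the current year).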
Build a chronologically ordered list of span dicts from an unordered list of span strings.
def build_year_spans(year_spans_str):
    """Build a chronologically ordered list of span dicts from an
    unordered list of span strings.
    """
    spans = []
    for elem in year_spans_str:
        spans.append(span_from_str(elem))
    complete_year_spans(spans)
    return spans
Deduces formatting syntax from a span string.
def str2fmt(s):
    """Deduces formatting syntax from a span string."""
    regex = re.compile(
        r"(?P<bef>\D*)(?P<fromyear>\d+)(?P<sep>\D*)"
        r"(?P<toyear>\d*)(?P<after>\D*)"
    )
    m = re.match(regex, s)

    res = {
        "fromnchars": len(m.group("fromyear")),
        "tonchars": len(m.group("toyear")),
    }
    res["fmt"] = "{}%s{}{}{}".format(
        m.group("bef"),
        m.group("sep"),
        "%s" if res["tonchars"] else "",
        m.group("after"),
    )
    return res
Return a span string representation.
def format_span(fmt, yearfrom, yearto, fromnchars, tonchars):
    """Return a span string representation."""
    args = str(yearfrom)[-fromnchars:]
    if tonchars:
        args = (str(yearfrom)[-fromnchars:], str(yearto)[-tonchars:])
    return fmt % args
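These two helpers round-trip a span's formatting, e.g.:

fmt_info = str2fmt("1990-99")
# fmt_info == {"fromnchars": 4, "tonchars": 2, "fmt": "%s-%s"}

format_span(
    fmt_info["fmt"], 2000, 2009, fmt_info["fromnchars"], fmt_info["tonchars"]
)
# -> "2000-09"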
Extract the most common span length and representation format.
def extract_modes(spans):
    """Extract the most common span length and representation format."""
    rangelen = sorted([x["to"] - x["from"] + 1 for x in spans])
    deflen = sorted(rangelen, key=rangelen.count)[-1]
    reprs = [str2fmt(x["str"]) for x in spans]
    deffmt = sorted(reprs, key=reprs.count)[-1]
    return deflen, deffmt
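For example, with three decade-long spans written in the same style:

spans = build_year_spans(["1960-69", "1970-79", "1990-99"])
extract_modes(spans)
# -> (10, {"fromnchars": 4, "tonchars": 2, "fmt": "%s-%s"})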
Extract alphanumerics from each string and return a list of compiled regexes matching the character range [from...to].
def build_alpha_spans(alpha_spans_str, alpha_regexs):
    """Extract alphanumerics from each string and return a list of
    compiled regexes matching the character range [from...to].
    """
    spans = []

    for elem in alpha_spans_str:
        if elem in alpha_regexs:
            spans.append(re.compile(alpha_regexs[elem]))
        else:
            bucket = sorted([x for x in elem.lower() if x.isalnum()])
            if bucket:
                begin_index = ASCII_DIGITS.index(bucket[0])
                end_index = ASCII_DIGITS.index(bucket[-1])
            else:
                raise ui.UserError(
                    "invalid range defined for alpha bucket "
                    "'%s': no alphanumeric character found" % elem
                )
            spans.append(
                re.compile(
                    "^["
                    + ASCII_DIGITS[begin_index : end_index + 1]
                    + ASCII_DIGITS[begin_index : end_index + 1].upper()
                    + "]"
                )
            )
    return spans
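Illustration, assuming ASCII_DIGITS is the digits followed by the lowercase letters (as the index arithmetic above implies) and no custom regexes are configured:

spans = build_alpha_spans(["A-D"], {})
bool(spans[0].match("Bob Dylan"))  # -> True ("B" falls in [a-dA-D])
bool(spans[0].match("Elvis"))      # -> False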
Truncate an iterable to at most `count` items.
def prefix(it, count):
    """Truncate an iterable to at most `count` items."""
    for i, v in enumerate(it):
        if i >= count:
            break
        yield v
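For example:

list(prefix("abcdef", 3))  # -> ['a', 'b', 'c']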
Used as a key to sort releases by date, then by preferred country.
def releases_key(release, countries, original_year):
    """Used as a key to sort releases by date, then by preferred country."""
    date = release.get("date")
    if date and original_year:
        year = date.get("year", 9999)
        month = date.get("month", 99)
        day = date.get("day", 99)
    else:
        year = 9999
        month = 99
        day = 99

    # Use the index of the preferred countries to sort.
    country_key = 99
    if release.get("country"):
        for i, country in enumerate(countries):
            if country.match(release["country"]):
                country_key = i
                break

    return (year, month, day, country_key)
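Usage sketch with hand-built release dicts (hypothetical data, not from the module):

import re
from functools import partial

countries = [re.compile("GB", re.I), re.compile("US", re.I)]
releases = [
    {"date": {"year": 2001}, "country": "US"},
    {"date": {"year": 2001}, "country": "GB"},
]
releases.sort(
    key=partial(releases_key, countries=countries, original_year=True)
)
# The GB release sorts first: identical dates, lower preferred-country
# index (0 vs. 1).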
Gets metadata for a file from Acoustid and populates the _matches, _fingerprints, and _acoustids dictionaries accordingly.
def acoustid_match(log, path):
    """Gets metadata for a file from Acoustid and populates the
    _matches, _fingerprints, and _acoustids dictionaries accordingly.
    """
    try:
        duration, fp = acoustid.fingerprint_file(util.syspath(path))
    except acoustid.FingerprintGenerationError as exc:
        log.error(
            "fingerprinting of {0} failed: {1}",
            util.displayable_path(repr(path)),
            exc,
        )
        return None
    fp = fp.decode()
    _fingerprints[path] = fp
    try:
        res = acoustid.lookup(API_KEY, fp, duration, meta="recordings releases")
    except acoustid.AcoustidError as exc:
        log.debug(
            "fingerprint matching {0} failed: {1}",
            util.displayable_path(repr(path)),
            exc,
        )
        return None
    log.debug("chroma: fingerprinted {0}", util.displayable_path(repr(path)))

    # Ensure the response is usable and parse it.
    if res["status"] != "ok" or not res.get("results"):
        log.debug("no match found")
        return None
    result = res["results"][0]  # Best match.
    if result["score"] < SCORE_THRESH:
        log.debug("no results above threshold")
        return None
    _acoustids[path] = result["id"]

    # Get recording and releases from the result
    if not result.get("recordings"):
        log.debug("no recordings found")
        return None
    recording_ids = []
    releases = []
    for recording in result["recordings"]:
        recording_ids.append(recording["id"])
        if "releases" in recording:
            releases.extend(recording["releases"])

    # The releases list is essentially in random order from the Acoustid
    # lookup, so we optionally sort it using the match.preferred
    # configuration options: 'original_year' to sort the earliest first
    # and 'countries' to then sort preferred countries first.
    country_patterns = config["match"]["preferred"]["countries"].as_str_seq()
    countries = [re.compile(pat, re.I) for pat in country_patterns]
    original_year = config["match"]["preferred"]["original_year"]
    releases.sort(
        key=partial(
            releases_key, countries=countries, original_year=original_year
        )
    )
    release_ids = [rel["id"] for rel in releases]

    log.debug(
        "matched recordings {0} on releases {1}", recording_ids, release_ids
    )
    _matches[path] = recording_ids, release_ids
Given an iterable of Items, determines (according to Acoustid) which releases the items have in common. Generates release IDs.
def _all_releases(items):
    """Given an iterable of Items, determines (according to Acoustid)
    which releases the items have in common. Generates release IDs.
    """
    # Count the number of "hits" for each release.
    relcounts = defaultdict(int)
    for item in items:
        if item.path not in _matches:
            continue

        _, release_ids = _matches[item.path]
        for release_id in release_ids:
            relcounts[release_id] += 1

    for release_id, count in relcounts.items():
        if float(count) / len(items) > COMMON_REL_THRESH:
            yield release_id
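A hand-seeded sketch (hypothetical paths, IDs, and item class; any COMMON_REL_THRESH above one half gives this result):

_matches[b"/a.mp3"] = (["rec1"], ["rel1", "rel2"])
_matches[b"/b.mp3"] = (["rec2"], ["rel1"])

class FakeItem:
    def __init__(self, path):
        self.path = path

list(_all_releases([FakeItem(b"/a.mp3"), FakeItem(b"/b.mp3")]))
# -> ["rel1"]  ("rel1" appears on 2/2 items; "rel2" on only 1/2)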
Fingerprint each item in the task for later use during the autotagging candidate search.
def fingerprint_task(log, task, session):
    """Fingerprint each item in the task for later use during the
    autotagging candidate search.
    """
    items = task.items if task.is_album else [task.item]
    for item in items:
        acoustid_match(log, item.path)
Apply Acoustid metadata (fingerprint and ID) to the task's items.
def apply_acoustid_metadata(task, session):
    """Apply Acoustid metadata (fingerprint and ID) to the task's items."""
    for item in task.imported_items():
        if item.path in _fingerprints:
            item.acoustid_fingerprint = _fingerprints[item.path]
        if item.path in _acoustids:
            item.acoustid_id = _acoustids[item.path]
Submit fingerprints for the items to the Acoustid server.
def submit_items(log, userkey, items, chunksize=64):
    """Submit fingerprints for the items to the Acoustid server."""
    data = []  # The running list of dictionaries to submit.

    def submit_chunk():
        """Submit the current accumulated fingerprint data."""
        log.info("submitting {0} fingerprints", len(data))
        try:
            acoustid.submit(API_KEY, userkey, data)
        except acoustid.AcoustidError as exc:
            log.warning("acoustid submission error: {0}", exc)
        del data[:]

    for item in items:
        fp = fingerprint_item(log, item, write=ui.should_write())

        # Construct a submission dictionary for this item.
        item_data = {
            "duration": int(item.length),
            "fingerprint": fp,
        }
        if item.mb_trackid:
            item_data["mbid"] = item.mb_trackid
            log.debug("submitting MBID")
        else:
            item_data.update(
                {
                    "track": item.title,
                    "artist": item.artist,
                    "album": item.album,
                    "albumartist": item.albumartist,
                    "year": item.year,
                    "trackno": item.track,
                    "discno": item.disc,
                }
            )
            log.debug("submitting textual metadata")
        data.append(item_data)

        # If we have enough data, submit a chunk.
        if len(data) >= chunksize:
            submit_chunk()

    # Submit remaining data in a final chunk.
    if data:
        submit_chunk()
Get the fingerprint for an Item. If the item already has a fingerprint, it is not regenerated. If fingerprint generation fails, return None. If the item is associated with a library, it is saved to the database. If `write` is set, then the new fingerprint is also written to the file's metadata.
def fingerprint_item(log, item, write=False):
    """Get the fingerprint for an Item. If the item already has a
    fingerprint, it is not regenerated. If fingerprint generation fails,
    return None. If the item is associated with a library, it is saved
    to the database. If `write` is set, then the new fingerprint is also
    written to the file's metadata.
    """
    # Get a fingerprint and length for this track.
    if not item.length:
        log.info("{0}: no duration available", util.displayable_path(item.path))
    elif item.acoustid_fingerprint:
        if write:
            log.info(
                "{0}: fingerprint exists, skipping",
                util.displayable_path(item.path),
            )
        else:
            log.info(
                "{0}: using existing fingerprint",
                util.displayable_path(item.path),
            )
            return item.acoustid_fingerprint
    else:
        log.info("{0}: fingerprinting", util.displayable_path(item.path))
        try:
            _, fp = acoustid.fingerprint_file(util.syspath(item.path))
            item.acoustid_fingerprint = fp.decode()
            if write:
                log.info(
                    "{0}: writing fingerprint", util.displayable_path(item.path)
                )
                item.try_write()
            if item._db:
                item.store()
            return item.acoustid_fingerprint
        except acoustid.FingerprintGenerationError as exc:
            log.info("fingerprint generation failed: {0}", exc)