Return the path with its extension replaced by `ext`. The new extension must not contain a leading dot.
def replace_ext(path, ext):
    """Return the path with its extension replaced by `ext`.

    The new extension must not contain a leading dot.
    """
    ext_dot = b"." + ext
    return os.path.splitext(path)[0] + ext_dot
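As a quick illustration (assuming `os` is imported; the paths and extensions are bytes, as the `b"."` prefix implies):

import os

# b"/music/song.flac" with ext=b"mp3" becomes b"/music/song.mp3".
assert replace_ext(b"/music/song.flac", b"mp3") == b"/music/song.mp3"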
Return the command template and the extension from the config.
def get_format(fmt=None):
    """Return the command template and the extension from the config."""
    if not fmt:
        fmt = config["convert"]["format"].as_str().lower()
    fmt = ALIASES.get(fmt, fmt)

    try:
        format_info = config["convert"]["formats"][fmt].get(dict)
        command = format_info["command"]
        extension = format_info.get("extension", fmt)
    except KeyError:
        raise ui.UserError(
            'convert: format {} needs the "command" field'.format(fmt)
        )
    except ConfigTypeError:
        command = config["convert"]["formats"][fmt].get(str)
        extension = fmt

    # Convenience and backwards-compatibility shortcuts.
    keys = config["convert"].keys()
    if "command" in keys:
        command = config["convert"]["command"].as_str()
    elif "opts" in keys:
        # Undocumented option for backwards compatibility with < 1.3.1.
        command = "ffmpeg -i $source -y {} $dest".format(
            config["convert"]["opts"].as_str()
        )
    if "extension" in keys:
        extension = config["convert"]["extension"].as_str()

    return (command.encode("utf-8"), extension.encode("utf-8"))
Determine whether the item should be transcoded as part of conversion (i.e., its bitrate is high or it has the wrong format).
def should_transcode(item, fmt):
    """Determine whether the item should be transcoded as part of
    conversion (i.e., its bitrate is high or it has the wrong format).
    """
    no_convert_queries = config["convert"]["no_convert"].as_str_seq()
    if no_convert_queries:
        for query_string in no_convert_queries:
            query, _ = parse_query_string(query_string, Item)
            if query.match(item):
                return False
    if config["convert"]["never_convert_lossy_files"] and not (
        item.format.lower() in LOSSLESS_FORMATS
    ):
        return False
    maxbr = config["convert"]["max_bitrate"].get(Optional(int))
    if maxbr is not None and item.bitrate >= 1000 * maxbr:
        return True
    return fmt.lower() != item.format.lower()
Open `filename` in a text editor.
def edit(filename, log):
    """Open `filename` in a text editor."""
    cmd = shlex.split(util.editor_command())
    cmd.append(filename)
    log.debug("invoking editor command: {!r}", cmd)
    try:
        subprocess.call(cmd)
    except OSError as exc:
        raise ui.UserError(
            "could not run editor command {!r}: {}".format(cmd[0], exc)
        )
Dump a sequence of dictionaries as YAML for editing.
def dump(arg):
    """Dump a sequence of dictionaries as YAML for editing."""
    return yaml.safe_dump_all(
        arg,
        allow_unicode=True,
        default_flow_style=False,
    )
Read a sequence of YAML documents back to a list of dictionaries with string keys. Can raise a `ParseError`.
def load(s):
    """Read a sequence of YAML documents back to a list of dictionaries
    with string keys. Can raise a `ParseError`.
    """
    try:
        out = []
        for d in yaml.safe_load_all(s):
            if not isinstance(d, dict):
                raise ParseError(
                    "each entry must be a dictionary; found {}".format(
                        type(d).__name__
                    )
                )
            # Convert all keys to strings. They started out as strings,
            # but the user may have inadvertently messed this up.
            out.append({str(k): v for k, v in d.items()})
    except yaml.YAMLError as e:
        raise ParseError(f"invalid YAML: {e}")
    return out
Check whether the `value` is safe to represent in YAML and trust as returned from parsed YAML. This ensures that values do not change their type when the user edits their YAML representation.
def _safe_value(obj, key, value):
    """Check whether the `value` is safe to represent in YAML and trust
    as returned from parsed YAML.

    This ensures that values do not change their type when the user
    edits their YAML representation.
    """
    typ = obj._type(key)
    return isinstance(typ, SAFE_TYPES) and isinstance(value, typ.model_type)
Represent `obj`, a `dbcore.Model` object, as a dictionary for serialization. Only include the given `fields` if provided; otherwise, include everything. The resulting dictionary's keys are strings and the values are safely YAML-serializable types.
def flatten(obj, fields):
    """Represent `obj`, a `dbcore.Model` object, as a dictionary for
    serialization. Only include the given `fields` if provided;
    otherwise, include everything.

    The resulting dictionary's keys are strings and the values are
    safely YAML-serializable types.
    """
    # Format each value.
    d = {}
    for key in obj.keys():
        value = obj[key]
        if _safe_value(obj, key, value):
            # A safe value that is faithfully representable in YAML.
            d[key] = value
        else:
            # A value that should be edited as a string.
            d[key] = obj.formatted()[key]

    # Possibly filter field names.
    if fields:
        return {k: v for k, v in d.items() if k in fields}
    else:
        return d
Set the fields of a `dbcore.Model` object according to a dictionary. This is the opposite of `flatten`. The `data` dictionary should have strings as values.
def apply_(obj, data):
    """Set the fields of a `dbcore.Model` object according to a
    dictionary.

    This is the opposite of `flatten`. The `data` dictionary should have
    strings as values.
    """
    for key, value in data.items():
        if _safe_value(obj, key, value):
            # A safe value *stayed* represented as a safe type. Assign it
            # directly.
            obj[key] = value
        else:
            # Either the field was stringified originally or the user changed
            # it from a safe type to an unsafe one. Parse it as a string.
            obj.set_parse(key, str(value))
Show the list of affected objects (items or albums) and confirm that the user wants to modify their artwork. `album` is a Boolean indicating whether these are albums (as opposed to items).
def _confirm(objs, album):
    """Show the list of affected objects (items or albums) and confirm
    that the user wants to modify their artwork.

    `album` is a Boolean indicating whether these are albums (as opposed
    to items).
    """
    noun = "album" if album else "file"
    prompt = "Modify artwork for {} {}{} (Y/n)?".format(
        len(objs), noun, "s" if len(objs) > 1 else ""
    )

    # Show all the items or albums.
    for obj in objs:
        print_(format(obj))

    # Confirm with user.
    return ui.input_yn(prompt)
Returns a joined URL. Takes host, port and endpoint and generates a valid Emby API URL.

:param host: Hostname of the Emby server
:param port: Port number of the Emby server
:param endpoint: API endpoint
:type host: str
:type port: int
:type endpoint: str
:returns: Full API URL
:rtype: str
def api_url(host, port, endpoint):
    """Returns a joined URL.

    Takes host, port and endpoint and generates a valid Emby API URL.

    :param host: Hostname of the Emby server
    :param port: Port number of the Emby server
    :param endpoint: API endpoint
    :type host: str
    :type port: int
    :type endpoint: str
    :returns: Full API URL
    :rtype: str
    """
    # Check if http or https is defined as host and create hostname.
    hostname_list = [host]
    if host.startswith("http://") or host.startswith("https://"):
        hostname = "".join(hostname_list)
    else:
        hostname_list.insert(0, "http://")
        hostname = "".join(hostname_list)

    joined = urljoin(
        "{hostname}:{port}".format(hostname=hostname, port=port), endpoint
    )

    scheme, netloc, path, query_string, fragment = urlsplit(joined)
    query_params = parse_qs(query_string)

    query_params["format"] = ["json"]
    new_query_string = urlencode(query_params, doseq=True)

    return urlunsplit((scheme, netloc, path, new_query_string, fragment))
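A hypothetical call (the host and port are illustrative, not a real server):

# A bare hostname gets an "http://" prefix and a "format=json" query.
api_url("localhost", 8096, "/Users/Public")
# -> "http://localhost:8096/Users/Public?format=json"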
Returns a dict with username and its encoded password.

:param username: Emby username
:param password: Emby password
:type username: str
:type password: str
:returns: Dictionary with username and encoded password
:rtype: dict
def password_data(username, password):
    """Returns a dict with username and its encoded password.

    :param username: Emby username
    :param password: Emby password
    :type username: str
    :type password: str
    :returns: Dictionary with username and encoded password
    :rtype: dict
    """
    return {
        "username": username,
        "password": hashlib.sha1(password.encode("utf-8")).hexdigest(),
        "passwordMd5": hashlib.md5(password.encode("utf-8")).hexdigest(),
    }
Return header dict that is needed to talk to the Emby API.

:param user_id: Emby user ID
:param token: Authentication token for Emby
:type user_id: str
:type token: str
:returns: Headers for requests
:rtype: dict
def create_headers(user_id, token=None):
    """Return header dict that is needed to talk to the Emby API.

    :param user_id: Emby user ID
    :param token: Authentication token for Emby
    :type user_id: str
    :type token: str
    :returns: Headers for requests
    :rtype: dict
    """
    headers = {}

    authorization = (
        'MediaBrowser UserId="{user_id}", '
        'Client="other", '
        'Device="beets", '
        'DeviceId="beets", '
        'Version="0.0.0"'
    ).format(user_id=user_id)

    headers["x-emby-authorization"] = authorization

    if token:
        headers["x-mediabrowser-token"] = token

    return headers
Return token for a user.

:param host: Emby host
:param port: Emby port
:param headers: Headers for requests
:param auth_data: Username and encoded password for authentication
:type host: str
:type port: int
:type headers: dict
:type auth_data: dict
:returns: Access token
:rtype: str
def get_token(host, port, headers, auth_data):
    """Return token for a user.

    :param host: Emby host
    :param port: Emby port
    :param headers: Headers for requests
    :param auth_data: Username and encoded password for authentication
    :type host: str
    :type port: int
    :type headers: dict
    :type auth_data: dict
    :returns: Access token
    :rtype: str
    """
    url = api_url(host, port, "/Users/AuthenticateByName")
    r = requests.post(url, headers=headers, data=auth_data)

    return r.json().get("AccessToken")
Return a list of users from the server that match the username, or an empty list if there is no such user.

:param host: Emby host
:param port: Emby port
:param username: Username
:type host: str
:type port: int
:type username: str
:returns: Matched users
:rtype: list
def get_user(host, port, username):
    """Return a list of users from the server that match the username,
    or an empty list if there is no such user.

    :param host: Emby host
    :param port: Emby port
    :param username: Username
    :type host: str
    :type port: int
    :type username: str
    :returns: Matched users
    :rtype: list
    """
    url = api_url(host, port, "/Users/Public")
    r = requests.get(url)

    user = [i for i in r.json() if i["Name"] == username]

    return user
Like `requests.get`, but logs the effective URL to the specified `log` at the `DEBUG` level. Use the optional `message` parameter to specify what to log before the URL. By default, the string is "getting URL". Also sets the User-Agent header to indicate beets.
def _logged_get(log, *args, **kwargs):
    """Like `requests.get`, but logs the effective URL to the specified
    `log` at the `DEBUG` level.

    Use the optional `message` parameter to specify what to log before
    the URL. By default, the string is "getting URL".

    Also sets the User-Agent header to indicate beets.
    """
    # Use some arguments with the `send` call but most with the
    # `Request` construction. This is a cheap, magic-filled way to
    # emulate `requests.get` or, more pertinently,
    # `requests.Session.request`.
    req_kwargs = kwargs
    send_kwargs = {}
    for arg in ("stream", "verify", "proxies", "cert", "timeout"):
        if arg in kwargs:
            send_kwargs[arg] = req_kwargs.pop(arg)

    # Our special logging message parameter.
    if "message" in kwargs:
        message = kwargs.pop("message")
    else:
        message = "getting URL"

    req = requests.Request("GET", *args, **req_kwargs)

    with requests.Session() as s:
        s.headers = {"User-Agent": "beets"}
        prepped = s.prepare_request(req)
        settings = s.merge_environment_settings(
            prepped.url, {}, None, None, None
        )
        send_kwargs.update(settings)
        log.debug("{}: {}", message, prepped.url)
        return s.send(prepped, **send_kwargs)
Determine whether a sequence holds identical elements.
def equal(seq):
    """Determine whether a sequence holds identical elements."""
    return len(set(seq)) <= 1
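A few illustrative checks:

assert equal([1, 1, 1])       # all elements identical
assert equal([])              # vacuously true for an empty sequence
assert not equal(["a", "b"])  # differing elements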
Do all items in `matchdict`, whose values are dictionaries, have the same value for `field`? (If they do, the field is probably not the title.)
def equal_fields(matchdict, field):
    """Do all items in `matchdict`, whose values are dictionaries, have
    the same value for `field`? (If they do, the field is probably not
    the title.)
    """
    return equal(m[field] for m in matchdict.values())
If all the filenames in the item/filename mapping match the pattern, return a dictionary mapping the items to dictionaries giving the value for each named subpattern in the match. Otherwise, return None.
def all_matches(names, pattern):
    """If all the filenames in the item/filename mapping match the
    pattern, return a dictionary mapping the items to dictionaries
    giving the value for each named subpattern in the match. Otherwise,
    return None.
    """
    matches = {}
    for item, name in names.items():
        m = re.match(pattern, name, re.IGNORECASE)
        if m and m.groupdict():
            # Only yield a match when the regex applies *and* has
            # capture groups. Otherwise, no information can be extracted
            # from the filename.
            matches[item] = m.groupdict()
        else:
            return None
    return matches
Determine whether a given title is "bad" (empty or otherwise meaningless) and in need of replacement.
def bad_title(title):
    """Determine whether a given title is "bad" (empty or otherwise
    meaningless) and in need of replacement.
    """
    for pat in BAD_TITLE_PATTERNS:
        if re.match(pat, title, re.IGNORECASE):
            return True
    return False
Given a mapping from items to field dicts, apply the fields to the objects.
def apply_matches(d, log):
    """Given a mapping from items to field dicts, apply the fields to
    the objects.
    """
    some_map = list(d.values())[0]
    keys = some_map.keys()

    # Only proceed if the "tag" field is equal across all filenames.
    if "tag" in keys and not equal_fields(d, "tag"):
        return

    # Given both an "artist" and "title" field, assume that one is
    # *actually* the artist, which must be uniform, and use the other
    # for the title. This, of course, won't work for VA albums.
    if "artist" in keys:
        if equal_fields(d, "artist"):
            artist = some_map["artist"]
            title_field = "title"
        elif equal_fields(d, "title"):
            artist = some_map["title"]
            title_field = "artist"
        else:
            # Both vary. Abort.
            return

        for item in d:
            if not item.artist:
                item.artist = artist
                log.info("Artist replaced with: {}".format(item.artist))

    # No artist field: remaining field is the title.
    else:
        title_field = "title"

    # Apply the title and track.
    for item in d:
        if bad_title(item.title):
            item.title = str(d[item][title_field])
            log.info("Title replaced with: {}".format(item.title))

        if "track" in d[item] and item.track == 0:
            item.track = int(d[item]["track"])
            log.info("Track replaced with: {}".format(item.track))
Given an artist string, split the "main" artist from any artist on the right-hand side of a string like "feat". Return the main artist, which is always a string, and the featuring artist, which may be a string or None if none is present.
def split_on_feat(artist):
    """Given an artist string, split the "main" artist from any artist
    on the right-hand side of a string like "feat". Return the main
    artist, which is always a string, and the featuring artist, which
    may be a string or None if none is present.
    """
    # Split on the first "feat".
    regex = re.compile(plugins.feat_tokens(), re.IGNORECASE)
    parts = [s.strip() for s in regex.split(artist, 1)]
    if len(parts) == 1:
        return parts[0], None
    else:
        return tuple(parts)
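Assuming `plugins.feat_tokens()` matches markers such as "feat." (the exact pattern lives in beets), a call behaves roughly like:

main, feat = split_on_feat("Alice feat. Bob")   # -> ("Alice", "Bob")
main, feat = split_on_feat("Alice")             # -> ("Alice", None)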
Determine whether the title contains a "featured" marker.
def contains_feat(title):
    """Determine whether the title contains a "featured" marker."""
    return bool(re.search(plugins.feat_tokens(), title, flags=re.IGNORECASE))
Attempt to find featured artists in the item's artist fields and return the results. Returns None if no featured artist is found.
def find_feat_part(artist, albumartist):
    """Attempt to find featured artists in the item's artist fields and
    return the results. Returns None if no featured artist is found.
    """
    # Look for the album artist in the artist field. If it's not
    # present, give up.
    albumartist_split = artist.split(albumartist, 1)
    if len(albumartist_split) <= 1:
        return None

    # If the last element of the split (the right-hand side of the
    # album artist) is nonempty, then it probably contains the
    # featured artist.
    elif albumartist_split[1] != "":
        # Extract the featured artist from the right-hand side.
        _, feat_part = split_on_feat(albumartist_split[1])
        return feat_part

    # Otherwise, if there's nothing on the right-hand side, look for a
    # featuring artist on the left-hand side.
    else:
        lhs, rhs = split_on_feat(albumartist_split[0])
        if lhs:
            return lhs

    return None
Given an ImportTask, produce a short string identifying the object.
def summary(task):
    """Given an ImportTask, produce a short string identifying the
    object.
    """
    if task.is_album:
        return f"{task.cur_artist} - {task.cur_album}"
    else:
        return f"{task.item.artist} - {task.item.title}"
Build a unique m3u filename by inserting the current date between the given basename and the file extension.
def _build_m3u_session_filename(basename):
    """Build a unique m3u filename by inserting the current date between
    the given basename and the file extension.
    """
    date = datetime.datetime.now().strftime("%Y%m%d_%Hh%M")
    basename = re.sub(r"(\.m3u|\.M3U)", "", basename)
    path = normpath(
        os.path.join(
            config["importfeeds"]["dir"].as_filename(),
            f"{basename}_{date}.m3u",
        )
    )
    return path
Build a unique m3u filename by appending the given basename to the current date.
def _build_m3u_filename(basename):
    """Build a unique m3u filename by appending the given basename to
    the current date.
    """
    basename = re.sub(r"[\s,/\\'\"]", "_", basename)
    date = datetime.datetime.now().strftime("%Y%m%d_%Hh%M")
    path = normpath(
        os.path.join(
            config["importfeeds"]["dir"].as_filename(),
            date + "_" + basename + ".m3u",
        )
    )
    return path
Append relative paths to items into m3u file.
def _write_m3u(m3u_path, items_paths):
    """Append relative paths to items into the m3u file."""
    mkdirall(m3u_path)
    with open(syspath(m3u_path), "ab") as f:
        for path in items_paths:
            f.write(path + b"\n")
Print, with optional formatting, the fields of a single element. If no format string `fmt` is passed, the entries in `data` are printed one per line, with the format 'field: value'. If `fmt` is not `None`, the `item` is printed according to `fmt`, using the `Item.__format__` machinery.
def print_data(data, item=None, fmt=None):
    """Print, with optional formatting, the fields of a single element.

    If no format string `fmt` is passed, the entries in `data` are
    printed one per line, with the format 'field: value'. If `fmt` is
    not `None`, the `item` is printed according to `fmt`, using the
    `Item.__format__` machinery.
    """
    if fmt:
        # Use fmt specified by the user.
        ui.print_(format(item, fmt))
        return

    path = displayable_path(item.path) if item else None
    formatted = {}
    for key, value in data.items():
        if isinstance(value, list):
            formatted[key] = "; ".join(value)
        if value is not None:
            formatted[key] = value

    if len(formatted) == 0:
        return

    maxwidth = max(len(key) for key in formatted)
    lineformat = f"{{0:>{maxwidth}}}: {{1}}"

    if path:
        ui.print_(displayable_path(path))

    for field in sorted(formatted):
        value = formatted[field]
        if isinstance(value, list):
            value = "; ".join(value)
        ui.print_(lineformat.format(field, value))
Print only the keys (field names) for an item.
def print_data_keys(data, item=None):
    """Print only the keys (field names) for an item."""
    path = displayable_path(item.path) if item else None
    formatted = []
    for key, value in data.items():
        formatted.append(key)

    if len(formatted) == 0:
        return

    line_format = "{0}{{0}}".format(" " * 4)
    if path:
        ui.print_(displayable_path(path))

    for field in sorted(formatted):
        ui.print_(line_format.format(field))
Given Python code for a function body, return a compiled callable that invokes that code.
def _compile_func(body):
    """Given Python code for a function body, return a compiled
    callable that invokes that code.
    """
    body = "def {}():\n    {}".format(
        FUNC_NAME, body.replace("\n", "\n    ")
    )
    code = compile(body, "inline", "exec")
    env = {}
    eval(code, env)
    return env[FUNC_NAME]
Send a request to the Kodi API to start a library refresh.
def update_kodi(host, port, user, password):
    """Send a request to the Kodi API to start a library refresh."""
    url = f"http://{host}:{port}/jsonrpc"

    # Content-Type: application/json is mandatory according to the Kodi
    # JSON-RPC documentation.
    headers = {"Content-Type": "application/json"}

    # Create the payload. Id seems to be mandatory.
    payload = {"jsonrpc": "2.0", "method": "AudioLibrary.Scan", "id": 1}
    r = requests.post(url, auth=(user, password), json=payload, headers=headers)

    return r
Fetch a page of a user's top tracks and return them in the JSON format:

[
    {
        "mbid": "...",
        "artist": "...",
        "title": "...",
        "playcount": "..."
    }
]
def fetch_tracks(user, page, limit):
    """Fetch a page of a user's top tracks and return them in the JSON
    format:

        [
            {
                "mbid": "...",
                "artist": "...",
                "title": "...",
                "playcount": "..."
            }
        ]
    """
    network = pylast.LastFMNetwork(api_key=config["lastfm"]["api_key"])
    user_obj = CustomUser(user, network)
    results, total_pages = user_obj.get_top_tracks_by_page(
        limit=limit, page=page
    )
    return [
        {
            "mbid": track.item.mbid if track.item.mbid else "",
            "artist": {"name": track.item.artist.name},
            "name": track.item.title,
            "playcount": track.weight,
        }
        for track in results
    ], total_pages
Query command with head/tail.
def lslimit(lib, opts, args):
    """Query command with head/tail."""
    if (opts.head is not None) and (opts.tail is not None):
        raise ValueError("Only use one of --head and --tail")
    if (opts.head or opts.tail or 0) < 0:
        raise ValueError("Limit value must be non-negative")

    query = decargs(args)
    if opts.album:
        objs = lib.albums(query)
    else:
        objs = lib.items(query)

    if opts.head is not None:
        objs = islice(objs, opts.head)
    elif opts.tail is not None:
        objs = deque(objs, opts.tail)

    for obj in objs:
        print_(format(obj))
Resolve &#xxx; HTML entities (and some others).
def unescape(text):
    """Resolve &#xxx; HTML entities (and some others)."""
    if isinstance(text, bytes):
        text = text.decode("utf-8", "ignore")
    out = text.replace("&nbsp;", " ")

    def replchar(m):
        num = m.group(1)
        return unichar(int(num))

    out = re.sub("&#(\\d+);", replchar, out)
    return out
Yield pairs of artists and titles to search for. The first item in the pair is the name of the artist, the second item is a list of song names. In addition to the artist and title obtained from the `item`, the method tries to strip extra information like parenthesized suffixes and featured artists from the strings and add them as candidates. The artist sort name is added as a fallback candidate to help in cases where the artist name includes special characters or is in a non-Latin script. The method also tries to split multiple titles separated with `/`.
def search_pairs(item):
    """Yield pairs of artists and titles to search for.

    The first item in the pair is the name of the artist, the second
    item is a list of song names.

    In addition to the artist and title obtained from the `item`, the
    method tries to strip extra information like parenthesized suffixes
    and featured artists from the strings and add them as candidates.
    The artist sort name is added as a fallback candidate to help in
    cases where the artist name includes special characters or is in a
    non-Latin script. The method also tries to split multiple titles
    separated with `/`.
    """

    def generate_alternatives(string, patterns):
        """Generate string alternatives by extracting first matching
        group for each given pattern.
        """
        alternatives = [string]
        for pattern in patterns:
            match = re.search(pattern, string, re.IGNORECASE)
            if match:
                alternatives.append(match.group(1))
        return alternatives

    title, artist, artist_sort = item.title, item.artist, item.artist_sort

    patterns = [
        # Remove any featuring artists from the artist's name.
        rf"(.*?) {plugins.feat_tokens()}"
    ]
    artists = generate_alternatives(artist, patterns)
    # Use the artist_sort as fallback only if it differs from artist to
    # avoid repeated remote requests with the same search terms.
    if artist != artist_sort:
        artists.append(artist_sort)

    patterns = [
        # Remove a parenthesized suffix from a title string. Common
        # examples include (live), (remix), and (acoustic).
        r"(.+?)\s+[(].*[)]$",
        # Remove any featuring artists from the title.
        r"(.*?) {}".format(plugins.feat_tokens(for_artist=False)),
        # Remove part of title after colon ':' for songs with subtitles.
        r"(.+?)\s*:.*",
    ]
    titles = generate_alternatives(title, patterns)

    # Check for a dual song (e.g. Pink Floyd - Speak to Me / Breathe)
    # and each of them.
    multi_titles = []
    for title in titles:
        multi_titles.append([title])
        if "/" in title:
            multi_titles.append([x.strip() for x in title.split("/")])

    return itertools.product(artists, multi_titles)
Make a URL-safe, human-readable version of the given text.

This will do the following:

1. decode unicode characters into ASCII
2. shift everything to lowercase
3. strip whitespace
4. replace other non-word characters with dashes
5. strip extra dashes

This somewhat duplicates the :func:`Google.slugify` function but slugify is not as generic as this one, which can be reused elsewhere.
def slug(text):
    """Make a URL-safe, human-readable version of the given text.

    This will do the following:

    1. decode unicode characters into ASCII
    2. shift everything to lowercase
    3. strip whitespace
    4. replace other non-word characters with dashes
    5. strip extra dashes

    This somewhat duplicates the :func:`Google.slugify` function but
    slugify is not as generic as this one, which can be reused
    elsewhere.
    """
    return re.sub(r"\W+", "-", unidecode(text).lower().strip()).strip("-")
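For example:

slug("Beets: the Music Geek's Media Organizer")
# -> "beets-the-music-geek-s-media-organizer"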
Remove the first/last line of the text if it contains the word 'lyrics', e.g. 'Lyrics by songsdatabase.com'.
def remove_credits(text):
    """Remove the first/last line of the text if it contains the word
    'lyrics', e.g. 'Lyrics by songsdatabase.com'.
    """
    textlines = text.split("\n")
    credits = None
    for i in (0, -1):
        if textlines and "lyrics" in textlines[i].lower():
            credits = textlines.pop(i)
    if credits:
        text = "\n".join(textlines)
    return text
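For example:

text = "Lyrics by songsdatabase.com\nFirst line\nLast line"
remove_credits(text)  # -> "First line\nLast line"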
Clean up HTML
def _scrape_strip_cruft(html, plain_text_out=False):
    """Clean up HTML."""
    html = unescape(html)

    html = html.replace("\r", "\n")  # Normalize EOL.
    html = re.sub(r" +", " ", html)  # Whitespaces collapse.
    html = BREAK_RE.sub("\n", html)  # <br> eats up surrounding '\n'.
    html = re.sub(r"(?s)<(script).*?</\1>", "", html)  # Strip script tags.
    html = re.sub("\u2005", " ", html)  # Replace unicode with regular space.

    if plain_text_out:  # Strip remaining HTML tags.
        html = COMMENT_RE.sub("", html)
        html = TAG_RE.sub("", html)

    html = "\n".join([x.strip() for x in html.strip().split("\n")])
    html = re.sub(r"\n{3,}", r"\n\n", html)
    return html
Scrape lyrics from an HTML document. If no lyrics can be found, return None instead.
def scrape_lyrics_from_html(html):
    """Scrape lyrics from an HTML document. If no lyrics can be found,
    return None instead.
    """

    def is_text_notcode(text):
        if not text:
            return False
        length = len(text)
        return (
            length > 20
            and text.count(" ") > length / 25
            and (text.find("{") == -1 or text.find(";") == -1)
        )

    html = _scrape_strip_cruft(html)
    html = _scrape_merge_paragraphs(html)

    # Extract all long text blocks that are not code.
    soup = try_parse_html(
        html, parse_only=SoupStrainer(string=is_text_notcode)
    )
    if not soup:
        return None

    # Get the longest text element (if any).
    strings = sorted(soup.stripped_strings, key=len, reverse=True)
    if strings:
        return strings[0]
    else:
        return None
Call a MusicBrainz API function and catch exceptions.
def mb_call(func, *args, **kwargs):
    """Call a MusicBrainz API function and catch exceptions."""
    try:
        return func(*args, **kwargs)
    except musicbrainzngs.AuthenticationError:
        raise ui.UserError("authentication with MusicBrainz failed")
    except (musicbrainzngs.ResponseError, musicbrainzngs.NetworkError) as exc:
        raise ui.UserError(f"MusicBrainz API error: {exc}")
    except musicbrainzngs.UsageError:
        raise ui.UserError("MusicBrainz credentials missing")
Add all of the release IDs to the indicated collection. Multiple requests are made if there are many release IDs to submit.
def submit_albums(collection_id, release_ids):
    """Add all of the release IDs to the indicated collection. Multiple
    requests are made if there are many release IDs to submit.
    """
    for i in range(0, len(release_ids), SUBMISSION_CHUNK_SIZE):
        chunk = release_ids[i : i + SUBMISSION_CHUNK_SIZE]
        mb_call(
            musicbrainzngs.add_releases_to_collection, collection_id, chunk
        )
Return number of missing items in `album`.
def _missing_count(album):
    """Return the number of missing items in `album`."""
    return (album.albumtotal or 0) - len(album.items())
Build and return `item` from `track_info` and `album_info` objects. `item` is missing whatever fields cannot be obtained from MusicBrainz alone (encoder, rg_track_gain, rg_track_peak, rg_album_gain, rg_album_peak, original_year, original_month, original_day, length, bitrate, format, samplerate, bitdepth, channels, mtime).
def _item(track_info, album_info, album_id):
    """Build and return `item` from `track_info` and `album_info`
    objects. `item` is missing whatever fields cannot be obtained from
    MusicBrainz alone (encoder, rg_track_gain, rg_track_peak,
    rg_album_gain, rg_album_peak, original_year, original_month,
    original_day, length, bitrate, format, samplerate, bitdepth,
    channels, mtime).
    """
    t = track_info
    a = album_info
    return Item(
        **{
            "album_id": album_id,
            "album": a.album,
            "albumartist": a.artist,
            "albumartist_credit": a.artist_credit,
            "albumartist_sort": a.artist_sort,
            "albumdisambig": a.albumdisambig,
            "albumstatus": a.albumstatus,
            "albumtype": a.albumtype,
            "artist": t.artist,
            "artist_credit": t.artist_credit,
            "artist_sort": t.artist_sort,
            "asin": a.asin,
            "catalognum": a.catalognum,
            "comp": a.va,
            "country": a.country,
            "day": a.day,
            "disc": t.medium,
            "disctitle": t.disctitle,
            "disctotal": a.mediums,
            "label": a.label,
            "language": a.language,
            "length": t.length,
            "mb_albumid": a.album_id,
            "mb_artistid": t.artist_id,
            "mb_releasegroupid": a.releasegroup_id,
            "mb_trackid": t.track_id,
            "media": t.media,
            "month": a.month,
            "script": a.script,
            "title": t.title,
            "track": t.index,
            "tracktotal": len(a.tracks),
            "year": a.year,
        }
    )
Try to determine if the path is a URL.
def is_url(path):
    """Try to determine if the path is a URL."""
    if isinstance(path, bytes):
        # If it's bytes, then it's a path.
        return False
    return path.split("://", 1)[0] in ["http", "https"]
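A few illustrative checks:

assert is_url("https://example.com/art.jpg")
assert not is_url("/home/user/art.jpg")   # no scheme, so a local path
assert not is_url(b"/home/user/art.jpg")  # bytes always mean a path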
Given a MusicBrainz work id, find the id of one of the works it is part of and the first composition date it encounters.
def direct_parent_id(mb_workid, work_date=None):
    """Given a MusicBrainz work id, find the id of one of the works it
    is part of and the first composition date it encounters.
    """
    work_info = musicbrainzngs.get_work_by_id(
        mb_workid, includes=["work-rels", "artist-rels"]
    )
    if "artist-relation-list" in work_info["work"] and work_date is None:
        for artist in work_info["work"]["artist-relation-list"]:
            if artist["type"] == "composer":
                if "end" in artist.keys():
                    work_date = artist["end"]

    if "work-relation-list" in work_info["work"]:
        for direct_parent in work_info["work"]["work-relation-list"]:
            if (
                direct_parent["type"] == "parts"
                and direct_parent.get("direction") == "backward"
            ):
                direct_id = direct_parent["work"]["id"]
                return direct_id, work_date
    return None, work_date
Find the parent work id and composition date of a work given its id.
def work_parent_id(mb_workid):
    """Find the parent work id and composition date of a work given its
    id.
    """
    work_date = None
    while True:
        new_mb_workid, work_date = direct_parent_id(mb_workid, work_date)
        if not new_mb_workid:
            return mb_workid, work_date
        mb_workid = new_mb_workid
Get the MusicBrainz information dict about a parent work, including the artist relations, and the composition date for a work's parent work.
def find_parentwork_info(mb_workid):
    """Get the MusicBrainz information dict about a parent work,
    including the artist relations, and the composition date for a
    work's parent work.
    """
    parent_id, work_date = work_parent_id(mb_workid)
    work_info = musicbrainzngs.get_work_by_id(
        parent_id, includes=["artist-rels"]
    )
    return work_info, work_date
Convert a string to an integer, interpreting the text as octal. Or, if `perm` is an integer, reinterpret it as an octal number that has been "misinterpreted" as decimal.
def convert_perm(perm):
    """Convert a string to an integer, interpreting the text as octal.

    Or, if `perm` is an integer, reinterpret it as an octal number that
    has been "misinterpreted" as decimal.
    """
    if isinstance(perm, int):
        perm = str(perm)
    return int(perm, 8)
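Both a string and a "decimal-looking" integer yield the same mode bits:

assert convert_perm("777") == 0o777  # 511
assert convert_perm(777) == 0o777    # the int 777 reread as octal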
Check whether the file's permissions equal the given vector. Return a boolean.
def check_permissions(path, permission):
    """Check whether the file's permissions equal the given vector.

    Return a boolean.
    """
    return oct(stat.S_IMODE(os.stat(syspath(path)).st_mode)) == oct(permission)
Check whether the file's permissions are as expected; otherwise, log a warning message. Return a boolean indicating the match, like `check_permissions`.
def assert_permissions(path, permission, log):
    """Check whether the file's permissions are as expected; otherwise,
    log a warning message. Return a boolean indicating the match, like
    `check_permissions`.
    """
    # The original snippet logged but never returned, despite the
    # docstring's promise; returning the check result fixes that.
    if not check_permissions(path, permission):
        log.warning("could not set permissions on {}", displayable_path(path))
        log.debug(
            "set permissions to {}, but permissions are now {}",
            permission,
            os.stat(syspath(path)).st_mode & 0o777,
        )
        return False
    return True
Creates a list of ancestor directories in the beets library path.
def dirs_in_library(library, item):
    """Creates a list of ancestor directories in the beets library
    path.
    """
    return [
        ancestor
        for ancestor in ancestry(item)
        if ancestor.startswith(library)
    ][1:]
Play items in paths with command_str and optional arguments. If keep_open, return to beets, otherwise exit once command runs.
def play(
    command_str,
    selection,
    paths,
    open_args,
    log,
    item_type="track",
    keep_open=False,
):
    """Play items in paths with command_str and optional arguments. If
    keep_open, return to beets, otherwise exit once command runs.
    """
    # Print number of tracks or albums to be played, log command to be run.
    item_type += "s" if len(selection) > 1 else ""
    ui.print_("Playing {} {}.".format(len(selection), item_type))
    log.debug("executing command: {} {!r}", command_str, open_args)

    try:
        if keep_open:
            command = shlex.split(command_str)
            command = command + open_args
            subprocess.call(command)
        else:
            util.interactive_open(open_args, command_str)
    except OSError as exc:
        raise ui.UserError(f"Could not play the query: {exc}")
Get the section key for the music library in Plex.
def get_music_section(
    host, port, token, library_name, secure, ignore_cert_errors
):
    """Get the section key for the music library in Plex."""
    api_endpoint = append_token("library/sections", token)
    url = urljoin(
        "{}://{}:{}".format(get_protocol(secure), host, port), api_endpoint
    )

    # Send the request.
    r = requests.get(url, verify=not ignore_cert_errors)

    # Parse the XML tree and extract the music section key.
    tree = ElementTree.fromstring(r.content)
    for child in tree.findall("Directory"):
        if child.get("title") == library_name:
            return child.get("key")
Send a request to the Plex API to start a library refresh, ignoring certificate errors if configured to.
def update_plex(host, port, token, library_name, secure, ignore_cert_errors):
    """Send a request to the Plex API to start a library refresh,
    ignoring certificate errors if configured to.
    """
    if ignore_cert_errors:
        import urllib3

        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

    # Get the section key and build the URL.
    section_key = get_music_section(
        host, port, token, library_name, secure, ignore_cert_errors
    )
    api_endpoint = f"library/sections/{section_key}/refresh"
    api_endpoint = append_token(api_endpoint, token)
    url = urljoin(
        "{}://{}:{}".format(get_protocol(secure), host, port), api_endpoint
    )

    # Send the request and return the requests object.
    r = requests.get(url, verify=not ignore_cert_errors)

    return r
Append the Plex Home token to the API call if required.
def append_token(url, token):
    """Append the Plex Home token to the API call if required."""
    if token:
        url += "?" + urlencode({"X-Plex-Token": token})
    return url
Select some random items or albums and print the results.
def random_func(lib, opts, args):
    """Select some random items or albums and print the results."""
    # Fetch all the objects matching the query into a list.
    query = decargs(args)
    if opts.album:
        objs = list(lib.albums(query))
    else:
        objs = list(lib.items(query))

    # Print a random subset.
    objs = random_objs(
        objs, opts.album, opts.number, opts.time, opts.equal_chance
    )
    for obj in objs:
        print_(format(obj))
Execute the command and return its output or raise a ReplayGainError on failure.
def call(args: List[Any], log: Logger, **kwargs: Any):
    """Execute the command and return its output or raise a
    ReplayGainError on failure.
    """
    try:
        return command_output(args, **kwargs)
    except subprocess.CalledProcessError as e:
        log.debug(e.output.decode("utf8", "ignore"))
        raise ReplayGainError(
            "{} exited with status {}".format(args[0], e.returncode)
        )
    except UnicodeEncodeError:
        # Due to a bug in Python 2's subprocess on Windows, Unicode
        # filenames can fail to encode on that platform. See:
        # https://github.com/google-code-export/beets/issues/499
        raise ReplayGainError("argument encoding failed")
Convert dB to LUFS, according to https://wiki.hydrogenaud.io/index.php?title=ReplayGain_2.0_specification#Reference_level
def db_to_lufs(db: float) -> float:
    """Convert dB to LUFS.

    According to
    https://wiki.hydrogenaud.io/index.php?title=ReplayGain_2.0_specification#Reference_level
    """
    return db - 107
Convert LUFS to dB, according to https://wiki.hydrogenaud.io/index.php?title=ReplayGain_2.0_specification#Reference_level
def lufs_to_db(db: float) -> float:
    """Convert LUFS to dB.

    According to
    https://wiki.hydrogenaud.io/index.php?title=ReplayGain_2.0_specification#Reference_level
    """
    return db + 107
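The two conversions are inverses around the -107 reference level, e.g.:

assert db_to_lufs(-9) == -116
assert lufs_to_db(db_to_lufs(-9)) == -9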
Create a template field function that rewrites the given field with the given rewriting rules. ``rules`` must be a list of (pattern, replacement) pairs.
def rewriter(field, rules):
    """Create a template field function that rewrites the given field
    with the given rewriting rules.

    ``rules`` must be a list of (pattern, replacement) pairs.
    """

    def fieldfunc(item):
        value = item._values_fixed[field]
        for pattern, replacement in rules:
            if pattern.match(value.lower()):
                # Rewrite activated.
                return replacement
        # Not activated; return original value.
        return value

    return fieldfunc
Copy a `ctypes.POINTER(ctypes.c_char)` value into a new Python string and return it. The old memory is then safe to free.
def copy_c_string(c_string):
    """Copy a `ctypes.POINTER(ctypes.c_char)` value into a new Python
    string and return it. The old memory is then safe to free.
    """
    # This is a pretty dumb way to get a string copy, but it seems to
    # work. A more surefire way would be to allocate a ctypes buffer and
    # copy the data with `memcpy` or somesuch.
    s = ctypes.cast(c_string, ctypes.c_char_p).value
    return b"" + s
Check whether `value` (as a string) matches any of the compiled regexes in the `progs` list.
def _match_progs(value, progs):
    """Check whether `value` (as a string) matches any of the compiled
    regexes in the `progs` list.
    """
    if not progs:
        return True
    for prog in progs:
        if prog.search(str(value)):
            return True
    return False
Get supported audio decoders from GStreamer. Returns a dict mapping decoder element names to the associated media types and file extensions.
def get_decoders():
    """Get supported audio decoders from GStreamer.

    Returns a dict mapping decoder element names to the associated
    media types and file extensions.
    """
    # We only care about audio decoder elements.
    filt = (
        Gst.ELEMENT_FACTORY_TYPE_DEPAYLOADER
        | Gst.ELEMENT_FACTORY_TYPE_DEMUXER
        | Gst.ELEMENT_FACTORY_TYPE_PARSER
        | Gst.ELEMENT_FACTORY_TYPE_DECODER
        | Gst.ELEMENT_FACTORY_TYPE_MEDIA_AUDIO
    )

    decoders = {}
    mime_types = set()
    for f in Gst.ElementFactory.list_get_elements(filt, Gst.Rank.NONE):
        for pad in f.get_static_pad_templates():
            if pad.direction == Gst.PadDirection.SINK:
                caps = pad.static_caps.get()
                mimes = set()
                for i in range(caps.get_size()):
                    struct = caps.get_structure(i)
                    mime = struct.get_name()
                    if mime == "unknown/unknown":
                        continue
                    mimes.add(mime)
                    mime_types.add(mime)
                if mimes:
                    decoders[f.get_name()] = (mimes, set())

    # Check all the TypeFindFactory plugin features from the registry.
    # If they are associated with an audio media type that we found
    # above, get the list of corresponding file extensions.
    mime_extensions = {mime: set() for mime in mime_types}
    for feat in Gst.Registry.get().get_feature_list(Gst.TypeFindFactory):
        caps = feat.get_caps()
        if caps:
            for i in range(caps.get_size()):
                struct = caps.get_structure(i)
                mime = struct.get_name()
                if mime in mime_types:
                    mime_extensions[mime].update(feat.get_extensions())

    # Fill in the slot we left for file extensions.
    for name, (mimes, exts) in decoders.items():
        for mime in mimes:
            exts.update(mime_extensions[mime])

    return decoders
Play the files in paths in a straightforward way, without using the player's callback function.
def play_simple(paths):
    """Play the files in paths in a straightforward way, without using
    the player's callback function.
    """
    p = GstPlayer()
    p.run()
    for path in paths:
        p.play_file(path)
        p.block()
Play the files in paths one after the other by using the callback function to advance to the next song.
def play_complicated(paths):
    """Play the files in paths one after the other by using the
    callback function to advance to the next song.
    """
    my_paths = copy.copy(paths)

    def next_song():
        my_paths.pop(0)
        p.play_file(my_paths[0])

    p = GstPlayer(next_song)
    p.run()
    p.play_file(my_paths[0])
    while my_paths:
        time.sleep(1)
Create a BPDError subclass for a static code and message.
def make_bpd_error(s_code, s_message):
    """Create a BPDError subclass for a static code and message."""

    class NewBPDError(BPDError):
        code = s_code
        message = s_message
        cmd_name = ""
        index = 0

        def __init__(self):
            pass

    return NewBPDError
Attempt to call `t` on `val`, raising an ArgumentTypeError on ValueError. If `t` is the special string 'intbool', attempt to cast first to an int and then to a bool (i.e., 1=True, 0=False).
def cast_arg(t, val):
    """Attempt to call `t` on `val`, raising an ArgumentTypeError on
    ValueError.

    If `t` is the special string 'intbool', attempt to cast first to an
    int and then to a bool (i.e., 1=True, 0=False).
    """
    if t == "intbool":
        return cast_arg(bool, cast_arg(int, val))
    else:
        try:
            return t(val)
        except ValueError:
            raise ArgumentTypeError()
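Illustrative behavior:

assert cast_arg(int, "42") == 42
assert cast_arg("intbool", "1") is True
# cast_arg(int, "x") raises ArgumentTypeError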
Remove duplicates from sequence while preserving order.
def deduplicate(seq):
    """Remove duplicates from a sequence while preserving order."""
    seen = set()
    return [x for x in seq if x not in seen and not seen.add(x)]
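For example (the trick works because `set.add` returns None, which is falsy):

assert deduplicate([3, 1, 3, 2, 1]) == [3, 1, 2]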
Flatten nested lists/dictionaries into lists of strings (branches).
def flatten_tree(elem, path, branches):
    """Flatten nested lists/dictionaries into lists of strings
    (branches).
    """
    if not path:
        path = []

    if isinstance(elem, dict):
        for k, v in elem.items():
            flatten_tree(v, path + [k], branches)
    elif isinstance(elem, list):
        for sub in elem:
            flatten_tree(sub, path, branches)
    else:
        branches.append(path + [str(elem)])
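A small worked example (the nested shape mirrors the genre trees this is used on):

branches = []
flatten_tree({"rock": [{"metal": ["doom metal"]}, "punk"]}, [], branches)
# branches == [["rock", "metal", "doom metal"], ["rock", "punk"]]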
Find the parent genres of a given genre, ordered from the closest to the furthest parent.
def find_parents(candidate, branches):
    """Find the parent genres of a given genre, ordered from the
    closest to the furthest parent.
    """
    for branch in branches:
        try:
            idx = branch.index(candidate.lower())
            return list(reversed(branch[: idx + 1]))
        except ValueError:
            continue
    return [candidate]
Returns a dictionary of all the MetaSources, e.g., {'itunes': Itunes} with isinstance(Itunes, MetaSource) true.
def load_meta_sources():
    """Returns a dictionary of all the MetaSources, e.g.,
    {'itunes': Itunes} with isinstance(Itunes, MetaSource) true.
    """
    meta_sources = {}
    for module_path, class_name in SOURCES.items():
        module = import_module(METASYNC_MODULE + "." + module_path)
        meta_sources[class_name.lower()] = getattr(module, class_name)
    return meta_sources
Returns a dictionary containing the item_types of all the MetaSources
def load_item_types():
    """Returns a dictionary containing the item_types of all the
    MetaSources.
    """
    item_types = {}
    for meta_source in META_SOURCES.values():
        item_types.update(meta_source.item_types)
    return item_types
Get a flat -- i.e., JSON-ish -- representation of a beets Item or Album object. For Albums, `expand` dictates whether tracks are included.
def _rep(obj, expand=False):
    """Get a flat -- i.e., JSON-ish -- representation of a beets Item or
    Album object. For Albums, `expand` dictates whether tracks are
    included.
    """
    out = dict(obj)

    if isinstance(obj, beets.library.Item):
        if app.config.get("INCLUDE_PATHS", False):
            out["path"] = util.displayable_path(out["path"])
        else:
            del out["path"]

        # Filter all bytes attributes and convert them to strings.
        for key, value in out.items():
            if isinstance(out[key], bytes):
                out[key] = base64.b64encode(value).decode("ascii")

        # Get the size (in bytes) of the backing file. This is useful
        # for the Tomahawk resolver API.
        try:
            out["size"] = os.path.getsize(util.syspath(obj.path))
        except OSError:
            out["size"] = 0

        return out

    elif isinstance(obj, beets.library.Album):
        if app.config.get("INCLUDE_PATHS", False):
            out["artpath"] = util.displayable_path(out["artpath"])
        else:
            del out["artpath"]

        if expand:
            out["items"] = [_rep(item) for item in obj.items()]

        return out
Generator that dumps a list of beets Items or Albums as JSON.

:param root: root key for JSON
:param items: list of :class:`Item` or :class:`Album` to dump
:param expand: If true every :class:`Album` contains its items in the JSON representation
:returns: generator that yields strings
def json_generator(items, root, expand=False):
    """Generator that dumps a list of beets Items or Albums as JSON.

    :param root: root key for JSON
    :param items: list of :class:`Item` or :class:`Album` to dump
    :param expand: If true every :class:`Album` contains its items in
                   the JSON representation
    :returns: generator that yields strings
    """
    yield '{"%s":[' % root
    first = True
    for item in items:
        if first:
            first = False
        else:
            yield ","
        yield json.dumps(_rep(item, expand=expand))
    yield "]}"
Returns whether the current request is for an expanded response.
def is_expand():
    """Returns whether the current request is for an expanded response."""
    return flask.request.args.get("expand") is not None
Returns whether the current delete request should remove the selected files.
def is_delete():
    """Returns whether the current delete request should remove the
    selected files.
    """
    return flask.request.args.get("delete") is not None
Returns the HTTP method of the current request.
def get_method():
    """Returns the HTTP method of the current request."""
    return flask.request.method
Decorates a function to handle RESTful HTTP requests for a resource.
def resource(name, patchable=False):
    """Decorates a function to handle RESTful HTTP requests for a
    resource.
    """

    def make_responder(retriever):
        def responder(ids):
            entities = [retriever(id) for id in ids]
            entities = [entity for entity in entities if entity]

            if get_method() == "DELETE":
                if app.config.get("READONLY", True):
                    return flask.abort(405)

                for entity in entities:
                    entity.remove(delete=is_delete())

                return flask.make_response(jsonify({"deleted": True}), 200)

            elif get_method() == "PATCH" and patchable:
                if app.config.get("READONLY", True):
                    return flask.abort(405)

                for entity in entities:
                    entity.update(flask.request.get_json())
                    entity.try_sync(True, False)  # write, don't move

                if len(entities) == 1:
                    return flask.jsonify(_rep(entities[0], expand=is_expand()))
                elif entities:
                    return app.response_class(
                        json_generator(entities, root=name),
                        mimetype="application/json",
                    )

            elif get_method() == "GET":
                if len(entities) == 1:
                    return flask.jsonify(_rep(entities[0], expand=is_expand()))
                elif entities:
                    return app.response_class(
                        json_generator(entities, root=name),
                        mimetype="application/json",
                    )
                else:
                    return flask.abort(404)

            else:
                return flask.abort(405)

        responder.__name__ = f"get_{name}"

        return responder

    return make_responder
Decorates a function to handle RESTful HTTP queries for resources.
def resource_query(name, patchable=False):
    """Decorates a function to handle RESTful HTTP queries for
    resources.
    """

    def make_responder(query_func):
        def responder(queries):
            entities = query_func(queries)

            if get_method() == "DELETE":
                if app.config.get("READONLY", True):
                    return flask.abort(405)

                for entity in entities:
                    entity.remove(delete=is_delete())

                return flask.make_response(jsonify({"deleted": True}), 200)

            elif get_method() == "PATCH" and patchable:
                if app.config.get("READONLY", True):
                    return flask.abort(405)

                for entity in entities:
                    entity.update(flask.request.get_json())
                    entity.try_sync(True, False)  # write, don't move

                return app.response_class(
                    json_generator(entities, root=name),
                    mimetype="application/json",
                )

            elif get_method() == "GET":
                return app.response_class(
                    json_generator(
                        entities, root="results", expand=is_expand()
                    ),
                    mimetype="application/json",
                )

            else:
                return flask.abort(405)

        responder.__name__ = f"query_{name}"

        return responder

    return make_responder
Decorates a function to handle RESTful HTTP requests for a list of resources.
def resource_list(name):
    """Decorates a function to handle RESTful HTTP requests for a list
    of resources.
    """

    def make_responder(list_all):
        def responder():
            return app.response_class(
                json_generator(list_all(), root=name, expand=is_expand()),
                mimetype="application/json",
            )

        responder.__name__ = f"all_{name}"

        return responder

    return make_responder
Retrieve all unique values belonging to a key from a model.
def _get_unique_table_field_values(model, field, sort_field):
    """Retrieve all unique values belonging to a key from a model."""
    if field not in model.all_keys() or sort_field not in model.all_keys():
        raise KeyError
    with g.lib.transaction() as tx:
        rows = tx.query(
            'SELECT DISTINCT "{}" FROM "{}" ORDER BY "{}"'.format(
                field, model._table, sort_field
            )
        )
    return [row[0] for row in rows]
A context manager that temporarily changes the working directory.
# As written, this is a plain generator; for use in a `with` block it
# needs the `contextlib.contextmanager` decorator, which was presumably
# stripped when this snippet was extracted.
@contextlib.contextmanager
def chdir(d):
    """A context manager that temporarily changes the working
    directory.
    """
    olddir = os.getcwd()
    os.chdir(d)
    yield
    os.chdir(olddir)
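Usage follows the standard context-manager pattern:

with chdir("/tmp"):
    ...  # the working directory is /tmp here
# the previous working directory is restored on exit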
Update the version number in setup.py, docs config, changelog, and root module.
def bump_version(version):
    """Update the version number in setup.py, docs config, changelog,
    and root module.
    """
    version_parts = [int(p) for p in version.split(".")]
    assert len(version_parts) == 3, "invalid version number"
    minor = "{}.{}".format(*version_parts)
    major = "{}".format(*version_parts)

    # Replace the version each place where it lives.
    for filename, locations in VERSION_LOCS:
        # Read and transform the file.
        out_lines = []
        with open(filename) as f:
            found = False
            for line in f:
                for pattern, template in locations:
                    match = re.match(pattern, line)
                    if match:
                        # Check that this version is actually newer.
                        old_version = match.group(1)
                        old_parts = [int(p) for p in old_version.split(".")]
                        assert (
                            version_parts > old_parts
                        ), "version must be newer than {}".format(old_version)

                        # Insert the new version.
                        out_lines.append(
                            template.format(
                                version=version,
                                major=major,
                                minor=minor,
                            )
                            + "\n"
                        )

                        found = True
                        break
                else:
                    # Normal line.
                    out_lines.append(line)

            if not found:
                print(f"No pattern found in {filename}")

        # Write the file back.
        with open(filename, "w") as f:
            f.write("".join(out_lines))

    # Generate bits to insert into changelog.
    header_line = f"{version} (in development)"
    header = "\n\n" + header_line + "\n" + "-" * len(header_line) + "\n\n"
    header += "Changelog goes here!\n"

    # Insert into the right place.
    with open(CHANGELOG) as f:
        contents = f.read()
    location = contents.find("\n\n")  # First blank line.
    contents = contents[:location] + header + contents[location:]

    # Write back.
    with open(CHANGELOG, "w") as f:
        f.write(contents)
Bump the version number.
def bump(version):
    """Bump the version number."""
    bump_version(version)
Extract the first section of the changelog.
def get_latest_changelog():
    """Extract the first section of the changelog."""
    started = False
    lines = []
    with open(CHANGELOG) as f:
        for line in f:
            if re.match(r"^--+$", line.strip()):
                # Section boundary. Start or end.
                if started:
                    # Remove last line, which is the header of the next
                    # section.
                    del lines[-1]
                    break
                else:
                    started = True
            elif started:
                lines.append(line)
    return "".join(lines).strip()
Use Pandoc to convert text from ReST to Markdown.
def rst2md(text):
    """Use Pandoc to convert text from ReST to Markdown."""
    pandoc = subprocess.Popen(
        ["pandoc", "--from=rst", "--to=markdown", "--wrap=none"],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout, _ = pandoc.communicate(text.encode("utf-8"))
    md = stdout.decode("utf-8").strip()

    # Fix up odd spacing in lists.
    return re.sub(r"^-   ", "- ", md, flags=re.M)
Get the latest changelog entry as hacked up Markdown.
def changelog_as_markdown():
    """Get the latest changelog entry as hacked up Markdown."""
    rst = get_latest_changelog()

    # Replace plugin links with plugin names.
    rst = re.sub(r":doc:`/plugins/(\w+)`", r"``\1``", rst)

    # References with text.
    rst = re.sub(r":ref:`([^<]+)(<[^>]+>)`", r"\1", rst)

    # Other backslashes with verbatim ranges.
    rst = re.sub(r"(\s)`([^`]+)`([^_])", r"\1``\2``\3", rst)

    # Command links with command names.
    rst = re.sub(r":ref:`(\w+)-cmd`", r"``\1``", rst)

    # Bug numbers.
    rst = re.sub(r":bug:`(\d+)`", r"#\1", rst)

    # Users.
    rst = re.sub(r":user:`(\w+)`", r"@\1", rst)

    # Convert with Pandoc.
    md = rst2md(rst)

    # Restore escaped issue numbers.
    md = re.sub(r"\\#(\d+)\b", r"#\1", md)

    return md
Get the most recent version's changelog as Markdown.
def changelog():
    """Get the most recent version's changelog as Markdown."""
    print(changelog_as_markdown())
Read the current version from the changelog.
def get_version(index=0):
    """Read the current version from the changelog."""
    with open(CHANGELOG) as f:
        cur_index = 0
        for line in f:
            match = re.search(r"^\d+\.\d+\.\d+", line)
            if match:
                if cur_index == index:
                    return match.group(0)
                else:
                    cur_index += 1
Display the current version.
def version():
    """Display the current version."""
    print(get_version())
Enter today's date as the release date in the changelog.
def datestamp():
    """Enter today's date as the release date in the changelog."""
    dt = datetime.datetime.now()
    stamp = "({} {}, {})".format(dt.strftime("%B"), dt.day, dt.year)
    marker = "(in development)"

    lines = []
    underline_length = None
    with open(CHANGELOG) as f:
        for line in f:
            if marker in line:
                # The header line.
                line = line.replace(marker, stamp)
                lines.append(line)
                underline_length = len(line.strip())
            elif underline_length:
                # This is the line after the header. Rewrite the dashes.
                lines.append("-" * underline_length + "\n")
                underline_length = None
            else:
                lines.append(line)

    with open(CHANGELOG, "w") as f:
        for line in lines:
            f.write(line)
Run all steps to prepare a release.

- Tag the commit.
- Build the sdist package.
- Generate the Markdown changelog to ``changelog.md``.
- Bump the version number to the next version.
def prep():
    """Run all steps to prepare a release.

    - Tag the commit.
    - Build the sdist package.
    - Generate the Markdown changelog to ``changelog.md``.
    - Bump the version number to the next version.
    """
    cur_version = get_version()

    # Tag.
    subprocess.check_call(["git", "tag", f"v{cur_version}"])

    # Build.
    with chdir(BASE):
        subprocess.check_call(["python", "setup.py", "sdist"])

    # Generate Markdown changelog.
    cl = changelog_as_markdown()
    with open(os.path.join(BASE, "changelog.md"), "w") as f:
        f.write(cl)

    # Version number bump.
    # FIXME It should be possible to specify this as an argument.
    version_parts = [int(n) for n in cur_version.split(".")]
    version_parts[-1] += 1
    next_version = ".".join(map(str, version_parts))
    bump_version(next_version)
Unleash a release unto the world.

- Push the tag to GitHub.
- Upload to PyPI.
def publish():
    """Unleash a release unto the world.

    - Push the tag to GitHub.
    - Upload to PyPI.
    """
    version = get_version(1)

    # Push to GitHub.
    with chdir(BASE):
        subprocess.check_call(["git", "push"])
        subprocess.check_call(["git", "push", "--tags"])

    # Upload to PyPI.
    path = os.path.join(BASE, "dist", f"beets-{version}.tar.gz")
    subprocess.check_call(["twine", "upload", path])
Create a GitHub release using the `github-release` command-line tool. Reads the changelog to upload from `changelog.md`. Uploads the tarball from the `dist` directory.
def ghrelease():
    """Create a GitHub release using the `github-release` command-line
    tool.

    Reads the changelog to upload from `changelog.md`. Uploads the
    tarball from the `dist` directory.
    """
    version = get_version(1)
    tag = "v" + version

    # Load the changelog.
    with open(os.path.join(BASE, "changelog.md")) as f:
        cl_md = f.read()

    # Create the release.
    subprocess.check_call(
        [
            "github-release",
            "release",
            "-u",
            GITHUB_USER,
            "-r",
            GITHUB_REPO,
            "--tag",
            tag,
            "--name",
            f"{GITHUB_REPO} {version}",
            "--description",
            cl_md,
        ]
    )

    # Attach the release tarball.
    tarball = os.path.join(BASE, "dist", f"beets-{version}.tar.gz")
    subprocess.check_call(
        [
            "github-release",
            "upload",
            "-u",
            GITHUB_USER,
            "-r",
            GITHUB_REPO,
            "--tag",
            tag,
            "--name",
            os.path.basename(tarball),
            "--file",
            tarball,
        ]
    )
Run this suite of tests.
def suite():
    """Run this suite of tests."""
    return unittest.TestLoader().loadTestsFromName(__name__)
Hack around the lazy descriptor used to cache weights for Distance calculations.
def _clear_weights():
    """Hack around the lazy descriptor used to cache weights for
    Distance calculations.
    """
    Distance.__dict__["_weights"].computed = False
Create an AlbumInfo object for testing.
def match_album_mock(*args, **kwargs):
    """Create an AlbumInfo object for testing."""
    track_info = TrackInfo(
        title="new title",
        track_id="trackid",
        index=0,
    )
    album_info = AlbumInfo(
        artist="artist",
        album="album",
        tracks=[track_info],
        album_id="albumid",
        artist_id="artistid",
        flex="flex",
    )
    return iter([album_info])
Mimic musicbrainzngs.get_release_by_id, accepting only a restricted list of MB ids (ID_RELEASE_0, ID_RELEASE_1). The returned dict differs only in the release title and artist name, so that ID_RELEASE_0 is a closer match to the items created by ImportHelper._create_import_dir().
def mocked_get_release_by_id(
    id_, includes=[], release_status=[], release_type=[]
):
    """Mimic musicbrainzngs.get_release_by_id, accepting only a
    restricted list of MB ids (ID_RELEASE_0, ID_RELEASE_1). The returned
    dict differs only in the release title and artist name, so that
    ID_RELEASE_0 is a closer match to the items created by
    ImportHelper._create_import_dir().
    """
    # Map IDs to (release title, artist), so the distances are different.
    releases = {
        ImportMusicBrainzIdTest.ID_RELEASE_0: ("VALID_RELEASE_0", "TAG ARTIST"),
        ImportMusicBrainzIdTest.ID_RELEASE_1: (
            "VALID_RELEASE_1",
            "DISTANT_MATCH",
        ),
    }

    return {
        "release": {
            "title": releases[id_][0],
            "id": id_,
            "medium-list": [
                {
                    "track-list": [
                        {
                            "id": "baz",
                            "recording": {
                                "title": "foo",
                                "id": "bar",
                                "length": 59,
                            },
                            "position": 9,
                            "number": "A2",
                        }
                    ],
                    "position": 5,
                }
            ],
            "artist-credit": [
                {
                    "artist": {
                        "name": releases[id_][1],
                        "id": "some-id",
                    },
                }
            ],
            "release-group": {
                "id": "another-id",
            },
            "status": "Official",
        }
    }
Mimic musicbrainzngs.get_recording_by_id, accepting only a restricted list of MB ids (ID_RECORDING_0, ID_RECORDING_1). The returned dict differs only in the recording title and artist name, so that ID_RECORDING_0 is a closer match to the items created by ImportHelper._create_import_dir().
def mocked_get_recording_by_id(
    id_, includes=[], release_status=[], release_type=[]
):
    """Mimic musicbrainzngs.get_recording_by_id, accepting only a
    restricted list of MB ids (ID_RECORDING_0, ID_RECORDING_1). The
    returned dict differs only in the recording title and artist name,
    so that ID_RECORDING_0 is a closer match to the items created by
    ImportHelper._create_import_dir().
    """
    # Map IDs to (recording title, artist), so the distances are different.
    releases = {
        ImportMusicBrainzIdTest.ID_RECORDING_0: (
            "VALID_RECORDING_0",
            "TAG ARTIST",
        ),
        ImportMusicBrainzIdTest.ID_RECORDING_1: (
            "VALID_RECORDING_1",
            "DISTANT_MATCH",
        ),
    }

    return {
        "recording": {
            "title": releases[id_][0],
            "id": id_,
            "length": 59,
            "artist-credit": [
                {
                    "artist": {
                        "name": releases[id_][1],
                        "id": "some-id",
                    },
                }
            ],
        }
    }