Given a list of alias structures for an artist credit, select and return the user's preferred alias, or None if no matching alias is found.
def _preferred_alias(aliases: List):
    """Given a list of alias structures for an artist credit, select
    and return the user's preferred alias, or None if no matching
    alias is found.
    """
    if not aliases:
        return

    # Only consider aliases that have locales set.
    aliases = [a for a in aliases if "locale" in a]

    # Get any ignored alias types and lower-case them to prevent case issues.
    ignored_alias_types = config["import"]["ignored_alias_types"].as_str_seq()
    ignored_alias_types = [a.lower() for a in ignored_alias_types]

    # Search configured locales in order.
    for locale in config["import"]["languages"].as_str_seq():
        # Find matching primary aliases for this locale that are not
        # being ignored.
        matches = []
        for a in aliases:
            if (
                a["locale"] == locale
                and "primary" in a
                and a.get("type", "").lower() not in ignored_alias_types
            ):
                matches.append(a)

        # Skip to the next locale if we have no matches.
        if not matches:
            continue

        return matches[0]
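For reference, a sketch of the alias structures this function expects, inferred from the keys the code reads; the values are hypothetical:

# Hypothetical alias data shaped like the musicbrainzngs "alias-list"
# entries this function consumes:
aliases = [
    {"alias": "Artist EN", "sort-name": "EN, Artist", "locale": "en",
     "primary": "primary", "type": "Artist name"},
    {"alias": "Artist JP", "sort-name": "JP, Artist", "locale": "ja"},
]
# With config["import"]["languages"] set to ["en"], the first entry
# would be returned; with no matching locale, the result is None.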
Given a release, select and return the user's preferred release event as a tuple of (country, release_date). Fall back to the default release event if a preferred event is not found.
def _preferred_release_event(release: Dict[str, Any]) -> Tuple[str, str]:
    """Given a release, select and return the user's preferred release
    event as a tuple of (country, release_date). Fall back to the
    default release event if a preferred event is not found.
    """
    countries = config["match"]["preferred"]["countries"].as_str_seq()
    countries = cast(Sequence, countries)

    for country in countries:
        for event in release.get("release-event-list", {}):
            try:
                if country in event["area"]["iso-3166-1-code-list"]:
                    return country, event["date"]
            except KeyError:
                pass

    return (cast(str, release.get("country")), cast(str, release.get("date")))
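A sketch of the release dict fields this function reads, with hypothetical values; the actual data comes from the MusicBrainz API via musicbrainzngs:

release = {
    "country": "US",
    "date": "1994-06-07",
    "release-event-list": [
        {"date": "1994-06-10",
         "area": {"iso-3166-1-code-list": ["GB"]}},
    ],
}
# With config["match"]["preferred"]["countries"] set to ["GB"], the GB
# event wins and ("GB", "1994-06-10") is returned; otherwise the
# function falls back to the top-level country and date.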
Given a list representing an ``artist-credit`` block, accumulate data into a triple of joined artist name lists: canonical, sort, and credit.
def _multi_artist_credit(
    credit: List[Dict], include_join_phrase: bool
) -> Tuple[List[str], List[str], List[str]]:
    """Given a list representing an ``artist-credit`` block, accumulate
    data into a triple of joined artist name lists: canonical, sort, and
    credit.
    """
    artist_parts = []
    artist_sort_parts = []
    artist_credit_parts = []
    for el in credit:
        if isinstance(el, str):
            # Join phrase.
            if include_join_phrase:
                artist_parts.append(el)
                artist_credit_parts.append(el)
                artist_sort_parts.append(el)
        else:
            alias = _preferred_alias(el["artist"].get("alias-list", ()))

            # An artist.
            if alias:
                cur_artist_name = alias["alias"]
            else:
                cur_artist_name = el["artist"]["name"]
            artist_parts.append(cur_artist_name)

            # Artist sort name.
            if alias:
                artist_sort_parts.append(alias["sort-name"])
            elif "sort-name" in el["artist"]:
                artist_sort_parts.append(el["artist"]["sort-name"])
            else:
                artist_sort_parts.append(cur_artist_name)

            # Artist credit.
            if "name" in el:
                artist_credit_parts.append(el["name"])
            else:
                artist_credit_parts.append(cur_artist_name)

    return (
        artist_parts,
        artist_sort_parts,
        artist_credit_parts,
    )
Given a list representing an ``artist-credit`` block, flatten the data into a triple of joined artist name strings: canonical, sort, and credit.
def _flatten_artist_credit(credit: List[Dict]) -> Tuple[str, str, str]:
    """Given a list representing an ``artist-credit`` block, flatten the
    data into a triple of joined artist name strings: canonical, sort,
    and credit.
    """
    artist_parts, artist_sort_parts, artist_credit_parts = _multi_artist_credit(
        credit, include_join_phrase=True
    )
    return (
        "".join(artist_parts),
        "".join(artist_sort_parts),
        "".join(artist_credit_parts),
    )
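An ``artist-credit`` block alternates artist dicts with join-phrase strings. A minimal, runnable sketch with made-up data (when no "alias-list" is present, `_preferred_alias` returns early without touching the config):

credit = [
    {"artist": {"id": "id-a", "name": "Artist A", "sort-name": "A, Artist"}},
    " feat. ",
    {"artist": {"id": "id-b", "name": "Artist B", "sort-name": "B, Artist"}},
]
assert _flatten_artist_credit(credit) == (
    "Artist A feat. Artist B",
    "A, Artist feat. B, Artist",
    "Artist A feat. Artist B",
)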
Given a list representing an ``artist-credit``, return a list of artist IDs.
def _artist_ids(credit: List[Dict]) -> List[str]:
    """Given a list representing an ``artist-credit``, return a list of
    artist IDs.
    """
    artist_ids: List[str] = []
    for el in credit:
        if isinstance(el, dict):
            artist_ids.append(el["artist"]["id"])
    return artist_ids
Given a list representing the artist relationships, extract the names of the related artists with the given relation type (e.g., remixers) and concatenate them.
def _get_related_artist_names(relations, relation_type):
    """Given a list representing the artist relationships, extract the
    names of the related artists with the given relation type (e.g.,
    remixers) and concatenate them.
    """
    related_artists = []
    for relation in relations:
        if relation["type"] == relation_type:
            related_artists.append(relation["artist"]["name"])
    return ", ".join(related_artists)
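Because this helper is a pure function over plain dicts, it can be exercised directly; a minimal, runnable sketch with made-up data:

relations = [
    {"type": "remixer", "artist": {"name": "DJ Example"}},
    {"type": "remixer", "artist": {"name": "Another DJ"}},
    {"type": "producer", "artist": {"name": "Someone Else"}},
]
assert _get_related_artist_names(relations, "remixer") == "DJ Example, Another DJ"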
Translates a MusicBrainz recording result dictionary into a beets ``TrackInfo`` object. Four parameters are optional and are used only for tracks that appear on releases (non-singletons): ``index``, the overall track number; ``medium``, the disc number; ``medium_index``, the track's index on its medium; ``medium_total``, the number of tracks on the medium. Each number is a 1-based index.
def track_info(
    recording: Dict,
    index: Optional[int] = None,
    medium: Optional[int] = None,
    medium_index: Optional[int] = None,
    medium_total: Optional[int] = None,
) -> beets.autotag.hooks.TrackInfo:
    """Translates a MusicBrainz recording result dictionary into a beets
    ``TrackInfo`` object. Four parameters are optional and are used only
    for tracks that appear on releases (non-singletons): ``index``, the
    overall track number; ``medium``, the disc number; ``medium_index``,
    the track's index on its medium; ``medium_total``, the number of
    tracks on the medium. Each number is a 1-based index.
    """
    info = beets.autotag.hooks.TrackInfo(
        title=recording["title"],
        track_id=recording["id"],
        index=index,
        medium=medium,
        medium_index=medium_index,
        medium_total=medium_total,
        data_source="MusicBrainz",
        data_url=track_url(recording["id"]),
    )

    if recording.get("artist-credit"):
        # Get the artist names.
        (
            info.artist,
            info.artist_sort,
            info.artist_credit,
        ) = _flatten_artist_credit(recording["artist-credit"])

        (
            info.artists,
            info.artists_sort,
            info.artists_credit,
        ) = _multi_artist_credit(
            recording["artist-credit"], include_join_phrase=False
        )

        info.artists_ids = _artist_ids(recording["artist-credit"])
        info.artist_id = info.artists_ids[0]

    if recording.get("artist-relation-list"):
        info.remixer = _get_related_artist_names(
            recording["artist-relation-list"], relation_type="remixer"
        )

    if recording.get("length"):
        info.length = int(recording["length"]) / 1000.0

    info.trackdisambig = recording.get("disambiguation")

    if recording.get("isrc-list"):
        info.isrc = ";".join(recording["isrc-list"])

    lyricist = []
    composer = []
    composer_sort = []
    for work_relation in recording.get("work-relation-list", ()):
        if work_relation["type"] != "performance":
            continue
        info.work = work_relation["work"]["title"]
        info.mb_workid = work_relation["work"]["id"]
        if "disambiguation" in work_relation["work"]:
            info.work_disambig = work_relation["work"]["disambiguation"]

        for artist_relation in work_relation["work"].get(
            "artist-relation-list", ()
        ):
            if "type" in artist_relation:
                type = artist_relation["type"]
                if type == "lyricist":
                    lyricist.append(artist_relation["artist"]["name"])
                elif type == "composer":
                    composer.append(artist_relation["artist"]["name"])
                    composer_sort.append(
                        artist_relation["artist"]["sort-name"]
                    )

    if lyricist:
        info.lyricist = ", ".join(lyricist)
    if composer:
        info.composer = ", ".join(composer)
        info.composer_sort = ", ".join(composer_sort)

    arranger = []
    for artist_relation in recording.get("artist-relation-list", ()):
        if "type" in artist_relation:
            type = artist_relation["type"]
            if type == "arranger":
                arranger.append(artist_relation["artist"]["name"])

    if arranger:
        info.arranger = ", ".join(arranger)

    # Supplementary fields provided by plugins.
    extra_trackdatas = plugins.send("mb_track_extract", data=recording)
    for extra_trackdata in extra_trackdatas:
        info.update(extra_trackdata)

    info.decode()
    return info
Given a (possibly partial) YYYY-MM-DD string and an AlbumInfo object, set the object's release date fields appropriately. If `original`, then set the original_year, etc., fields.
def _set_date_str(
    info: beets.autotag.hooks.AlbumInfo,
    date_str: str,
    original: bool = False,
):
    """Given a (possibly partial) YYYY-MM-DD string and an AlbumInfo
    object, set the object's release date fields appropriately. If
    `original`, then set the original_year, etc., fields.
    """
    if date_str:
        date_parts = date_str.split("-")
        for key in ("year", "month", "day"):
            if date_parts:
                date_part = date_parts.pop(0)
                try:
                    date_num = int(date_part)
                except ValueError:
                    continue

                if original:
                    key = "original_" + key
                setattr(info, key, date_num)
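The date-splitting logic has no beets dependencies beyond the target object, so it can be demonstrated with a stand-in; a minimal, runnable sketch:

from types import SimpleNamespace

info = SimpleNamespace()          # stand-in for an AlbumInfo object
_set_date_str(info, "1994-06")    # partial date: no day component
assert (info.year, info.month) == (1994, 6)
_set_date_str(info, "1994", original=True)
assert info.original_year == 1994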
Takes a MusicBrainz release result dictionary and returns a beets AlbumInfo object containing the interesting data about that release.
def album_info(release: Dict) -> beets.autotag.hooks.AlbumInfo:
    """Takes a MusicBrainz release result dictionary and returns a beets
    AlbumInfo object containing the interesting data about that release.
    """
    # Get artist name using join phrases.
    artist_name, artist_sort_name, artist_credit_name = _flatten_artist_credit(
        release["artist-credit"]
    )
    (
        artists_names,
        artists_sort_names,
        artists_credit_names,
    ) = _multi_artist_credit(
        release["artist-credit"], include_join_phrase=False
    )

    ntracks = sum(len(m["track-list"]) for m in release["medium-list"])

    # The MusicBrainz API omits 'artist-relation-list' and
    # 'work-relation-list' when the release has more than 500 tracks. So
    # we use browse_recordings on chunks of tracks to recover the same
    # information in this case.
    if ntracks > BROWSE_MAXTRACKS:
        log.debug("Album {} has too many tracks", release["id"])
        recording_list = []
        for i in range(0, ntracks, BROWSE_CHUNKSIZE):
            log.debug("Retrieving tracks starting at {}", i)
            recording_list.extend(
                musicbrainzngs.browse_recordings(
                    release=release["id"],
                    limit=BROWSE_CHUNKSIZE,
                    includes=BROWSE_INCLUDES,
                    offset=i,
                )["recording-list"]
            )
        track_map = {r["id"]: r for r in recording_list}
        for medium in release["medium-list"]:
            for recording in medium["track-list"]:
                recording_info = track_map[recording["recording"]["id"]]
                recording["recording"] = recording_info

    # Basic info.
    track_infos = []
    index = 0
    for medium in release["medium-list"]:
        disctitle = medium.get("title")
        format = medium.get("format")

        if format in config["match"]["ignored_media"].as_str_seq():
            continue

        all_tracks = medium["track-list"]
        if (
            "data-track-list" in medium
            and not config["match"]["ignore_data_tracks"]
        ):
            all_tracks += medium["data-track-list"]
        track_count = len(all_tracks)

        if "pregap" in medium:
            all_tracks.insert(0, medium["pregap"])

        for track in all_tracks:
            if (
                "title" in track["recording"]
                and track["recording"]["title"] in SKIPPED_TRACKS
            ):
                continue

            if (
                "video" in track["recording"]
                and track["recording"]["video"] == "true"
                and config["match"]["ignore_video_tracks"]
            ):
                continue

            # Basic information from the recording.
            index += 1
            ti = track_info(
                track["recording"],
                index,
                int(medium["position"]),
                int(track["position"]),
                track_count,
            )
            ti.release_track_id = track["id"]
            ti.disctitle = disctitle
            ti.media = format
            ti.track_alt = track["number"]

            # Prefer track data, where present, over recording data.
            if track.get("title"):
                ti.title = track["title"]
            if track.get("artist-credit"):
                # Get the artist names.
                (
                    ti.artist,
                    ti.artist_sort,
                    ti.artist_credit,
                ) = _flatten_artist_credit(track["artist-credit"])

                (
                    ti.artists,
                    ti.artists_sort,
                    ti.artists_credit,
                ) = _multi_artist_credit(
                    track["artist-credit"], include_join_phrase=False
                )

                ti.artists_ids = _artist_ids(track["artist-credit"])
                ti.artist_id = ti.artists_ids[0]
            if track.get("length"):
                ti.length = int(track["length"]) / (1000.0)

            track_infos.append(ti)

    album_artist_ids = _artist_ids(release["artist-credit"])
    info = beets.autotag.hooks.AlbumInfo(
        album=release["title"],
        album_id=release["id"],
        artist=artist_name,
        artist_id=album_artist_ids[0],
        artists=artists_names,
        artists_ids=album_artist_ids,
        tracks=track_infos,
        mediums=len(release["medium-list"]),
        artist_sort=artist_sort_name,
        artists_sort=artists_sort_names,
        artist_credit=artist_credit_name,
        artists_credit=artists_credit_names,
        data_source="MusicBrainz",
        data_url=album_url(release["id"]),
        barcode=release.get("barcode"),
    )
    info.va = info.artist_id == VARIOUS_ARTISTS_ID
    if info.va:
        info.artist = config["va_name"].as_str()
    info.asin = release.get("asin")
    info.releasegroup_id = release["release-group"]["id"]
    info.albumstatus = release.get("status")

    if release["release-group"].get("title"):
        info.release_group_title = release["release-group"].get("title")

    # Get the disambiguation strings at the release and release group
    # level.
    if release["release-group"].get("disambiguation"):
        info.releasegroupdisambig = release["release-group"].get(
            "disambiguation"
        )
    if release.get("disambiguation"):
        info.albumdisambig = release.get("disambiguation")

    # Get the "classic" Release type. This data comes from a legacy API
    # feature before MusicBrainz supported multiple release types.
    if "type" in release["release-group"]:
        reltype = release["release-group"]["type"]
        if reltype:
            info.albumtype = reltype.lower()

    # Set the new-style "primary" and "secondary" release types.
    albumtypes = []
    if "primary-type" in release["release-group"]:
        rel_primarytype = release["release-group"]["primary-type"]
        if rel_primarytype:
            albumtypes.append(rel_primarytype.lower())
    if "secondary-type-list" in release["release-group"]:
        if release["release-group"]["secondary-type-list"]:
            for sec_type in release["release-group"]["secondary-type-list"]:
                albumtypes.append(sec_type.lower())
    info.albumtypes = albumtypes

    # Release events.
    info.country, release_date = _preferred_release_event(release)
    release_group_date = release["release-group"].get("first-release-date")
    if not release_date:
        # Fall back if release-specific date is not available.
        release_date = release_group_date
    _set_date_str(info, release_date, False)
    _set_date_str(info, release_group_date, True)

    # Label name.
    if release.get("label-info-list"):
        label_info = release["label-info-list"][0]
        if label_info.get("label"):
            label = label_info["label"]["name"]
            if label != "[no label]":
                info.label = label
        info.catalognum = label_info.get("catalog-number")

    # Text representation data.
    if release.get("text-representation"):
        rep = release["text-representation"]
        info.script = rep.get("script")
        info.language = rep.get("language")

    # Media (format).
    if release["medium-list"]:
        # If all media are the same, use that medium name.
        if len(set([m.get("format") for m in release["medium-list"]])) == 1:
            info.media = release["medium-list"][0].get("format")
        # Otherwise, let's just call it "Media".
        else:
            info.media = "Media"

    if config["musicbrainz"]["genres"]:
        sources = [
            release["release-group"].get("genre-list", []),
            release.get("genre-list", []),
        ]
        genres: Counter[str] = Counter()
        for source in sources:
            for genreitem in source:
                genres[genreitem["name"]] += int(genreitem["count"])
        info.genre = "; ".join(
            genre
            for genre, _count in sorted(genres.items(), key=lambda g: -g[1])
        )

    # We might find links to external sources (Discogs, Bandcamp, ...)
    if any(
        config["musicbrainz"]["external_ids"].get().values()
    ) and release.get("url-relation-list"):
        discogs_url, bandcamp_url, spotify_url = None, None, None
        deezer_url, beatport_url, tidal_url = None, None, None

        fetch_discogs, fetch_bandcamp, fetch_spotify = False, False, False
        fetch_deezer, fetch_beatport, fetch_tidal = False, False, False

        if config["musicbrainz"]["external_ids"]["discogs"].get():
            fetch_discogs = True
        if config["musicbrainz"]["external_ids"]["bandcamp"].get():
            fetch_bandcamp = True
        if config["musicbrainz"]["external_ids"]["spotify"].get():
            fetch_spotify = True
        if config["musicbrainz"]["external_ids"]["deezer"].get():
            fetch_deezer = True
        if config["musicbrainz"]["external_ids"]["beatport"].get():
            fetch_beatport = True
        if config["musicbrainz"]["external_ids"]["tidal"].get():
            fetch_tidal = True

        for url in release["url-relation-list"]:
            if fetch_discogs and url["type"] == "discogs":
                log.debug("Found link to Discogs release via MusicBrainz")
                discogs_url = url["target"]
            if fetch_bandcamp and "bandcamp.com" in url["target"]:
                log.debug("Found link to Bandcamp release via MusicBrainz")
                bandcamp_url = url["target"]
            if fetch_spotify and "spotify.com" in url["target"]:
                log.debug("Found link to Spotify album via MusicBrainz")
                spotify_url = url["target"]
            if fetch_deezer and "deezer.com" in url["target"]:
                log.debug("Found link to Deezer album via MusicBrainz")
                deezer_url = url["target"]
            if fetch_beatport and "beatport.com" in url["target"]:
                log.debug("Found link to Beatport release via MusicBrainz")
                beatport_url = url["target"]
            if fetch_tidal and "tidal.com" in url["target"]:
                log.debug("Found link to Tidal release via MusicBrainz")
                tidal_url = url["target"]

        if discogs_url:
            info.discogs_albumid = extract_discogs_id_regex(discogs_url)
        if bandcamp_url:
            info.bandcamp_album_id = bandcamp_url
        if spotify_url:
            info.spotify_album_id = MetadataSourcePlugin._get_id(
                "album", spotify_url, spotify_id_regex
            )
        if deezer_url:
            info.deezer_album_id = MetadataSourcePlugin._get_id(
                "album", deezer_url, deezer_id_regex
            )
        if beatport_url:
            info.beatport_album_id = MetadataSourcePlugin._get_id(
                "album", beatport_url, beatport_id_regex
            )
        if tidal_url:
            info.tidal_album_id = tidal_url.split("/")[-1]

    extra_albumdatas = plugins.send("mb_album_extract", data=release)
    for extra_albumdata in extra_albumdatas:
        info.update(extra_albumdata)

    info.decode()
    return info
Searches for a single album ("release" in MusicBrainz parlance) and returns an iterator over AlbumInfo objects. May raise a MusicBrainzAPIError. The query consists of an artist name, an album name, and, optionally, a number of tracks on the album and any other extra tags.
def match_album(
    artist: str,
    album: str,
    tracks: Optional[int] = None,
    extra_tags: Optional[Dict[str, Any]] = None,
) -> Iterator[beets.autotag.hooks.AlbumInfo]:
    """Searches for a single album ("release" in MusicBrainz parlance)
    and returns an iterator over AlbumInfo objects. May raise a
    MusicBrainzAPIError.

    The query consists of an artist name, an album name, and,
    optionally, a number of tracks on the album and any other extra
    tags.
    """
    # Build search criteria.
    criteria = {"release": album.lower().strip()}
    if artist is not None:
        criteria["artist"] = artist.lower().strip()
    else:
        # Various Artists search.
        criteria["arid"] = VARIOUS_ARTISTS_ID
    if tracks is not None:
        criteria["tracks"] = str(tracks)

    # Additional search cues from existing metadata.
    if extra_tags:
        for tag, value in extra_tags.items():
            key = FIELDS_TO_MB_KEYS[tag]
            value = str(value).lower().strip()
            if key == "catno":
                value = value.replace(" ", "")
            if value:
                criteria[key] = value

    # Abort if we have no search terms.
    if not any(criteria.values()):
        return

    try:
        log.debug("Searching for MusicBrainz releases with: {!r}", criteria)
        res = musicbrainzngs.search_releases(
            limit=config["musicbrainz"]["searchlimit"].get(int), **criteria
        )
    except musicbrainzngs.MusicBrainzError as exc:
        raise MusicBrainzAPIError(
            exc, "release search", criteria, traceback.format_exc()
        )
    for release in res["release-list"]:
        # The search result is missing some data (namely, the tracks),
        # so we just use the ID and fetch the rest of the information.
        albuminfo = album_for_id(release["id"])
        if albuminfo is not None:
            yield albuminfo
Searches for a single track and returns an iterable of TrackInfo objects. May raise a MusicBrainzAPIError.
def match_track(
    artist: str,
    title: str,
) -> Iterator[beets.autotag.hooks.TrackInfo]:
    """Searches for a single track and returns an iterable of TrackInfo
    objects. May raise a MusicBrainzAPIError.
    """
    criteria = {
        "artist": artist.lower().strip(),
        "recording": title.lower().strip(),
    }

    if not any(criteria.values()):
        return

    try:
        res = musicbrainzngs.search_recordings(
            limit=config["musicbrainz"]["searchlimit"].get(int), **criteria
        )
    except musicbrainzngs.MusicBrainzError as exc:
        raise MusicBrainzAPIError(
            exc, "recording search", criteria, traceback.format_exc()
        )
    for recording in res["recording-list"]:
        yield track_info(recording)
Search for a MusicBrainz ID in the given string and return it. If no ID can be found, return None.
def _parse_id(s: str) -> Optional[str]:
    """Search for a MusicBrainz ID in the given string and return it. If
    no ID can be found, return None.
    """
    # Find the first thing that looks like a UUID/MBID.
    match = re.search("[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}", s)
    if match is not None:
        return match.group()
    return None
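A minimal, runnable sketch; the UUID below is made up:

url = "https://musicbrainz.org/release/01234567-89ab-cdef-0123-456789abcdef"
assert _parse_id(url) == "01234567-89ab-cdef-0123-456789abcdef"
assert _parse_id("not an id") is None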
Merges a pseudo release with its actual release. This implementation is naive: it doesn't overwrite fields like status or ids. According to the ticket PICARD-145, the main release id should be used, but that ticket has been in limbo for over a decade now. It also suggests introducing the tag `musicbrainz_pseudoreleaseid`, but this field can't be found in any official Picard docs, so we have not implemented it for now.
def _merge_pseudo_and_actual_album(
    pseudo: beets.autotag.hooks.AlbumInfo, actual: beets.autotag.hooks.AlbumInfo
) -> Optional[beets.autotag.hooks.AlbumInfo]:
    """Merges a pseudo release with its actual release.

    This implementation is naive: it doesn't overwrite fields like
    status or ids. According to the ticket PICARD-145, the main release
    id should be used, but that ticket has been in limbo for over a
    decade now. It also suggests introducing the tag
    `musicbrainz_pseudoreleaseid`, but this field can't be found in any
    official Picard docs, so we have not implemented it for now.
    """
    merged = pseudo.copy()
    from_actual = {
        k: actual[k]
        for k in [
            "media",
            "mediums",
            "country",
            "catalognum",
            "year",
            "month",
            "day",
            "original_year",
            "original_month",
            "original_day",
            "label",
            "barcode",
            "asin",
            "style",
            "genre",
        ]
    }
    merged.update(from_actual)
    return merged
Fetches an album by its MusicBrainz ID and returns an AlbumInfo object or None if the album is not found. May raise a MusicBrainzAPIError.
def album_for_id(releaseid: str) -> Optional[beets.autotag.hooks.AlbumInfo]:
    """Fetches an album by its MusicBrainz ID and returns an AlbumInfo
    object or None if the album is not found. May raise a
    MusicBrainzAPIError.
    """
    log.debug("Requesting MusicBrainz release {}", releaseid)
    albumid = _parse_id(releaseid)
    if not albumid:
        log.debug("Invalid MBID ({0}).", releaseid)
        return None
    try:
        res = musicbrainzngs.get_release_by_id(albumid, RELEASE_INCLUDES)

        # Resolve linked release relations.
        actual_res = None
        if res["release"].get("status") == "Pseudo-Release":
            actual_res = _find_actual_release_from_pseudo_release(res)
    except musicbrainzngs.ResponseError:
        log.debug("Album ID match failed.")
        return None
    except musicbrainzngs.MusicBrainzError as exc:
        raise MusicBrainzAPIError(
            exc, "get release by ID", albumid, traceback.format_exc()
        )

    # release is potentially a pseudo release.
    release = album_info(res["release"])

    # actual_res should be None unless we're dealing with a pseudo
    # release.
    if actual_res is not None:
        actual_release = album_info(actual_res["release"])
        return _merge_pseudo_and_actual_album(release, actual_release)
    else:
        return release
Fetches a track by its MusicBrainz ID. Returns a TrackInfo object or None if no track is found. May raise a MusicBrainzAPIError.
def track_for_id(releaseid: str) -> Optional[beets.autotag.hooks.TrackInfo]:
    """Fetches a track by its MusicBrainz ID. Returns a TrackInfo object
    or None if no track is found. May raise a MusicBrainzAPIError.
    """
    trackid = _parse_id(releaseid)
    if not trackid:
        log.debug("Invalid MBID ({0}).", releaseid)
        return None
    try:
        res = musicbrainzngs.get_recording_by_id(trackid, TRACK_INCLUDES)
    except musicbrainzngs.ResponseError:
        log.debug("Track ID match failed.")
        return None
    except musicbrainzngs.MusicBrainzError as exc:
        raise MusicBrainzAPIError(
            exc, "get recording by ID", trackid, traceback.format_exc()
        )
    return track_info(res["recording"])
Set an item's metadata from its matched TrackInfo object.
def apply_item_metadata(item: Item, track_info: TrackInfo):
    """Set an item's metadata from its matched TrackInfo object."""
    item.artist = track_info.artist
    item.artists = track_info.artists
    item.artist_sort = track_info.artist_sort
    item.artists_sort = track_info.artists_sort
    item.artist_credit = track_info.artist_credit
    item.artists_credit = track_info.artists_credit
    item.title = track_info.title
    item.mb_trackid = track_info.track_id
    item.mb_releasetrackid = track_info.release_track_id
    if track_info.artist_id:
        item.mb_artistid = track_info.artist_id
    if track_info.artists_ids:
        item.mb_artistids = track_info.artists_ids

    for field, value in track_info.items():
        # We only overwrite fields that are not already hardcoded.
        if field in SPECIAL_FIELDS["track"]:
            continue
        if value is None:
            continue
        item[field] = value
Set the items' metadata to match an AlbumInfo object using a mapping from Items to TrackInfo objects.
def apply_metadata(album_info: AlbumInfo, mapping: Mapping[Item, TrackInfo]):
    """Set the items' metadata to match an AlbumInfo object using a
    mapping from Items to TrackInfo objects.
    """
    for item, track_info in mapping.items():
        # Artist or artist credit.
        if config["artist_credit"]:
            item.artist = (
                track_info.artist_credit
                or track_info.artist
                or album_info.artist_credit
                or album_info.artist
            )
            item.artists = (
                track_info.artists_credit
                or track_info.artists
                or album_info.artists_credit
                or album_info.artists
            )
            item.albumartist = album_info.artist_credit or album_info.artist
            item.albumartists = album_info.artists_credit or album_info.artists
        else:
            item.artist = track_info.artist or album_info.artist
            item.artists = track_info.artists or album_info.artists
            item.albumartist = album_info.artist
            item.albumartists = album_info.artists

        # Album.
        item.album = album_info.album

        # Artist sort and credit names.
        item.artist_sort = track_info.artist_sort or album_info.artist_sort
        item.artists_sort = track_info.artists_sort or album_info.artists_sort
        item.artist_credit = (
            track_info.artist_credit or album_info.artist_credit
        )
        item.artists_credit = (
            track_info.artists_credit or album_info.artists_credit
        )
        item.albumartist_sort = album_info.artist_sort
        item.albumartists_sort = album_info.artists_sort
        item.albumartist_credit = album_info.artist_credit
        item.albumartists_credit = album_info.artists_credit

        # Release date.
        for prefix in "", "original_":
            if config["original_date"] and not prefix:
                # Ignore specific release date.
                continue

            for suffix in "year", "month", "day":
                key = prefix + suffix
                value = getattr(album_info, key) or 0

                # If we don't even have a year, apply nothing.
                if suffix == "year" and not value:
                    break

                # Otherwise, set the fetched value (or 0 for the month
                # and day if not available).
                item[key] = value

                # If we're using original release date for both fields,
                # also set item.year = info.original_year, etc.
                if config["original_date"]:
                    item[suffix] = value

        # Title.
        item.title = track_info.title

        if config["per_disc_numbering"]:
            # We want to let the track number be zero, but if the medium
            # index is not provided we need to fall back to the overall
            # index.
            if track_info.medium_index is not None:
                item.track = track_info.medium_index
            else:
                item.track = track_info.index
            item.tracktotal = track_info.medium_total or len(album_info.tracks)
        else:
            item.track = track_info.index
            item.tracktotal = len(album_info.tracks)

        # Disc and disc count.
        item.disc = track_info.medium
        item.disctotal = album_info.mediums

        # MusicBrainz IDs.
        item.mb_trackid = track_info.track_id
        item.mb_releasetrackid = track_info.release_track_id
        item.mb_albumid = album_info.album_id
        if track_info.artist_id:
            item.mb_artistid = track_info.artist_id
        else:
            item.mb_artistid = album_info.artist_id
        if track_info.artists_ids:
            item.mb_artistids = track_info.artists_ids
        else:
            item.mb_artistids = album_info.artists_ids
        item.mb_albumartistid = album_info.artist_id
        item.mb_albumartistids = album_info.artists_ids
        item.mb_releasegroupid = album_info.releasegroup_id

        # Compilation flag.
        item.comp = album_info.va

        # Track alt.
        item.track_alt = track_info.track_alt

        # Don't overwrite fields with empty values unless the
        # field is explicitly allowed to be overwritten.
        for field, value in album_info.items():
            if field in SPECIAL_FIELDS["album"]:
                continue
            clobber = field in config["overwrite_null"]["album"].as_str_seq()
            if value is None and not clobber:
                continue
            item[field] = value

        for field, value in track_info.items():
            if field in SPECIAL_FIELDS["track"]:
                continue
            clobber = field in config["overwrite_null"]["track"].as_str_seq()
            value = getattr(track_info, field)
            if value is None and not clobber:
                continue
            item[field] = value
Parse a string containing two dates separated by two dots (..). Return a pair of `Period` objects.
def _parse_periods(pattern: str) -> Tuple[Optional[Period], Optional[Period]]:
    """Parse a string containing two dates separated by two dots (..).
    Return a pair of `Period` objects.
    """
    parts = pattern.split("..", 1)
    if len(parts) == 1:
        instant = Period.parse(parts[0])
        return (instant, instant)
    else:
        start = Period.parse(parts[0])
        end = Period.parse(parts[1])
        return (start, end)
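`Period.parse` is a beets internal, but the endpoint-splitting rule the function relies on is plain string handling; a minimal, runnable sketch:

assert "2019..2021".split("..", 1) == ["2019", "2021"]  # two endpoints
assert "2019".split("..", 1) == ["2019"]  # single date: start == end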
Parse a single *query part*, which is a chunk of a complete query string representing a single criterion.

A query part is a string consisting of:
- A *pattern*: the value to look for.
- Optionally, a *field name* preceding the pattern, separated by a colon. So in `foo:bar`, `foo` is the field name and `bar` is the pattern.
- Optionally, a *query prefix* just before the pattern (and after the optional colon) indicating the type of query that should be used. For example, in `~foo`, `~` might be a prefix. (The set of prefixes to look for is given in the `prefixes` parameter.)
- Optionally, a negation indicator, `-` or `^`, at the very beginning.

Both prefixes and the separating `:` character may be escaped with a backslash to avoid their normal meaning.

The function returns a tuple consisting of:
- The field name: a string or None if it's not present.
- The pattern, a string.
- The query class to use, which inherits from the base :class:`Query` type.
- A negation flag, a bool.

The three optional parameters determine which query class is used (i.e., the third return value). They are:
- `query_classes`, which maps field names to query classes. These are used when no explicit prefix is present.
- `prefixes`, which maps prefix strings to query classes.
- `default_class`, the fallback when neither the field nor a prefix indicates a query class.

So the precedence for determining which query class to return is: prefix, followed by field, and finally the default.

For example, assuming the `:` prefix is used for `RegexpQuery`:
- `'stapler'` -> `(None, 'stapler', SubstringQuery, False)`
- `'color:red'` -> `('color', 'red', SubstringQuery, False)`
- `':^Quiet'` -> `(None, '^Quiet', RegexpQuery, False)`, because the `^` follows the `:`
- `'color::b..e'` -> `('color', 'b..e', RegexpQuery, False)`
- `'-color:red'` -> `('color', 'red', SubstringQuery, True)`
def parse_query_part(
    part: str,
    query_classes: Dict[str, Type[query.FieldQuery]] = {},
    prefixes: Dict = {},
    default_class: Type[query.SubstringQuery] = query.SubstringQuery,
) -> Tuple[Optional[str], str, Type[query.FieldQuery], bool]:
    """Parse a single *query part*, which is a chunk of a complete query
    string representing a single criterion.

    A query part is a string consisting of:
    - A *pattern*: the value to look for.
    - Optionally, a *field name* preceding the pattern, separated by a
      colon. So in `foo:bar`, `foo` is the field name and `bar` is the
      pattern.
    - Optionally, a *query prefix* just before the pattern (and after
      the optional colon) indicating the type of query that should be
      used. For example, in `~foo`, `~` might be a prefix. (The set of
      prefixes to look for is given in the `prefixes` parameter.)
    - Optionally, a negation indicator, `-` or `^`, at the very
      beginning.

    Both prefixes and the separating `:` character may be escaped with a
    backslash to avoid their normal meaning.

    The function returns a tuple consisting of:
    - The field name: a string or None if it's not present.
    - The pattern, a string.
    - The query class to use, which inherits from the base
      :class:`Query` type.
    - A negation flag, a bool.

    The three optional parameters determine which query class is used
    (i.e., the third return value). They are:
    - `query_classes`, which maps field names to query classes. These
      are used when no explicit prefix is present.
    - `prefixes`, which maps prefix strings to query classes.
    - `default_class`, the fallback when neither the field nor a prefix
      indicates a query class.

    So the precedence for determining which query class to return is:
    prefix, followed by field, and finally the default.

    For example, assuming the `:` prefix is used for `RegexpQuery`:
    - `'stapler'` -> `(None, 'stapler', SubstringQuery, False)`
    - `'color:red'` -> `('color', 'red', SubstringQuery, False)`
    - `':^Quiet'` -> `(None, '^Quiet', RegexpQuery, False)`, because
      the `^` follows the `:`
    - `'color::b..e'` -> `('color', 'b..e', RegexpQuery, False)`
    - `'-color:red'` -> `('color', 'red', SubstringQuery, True)`
    """
    # Apply the regular expression and extract the components.
    part = part.strip()
    match = PARSE_QUERY_PART_REGEX.match(part)

    assert match  # Regex should always match.
    negate = bool(match.group(1))
    key = match.group(2)
    term = match.group(3).replace("\\:", ":")

    # Check whether there's a prefix in the query and use the
    # corresponding query type.
    for pre, query_class in prefixes.items():
        if term.startswith(pre):
            return key, term[len(pre):], query_class, negate

    # No matching prefix, so use either the query class determined by
    # the field or the default as a fallback.
    query_class = query_classes.get(key, default_class)
    return key, term, query_class, negate
Parse a *query part* string and return a :class:`Query` object.

:param model_cls: The :class:`Model` class that this is a query for. This is used to determine the appropriate query types for the model's fields.
:param prefixes: A map from prefix strings to :class:`Query` types.
:param query_part: The string to parse.

See the documentation for `parse_query_part` for more information on query part syntax.
def construct_query_part(
    model_cls: Type[Model],
    prefixes: Dict,
    query_part: str,
) -> query.Query:
    """Parse a *query part* string and return a :class:`Query` object.

    :param model_cls: The :class:`Model` class that this is a query for.
        This is used to determine the appropriate query types for the
        model's fields.
    :param prefixes: A map from prefix strings to :class:`Query` types.
    :param query_part: The string to parse.

    See the documentation for `parse_query_part` for more information on
    query part syntax.
    """
    # A shortcut for empty query parts.
    if not query_part:
        return query.TrueQuery()

    out_query: query.Query

    # Use `model_cls` to build up a map from field (or query) names to
    # `Query` classes.
    query_classes: Dict[str, Type[query.FieldQuery]] = {}
    for k, t in itertools.chain(
        model_cls._fields.items(), model_cls._types.items()
    ):
        query_classes[k] = t.query
    query_classes.update(model_cls._queries)  # Non-field queries.

    # Parse the string.
    key, pattern, query_class, negate = parse_query_part(
        query_part, query_classes, prefixes
    )

    # If there's no key (field name) specified, this is a "match
    # anything" query.
    if key is None:
        # The query type matches a specific field, but none was
        # specified. So we use a version of the query that matches
        # any field.
        out_query = query.AnyFieldQuery(
            pattern, model_cls._search_fields, query_class
        )

    # Field queries get constructed according to the name of the field
    # they are querying.
    else:
        out_query = query_class(key.lower(), pattern, key in model_cls._fields)

    # Apply negation.
    if negate:
        return query.NotQuery(out_query)
    else:
        return out_query
Creates a collection query of type `query_cls` from a list of strings in the format used by parse_query_part. `model_cls` determines how queries are constructed from strings.
def query_from_strings(
    query_cls: Type[query.CollectionQuery],
    model_cls: Type[Model],
    prefixes: Dict,
    query_parts: Collection[str],
) -> query.Query:
    """Creates a collection query of type `query_cls` from a list of
    strings in the format used by parse_query_part. `model_cls`
    determines how queries are constructed from strings.
    """
    subqueries = []
    for part in query_parts:
        subqueries.append(construct_query_part(model_cls, prefixes, part))
    if not subqueries:  # No terms in query.
        subqueries = [query.TrueQuery()]
    return query_cls(subqueries)
Create a `Sort` from a single string criterion. `model_cls` is the `Model` being queried. `part` is a single string ending in ``+`` or ``-`` indicating the sort direction. `case_insensitive` indicates whether the sort should ignore case.
def construct_sort_part(
    model_cls: Type[Model],
    part: str,
    case_insensitive: bool = True,
) -> Sort:
    """Create a `Sort` from a single string criterion. `model_cls` is
    the `Model` being queried. `part` is a single string ending in ``+``
    or ``-`` indicating the sort direction. `case_insensitive` indicates
    whether the sort should ignore case.
    """
    assert part, "part must be a field name and + or -"
    field = part[:-1]
    assert field, "field is missing"
    direction = part[-1]
    assert direction in ("+", "-"), "part must end with + or -"
    is_ascending = direction == "+"

    if field in model_cls._sorts:
        sort = model_cls._sorts[field](
            model_cls, is_ascending, case_insensitive
        )
    elif field in model_cls._fields:
        sort = query.FixedFieldSort(field, is_ascending, case_insensitive)
    else:
        # Flexible or computed.
        sort = query.SlowFieldSort(field, is_ascending, case_insensitive)
    return sort
Create a `Sort` from a list of sort criteria (strings).
def sort_from_strings(
    model_cls: Type[Model],
    sort_parts: Sequence[str],
    case_insensitive: bool = True,
) -> Sort:
    """Create a `Sort` from a list of sort criteria (strings)."""
    if not sort_parts:
        return query.NullSort()
    elif len(sort_parts) == 1:
        return construct_sort_part(model_cls, sort_parts[0], case_insensitive)
    else:
        sort = query.MultipleSort()
        for part in sort_parts:
            sort.add_sort(
                construct_sort_part(model_cls, part, case_insensitive)
            )
        return sort
Given a list of strings, create the `Query` and `Sort` that they represent.
def parse_sorted_query(
    model_cls: Type[Model],
    parts: List[str],
    prefixes: Dict = {},
    case_insensitive: bool = True,
) -> Tuple[query.Query, Sort]:
    """Given a list of strings, create the `Query` and `Sort` that they
    represent.
    """
    # Separate query tokens and sort tokens.
    query_parts = []
    sort_parts = []

    # Split up the query into comma-separated subqueries, each
    # representing an AndQuery, which need to be joined together in one
    # OrQuery.
    subquery_parts = []
    for part in parts + [","]:
        if part.endswith(","):
            # Ensure we can catch "foo, bar" as well as "foo , bar".
            last_subquery_part = part[:-1]
            if last_subquery_part:
                subquery_parts.append(last_subquery_part)
            # Parse the subquery into a single AndQuery.
            # TODO: Avoid needlessly wrapping AndQueries containing 1
            # subquery?
            query_parts.append(
                query_from_strings(
                    query.AndQuery, model_cls, prefixes, subquery_parts
                )
            )
            del subquery_parts[:]
        else:
            # Sort parts (1) end in + or -, (2) don't have a field, and
            # (3) consist of more than just the + or -.
            if part.endswith(("+", "-")) and ":" not in part and len(part) > 1:
                sort_parts.append(part)
            else:
                subquery_parts.append(part)

    # Avoid needlessly wrapping single statements in an OrQuery.
    q = query.OrQuery(query_parts) if len(query_parts) > 1 else query_parts[0]
    s = sort_from_strings(model_cls, sort_parts, case_insensitive)
    return q, s
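A hypothetical usage sketch, assuming beets is installed so its Item model is importable; the query terms are made up:

from beets.library import Item

q, s = parse_sorted_query(Item, ["genre:rock", "year+"])
# "genre:rock" becomes the query (items whose genre contains "rock");
# "year+" is recognized as a sort token and yields an ascending sort.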
Sends ``input`` to stdin.

>>> with control_stdin('yes'):
...     input()
'yes'
@contextmanager
def control_stdin(input=None):
    """Sends ``input`` to stdin.

    >>> with control_stdin('yes'):
    ...     input()
    'yes'
    """
    org = sys.stdin
    sys.stdin = StringIO(input)
    try:
        yield sys.stdin
    finally:
        sys.stdin = org
Save stdout in a StringIO.

>>> with capture_stdout() as output:
...     print('spam')
...
>>> output.getvalue()
'spam'
@contextmanager
def capture_stdout():
    """Save stdout in a StringIO.

    >>> with capture_stdout() as output:
    ...     print('spam')
    ...
    >>> output.getvalue()
    'spam'
    """
    org = sys.stdout
    sys.stdout = capture = StringIO()
    try:
        yield sys.stdout
    finally:
        sys.stdout = org
        print(capture.getvalue())
Convert any bytestring command arguments to strings, decoding them with the platform's argument encoding.
def _convert_args(args):
    """Convert any bytestring command arguments to strings, decoding
    them with the platform's argument encoding.
    """
    for i, elem in enumerate(args):
        if isinstance(elem, bytes):
            args[i] = elem.decode(util.arg_encoding())
    return args
Returns `True` if `cmd` can be executed.
def has_program(cmd, args=["--version"]):
    """Returns `True` if `cmd` can be executed."""
    full_cmd = _convert_args([cmd] + args)
    try:
        with open(os.devnull, "wb") as devnull:
            subprocess.check_call(
                full_cmd, stderr=devnull, stdout=devnull, stdin=devnull
            )
    except OSError:
        return False
    except subprocess.CalledProcessError:
        return False
    else:
        return True
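A hypothetical usage sketch; the program names are examples, not requirements:

if has_program("ffmpeg"):
    print("ffmpeg is available")
if not has_program("flac", ["--help"]):
    print("flac is missing or failed to run")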
Return `AlbumInfo` populated with mock data. The album info's `album_id` field is set to the corresponding argument. For each pair (`id`, `values`) in `track_values`, the `TrackInfo` from `generate_track_info` is added to the album info's `tracks` field. Most other fields of the album and track info are set to "album info" and "track info", respectively.
def generate_album_info(album_id, track_values):
    """Return `AlbumInfo` populated with mock data.

    The album info's `album_id` field is set to the corresponding
    argument. For each pair (`id`, `values`) in `track_values`, the
    `TrackInfo` from `generate_track_info` is added to the album info's
    `tracks` field. Most other fields of the album and track info are
    set to "album info" and "track info", respectively.
    """
    tracks = [generate_track_info(id, values) for id, values in track_values]
    album = AlbumInfo(
        album_id="album info",
        album="album info",
        artist="album info",
        artist_id="album info",
        tracks=tracks,
    )
    for field in ALBUM_INFO_FIELDS:
        setattr(album, field, "album info")

    # Set the documented `album_id` last so the mock fields can't
    # clobber it.
    album.album_id = album_id
    return album
Return `TrackInfo` populated with mock data. The `track_id` field is set to the corresponding argument. All other string fields are set to "track info".
def generate_track_info(track_id="track info", values={}):
    """Return `TrackInfo` populated with mock data.

    The `track_id` field is set to the corresponding argument. All other
    string fields are set to "track info".
    """
    track = TrackInfo(
        title="track info",
        track_id=track_id,
    )
    for field in TRACK_INFO_FIELDS:
        setattr(track, field, "track info")
    for field, value in values.items():
        setattr(track, field, value)
    return track
For commands that operate on matched items, performs a query and returns a list of matching items and a list of matching albums. (The latter is only nonempty when album is True.) Raises a UserError if no items match. also_items controls whether, when fetching albums, the associated items should be fetched also.
def _do_query(lib, query, album, also_items=True):
    """For commands that operate on matched items, performs a query and
    returns a list of matching items and a list of matching albums. (The
    latter is only nonempty when album is True.) Raises a UserError if
    no items match. also_items controls whether, when fetching albums,
    the associated items should be fetched also.
    """
    if album:
        albums = list(lib.albums(query))
        items = []
        if also_items:
            for al in albums:
                items += al.items()
    else:
        albums = []
        items = list(lib.items(query))

    if album and not albums:
        raise ui.UserError("No matching albums found.")
    elif not album and not items:
        raise ui.UserError("No matching items found.")

    return items, albums
Parse the logfile and yield skipped paths to pass to the `import` command.
def _paths_from_logfile(path):
    """Parse the logfile and yield skipped paths to pass to the `import`
    command.
    """
    with open(path, mode="r", encoding="utf-8") as fp:
        for i, line in enumerate(fp, start=1):
            verb, sep, paths = line.rstrip("\n").partition(" ")
            if not sep:
                raise ValueError(f"line {i} is invalid")

            # Ignore informational lines that don't need to be
            # re-imported.
            if verb in {"import", "duplicate-keep", "duplicate-replace"}:
                continue

            if verb not in {"asis", "skip", "duplicate-skip"}:
                raise ValueError(f"line {i} contains unknown verb {verb}")

            yield os.path.commonpath(paths.split("; "))
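The path-collapsing step at the end can be demonstrated on its own; a minimal, runnable sketch with hypothetical POSIX paths:

import os

paths = "/music/in/Album; /music/in/Album/01.mp3"
assert os.path.commonpath(paths.split("; ")) == "/music/in/Album"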
Parse all `logfiles` and yield paths from them.
def _parse_logfiles(logfiles):
    """Parse all `logfiles` and yield paths from them."""
    for logfile in logfiles:
        try:
            yield from _paths_from_logfile(syspath(normpath(logfile)))
        except ValueError as err:
            raise ui.UserError(
                "malformed logfile {}: {}".format(
                    util.displayable_path(logfile), str(err)
                )
            ) from err
        except IOError as err:
            raise ui.UserError(
                "unreadable logfile {}: {}".format(
                    util.displayable_path(logfile), str(err)
                )
            ) from err
Given a SQLite query result, print the `key` field of each returned row, with indentation of 2 spaces.
def _print_keys(query):
    """Given a SQLite query result, print the `key` field of each
    returned row, with indentation of 2 spaces.
    """
    for row in query:
        print_(" " * 2 + row["key"])
Generate a string for an AlbumInfo or TrackInfo object that provides context that helps disambiguate similar-looking albums and tracks.
def disambig_string(info):
    """Generate a string for an AlbumInfo or TrackInfo object that
    provides context that helps disambiguate similar-looking albums and
    tracks.
    """
    if isinstance(info, hooks.AlbumInfo):
        disambig = get_album_disambig_fields(info)
    elif isinstance(info, hooks.TrackInfo):
        disambig = get_singleton_disambig_fields(info)
    else:
        return ""

    return ", ".join(disambig)
Formats a string as a colorized similarity string according to a distance.
def dist_colorize(string, dist):
    """Formats a string as a colorized similarity string according to a
    distance.
    """
    if dist <= config["match"]["strong_rec_thresh"].as_number():
        string = ui.colorize("text_success", string)
    elif dist <= config["match"]["medium_rec_thresh"].as_number():
        string = ui.colorize("text_warning", string)
    else:
        string = ui.colorize("text_error", string)
    return string
Formats a distance (a float) as a colorized similarity percentage string.
def dist_string(dist):
    """Formats a distance (a float) as a colorized similarity percentage
    string.
    """
    string = "{:.1f}%".format(((1 - dist) * 100))
    return dist_colorize(string, dist)
Returns a colorized string that indicates all the penalties applied to a distance object.
def penalty_string(distance, limit=None):
    """Returns a colorized string that indicates all the penalties
    applied to a distance object.
    """
    penalties = []
    for key in distance.keys():
        key = key.replace("album_", "")
        key = key.replace("track_", "")
        key = key.replace("_", " ")
        penalties.append(key)
    if penalties:
        if limit and len(penalties) > limit:
            penalties = penalties[:limit] + ["..."]
        # Prefix penalty string with U+2260: Not Equal To.
        penalty_string = "\u2260 {}".format(", ".join(penalties))
        return ui.colorize("changed", penalty_string)
Print out a representation of the changes that will be made if an album's tags are changed according to `match`, which must be an AlbumMatch object.
def show_change(cur_artist, cur_album, match):
    """Print out a representation of the changes that will be made if an
    album's tags are changed according to `match`, which must be an
    AlbumMatch object.
    """
    change = AlbumChange(
        cur_artist=cur_artist, cur_album=cur_album, match=match
    )

    # Print the match header.
    change.show_match_header()
    # Print the match details.
    change.show_match_details()
    # Print the match tracks.
    change.show_match_tracks()
Print out the change that would occur by tagging `item` with the metadata from `match`, a TrackMatch object.
def show_item_change(item, match):
    """Print out the change that would occur by tagging `item` with the
    metadata from `match`, a TrackMatch object.
    """
    change = TrackChange(
        cur_artist=item.artist, cur_title=item.title, match=match
    )

    # Print the match header.
    change.show_match_header()
    # Print the match details.
    change.show_match_details()
Produces a brief summary line describing a set of items. Used for manually resolving duplicates during import. `items` is a list of `Item` objects. `singleton` indicates whether this is an album or single-item import (if the latter, then `items` should only have one element).
def summarize_items(items, singleton):
    """Produces a brief summary line describing a set of items. Used for
    manually resolving duplicates during import.

    `items` is a list of `Item` objects. `singleton` indicates whether
    this is an album or single-item import (if the latter, then `items`
    should only have one element).
    """
    summary_parts = []
    if not singleton:
        summary_parts.append("{} items".format(len(items)))

    format_counts = {}
    for item in items:
        format_counts[item.format] = format_counts.get(item.format, 0) + 1
    if len(format_counts) == 1:
        # A single format.
        summary_parts.append(items[0].format)
    else:
        # Enumerate all the formats by decreasing frequencies:
        for fmt, count in sorted(
            format_counts.items(),
            key=lambda fmt_and_count: (-fmt_and_count[1], fmt_and_count[0]),
        ):
            summary_parts.append(f"{fmt} {count}")

    if items:
        average_bitrate = sum([item.bitrate for item in items]) / len(items)
        total_duration = sum([item.length for item in items])
        total_filesize = sum([item.filesize for item in items])
        summary_parts.append("{}kbps".format(int(average_bitrate / 1000)))
        if items[0].format == "FLAC":
            sample_bits = "{}kHz/{} bit".format(
                round(int(items[0].samplerate) / 1000, 1), items[0].bitdepth
            )
            summary_parts.append(sample_bits)
        summary_parts.append(ui.human_seconds_short(total_duration))
        summary_parts.append(ui.human_bytes(total_filesize))

    return ", ".join(summary_parts)
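For example, a mixed-format album import might summarize as something like "9 items, MP3 8, FLAC 1, 256kbps, 35:42, 84.7 MB" (hypothetical values, assembled per the parts above).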
Determines whether a decision should be made without even asking the user. This occurs in quiet mode and when an action is chosen for NONE recommendations. Return None if the user should be queried. Otherwise, returns an action. May also print to the console if a summary judgment is made.
def _summary_judgment(rec):
    """Determines whether a decision should be made without even asking
    the user. This occurs in quiet mode and when an action is chosen for
    NONE recommendations. Return None if the user should be queried.
    Otherwise, returns an action. May also print to the console if a
    summary judgment is made.
    """
    if config["import"]["quiet"]:
        if rec == Recommendation.strong:
            return importer.action.APPLY
        else:
            action = config["import"]["quiet_fallback"].as_choice(
                {
                    "skip": importer.action.SKIP,
                    "asis": importer.action.ASIS,
                }
            )
    elif config["import"]["timid"]:
        return None
    elif rec == Recommendation.none:
        action = config["import"]["none_rec_action"].as_choice(
            {
                "skip": importer.action.SKIP,
                "asis": importer.action.ASIS,
                "ask": None,
            }
        )
    else:
        return None

    if action == importer.action.SKIP:
        print_("Skipping.")
    elif action == importer.action.ASIS:
        print_("Importing as-is.")
    return action
Given a sorted list of candidates, ask the user for a selection of which candidate to use. Applies to both full albums and singletons (tracks). Candidates are either AlbumMatch or TrackMatch objects depending on `singleton`. For albums, `cur_artist`, `cur_album`, and `itemcount` must be provided. For singletons, `item` must be provided. `choices` is a list of `PromptChoice`s to be used in each prompt.

Returns one of the following:
* the result of the choice, which may be SKIP or ASIS
* a candidate (an AlbumMatch/TrackMatch object)
* a chosen `PromptChoice` from `choices`
def choose_candidate(
    candidates,
    singleton,
    rec,
    cur_artist=None,
    cur_album=None,
    item=None,
    itemcount=None,
    choices=[],
):
    """Given a sorted list of candidates, ask the user for a selection
    of which candidate to use. Applies to both full albums and
    singletons (tracks). Candidates are either AlbumMatch or TrackMatch
    objects depending on `singleton`. For albums, `cur_artist`,
    `cur_album`, and `itemcount` must be provided. For singletons,
    `item` must be provided.

    `choices` is a list of `PromptChoice`s to be used in each prompt.

    Returns one of the following:
    * the result of the choice, which may be SKIP or ASIS
    * a candidate (an AlbumMatch/TrackMatch object)
    * a chosen `PromptChoice` from `choices`
    """
    # Sanity check.
    if singleton:
        assert item is not None
    else:
        assert cur_artist is not None
        assert cur_album is not None

    # Build helper variables for the prompt choices.
    choice_opts = tuple(c.long for c in choices)
    choice_actions = {c.short: c for c in choices}

    # Zero candidates.
    if not candidates:
        if singleton:
            print_("No matching recordings found.")
        else:
            print_("No matching release found for {} tracks.".format(itemcount))
            print_(
                "For help, see: "
                "https://beets.readthedocs.org/en/latest/faq.html#nomatch"
            )
        sel = ui.input_options(choice_opts)
        if sel in choice_actions:
            return choice_actions[sel]
        else:
            assert False

    # Is the change good enough?
    bypass_candidates = False
    if rec != Recommendation.none:
        match = candidates[0]
        bypass_candidates = True

    while True:
        # Display and choose from candidates.
        require = rec <= Recommendation.low

        if not bypass_candidates:
            # Display list of candidates.
            print_("")
            print_(
                'Finding tags for {} "{} - {}".'.format(
                    "track" if singleton else "album",
                    item.artist if singleton else cur_artist,
                    item.title if singleton else cur_album,
                )
            )
            print_(ui.indent(2) + "Candidates:")
            for i, match in enumerate(candidates):
                # Index, metadata, and distance.
                index0 = "{0}.".format(i + 1)
                index = dist_colorize(index0, match.distance)
                dist = "({:.1f}%)".format((1 - match.distance) * 100)
                distance = dist_colorize(dist, match.distance)
                metadata = "{0} - {1}".format(
                    match.info.artist,
                    match.info.title if singleton else match.info.album,
                )
                if i == 0:
                    metadata = dist_colorize(metadata, match.distance)
                else:
                    metadata = ui.colorize("text_highlight_minor", metadata)
                line1 = [index, distance, metadata]
                print_(ui.indent(2) + " ".join(line1))

                # Penalties.
                penalties = penalty_string(match.distance, 3)
                if penalties:
                    print_(ui.indent(13) + penalties)

                # Disambiguation.
                disambig = disambig_string(match.info)
                if disambig:
                    print_(ui.indent(13) + disambig)

            # Ask the user for a choice.
            sel = ui.input_options(choice_opts, numrange=(1, len(candidates)))
            if sel == "m":
                pass
            elif sel in choice_actions:
                return choice_actions[sel]
            else:  # Numerical selection.
                match = candidates[sel - 1]
                if sel != 1:
                    # When choosing anything but the first match,
                    # disable the default action.
                    require = True
        bypass_candidates = False

        # Show what we're about to do.
        if singleton:
            show_item_change(item, match)
        else:
            show_change(cur_artist, cur_album, match)

        # Exact match => tag automatically if we're not in timid mode.
        if rec == Recommendation.strong and not config["import"]["timid"]:
            return match

        # Ask for confirmation.
        default = config["import"]["default_action"].as_choice(
            {
                "apply": "a",
                "skip": "s",
                "asis": "u",
                "none": None,
            }
        )
        if default is None:
            require = True
        # Bell ring when user interaction is needed.
        if config["import"]["bell"]:
            ui.print_("\a", end="")
        sel = ui.input_options(
            ("Apply", "More candidates") + choice_opts,
            require=require,
            default=default,
        )
        if sel == "a":
            return match
        elif sel in choice_actions:
            return choice_actions[sel]
Get a new `Proposal` using manual search criteria. Input either an artist and album (for full albums) or artist and track name (for singletons) for manual search.
def manual_search(session, task):
    """Get a new `Proposal` using manual search criteria.

    Input either an artist and album (for full albums) or artist and
    track name (for singletons) for manual search.
    """
    artist = input_("Artist:").strip()
    name = input_("Album:" if task.is_album else "Track:").strip()

    if task.is_album:
        _, _, prop = autotag.tag_album(task.items, artist, name)
        return prop
    else:
        return autotag.tag_item(task.item, artist, name)
Get a new `Proposal` using a manually-entered ID. Input an ID, either for an album ("release") or a track ("recording").
def manual_id(session, task):
    """Get a new `Proposal` using a manually-entered ID.

    Input an ID, either for an album ("release") or a track
    ("recording").
    """
    prompt = "Enter {} ID:".format("release" if task.is_album else "recording")
    search_id = input_(prompt).strip()

    if task.is_album:
        _, _, prop = autotag.tag_album(task.items, search_ids=search_id.split())
        return prop
    else:
        return autotag.tag_item(task.item, search_ids=search_id.split())
A prompt choice callback that aborts the importer.
def abort_action(session, task):
    """A prompt choice callback that aborts the importer."""
    raise importer.ImportAbort()
Import the files in the given list of paths or matching the query.
def import_files(lib, paths, query):
    """Import the files in the given list of paths or matching the
    query.
    """
    # Check parameter consistency.
    if config["import"]["quiet"] and config["import"]["timid"]:
        raise ui.UserError("can't be both quiet and timid")

    # Open the log.
    if config["import"]["log"].get() is not None:
        logpath = syspath(config["import"]["log"].as_filename())
        try:
            loghandler = logging.FileHandler(logpath, encoding="utf-8")
        except OSError:
            raise ui.UserError(
                "could not open log file for writing: "
                "{}".format(displayable_path(logpath))
            )
    else:
        loghandler = None

    # Never ask for input in quiet mode.
    if config["import"]["resume"].get() == "ask" and config["import"]["quiet"]:
        config["import"]["resume"] = False

    session = TerminalImportSession(lib, loghandler, paths, query)
    session.run()

    # Emit event.
    plugins.send("import", lib=lib, paths=paths)
def list_items(lib, query, album, fmt=""): """Print out items in lib matching query. If album, then search for albums instead of single items. """ if album: for album in lib.albums(query): ui.print_(format(album, fmt)) else: for item in lib.items(query): ui.print_(format(item, fmt))
def update_items(lib, query, album, move, pretend, fields, exclude_fields=None):
    """For all the items matched by the query, update the library to
    reflect the item's embedded tags.
    :param fields: The fields to be stored. If not specified, all fields will
    be.
    :param exclude_fields: The fields to not be stored. If not specified, all
    fields will be.
    """
    with lib.transaction():
        items, _ = _do_query(lib, query, album)

        if move and fields is not None and "path" not in fields:
            # Special case: if an item needs to be moved, the path field has to
            # be updated; otherwise the new path will not be reflected in the
            # database.
            fields.append("path")

        if fields is None:
            # No fields were provided; update all media fields. Copy the
            # set so that adding "path" does not mutate the shared
            # class-level field set.
            item_fields = set(library.Item._media_fields)
            if move and "path" not in item_fields:
                # move is enabled, add 'path' to the set of fields to update
                item_fields.add("path")
        else:
            # fields was provided, just update those
            item_fields = fields

        # get all the album fields to update
        album_fields = fields or library.Album._fields.keys()

        if exclude_fields:
            # remove any excluded fields from the item and album sets
            item_fields = [f for f in item_fields if f not in exclude_fields]
            album_fields = [f for f in album_fields if f not in exclude_fields]

        # Walk through the items and pick up their changes.
        affected_albums = set()
        for item in items:
            # Item deleted?
            if not item.path or not os.path.exists(syspath(item.path)):
                ui.print_(format(item))
                ui.print_(ui.colorize("text_error", " deleted"))
                if not pretend:
                    item.remove(True)
                affected_albums.add(item.album_id)
                continue

            # Did the item change since last checked?
            if item.current_mtime() <= item.mtime:
                log.debug(
                    "skipping {0} because mtime is up to date ({1})",
                    displayable_path(item.path),
                    item.mtime,
                )
                continue

            # Read new data.
            try:
                item.read()
            except library.ReadError as exc:
                log.error(
                    "error reading {0}: {1}", displayable_path(item.path), exc
                )
                continue

            # Special-case album artist when it matches track artist. (Hacky
            # but necessary for preserving album-level metadata for non-
            # autotagged imports.)
            if not item.albumartist:
                old_item = lib.get_item(item.id)
                if old_item.albumartist == old_item.artist == item.artist:
                    item.albumartist = old_item.albumartist
                    item._dirty.discard("albumartist")

            # Check for and display changes.
            changed = ui.show_model_changes(item, fields=item_fields)

            # Save changes.
            if not pretend:
                if changed:
                    # Move the item if it's in the library.
                    if move and lib.directory in ancestry(item.path):
                        item.move(store=False)

                    item.store(fields=item_fields)
                    affected_albums.add(item.album_id)
                else:
                    # The file's mtime was different, but there were no
                    # changes to the metadata. Store the new mtime,
                    # which is set in the call to read(), so we don't
                    # check this again in the future.
                    item.store(fields=item_fields)

        # Skip album changes while pretending.
        if pretend:
            return

        # Modify affected albums to reflect changes in their items.
        for album_id in affected_albums:
            if album_id is None:  # Singletons.
                continue
            album = lib.get_album(album_id)
            if not album:  # Empty albums have already been removed.
                log.debug("emptied album {0}", album_id)
                continue
            first_item = album.items().get()

            # Update album structure to reflect an item in it.
            for key in library.Album.item_keys:
                album[key] = first_item[key]
            album.store(fields=album_fields)

            # Move album art (and any inconsistent items).
            if move and lib.directory in ancestry(first_item.path):
                log.debug("moving album {0}", album_id)

                # Manually moving and storing the album.
items = list(album.items()) for item in items: item.move(store=False, with_album=False) item.store(fields=item_fields) album.move(store=False) album.store(fields=album_fields)
def remove_items(lib, query, album, delete, force):
    """Remove items matching query from lib. If album, then match and
    remove whole albums. If delete, also remove files from disk.
    """
    # Get the matching items.
    items, albums = _do_query(lib, query, album)
    objs = albums if album else items

    # Confirm file removal if not forcing removal.
    if not force:
        # Prepare confirmation with user. Note that `input_select_objects`
        # appends a question mark to these prompts, so they must not end
        # with one themselves.
        album_str = (
            " in {} album{}".format(len(albums), "s" if len(albums) > 1 else "")
            if album
            else ""
        )

        if delete:
            fmt = "$path - $title"
            prompt = "Really DELETE"
            prompt_all = "Really DELETE {} file{}{}".format(
                len(items), "s" if len(items) > 1 else "", album_str
            )
        else:
            fmt = ""
            prompt = "Really remove from the library"
            prompt_all = "Really remove {} item{}{} from the library".format(
                len(items), "s" if len(items) > 1 else "", album_str
            )

        # Helpers for printing affected items
        def fmt_track(t):
            ui.print_(format(t, fmt))

        def fmt_album(a):
            ui.print_()
            for i in a.items():
                fmt_track(i)

        fmt_obj = fmt_album if album else fmt_track

        # Show all the items.
        for o in objs:
            fmt_obj(o)

        # Confirm with user.
        objs = ui.input_select_objects(
            prompt, objs, fmt_obj, prompt_all=prompt_all
        )
        if not objs:
            return

    # Remove (and possibly delete) items.
    with lib.transaction():
        for obj in objs:
            obj.remove(delete)
def show_stats(lib, query, exact):
    """Shows some statistics about the matched items."""
    items = lib.items(query)

    total_size = 0
    total_time = 0.0
    total_items = 0
    artists = set()
    albums = set()
    album_artists = set()

    for item in items:
        if exact:
            try:
                total_size += os.path.getsize(syspath(item.path))
            except OSError as exc:
                log.info("could not get size of {}: {}", item.path, exc)
        else:
            total_size += int(item.length * item.bitrate / 8)
        total_time += item.length
        total_items += 1
        artists.add(item.artist)
        album_artists.add(item.albumartist)
        if item.album_id:
            albums.add(item.album_id)

    size_str = ui.human_bytes(total_size)
    if exact:
        size_str += f" ({total_size} bytes)"

    print_(
        """Tracks: {}
Total time: {}{}
{}: {}
Artists: {}
Albums: {}
Album artists: {}""".format(
            total_items,
            ui.human_seconds(total_time),
            f" ({total_time:.2f} seconds)" if exact else "",
            "Total size" if exact else "Approximate total size",
            size_str,
            len(artists),
            len(albums),
            len(album_artists),
        ),
    )
def modify_items(lib, mods, dels, query, write, move, album, confirm, inherit):
    """Modifies matching items according to user-specified assignments and
    deletions.

    `mods` is a dictionary of field and value pairs indicating
    assignments. `dels` is a list of fields to be deleted.
    """
    # Select the model class so that field values are parsed with the
    # right types.
    model_cls = library.Album if album else library.Item

    # Get the items to modify.
    items, albums = _do_query(lib, query, album, False)
    objs = albums if album else items

    # Apply changes *temporarily*, preview them, and collect modified
    # objects.
    print_("Modifying {} {}s.".format(len(objs), "album" if album else "item"))
    changed = []
    templates = {
        key: functemplate.template(value) for key, value in mods.items()
    }
    for obj in objs:
        obj_mods = {
            key: model_cls._parse(key, obj.evaluate_template(templates[key]))
            for key in mods.keys()
        }
        if print_and_modify(obj, obj_mods, dels) and obj not in changed:
            changed.append(obj)

    # Still something to do?
    if not changed:
        print_("No changes to make.")
        return

    # Confirm action.
    if confirm:
        if write and move:
            extra = ", move and write tags"
        elif write:
            extra = " and write tags"
        elif move:
            extra = " and move"
        else:
            extra = ""

        changed = ui.input_select_objects(
            "Really modify%s" % extra,
            changed,
            lambda o: print_and_modify(o, mods, dels),
        )

    # Apply changes to database and files
    with lib.transaction():
        for obj in changed:
            obj.try_sync(write, move, inherit)
def print_and_modify(obj, mods, dels): """Print the modifications to an item and return a bool indicating whether any changes were made. `mods` is a dictionary of fields and values to update on the object; `dels` is a sequence of fields to delete. """ obj.update(mods) for field in dels: try: del obj[field] except KeyError: pass return ui.show_model_changes(obj)
def modify_parse_args(args): """Split the arguments for the modify subcommand into query parts, assignments (field=value), and deletions (field!). Returns the result as a three-tuple in that order. """ mods = {} dels = [] query = [] for arg in args: if arg.endswith("!") and "=" not in arg and ":" not in arg: dels.append(arg[:-1]) # Strip trailing !. elif "=" in arg and ":" not in arg.split("=", 1)[0]: key, val = arg.split("=", 1) mods[key] = val else: query.append(arg) return query, mods, dels
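# Usage sketch for modify_parse_args (the argument values here are
# hypothetical): "artist:Beatles" is a query term (it contains ":"),
# "genre=Rock" is an assignment, and "comments!" is a deletion.
query, mods, dels = modify_parse_args(
    ["artist:Beatles", "genre=Rock", "comments!"]
)
assert query == ["artist:Beatles"]
assert mods == {"genre": "Rock"}
assert dels == ["comments"]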
def move_items( lib, dest, query, copy, album, pretend, confirm=False, export=False ): """Moves or copies items to a new base directory, given by dest. If dest is None, then the library's base directory is used, making the command "consolidate" files. """ items, albums = _do_query(lib, query, album, False) objs = albums if album else items num_objs = len(objs) # Filter out files that don't need to be moved. def isitemmoved(item): return item.path != item.destination(basedir=dest) def isalbummoved(album): return any(isitemmoved(i) for i in album.items()) objs = [o for o in objs if (isalbummoved if album else isitemmoved)(o)] num_unmoved = num_objs - len(objs) # Report unmoved files that match the query. unmoved_msg = "" if num_unmoved > 0: unmoved_msg = f" ({num_unmoved} already in place)" copy = copy or export # Exporting always copies. action = "Copying" if copy else "Moving" act = "copy" if copy else "move" entity = "album" if album else "item" log.info( "{0} {1} {2}{3}{4}.", action, len(objs), entity, "s" if len(objs) != 1 else "", unmoved_msg, ) if not objs: return if pretend: if album: show_path_changes( [ (item.path, item.destination(basedir=dest)) for obj in objs for item in obj.items() ] ) else: show_path_changes( [(obj.path, obj.destination(basedir=dest)) for obj in objs] ) else: if confirm: objs = ui.input_select_objects( "Really %s" % act, objs, lambda o: show_path_changes( [(o.path, o.destination(basedir=dest))] ), ) for obj in objs: log.debug("moving: {0}", util.displayable_path(obj.path)) if export: # Copy without affecting the database. obj.move( operation=MoveOperation.COPY, basedir=dest, store=False ) else: # Ordinary move/copy: store the new path. if copy: obj.move(operation=MoveOperation.COPY, basedir=dest) else: obj.move(operation=MoveOperation.MOVE, basedir=dest)
def write_items(lib, query, pretend, force): """Write tag information from the database to the respective files in the filesystem. """ items, albums = _do_query(lib, query, False, False) for item in items: # Item deleted? if not os.path.exists(syspath(item.path)): log.info("missing file: {0}", util.displayable_path(item.path)) continue # Get an Item object reflecting the "clean" (on-disk) state. try: clean_item = library.Item.from_path(item.path) except library.ReadError as exc: log.error( "error reading {0}: {1}", displayable_path(item.path), exc ) continue # Check for and display changes. changed = ui.show_model_changes( item, clean_item, library.Item._media_tag_fields, force ) if (changed or force) and not pretend: # We use `try_sync` here to keep the mtime up to date in the # database. item.try_sync(True, False)
def config_edit(): """Open a program to edit the user configuration. An empty config file is created if no existing config file exists. """ path = config.user_config_path() editor = util.editor_command() try: if not os.path.isfile(path): open(path, "w+").close() util.interactive_open([path], editor) except OSError as exc: message = f"Could not edit configuration: {exc}" if not editor: message += ( ". Please set the VISUAL (or EDITOR) environment variable" ) raise ui.UserError(message)
def completion_script(commands):
    """Yield the full completion shell script as strings.

    ``commands`` is a list of ``ui.Subcommand`` instances to generate
    completion data for.
    """
    base_script = os.path.join(os.path.dirname(__file__), "completion_base.sh")
    with open(base_script) as base_script:
        yield base_script.read()

    options = {}
    aliases = {}
    command_names = []

    # Collect subcommands
    for cmd in commands:
        name = cmd.name
        command_names.append(name)

        for alias in cmd.aliases:
            if re.match(r"^\w+$", alias):
                aliases[alias] = name

        options[name] = {"flags": [], "opts": []}
        for opts in cmd.parser._get_all_options()[1:]:
            if opts.action in ("store_true", "store_false"):
                option_type = "flags"
            else:
                option_type = "opts"

            options[name][option_type].extend(
                opts._short_opts + opts._long_opts
            )

    # Add global options
    options["_global"] = {
        "flags": ["-v", "--verbose"],
        "opts": "-l --library -c --config -d --directory -h --help".split(" "),
    }

    # Add flags common to all commands
    options["_common"] = {"flags": ["-h", "--help"]}

    # Start generating the script
    yield "_beet() {\n"

    # Command names
    yield " local commands='%s'\n" % " ".join(command_names)
    yield "\n"

    # Command aliases
    yield " local aliases='%s'\n" % " ".join(aliases.keys())
    for alias, cmd in aliases.items():
        yield " local alias__{}={}\n".format(alias.replace("-", "_"), cmd)
    yield "\n"

    # Fields
    yield " fields='%s'\n" % " ".join(
        set(
            list(library.Item._fields.keys())
            + list(library.Album._fields.keys())
        )
    )

    # Command options
    for cmd, opts in options.items():
        for option_type, option_list in opts.items():
            if option_list:
                option_list = " ".join(option_list)
                yield " local {}__{}='{}'\n".format(
                    option_type, cmd.replace("-", "_"), option_list
                )

    yield " _beet_dispatch\n"
    yield "}\n"
def _in_encoding(): """Get the encoding to use for *inputting* strings from the console.""" return _stream_encoding(sys.stdin)
def _out_encoding(): """Get the encoding to use for *outputting* strings to the console.""" return _stream_encoding(sys.stdout)
def _stream_encoding(stream, default="utf-8"):
    """A helper for `_in_encoding` and `_out_encoding`: get the
    stream's preferred encoding, using a configured override or a
    default fallback if neither is available.
    """
    # Configured override?
    encoding = config["terminal_encoding"].get()
    if encoding:
        return encoding

    # For testing: When sys.stdout or sys.stdin is a StringIO under the
    # test harness, it doesn't have an `encoding` attribute. Just use
    # UTF-8.
    if not hasattr(stream, "encoding"):
        return default

    # Python's guessed output stream encoding, or UTF-8 as a fallback
    # (e.g., when piped to a file).
    return stream.encoding or default
def decargs(arglist):
    """Given a list of command-line argument strings, return them
    unchanged. On Python 3 this is a no-op; it is retained for
    backwards compatibility with code that decoded bytestring
    arguments under Python 2.
    """
    return arglist
def print_(*strings, **kwargs): """Like print, but rather than raising an error when a character is not in the terminal's encoding's character set, just silently replaces it. The arguments must be Unicode strings: `unicode` on Python 2; `str` on Python 3. The `end` keyword argument behaves similarly to the built-in `print` (it defaults to a newline). """ if not strings: strings = [""] assert isinstance(strings[0], str) txt = " ".join(strings) txt += kwargs.get("end", "\n") # Encode the string and write it to stdout. # On Python 3, sys.stdout expects text strings and uses the # exception-throwing encoding error policy. To avoid throwing # errors and use our configurable encoding override, we use the # underlying bytes buffer instead. if hasattr(sys.stdout, "buffer"): out = txt.encode(_out_encoding(), "replace") sys.stdout.buffer.write(out) sys.stdout.buffer.flush() else: # In our test harnesses (e.g., DummyOut), sys.stdout.buffer # does not exist. We instead just record the text string. sys.stdout.write(txt)
def _bool_fallback(a, b): """Given a boolean or None, return the original value or a fallback.""" if a is None: assert isinstance(b, bool) return b else: assert isinstance(a, bool) return a
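# The fallback semantics in brief (illustrative assertions): an
# explicit boolean wins; None defers to the fallback value.
assert _bool_fallback(None, True) is True
assert _bool_fallback(False, True) is False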
def should_write(write_opt=None): """Decide whether a command that updates metadata should also write tags, using the importer configuration as the default. """ return _bool_fallback(write_opt, config["import"]["write"].get(bool))
def should_move(move_opt=None): """Decide whether a command that updates metadata should also move files when they're inside the library, using the importer configuration as the default. Specifically, commands should move files after metadata updates only when the importer is configured *either* to move *or* to copy files. They should avoid moving files when the importer is configured not to touch any filenames. """ return _bool_fallback( move_opt, config["import"]["move"].get(bool) or config["import"]["copy"].get(bool), )
def indent(count): """Returns a string with `count` many spaces.""" return " " * count
def input_(prompt=None):
    """Like `input`, but decodes the result to a Unicode string.
    Raises a UserError if stdin is not available. The prompt is sent to
    stdout rather than stderr. A space is printed between the prompt
    and the input cursor.
    """
    # raw_input incorrectly sends prompts to stderr, not stdout, so we
    # use print_() explicitly to display prompts.
    # https://bugs.python.org/issue1927
    if prompt:
        print_(prompt, end=" ")

    try:
        resp = input()
    except EOFError:
        raise UserError("stdin stream ended while input required")

    return resp
def input_options(
    options,
    require=False,
    prompt=None,
    fallback_prompt=None,
    numrange=None,
    default=None,
    max_width=72,
):
    """Prompts a user for input. The sequence of `options` defines the
    choices the user has. A single-letter shortcut is inferred for each
    option; the user's choice is returned as that single, lower-case
    letter. The options should be provided as lower-case strings unless
    a particular shortcut is desired; in that case, only that letter
    should be capitalized.

    By default, the first option is the default. `default` can be
    provided to override this. If `require` is provided, then there is
    no default. The prompt and fallback prompt are also inferred but
    can be overridden.

    If numrange is provided, it is a pair of `(low, high)` (both ints)
    indicating that, in addition to `options`, the user may enter an
    integer in that inclusive range.

    `max_width` specifies the maximum number of columns in the
    automatically generated prompt string.
    """
    # Assign single letters to each option. Also capitalize the options
    # to indicate the letter.
    letters = {}
    display_letters = []
    capitalized = []
    first = True
    for option in options:
        # Is a letter already capitalized?
        for letter in option:
            if letter.isalpha() and letter.upper() == letter:
                found_letter = letter
                break
        else:
            # Infer a letter.
            for letter in option:
                if not letter.isalpha():
                    continue  # Don't use punctuation.
                if letter not in letters:
                    found_letter = letter
                    break
            else:
                raise ValueError("no unambiguous lettering found")

        letters[found_letter.lower()] = option
        index = option.index(found_letter)

        # Mark the option's shortcut letter for display.
        if not require and (
            (default is None and not numrange and first)
            or (
                isinstance(default, str)
                and found_letter.lower() == default.lower()
            )
        ):
            # The first option is the default; mark it.
            show_letter = "[%s]" % found_letter.upper()
            is_default = True
        else:
            show_letter = found_letter.upper()
            is_default = False

        # Colorize the letter shortcut.
        show_letter = colorize(
            "action_default" if is_default else "action", show_letter
        )

        # Insert the highlighted letter back into the word.
        descr_color = "action_default" if is_default else "action_description"
        capitalized.append(
            colorize(descr_color, option[:index])
            + show_letter
            + colorize(descr_color, option[index + 1 :])
        )
        display_letters.append(found_letter.upper())

        first = False

    # The default is just the first option if unspecified.
    if require:
        default = None
    elif default is None:
        if numrange:
            default = numrange[0]
        else:
            default = display_letters[0].lower()

    # Make a prompt if one is not provided.
    if not prompt:
        prompt_parts = []
        prompt_part_lengths = []
        if numrange:
            if isinstance(default, int):
                default_name = str(default)
                default_name = colorize("action_default", default_name)
                tmpl = "# selection (default %s)"
                prompt_parts.append(tmpl % default_name)
                prompt_part_lengths.append(len(tmpl % str(default)))
            else:
                prompt_parts.append("# selection")
                prompt_part_lengths.append(len(prompt_parts[-1]))
        prompt_parts += capitalized
        prompt_part_lengths += [len(s) for s in options]

        # Wrap the query text.
        # Start prompt with U+279C: Heavy Round-Tipped Rightwards Arrow
        prompt = colorize("action", "\u279C ")
        line_length = 0
        for i, (part, length) in enumerate(
            zip(prompt_parts, prompt_part_lengths)
        ):
            # Add punctuation.
            if i == len(prompt_parts) - 1:
                part += colorize("action_description", "?")
            else:
                part += colorize("action_description", ",")
            length += 1

            # Choose either the current line or the beginning of the next.
if line_length + length + 1 > max_width: prompt += "\n" line_length = 0 if line_length != 0: # Not the beginning of the line; need a space. part = " " + part length += 1 prompt += part line_length += length # Make a fallback prompt too. This is displayed if the user enters # something that is not recognized. if not fallback_prompt: fallback_prompt = "Enter one of " if numrange: fallback_prompt += "%i-%i, " % numrange fallback_prompt += ", ".join(display_letters) + ":" resp = input_(prompt) while True: resp = resp.strip().lower() # Try default option. if default is not None and not resp: resp = default # Try an integer input if available. if numrange: try: resp = int(resp) except ValueError: pass else: low, high = numrange if low <= resp <= high: return resp else: resp = None # Try a normal letter input. if resp: resp = resp[0] if resp in letters: return resp # Prompt for new input. resp = input_(fallback_prompt)
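# Usage sketch (interactive, so the outcome depends on what the user
# types; the option names are hypothetical): with a numeric range, the
# return value is either a lower-case shortcut letter or an int.
sel = input_options(("Apply", "Skip"), numrange=(1, 5))
if sel == "a":
    pass  # the default action was chosen
elif isinstance(sel, int):
    pass  # the user picked entry number `sel`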
Prompts the user for a "yes" or "no" response. The default is "yes" unless `require` is `True`, in which case there is no default.
def input_yn(prompt, require=False): """Prompts the user for a "yes" or "no" response. The default is "yes" unless `require` is `True`, in which case there is no default. """ # Start prompt with U+279C: Heavy Round-Tipped Rightwards Arrow yesno = colorize("action", "\u279C ") + colorize( "action_description", "Enter Y or N:" ) sel = input_options(("y", "n"), require, prompt, yesno) return sel == "y"
def input_select_objects(prompt, objs, rep, prompt_all=None):
    """Prompt the user to choose all, none, or some of the given
    objects. Return the list of selected objects.

    `prompt` is the prompt string to use for each question (it should be
    phrased as an imperative verb). If `prompt_all` is given, it is used
    instead of `prompt` for the first (yes/no/select) question. `rep` is a
    function to call on each object to print it out when confirming objects
    individually.
    """
    choice = input_options(
        ("y", "n", "s"), False, "%s? (Yes/no/select)" % (prompt_all or prompt)
    )
    print()  # Blank line.

    if choice == "y":  # Yes.
        return objs

    elif choice == "s":  # Select.
        out = []
        for obj in objs:
            rep(obj)
            answer = input_options(
                ("y", "n", "q"),
                True,
                "%s? (yes/no/quit)" % prompt,
                "Enter Y or N:",
            )
            if answer == "y":
                out.append(obj)
            elif answer == "q":
                return out
        return out

    else:  # No.
        return []
def human_bytes(size): """Formats size, a number of bytes, in a human-readable way.""" powers = ["", "K", "M", "G", "T", "P", "E", "Z", "Y", "H"] unit = "B" for power in powers: if size < 1024: return f"{size:3.1f} {power}{unit}" size /= 1024.0 unit = "iB" return "big"
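# Example outputs, computed from the function above. Note that the
# suffix switches to binary units ("iB") after the first division.
assert human_bytes(440) == "440.0 B"
assert human_bytes(123456789) == "117.7 MiB"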
def human_seconds(interval): """Formats interval, a number of seconds, as a human-readable time interval using English words. """ units = [ (1, "second"), (60, "minute"), (60, "hour"), (24, "day"), (7, "week"), (52, "year"), (10, "decade"), ] for i in range(len(units) - 1): increment, suffix = units[i] next_increment, _ = units[i + 1] interval /= float(increment) if interval < next_increment: break else: # Last unit. increment, suffix = units[-1] interval /= float(increment) return f"{interval:3.1f} {suffix}s"
def human_seconds_short(interval): """Formats a number of seconds as a short human-readable M:SS string. """ interval = int(interval) return "%i:%02i" % (interval // 60, interval % 60)
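# Example outputs for the two duration helpers above:
assert human_seconds(4000) == "1.1 hours"
assert human_seconds_short(125) == "2:05"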
def _colorize(color, text): """Returns a string that prints the given text in the given color in a terminal that is ANSI color-aware. The color must be a list of strings from ANSI_CODES. """ # Construct escape sequence to be put before the text by iterating # over all "ANSI codes" in `color`. escape = "" for code in color: escape = escape + COLOR_ESCAPE + "%im" % ANSI_CODES[code] return escape + text + RESET_COLOR
def colorize(color_name, text):
    """Colorize text if colored output is enabled. (Like _colorize but
    conditional.)
    """
    if config["ui"]["color"] and "NO_COLOR" not in os.environ:
        global COLORS
        if not COLORS:
            # Read all color configurations and set global variable COLORS.
            COLORS = dict()
            for name in COLOR_NAMES:
                # Convert legacy color definitions (strings) into the new
                # list-based color definitions. Do this by trying to read the
                # color definition from the configuration as unicode - if this
                # is successful, the color definition is a legacy definition
                # and has to be converted.
                try:
                    color_def = config["ui"]["colors"][name].get(str)
                except (confuse.ConfigTypeError, NameError):
                    # Normal color definition (type: list of unicode).
                    color_def = config["ui"]["colors"][name].get(list)
                else:
                    # Legacy color definition (type: unicode). Convert.
                    if color_def in LEGACY_COLORS:
                        color_def = LEGACY_COLORS[color_def]
                    else:
                        raise UserError(f"no such color {color_def}")
                for code in color_def:
                    if code not in ANSI_CODES.keys():
                        raise ValueError(f"no such ANSI code {code}")
                COLORS[name] = color_def
        # In case a 3rd party plugin is still passing the actual color ('red')
        # instead of the abstract color name ('text_error')
        color = COLORS.get(color_name)
        if not color:
            log.debug("Invalid color_name: {0}", color_name)
            color = color_name
        return _colorize(color, text)
    else:
        return text
def uncolorize(colored_text):
    """Remove colors from a string."""
    # Define a regular expression to match ANSI codes.
    # See: http://stackoverflow.com/a/2187024/1382707
    # Explanation of regular expression:
    #     \x1b     - matches ESC character
    #     \[       - matches opening square bracket
    #     [;\d]*   - matches zero or more digits or semicolons
    #     [A-Za-z] - matches a letter
    ansi_code_regex = re.compile(r"\x1b\[[;\d]*[A-Za-z]", re.VERBOSE)
    # Strip ANSI codes from `colored_text` using the regular expression.
    text = ansi_code_regex.sub("", colored_text)
    return text
def color_len(colored_text):
    """Measure the length of a string while excluding ANSI codes from
    the measurement. The standard `len(my_string)` also counts ANSI
    codes toward the string length, which is counterproductive when
    laying out a terminal interface.
    """
    # Return the length of the uncolored string.
    return len(uncolorize(colored_text))
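# Both helpers at a glance: a standard ANSI red/reset pair is
# stripped, so the visible length is 3 even though len() is larger.
colored = "\x1b[31mred\x1b[39;49;00m"
assert uncolorize(colored) == "red"
assert color_len(colored) == 3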
def _colordiff(a, b): """Given two values, return the same pair of strings except with their differences highlighted in the specified color. Strings are highlighted intelligently to show differences; other values are stringified and highlighted in their entirety. """ # First, convert paths to readable format if isinstance(a, bytes) or isinstance(b, bytes): # A path field. a = util.displayable_path(a) b = util.displayable_path(b) if not isinstance(a, str) or not isinstance(b, str): # Non-strings: use ordinary equality. if a == b: return str(a), str(b) else: return ( colorize("text_diff_removed", str(a)), colorize("text_diff_added", str(b)), ) a_out = [] b_out = [] matcher = SequenceMatcher(lambda x: False, a, b) for op, a_start, a_end, b_start, b_end in matcher.get_opcodes(): if op == "equal": # In both strings. a_out.append(a[a_start:a_end]) b_out.append(b[b_start:b_end]) elif op == "insert": # Right only. b_out.append(colorize("text_diff_added", b[b_start:b_end])) elif op == "delete": # Left only. a_out.append(colorize("text_diff_removed", a[a_start:a_end])) elif op == "replace": # Right and left differ. Colorise with second highlight if # it's just a case change. if a[a_start:a_end].lower() != b[b_start:b_end].lower(): a_color = "text_diff_removed" b_color = "text_diff_added" else: a_color = b_color = "text_highlight_minor" a_out.append(colorize(a_color, a[a_start:a_end])) b_out.append(colorize(b_color, b[b_start:b_end])) else: assert False return "".join(a_out), "".join(b_out)
def colordiff(a, b): """Colorize differences between two values if color is enabled. (Like _colordiff but conditional.) """ if config["ui"]["color"]: return _colordiff(a, b) else: return str(a), str(b)
def get_path_formats(subview=None): """Get the configuration's path formats as a list of query/template pairs. """ path_formats = [] subview = subview or config["paths"] for query, view in subview.items(): query = PF_KEY_QUERIES.get(query, query) # Expand common queries. path_formats.append((query, template(view.as_str()))) return path_formats
def get_replacements(): """Confuse validation function that reads regex/string pairs.""" replacements = [] for pattern, repl in config["replace"].get(dict).items(): repl = repl or "" try: replacements.append((re.compile(pattern), repl)) except re.error: raise UserError( "malformed regular expression in replace: {}".format(pattern) ) return replacements
def term_width(): """Get the width (columns) of the terminal.""" fallback = config["ui"]["terminal_width"].get(int) # The fcntl and termios modules are not available on non-Unix # platforms, so we fall back to a constant. try: import fcntl import termios except ImportError: return fallback try: buf = fcntl.ioctl(0, termios.TIOCGWINSZ, " " * 4) except OSError: return fallback try: height, width = struct.unpack("hh", buf) except struct.error: return fallback return width
def split_into_lines(string, width_tuple):
    """Splits string into a list of substrings at whitespace.

    `width_tuple` is a 3-tuple of `(first_width, middle_width,
    last_width)`. The first substring has a length not longer than
    `first_width`, the last substring has a length not longer than
    `last_width`, and all other substrings have a length not longer
    than `middle_width`.

    `string` may contain ANSI codes at word borders.
    """
    first_width, middle_width, last_width = width_tuple
    words = []
    esc_text = re.compile(
        r"""(?P<pretext>[^\x1b]*)
            (?P<esc>(?:\x1b\[[;\d]*[A-Za-z])+)
            (?P<text>[^\x1b]+)(?P<reset>\x1b\[39;49;00m)
            (?P<posttext>[^\x1b]*)""",
        re.VERBOSE,
    )
    if uncolorize(string) == string:
        # No colors in string
        words = string.split()
    else:
        # Use a regex to find escapes and the text within them.
        for m in esc_text.finditer(string):
            # m contains five groups:
            #   pretext  - any text before escape sequence
            #   esc      - initial escape sequence
            #   text     - text, no escape sequence, may contain spaces
            #   reset    - ANSI colour reset
            #   posttext - any text after the reset
            space_before_text = False
            if m.group("pretext") != "":
                # Some pretext found, let's handle it
                # Add any words in the pretext
                words += m.group("pretext").split()
                if m.group("pretext")[-1] == " ":
                    # Pretext ended on a space
                    space_before_text = True
                else:
                    # Pretext ended mid-word; the colored text below
                    # continues that word.
                    pass
            else:
                # pretext empty, treat as if there is a space before
                space_before_text = True
            if m.group("text")[0] == " ":
                # First character of the text is a space
                space_before_text = True
            # Now, handle the words in the main text:
            raw_words = m.group("text").split()
            if space_before_text:
                # Colorize each word with pre/post escapes
                # Reconstruct colored words
                words += [
                    m.group("esc") + raw_word + RESET_COLOR
                    for raw_word in raw_words
                ]
            elif raw_words:
                # Pretext stops mid-word
                if m.group("esc") != RESET_COLOR:
                    # Add the rest of the current word, with a reset after it
                    words[-1] += m.group("esc") + raw_words[0] + RESET_COLOR
                    # Add the subsequent colored words:
                    words += [
                        m.group("esc") + raw_word + RESET_COLOR
                        for raw_word in raw_words[1:]
                    ]
                else:
                    # Caught a mid-word escape sequence
                    words[-1] += raw_words[0]
                    words += raw_words[1:]
            if (
                m.group("text")[-1] != " "
                and m.group("posttext") != ""
                and m.group("posttext")[0] != " "
            ):
                # reset falls mid-word
                post_text = m.group("posttext").split()
                words[-1] += post_text[0]
                words += post_text[1:]
            else:
                # Add any words after escape sequence
                words += m.group("posttext").split()

    result = []
    next_substr = ""
    # Iterate over all words.
    previous_fit = False
    for i in range(len(words)):
        if i == 0:
            pot_substr = words[i]
        else:
            # (optimistically) add the next word to check the fit
            pot_substr = " ".join([next_substr, words[i]])
        # Find out if the pot(ential)_substr fits into the next substring.
        fits_first = len(result) == 0 and color_len(pot_substr) <= first_width
        fits_middle = len(result) != 0 and color_len(pot_substr) <= middle_width
        if fits_first or fits_middle:
            # Fitted(!)
            # let's try and add another word before appending
            next_substr = pot_substr
            previous_fit = True
        elif not fits_first and not fits_middle and previous_fit:
            # Extra word didn't fit, append what we have
            result.append(next_substr)
            next_substr = words[i]
            previous_fit = color_len(next_substr) <= middle_width
        else:
            # Didn't fit anywhere
            if uncolorize(pot_substr) == pot_substr:
                # Simple uncolored string, append a cropped word
                if len(result) == 0:
                    # Crop word by the first_width for the first line
                    result.append(pot_substr[:first_width])
                    # add rest of word to next line
                    next_substr = pot_substr[first_width:]
                else:
                    result.append(pot_substr[:middle_width])
                    next_substr = pot_substr[middle_width:]
            else:
                # Colored strings
                if len(result) == 0:
                    this_line, next_line = color_split(pot_substr, first_width)
                    result.append(this_line)
                    next_substr = next_line
                else:
                    this_line, next_line = color_split(pot_substr, middle_width)
                    result.append(this_line)
                    next_substr = next_line
            previous_fit = color_len(next_substr) <= middle_width
    # We finished constructing the substrings, but the last substring
    # has not yet been added to the result.
    result.append(next_substr)
    # Also, the length of the last substring was only checked against
    # `middle_width`. Append an empty substring as the new last substring if
    # the last substring is too long.
    if not color_len(next_substr) <= last_width:
        result.append("")
    return result
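# A plain-text usage sketch: with uniform widths of 10 columns, the
# string wraps at word boundaries.
assert split_into_lines("the quick brown fox jumps", (10, 10, 10)) == [
    "the quick",
    "brown fox",
    "jumps",
]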
Print left & right data, with separator inbetween 'left' and 'right' have a structure of: {'prefix':u'','contents':u'','suffix':u'','width':0} In a column layout the printing will be: {indent_str}{lhs0}{separator}{rhs0} {lhs1 / padding }{rhs1} ... The first line of each column (i.e. {lhs0} or {rhs0}) is: {prefix}{part of contents}{suffix} With subsequent lines (i.e. {lhs1}, {rhs1} onwards) being the rest of contents, wrapped if the width would be otherwise exceeded.
def print_column_layout(
    indent_str, left, right, separator=" -> ", max_width=None
):
    """Print left & right data, with a separator in between. 'left' and
    'right' have a structure of:
    {'prefix':u'','contents':u'','suffix':u'','width':0}
    In a column layout the printing will be:
    {indent_str}{lhs0}{separator}{rhs0}
    {lhs1 / padding }{rhs1}
    ...
    The first line of each column (i.e. {lhs0} or {rhs0}) is:
    {prefix}{part of contents}{suffix}
    With subsequent lines (i.e. {lhs1}, {rhs1} onwards) being the rest of
    contents, wrapped if the width would be otherwise exceeded.
    """
    # Compute the terminal width at call time; a `term_width()` default
    # argument would be evaluated only once, at import time.
    if max_width is None:
        max_width = term_width()
    if right["prefix"] + right["contents"] + right["suffix"] == "":
        # No right hand information, so we don't need a separator.
        separator = ""
    first_line_no_wrap = (
        indent_str
        + left["prefix"]
        + left["contents"]
        + left["suffix"]
        + separator
        + right["prefix"]
        + right["contents"]
        + right["suffix"]
    )
    if color_len(first_line_no_wrap) < max_width:
        # Everything fits, print out line.
        print_(first_line_no_wrap)
    else:
        # Wrap into columns
        if "width" not in left or "width" not in right:
            # If widths have not been defined, set to share space.
            left["width"] = (
                max_width - len(indent_str) - color_len(separator)
            ) // 2
            right["width"] = (
                max_width - len(indent_str) - color_len(separator)
            ) // 2

        # On the first line, account for suffix as well as prefix
        left_width_tuple = (
            left["width"]
            - color_len(left["prefix"])
            - color_len(left["suffix"]),
            left["width"] - color_len(left["prefix"]),
            left["width"] - color_len(left["prefix"]),
        )

        left_split = split_into_lines(left["contents"], left_width_tuple)
        right_width_tuple = (
            right["width"]
            - color_len(right["prefix"])
            - color_len(right["suffix"]),
            right["width"] - color_len(right["prefix"]),
            right["width"] - color_len(right["prefix"]),
        )

        right_split = split_into_lines(right["contents"], right_width_tuple)
        max_line_count = max(len(left_split), len(right_split))

        out = ""
        for i in range(max_line_count):
            # indentation
            out += indent_str

            # Prefix or indent_str for line
            if i == 0:
                out += left["prefix"]
            else:
                out += indent(color_len(left["prefix"]))

            # Line i of left hand side contents.
            if i < len(left_split):
                out += left_split[i]
                left_part_len = color_len(left_split[i])
            else:
                left_part_len = 0

            # Padding until end of column.
            # Note: differs from original column calcs in not -1 afterwards
            # for space in track number as that is included in 'prefix'
            padding = left["width"] - color_len(left["prefix"]) - left_part_len

            # Remove some padding on the first line to display length
            if i == 0:
                padding -= color_len(left["suffix"])

            out += indent(padding)

            if i == 0:
                out += left["suffix"]

            # Separator between columns.
            if i == 0:
                out += separator
            else:
                out += indent(color_len(separator))

            # Right prefix, contents, padding, suffix
            if i == 0:
                out += right["prefix"]
            else:
                out += indent(color_len(right["prefix"]))

            # Line i of right hand side.
            if i < len(right_split):
                out += right_split[i]
                right_part_len = color_len(right_split[i])
            else:
                right_part_len = 0

            # Padding until end of column
            padding = (
                right["width"] - color_len(right["prefix"]) - right_part_len
            )

            # Remove some padding on the first line to display length
            if i == 0:
                padding -= color_len(right["suffix"])
            out += indent(padding)

            # Length in first line
            if i == 0:
                out += right["suffix"]

            # Linebreak, except in the last line.
            if i < max_line_count - 1:
                out += "\n"

        # Constructed all of the columns, now print
        print_(out)
def print_newline_layout(
    indent_str, left, right, separator=" -> ", max_width=None
):
    """Prints using a newline separator between left & right if they go
    over their allocated widths. The data structures are shared with the
    column layout. In contrast to the column layout, the prefix and
    suffix are printed at the beginning and end of the contents. If no
    wrapping is required (i.e. everything fits) the first line will look
    exactly the same as the column layout:
    {indent}{lhs0}{separator}{rhs0}
    However if this would go over the width given, the layout now becomes:
    {indent}{lhs0}
    {indent}{separator}{rhs0}
    If {lhs0} would go over the maximum width, the subsequent lines are
    indented a second time for ease of reading.
    """
    # Compute the terminal width at call time; a `term_width()` default
    # argument would be evaluated only once, at import time.
    if max_width is None:
        max_width = term_width()
    if right["prefix"] + right["contents"] + right["suffix"] == "":
        # No right hand information, so we don't need a separator.
        separator = ""
    first_line_no_wrap = (
        indent_str
        + left["prefix"]
        + left["contents"]
        + left["suffix"]
        + separator
        + right["prefix"]
        + right["contents"]
        + right["suffix"]
    )
    if color_len(first_line_no_wrap) < max_width:
        # Everything fits, print out line.
        print_(first_line_no_wrap)
    else:
        # Newline separation, with wrapping
        empty_space = max_width - len(indent_str)
        # On lower lines we will double the indent for clarity
        left_width_tuple = (
            empty_space,
            empty_space - len(indent_str),
            empty_space - len(indent_str),
        )
        left_str = left["prefix"] + left["contents"] + left["suffix"]
        left_split = split_into_lines(left_str, left_width_tuple)

        # Repeat calculations for rhs, including separator on first line
        right_width_tuple = (
            empty_space - color_len(separator),
            empty_space - len(indent_str),
            empty_space - len(indent_str),
        )
        right_str = right["prefix"] + right["contents"] + right["suffix"]
        right_split = split_into_lines(right_str, right_width_tuple)

        for i, line in enumerate(left_split):
            if i == 0:
                print_(indent_str + line)
            elif line != "":
                # Ignore empty lines
                print_(indent_str * 2 + line)

        for i, line in enumerate(right_split):
            if i == 0:
                print_(indent_str + separator + line)
            elif line != "":
                print_(indent_str * 2 + line)
def _field_diff(field, old, old_fmt, new, new_fmt): """Given two Model objects and their formatted views, format their values for `field` and highlight changes among them. Return a human-readable string. If the value has not changed, return None instead. """ oldval = old.get(field) newval = new.get(field) # If no change, abort. if ( isinstance(oldval, float) and isinstance(newval, float) and abs(oldval - newval) < FLOAT_EPSILON ): return None elif oldval == newval: return None # Get formatted values for output. oldstr = old_fmt.get(field, "") newstr = new_fmt.get(field, "") # For strings, highlight changes. For others, colorize the whole # thing. if isinstance(oldval, str): oldstr, newstr = colordiff(oldval, newstr) else: oldstr = colorize("text_error", oldstr) newstr = colorize("text_error", newstr) return f"{oldstr} -> {newstr}"
def show_model_changes(new, old=None, fields=None, always=False): """Given a Model object, print a list of changes from its pristine version stored in the database. Return a boolean indicating whether any changes were found. `old` may be the "original" object to avoid using the pristine version from the database. `fields` may be a list of fields to restrict the detection to. `always` indicates whether the object is always identified, regardless of whether any changes are present. """ old = old or new._db._get(type(new), new.id) # Keep the formatted views around instead of re-creating them in each # iteration step old_fmt = old.formatted() new_fmt = new.formatted() # Build up lines showing changed fields. changes = [] for field in old: # Subset of the fields. Never show mtime. if field == "mtime" or (fields and field not in fields): continue # Detect and show difference for this field. line = _field_diff(field, old, old_fmt, new, new_fmt) if line: changes.append(f" {field}: {line}") # New fields. for field in set(new) - set(old): if fields and field not in fields: continue changes.append( " {}: {}".format(field, colorize("text_highlight", new_fmt[field])) ) # Print changes. if changes or always: print_(format(old)) if changes: print_("\n".join(changes)) return bool(changes)
def show_path_changes(path_changes):
    """Given a list of tuples (source, destination) that indicate the
    path changes, print them out. The output is guaranteed to be
    unicode.

    Every pair is shown on a single line if the terminal width permits it,
    else it is split over two lines. E.g.,

        Source -> Destination

    vs.

        Source
          -> Destination
    """
    sources, destinations = zip(*path_changes)

    # Ensure unicode output
    sources = list(map(util.displayable_path, sources))
    destinations = list(map(util.displayable_path, destinations))

    # Calculate widths for terminal split
    col_width = (term_width() - len(" -> ")) // 2
    max_width = len(max(sources + destinations, key=len))

    if max_width > col_width:
        # Print every change over two lines
        for source, dest in zip(sources, destinations):
            color_source, color_dest = colordiff(source, dest)
            print_("{0}\n  -> {1}".format(color_source, color_dest))
    else:
        # Print every change on a single line, and add a header
        title_pad = max_width - len("Source ") + len(" -> ")

        print_("Source {0} Destination".format(" " * title_pad))
        for source, dest in zip(sources, destinations):
            pad = max_width - len(source)
            color_source, color_dest = colordiff(source, dest)
            print_(
                "{0} {1} -> {2}".format(
                    color_source,
                    " " * pad,
                    color_dest,
                )
            )
def _store_dict(option, opt_str, value, parser): """Custom action callback to parse options which have ``key=value`` pairs as values. All such pairs passed for this option are aggregated into a dictionary. """ dest = option.dest option_values = getattr(parser.values, dest, None) if option_values is None: # This is the first supplied ``key=value`` pair of option. # Initialize empty dictionary and get a reference to it. setattr(parser.values, dest, {}) option_values = getattr(parser.values, dest) try: key, value = value.split("=", 1) if not (key and value): raise ValueError except ValueError: raise UserError( "supplied argument `{}' is not of the form `key=value'".format( value ) ) option_values[key] = value
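# Wiring the callback into optparse (the "--set" option name here is
# hypothetical): repeated `--set key=value` pairs accumulate into one
# dictionary on the parsed values.
import optparse

parser = optparse.OptionParser()
parser.add_option(
    "--set",
    dest="sets",
    action="callback",
    type="string",  # so the callback receives the option's value
    callback=_store_dict,
)
opts, _ = parser.parse_args(["--set", "genre=Rock", "--set", "year=1969"])
assert opts.sets == {"genre": "Rock", "year": "1969"}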
def _load_plugins(options, config): """Load the plugins specified on the command line or in the configuration.""" paths = config["pluginpath"].as_str_seq(split=False) paths = [util.normpath(p) for p in paths] log.debug("plugin paths: {0}", util.displayable_path(paths)) # On Python 3, the search paths need to be unicode. paths = [util.py3_path(p) for p in paths] # Extend the `beetsplug` package to include the plugin paths. import beetsplug beetsplug.__path__ = paths + list(beetsplug.__path__) # For backwards compatibility, also support plugin paths that # *contain* a `beetsplug` package. sys.path += paths # If we were given any plugins on the command line, use those. if options.plugins is not None: plugin_list = ( options.plugins.split(",") if len(options.plugins) > 0 else [] ) else: plugin_list = config["plugins"].as_str_seq() # Exclude any plugins that were specified on the command line if options.exclude is not None: plugin_list = [ p for p in plugin_list if p not in options.exclude.split(",") ] plugins.load_plugins(plugin_list) return plugins
def _setup(options, lib=None):
    """Prepare the global state and update it with command line options.
    Returns a list of subcommands, a list of plugins, and a library instance.
    """
    # Configure the MusicBrainz API.
    mb.configure()

    config = _configure(options)

    plugins = _load_plugins(options, config)

    # Add types and queries defined by plugins.
    plugin_types_album = plugins.types(library.Album)
    library.Album._types.update(plugin_types_album)
    item_types = plugin_types_album.copy()
    item_types.update(library.Item._types)
    item_types.update(plugins.types(library.Item))
    library.Item._types = item_types

    library.Item._queries.update(plugins.named_queries(library.Item))
    library.Album._queries.update(plugins.named_queries(library.Album))

    plugins.send("pluginload")

    # Get the default subcommands.
    from beets.ui.commands import default_commands

    subcommands = list(default_commands)
    subcommands.extend(plugins.commands())

    if lib is None:
        lib = _open_library(config)
        plugins.send("library_opened", lib=lib)

    return subcommands, plugins, lib
def _configure(options): """Amend the global configuration object with command line options.""" # Add any additional config files specified with --config. This # special handling lets specified plugins get loaded before we # finish parsing the command line. if getattr(options, "config", None) is not None: overlay_path = options.config del options.config config.set_file(overlay_path) else: overlay_path = None config.set_args(options) # Configure the logger. if config["verbose"].get(int): log.set_global_level(logging.DEBUG) else: log.set_global_level(logging.INFO) if overlay_path: log.debug( "overlaying configuration: {0}", util.displayable_path(overlay_path) ) config_path = config.user_config_path() if os.path.isfile(config_path): log.debug("user configuration: {0}", util.displayable_path(config_path)) else: log.debug( "no user configuration found at {0}", util.displayable_path(config_path), ) log.debug("data directory: {0}", util.displayable_path(config.config_dir())) return config
def _open_library(config):
    """Create a new library instance from the configuration."""
    dbpath = util.bytestring_path(config["library"].as_filename())
    _ensure_db_directory_exists(dbpath)
    try:
        lib = library.Library(
            dbpath,
            config["directory"].as_filename(),
            get_path_formats(),
            get_replacements(),
        )
        lib.get_item(0)  # Test database connection.
    except (sqlite3.OperationalError, sqlite3.DatabaseError) as db_error:
        log.debug("{}", traceback.format_exc())
        raise UserError(
            "database file {} could not be opened: {}".format(
                util.displayable_path(dbpath), db_error
            )
        )
    log.debug(
        "library database: {0}\n" "library directory: {1}",
        util.displayable_path(lib.path),
        util.displayable_path(lib.directory),
    )
    return lib
def _raw_main(args, lib=None): """A helper function for `main` without top-level exception handling. """ parser = SubcommandsOptionParser() parser.add_format_option(flags=("--format-item",), target=library.Item) parser.add_format_option(flags=("--format-album",), target=library.Album) parser.add_option( "-l", "--library", dest="library", help="library database file to use" ) parser.add_option( "-d", "--directory", dest="directory", help="destination music directory", ) parser.add_option( "-v", "--verbose", dest="verbose", action="count", help="log more details (use twice for even more)", ) parser.add_option( "-c", "--config", dest="config", help="path to configuration file" ) parser.add_option( "-p", "--plugins", dest="plugins", help="a comma-separated list of plugins to load", ) parser.add_option( "-P", "--disable-plugins", dest="exclude", help="a comma-separated list of plugins to disable", ) parser.add_option( "-h", "--help", dest="help", action="store_true", help="show this help message and exit", ) parser.add_option( "--version", dest="version", action="store_true", help=optparse.SUPPRESS_HELP, ) options, subargs = parser.parse_global_options(args) # Special case for the `config --edit` command: bypass _setup so # that an invalid configuration does not prevent the editor from # starting. if ( subargs and subargs[0] == "config" and ("-e" in subargs or "--edit" in subargs) ): from beets.ui.commands import config_edit return config_edit() test_lib = bool(lib) subcommands, plugins, lib = _setup(options, lib) parser.add_subcommand(*subcommands) subcommand, suboptions, subargs = parser.parse_subcommand(subargs) subcommand.func(lib, suboptions, subargs) plugins.send("cli_exit", lib=lib) if not test_lib: # Clean up the library unless it came from the test harness. lib._close()
def main(args=None):
    """Run the main command-line interface for beets. Includes top-level
    exception handlers that print friendly error messages.
    """
    try:
        _raw_main(args)
    except UserError as exc:
        message = exc.args[0] if exc.args else None
        log.error("error: {0}", message)
        sys.exit(1)
    except util.HumanReadableException as exc:
        exc.log(log)
        sys.exit(1)
    except library.FileOperationError as exc:
        # These errors have reasonable human-readable descriptions, but
        # we still want to log their tracebacks for debugging.
        log.debug("{}", traceback.format_exc())
        log.error("{}", exc)
        sys.exit(1)
    except confuse.ConfigError as exc:
        log.error("configuration error: {0}", exc)
        sys.exit(1)
    except db_query.InvalidQueryError as exc:
        log.error("invalid query: {0}", exc)
        sys.exit(1)
    except OSError as exc:
        if exc.errno == errno.EPIPE:
            # "Broken pipe". End silently.
            sys.stderr.close()
        else:
            raise
    except KeyboardInterrupt:
        # Silently ignore ^C except in verbose mode.
        log.debug("{}", traceback.format_exc())
    except db.DBAccessError as exc:
        log.error(
            "database access error: {0}\n"
            "the library file might have a permissions problem",
            exc,
        )
        sys.exit(1)
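
# Hedged example: main() can be driven in-process with an explicit
# argument list, equivalent to running `beet version` in a shell.
# Requires beets to be installed; output goes to stdout.
from beets import ui

ui.main(["version"])
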
def resize_url(url, maxwidth, quality=0):
    """Return a proxied image URL that resizes the original image to
    maxwidth (preserving aspect ratio).
    """
    params = {
        "url": url.replace("http://", ""),
        "w": maxwidth,
    }

    if quality > 0:
        params["q"] = quality

    return "{}?{}".format(PROXY_URL, urlencode(params))
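
# Usage sketch: PROXY_URL is a module-level constant; the value and
# image URL here are hypothetical. The scheme is stripped and the
# remainder is URL-encoded into the proxy's query string.
PROXY_URL = "https://images.example.com/"

print(resize_url("http://covers.example.org/a.jpg", 600, quality=80))
# https://images.example.com/?url=covers.example.org%2Fa.jpg&w=600&q=80
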
def temp_file_for(path):
    """Return an unused filename with the same extension as the
    specified path.
    """
    ext = os.path.splitext(path)[1]
    with NamedTemporaryFile(suffix=py3_path(ext), delete=False) as f:
        return bytestring_path(f.name)
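
# Usage sketch with a hypothetical source path. Note that the
# temporary file is really created on disk (delete=False), so the
# caller is expected to move or remove it afterwards.
tmp = temp_file_for(b"/music/album/track.mp3")
print(tmp)  # e.g. b'/tmp/tmpk3v91x2q.mp3'
os.remove(tmp)
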
def _event_select(events):
    """Perform a select() over all the Events provided, returning the
    ones ready to be fired. Only WaitableEvents (including SleepEvents)
    matter here; all other events are ignored (and thus postponed).
    """
    # Gather waitables and wakeup times.
    waitable_to_event = {}
    rlist, wlist, xlist = [], [], []
    earliest_wakeup = None
    for event in events:
        if isinstance(event, SleepEvent):
            if not earliest_wakeup:
                earliest_wakeup = event.wakeup_time
            else:
                earliest_wakeup = min(earliest_wakeup, event.wakeup_time)
        elif isinstance(event, WaitableEvent):
            r, w, x = event.waitables()
            rlist += r
            wlist += w
            xlist += x
            for waitable in r:
                waitable_to_event[("r", waitable)] = event
            for waitable in w:
                waitable_to_event[("w", waitable)] = event
            for waitable in x:
                waitable_to_event[("x", waitable)] = event

    # If we have any sleeping threads, determine how long to sleep.
    if earliest_wakeup:
        timeout = max(earliest_wakeup - time.time(), 0.0)
    else:
        timeout = None

    # Perform select() if we have any waitables.
    if rlist or wlist or xlist:
        rready, wready, xready = select.select(rlist, wlist, xlist, timeout)
    else:
        rready, wready, xready = (), (), ()
        if timeout:
            time.sleep(timeout)

    # Gather ready events corresponding to the ready waitables.
    ready_events = set()
    for ready in rready:
        ready_events.add(waitable_to_event[("r", ready)])
    for ready in wready:
        ready_events.add(waitable_to_event[("w", ready)])
    for ready in xready:
        ready_events.add(waitable_to_event[("x", ready)])

    # Gather any finished sleeps.
    for event in events:
        if isinstance(event, SleepEvent) and event.time_left() == 0.0:
            ready_events.add(event)

    return ready_events
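
# A standalone sketch of the pattern _event_select builds on: wait on
# readable descriptors with a timeout derived from the earliest
# sleeper. A socketpair keeps the example self-contained.
import select
import socket

a, b = socket.socketpair()
b.send(b"ping")  # makes `a` readable
rready, wready, xready = select.select([a], [], [], 1.0)
assert rready == [a]  # `a` would map back to its event and be fired
a.close()
b.close()
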
def run(root_coro):
    """Schedules a coroutine, running it to completion. This
    encapsulates the Bluelet scheduler, which the root coroutine can
    add to by spawning new coroutines.
    """
    # The "threads" dictionary keeps track of all the currently-
    # executing and suspended coroutines. It maps coroutines to their
    # currently "blocking" event. The event value may be SUSPENDED if
    # the coroutine is waiting on some other condition: namely, a
    # delegated coroutine or a joined coroutine. In this case, the
    # coroutine should *also* appear as a value in one of the below
    # dictionaries `delegators` or `joiners`.
    threads = {root_coro: ValueEvent(None)}

    # Maps child coroutines to delegating parents.
    delegators = {}

    # Maps child coroutines to joining (exit-waiting) parents.
    joiners = collections.defaultdict(list)

    def complete_thread(coro, return_value):
        """Remove a coroutine from the scheduling pool, awaking
        delegators and joiners as necessary and returning the specified
        value to any delegating parent.
        """
        del threads[coro]

        # Resume delegator.
        if coro in delegators:
            threads[delegators[coro]] = ValueEvent(return_value)
            del delegators[coro]

        # Resume joiners.
        if coro in joiners:
            for parent in joiners[coro]:
                threads[parent] = ValueEvent(None)
            del joiners[coro]

    def advance_thread(coro, value, is_exc=False):
        """After an event is fired, run a given coroutine associated
        with it in the threads dict until it yields again. If the
        coroutine exits, then the thread is removed from the pool. If
        the coroutine raises an exception, it is reraised in a
        ThreadException. If is_exc is True, then the value must be an
        exc_info tuple and the exception is thrown into the coroutine.
        """
        try:
            if is_exc:
                next_event = coro.throw(*value)
            else:
                next_event = coro.send(value)
        except StopIteration:
            # Thread is done.
            complete_thread(coro, None)
        except BaseException:
            # Thread raised some other exception.
            del threads[coro]
            raise ThreadException(coro, sys.exc_info())
        else:
            if isinstance(next_event, types.GeneratorType):
                # Automatically invoke sub-coroutines. (Shorthand for
                # explicit bluelet.call().)
                next_event = DelegationEvent(next_event)
            threads[coro] = next_event

    def kill_thread(coro):
        """Unschedule this thread and its (recursive) delegates."""
        # Collect all coroutines in the delegation stack.
        coros = [coro]
        while isinstance(threads[coro], Delegated):
            coro = threads[coro].child
            coros.append(coro)

        # Complete each coroutine from the top to the bottom of the
        # stack.
        for coro in reversed(coros):
            complete_thread(coro, None)

    # Continue advancing threads until root thread exits.
    exit_te = None
    while threads:
        try:
            # Look for events that can be run immediately. Continue
            # running immediate events until nothing is ready.
            while True:
                have_ready = False
                for coro, event in list(threads.items()):
                    if isinstance(event, SpawnEvent):
                        threads[event.spawned] = ValueEvent(None)  # Spawn.
                        advance_thread(coro, None)
                        have_ready = True
                    elif isinstance(event, ValueEvent):
                        advance_thread(coro, event.value)
                        have_ready = True
                    elif isinstance(event, ExceptionEvent):
                        advance_thread(coro, event.exc_info, True)
                        have_ready = True
                    elif isinstance(event, DelegationEvent):
                        threads[coro] = Delegated(event.spawned)  # Suspend.
                        threads[event.spawned] = ValueEvent(None)  # Spawn.
                        delegators[event.spawned] = coro
                        have_ready = True
                    elif isinstance(event, ReturnEvent):
                        # Thread is done.
                        complete_thread(coro, event.value)
                        have_ready = True
                    elif isinstance(event, JoinEvent):
                        threads[coro] = SUSPENDED  # Suspend.
                        joiners[event.child].append(coro)
                        have_ready = True
                    elif isinstance(event, KillEvent):
                        threads[coro] = ValueEvent(None)
                        kill_thread(event.child)
                        have_ready = True

                # Only start the select when nothing else is ready.
                if not have_ready:
                    break

            # Wait and fire.
            event2coro = {v: k for k, v in threads.items()}
            for event in _event_select(threads.values()):
                # Run the IO operation, but catch socket errors.
                try:
                    value = event.fire()
                except OSError as exc:
                    if (
                        isinstance(exc.args, tuple)
                        and exc.args[0] == errno.EPIPE
                    ):
                        # Broken pipe. Remote host disconnected.
                        pass
                    elif (
                        isinstance(exc.args, tuple)
                        and exc.args[0] == errno.ECONNRESET
                    ):
                        # Connection was reset by peer.
                        pass
                    else:
                        traceback.print_exc()
                    # Abort the coroutine.
                    threads[event2coro[event]] = ReturnEvent(None)
                else:
                    advance_thread(event2coro[event], value)

        except ThreadException as te:
            # Exception raised from inside a thread.
            event = ExceptionEvent(te.exc_info)
            if te.coro in delegators:
                # The thread is a delegate. Raise exception in its
                # delegator.
                threads[delegators[te.coro]] = event
                del delegators[te.coro]
            else:
                # The thread is root-level. Raise in client code.
                exit_te = te
                break

        except BaseException:
            # For instance, KeyboardInterrupt during select(). Raise
            # into root thread and terminate others.
            threads = {root_coro: ExceptionEvent(sys.exc_info())}

    # If any threads still remain, kill them.
    for coro in threads:
        coro.close()

    # If we're exiting with an exception, raise it in the client.
    if exit_te:
        exit_te.reraise()
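
# Hedged usage sketch of the scheduler. It assumes the module's
# sleep() and spawn() helpers, which wrap SleepEvent and SpawnEvent;
# yielding a bare generator triggers the implicit delegation handled
# in advance_thread().
def ticker(name, delay):
    for i in range(3):
        yield sleep(delay)  # suspend this thread for `delay` seconds
        print(name, i)

def demo_root():
    yield spawn(ticker("fast", 0.1))  # runs concurrently with the root
    yield ticker("slow", 0.2)  # bare generator: delegate and wait

run(demo_root())
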