Columns: response (string, 1 to 33.1k characters) and instruction (string, 22 to 582k characters).
Verify no command params are shadowed or prefixed by the built in param. The CLI parses all command line options into a single namespace. This means that option names must be unique and cannot conflict with the top level params. For example, there's a top level param ``--version``. If an operation for a service also provides a ``--version`` option, it can never be called because we'll assume the user meant the top level ``--version`` param. Beyond just direct shadowing, a param which prefixes a builtin is also effectively shadowed because argparse will expand prefixes of arguments. So `--end` would expand to `--endpoint-url` for instance. In order to ensure this doesn't happen, this test will go through every command table and ensure we're not shadowing any builtins. Also, rather than being a test generator, we're going to just aggregate all the failures in one pass and surface them as a single test failure.
def test_no_shadowed_builtins(command_name, command_table, builtins): """Verify no command params are shadowed or prefixed by the built in param. The CLI parses all command line options into a single namespace. This means that option names must be unique and cannot conflict with the top level params. For example, there's a top level param ``--version``. If an operation for a service also provides a ``--version`` option, it can never be called because we'll assume the user meant the top level ``--version`` param. Beyond just direct shadowing, a param which prefixes a builtin is also effectively shadowed because argparse will expand prefixes of arguments. So `--end` would expand to `--endpoint-url` for instance. In order to ensure this doesn't happen, this test will go through every command table and ensure we're not shadowing any builtins. Also, rather than being a test generator, we're going to just aggregate all the failures in one pass and surface them as a single test failure. """ errors = [] for sub_name, sub_command in command_table.items(): op_help = sub_command.create_help_command() arg_table = op_help.arg_table for arg_name in arg_table: if any(p.startswith(arg_name) for p in builtins): # Then we are shadowing or prefixing a top level argument errors.append( 'Shadowing/Prefixing a top level option: ' '%s.%s.%s' % (command_name, sub_name, arg_name)) if errors: raise AssertionError('\n' + '\n'.join(errors))
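The prefix expansion described above is default argparse behavior (unambiguous abbreviations of long options are accepted). A small standalone Python sketch, separate from the test suite, that illustrates it:

import argparse

# Abbreviation matching is on by default (allow_abbrev=True), so a
# user-supplied "--end" resolves to "--endpoint-url".
parser = argparse.ArgumentParser()
parser.add_argument("--endpoint-url")
args = parser.parse_args(["--end", "https://example.com"])
print(args.endpoint_url)  # https://example.com

# If a service operation also defined an "--end" option in the same
# namespace, it would collide with this expansion, which is exactly the
# situation the test above guards against.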
Trims output and removes all lines after a line starting with warning. A line will only start with warning if it is the start of a "not installed" warning, which should be ignored when comparing output.
def sanitize_output(output): """ Trims output and removes all lines after a line starting with warning. A line will only start with warning if it is the start of a "not installed" warning, which should be ignored when comparing output. """ to_return = "" for line in output.splitlines(): if bool(re.match('warning', line.strip(), re.I)): return to_return.strip() else: to_return += line to_return += '\n' return to_return.strip()
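A brief usage sketch, assuming sanitize_output and its re import are in scope; the warning text is hypothetical:

raw = "kubeconfig written\nWarning: kubectl is not installed\nextra diagnostics"
# Everything from the warning line onward is dropped.
print(sanitize_output(raw))  # prints "kubeconfig written"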
Build an environment variable from a list of strings.
def build_environment(entries): """ Build an environment variable from a list of strings. """ return os.path.pathsep.join(entries)
Get the path of a specific fixture
def get_testdata(file_name): """Get the path of a specific fixture""" return os.path.join( os.path.dirname(os.path.realpath(__file__)), "testdata", file_name )
Get an example list_cluster call (For mocking)
def list_cluster_response(): """Get an example list_cluster call (For mocking)""" return { "clusters": [ EXAMPLE_NAME ] }
Get an example describe_cluster call (For mocking)
def describe_cluster_response(): """Get an example describe_cluster call (For mocking)""" return { "cluster": { "status": "ACTIVE", "endpoint": "https://endpoint.amazonaws.com", "name": EXAMPLE_NAME, "certificateAuthority": { "data": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpWR1Z6ZEdsdVp5QkVZWFJoRFFwVVpYTjBhVzVuSUVSaGRHRU5DbFJsYzNScGJtY2dSR0YwWVEwS2EzVmlaWEp1WlhSbGN6QWVGdzBLVkdWemRHbHVaeUJFWVhSaERRcFVaWE4wYVc1bklFUmhkR0ZWQkFNVERRcHJkV0psY201bGRHVnpNQUVpTUEwS1ZHVnpkR2x1WnlCRVlYUmhEUXBVWlhOMGFXNW5JRVJoZEdFTkNsUmxjM1JwYm1jZ1JHRjBZY3UvR1FnbmFTcDNZaHBDTWhGVVpYTjBhVzVuSUVSaGRHRXl3clZqeEpWNjNwNFVHRmpZdHdGR1drUldJVkV1VkdWemRHbHVaeUJFWVhSaGJzT0MxSVJiTDhPd0lpMVhiWGg2VkdWemRHbHVaeUJFWVhSaFpXVndTTk9VVUZKNmN5QWJaaFpnWVNkTUV3MEtGMVJsYzNScGJtY2dSR0YwWVFZRFZSMFBBUUVFQkFNQ0FsUmxjM1JwYm1jZ1JHRjBZUUV3RFFvR0NTcElEUXBVWlhOMGFXNW5JRVJoZEdGcEgxc1pPRTNMa3lrMU9DWUNHUloyTEZjM3paOCtHell3WEZSbGMzUnBibWNnUkdGMFlYMUR5NjFNMVlGV1AxWVRIMVJsYzNScGJtY2dSR0YwWVd0aE5oMVphM2dWUDBGaGNSWjdKaW9oZVc4N1JsUmxjM1JwYm1jZ1JHRjBZUVpIVHd4NE9IdzZmZz09DQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t" }, "roleArn": "arn:aws:iam::111222333444/eksRole", "resourcesVpcConfig": { "subnetIds": [ "subnet-00000000000000000", "subnet-00000000000000001", "subnet-00000000000000002" ], "vpcId": "vpc-00000000000000000", "securityGroupIds": [ "sg-00000000000000000" ] }, "version": "1.10", "arn": "arn:aws:eks:region:111222333444:cluster/" + EXAMPLE_NAME, "createdAt": 1500000000.000 } }
Get an example describe_cluster call (For mocking)
def describe_cluster_response_outpost_cluster(): """Get an example describe_cluster call (For mocking)""" return { "cluster": { "status": "ACTIVE", "endpoint": "https://endpoint.amazonaws.com", "name": EXAMPLE_NAME, "certificateAuthority": { "data": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpWR1Z6ZEdsdVp5QkVZWFJoRFFwVVpYTjBhVzVuSUVSaGRHRU5DbFJsYzNScGJtY2dSR0YwWVEwS2EzVmlaWEp1WlhSbGN6QWVGdzBLVkdWemRHbHVaeUJFWVhSaERRcFVaWE4wYVc1bklFUmhkR0ZWQkFNVERRcHJkV0psY201bGRHVnpNQUVpTUEwS1ZHVnpkR2x1WnlCRVlYUmhEUXBVWlhOMGFXNW5JRVJoZEdFTkNsUmxjM1JwYm1jZ1JHRjBZY3UvR1FnbmFTcDNZaHBDTWhGVVpYTjBhVzVuSUVSaGRHRXl3clZqeEpWNjNwNFVHRmpZdHdGR1drUldJVkV1VkdWemRHbHVaeUJFWVhSaGJzT0MxSVJiTDhPd0lpMVhiWGg2VkdWemRHbHVaeUJFWVhSaFpXVndTTk9VVUZKNmN5QWJaaFpnWVNkTUV3MEtGMVJsYzNScGJtY2dSR0YwWVFZRFZSMFBBUUVFQkFNQ0FsUmxjM1JwYm1jZ1JHRjBZUUV3RFFvR0NTcElEUXBVWlhOMGFXNW5JRVJoZEdGcEgxc1pPRTNMa3lrMU9DWUNHUloyTEZjM3paOCtHell3WEZSbGMzUnBibWNnUkdGMFlYMUR5NjFNMVlGV1AxWVRIMVJsYzNScGJtY2dSR0YwWVd0aE5oMVphM2dWUDBGaGNSWjdKaW9oZVc4N1JsUmxjM1JwYm1jZ1JHRjBZUVpIVHd4NE9IdzZmZz09DQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t" }, "roleArn": "arn:aws:iam::111222333444/eksRole", "resourcesVpcConfig": { "subnetIds": [ "subnet-00000000000000000", "subnet-00000000000000001", "subnet-00000000000000002" ], "vpcId": "vpc-00000000000000000", "securityGroupIds": [ "sg-00000000000000000" ] }, "version": "1.10", "arn": "arn:aws:eks:region:111222333444:cluster/" + EXAMPLE_NAME, "createdAt": 1500000000.000, "id": "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee", "outpostConfig": { "outpostArns": [ "arn:aws:outposts:us-west-2:111222333444:outpost/op-00000000000000000" ], } } }
Get an example describe_cluster call (For mocking)
def describe_cluster_no_status_response(): """Get an example describe_cluster call (For mocking)""" return { "cluster": { "endpoint": "https://endpoint.amazonaws.com", "name": EXAMPLE_NAME, "certificateAuthority": { "data": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpWR1Z6ZEdsdVp5QkVZWFJoRFFwVVpYTjBhVzVuSUVSaGRHRU5DbFJsYzNScGJtY2dSR0YwWVEwS2EzVmlaWEp1WlhSbGN6QWVGdzBLVkdWemRHbHVaeUJFWVhSaERRcFVaWE4wYVc1bklFUmhkR0ZWQkFNVERRcHJkV0psY201bGRHVnpNQUVpTUEwS1ZHVnpkR2x1WnlCRVlYUmhEUXBVWlhOMGFXNW5JRVJoZEdFTkNsUmxjM1JwYm1jZ1JHRjBZY3UvR1FnbmFTcDNZaHBDTWhGVVpYTjBhVzVuSUVSaGRHRXl3clZqeEpWNjNwNFVHRmpZdHdGR1drUldJVkV1VkdWemRHbHVaeUJFWVhSaGJzT0MxSVJiTDhPd0lpMVhiWGg2VkdWemRHbHVaeUJFWVhSaFpXVndTTk9VVUZKNmN5QWJaaFpnWVNkTUV3MEtGMVJsYzNScGJtY2dSR0YwWVFZRFZSMFBBUUVFQkFNQ0FsUmxjM1JwYm1jZ1JHRjBZUUV3RFFvR0NTcElEUXBVWlhOMGFXNW5JRVJoZEdGcEgxc1pPRTNMa3lrMU9DWUNHUloyTEZjM3paOCtHell3WEZSbGMzUnBibWNnUkdGMFlYMUR5NjFNMVlGV1AxWVRIMVJsYzNScGJtY2dSR0YwWVd0aE5oMVphM2dWUDBGaGNSWjdKaW9oZVc4N1JsUmxjM1JwYm1jZ1JHRjBZUVpIVHd4NE9IdzZmZz09DQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t" }, "roleArn": "arn:aws:iam::111222333444/eksRole", "resourcesVpcConfig": { "subnetIds": [ "subnet-00000000000000000", "subnet-00000000000000001", "subnet-00000000000000002" ], "vpcId": "vpc-00000000000000000", "securityGroupIds": [ "sg-00000000000000000" ] }, "version": "1.10", "arn": "arn:aws:eks:region:111222333444:cluster/" + EXAMPLE_NAME, "createdAt": 1500000000.000 } }
Get an example describe_cluster call during creation
def describe_cluster_creating_response(): """Get an example describe_cluster call during creation""" return { "cluster": { "status": "CREATING", "name": EXAMPLE_NAME, "certificateAuthority": {}, "roleArn": "arn:aws:iam::111222333444/eksRole", "resourcesVpcConfig": { "subnetIds": [ "subnet-00000000000000000", "subnet-00000000000000001", "subnet-00000000000000002" ], "vpcId": "vpc-00000000000000000", "securityGroupIds": [ "sg-00000000000000000" ] }, "version": "1.10", "arn": "arn:aws:eks:region:111222333444:cluster/" + EXAMPLE_NAME, "createdAt": 1500000000.000 } }
Get an example describe_cluster call during deletion
def describe_cluster_deleting_response(): """Get an example describe_cluster call during deletion""" return { "cluster": { "status": "DELETING", "endpoint": "https://endpoint.amazonaws.com", "name": EXAMPLE_NAME, "certificateAuthority": { "data": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpWR1Z6ZEdsdVp5QkVZWFJoRFFwVVpYTjBhVzVuSUVSaGRHRU5DbFJsYzNScGJtY2dSR0YwWVEwS2EzVmlaWEp1WlhSbGN6QWVGdzBLVkdWemRHbHVaeUJFWVhSaERRcFVaWE4wYVc1bklFUmhkR0ZWQkFNVERRcHJkV0psY201bGRHVnpNQUVpTUEwS1ZHVnpkR2x1WnlCRVlYUmhEUXBVWlhOMGFXNW5JRVJoZEdFTkNsUmxjM1JwYm1jZ1JHRjBZY3UvR1FnbmFTcDNZaHBDTWhGVVpYTjBhVzVuSUVSaGRHRXl3clZqeEpWNjNwNFVHRmpZdHdGR1drUldJVkV1VkdWemRHbHVaeUJFWVhSaGJzT0MxSVJiTDhPd0lpMVhiWGg2VkdWemRHbHVaeUJFWVhSaFpXVndTTk9VVUZKNmN5QWJaaFpnWVNkTUV3MEtGMVJsYzNScGJtY2dSR0YwWVFZRFZSMFBBUUVFQkFNQ0FsUmxjM1JwYm1jZ1JHRjBZUUV3RFFvR0NTcElEUXBVWlhOMGFXNW5JRVJoZEdGcEgxc1pPRTNMa3lrMU9DWUNHUloyTEZjM3paOCtHell3WEZSbGMzUnBibWNnUkdGMFlYMUR5NjFNMVlGV1AxWVRIMVJsYzNScGJtY2dSR0YwWVd0aE5oMVphM2dWUDBGaGNSWjdKaW9oZVc4N1JsUmxjM1JwYm1jZ1JHRjBZUVpIVHd4NE9IdzZmZz09DQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t" }, "roleArn": "arn:aws:iam::111222333444/eksRole", "resourcesVpcConfig": { "subnetIds": [ "subnet-00000000000000000", "subnet-00000000000000001", "subnet-00000000000000002" ], "vpcId": "vpc-00000000000000000", "securityGroupIds": [ "sg-00000000000000000" ] }, "version": "1.10", "arn": "arn:aws:eks:region:111222333444:cluster/" + EXAMPLE_NAME, "createdAt": 1500000000.000 } }
Creates a randomly generated bucket in s3 with the files text1.txt and another_directory/text2.txt inside. The directory is manually created as it tests the ability to handle directories when generating s3 files.
def make_s3_files(session, key1='text1.txt', key2='text2.txt', size=None): """ Creates a randomly generated bucket in s3 with the files text1.txt and another_directory/text2.txt inside. The directory is manually created as it tests the ability to handle directories when generating s3 files. """ region = 'us-west-2' bucket = create_bucket(session) if size: string1 = "*" * size string2 = string1 else: string1 = "This is a test." string2 = "This is another test." client = session.create_client('s3', region_name=region) client.put_object(Bucket=bucket, Key=key1, Body=string1) if key2 is not None: client.put_object(Bucket=bucket, Key='another_directory/') client.put_object(Bucket=bucket, Key='another_directory/%s' % key2, Body=string2) return bucket
Function to cleanup generated s3 bucket and files.
def s3_cleanup(bucket, session): """ Function to cleanup generated s3 bucket and files. """ region = 'us-west-2' client = session.create_client('s3', region_name=region) try: client.head_bucket(Bucket=bucket) except ClientError: return response = client.list_objects(Bucket=bucket) contents = response.get('Contents', {}) keys = [content['Key'] for content in contents] for key in keys: client.delete_object(Bucket=bucket, Key=key) client.delete_bucket(Bucket=bucket)
An example to hydrate a complex structure with custom value logic. In this case we create a nested structure and divide the value by 100.
def _hydrate(params, container, cli_type, key, value): """ An example to hydrate a complex structure with custom value logic. In this case we create a nested structure and divide the value by 100. """ params['bag'] = { 'ArgumentBaz': { 'SomeValueAbc': value / 100.0 } }
Creates a mock key provider that yields keys for each key in key_list
def create_mock_key_provider(key_list): """Creates a mock key provider that yields keys for each key in key_list""" public_keys = {} for k in key_list: public_keys[k] = {'Fingerprint': k, 'Value': 'ffaa00'} key_provider = mock.Mock() key_provider.get_public_keys.return_value = public_keys return key_provider
Creates a scenario for a stack of actions Each action can be "gap" meaning there is no previous link, "invalid" meaning we should simulate an invalid digest, "missing" meaning we should simulate a digest is missing from S3, "bucket_change" meaning it is a link but the bucket is different than the previous bucket. Values are popped one by one off of the list until a terminal "gap" action is found.
def create_scenario(actions, logs=None): """Creates a scenario for a stack of actions Each action can be "gap" meaning there is no previous link, "invalid" meaning we should simulate an invalid digest, "missing" meaning we should simulate a digest is missing from S3, "bucket_change" meaning it is a link but the bucket is different than the previous bucket. Values are popped one by one off of the list until a terminal "gap" action is found. """ keys = [str(i) for i in range(len(actions))] key_provider = create_mock_key_provider(keys) digest_provider = MockDigestProvider(actions, logs) digest_validator = mock.Mock() def validate(bucket, key, public_key, digest_data, digest_str): if '_invalid' in digest_data: raise DigestError('invalid error') digest_validator.validate = validate return key_provider, digest_provider, digest_validator
Create and return a callback and a list populated with call args
def collecting_callback(): """Create and return a callback and a list populated with call args""" calls = [] def cb(**kwargs): calls.append(kwargs) return cb, calls
Generate a string to use as an environment variable value, containing the absolute paths of each file in files :param files: The names of the files to put in the environment variable :type files: list
def generate_env_variable(files): """ Generate a string to use as an environment variable value, containing the absolute paths of each file in files :param files: The names of the files to put in the environment variable :type files: list """ output = "" for file in files: if len(output) == 0: output = file else: output += os.path.pathsep + file return output
This sets up the test by making a directory named some_directory. It has the file text1.txt and the directory another_directory inside. Inside of another_directory it creates the file text2.txt.
def make_loc_files(file_creator, size=None): """ This sets up the test by making a directory named some_directory. It has the file text1.txt and the directory another_directory inside. Inside of another_directory it creates the file text2.txt. """ if size: body = "*" * size else: body = 'This is a test.' filename1 = file_creator.create_file( os.path.join('some_directory', 'text1.txt'), body) filename2 = file_creator.create_file( os.path.join('some_directory', 'another_directory', 'text2.txt'), body) filename1 = six.text_type(filename1) filename2 = six.text_type(filename2) return [filename1, filename2, os.path.dirname(filename2), os.path.dirname(filename1)]
Removes all of the local files made.
def clean_loc_files(file_creator): """ Removes all of the local files made. """ file_creator.remove_all()
Ensures that the FileStat's properties are what they are supposed to be.
def compare_files(self, result_file, ref_file): """ Ensures that the FileStat's properties are what they are supposed to be. """ self.assertEqual(result_file.src, ref_file.src) self.assertEqual(result_file.dest, ref_file.dest) self.assertEqual(result_file.compare_key, ref_file.compare_key) self.assertEqual(result_file.size, ref_file.size) self.assertEqual(result_file.last_update, ref_file.last_update) self.assertEqual(result_file.src_type, ref_file.src_type) self.assertEqual(result_file.dest_type, ref_file.dest_type) self.assertEqual(result_file.operation_name, ref_file.operation_name)
Return a `mediafile.Image` object for the path.
def mediafile_image(image_path, maxwidth=None): """Return a `mediafile.Image` object for the path.""" with open(syspath(image_path), "rb") as f: data = f.read() return mediafile.Image(data, type=mediafile.ImageType.front)
Embed an image into the item's media file.
def embed_item( log, item, imagepath, maxwidth=None, itempath=None, compare_threshold=0, ifempty=False, as_album=False, id3v23=None, quality=0, ): """Embed an image into the item's media file.""" # Conditions. if compare_threshold: is_similar = check_art_similarity( log, item, imagepath, compare_threshold ) if is_similar is None: log.warning("Error while checking art similarity; skipping.") return elif not is_similar: log.info("Image not similar; skipping.") return if ifempty and get_art(log, item): log.info("media file already contained art") return # Filters. if maxwidth and not as_album: imagepath = resize_image(log, imagepath, maxwidth, quality) # Get the `Image` object from the file. try: log.debug("embedding {0}", displayable_path(imagepath)) image = mediafile_image(imagepath, maxwidth) except OSError as exc: log.warning("could not read image file: {0}", exc) return # Make sure the image kind is safe (some formats only support PNG # and JPEG). if image.mime_type not in ("image/jpeg", "image/png"): log.info("not embedding image of unsupported type: {}", image.mime_type) return item.try_write(path=itempath, tags={"images": [image]}, id3v23=id3v23)
Embed album art into all of the album's items.
def embed_album( log, album, maxwidth=None, quiet=False, compare_threshold=0, ifempty=False, quality=0, ): """Embed album art into all of the album's items.""" imagepath = album.artpath if not imagepath: log.info("No album art present for {0}", album) return if not os.path.isfile(syspath(imagepath)): log.info( "Album art not found at {0} for {1}", displayable_path(imagepath), album, ) return if maxwidth: imagepath = resize_image(log, imagepath, maxwidth, quality) log.info("Embedding album art into {0}", album) for item in album.items(): embed_item( log, item, imagepath, maxwidth, None, compare_threshold, ifempty, as_album=True, quality=quality, )
Returns path to an image resized to maxwidth and encoded with the specified quality level.
def resize_image(log, imagepath, maxwidth, quality): """Returns path to an image resized to maxwidth and encoded with the specified quality level. """ log.debug( "Resizing album art to {0} pixels wide and encoding at quality \ level {1}", maxwidth, quality, ) imagepath = ArtResizer.shared.resize( maxwidth, syspath(imagepath), quality=quality ) return imagepath
A boolean indicating if an image is similar to embedded item art. If no embedded art exists, always return `True`. If the comparison fails for some reason, the return value is `None`. This must only be called if `ArtResizer.shared.can_compare` is `True`.
def check_art_similarity( log, item, imagepath, compare_threshold, artresizer=None, ): """A boolean indicating if an image is similar to embedded item art. If no embedded art exists, always return `True`. If the comparison fails for some reason, the return value is `None`. This must only be called if `ArtResizer.shared.can_compare` is `True`. """ with NamedTemporaryFile(delete=True) as f: art = extract(log, f.name, item) if not art: return True if artresizer is None: artresizer = ArtResizer.shared return artresizer.compare(art, imagepath, compare_threshold)
Reads the state file, returning a dictionary.
def _open_state(): """Reads the state file, returning a dictionary.""" try: with open(config["statefile"].as_filename(), "rb") as f: return pickle.load(f) except Exception as exc: # The `pickle` module can emit all sorts of exceptions during # unpickling, including ImportError. We use a catch-all # exception to avoid enumerating them all (the docs don't even have a # full list!). log.debug("state file could not be read: {0}", exc) return {}
Writes the state dictionary out to disk.
def _save_state(state): """Writes the state dictionary out to disk.""" try: with open(config["statefile"].as_filename(), "wb") as f: pickle.dump(state, f) except OSError as exc: log.error("state file could not be written: {0}", exc)
Record that the files under all of the `paths` have been imported under `toppath`.
def progress_add(toppath, *paths): """Record that the files under all of the `paths` have been imported under `toppath`. """ with progress_write() as state: imported = state.setdefault(toppath, []) for path in paths: # Normally `progress_add` will be called with the path # argument increasing. This is because of the ordering in # `albums_in_dir`. We take advantage of that to make the # code faster if imported and imported[len(imported) - 1] <= path: imported.append(path) else: insort(imported, path)
Return whether `path` has been imported in `toppath`.
def progress_element(toppath, path): """Return whether `path` has been imported in `toppath`.""" state = progress_read() if toppath not in state: return False imported = state[toppath] i = bisect_left(imported, path) return i != len(imported) and imported[i] == path
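progress_add and progress_element keep each per-toppath list sorted so that membership checks can use binary search instead of a linear scan. A self-contained standard-library sketch of that idiom, independent of the beets state file:

from bisect import bisect_left, insort

imported = []
for p in [b"/music/a", b"/music/c", b"/music/b"]:
    insort(imported, p)  # keep the list sorted as paths are recorded

def contains(sorted_list, value):
    # bisect_left returns the insertion point; the value is present only
    # if the element already at that index equals it.
    i = bisect_left(sorted_list, value)
    return i != len(sorted_list) and sorted_list[i] == value

print(contains(imported, b"/music/b"))  # True
print(contains(imported, b"/music/z"))  # False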
Return `True` if there exist paths that have already been imported under `toppath`.
def has_progress(toppath): """Return `True` if there exist paths that have already been imported under `toppath`. """ state = progress_read() return toppath in state
Indicate that the import of the album in `paths` is completed and should not be repeated in incremental imports.
def history_add(paths): """Indicate that the import of the album in `paths` is completed and should not be repeated in incremental imports. """ state = _open_state() if HISTORY_KEY not in state: state[HISTORY_KEY] = set() state[HISTORY_KEY].add(tuple(paths)) _save_state(state)
Get the set of completed path tuples in incremental imports.
def history_get(): """Get the set of completed path tuples in incremental imports.""" state = _open_state() if HISTORY_KEY not in state: return set() return state[HISTORY_KEY]
A generator yielding all the albums (as ImportTask objects) found in the user-specified list of paths. In the case of a singleton import, yields single-item tasks instead.
def read_tasks(session): """A generator yielding all the albums (as ImportTask objects) found in the user-specified list of paths. In the case of a singleton import, yields single-item tasks instead. """ skipped = 0 for toppath in session.paths: # Check whether we need to resume the import. session.ask_resume(toppath) # Generate tasks. task_factory = ImportTaskFactory(toppath, session) yield from task_factory.tasks() skipped += task_factory.skipped if not task_factory.imported: log.warning("No files imported from {0}", displayable_path(toppath)) # Show skipped directories (due to incremental/resume). if skipped: log.info("Skipped {0} paths.", skipped)
A generator that works as a drop-in-replacement for read_tasks. Instead of finding files from the filesystem, a query is used to match items from the library.
def query_tasks(session): """A generator that works as a drop-in-replacement for read_tasks. Instead of finding files from the filesystem, a query is used to match items from the library. """ if session.config["singletons"]: # Search for items. for item in session.lib.items(session.query): task = SingletonImportTask(None, item) for task in task.handle_created(session): yield task else: # Search for albums. for album in session.lib.albums(session.query): log.debug( "yielding album {0}: {1} - {2}", album.id, album.albumartist, album.album, ) items = list(album.items()) _freshen_items(items) task = ImportTask(None, [album.item_dir()], items) for task in task.handle_created(session): yield task
A coroutine for performing the initial MusicBrainz lookup for an album. It accepts lists of Items and yields (items, cur_artist, cur_album, candidates, rec) tuples. If no match is found, all of the yielded parameters (except items) are None.
def lookup_candidates(session, task): """A coroutine for performing the initial MusicBrainz lookup for an album. It accepts lists of Items and yields (items, cur_artist, cur_album, candidates, rec) tuples. If no match is found, all of the yielded parameters (except items) are None. """ if task.skip: # FIXME This gets duplicated a lot. We need a better # abstraction. return plugins.send("import_task_start", session=session, task=task) log.debug("Looking up: {0}", displayable_path(task.paths)) # Restrict the initial lookup to IDs specified by the user via the -m # option. Currently all the IDs are passed onto the tasks directly. task.search_ids = session.config["search_ids"].as_str_seq() task.lookup_candidates()
A coroutine for interfacing with the user about the tagging process. The coroutine accepts an ImportTask objects. It uses the session's `choose_match` method to determine the `action` for this task. Depending on the action additional stages are executed and the processed task is yielded. It emits the ``import_task_choice`` event for plugins. Plugins have access to the choice via the ``task.choice_flag`` property and may choose to change it.
def user_query(session, task): """A coroutine for interfacing with the user about the tagging process. The coroutine accepts an ImportTask objects. It uses the session's `choose_match` method to determine the `action` for this task. Depending on the action additional stages are executed and the processed task is yielded. It emits the ``import_task_choice`` event for plugins. Plugins have access to the choice via the ``task.choice_flag`` property and may choose to change it. """ if task.skip: return task if session.already_merged(task.paths): return pipeline.BUBBLE # Ask the user for a choice. task.choose_match(session) plugins.send("import_task_choice", session=session, task=task) # As-tracks: transition to singleton workflow. if task.choice_flag is action.TRACKS: # Set up a little pipeline for dealing with the singletons. def emitter(task): for item in task.items: task = SingletonImportTask(task.toppath, item) yield from task.handle_created(session) yield SentinelImportTask(task.toppath, task.paths) return _extend_pipeline( emitter(task), lookup_candidates(session), user_query(session) ) # As albums: group items by albums and create task for each album if task.choice_flag is action.ALBUMS: return _extend_pipeline( [task], group_albums(session), lookup_candidates(session), user_query(session), ) resolve_duplicates(session, task) if task.should_merge_duplicates: # Create a new task for tagging the current items # and duplicates together duplicate_items = task.duplicate_items(session.lib) # Duplicates would be reimported so make them look "fresh" _freshen_items(duplicate_items) duplicate_paths = [item.path for item in duplicate_items] # Record merged paths in the session so they are not reimported session.mark_merged(duplicate_paths) merged_task = ImportTask( None, task.paths + duplicate_paths, task.items + duplicate_items ) return _extend_pipeline( [merged_task], lookup_candidates(session), user_query(session) ) apply_choice(session, task) return task
Check if a task conflicts with items or albums already imported and ask the session to resolve this.
def resolve_duplicates(session, task): """Check if a task conflicts with items or albums already imported and ask the session to resolve this. """ if task.choice_flag in (action.ASIS, action.APPLY, action.RETAG): found_duplicates = task.find_duplicates(session.lib) if found_duplicates: log.debug( "found duplicates: {}".format([o.id for o in found_duplicates]) ) # Get the default action to follow from config. duplicate_action = config["import"]["duplicate_action"].as_choice( { "skip": "s", "keep": "k", "remove": "r", "merge": "m", "ask": "a", } ) log.debug("default action for duplicates: {0}", duplicate_action) if duplicate_action == "s": # Skip new. task.set_choice(action.SKIP) elif duplicate_action == "k": # Keep both. Do nothing; leave the choice intact. pass elif duplicate_action == "r": # Remove old. task.should_remove_duplicates = True elif duplicate_action == "m": # Merge duplicates together task.should_merge_duplicates = True else: # No default action set; ask the session. session.resolve_duplicate(task, found_duplicates) session.log_choice(task, True)
Select the `action.ASIS` choice for all tasks. This stage replaces the initial_lookup and user_query stages when the importer is run without autotagging.
def import_asis(session, task): """Select the `action.ASIS` choice for all tasks. This stage replaces the initial_lookup and user_query stages when the importer is run without autotagging. """ if task.skip: return log.info("{}", displayable_path(task.paths)) task.set_choice(action.ASIS) apply_choice(session, task)
Apply the task's choice to the Album or Item it contains and add it to the library.
def apply_choice(session, task): """Apply the task's choice to the Album or Item it contains and add it to the library. """ if task.skip: return # Change metadata. if task.apply: task.apply_metadata() plugins.send("import_task_apply", session=session, task=task) task.add(session.lib) # If ``set_fields`` is set, set those fields to the # configured values. # NOTE: This cannot be done before the ``task.add()`` call above, # because then the ``ImportTask`` won't have an `album` for which # it can set the fields. if config["import"]["set_fields"]: task.set_fields(session.lib)
A coroutine (pipeline stage) that calls the given function with each non-skipped import task. These stages occur between applying metadata changes and moving/copying/writing files.
def plugin_stage(session, func, task): """A coroutine (pipeline stage) that calls the given function with each non-skipped import task. These stages occur between applying metadata changes and moving/copying/writing files. """ if task.skip: return func(session, task) # Stage may modify DB, so re-load cached item data. # FIXME Importer plugins should not modify the database but instead # the albums and items attached to tasks. task.reload()
A coroutine (pipeline stage) that performs necessary file manipulations *after* items have been added to the library and finalizes each task.
def manipulate_files(session, task): """A coroutine (pipeline stage) that performs necessary file manipulations *after* items have been added to the library and finalizes each task. """ if not task.skip: if task.should_remove_duplicates: task.remove_duplicates(session.lib) if session.config["move"]: operation = MoveOperation.MOVE elif session.config["copy"]: operation = MoveOperation.COPY elif session.config["link"]: operation = MoveOperation.LINK elif session.config["hardlink"]: operation = MoveOperation.HARDLINK elif session.config["reflink"]: operation = MoveOperation.REFLINK else: operation = None task.manipulate_files( operation, write=session.config["write"], session=session, ) # Progress, cleanup, and event. task.finalize(session)
A coroutine (pipeline stage) to log each file to be imported.
def log_files(session, task): """A coroutine (pipeline stage) to log each file to be imported.""" if isinstance(task, SingletonImportTask): log.info("Singleton: {0}", displayable_path(task.item["path"])) elif task.items: log.info("Album: {0}", displayable_path(task.paths[0])) for item in task.items: log.info(" {0}", displayable_path(item["path"]))
A pipeline stage that groups the items of each task into albums using their metadata. Groups are identified using their artist and album fields. The pipeline stage emits new album tasks for each discovered group.
def group_albums(session): """A pipeline stage that groups the items of each task into albums using their metadata. Groups are identified using their artist and album fields. The pipeline stage emits new album tasks for each discovered group. """ def group(item): return (item.albumartist or item.artist, item.album) task = None while True: task = yield task if task.skip: continue tasks = [] sorted_items = sorted(task.items, key=group) for _, items in itertools.groupby(sorted_items, group): items = list(items) task = ImportTask(task.toppath, [i.path for i in items], items) tasks += task.handle_created(session) tasks.append(SentinelImportTask(task.toppath, task.paths)) task = pipeline.multiple(tasks)
Returns True if path is a subdirectory of any directory in dirs (a list). Otherwise, returns False.
def is_subdir_of_any_in_list(path, dirs): """Returns True if path is a subdirectory of any directory in dirs (a list). Otherwise, returns False. """ ancestors = ancestry(path) return any(d in ancestors for d in dirs)
Recursively searches the given directory and returns an iterable of (paths, items) where paths is a list of directories and items is a list of Items that is probably an album. Specifically, any folder containing any media files is an album.
def albums_in_dir(path): """Recursively searches the given directory and returns an iterable of (paths, items) where paths is a list of directories and items is a list of Items that is probably an album. Specifically, any folder containing any media files is an album. """ collapse_pat = collapse_paths = collapse_items = None ignore = config["ignore"].as_str_seq() ignore_hidden = config["ignore_hidden"].get(bool) for root, dirs, files in sorted_walk( path, ignore=ignore, ignore_hidden=ignore_hidden, logger=log ): items = [os.path.join(root, f) for f in files] # If we're currently collapsing the constituent directories in a # multi-disc album, check whether we should continue collapsing # and add the current directory. If so, just add the directory # and move on to the next directory. If not, stop collapsing. if collapse_paths: if (is_subdir_of_any_in_list(root, collapse_paths)) or ( collapse_pat and collapse_pat.match(os.path.basename(root)) ): # Still collapsing. collapse_paths.append(root) collapse_items += items continue else: # Collapse finished. Yield the collapsed directory and # proceed to process the current one. if collapse_items: yield collapse_paths, collapse_items collapse_pat = collapse_paths = collapse_items = None # Check whether this directory looks like the *first* directory # in a multi-disc sequence. There are two indicators: the file # is named like part of a multi-disc sequence (e.g., "Title Disc # 1") or it contains no items but only directories that are # named in this way. start_collapsing = False for marker in MULTIDISC_MARKERS: # We're using replace on %s due to lack of .format() on bytestrings p = MULTIDISC_PAT_FMT.replace(b"%s", marker) marker_pat = re.compile(p, re.I) match = marker_pat.match(os.path.basename(root)) # Is this directory the root of a nested multi-disc album? if dirs and not items: # Check whether all subdirectories have the same prefix. start_collapsing = True subdir_pat = None for subdir in dirs: subdir = util.bytestring_path(subdir) # The first directory dictates the pattern for # the remaining directories. if not subdir_pat: match = marker_pat.match(subdir) if match: match_group = re.escape(match.group(1)) subdir_pat = re.compile( b"".join([b"^", match_group, rb"\d"]), re.I ) else: start_collapsing = False break # Subsequent directories must match the pattern. elif not subdir_pat.match(subdir): start_collapsing = False break # If all subdirectories match, don't check other # markers. if start_collapsing: break # Is this directory the first in a flattened multi-disc album? elif match: start_collapsing = True # Set the current pattern to match directories with the same # prefix as this one, followed by a digit. collapse_pat = re.compile( b"".join([b"^", re.escape(match.group(1)), rb"\d"]), re.I ) break # If either of the above heuristics indicated that this is the # beginning of a multi-disc album, initialize the collapsed # directory and item lists and check the next directory. if start_collapsing: # Start collapsing; continue to the next iteration. collapse_paths = [root] collapse_items = items continue # If it's nonempty, yield it. if items: yield [root], items # Clear out any unfinished collapse. if collapse_paths and collapse_items: yield collapse_paths, collapse_items
Given a beets query string as a list of components, return the `Query` and `Sort` they represent. Like `dbcore.parse_sorted_query`, with beets query prefixes and ensuring that implicit path queries are made explicit with 'path::<query>'
def parse_query_parts(parts, model_cls): """Given a beets query string as a list of components, return the `Query` and `Sort` they represent. Like `dbcore.parse_sorted_query`, with beets query prefixes and ensuring that implicit path queries are made explicit with 'path::<query>' """ # Get query types and their prefix characters. prefixes = { ":": dbcore.query.RegexpQuery, "=~": dbcore.query.StringQuery, "=": dbcore.query.MatchQuery, } prefixes.update(plugins.queries()) # Special-case path-like queries, which are non-field queries # containing path separators (/). parts = [f"path:{s}" if PathQuery.is_path_query(s) else s for s in parts] case_insensitive = beets.config["sort_case_insensitive"].get(bool) query, sort = dbcore.parse_sorted_query( model_cls, parts, prefixes, case_insensitive ) log.debug("Parsed query: {!r}", query) log.debug("Parsed sort: {!r}", sort) return query, sort
Given a beets query string, return the `Query` and `Sort` they represent. The string is split into components using shell-like syntax.
def parse_query_string(s, model_cls): """Given a beets query string, return the `Query` and `Sort` they represent. The string is split into components using shell-like syntax. """ message = f"Query is not unicode: {s!r}" assert isinstance(s, str), message try: parts = shlex.split(s) except ValueError as exc: raise dbcore.InvalidQueryError(s, exc) return parse_query_parts(parts, model_cls)
Convert a string argument to an integer for use in a template function. May raise a ValueError.
def _int_arg(s): """Convert a string argument to an integer for use in a template function. May raise a ValueError. """ return int(s.strip())
Coerce `bytes` to `str` to avoid crashes solely due to logging. This is particularly relevant for bytestring paths. Much of our code explicitly uses `displayable_path` for them, but better be safe and prevent any crashes that are solely due to log formatting.
def logsafe(val): """Coerce `bytes` to `str` to avoid crashes solely due to logging. This is particularly relevant for bytestring paths. Much of our code explicitly uses `displayable_path` for them, but better be safe and prevent any crashes that are solely due to log formatting. """ # Bytestring: Needs decoding to be safe for substitution in format strings. if isinstance(val, bytes): # Blindly convert with UTF-8. Eventually, it would be nice to # (a) only do this for paths, if they can be given a distinct # type, and (b) warn the developer if they do this for other # bytestrings. return val.decode("utf-8", "replace") # Other objects are used as-is so field access, etc., still works in # the format string. Relies on a working __str__ implementation. return val
Imports the modules for a sequence of plugin names. Each name must be the name of a Python module under the "beetsplug" namespace package in sys.path; the module indicated should contain the BeetsPlugin subclasses desired.
def load_plugins(names=()): """Imports the modules for a sequence of plugin names. Each name must be the name of a Python module under the "beetsplug" namespace package in sys.path; the module indicated should contain the BeetsPlugin subclasses desired. """ for name in names: modname = f"{PLUGIN_NAMESPACE}.{name}" try: try: namespace = __import__(modname, None, None) except ImportError as exc: # Again, this is hacky: if exc.args[0].endswith(" " + name): log.warning("** plugin {0} not found", name) else: raise else: for obj in getattr(namespace, name).__dict__.values(): if ( isinstance(obj, type) and issubclass(obj, BeetsPlugin) and obj != BeetsPlugin and obj not in _classes ): _classes.add(obj) except Exception: log.warning( "** error loading plugin {}:\n{}", name, traceback.format_exc(), )
Returns a list of BeetsPlugin subclass instances from all currently loaded beets plugins. Loads the default plugin set first.
def find_plugins(): """Returns a list of BeetsPlugin subclass instances from all currently loaded beets plugins. Loads the default plugin set first. """ if _instances: # After the first call, use cached instances for performance reasons. # See https://github.com/beetbox/beets/pull/3810 return list(_instances.values()) load_plugins() plugins = [] for cls in _classes: # Only instantiate each plugin class once. if cls not in _instances: _instances[cls] = cls() plugins.append(_instances[cls]) return plugins
Returns a list of Subcommand objects from all loaded plugins.
def commands(): """Returns a list of Subcommand objects from all loaded plugins.""" out = [] for plugin in find_plugins(): out += plugin.commands() return out
Returns a dict mapping prefix strings to Query subclasses from all loaded plugins.
def queries(): """Returns a dict mapping prefix strings to Query subclasses from all loaded plugins. """ out = {} for plugin in find_plugins(): out.update(plugin.queries()) return out
Gets the track distance calculated by all loaded plugins. Returns a Distance object.
def track_distance(item, info): """Gets the track distance calculated by all loaded plugins. Returns a Distance object. """ from beets.autotag.hooks import Distance dist = Distance() for plugin in find_plugins(): dist.update(plugin.track_distance(item, info)) return dist
Returns the album distance calculated by plugins.
def album_distance(items, album_info, mapping): """Returns the album distance calculated by plugins.""" from beets.autotag.hooks import Distance dist = Distance() for plugin in find_plugins(): dist.update(plugin.album_distance(items, album_info, mapping)) return dist
Gets MusicBrainz candidates for an album from each plugin.
def candidates(items, artist, album, va_likely, extra_tags=None): """Gets MusicBrainz candidates for an album from each plugin.""" for plugin in find_plugins(): yield from plugin.candidates( items, artist, album, va_likely, extra_tags )
Gets MusicBrainz candidates for an item from the plugins.
def item_candidates(item, artist, title): """Gets MusicBrainz candidates for an item from the plugins.""" for plugin in find_plugins(): yield from plugin.item_candidates(item, artist, title)
Get AlbumInfo objects for a given ID string.
def album_for_id(album_id): """Get AlbumInfo objects for a given ID string.""" for plugin in find_plugins(): album = plugin.album_for_id(album_id) if album: yield album
Get TrackInfo objects for a given ID string.
def track_for_id(track_id): """Get TrackInfo objects for a given ID string.""" for plugin in find_plugins(): track = plugin.track_for_id(track_id) if track: yield track
Get all the template functions declared by plugins as a dictionary.
def template_funcs(): """Get all the template functions declared by plugins as a dictionary. """ funcs = {} for plugin in find_plugins(): if plugin.template_funcs: funcs.update(plugin.template_funcs) return funcs
Get a list of early import stage functions defined by plugins.
def early_import_stages(): """Get a list of early import stage functions defined by plugins.""" stages = [] for plugin in find_plugins(): stages += plugin.get_early_import_stages() return stages
Get a list of import stage functions defined by plugins.
def import_stages(): """Get a list of import stage functions defined by plugins.""" stages = [] for plugin in find_plugins(): stages += plugin.get_import_stages() return stages
Check the provided template functions for conflicts and merge into funcs. Raises a `PluginConflictException` if a plugin defines template functions for fields that another plugin has already defined template functions for.
def _check_conflicts_and_merge(plugin, plugin_funcs, funcs): """Check the provided template functions for conflicts and merge into funcs. Raises a `PluginConflictException` if a plugin defines template functions for fields that another plugin has already defined template functions for. """ if plugin_funcs: if not plugin_funcs.keys().isdisjoint(funcs.keys()): conflicted_fields = ", ".join(plugin_funcs.keys() & funcs.keys()) raise PluginConflictException( f"Plugin {plugin.name} defines template functions for " f"{conflicted_fields} that conflict with another plugin." ) funcs.update(plugin_funcs)
Get a dictionary mapping field names to unary functions that compute the field's value.
def item_field_getters(): """Get a dictionary mapping field names to unary functions that compute the field's value. """ funcs = {} for plugin in find_plugins(): _check_conflicts_and_merge(plugin, plugin.template_fields, funcs) return funcs
As above, for album fields.
def album_field_getters(): """As above, for album fields.""" funcs = {} for plugin in find_plugins(): _check_conflicts_and_merge(plugin, plugin.album_template_fields, funcs) return funcs
Find all event handlers from plugins as a dictionary mapping event names to sequences of callables.
def event_handlers(): """Find all event handlers from plugins as a dictionary mapping event names to sequences of callables. """ all_handlers = defaultdict(list) for plugin in find_plugins(): if plugin.listeners: for event, handlers in plugin.listeners.items(): all_handlers[event] += handlers return all_handlers
Send an event to all assigned event listeners. `event` is the name of the event to send, all other named arguments are passed along to the handlers. Return a list of non-None values returned from the handlers.
def send(event, **arguments): """Send an event to all assigned event listeners. `event` is the name of the event to send, all other named arguments are passed along to the handlers. Return a list of non-None values returned from the handlers. """ log.debug("Sending event: {0}", event) results = [] for handler in event_handlers()[event]: result = handler(**arguments) if result is not None: results.append(result) return results
Return a regular expression that matches phrases like "featuring" that separate a main artist or a song title from secondary artists. The `for_artist` option determines whether the regex should be suitable for matching artist fields (the default) or title fields.
def feat_tokens(for_artist=True): """Return a regular expression that matches phrases like "featuring" that separate a main artist or a song title from secondary artists. The `for_artist` option determines whether the regex should be suitable for matching artist fields (the default) or title fields. """ feat_words = ["ft", "featuring", "feat", "feat.", "ft."] if for_artist: feat_words += ["with", "vs", "and", "con", "&"] return r"(?<=\s)(?:{})(?=\s)".format( "|".join(re.escape(x) for x in feat_words) )
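A short usage sketch, assuming feat_tokens is in scope; the example strings are hypothetical:

import re

# Artist mode (the default) also matches joiners such as "with" and "&".
print(re.split(feat_tokens(), "Alice feat. Bob"))
# ['Alice ', ' Bob']

# Title mode restricts the pattern to the "featuring" variants.
print(re.split(feat_tokens(for_artist=False), "Song Title ft. Bob"))
# ['Song Title ', ' Bob']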
Clean up a stringlist configuration attribute: keep only choices elements present in choices_all, remove duplicate elements, expand '*' wildcard while keeping original stringlist order.
def sanitize_choices(choices, choices_all): """Clean up a stringlist configuration attribute: keep only choices elements present in choices_all, remove duplicate elements, expand '*' wildcard while keeping original stringlist order. """ seen = set() others = [x for x in choices_all if x not in choices] res = [] for s in choices: if s not in seen: if s in list(choices_all): res.append(s) elif s == "*": res.extend(others) seen.add(s) return res
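A doctest-style usage sketch for sanitize_choices, assuming the function above is importable, showing duplicate removal and '*' wildcard expansion:

>>> sanitize_choices(["a", "a", "*"], ["a", "b", "c"])
['a', 'b', 'c']
>>> sanitize_choices(["d", "c", "a"], ["a", "b", "c"])
['c', 'a']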
Clean up a single-element mapping configuration attribute as returned by Confuse's `Pairs` template: keep only two-element tuples present in pairs_all, remove duplicate elements, expand ('str', '*') and ('*', '*') wildcards while keeping the original order. Note that ('*', '*') and ('*', 'whatever') have the same effect. For example, >>> sanitize_pairs( ... [('foo', 'baz bar'), ('key', '*'), ('*', '*')], ... [('foo', 'bar'), ('foo', 'baz'), ('foo', 'foobar'), ... ('key', 'value')] ... ) [('foo', 'baz'), ('foo', 'bar'), ('key', 'value'), ('foo', 'foobar')]
def sanitize_pairs(pairs, pairs_all): """Clean up a single-element mapping configuration attribute as returned by Confuse's `Pairs` template: keep only two-element tuples present in pairs_all, remove duplicate elements, expand ('str', '*') and ('*', '*') wildcards while keeping the original order. Note that ('*', '*') and ('*', 'whatever') have the same effect. For example, >>> sanitize_pairs( ... [('foo', 'baz bar'), ('key', '*'), ('*', '*')], ... [('foo', 'bar'), ('foo', 'baz'), ('foo', 'foobar'), ... ('key', 'value')] ... ) [('foo', 'baz'), ('foo', 'bar'), ('key', 'value'), ('foo', 'foobar')] """ pairs_all = list(pairs_all) seen = set() others = [x for x in pairs_all if x not in pairs] res = [] for k, values in pairs: for v in values.split(): x = (k, v) if x in pairs_all: if x not in seen: seen.add(x) res.append(x) elif k == "*": new = [o for o in others if o not in seen] seen.update(new) res.extend(new) elif v == "*": new = [o for o in others if o not in seen and o[0] == k] seen.update(new) res.extend(new) return res
Makes a generator send the event 'event' every time it yields. This decorator is supposed to decorate a generator, but any function returning an iterable should work. Each yielded value is passed to plugins using the 'info' parameter of 'send'.
def notify_info_yielded(event): """Makes a generator send the event 'event' every time it yields. This decorator is supposed to decorate a generator, but any function returning an iterable should work. Each yielded value is passed to plugins using the 'info' parameter of 'send'. """ def decorator(generator): def decorated(*args, **kwargs): for v in generator(*args, **kwargs): send(event, info=v) yield v return decorated return decorator
Returns the ``data_source`` weight and the maximum source weight for albums or individual tracks.
def get_distance(config, data_source, info): """Returns the ``data_source`` weight and the maximum source weight for albums or individual tracks. """ dist = beets.autotag.Distance() if info.data_source == data_source: dist.add("source", config["source_weight"].as_number()) return dist
Store, move, and write the item according to the arguments. :param lib: beets library. :type lib: beets.library.Library :param item: Item whose changes to apply. :type item: beets.library.Item :param move: Move the item if it's in the library. :type move: bool :param pretend: Return without moving, writing, or storing the item's metadata. :type pretend: bool :param write: Write the item's metadata to its media file. :type write: bool
def apply_item_changes(lib, item, move, pretend, write): """Store, move, and write the item according to the arguments. :param lib: beets library. :type lib: beets.library.Library :param item: Item whose changes to apply. :type item: beets.library.Item :param move: Move the item if it's in the library. :type move: bool :param pretend: Return without moving, writing, or storing the item's metadata. :type pretend: bool :param write: Write the item's metadata to its media file. :type write: bool """ if pretend: return from beets import util # Move the item if it's in the library. if move and lib.directory in util.ancestry(item.path): item.move(with_album=False) if write: item.try_write() item.store()
Get the duration of an item or album.
def _length(obj, album): """Get the duration of an item or album.""" if album: return sum(i.length for i in obj.items()) else: return obj.length
Generate (lazily) a permutation of the objects where every group with equal values for `field` has an equal chance of appearing in any given position.
def _equal_chance_permutation(objs, field="albumartist", random_gen=None): """Generate (lazily) a permutation of the objects where every group with equal values for `field` has an equal chance of appearing in any given position. """ rand = random_gen or random # Group the objects by artist so we can sample from them. key = attrgetter(field) objs.sort(key=key) objs_by_artists = {} for artist, v in groupby(objs, key): objs_by_artists[artist] = list(v) # While we still have artists with music to choose from, pick one # randomly and pick a track from that artist. while objs_by_artists: # Choose an artist and an object for that artist, removing # this choice from the pool. artist = rand.choice(list(objs_by_artists.keys())) objs_from_artist = objs_by_artists[artist] i = rand.randint(0, len(objs_from_artist) - 1) yield objs_from_artist.pop(i) # Remove the artist if we've used up all of its objects. if not objs_from_artist: del objs_by_artists[artist]
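A small usage sketch, assuming _equal_chance_permutation and its module-level imports (attrgetter, groupby) are available; the track objects and the seed are illustrative. Passing a seeded random.Random through random_gen makes the permutation deterministic for tests:

import random
from types import SimpleNamespace

tracks = [
    SimpleNamespace(albumartist="A", title="a1"),
    SimpleNamespace(albumartist="A", title="a2"),
    SimpleNamespace(albumartist="A", title="a3"),
    SimpleNamespace(albumartist="B", title="b1"),
]
rng = random.Random(42)
picked = list(_equal_chance_permutation(tracks, random_gen=rng))
# Artist "B" gets the same chance at each position as artist "A",
# even though "A" has three times as many tracks.
print([t.title for t in picked])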
Return a list containing the first `num` values in `iter` (or fewer, if the iterable ends early).
def _take(iter, num): """Return a list containing the first `num` values in `iter` (or fewer, if the iterable ends early). """ out = [] for val in iter: out.append(val) num -= 1 if num <= 0: break return out
Return a list containing the first values in `iter`, which should be Item or Album objects, that add up to the given amount of time in seconds.
def _take_time(iter, secs, album): """Return a list containing the first values in `iter`, which should be Item or Album objects, that add up to the given amount of time in seconds. """ out = [] total_time = 0.0 for obj in iter: length = _length(obj, album) if total_time + length <= secs: out.append(obj) total_time += length return out
Get a random subset of the provided `objs`. If `number` is provided, produce that many matches. Otherwise, if `time` is provided, instead select a list whose total time is close to that number of minutes. If `equal_chance` is true, give each artist an equal chance of being included so that artists with more songs are not represented disproportionately.
def random_objs( objs, album, number=1, time=None, equal_chance=False, random_gen=None ): """Get a random subset of the provided `objs`. If `number` is provided, produce that many matches. Otherwise, if `time` is provided, instead select a list whose total time is close to that number of minutes. If `equal_chance` is true, give each artist an equal chance of being included so that artists with more songs are not represented disproportionately. """ rand = random_gen or random # Permute the objects either in a straightforward way or an # artist-balanced way. if equal_chance: perm = _equal_chance_permutation(objs) else: perm = objs rand.shuffle(perm) # N.B. This shuffles the original list. # Select objects by time or count. if time: return _take_time(perm, time * 60, album) else: return _take(perm, number)
Insert an item into a virtual filesystem node.
def _insert(node, path, itemid): """Insert an item into a virtual filesystem node.""" if len(path) == 1: # Last component. Insert file. node.files[path[0]] = itemid else: # In a directory. dirname = path[0] rest = path[1:] if dirname not in node.dirs: node.dirs[dirname] = Node({}, {}) _insert(node.dirs[dirname], rest, itemid)
Generates a filesystem-like directory tree for the files contained in `lib`. Filesystem nodes are (files, dirs) named tuples in which both components are dictionaries. The first maps filenames to Item ids. The second maps directory names to child node tuples.
def libtree(lib): """Generates a filesystem-like directory tree for the files contained in `lib`. Filesystem nodes are (files, dirs) named tuples in which both components are dictionaries. The first maps filenames to Item ids. The second maps directory names to child node tuples. """ root = Node({}, {}) for item in lib.items(): dest = item.destination(fragment=True) parts = util.components(dest) _insert(root, parts, item.id) return root
Basic edit distance between two strings, ignoring non-alphanumeric characters and case. Comparisons are based on a transliteration/lowering to ASCII characters. Normalized by string length.
def _string_dist_basic(str1: str, str2: str) -> float: """Basic edit distance between two strings, ignoring non-alphanumeric characters and case. Comparisons are based on a transliteration/lowering to ASCII characters. Normalized by string length. """ assert isinstance(str1, str) assert isinstance(str2, str) str1 = as_string(unidecode(str1)) str2 = as_string(unidecode(str2)) str1 = re.sub(r"[^a-z0-9]", "", str1.lower()) str2 = re.sub(r"[^a-z0-9]", "", str2.lower()) if not str1 and not str2: return 0.0 return levenshtein_distance(str1, str2) / float(max(len(str1), len(str2)))
Gives an "intuitive" edit distance between two strings. This is an edit distance, normalized by the string length, with a number of tweaks that reflect intuition about text.
def string_dist(str1: Optional[str], str2: Optional[str]) -> float: """Gives an "intuitive" edit distance between two strings. This is an edit distance, normalized by the string length, with a number of tweaks that reflect intuition about text. """ if str1 is None and str2 is None: return 0.0 if str1 is None or str2 is None: return 1.0 str1 = str1.lower() str2 = str2.lower() # Don't penalize strings that move certain words to the end. For # example, "the something" should be considered equal to # "something, the". for word in SD_END_WORDS: if str1.endswith(", %s" % word): str1 = "{} {}".format(word, str1[: -len(word) - 2]) if str2.endswith(", %s" % word): str2 = "{} {}".format(word, str2[: -len(word) - 2]) # Perform a couple of basic normalizing substitutions. for pat, repl in SD_REPLACE: str1 = re.sub(pat, repl, str1) str2 = re.sub(pat, repl, str2) # Change the weight for certain string portions matched by a set # of regular expressions. We gradually change the strings and build # up penalties associated with parts of the string that were # deleted. base_dist = _string_dist_basic(str1, str2) penalty = 0.0 for pat, weight in SD_PATTERNS: # Get strings that drop the pattern. case_str1 = re.sub(pat, "", str1) case_str2 = re.sub(pat, "", str2) if case_str1 != str1 or case_str2 != str2: # If the pattern was present (i.e., it is deleted in the # the current case), recalculate the distances for the # modified strings. case_dist = _string_dist_basic(case_str1, case_str2) case_delta = max(0.0, base_dist - case_dist) if case_delta == 0.0: continue # Shift our baseline strings down (to avoid rematching the # same part of the string) and add a scaled distance # amount to the penalties. str1 = case_str1 str2 = case_str2 base_dist = case_dist penalty += weight * case_delta return base_dist + penalty
Get an AlbumInfo object for a MusicBrainz release ID. Return None if the ID is not found.
def album_for_mbid(release_id: str) -> Optional[AlbumInfo]: """Get an AlbumInfo object for a MusicBrainz release ID. Return None if the ID is not found. """ try: album = mb.album_for_id(release_id) if album: plugins.send("albuminfo_received", info=album) return album except mb.MusicBrainzAPIError as exc: exc.log(log) return None
Get a TrackInfo object for a MusicBrainz recording ID. Return None if the ID is not found.
def track_for_mbid(recording_id: str) -> Optional[TrackInfo]: """Get a TrackInfo object for a MusicBrainz recording ID. Return None if the ID is not found. """ try: track = mb.track_for_id(recording_id) if track: plugins.send("trackinfo_received", info=track) return track except mb.MusicBrainzAPIError as exc: exc.log(log) return None
Get a list of albums for an ID.
def albums_for_id(album_id: str) -> Iterable[AlbumInfo]: """Get a list of albums for an ID.""" a = album_for_mbid(album_id) if a: yield a for a in plugins.album_for_id(album_id): if a: plugins.send("albuminfo_received", info=a) yield a
Get a list of tracks for an ID.
def tracks_for_id(track_id: str) -> Iterable[TrackInfo]: """Get a list of tracks for an ID.""" t = track_for_mbid(track_id) if t: yield t for t in plugins.track_for_id(track_id): if t: plugins.send("trackinfo_received", info=t) yield t
Search for album matches. ``items`` is a list of Item objects that make up the album. ``artist`` and ``album`` are the respective names (strings), which may be derived from the item list or may be entered by the user. ``va_likely`` is a boolean indicating whether the album is likely to be a "various artists" release. ``extra_tags`` is an optional dictionary of additional tags used to further constrain the search.
def album_candidates( items: List[Item], artist: str, album: str, va_likely: bool, extra_tags: Dict, ) -> Iterable[Tuple]: """Search for album matches. ``items`` is a list of Item objects that make up the album. ``artist`` and ``album`` are the respective names (strings), which may be derived from the item list or may be entered by the user. ``va_likely`` is a boolean indicating whether the album is likely to be a "various artists" release. ``extra_tags`` is an optional dictionary of additional tags used to further constrain the search. """ if config["musicbrainz"]["enabled"]: # Base candidates if we have album and artist to match. if artist and album: yield from invoke_mb( mb.match_album, artist, album, len(items), extra_tags ) # Also add VA matches from MusicBrainz where appropriate. if va_likely and album: yield from invoke_mb( mb.match_album, None, album, len(items), extra_tags ) # Candidates from plugins. yield from plugins.candidates(items, artist, album, va_likely, extra_tags)
Search for item matches. ``item`` is the Item to be matched. ``artist`` and ``title`` are strings and either reflect the item or are specified by the user.
def item_candidates(item: Item, artist: str, title: str) -> Iterable[Tuple]: """Search for item matches. ``item`` is the Item to be matched. ``artist`` and ``title`` are strings and either reflect the item or are specified by the user. """ # MusicBrainz candidates. if config["musicbrainz"]["enabled"] and artist and title: yield from invoke_mb(mb.match_track, artist, title) # Plugin candidates. yield from plugins.item_candidates(item, artist, title)
Extract the likely current metadata for an album given a list of its items. Return two dictionaries: - The most common value for each field. - Whether each field's value was unanimous (values are booleans).
def current_metadata( items: Iterable[Item], ) -> Tuple[Dict[str, Any], Dict[str, Any]]: """Extract the likely current metadata for an album given a list of its items. Return two dictionaries: - The most common value for each field. - Whether each field's value was unanimous (values are booleans). """ assert items # Must be nonempty. likelies = {} consensus = {} fields = [ "artist", "album", "albumartist", "year", "disctotal", "mb_albumid", "label", "barcode", "catalognum", "country", "media", "albumdisambig", ] for field in fields: values = [item[field] for item in items if item] likelies[field], freq = plurality(values) consensus[field] = freq == len(values) # If there's an album artist consensus, use this for the artist. if consensus["albumartist"] and likelies["albumartist"]: likelies["artist"] = likelies["albumartist"] return likelies, consensus
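A minimal standalone sketch of the plurality/consensus step, using collections.Counter as a stand-in for the module's plurality helper (assumed to return the most common value together with its frequency).

# Sketch only: the real plurality helper lives elsewhere; Counter stands in here.
from collections import Counter

def plurality(values):
    value, freq = Counter(values).most_common(1)[0]
    return value, freq

years = [2001, 2001, 2001, 1999]
likely_year, freq = plurality(years)
consensus = freq == len(years)
print(likely_year, consensus)  # 2001 False -- a most common value, but not unanimous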
Given a list of Items and a list of TrackInfo objects, find the best mapping between them. Returns a mapping from Items to TrackInfo objects, a list of extra Items, and a list of extra TrackInfo objects. These "extra" objects occur when there is an unequal number of objects of the two types.
def assign_items(
    items: Sequence[Item],
    tracks: Sequence[TrackInfo],
) -> Tuple[Dict[Item, TrackInfo], List[Item], List[TrackInfo]]:
    """Given a list of Items and a list of TrackInfo objects, find the
    best mapping between them. Returns a mapping from Items to TrackInfo
    objects, a list of extra Items, and a list of extra TrackInfo
    objects. These "extra" objects occur when there is an unequal number
    of objects of the two types.
    """
    # Construct the cost matrix.
    costs: List[List[Distance]] = []
    for item in items:
        row = []
        for track in tracks:
            row.append(track_distance(item, track))
        costs.append(row)

    # Find a minimum-cost bipartite matching.
    log.debug("Computing track assignment...")
    matching = Munkres().compute(costs)
    log.debug("...done.")

    # Produce the output matching.
    mapping = {items[i]: tracks[j] for (i, j) in matching}
    extra_items = list(set(items) - set(mapping.keys()))
    extra_items.sort(key=lambda i: (i.disc, i.track, i.title))
    extra_tracks = list(set(tracks) - set(mapping.values()))
    extra_tracks.sort(key=lambda t: (t.index, t.title))
    return mapping, extra_items, extra_tracks
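The assignment step is a plain Hungarian-algorithm problem once the costs are computed. The sketch below swaps the Distance objects for floats; it assumes the same munkres package used above is installed.

# Standalone sketch with made-up float costs in place of Distance objects.
from munkres import Munkres

# costs[i][j]: how badly item i matches track j (lower is better).
costs = [
    [0.1, 0.9, 0.8],
    [0.9, 0.2, 0.7],
    [0.8, 0.9, 0.1],
]
matching = Munkres().compute(costs)
print(matching)  # [(0, 0), (1, 1), (2, 2)] -- the minimum-total-cost pairing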
Returns True if the item's track number differs from the track info's index. Tolerates both per-disc and per-release numbering.
def track_index_changed(item: Item, track_info: TrackInfo) -> bool:
    """Returns True if the item's track number differs from the track
    info's index. Tolerates both per-disc and per-release numbering.
    """
    return item.track not in (track_info.medium_index, track_info.index)
Determines the significance of a track metadata change. Returns a Distance object. `incl_artist` indicates that a distance component should be included for the track artist (i.e., for various-artist releases).
def track_distance( item: Item, track_info: TrackInfo, incl_artist: bool = False, ) -> Distance: """Determines the significance of a track metadata change. Returns a Distance object. `incl_artist` indicates that a distance component should be included for the track artist (i.e., for various-artist releases). """ dist = hooks.Distance() # Length. if track_info.length: item_length = cast(float, item.length) track_length_grace = cast( Union[float, int], config["match"]["track_length_grace"].as_number(), ) track_length_max = cast( Union[float, int], config["match"]["track_length_max"].as_number(), ) diff = abs(item_length - track_info.length) - track_length_grace dist.add_ratio("track_length", diff, track_length_max) # Title. dist.add_string("track_title", item.title, track_info.title) # Artist. Only check if there is actually an artist in the track data. if ( incl_artist and track_info.artist and item.artist.lower() not in VA_ARTISTS ): dist.add_string("track_artist", item.artist, track_info.artist) # Track index. if track_info.index and item.track: dist.add_expr("track_index", track_index_changed(item, track_info)) # Track ID. if item.mb_trackid: dist.add_expr("track_id", item.mb_trackid != track_info.track_id) # Plugins. dist.update(plugins.track_distance(item, track_info)) return dist
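Worked numbers for the length component, under the assumption that add_ratio clamps the ratio into [0, 1]; the grace and cap values below are illustrative stand-ins for whatever the match configuration actually holds.

# Hypothetical config values for illustration; the real track_length_grace and
# track_length_max come from config["match"].
track_length_grace = 10.0
track_length_max = 30.0

item_length, track_length = 185.0, 200.0
diff = abs(item_length - track_length) - track_length_grace  # 15 - 10 = 5.0
ratio = min(max(diff / track_length_max, 0.0), 1.0)          # 5 / 30
print(round(ratio, 3))  # 0.167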
Determines how "significant" an album metadata change would be. Returns a Distance object. `album_info` is an AlbumInfo object reflecting the album to be compared. `items` is a sequence of all Item objects that will be matched (order is not important). `mapping` is a dictionary mapping Items to TrackInfo objects; the keys are a subset of `items` and the values are a subset of `album_info.tracks`.
def distance(
    items: Sequence[Item],
    album_info: AlbumInfo,
    mapping: Dict[Item, TrackInfo],
) -> Distance:
    """Determines how "significant" an album metadata change would be.
    Returns a Distance object. `album_info` is an AlbumInfo object
    reflecting the album to be compared. `items` is a sequence of all
    Item objects that will be matched (order is not important).
    `mapping` is a dictionary mapping Items to TrackInfo objects; the
    keys are a subset of `items` and the values are a subset of
    `album_info.tracks`.
    """
    likelies, _ = current_metadata(items)

    dist = hooks.Distance()

    # Artist, if not various.
    if not album_info.va:
        dist.add_string("artist", likelies["artist"], album_info.artist)

    # Album.
    dist.add_string("album", likelies["album"], album_info.album)

    # Current or preferred media.
    if album_info.media:
        # Preferred media options.
        patterns = config["match"]["preferred"]["media"].as_str_seq()
        patterns = cast(Sequence, patterns)
        options = [re.compile(r"(\d+x)?(%s)" % pat, re.I) for pat in patterns]
        if options:
            dist.add_priority("media", album_info.media, options)
        # Current media.
        elif likelies["media"]:
            dist.add_equality("media", album_info.media, likelies["media"])

    # Mediums.
    if likelies["disctotal"] and album_info.mediums:
        dist.add_number("mediums", likelies["disctotal"], album_info.mediums)

    # Prefer earliest release.
    if album_info.year and config["match"]["preferred"]["original_year"]:
        # Assume 1889 (earliest first gramophone discs) if we don't know the
        # original year.
        original = album_info.original_year or 1889
        diff = abs(album_info.year - original)
        diff_max = abs(datetime.date.today().year - original)
        dist.add_ratio("year", diff, diff_max)
    # Year.
    elif likelies["year"] and album_info.year:
        if likelies["year"] in (album_info.year, album_info.original_year):
            # No penalty for matching release or original year.
            dist.add("year", 0.0)
        elif album_info.original_year:
            # Prefer matches closest to the release year.
            diff = abs(likelies["year"] - album_info.year)
            diff_max = abs(
                datetime.date.today().year - album_info.original_year
            )
            dist.add_ratio("year", diff, diff_max)
        else:
            # Full penalty when there is no original year.
            dist.add("year", 1.0)

    # Preferred countries.
    patterns = config["match"]["preferred"]["countries"].as_str_seq()
    patterns = cast(Sequence, patterns)
    options = [re.compile(pat, re.I) for pat in patterns]
    if album_info.country and options:
        dist.add_priority("country", album_info.country, options)
    # Country.
    elif likelies["country"] and album_info.country:
        dist.add_string("country", likelies["country"], album_info.country)

    # Label.
    if likelies["label"] and album_info.label:
        dist.add_string("label", likelies["label"], album_info.label)

    # Catalog number.
    if likelies["catalognum"] and album_info.catalognum:
        dist.add_string(
            "catalognum", likelies["catalognum"], album_info.catalognum
        )

    # Disambiguation.
    if likelies["albumdisambig"] and album_info.albumdisambig:
        dist.add_string(
            "albumdisambig", likelies["albumdisambig"], album_info.albumdisambig
        )

    # Album ID.
    if likelies["mb_albumid"]:
        dist.add_equality(
            "album_id", likelies["mb_albumid"], album_info.album_id
        )

    # Tracks.
    dist.tracks = {}
    for item, track in mapping.items():
        dist.tracks[track] = track_distance(item, track, album_info.va)
        dist.add("tracks", dist.tracks[track].distance)

    # Missing tracks.
    for _ in range(len(album_info.tracks) - len(mapping)):
        dist.add("missing_tracks", 1.0)

    # Unmatched tracks.
    for _ in range(len(items) - len(mapping)):
        dist.add("unmatched_tracks", 1.0)

    # Plugins.
dist.update(plugins.album_distance(items, album_info, mapping)) return dist
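Worked numbers for the "prefer earliest release" branch above, with made-up years: a 2005 reissue of a 1970 album is penalized in proportion to its distance from the original year, relative to the span from that year to today.

# Worked example with made-up years; the ratio shrinks as today's year grows.
import datetime

original, release = 1970, 2005          # original issue vs. the reissue at hand
diff = abs(release - original)          # 35
diff_max = abs(datetime.date.today().year - original)
print(round(diff / diff_max, 2))        # e.g. 35 / 55, roughly 0.64, when today's year is 2025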
If the items are tagged with a MusicBrainz album ID, returns an AlbumInfo object for the corresponding album. Otherwise, returns None.
def match_by_id(items: Iterable[Item]): """If the items are tagged with a MusicBrainz album ID, returns an AlbumInfo object for the corresponding album. Otherwise, returns None. """ albumids = (item.mb_albumid for item in items if item.mb_albumid) # Did any of the items have an MB album ID? try: first = next(albumids) except StopIteration: log.debug("No album ID found.") return None # Is there a consensus on the MB album ID? for other in albumids: if other != first: log.debug("No album ID consensus.") return None # If all album IDs are equal, look up the album. log.debug("Searching for discovered album ID: {0}", first) return hooks.album_for_mbid(first)
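The consensus check above, stripped of the MusicBrainz specifics: peek at the first value with next() and compare the rest lazily, never materializing the whole generator.

# Generic sketch of the consensus-over-a-generator pattern used above.
def consensus_value(values):
    values = iter(values)
    try:
        first = next(values)
    except StopIteration:
        return None  # nothing to agree on
    for other in values:
        if other != first:
            return None  # disagreement
    return first

print(consensus_value(["abc", "abc", "abc"]))  # "abc"
print(consensus_value(["abc", "xyz"]))         # None
print(consensus_value([]))                     # None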
Given a sorted list of AlbumMatch or TrackMatch objects, return a recommendation based on the results' distances. If the recommendation is higher than the configured maximum for an applied penalty, the recommendation will be downgraded to the configured maximum for that penalty.
def _recommendation( results: Sequence[Union[AlbumMatch, TrackMatch]], ) -> Recommendation: """Given a sorted list of AlbumMatch or TrackMatch objects, return a recommendation based on the results' distances. If the recommendation is higher than the configured maximum for an applied penalty, the recommendation will be downgraded to the configured maximum for that penalty. """ if not results: # No candidates: no recommendation. return Recommendation.none # Basic distance thresholding. min_dist = results[0].distance if min_dist < config["match"]["strong_rec_thresh"].as_number(): # Strong recommendation level. rec = Recommendation.strong elif min_dist <= config["match"]["medium_rec_thresh"].as_number(): # Medium recommendation level. rec = Recommendation.medium elif len(results) == 1: # Only a single candidate. rec = Recommendation.low elif ( results[1].distance - min_dist >= config["match"]["rec_gap_thresh"].as_number() ): # Gap between first two candidates is large. rec = Recommendation.low else: # No conclusion. Return immediately. Can't be downgraded any further. return Recommendation.none # Downgrade to the max rec if it is lower than the current rec for an # applied penalty. keys = set(min_dist.keys()) if isinstance(results[0], hooks.AlbumMatch): for track_dist in min_dist.tracks.values(): keys.update(list(track_dist.keys())) max_rec_view = config["match"]["max_rec"] for key in keys: if key in list(max_rec_view.keys()): max_rec = max_rec_view[key].as_choice( { "strong": Recommendation.strong, "medium": Recommendation.medium, "low": Recommendation.low, "none": Recommendation.none, } ) rec = min(rec, max_rec) return rec
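The core thresholding (without the per-penalty max_rec downgrade) reduces to a few comparisons. The cutoff values below are illustrative stand-ins for strong_rec_thresh, medium_rec_thresh, and rec_gap_thresh from the match configuration.

# Sketch of the thresholding with plain floats and hypothetical cutoffs.
def recommend(distances, strong=0.04, medium=0.25, gap=0.25):
    if not distances:
        return "none"
    best = distances[0]
    if best < strong:
        return "strong"
    if best <= medium:
        return "medium"
    if len(distances) == 1 or distances[1] - best >= gap:
        return "low"
    return "none"

print(recommend([0.02, 0.40]))  # strong
print(recommend([0.10, 0.12]))  # medium
print(recommend([0.30, 0.60]))  # low -- big gap to the runner-up
print(recommend([0.30, 0.31]))  # none -- too close to call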
Sort candidates by distance.
def _sort_candidates(candidates: Iterable[AnyMatch]) -> Sequence[AnyMatch]: """Sort candidates by distance.""" return sorted(candidates, key=lambda match: match.distance)
Given a candidate AlbumInfo object, attempt to add the candidate to the output dictionary of AlbumMatch objects. This involves checking the track count, ordering the items, checking for duplicates, and calculating the distance.
def _add_candidate( items: Sequence[Item], results: Dict[Any, AlbumMatch], info: AlbumInfo, ): """Given a candidate AlbumInfo object, attempt to add the candidate to the output dictionary of AlbumMatch objects. This involves checking the track count, ordering the items, checking for duplicates, and calculating the distance. """ log.debug( "Candidate: {0} - {1} ({2})", info.artist, info.album, info.album_id ) # Discard albums with zero tracks. if not info.tracks: log.debug("No tracks.") return # Prevent duplicates. if info.album_id and info.album_id in results: log.debug("Duplicate.") return # Discard matches without required tags. for req_tag in cast(Sequence, config["match"]["required"].as_str_seq()): if getattr(info, req_tag) is None: log.debug("Ignored. Missing required tag: {0}", req_tag) return # Find mapping between the items and the track info. mapping, extra_items, extra_tracks = assign_items(items, info.tracks) # Get the change distance. dist = distance(items, info, mapping) # Skip matches with ignored penalties. penalties = [key for key, _ in dist] ignored = cast(Sequence[str], config["match"]["ignored"].as_str_seq()) for penalty in ignored: if penalty in penalties: log.debug("Ignored. Penalty: {0}", penalty) return log.debug("Success. Distance: {0}", dist) results[info.album_id] = hooks.AlbumMatch( dist, info, mapping, extra_items, extra_tracks )
Return a tuple of the current artist name, the current album name, and a `Proposal` containing `AlbumMatch` candidates. The artist and album are the most common values of these fields among `items`. The `AlbumMatch` objects are generated by searching the metadata backends. By default, the metadata of the items is used for the search. This can be customized by setting the parameters. `search_ids` is a list of metadata backend IDs: if specified, it will restrict the candidates to those IDs, ignoring `search_artist` and `search_album`. The `mapping` field of the album has the matched `items` as keys. The recommendation is calculated from the match quality of the candidates.
def tag_album(
    items,
    search_artist: Optional[str] = None,
    search_album: Optional[str] = None,
    search_ids: List = [],
) -> Tuple[str, str, Proposal]:
    """Return a tuple of the current artist name, the current album
    name, and a `Proposal` containing `AlbumMatch` candidates.

    The artist and album are the most common values of these fields
    among `items`.

    The `AlbumMatch` objects are generated by searching the metadata
    backends. By default, the metadata of the items is used for the
    search. This can be customized by setting the parameters.
    `search_ids` is a list of metadata backend IDs: if specified, it
    will restrict the candidates to those IDs, ignoring `search_artist`
    and `search_album`. The `mapping` field of the album has the
    matched `items` as keys.

    The recommendation is calculated from the match quality of the
    candidates.
    """
    # Get current metadata.
    likelies, consensus = current_metadata(items)
    cur_artist = cast(str, likelies["artist"])
    cur_album = cast(str, likelies["album"])
    log.debug("Tagging {0} - {1}", cur_artist, cur_album)

    # The output result, keys are the MB album ID.
    candidates: Dict[Any, AlbumMatch] = {}

    # Search by explicit ID.
    if search_ids:
        for search_id in search_ids:
            log.debug("Searching for album ID: {0}", search_id)
            for album_info_for_id in hooks.albums_for_id(search_id):
                _add_candidate(items, candidates, album_info_for_id)

    # Use existing metadata or text search.
    else:
        # Try search based on current ID.
        id_info = match_by_id(items)
        if id_info:
            _add_candidate(items, candidates, id_info)
            rec = _recommendation(list(candidates.values()))
            log.debug("Album ID match recommendation is {0}", rec)
            if candidates and not config["import"]["timid"]:
                # If we have a very good MBID match, return immediately.
                # Otherwise, this match will compete against metadata-based
                # matches.
                if rec == Recommendation.strong:
                    log.debug("ID match.")
                    return (
                        cur_artist,
                        cur_album,
                        Proposal(list(candidates.values()), rec),
                    )

        # Search terms.
        if not (search_artist and search_album):
            # No explicit search terms -- use current metadata.
            search_artist, search_album = cur_artist, cur_album
        log.debug("Search terms: {0} - {1}", search_artist, search_album)

        extra_tags = None
        if config["musicbrainz"]["extra_tags"]:
            tag_list = config["musicbrainz"]["extra_tags"].get()
            extra_tags = {k: v for (k, v) in likelies.items() if k in tag_list}
            log.debug("Additional search terms: {0}", extra_tags)

        # Is this album likely to be a "various artist" release?
        va_likely = (
            (not consensus["artist"])
            or (search_artist.lower() in VA_ARTISTS)
            or any(item.comp for item in items)
        )
        log.debug("Album might be VA: {0}", va_likely)

        # Get the results from the data sources.
        for matched_candidate in hooks.album_candidates(
            items, search_artist, search_album, va_likely, extra_tags
        ):
            _add_candidate(items, candidates, matched_candidate)

    log.debug("Evaluating {0} candidates.", len(candidates))
    # Sort and get the recommendation.
    candidates_sorted = _sort_candidates(candidates.values())
    rec = _recommendation(candidates_sorted)
    return cur_artist, cur_album, Proposal(candidates_sorted, rec)
Find metadata for a single track. Return a `Proposal` consisting of `TrackMatch` objects. `search_artist` and `search_title` may be used to override the current metadata for the purposes of the MusicBrainz title search. `search_ids` may be used for restricting the search to a list of metadata backend IDs.
def tag_item(
    item,
    search_artist: Optional[str] = None,
    search_title: Optional[str] = None,
    search_ids: List = [],
) -> Proposal:
    """Find metadata for a single track. Return a `Proposal` consisting
    of `TrackMatch` objects.

    `search_artist` and `search_title` may be used to override the
    current metadata for the purposes of the MusicBrainz title search.
    `search_ids` may be used for restricting the search to a list of
    metadata backend IDs.
    """
    # Holds candidates found so far: keys are MBIDs; values are
    # (distance, TrackInfo) pairs.
    candidates = {}
    rec: Optional[Recommendation] = None

    # First, try matching by MusicBrainz ID.
    trackids = search_ids or [t for t in [item.mb_trackid] if t]
    if trackids:
        for trackid in trackids:
            log.debug("Searching for track ID: {0}", trackid)
            for track_info in hooks.tracks_for_id(trackid):
                dist = track_distance(item, track_info, incl_artist=True)
                candidates[track_info.track_id] = hooks.TrackMatch(
                    dist, track_info
                )
                # If this is a good match, then don't keep searching.
                rec = _recommendation(_sort_candidates(candidates.values()))
                if (
                    rec == Recommendation.strong
                    and not config["import"]["timid"]
                ):
                    log.debug("Track ID match.")
                    return Proposal(_sort_candidates(candidates.values()), rec)

    # If we're searching by ID, don't proceed.
    if search_ids:
        if candidates:
            assert rec is not None
            return Proposal(_sort_candidates(candidates.values()), rec)
        else:
            return Proposal([], Recommendation.none)

    # Search terms.
    if not (search_artist and search_title):
        search_artist, search_title = item.artist, item.title
    log.debug("Item search terms: {0} - {1}", search_artist, search_title)

    # Get and evaluate candidate metadata.
    for track_info in hooks.item_candidates(item, search_artist, search_title):
        dist = track_distance(item, track_info, incl_artist=True)
        candidates[track_info.track_id] = hooks.TrackMatch(dist, track_info)

    # Sort by distance and return with recommendation.
    log.debug("Found {0} candidates.", len(candidates))
    candidates_sorted = _sort_candidates(candidates.values())
    rec = _recommendation(candidates_sorted)
    return Proposal(candidates_sorted, rec)
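A hypothetical end-to-end usage sketch, assuming a configured beets installation where tag_item is importable from beets.autotag (import paths and field names may vary between versions); the database filename is made up.

# Hypothetical usage sketch -- assumes a configured beets install.
from beets import autotag, library

lib = library.Library("musiclibrary.db")  # made-up database path
item = lib.items().get()  # grab any one item, if the library has any
if item is not None:
    proposal = autotag.tag_item(item)
    print(proposal.recommendation)
    for match in proposal.candidates[:3]:
        print(match.info.artist, "-", match.info.title)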
Set up the python-musicbrainz-ngs module according to settings from the beets configuration. This should be called at startup.
def configure():
    """Set up the python-musicbrainz-ngs module according to settings
    from the beets configuration. This should be called at startup.
    """
    hostname = config["musicbrainz"]["host"].as_str()
    https = config["musicbrainz"]["https"].get(bool)
    # Only call set_hostname when a custom server is configured, since
    # musicbrainz-ngs connects to musicbrainz.org over HTTPS by default.
    if hostname != "musicbrainz.org":
        musicbrainzngs.set_hostname(hostname, https)
    musicbrainzngs.set_rate_limit(
        config["musicbrainz"]["ratelimit_interval"].as_number(),
        config["musicbrainz"]["ratelimit"].get(int),
    )
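For comparison, roughly what configure amounts to for a self-hosted mirror, written against the musicbrainzngs calls used above. The hostname and rate numbers are made up, the set_useragent call is assumed to be handled elsewhere at import time, and the two-argument set_hostname requires a reasonably recent musicbrainzngs.

# Hypothetical standalone equivalent; hostname and numbers are made up.
import musicbrainzngs

musicbrainzngs.set_useragent("example-app", "0.1")   # required before any query
musicbrainzngs.set_hostname("mb.example.org", True)  # custom server, over HTTPS
musicbrainzngs.set_rate_limit(1.0, 1)                # at most 1 request per second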