desc
stringlengths 3
26.7k
| decl
stringlengths 11
7.89k
| bodies
stringlengths 8
553k
|
---|---|---|
'Merge another Statistic into this instance.
Takes another Statistic of the same type, and merges its information into
this instance.
Args:
other: Another Statistic instance.'
| @abc.abstractmethod
def _merge_from(self, other):
| pass
|
'Return a string representation of this instance using the given name.
Returns a human readable and nicely presented representation of this
instance. Since this instance does not know what it\'s measuring, a string
name is given to use in the string representation.
For example, if this Statistic held a count, say 5, and the given name was
\'error_count\', then the string representation might be \'error_count: 5\'.
Args:
name: A string name for this instance.
Returns:
A human readable and preferably a nicely presented string representation
of this instance.'
| @abc.abstractmethod
def _pretty_print(self, name):
| pass
|
@abc.abstractmethod
def copy(self):
  """Returns a new copy of `self`."""
  pass
|
def __init__(self, name, start_value=0):
  """Constructs a Counter.

  Args:
    name: String name of this counter.
    start_value: What value to start the count at.
  """
  super(Counter, self).__init__(name)
  # Running tally; mutated by `increment` and `_merge_from`.
  self.count = start_value
|
def increment(self, inc=1):
  """Increment the count.

  Args:
    inc: (defaults to 1) How much to increment the count by.
  """
  self.count = self.count + inc
|
def _merge_from(self, other):
  """Adds the count of another Counter into this instance."""
  # Merging is only defined between two Counters.
  if not isinstance(other, Counter):
    raise MergeStatisticsException(
        'Cannot merge %s into Counter' % other.__class__.__name__)
  self.count += other.count
|
def __init__(self, name, buckets, verbose_pretty_print=False):
  """Initializes the histogram with the given ranges.

  Args:
    name: String name of this histogram.
    buckets: The ranges the histogram counts over. This is a list of values,
        where each value is the inclusive lower bound of the range. An extra
        range will be implicitly defined which spans from negative infinity
        to the lowest given lower bound. The highest given lower bound
        defines a range spaning to positive infinity. This way any value will
        be included in the histogram counts. For example, if `buckets` is
        [4, 6, 10] the histogram will have ranges
        [-inf, 4), [4, 6), [6, 10), [10, inf).
    verbose_pretty_print: If True, self.pretty_print will print the count for
        every bucket. If False, only buckets with positive counts will be
        printed.
  """
  super(Histogram, self).__init__(name)
  # Prepend -inf so that every value falls in some bucket; sorting a set
  # also drops duplicate lower bounds.
  self.buckets = [float('-inf')] + sorted(set(buckets))
  self.counters = {bucket_lower: 0 for bucket_lower in self.buckets}
  self.verbose_pretty_print = verbose_pretty_print
|
'Find rightmost bucket less than or equal to x.'
| def _find_le(self, x):
| i = bisect.bisect_right(self.buckets, x)
if i:
return self.buckets[(i - 1)]
raise ValueError
|
def increment(self, value, inc=1):
  """Increment the bucket containing the given value.

  The bucket count for which ever range `value` falls in will be incremented.

  Args:
    value: Any number.
    inc: An integer. How much to increment the bucket count by.
  """
  bucket_lower = self._find_le(value)
  self.counters[bucket_lower] = self.counters[bucket_lower] + inc
|
def _merge_from(self, other):
  """Adds the counts of another Histogram into this instance.

  `other` must have the same buckets as this instance. The counts
  from `other` are added to the counts for this instance.

  Args:
    other: Another Histogram instance with the same buckets as this instance.

  Raises:
    MergeStatisticsException: If `other` is not a Histogram or the buckets
        are not the same.
  """
  if not isinstance(other, Histogram):
    raise MergeStatisticsException(
        'Cannot merge %s into Histogram' % other.__class__.__name__)
  # The bucket boundaries must match exactly for a per-bucket sum to be
  # meaningful.
  if self.buckets != other.buckets:
    raise MergeStatisticsException(
        'Histogram buckets do not match. Expected %s, got %s'
        % (self.buckets, other.buckets))
  for bucket_lower, count in other.counters.items():
    self.counters[bucket_lower] += count
|
'Create an `DagOutput` with the given name.
Args:
name: If given, a string name which defines the name of this output.
If not given, the names in the dictionary this is connected to
will be used as output names.'
| def __init__(self, name=None):
| self.name = name
self.output_type = None
self.input_type = None
|
'Create an `DagInput` with the given type.
Args:
type_: The Python class which inputs to `DAGPipeline` should be
instances of. `DAGPipeline.input_type` will be this type.'
| def __init__(self, type_):
| self.output_type = type_
|
def __init__(self, dag, pipeline_name='DAGPipeline'):
  """Constructs a DAGPipeline.

  A DAG (direct acyclic graph) is given which fully specifies what the
  DAGPipeline runs.

  Args:
    dag: A dictionary mapping `Pipeline` or `DagOutput` instances to any of
        `Pipeline`, `PipelineKey`, `DagInput`. `dag` defines a directed
        acyclic graph.
    pipeline_name: String name of this Pipeline object.

  Raises:
    InvalidDAGException: If each key value pair in the `dag` dictionary is
        not of the form
        (Pipeline or DagOutput): (Pipeline, PipelineKey, or DagInput).
    TypeMismatchException: The type signature of each key and value in `dag`
        must match, otherwise this will be thrown.
    DuplicateNameException: If two `Pipeline` instances in `dag` have the
        same string name.
    BadInputOrOutputException: If there are no `DagOutput` instaces in `dag`
        or not exactly one `DagInput` plus type combination in `dag`.
    InvalidDictionaryOutput: If `DagOutput()` is not connected to a
        dictionary, or `DagOutput(name)` is not connected to a Pipeline,
        PipelineKey, or DagInput instance.
    NotConnectedException: If a `Pipeline` used in a dependency has nothing
        feeding into it, or a `Pipeline` used as a destination does not feed
        anywhere.
    BadTopologyException: If there there is a directed cycle in `dag`.
    Exception: Misc. exceptions.
  """
  # Normalize shorthand forms (direct dict connections, unnamed DagOutput)
  # into the canonical {destination: dependency} mapping.
  self.dag = dict(self._expand_dag_shorthands(dag))
  # Validate every entry: left side must be Pipeline/DagOutput, right side
  # must be Pipeline/PipelineKey/DagInput (possibly inside a dict), and the
  # type signatures on both sides must agree.
  for (unit, dependency) in self.dag.items():
    if (not isinstance(unit, (pipeline.Pipeline, DagOutput))):
      raise InvalidDAGException(('Dependency {%s: %s} is invalid. Left hand side value %s must either be a Pipeline or DagOutput object' % (unit, dependency, unit)))
    if isinstance(dependency, dict):
      # NOTE: `basestring` means this module targets Python 2.
      if (not all([isinstance(name, basestring) for name in dependency])):
        raise InvalidDAGException(('Dependency {%s: %s} is invalid. Right hand side keys %s must be strings' % (unit, dependency, dependency.keys())))
      values = dependency.values()
    else:
      values = [dependency]
    for subordinate in values:
      if (not (isinstance(subordinate, pipeline.Pipeline) or (isinstance(subordinate, pipeline.PipelineKey) and isinstance(subordinate.unit, pipeline.Pipeline)) or isinstance(subordinate, DagInput))):
        raise InvalidDAGException(('Dependency {%s: %s} is invalid. Right hand side subordinate %s must be either a Pipeline, PipelineKey, or DagInput object' % (unit, dependency, subordinate)))
    if isinstance(unit, DagOutput):
      # DagOutput type signatures are not known yet; they are filled in
      # below once all dependencies have been validated.
      continue
    if (unit.input_type != self._get_type_signature_for_dependency(dependency)):
      raise TypeMismatchException(('Invalid dependency {%s: %s}. Required `input_type` of left hand side is %s. DagOutput type of right hand side is %s.' % (unit, dependency, unit.input_type, self._get_type_signature_for_dependency(dependency))))
  # Detect duplicate Pipeline names by sorting on name and comparing
  # adjacent entries.
  sorted_unit_names = sorted([(unit, unit.name) for unit in self.dag], key=(lambda t: t[1]))
  for (index, (unit, name)) in enumerate(sorted_unit_names[:(-1)]):
    if (name == sorted_unit_names[(index + 1)][1]):
      other_unit = sorted_unit_names[(index + 1)][0]
      raise DuplicateNameException(('Pipelines %s and %s both have name "%s". Each Pipeline must have a unique name.' % (unit, other_unit, name)))
  # Collect the DagOutput objects and give each one the type signature of
  # whatever feeds it.
  self.outputs = [unit for unit in self.dag if isinstance(unit, DagOutput)]
  self.output_names = dict([(output.name, output) for output in self.outputs])
  for output in self.outputs:
    output.input_type = output.output_type = self._get_type_signature_for_dependency(self.dag[output])
  # Exactly one DagInput instance must appear across all dependencies.
  inputs = set()
  for deps in self.dag.values():
    units = self._get_units(deps)
    for unit in units:
      if isinstance(unit, DagInput):
        inputs.add(unit)
  if (len(inputs) != 1):
    if (not inputs):
      raise BadInputOrOutputException('No DagInput object found. DagInput is the start of the pipeline.')
    else:
      raise BadInputOrOutputException('Multiple DagInput objects found. Only one input is supported.')
  if (not self.outputs):
    raise BadInputOrOutputException('No DagOutput objects found. DagOutput is the end of the pipeline.')
  self.input = inputs.pop()
  output_signature = dict([(output.name, output.output_type) for output in self.outputs])
  super(DAGPipeline, self).__init__(input_type=self.input.output_type, output_type=output_signature, name=pipeline_name)
  # Every unit that something depends on must itself be a destination, and
  # every non-output destination must be depended on by something;
  # otherwise part of the graph dangles.
  all_subordinates = set([dep_unit for unit in self.dag for dep_unit in self._get_units(self.dag[unit])]).difference(set([self.input]))
  all_destinations = set(self.dag.keys()).difference(set(self.outputs))
  if (all_subordinates != all_destinations):
    units_with_no_input = all_subordinates.difference(all_destinations)
    units_with_no_output = all_destinations.difference(all_subordinates)
    if units_with_no_input:
      raise NotConnectedException(('%s is given as a dependency in the DAG but has nothing connected to it. Nothing in the DAG feeds into it.' % units_with_no_input.pop()))
    else:
      raise NotConnectedException(('%s is given as a destination in the DAG but does not output anywhere. It is a deadend.' % units_with_no_output.pop()))
  # Topological sort (Kahn's algorithm), walking from the outputs back
  # toward the input. `graph` maps each unit to
  # [units it depends on, number of units that depend on it].
  graph = dict([(unit, [self._get_units(self.dag[unit]), 0]) for unit in self.dag])
  graph[self.input] = [[], 0]
  for (unit, (forward_connections, _)) in graph.items():
    for to_unit in forward_connections:
      graph[to_unit][1] += 1
  self.call_list = call_list = []
  nodes = set(self.outputs)
  while nodes:
    n = nodes.pop()
    call_list.append(n)
    for m in graph[n][0]:
      graph[m][1] -= 1
      if (graph[m][1] == 0):
        nodes.add(m)
      elif (graph[m][1] < 0):
        raise Exception(('Congratulations, you found a bug! Please report this issue at https://github.com/tensorflow/magenta/issues and copy/paste the following: dag=%s, graph=%s, call_list=%s' % (self.dag, graph, call_list)))
  # Any unit with a nonzero remaining count was never reached: a cycle.
  for unit in graph:
    if (graph[unit][1] != 0):
      raise BadTopologyException(('Dependency loop found on %s' % unit))
  if (set(call_list) != set(((list(all_subordinates) + self.outputs) + [self.input]))):
    raise BadTopologyException('Not all pipelines feed into an output or there is a dependency loop.')
  # The list was built outputs-first; reverse so execution starts at the
  # DagInput.
  call_list.reverse()
  assert (call_list[0] == self.input)
|
def _expand_dag_shorthands(self, dag):
  """Expand DAG shorthand.

  Currently the only shorthand is "direct connection".
  A direct connection is a connection {a: b} where a.input_type is a dict,
  b.output_type is a dict, and a.input_type == b.output_type. This is not
  actually valid, but we can convert it to a valid connection.

  {a: b} is expanded to
  {a: {"name_1": b["name_1"], "name_2": b["name_2"], ...}}.
  {DagOutput(): {"name_1", obj1, "name_2": obj2, ...} is expanded to
  {DagOutput("name_1"): obj1, DagOutput("name_2"): obj2, ...}.

  Args:
    dag: A dictionary encoding the DAG.

  Yields:
    Key, value pairs for a new dag dictionary.

  Raises:
    InvalidDictionaryOutput: If `DagOutput` is not used correctly.
  """
  for (key, val) in dag.items():
    # Direct connection: both sides are Pipelines with matching dict
    # signatures. Expand into one PipelineKey per dictionary name.
    if (isinstance(key, pipeline.Pipeline) and isinstance(val, pipeline.Pipeline) and isinstance(key.input_type, dict) and (key.input_type == val.output_type)):
      (yield (key, dict([(name, val[name]) for name in val.output_type])))
    elif (key == DagOutput()):
      # Unnamed DagOutput: fan out into one named DagOutput per entry of
      # the connected dictionary (or dict-typed Pipeline).
      if (isinstance(val, pipeline.Pipeline) and isinstance(val.output_type, dict)):
        dependency = [(name, val[name]) for name in val.output_type]
      elif isinstance(val, dict):
        dependency = val.items()
      else:
        raise InvalidDictionaryOutput(('DagOutput() with no name can only be connected to a dictionary or a Pipeline whose output_type is a dictionary. Found DagOutput() connected to %s' % val))
      for (name, subordinate) in dependency:
        (yield (DagOutput(name), subordinate))
    elif isinstance(key, DagOutput):
      # Named DagOutput must map to exactly one non-dictionary value.
      if isinstance(val, dict):
        raise InvalidDictionaryOutput(('DagOutput("%s") which has name "%s" can only be connected to a single input, not dictionary %s. Use DagOutput() without name instead.' % (key.name, key.name, val)))
      if (isinstance(val, pipeline.Pipeline) and isinstance(val.output_type, dict)):
        raise InvalidDictionaryOutput(('DagOutput("%s") which has name "%s" can only be connected to a single input, not pipeline %s which has dictionary output_type %s. Use DagOutput() without name instead.' % (key.name, key.name, val, val.output_type)))
      (yield (key, val))
    else:
      # Already canonical; pass through unchanged.
      (yield (key, val))
|
'Gets list of units from a dependency.'
| def _get_units(self, dependency):
| dep_list = []
if isinstance(dependency, dict):
dep_list.extend(dependency.values())
else:
dep_list.append(dependency)
return [self._validate_subordinate(sub) for sub in dep_list]
|
def _validate_subordinate(self, subordinate):
  """Verifies that subordinate is DagInput, PipelineKey, or Pipeline."""
  if isinstance(subordinate, pipeline.Pipeline):
    return subordinate
  if isinstance(subordinate, pipeline.PipelineKey):
    # A PipelineKey is only valid if it wraps a real Pipeline.
    if not isinstance(subordinate.unit, pipeline.Pipeline):
      raise InvalidDAGException(
          'PipelineKey object %s does not have a valid Pipeline' % subordinate)
    return subordinate.unit
  if isinstance(subordinate, DagInput):
    return subordinate
  raise InvalidDAGException(
      'Looking for Pipeline, PipelineKey, or DagInput object, but got %s'
      % type(subordinate))
|
def _get_type_signature_for_dependency(self, dependency):
  """Gets the type signature of the dependency output."""
  # Single units report their own output_type; dict dependencies map each
  # name to its subordinate's output_type.
  if isinstance(dependency,
                (pipeline.Pipeline, pipeline.PipelineKey, DagInput)):
    return dependency.output_type
  return {name: sub_dep.output_type
          for name, sub_dep in dependency.items()}
|
def transform(self, input_object):
  """Runs the DAG on the given input.

  All pipelines in the DAG will run.

  Args:
    input_object: Any object. The required type depends on implementation.

  Returns:
    A dictionary mapping output names to lists of objects. The object types
    depend on implementation. Each output name corresponds to an output
    collection. See get_output_names method.
  """
  # Generator that runs `unit` over each input while collecting the stats
  # produced by every call into `cumulative_stats` as a side effect.
  def stats_accumulator(unit, unit_inputs, cumulative_stats):
    for single_input in unit_inputs:
      results_ = unit.transform(single_input)
      stats = unit.get_stats()
      cumulative_stats.extend(stats)
      (yield results_)
  stats = []
  # `results` maps each unit to the outputs it has produced so far; the
  # DagInput is seeded with the single given input object.
  results = {self.input: [input_object]}
  # call_list is topologically sorted, so every unit's dependencies are
  # computed before the unit itself runs. Index 0 is the DagInput.
  for unit in self.call_list[1:]:
    if isinstance(unit, DagOutput):
      # Outputs don't transform anything; they just collect results.
      unit_outputs = self._get_outputs_as_signature(self.dag[unit], results)
    else:
      unit_inputs = self._get_inputs_for_unit(unit, results)
      if (not unit_inputs):
        # Nothing to feed this unit; record an empty result and move on.
        results[unit] = []
        continue
      # list() forces the generator so all stats are accumulated now.
      unjoined_outputs = list(stats_accumulator(unit, unit_inputs, stats))
      unit_outputs = self._join_lists_or_dicts(unjoined_outputs, unit)
    results[unit] = unit_outputs
  self._set_stats(stats)
  return dict([(output.name, results[output]) for output in self.outputs])
|
def _get_outputs_as_signature(self, dependency, outputs):
  """Returns a list or dict which matches the type signature of dependency.

  Args:
    dependency: DagInput, PipelineKey, Pipeline instance, or dictionary
        mapping names to those values.
    outputs: A database of computed unit outputs. A dictionary mapping
        Pipeline to list of objects.

  Returns:
    A list or dictionary of computed unit outputs which matches the type
    signature of the given dependency.
  """
  def _resolve(unit_or_key, outputs):
    # PipelineKey selects one named entry out of its unit's output dict.
    if isinstance(unit_or_key, pipeline.PipelineKey):
      computed = outputs[unit_or_key.unit]
      if not computed:
        # Nothing produced yet; pass the empty value through as-is.
        return computed
      assert isinstance(computed, dict)
      return computed[unit_or_key.key]
    assert isinstance(unit_or_key, (pipeline.Pipeline, DagInput))
    return outputs[unit_or_key]
  if isinstance(dependency, dict):
    return {name: _resolve(unit_or_key, outputs)
            for name, unit_or_key in dependency.items()}
  return _resolve(dependency, outputs)
|
'Creates valid inputs for the given unit from the outputs in `results`.
Args:
unit: The `Pipeline` to create inputs for.
results: A database of computed unit outputs. A dictionary mapping
Pipeline to list of objects.
list_operation: A function that maps lists of inputs to a single list of
tuples, where each tuple is an input. This is used when `unit` takes
a dictionary as input. Each tuple is used as the values for a
dictionary input. This can be thought of as taking a sort of
transpose of a ragged 2D array.
The default is `itertools.product` which takes the cartesian product
of the input lists.
Returns:
If `unit` takes a single input, a list of objects.
If `unit` takes a dictionary input, a list of dictionaries each mapping
string name to object.'
| def _get_inputs_for_unit(self, unit, results, list_operation=itertools.product):
| previous_outputs = self._get_outputs_as_signature(self.dag[unit], results)
if isinstance(previous_outputs, dict):
names = list(previous_outputs.keys())
lists = [previous_outputs[name] for name in names]
stack = list_operation(*lists)
return [dict(zip(names, values)) for values in stack]
else:
return previous_outputs
|
def _join_lists_or_dicts(self, outputs, unit):
  """Joins many lists or dicts of outputs into a single list or dict.

  This function also validates that the outputs are correct for the given
  Pipeline.

  If `outputs` is a list of lists, the lists are concated and the type of
  each object must match `unit.output_type`.

  If `output` is a list of dicts (mapping string names to lists), each
  key has its lists concated across all the dicts. The keys and types
  are validated against `unit.output_type`.

  Args:
    outputs: A list of lists, or list of dicts which map string names to
        lists.
    unit: A Pipeline which every output in `outputs` will be validated
        against. `unit` must produce the outputs it says it will produce.

  Returns:
    If `outputs` is a list of lists, a single list of outputs.
    If `outputs` is a list of dicts, a single dictionary mapping string names
    to lists of outputs.

  Raises:
    InvalidTransformOutputException: If anything in `outputs` does not match
        the type signature given by `unit.output_type`.
  """
  if not outputs:
    return []
  if isinstance(unit.output_type, dict):
    # Dictionary output: concatenate the list under each key across all
    # the per-call output dicts, validating keys and element types.
    concated = dict([(key, list()) for key in unit.output_type.keys()])
    for d in outputs:
      if not isinstance(d, dict):
        raise InvalidTransformOutputException(
            'Expected dictionary output for %s with output type %s but '
            'instead got type %s' % (unit, unit.output_type, type(d)))
      if set(d.keys()) != set(unit.output_type.keys()):
        raise InvalidTransformOutputException(
            'Got dictionary output with incorrect keys for %s. Got %s. '
            'Expected %s' % (unit, d.keys(), unit.output_type.keys()))
      for (k, val) in d.items():
        if not isinstance(val, list):
          raise InvalidTransformOutputException(
              'DagOutput from %s for key %s is not a list.' % (unit, k))
        if not _all_are_type(val, unit.output_type[k]):
          raise InvalidTransformOutputException(
              'Some outputs from %s for key %s are not of expected type %s. '
              'Got types %s' % (unit, k, unit.output_type[k],
                                [type(inst) for inst in val]))
        concated[k] += val
  else:
    # Single-type output: plain list concatenation with type validation.
    concated = []
    for l in outputs:
      if not isinstance(l, list):
        # Fixed typo in the error message: 'outpu type' -> 'output type'.
        raise InvalidTransformOutputException(
            'Expected list output for %s with output type %s but instead '
            'got type %s' % (unit, unit.output_type, type(l)))
      if not _all_are_type(l, unit.output_type):
        raise InvalidTransformOutputException(
            'Some outputs from %s are not of expected type %s. Got types %s'
            % (unit, unit.output_type, [type(inst) for inst in l]))
      concated += l
  return concated
|
def __init__(self, input_type, output_type, name=None):
  """Constructs a `Pipeline` object.

  Subclass constructors are expected to call this constructor.

  A type signature is a Python class or primative collection containing
  classes. Valid type signatures for `Pipeline` inputs and outputs are either
  a Python class, or a dictionary mapping string names to classes. An object
  matches a type signature if its type equals the type signature
  (i.e. type('hello') == str) or, if its a collection, the types in the
  collection match (i.e. {'hello': 'world', 'number': 1234} matches type
  signature {'hello': str, 'number': int})

  `Pipeline` instances have (preferably unique) string names. These names act
  as name spaces for the Statistics produced by them. The `get_stats` method
  will automatically prepend `name` to all of the Statistics names before
  returning them.

  Args:
    input_type: The type signature this pipeline expects for its inputs.
    output_type: The type signature this pipeline promises its outputs will
        have.
    name: The string name for this instance. This name is accessible through
        the `name` property. Names should be unique across `Pipeline`
        instances. If None (default), the string name of the implementing
        subclass is used.
  """
  if name is None:
    # Default to the implementing subclass's name.
    self._name = type(self).__name__
  else:
    # `six.string_types` covers both str and unicode under Python 2.
    assert isinstance(name, six.string_types)
    self._name = name
  _assert_valid_type_signature(input_type, 'input_type')
  _assert_valid_type_signature(output_type, 'output_type')
  self._input_type = input_type
  self._output_type = output_type
  self._stats = []
|
@property
def input_type(self):
  """What type or types does this pipeline take as input.

  Returns:
    A class, or a dictionary mapping names to classes.
  """
  return self._input_type
|
@property
def output_type(self):
  """What type or types does this pipeline output.

  Returns:
    A class, or a dictionary mapping names to classes.
  """
  return self._output_type
|
@property
def output_type_as_dict(self):
  """Returns a dictionary mapping names to classes.

  If `output_type` is a single class, then a default name will be created
  for the output and a dictionary containing `output_type` will be returned.

  Returns:
    Dictionary mapping names to output types.
  """
  # 'dataset' is the default name used when output_type is a bare class.
  return _guarantee_dict(self._output_type, 'dataset')
|
@property
def name(self):
  """The string name of this pipeline."""
  return self._name
|
@abc.abstractmethod
def transform(self, input_object):
  """Runs the pipeline on the given input.

  Args:
    input_object: An object or dictionary mapping names to objects.
        The object types must match `input_type`.

  Returns:
    If `output_type` is a class, `transform` returns a list of objects
    which are all that type. If `output_type` is a dictionary mapping
    names to classes, `transform` returns a dictionary mapping those
    same names to lists of objects that are the type mapped to each name.
  """
  pass
|
'Overwrites the current Statistics returned by `get_stats`.
Implementers of Pipeline should call `_set_stats` from within `transform`.
Args:
stats: An iterable of Statistic objects.
Raises:
InvalidStatisticsException: If `stats` is not iterable, or if any
object in the list is not a `Statistic` instance.'
| def _set_stats(self, stats):
| if (not hasattr(stats, '__iter__')):
raise InvalidStatisticsException(('Expecting iterable, got type %s' % type(stats)))
self._stats = [self._prepend_name(stat) for stat in stats]
|
def _prepend_name(self, stat):
  """Returns a copy of `stat` with `self.name` prepended to `stat.name`."""
  if not isinstance(stat, statistics.Statistic):
    raise InvalidStatisticsException(
        'Expecting Statistic object, got %s' % stat)
  # Copy first so the caller's Statistic is never mutated.
  stat_copy = stat.copy()
  stat_copy.name = '%s_%s' % (self._name, stat_copy.name)
  return stat_copy
|
def get_stats(self):
  """Returns Statistics about pipeline runs.

  Call `get_stats` after each call to `transform`.
  `transform` computes Statistics which will be returned here.

  Returns:
    A list of `Statistic` objects.
  """
  # Return a shallow copy so callers can't mutate the internal list.
  return list(self._stats)
|
def __init__(self, name=None):
  """Construct a NoteSequencePipeline. Should only be called by subclasses.

  Args:
    name: Pipeline name.
  """
  # These pipelines always map NoteSequence protos to NoteSequence protos.
  super(NoteSequencePipeline, self).__init__(
      input_type=music_pb2.NoteSequence,
      output_type=music_pb2.NoteSequence,
      name=name)
|
def __init__(self, hop_size_seconds, name=None):
  """Creates a Splitter pipeline.

  Args:
    hop_size_seconds: Hop size in seconds that will be used to split a
        NoteSequence at regular intervals.
    name: Pipeline name.
  """
  super(Splitter, self).__init__(name=name)
  self._hop_size_seconds = hop_size_seconds
|
def __init__(self, steps_per_quarter=None, steps_per_second=None, name=None):
  """Creates a Quantizer pipeline.

  Exactly one of `steps_per_quarter` and `steps_per_second` should be defined.

  Args:
    steps_per_quarter: Steps per quarter note to use for quantization.
    steps_per_second: Steps per second to use for quantization.
    name: Pipeline name.

  Raises:
    ValueError: If both or neither of `steps_per_quarter` and
        `steps_per_second` are set.
  """
  super(Quantizer, self).__init__(name=name)
  # Both set or both unset is an error; exactly one must be provided.
  if (steps_per_quarter is None) == (steps_per_second is None):
    raise ValueError(
        'Exactly one of steps_per_quarter or steps_per_second must be set.')
  self._steps_per_quarter = steps_per_quarter
  self._steps_per_second = steps_per_second
|
def __init__(self, stretch_factors, name=None):
  """Creates a StretchPipeline.

  Args:
    stretch_factors: A Python list of uniform stretch factors to apply.
    name: Pipeline name.
  """
  super(StretchPipeline, self).__init__(name=name)
  self._stretch_factors = stretch_factors
|
def __init__(self, transposition_range, name=None):
  """Creates a TranspositionPipeline.

  Args:
    transposition_range: Collection of integer pitch steps to transpose.
    name: Pipeline name.
  """
  super(TranspositionPipeline, self).__init__(name=name)
  self._transposition_range = transposition_range
|
@staticmethod
def _transpose(ns, amount, stats):
  """Transposes a note sequence by the specified amount."""
  # Work on a deep copy so the input sequence is never mutated.
  transposed = copy.deepcopy(ns)
  for note in transposed.notes:
    note.pitch += amount
    # Abandon the whole sequence as soon as any note leaves the valid
    # MIDI pitch range, recording the skip in stats.
    if not constants.MIN_MIDI_PITCH <= note.pitch <= constants.MAX_MIDI_PITCH:
      stats['skipped_due_to_range_exceeded'].increment()
      return None
  return transposed
|
def runTest(self, relative_root, recursive):
  """Tests the output for the given parameters.

  Args:
    relative_root: Directory path relative to `self.root_dir` to convert.
    recursive: Whether subdirectories of the root should be converted too.
  """
  root_dir = os.path.join(self.root_dir, relative_root)
  # Copy into a fresh set so that the `recursive` branch below does not
  # mutate the shared `expected_dir_midi_contents` fixture across runs.
  expected_filenames = set(self.expected_dir_midi_contents[relative_root])
  if recursive:
    for sub_dir in self.expected_sub_dirs[relative_root]:
      for filename in self.expected_dir_midi_contents[
          os.path.join(relative_root, sub_dir)]:
        expected_filenames.add(os.path.join(sub_dir, filename))
  with tempfile.NamedTemporaryFile(
      prefix='ConvertMidiDirToSequencesTest') as output_file:
    convert_dir_to_note_sequences.convert_directory(
        root_dir, output_file.name, 1, recursive)
    actual_filenames = set()
    for sequence in note_sequence_io.note_sequence_record_iterator(
        output_file.name):
      # assertEquals/assertNotEquals are deprecated aliases (removed in
      # Python 3.12); use assertEqual/assertNotEqual.
      self.assertEqual(
          note_sequence_io.generate_note_sequence_id(
              sequence.filename, os.path.basename(relative_root), 'midi'),
          sequence.id)
      self.assertEqual(os.path.basename(root_dir), sequence.collection_name)
      self.assertNotEqual(0, len(sequence.notes))
      actual_filenames.add(sequence.filename)
    self.assertEqual(expected_filenames, actual_filenames)
|
@staticmethod
def _get_score(filename):
  """Given a MusicXML file, return the score as an xml.etree.ElementTree.

  Given a MusicXML file, return the score as an xml.etree.ElementTree
  If the file is compress (ends in .mxl), uncompress it first

  Args:
    filename: The path of a MusicXML file

  Returns:
    The score as an xml.etree.ElementTree.

  Raises:
    MusicXMLParseException: if the file cannot be parsed.
  """
  score = None
  if filename.endswith('.mxl'):
    # Compressed MXL file: a zip archive whose META-INF/container.xml
    # names the main score document.
    try:
      mxlzip = zipfile.ZipFile(filename)
    except zipfile.BadZipfile as exception:
      raise MusicXMLParseException(exception)
    namelist = mxlzip.namelist()
    container_file = [x for x in namelist if (x == 'META-INF/container.xml')]
    compressed_file_name = ''
    if container_file:
      try:
        container = ET.fromstring(mxlzip.read(container_file[0]))
        for rootfile_tag in container.findall('./rootfiles/rootfile'):
          # Prefer rootfiles whose media-type is the MusicXML MIME type;
          # rootfiles without a media-type attribute are also accepted.
          # Seeing a second candidate in either case is an error.
          if ('media-type' in rootfile_tag.attrib):
            if (rootfile_tag.attrib['media-type'] == MUSICXML_MIME_TYPE):
              if (not compressed_file_name):
                compressed_file_name = rootfile_tag.attrib['full-path']
              else:
                raise MusicXMLParseException('Multiple MusicXML files found in compressed archive')
          elif (not compressed_file_name):
            compressed_file_name = rootfile_tag.attrib['full-path']
          else:
            raise MusicXMLParseException('Multiple MusicXML files found in compressed archive')
      except ET.ParseError as exception:
        raise MusicXMLParseException(exception)
    if (not compressed_file_name):
      raise MusicXMLParseException('Unable to locate main .xml file in compressed archive.')
    # NOTE(review): encoding to bytes before the namelist membership test
    # looks like Python 2 behavior — under Python 3, zipfile namelists are
    # str, so this comparison would need revisiting. TODO confirm.
    compressed_file_name = compressed_file_name.encode('utf-8')
    if (compressed_file_name not in namelist):
      raise MusicXMLParseException(('Score file %s not found in zip archive' % compressed_file_name))
    score_string = mxlzip.read(compressed_file_name)
    try:
      score = ET.fromstring(score_string)
    except ET.ParseError as exception:
      raise MusicXMLParseException(exception)
  else:
    # Uncompressed MusicXML: parse the file directly.
    try:
      tree = ET.parse(filename)
      score = tree.getroot()
    except ET.ParseError as exception:
      raise MusicXMLParseException(exception)
  return score
|
def _parse(self):
    """Parse the uncompressed MusicXML document."""
    # Parse the <part-list> to collect per-part metadata (name, MIDI info).
    xml_part_list = self._score.find('part-list')
    if xml_part_list is not None:
        for element in xml_part_list:
            if element.tag == 'score-part':
                score_part = ScorePart(element)
                self._score_parts[score_part.id] = score_part
    # Parse each <part>. The original code enumerated the parts and then
    # incremented the (unused) index by hand — a dead statement, since
    # enumerate reassigns it on the next iteration; both are removed.
    for child in self._score.findall('part'):
        part = Part(child, self._score_parts, self._state)
        self.parts.append(part)
        # The shared parser state tracks a running time cursor; keep the
        # maximum end time seen across parts.
        if self._state.time_position > self.total_time_secs:
            self.total_time_secs = self._state.time_position
def get_chord_symbols(self):
    """Return all unique chord symbols in this score, in encounter order.

    Returns:
      A list of chord symbols, deduplicated by equality.
    """
    collected = []
    for part in self.parts:
        for measure in part.measures:
            for symbol in measure.chord_symbols:
                if symbol in collected:
                    continue
                collected.append(symbol)
    return collected
def get_time_signatures(self):
    """Return a list of all the time signatures used in this score.

    Does not support polymeter (assumes all parts share the same time
    signature at any given time). Duplicate time signatures are dropped to
    avoid emitting the same signature once per part.

    Returns:
      A list of all unique TimeSignature objects used in this score.
    """
    signatures = []
    for part in self.parts:
        for measure in part.measures:
            candidate = measure.time_signature
            if candidate is None or candidate in signatures:
                continue
            signatures.append(candidate)
    return signatures
def get_key_signatures(self):
    """Return a list of all the key signatures used in this score.

    Supports different simultaneous key signatures in different parts
    (written pitch), while dropping duplicates so the same signature is not
    reported once per part. If no key signature is found anywhere, a default
    C-major signature at time 0 is returned.

    Returns:
      A list of unique KeySignature objects used in this score.
    """
    signatures = []
    for part in self.parts:
        for measure in part.measures:
            candidate = measure.key_signature
            if candidate is None or candidate in signatures:
                continue
            signatures.append(candidate)
    if not signatures:
        # Default: C major at the start of the piece.
        default_signature = KeySignature(self._state)
        default_signature.time_position = 0
        signatures.append(default_signature)
    return signatures
def get_tempos(self):
    """Return a list of all tempos in this score.

    Only the first part is consulted for tempo markings. If none are found,
    a default tempo (the parser state's current QPM) at time 0 is returned.

    Returns:
      A list of Tempo objects used in this score.
    """
    tempos = []
    if self.parts:
        first_part = self.parts[0]
        for measure in first_part.measures:
            tempos.extend(measure.tempos)
    if not tempos:
        # No explicit tempo marking: synthesize one from the parser state.
        default_tempo = Tempo(self._state)
        default_tempo.qpm = self._state.qpm
        default_tempo.time_position = 0
        tempos.append(default_tempo)
    return tempos
def _parse(self, xml_score_part):
    """Parse the <score-part> element to an in-memory representation.

    Args:
      xml_score_part: XML element with tag type 'score-part'.
    """
    self.id = xml_score_part.attrib['id']
    name_element = xml_score_part.find('part-name')
    if name_element is not None:
        self.part_name = name_element.text or ''
    midi_instrument = xml_score_part.find('midi-instrument')
    has_midi_info = (midi_instrument is not None
                     and midi_instrument.find('midi-channel') is not None
                     and midi_instrument.find('midi-program') is not None)
    if has_midi_info:
        self.midi_channel = int(midi_instrument.find('midi-channel').text)
        self.midi_program = int(midi_instrument.find('midi-program').text)
    else:
        # Missing or incomplete MIDI info: fall back to defaults.
        self.midi_channel = DEFAULT_MIDI_CHANNEL
        self.midi_program = DEFAULT_MIDI_PROGRAM
def _parse(self, xml_part, score_parts):
    """Parse the <part> element.

    Args:
      xml_part: XML element with tag type 'part'.
      score_parts: Dict mapping part id to ScorePart metadata.
    """
    if 'id' in xml_part.attrib:
        self.id = xml_part.attrib['id']
    # Look up the matching <score-part> metadata; fall back to defaults.
    try:
        self.score_part = score_parts[self.id]
    except KeyError:
        self.score_part = ScorePart()
    # Reset the shared parser state for this part.
    self._state.time_position = 0
    self._state.midi_channel = self.score_part.midi_channel
    self._state.midi_program = self.score_part.midi_program
    self._state.transpose = 0
    for xml_measure in xml_part.findall('measure'):
        self._repair_empty_measure(xml_measure)
        self.measures.append(Measure(xml_measure, self._state))
def _repair_empty_measure(self, measure):
    """Repair a measure if it is empty by inserting a whole measure rest.

    If a <measure> consists only of a <forward> element that advances the
    time cursor, remove the <forward> element and replace it with a whole
    measure rest of the same duration.

    Args:
      measure: The measure to repair.
    """
    forwards = measure.findall('forward')
    # Only repair the degenerate case: no notes and exactly one forward.
    if measure.findall('note') or len(forwards) != 1:
        return
    xml_forward = measure.find('forward')
    forward_duration = int(xml_forward.find('duration').text)
    measure.remove(xml_forward)
    rest_xml = ('<note>'
                '<rest /><duration>' + str(forward_duration) + '</duration>'
                '<voice>1</voice><type>whole</type><staff>1</staff>'
                '</note>')
    measure.append(ET.fromstring(rest_xml))
def _parse(self):
    """Parse the <measure> element, dispatching on each child's tag."""
    for child in self.xml_measure:
        if (child.tag == 'attributes'):
            self._parse_attributes(child)
        elif (child.tag == 'backup'):
            self._parse_backup(child)
        elif (child.tag == 'direction'):
            self._parse_direction(child)
        elif (child.tag == 'forward'):
            self._parse_forward(child)
        elif (child.tag == 'note'):
            note = Note(child, self.state)
            self.notes.append(note)
            # Remember the previous note: chord notes copy its duration
            # and time position.
            self.state.previous_note = note
            # Accumulate the measure duration from voice-1 notes only;
            # chord notes do not advance time and are excluded.
            if ((note.voice == 1) and (not note.is_in_chord)):
                self.duration += note.note_duration.duration
        elif (child.tag == 'harmony'):
            chord_symbol = ChordSymbol(child, self.state)
            self.chord_symbols.append(chord_symbol)
        else:
            # Ignore all other tag types.
            pass
def _parse_attributes(self, xml_attributes):
    """Parse the MusicXML <attributes> element.

    Args:
      xml_attributes: XML element with tag type 'attributes'.

    Raises:
      MultipleTimeSignatureException: if the measure declares more than one
        time signature.
    """
    for child in xml_attributes:
        tag = child.tag
        if tag == 'divisions':
            self.state.divisions = int(child.text)
        elif tag == 'key':
            self.key_signature = KeySignature(self.state, child)
        elif tag == 'time':
            if self.time_signature is not None:
                raise MultipleTimeSignatureException('Multiple time signatures')
            self.time_signature = TimeSignature(self.state, child)
            self.state.time_signature = self.time_signature
        elif tag == 'transpose':
            transpose = int(child.find('chromatic').text)
            self.state.transpose = transpose
            if self.key_signature is not None:
                # Shift an already-parsed key signature by the transposition.
                self.key_signature.key += transpose
def _parse_backup(self, xml_backup):
    """Parse the MusicXML <backup> element.

    This moves the global time position backwards.

    Args:
      xml_backup: XML element with tag type 'backup'.
    """
    backup_duration = int(xml_backup.find('duration').text)
    # Convert MusicXML divisions -> MIDI ticks -> seconds.
    midi_ticks = backup_duration * (constants.STANDARD_PPQ / self.state.divisions)
    seconds = (midi_ticks / constants.STANDARD_PPQ) * self.state.seconds_per_quarter
    self.state.time_position -= seconds
def _parse_direction(self, xml_direction):
    """Parse the MusicXML <direction> element for tempo/dynamics changes.

    Args:
      xml_direction: XML element with tag type 'direction'.
    """
    for child in xml_direction:
        if child.tag != 'sound':
            continue
        if child.get('tempo') is not None:
            tempo = Tempo(self.state, child)
            self.tempos.append(tempo)
            # Update the running tempo state used for time conversions.
            self.state.qpm = tempo.qpm
            self.state.seconds_per_quarter = (60 / self.state.qpm)
        if child.get('dynamics') is not None:
            self.state.velocity = int(child.get('dynamics'))
def _parse_forward(self, xml_forward):
    """Parse the MusicXML <forward> element.

    This moves the global time position forward.

    Args:
      xml_forward: XML element with tag type 'forward'.
    """
    forward_duration = int(xml_forward.find('duration').text)
    # Convert MusicXML divisions -> MIDI ticks -> seconds.
    midi_ticks = forward_duration * (constants.STANDARD_PPQ / self.state.divisions)
    seconds = (midi_ticks / constants.STANDARD_PPQ) * self.state.seconds_per_quarter
    self.state.time_position += seconds
def _fix_time_signature(self):
    """Correct the time signature for incomplete measures.

    If the measure is incomplete or a pickup, insert an appropriate
    time signature into this Measure.
    """
    # Compute the fraction of a whole note actually filled by this measure.
    numerator = self.duration
    denominator = (self.state.divisions * 4)
    fractional_time_signature = Fraction(numerator, denominator)
    if ((self.state.time_signature is None) and (self.time_signature is None)):
        # No time signature yet anywhere: derive one from the measure length.
        self.time_signature = TimeSignature(self.state)
        self.time_signature.numerator = fractional_time_signature.numerator
        self.time_signature.denominator = fractional_time_signature.denominator
        self.state.time_signature = self.time_signature
    else:
        fractional_state_time_signature = Fraction(self.state.time_signature.numerator, self.state.time_signature.denominator)
        # A measure shorter than the current signature is a pickup measure.
        pickup_measure = False
        if (numerator < self.state.time_signature.numerator):
            pickup_measure = True
        global_time_signature_denominator = self.state.time_signature.denominator
        if ((fractional_time_signature == 1) and (not pickup_measure)):
            # Exactly one whole note long: express as n/n of the global
            # denominator (e.g. 4/4 rather than 1/1).
            new_time_signature = TimeSignature(self.state)
            new_time_signature.numerator = global_time_signature_denominator
            new_time_signature.denominator = global_time_signature_denominator
        else:
            new_time_signature = TimeSignature(self.state)
            new_time_signature.numerator = numerator
            new_time_signature.denominator = denominator
            # Reduce to lowest terms when possible (e.g. 2/8 -> 1/4).
            new_time_sig_fraction = Fraction(numerator, denominator)
            if (new_time_sig_fraction == fractional_time_signature):
                new_time_signature.numerator = fractional_time_signature.numerator
                new_time_signature.denominator = fractional_time_signature.denominator
        # Install the corrected signature only for pickup measures or when
        # this measure's actual length disagrees with the global signature.
        if (pickup_measure or ((self.time_signature is None) and (fractional_time_signature != fractional_state_time_signature))):
            new_time_signature.time_position = self.start_time_position
            self.time_signature = new_time_signature
            self.state.time_signature = new_time_signature
def _parse(self):
    """Parse the MusicXML <note> element, dispatching on each child's tag.

    Raises:
      UnpitchedNoteException: if the note contains an <unpitched> element.
    """
    # Snapshot the current channel/program/velocity from the parser state.
    self.midi_channel = self.state.midi_channel
    self.midi_program = self.state.midi_program
    self.velocity = self.state.velocity
    for child in self.xml_note:
        if (child.tag == 'chord'):
            # <chord/> marks this note as sounding with the previous note.
            self.is_in_chord = True
        elif (child.tag == 'duration'):
            self.note_duration.parse_duration(self.is_in_chord, self.is_grace_note, child.text)
        elif (child.tag == 'pitch'):
            self._parse_pitch(child)
        elif (child.tag == 'rest'):
            self.is_rest = True
        elif (child.tag == 'voice'):
            self.voice = int(child.text)
        elif (child.tag == 'dot'):
            self.note_duration.dots += 1
        elif (child.tag == 'type'):
            self.note_duration.type = child.text
        elif (child.tag == 'time-modification'):
            # Tuplets are expressed as a time modification of the duration.
            self._parse_tuplet(child)
        elif (child.tag == 'unpitched'):
            raise UnpitchedNoteException('Unpitched notes are not supported')
        else:
            # Ignore all other tag types.
            pass
def _parse_pitch(self, xml_pitch):
    """Parse the MusicXML <pitch> element into (pitch string, MIDI pitch).

    Args:
      xml_pitch: XML element with tag type 'pitch'.
    """
    step = xml_pitch.find('step').text
    octave = xml_pitch.find('octave').text
    alter_element = xml_pitch.find('alter')
    alter_text = alter_element.text if alter_element is not None else ''
    alter = float(alter_text) if alter_text else 0.0
    # Whole semitones of alteration; any fractional part is microtonal.
    alter_semitones = int(alter)
    is_microtonal_alter = (alter != alter_semitones)
    alter_string = {(-2): 'bb', (-1): 'b', 1: '#', 2: 'x'}.get(alter_semitones, '')
    if is_microtonal_alter:
        alter_string += ' (+microtones) '
    pitch_string = step + alter_string + octave
    midi_pitch = self.pitch_to_midi_pitch(step, alter, octave)
    # Convert written pitch to concert pitch for transposing instruments.
    midi_pitch += self.state.transpose
    self.pitch = (pitch_string, midi_pitch)
def _parse_tuplet(self, xml_time_modification):
    """Parses a tuplet ratio.

    Represented in MusicXML by the <time-modification> element.

    Args:
      xml_time_modification: An xml time-modification element.
    """
    actual_notes = int(xml_time_modification.find('actual-notes').text)
    normal_notes = int(xml_time_modification.find('normal-notes').text)
    self.note_duration.tuplet_ratio = Fraction(actual_notes, normal_notes)
@staticmethod
def pitch_to_midi_pitch(step, alter, octave):
    """Convert MusicXML pitch representation to MIDI pitch number.

    Args:
      step: Pitch step letter, one of 'A'-'G'.
      alter: Number of semitones of alteration (truncated to an int).
      octave: Octave number (int or numeric string).

    Returns:
      The MIDI pitch number as an int.

    Raises:
      PitchStepParseException: if `step` is not a valid pitch letter.
    """
    step_to_pitch_class = {'C': 0, 'D': 2, 'E': 4, 'F': 5, 'G': 7, 'A': 9, 'B': 11}
    try:
        pitch_class = step_to_pitch_class[step]
    except KeyError:
        raise PitchStepParseException('Unable to parse pitch step ' + step)
    pitch_class = (pitch_class + int(alter)) % 12
    return 12 + pitch_class + (int(octave) * 12)
def parse_duration(self, is_in_chord, is_grace_note, duration):
    """Parse the duration of a note and compute timings.

    Args:
      is_in_chord: Whether this note sounds with the previous note.
      is_grace_note: Whether this note is a grace note.
      duration: The MusicXML duration text (in divisions).
    """
    self.duration = int(duration)
    if is_in_chord:
        # Chord notes re-use the duration of the note that opened the chord.
        self.duration = self.state.previous_note.note_duration.duration
    # Convert divisions -> MIDI ticks -> seconds.
    self.midi_ticks = self.duration * (constants.STANDARD_PPQ / self.state.divisions)
    self.seconds = (self.midi_ticks / constants.STANDARD_PPQ) * self.state.seconds_per_quarter
    self.time_position = self.state.time_position
    self.is_grace_note = is_grace_note
    if is_in_chord:
        # Chord notes start at the same time as the chord's first note and
        # do not advance the global time cursor.
        self.time_position = self.state.previous_note.note_duration.time_position
    else:
        self.state.time_position += self.seconds
def _convert_type_to_ratio(self):
    """Convert the MusicXML note-type-value to a Python Fraction.

    Examples:
      - whole = 1/1
      - half = 1/2
      - quarter = 1/4
      - 32nd = 1/32

    Returns:
      A Fraction object representing the note type.
    """
    # TYPE_RATIO_MAP maps note-type strings to Fractions; a KeyError here
    # means an unrecognized note type.
    return self.TYPE_RATIO_MAP[self.type]
def duration_ratio(self):
    """Compute the duration ratio of the note as a Python Fraction.

    Examples:
      - Whole Note = 1
      - Quarter Note = 1/4
      - Dotted Quarter Note = 3/8
      - Triplet eighth note = 1/12

    Returns:
      The duration ratio as a Python Fraction.
    """
    # Base note-type ratio, scaled by any tuplet modification. (The original
    # code also initialized a Fraction(1, 1) accumulator and divided it by
    # the tuplet ratio — dead code, as it was unconditionally reassigned
    # below; both statements are removed.)
    type_ratio = self._convert_type_to_ratio()
    type_ratio /= self.tuplet_ratio
    # Each dot adds half of the previous value: a dotted quarter is
    # 1/4 + 1/8, a double-dotted quarter is 1/4 + 1/8 + 1/16.
    one_half = Fraction(1, 2)
    dot_sum = Fraction(0, 1)
    for dot in range(self.dots):
        dot_sum += (one_half ** (dot + 1)) * type_ratio
    duration_ratio = type_ratio + dot_sum
    # Grace notes take no time.
    if self.is_grace_note:
        duration_ratio = Fraction(0, 1)
    return duration_ratio
def duration_float(self):
    """Return the duration ratio as a float."""
    fraction = self.duration_ratio()
    numerator, denominator = fraction.numerator, fraction.denominator
    return numerator / denominator
def _alter_to_string(self, alter_text):
    """Parse alter text to a string of one or two sharps/flats.

    Args:
      alter_text: A string representation of an integer number of semitones.

    Returns:
      A string, one of 'bb', 'b', '#', '##', or the empty string.

    Raises:
      ChordSymbolParseException: If `alter_text` cannot be parsed to an
        integer, or if the integer is not a valid number of semitones
        between -2 and 2 inclusive.
    """
    try:
        alter_semitones = int(alter_text)
    except ValueError:
        raise ChordSymbolParseException('Non-integer alter: ' + str(alter_text))
    alterations = {(-2): 'bb', (-1): 'b', 0: '', 1: '#', 2: '##'}
    if alter_semitones not in alterations:
        raise ChordSymbolParseException('Invalid alter: ' + str(alter_semitones))
    return alterations[alter_semitones]
def _parse(self):
    """Parse the MusicXML <harmony> element into a chord symbol.

    Raises:
      ChordSymbolParseException: on unknown chord kind, malformed offset,
        or a non-"N.C." chord without a root.
    """
    self.time_position = self.state.time_position
    for child in self.xml_harmony:
        if (child.tag == 'root'):
            self._parse_root(child)
        elif (child.tag == 'kind'):
            if (child.text is None):
                # Empty <kind> element: skip it.
                continue
            kind_text = str(child.text).strip()
            if (kind_text not in self.CHORD_KIND_ABBREVIATIONS):
                raise ChordSymbolParseException('Unknown chord kind: ' + kind_text)
            self.kind = self.CHORD_KIND_ABBREVIATIONS[kind_text]
        elif (child.tag == 'degree'):
            self.degrees.append(self._parse_degree(child))
        elif (child.tag == 'bass'):
            self._parse_bass(child)
        elif (child.tag == 'offset'):
            # <offset> shifts the chord symbol in time, expressed in
            # MusicXML duration divisions.
            try:
                offset = int(child.text)
            except ValueError:
                raise ChordSymbolParseException('Non-integer offset: ' + str(child.text))
            midi_ticks = ((offset * constants.STANDARD_PPQ) / self.state.divisions)
            seconds = ((midi_ticks / constants.STANDARD_PPQ) * self.state.seconds_per_quarter)
            self.time_position += seconds
        else:
            # Ignore all other tag types.
            pass
    if ((self.root is None) and (self.kind != 'N.C.')):
        raise ChordSymbolParseException('Chord symbol must have a root')
def _parse_pitch(self, xml_pitch, step_tag, alter_tag):
    """Parse and return the pitch-like <root> or <bass> element.

    Args:
      xml_pitch: The XML element to parse.
      step_tag: Tag name of the child holding the pitch step.
      alter_tag: Tag name of the child holding the alteration.

    Returns:
      The pitch as a string, e.g. 'C#' or 'Bb'.

    Raises:
      ChordSymbolParseException: if the step is missing or the parser state
        has a nonzero transposition (unsupported for chord symbols).
    """
    step_element = xml_pitch.find(step_tag)
    if step_element is None:
        raise ChordSymbolParseException('Missing pitch step')
    step = step_element.text
    alter_string = ''
    alter_element = xml_pitch.find(alter_tag)
    if alter_element is not None:
        alter_string = self._alter_to_string(alter_element.text)
    if self.state.transpose:
        raise ChordSymbolParseException('Transposition of chord symbols currently unsupported')
    return step + alter_string
def _parse_root(self, xml_root):
    """Parse the <root> tag for a chord symbol."""
    self.root = self._parse_pitch(xml_root, step_tag='root-step', alter_tag='root-alter')
def _parse_bass(self, xml_bass):
    """Parse the <bass> tag for a chord symbol."""
    self.bass = self._parse_pitch(xml_bass, step_tag='bass-step', alter_tag='bass-alter')
def _parse_degree(self, xml_degree):
    """Parse and return the <degree> scale degree modification element.

    Args:
      xml_degree: XML element with tag type 'degree'.

    Returns:
      A string describing the modification, e.g. 'add9', 'no5', '#11'.

    Raises:
      ChordSymbolParseException: on missing/invalid degree value, missing
        modification type, an 'alter' of zero semitones, or an unknown
        modification type.
    """
    value_element = xml_degree.find('degree-value')
    if value_element is None:
        raise ChordSymbolParseException('Missing scale degree value in harmony')
    value_text = value_element.text
    try:
        value = int(value_text)
    except ValueError:
        raise ChordSymbolParseException('Non-integer scale degree: ' + str(value_text))
    alter_string = ''
    alter_element = xml_degree.find('degree-alter')
    if alter_element is not None:
        alter_string = self._alter_to_string(alter_element.text)
    type_element = xml_degree.find('degree-type')
    if type_element is None:
        raise ChordSymbolParseException('Missing degree modification type')
    type_text = type_element.text
    if type_text == 'add':
        # An added altered degree is written with the alteration alone.
        type_string = '' if alter_string else 'add'
    elif type_text == 'subtract':
        type_string = 'no'
        # Alterations are irrelevant for removed degrees.
        alter_string = ''
    elif type_text == 'alter':
        if not alter_string:
            raise ChordSymbolParseException('Degree alteration by zero semitones')
        type_string = ''
    else:
        raise ChordSymbolParseException('Invalid degree modification type: ' + str(type_text))
    return type_string + alter_string + str(value)
def get_figure_string(self):
    """Return a chord symbol figure string, e.g. 'C#m7(add9)/E'."""
    if self.kind == 'N.C.':
        return self.kind
    # Assemble root + kind + degree modifications, then optional bass.
    pieces = [self.root, self.kind]
    pieces.extend('(%s)' % degree for degree in self.degrees)
    figure = ''.join(pieces)
    if self.bass:
        figure += '/' + self.bass
    return figure
def _parse(self):
    """Parse the MusicXML <time> element.

    Raises:
      AlternatingTimeSignatureException: if the element declares more than
        one beats/beat-type pair (alternating meters are unsupported).
    """
    has_multiple = (len(self.xml_time.findall('beats')) > 1
                    or len(self.xml_time.findall('beat-type')) > 1)
    if has_multiple:
        raise AlternatingTimeSignatureException('Alternating Time Signature')
    self.numerator = int(self.xml_time.find('beats').text)
    self.denominator = int(self.xml_time.find('beat-type').text)
    self.time_position = self.state.time_position
def _parse(self):
    """Parse the MusicXML <key> element into a MIDI compatible key.

    If the mode is not minor (e.g. dorian), default to "major"
    because MIDI only supports major and minor modes.
    """
    self.key = int(self.xml_key.find('fifths').text)
    mode_element = self.xml_key.find('mode')
    # Bug fix: the original compared the Element object itself (or None)
    # to the string 'minor', which is never equal, so every key was
    # reported as major. Compare the element's text instead.
    mode = mode_element.text if mode_element is not None else None
    if mode != 'minor':
        mode = 'major'
    self.mode = mode
    self.time_position = self.state.time_position
def _parse(self):
    """Parse the MusicXML <sound> element and retrieve the tempo.

    If the tempo is zero, default to DEFAULT_QUARTERS_PER_MINUTE.
    """
    self.qpm = float(self.xml_sound.get('tempo'))
    if (self.qpm == 0):
        # A zero tempo is invalid; substitute the library default.
        self.qpm = constants.DEFAULT_QUARTERS_PER_MINUTE
    self.time_position = self.state.time_position
def write(self, note_sequence):
    """Serializes a NoteSequence proto and writes it to the file.

    Args:
      note_sequence: A NoteSequence proto to write.
    """
    # Delegate to the TFRecordWriter base class with the serialized bytes.
    tf.python_io.TFRecordWriter.write(self, note_sequence.SerializeToString())
@abc.abstractproperty
def num_classes(self):
    """The number of distinct event encodings.

    Returns:
      An int, the range of ints that can be returned by self.encode_event.
    """
    pass
@abc.abstractproperty
def default_event(self):
    """An event value to use as a default.

    Returns:
      The default event value.
    """
    pass
@abc.abstractmethod
def encode_event(self, event):
    """Convert from an event value to an encoding integer.

    Args:
      event: An event value to encode.

    Returns:
      An integer representing the encoded event, in range
      [0, self.num_classes).
    """
    pass
@abc.abstractmethod
def decode_event(self, index):
    """Convert from an encoding integer to an event value.

    Args:
      index: The encoding, an integer in the range [0, self.num_classes).

    Returns:
      The decoded event value.
    """
    pass
@abc.abstractproperty
def input_size(self):
    """The size of the input vector used by this model.

    Returns:
      An integer, the length of the list returned by self.events_to_input.
    """
    pass
@abc.abstractproperty
def num_classes(self):
    """The range of labels used by this model.

    Returns:
      An integer, the range of integers that can be returned by
      self.events_to_label.
    """
    pass
@abc.abstractproperty
def default_event_label(self):
    """The class label that represents a default event.

    Returns:
      An int, the class label that represents a default event.
    """
    pass
@abc.abstractmethod
def events_to_input(self, events, position):
    """Returns the input vector for the event at the given position.

    Args:
      events: A list-like sequence of events.
      position: An integer event position in the sequence.

    Returns:
      An input vector, a self.input_size length list of floats.
    """
    pass
@abc.abstractmethod
def events_to_label(self, events, position):
    """Returns the label for the event at the given position.

    Args:
      events: A list-like sequence of events.
      position: An integer event position in the sequence.

    Returns:
      A label, an integer in the range [0, self.num_classes).
    """
    pass
@abc.abstractmethod
def class_index_to_event(self, class_index, events):
    """Returns the event for the given class index.

    This is the reverse process of the self.events_to_label method.

    Args:
      class_index: An integer in the range [0, self.num_classes).
      events: A list-like sequence of events.

    Returns:
      An event value.
    """
    pass
def encode(self, events):
    """Returns a SequenceExample for the given event sequence.

    Each model input at step i predicts the label (event) at step i + 1,
    so the final event contributes a label but no input.

    Args:
      events: A list-like sequence of events.

    Returns:
      A tf.train.SequenceExample containing inputs and labels.
    """
    last = len(events) - 1
    inputs = [self.events_to_input(events, i) for i in range(last)]
    labels = [self.events_to_label(events, i + 1) for i in range(last)]
    return sequence_example_lib.make_sequence_example(inputs, labels)
def get_inputs_batch(self, event_sequences, full_length=False):
    """Returns an inputs batch for the given event sequences.

    Args:
      event_sequences: A list of list-like event sequences.
      full_length: If True, the inputs batch covers the full length of each
        event sequence; if False, only the last event of each sequence. A
        full-length batch is used for the first generation step (to
        initialize the RNN cell state from the priming sequence); later
        steps only need the last event.

    Returns:
      An inputs batch. Shape is
      [len(event_sequences), len(event_sequences[0]), INPUT_SIZE] when
      `full_length` is True, else [len(event_sequences), 1, INPUT_SIZE].
    """
    inputs_batch = []
    for events in event_sequences:
        if full_length:
            inputs = [self.events_to_input(events, i) for i in range(len(events))]
        else:
            inputs = [self.events_to_input(events, len(events) - 1)]
        inputs_batch.append(inputs)
    return inputs_batch
def extend_event_sequences(self, event_sequences, softmax):
    """Extends the event sequences by sampling the softmax probabilities.

    Args:
      event_sequences: A list of EventSequence objects.
      softmax: A list of softmax probability vectors, one per event
        sequence; only the final step of each is sampled.

    Returns:
      A Python list of chosen class indices, one for each event sequence.
    """
    num_classes = len(softmax[0][0])
    chosen_classes = []
    for sequence, distribution in zip(event_sequences, softmax):
        # Sample the next class from the final-step distribution.
        chosen_class = np.random.choice(num_classes, p=distribution[-1])
        sequence.append(self.class_index_to_event(chosen_class, sequence))
        chosen_classes.append(chosen_class)
    return chosen_classes
def evaluate_log_likelihood(self, event_sequences, softmax):
    """Evaluate the log likelihood of multiple event sequences.

    Each event sequence is evaluated from the end. If the softmax vector
    has one entry fewer than the sequence has events, the whole sequence
    (except the unmodeled first event) is evaluated; a shorter softmax
    evaluates only the sequence's tail.

    Args:
      event_sequences: A list of EventSequence objects.
      softmax: A list of softmax probability vectors, one per sequence.

    Returns:
      A Python list containing the log likelihood of each event sequence.

    Raises:
      ValueError: If a softmax vector is too long for its event sequence.
    """
    all_loglik = []
    for sequence, sequence_softmax in zip(event_sequences, softmax):
        if len(sequence_softmax) >= len(sequence):
            raise ValueError('event sequence must be longer than softmax vector (%d events but softmax vector has length %d)' % (len(sequence), len(sequence_softmax)))
        start_pos = len(sequence) - len(sequence_softmax)
        loglik = 0.0
        for offset, distribution in enumerate(sequence_softmax):
            label = self.events_to_label(sequence, start_pos + offset)
            loglik += np.log(distribution[label])
        all_loglik.append(loglik)
    return all_loglik
def __init__(self, one_hot_encoding):
    """Initialize a OneHotEventSequenceEncoderDecoder object.

    Args:
      one_hot_encoding: A OneHotEncoding object that transforms events to
        and from integer indices.
    """
    self._one_hot_encoding = one_hot_encoding
def events_to_input(self, events, position):
    """Returns the input vector for the given position in the event sequence.

    Returns a one-hot vector for the given position in the event sequence,
    as determined by the one hot encoding.

    Args:
      events: A list-like sequence of events.
      position: An integer event position in the event sequence.

    Returns:
      An input vector, a list of floats.
    """
    hot_index = self._one_hot_encoding.encode_event(events[position])
    input_ = [0.0] * self.input_size
    input_[hot_index] = 1.0
    return input_
def events_to_label(self, events, position):
    """Returns the label for the given position in the event sequence.

    The label is the zero-based one-hot index of the event at `position`.

    Args:
      events: A list-like sequence of events.
      position: An integer event position in the event sequence.

    Returns:
      A label, an integer.
    """
    event = events[position]
    return self._one_hot_encoding.encode_event(event)
def class_index_to_event(self, class_index, events):
    """Returns the event for the given class index.

    This is the reverse process of the self.events_to_label method.

    Args:
      class_index: An integer in the range [0, self.num_classes).
      events: A list-like sequence of events; unused here.

    Returns:
      An event value.
    """
    del events  # Unused: decoding depends only on the class index.
    return self._one_hot_encoding.decode_event(class_index)
def __init__(self, one_hot_encoding, lookback_distances=None, binary_counter_bits=5):
    """Initializes the LookbackEventSequenceEncoderDecoder.

    Args:
      one_hot_encoding: A OneHotEncoding object that transforms events to
        and from integer indices.
      lookback_distances: A list of step intervals to look back in history
        to encode both the following event and whether the current step is
        a repeat. If None, use default lookback distances.
      binary_counter_bits: The number of input bits to use as a counter for
        the metric position of the next event.
    """
    self._one_hot_encoding = one_hot_encoding
    if lookback_distances is None:
        self._lookback_distances = DEFAULT_LOOKBACK_DISTANCES
    else:
        self._lookback_distances = lookback_distances
    self._binary_counter_bits = binary_counter_bits
def events_to_input(self, events, position):
    """Returns the input vector for the given position in the event sequence.

    The vector concatenates, in order: a one-hot of the current event, a
    one-hot of the "next" event for each lookback distance, binary counter
    bits for the metric position of the next step, and one repeat flag per
    lookback distance. With 38 one-hot classes, two lookbacks and five
    counter bits this yields self.input_size == 121.

    Args:
      events: A list-like sequence of events.
      position: An integer position in the event sequence.

    Returns:
      An input vector, a self.input_size length list of floats.
    """
    input_ = [0.0] * self.input_size
    offset = 0
    # One-hot encoding of the current event.
    input_[self._one_hot_encoding.encode_event(events[position])] = 1.0
    offset += self._one_hot_encoding.num_classes
    # One-hot of the event following each lookback point; before the start
    # of the sequence the default event stands in.
    for distance in self._lookback_distances:
        lookback_position = position - distance + 1
        if lookback_position < 0:
            event = self._one_hot_encoding.default_event
        else:
            event = events[lookback_position]
        input_[offset + self._one_hot_encoding.encode_event(event)] = 1.0
        offset += self._one_hot_encoding.num_classes
    # Binary counters (+1/-1 bits) for the position of the next step.
    step_number = position + 1
    for bit in range(self._binary_counter_bits):
        input_[offset] = 1.0 if (step_number // (2 ** bit)) % 2 else -1.0
        offset += 1
    # Repeat flags: 1.0 when the current event equals the event one
    # lookback-distance earlier.
    for distance in self._lookback_distances:
        lookback_position = position - distance
        if lookback_position >= 0 and events[position] == events[lookback_position]:
            input_[offset] = 1.0
        offset += 1
    assert offset == self.input_size
    return input_
def events_to_label(self, events, position):
    """Returns the label for the given position in the event sequence.

    Indices in [0, num_classes) are standard events; the next
    len(lookback_distances) indices signal "repeat the event from that
    lookback distance". More distant repeats are preferred, and standard
    events are used only when no repeat applies.

    Args:
      events: A list-like sequence of events.
      position: An integer position in the event sequence.

    Returns:
      A label, an integer.
    """
    num_classes = self._one_hot_encoding.num_classes
    lookbacks = self._lookback_distances
    # Near the start of the sequence, default events are labeled as a
    # repeat of the farthest lookback.
    if (lookbacks and position < lookbacks[-1]
            and events[position] == self._one_hot_encoding.default_event):
        return num_classes + len(lookbacks) - 1
    # Prefer the most distant matching lookback.
    for i in range(len(lookbacks) - 1, -1, -1):
        lookback_position = position - lookbacks[i]
        if lookback_position >= 0 and events[position] == events[lookback_position]:
            return num_classes + i
    return self._one_hot_encoding.encode_event(events[position])
def class_index_to_event(self, class_index, events):
    """Returns the event for the given class index.

    This is the reverse process of the self.events_to_label method.

    Args:
      class_index: An int in the range [0, self.num_classes).
      events: The current event sequence.

    Returns:
      An event value.
    """
    num_classes = self._one_hot_encoding.num_classes
    # Repeat labels copy the event from the corresponding lookback point;
    # if the sequence is too short, the default event is used instead.
    for i in range(len(self._lookback_distances) - 1, -1, -1):
        if class_index == num_classes + i:
            distance = self._lookback_distances[i]
            if len(events) < distance:
                return self._one_hot_encoding.default_event
            return events[-distance]
    return self._one_hot_encoding.decode_event(class_index)
def __init__(self, control_encoder_decoder, target_encoder_decoder):
    """Initialize a ConditionalEventSequenceEncoderDecoder object.

    Args:
      control_encoder_decoder: The EventSequenceEncoderDecoder to
        encode/decode the control sequence.
      target_encoder_decoder: The EventSequenceEncoderDecoder to
        encode/decode the target sequence.
    """
    self._control_encoder_decoder = control_encoder_decoder
    self._target_encoder_decoder = target_encoder_decoder
@property
def input_size(self):
    """The size of the concatenated control and target input vectors.

    Returns:
      An integer, the size of an input vector.
    """
    control_size = self._control_encoder_decoder.input_size
    target_size = self._target_encoder_decoder.input_size
    return control_size + target_size
@property
def num_classes(self):
    """The range of target labels used by this model.

    Returns:
        An integer, the range of integers that can be returned by
        self.events_to_label.
    """
    # Labels come entirely from the target sub-encoder.
    target = self._target_encoder_decoder
    return target.num_classes
@property
def default_event_label(self):
    """The class label that represents a default target event.

    Returns:
        An integer, the class label that represents a default target event.
    """
    # Delegated to the target sub-encoder; the control sequence has no say.
    target = self._target_encoder_decoder
    return target.default_event_label
def events_to_input(self, control_events, target_events, position):
    """Returns the input vector for the given position in the sequence pair.

    The vector is the concatenation of the control input vector at
    `position + 1` and the target input vector at `position`: when
    predicting the target label at position `i + 1`, the model is
    conditioned on the next control event and the current target event.

    Args:
        control_events: A list-like sequence of control events.
        target_events: A list-like sequence of target events.
        position: An integer event position in the event sequences.

    Returns:
        An input vector, a list of floats.
    """
    control_input = self._control_encoder_decoder.events_to_input(
        control_events, position + 1)
    target_input = self._target_encoder_decoder.events_to_input(
        target_events, position)
    return control_input + target_input
def events_to_label(self, target_events, position):
    """Returns the label for the given position in the target event sequence.

    Labels are determined entirely by the target encoder/decoder; the
    control sequence does not affect labeling.

    Args:
        target_events: A list-like sequence of target events.
        position: An integer event position in the target event sequence.

    Returns:
        A label, an integer.
    """
    target = self._target_encoder_decoder
    return target.events_to_label(target_events, position)
def class_index_to_event(self, class_index, target_events):
    """Returns the target event for the given class index.

    This is the reverse process of the self.events_to_label method; the
    lookup is delegated to the target encoder/decoder.

    Args:
        class_index: An integer in the range [0, self.num_classes).
        target_events: A list-like sequence of target events.

    Returns:
        A target event value.
    """
    target = self._target_encoder_decoder
    return target.class_index_to_event(class_index, target_events)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.