desc (stringlengths 3–26.7k) | decl (stringlengths 11–7.89k) | bodies (stringlengths 8–553k)
---|---|---|
'Returns a SequenceExample for the given event sequence pair.
Args:
control_events: A list-like sequence of control events.
target_events: A list-like sequence of target events, the same length as
`control_events`.
Returns:
A tf.train.SequenceExample containing inputs and labels.
Raises:
ValueError: If the control and target event sequences have different
length.'
| def encode(self, control_events, target_events):
| if (len(control_events) != len(target_events)):
raise ValueError(('must have the same number of control and target events (%d control events but %d target events)' % (len(control_events), len(target_events))))
inputs = []
labels = []
for i in range((len(target_events) - 1)):
inputs.append(self.events_to_input(control_events, target_events, i))
labels.append(self.events_to_label(target_events, (i + 1)))
return sequence_example_lib.make_sequence_example(inputs, labels)
|
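A minimal sketch of the one-step-ahead alignment `encode` produces, using plain Python values in place of the real `events_to_input`/`events_to_label` encodings (the event values are illustrative only):

    control_events = ['C', 'F', 'G', 'C']
    target_events = [60, 65, 67, 60]
    # Mirror the loop above: inputs cover positions 0..n-2, labels 1..n-1,
    # so the model predicts each target event from the history before it.
    inputs = [(control_events[i], target_events[i]) for i in range(len(target_events) - 1)]
    labels = [target_events[i + 1] for i in range(len(target_events) - 1)]
    assert inputs == [('C', 60), ('F', 65), ('G', 67)]
    assert labels == [65, 67, 60]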
'Returns an inputs batch for the given control and target event sequences.
Args:
control_event_sequences: A list of list-like control event sequences.
target_event_sequences: A list of list-like target event sequences, the
same length as `control_event_sequences`. Each target event sequence
must be shorter than the corresponding control event sequence.
full_length: If True, the inputs batch will be for the full length of
each control/target event sequence pair. If False, the inputs batch
will only be for the last event of each target event sequence. A full-
length inputs batch is used for the first step of extending the target
event sequences, since the RNN cell state needs to be initialized with
the priming target sequence. For subsequent generation steps, only a
last-event inputs batch is used.
Returns:
An inputs batch. If `full_length` is True, the shape will be
[len(target_event_sequences), len(target_event_sequences[0]), INPUT_SIZE].
If `full_length` is False, the shape will be
[len(target_event_sequences), 1, INPUT_SIZE].
Raises:
ValueError: If there are a different number of control and target event
sequences, or if one of the target event sequences is not shorter
than the corresponding control event sequence.'
| def get_inputs_batch(self, control_event_sequences, target_event_sequences, full_length=False):
| if (len(control_event_sequences) != len(target_event_sequences)):
raise ValueError(('%d control event sequences but %d target event sequences' % (len(control_event_sequences), len(target_event_sequences))))
inputs_batch = []
for (control_events, target_events) in zip(control_event_sequences, target_event_sequences):
if (len(control_events) <= len(target_events)):
raise ValueError(('control event sequence must be longer than target event sequence (%d control events but %d target events)' % (len(control_events), len(target_events))))
inputs = []
if full_length:
for i in range(len(target_events)):
inputs.append(self.events_to_input(control_events, target_events, i))
else:
inputs.append(self.events_to_input(control_events, target_events, (len(target_events) - 1)))
inputs_batch.append(inputs)
return inputs_batch
|
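A shape sketch for `get_inputs_batch`, assuming `encoder_decoder` and the two sequence lists are already defined (the names are placeholders):

    # full_length=True: one input per target event, needed on the first
    # generation step to prime the RNN cell state.
    batch = encoder_decoder.get_inputs_batch(
        control_event_sequences, target_event_sequences, full_length=True)
    assert len(batch) == len(target_event_sequences)
    assert len(batch[0]) == len(target_event_sequences[0])
    # full_length=False: only the last event of each target sequence,
    # used on every subsequent generation step.
    batch = encoder_decoder.get_inputs_batch(
        control_event_sequences, target_event_sequences)
    assert len(batch[0]) == 1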
'Extends the event sequences by sampling the softmax probabilities.
Args:
target_event_sequences: A list of target EventSequence objects.
softmax: A list of softmax probability vectors. The list of softmaxes
should be the same length as the list of event sequences.
Returns:
A Python list of chosen class indices, one for each target event sequence.'
| def extend_event_sequences(self, target_event_sequences, softmax):
| return self._target_encoder_decoder.extend_event_sequences(target_event_sequences, softmax)
|
'Evaluate the log likelihood of multiple target event sequences.
Args:
target_event_sequences: A list of target EventSequence objects.
softmax: A list of softmax probability vectors. The list of softmaxes
should be the same length as the list of target event sequences. The
softmax vectors are assumed to have been generated by a full-length
inputs batch.
Returns:
A Python list containing the log likelihood of each target event sequence.'
| def evaluate_log_likelihood(self, target_event_sequences, softmax):
| return self._target_encoder_decoder.evaluate_log_likelihood(target_event_sequences, softmax)
|
'Initialize a MultipleEventSequenceEncoder object.
Args:
encoders: A list of component EventSequenceEncoderDecoder objects whose
output will be concatenated.
encode_single_sequence: If True, at encoding time all of the encoders will
be applied to a single event sequence. If False, each event of the
event sequence should be a tuple with size the same as the number of
encoders, each of which will be applied to the events in the
corresponding position in the tuple, i.e. the first encoder will be
applied to the first element of each event tuple, the second encoder
will be applied to the second element, etc.'
| def __init__(self, encoders, encode_single_sequence=False):
| self._encoders = encoders
self._encode_single_sequence = encode_single_sequence
|
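A sketch of the two encoding modes, assuming two hypothetical component encoder-decoders `encoder_a` and `encoder_b`:

    # encode_single_sequence=True: every encoder sees the same sequence and
    # their outputs are concatenated.
    encoder = MultipleEventSequenceEncoder(
        [encoder_a, encoder_b], encode_single_sequence=True)
    # encode_single_sequence=False: each event is a tuple with one element
    # per encoder; encoder_a sees element 0, encoder_b sees element 1.
    encoder = MultipleEventSequenceEncoder([encoder_a, encoder_b])
    events = [(60, 'C'), (62, 'G7'), (64, 'C')]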
'Constructs an EncoderPipeline.
Args:
input_type: The type this pipeline expects as input.
encoder_decoder: An EventSequenceEncoderDecoder.
name: A unique pipeline name.'
| def __init__(self, input_type, encoder_decoder, name=None):
| super(EncoderPipeline, self).__init__(input_type=input_type, output_type=tf.train.SequenceExample, name=name)
self._encoder_decoder = encoder_decoder
|
'Verify sustain controls extend notes until the end of the control.'
| def testApplySustainControlChanges(self):
| sequence = copy.copy(self.note_sequence)
testing_lib.add_control_changes_to_sequence(sequence, 0, [(0.0, 64, 127), (0.75, 64, 0), (2.0, 64, 127), (3.0, 64, 0), (3.75, 64, 127), (4.5, 64, 127), (4.8, 64, 0), (4.9, 64, 127), (6.0, 64, 0)])
testing_lib.add_track_to_sequence(sequence, 1, [(12, 100, 0.01, 10.0), (52, 99, 4.75, 5.0)])
expected_sequence = copy.copy(sequence)
testing_lib.add_track_to_sequence(sequence, 0, [(11, 55, 0.22, 0.5), (40, 45, 2.5, 3.5), (55, 120, 4.0, 4.01)])
testing_lib.add_track_to_sequence(expected_sequence, 0, [(11, 55, 0.22, 0.75), (40, 45, 2.5, 3.5), (55, 120, 4.0, 4.8)])
sus_sequence = sequences_lib.apply_sustain_control_changes(sequence)
self.assertProtoEquals(expected_sequence, sus_sequence)
|
'Verify that sustain control handles repeated notes correctly.
For example, a single pitch played before sustain:
x-- x-- x--
After sustain:
x---x---x--
Notes should be extended until either the end of the sustain control or the
beginning of another note of the same pitch.'
| def testApplySustainControlChangesWithRepeatedNotes(self):
| sequence = copy.copy(self.note_sequence)
testing_lib.add_control_changes_to_sequence(sequence, 0, [(1.0, 64, 127), (4.0, 64, 0)])
expected_sequence = copy.copy(sequence)
testing_lib.add_track_to_sequence(sequence, 0, [(60, 100, 0.25, 1.5), (60, 100, 1.25, 1.5), (72, 100, 2.0, 3.5), (60, 100, 2.0, 3.0), (60, 100, 3.5, 4.5)])
testing_lib.add_track_to_sequence(expected_sequence, 0, [(60, 100, 0.25, 1.25), (60, 100, 1.25, 2.0), (72, 100, 2.0, 4.0), (60, 100, 2.0, 3.5), (60, 100, 3.5, 4.5)])
sus_sequence = sequences_lib.apply_sustain_control_changes(sequence)
self.assertProtoEquals(expected_sequence, sus_sequence)
|
'Repeated notes before sustain can overlap and should not be modified.
Once a repeat happens within the sustain, any active notes should end
before the next one starts.
This is kind of an edge case because a note overlapping a note of the same
pitch may not make sense, but apply_sustain_control_changes tries not to
modify events that happen outside of a sustain.'
| def testApplySustainControlChangesWithRepeatedNotesBeforeSustain(self):
| sequence = copy.copy(self.note_sequence)
testing_lib.add_control_changes_to_sequence(sequence, 0, [(1.0, 64, 127), (4.0, 64, 0)])
expected_sequence = copy.copy(sequence)
testing_lib.add_track_to_sequence(sequence, 0, [(60, 100, 0.25, 1.5), (60, 100, 0.5, 1.5), (60, 100, 1.25, 2.0)])
testing_lib.add_track_to_sequence(expected_sequence, 0, [(60, 100, 0.25, 1.25), (60, 100, 0.5, 1.25), (60, 100, 1.25, 4.0)])
sus_sequence = sequences_lib.apply_sustain_control_changes(sequence)
self.assertProtoEquals(expected_sequence, sus_sequence)
|
'Test sustain on and off events happening at the same time.
The off event should be processed last, so this should be a no-op.'
| def testApplySustainControlChangesSimultaneousOnOff(self):
| sequence = copy.copy(self.note_sequence)
testing_lib.add_control_changes_to_sequence(sequence, 0, [(1.0, 64, 127), (1.0, 64, 0)])
testing_lib.add_track_to_sequence(sequence, 0, [(60, 100, 0.5, 1.5), (60, 100, 2.0, 3.0)])
sus_sequence = sequences_lib.apply_sustain_control_changes(sequence)
self.assertProtoEquals(sequence, sus_sequence)
|
'Test sustain control extending the duration of the final note.'
| def testApplySustainControlChangesExtendNotesToEnd(self):
| sequence = copy.copy(self.note_sequence)
testing_lib.add_control_changes_to_sequence(sequence, 0, [(1.0, 64, 127), (4.0, 64, 0)])
expected_sequence = copy.copy(sequence)
testing_lib.add_track_to_sequence(sequence, 0, [(60, 100, 0.5, 1.5), (72, 100, 2.0, 3.0)])
testing_lib.add_track_to_sequence(expected_sequence, 0, [(60, 100, 0.5, 4.0), (72, 100, 2.0, 4.0)])
expected_sequence.total_time = 4.0
sus_sequence = sequences_lib.apply_sustain_control_changes(sequence)
self.assertProtoEquals(expected_sequence, sus_sequence)
|
'Test applying extraneous sustain control at the end of the sequence.'
| def testApplySustainControlChangesExtraneousSustain(self):
| sequence = copy.copy(self.note_sequence)
testing_lib.add_control_changes_to_sequence(sequence, 0, [(4.0, 64, 127), (5.0, 64, 0)])
expected_sequence = copy.copy(sequence)
testing_lib.add_track_to_sequence(sequence, 0, [(60, 100, 0.5, 1.5), (72, 100, 2.0, 3.0)])
testing_lib.add_track_to_sequence(expected_sequence, 0, [(60, 100, 0.5, 1.5), (72, 100, 2.0, 3.0)])
expected_sequence.total_time = 3.0
sus_sequence = sequences_lib.apply_sustain_control_changes(sequence)
self.assertProtoEquals(expected_sequence, sus_sequence)
|
'In the case of identical notes, one should be dropped.
This is an edge case because in most cases, the same pitch should not sound
twice at the same time on one instrument.'
| def testApplySustainControlChangesWithIdenticalNotes(self):
| sequence = copy.copy(self.note_sequence)
testing_lib.add_control_changes_to_sequence(sequence, 0, [(1.0, 64, 127), (4.0, 64, 0)])
expected_sequence = copy.copy(sequence)
testing_lib.add_track_to_sequence(sequence, 0, [(60, 100, 2.0, 2.5), (60, 100, 2.0, 2.5)])
testing_lib.add_track_to_sequence(expected_sequence, 0, [(60, 100, 2.0, 4.0)])
sus_sequence = sequences_lib.apply_sustain_control_changes(sequence)
self.assertProtoEquals(expected_sequence, sus_sequence)
|
'Construct a Melody.'
| def __init__(self, events=None, **kwargs):
| if ('pad_event' in kwargs):
del kwargs['pad_event']
super(Melody, self).__init__(pad_event=MELODY_NO_EVENT, events=events, **kwargs)
|
'Initializes with a list of event values and sets attributes.
Args:
events: List of Melody events to set melody to.
start_step: The integer starting step offset.
steps_per_bar: The number of steps in a bar.
steps_per_quarter: The number of steps in a quarter note.
Raises:
ValueError: If `events` contains an event that is not in the proper range.'
| def _from_event_list(self, events, start_step=0, steps_per_bar=DEFAULT_STEPS_PER_BAR, steps_per_quarter=DEFAULT_STEPS_PER_QUARTER):
| for event in events:
if (not (MIN_MELODY_EVENT <= event <= MAX_MELODY_EVENT)):
raise ValueError(('Melody event out of range: %d' % event))
super(Melody, self)._from_event_list(events, start_step=start_step, steps_per_bar=steps_per_bar, steps_per_quarter=steps_per_quarter)
|
'Adds the given note to the `events` list.
The event at `start_step` is set to the given pitch and the event at
`end_step` to NOTE_OFF.
Everything after `start_step` in `events` is deleted before the note is
added. `events`\'s length will be changed so that the last event has index
`end_step`.
Args:
pitch: Midi pitch. An integer between 0 and 127 inclusive.
start_step: A non-negative integer step that the note begins on.
end_step: An integer step that the note ends on. The note is considered to
end at the onset of the end step. `end_step` must be greater than
`start_step`.
Raises:
BadNoteException: If `start_step` does not precede `end_step`.'
| def _add_note(self, pitch, start_step, end_step):
| if (start_step >= end_step):
raise BadNoteException(('Start step does not precede end step: start=%d, end=%d' % (start_step, end_step)))
self.set_length((end_step + 1))
self._events[start_step] = pitch
self._events[end_step] = MELODY_NOTE_OFF
for i in range((start_step + 1), end_step):
self._events[i] = MELODY_NO_EVENT
|
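A worked example of the events layout `_add_note` produces, assuming the special event values described elsewhere in this file (MELODY_NO_EVENT = -2, MELODY_NOTE_OFF = -1):

    melody = Melody()
    melody._add_note(pitch=60, start_step=2, end_step=5)
    # set_length(end_step + 1) pads to six events, then the note is written:
    #   step:   0   1   2   3   4   5
    #   event: -2  -2  60  -2  -2  -1
    assert list(melody) == [-2, -2, 60, -2, -2, -1]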
'Returns indexes of the most recent pitch and NOTE_OFF events.
Returns:
A tuple (start_step, end_step) of the last note\'s on and off event
indices.
Raises:
ValueError: If `events` contains no NOTE_OFF or pitch events.'
| def _get_last_on_off_events(self):
| last_off = len(self)
for i in range((len(self) - 1), (-1), (-1)):
if (self._events[i] == MELODY_NOTE_OFF):
last_off = i
if (self._events[i] >= MIN_MIDI_PITCH):
return (i, last_off)
raise ValueError('No events in the stream')
|
'Gets a histogram of the note occurrences in a melody.
Returns:
A list of 12 ints, one for each note value (C at index 0 through B at
index 11). Each int is the total number of times that note occurred in
the melody.'
| def get_note_histogram(self):
| np_melody = np.array(self._events, dtype=int)
return np.bincount((np_melody[(np_melody >= MIN_MIDI_PITCH)] % NOTES_PER_OCTAVE), minlength=NOTES_PER_OCTAVE)
|
'Gets a histogram of how many notes fit into each key.
Returns:
A list of 12 ints, one for each Major key (C Major at index 0 through
B Major at index 11). Each int is the total number of notes that could
fit into that key.'
| def get_major_key_histogram(self):
| note_histogram = self.get_note_histogram()
key_histogram = np.zeros(NOTES_PER_OCTAVE)
for (note, count) in enumerate(note_histogram):
key_histogram[NOTE_KEYS[note]] += count
return key_histogram
|
'Finds the major key that this melody most likely belongs to.
If multiple keys match equally, the key with the lowest index is returned,
where the indexes of the keys are C Major = 0 through B Major = 11.
Returns:
An int for the most likely key (C Major = 0 through B Major = 11).'
| def get_major_key(self):
| key_histogram = self.get_major_key_histogram()
return key_histogram.argmax()
|
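A sketch of the key heuristic. NOTE_KEYS[pitch_class] lists the major keys that pitch class is diatonic to (seven per pitch class; e.g. C belongs to C, Db, Eb, F, G, Ab, and Bb Major), so a scale melody maximizes its own key:

    melody = Melody([60, 62, 64, 65, 67, 69, 71])  # one octave of C Major
    # All seven pitch classes fit C Major; neighboring keys match at most six.
    assert melody.get_major_key() == 0             # argmax breaks ties low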
'Appends the event to the end of the melody and increments the end step.
An implicit NOTE_OFF at the end of the melody will not be respected by this
modification.
Args:
event: The integer Melody event to append to the end.
Raises:
ValueError: If `event` is not in the proper range.'
| def append(self, event):
| if (not (MIN_MELODY_EVENT <= event <= MAX_MELODY_EVENT)):
raise ValueError(('Event out of range: %d' % event))
super(Melody, self).append(event)
|
'Populate self with a melody from the given quantized NoteSequence.
A monophonic melody is extracted from the given `instrument` starting at
`search_start_step`. `instrument` and `search_start_step` can be used to
drive extraction of multiple melodies from the same quantized sequence. The
end step of the extracted melody will be stored in `self._end_step`.
0 velocity notes are ignored. The melody extraction is ended when there are
no held notes for a time stretch of `gap_bars` in bars (measures) of music.
The number of time steps per bar is computed from the time signature in
`quantized_sequence`.
`ignore_polyphonic_notes` determines what happens when polyphonic (multiple
notes start at the same time) data is encountered. If
`ignore_polyphonic_notes` is true, the highest pitch is used in the melody
when multiple notes start at the same time. If false, an exception is
raised.
Args:
quantized_sequence: A NoteSequence quantized with
sequences_lib.quantize_note_sequence.
search_start_step: Start searching for a melody at this time step. Assumed
to be the first step of a bar.
instrument: Search for a melody in this instrument number.
gap_bars: If this many bars or more follow a NOTE_OFF event, the melody
is ended.
ignore_polyphonic_notes: If True, the highest pitch is used in the melody
when multiple notes start at the same time. If False,
PolyphonicMelodyException will be raised if multiple notes start at
the same time.
pad_end: If True, the end of the melody will be padded with NO_EVENTs so
that it will end at a bar boundary.
filter_drums: If True, notes for which `is_drum` is True will be ignored.
Raises:
NonIntegerStepsPerBarException: If `quantized_sequence`\'s bar length
(derived from its time signature) is not an integer number of time
steps.
PolyphonicMelodyException: If any of the notes start on the same step
and `ignore_polyphonic_notes` is False.'
| def from_quantized_sequence(self, quantized_sequence, search_start_step=0, instrument=0, gap_bars=1, ignore_polyphonic_notes=False, pad_end=False, filter_drums=True):
| sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
self._reset()
steps_per_bar_float = sequences_lib.steps_per_bar_in_quantized_sequence(quantized_sequence)
if ((steps_per_bar_float % 1) != 0):
raise events_lib.NonIntegerStepsPerBarException(('There are %f timesteps per bar. Time signature: %d/%d' % (steps_per_bar_float, quantized_sequence.time_signatures[0].numerator, quantized_sequence.time_signatures[0].denominator)))
self._steps_per_bar = steps_per_bar = int(steps_per_bar_float)
self._steps_per_quarter = quantized_sequence.quantization_info.steps_per_quarter
notes = sorted([n for n in quantized_sequence.notes if ((n.instrument == instrument) and (n.quantized_start_step >= search_start_step))], key=(lambda note: (note.quantized_start_step, (- note.pitch))))
if (not notes):
return
melody_start_step = (notes[0].quantized_start_step - ((notes[0].quantized_start_step - search_start_step) % steps_per_bar))
for note in notes:
if (filter_drums and note.is_drum):
continue
if (not note.velocity):
continue
start_index = (note.quantized_start_step - melody_start_step)
end_index = (note.quantized_end_step - melody_start_step)
if (not self._events):
self._add_note(note.pitch, start_index, end_index)
continue
(last_on, last_off) = self._get_last_on_off_events()
on_distance = (start_index - last_on)
off_distance = (start_index - last_off)
if (on_distance == 0):
if ignore_polyphonic_notes:
continue
else:
self._reset()
raise PolyphonicMelodyException()
elif (on_distance < 0):
raise PolyphonicMelodyException('Unexpected note. Not in ascending order.')
if (len(self) and (off_distance >= (gap_bars * steps_per_bar))):
break
self._add_note(note.pitch, start_index, end_index)
if (not self._events):
return
self._start_step = melody_start_step
if (self._events[(-1)] == MELODY_NOTE_OFF):
del self._events[(-1)]
length = len(self)
if pad_end:
length += ((- len(self)) % steps_per_bar)
self.set_length(length)
|
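A usage sketch, assuming a NoteSequence proto `note_sequence` and the magenta.music quantization API:

    quantized = sequences_lib.quantize_note_sequence(note_sequence, steps_per_quarter=4)
    melody = Melody()
    melody.from_quantized_sequence(
        quantized,
        search_start_step=0,
        instrument=0,
        gap_bars=1,
        ignore_polyphonic_notes=True,  # keep the highest simultaneous note
        pad_end=True)                  # pad with NO_EVENTs to a bar boundary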
'Converts the Melody to NoteSequence proto.
The end of the melody is treated as a NOTE_OFF event for any sustained
notes.
Args:
velocity: Midi velocity to give each note. Between 1 and 127 (inclusive).
instrument: Midi instrument to give each note.
program: Midi program to give each note.
sequence_start_time: A time in seconds (float) that the first note in the
sequence will land on.
qpm: Quarter notes per minute (float).
Returns:
A NoteSequence proto encoding the given melody.'
| def to_sequence(self, velocity=100, instrument=0, program=0, sequence_start_time=0.0, qpm=120.0):
| seconds_per_step = ((60.0 / qpm) / self.steps_per_quarter)
sequence = music_pb2.NoteSequence()
sequence.tempos.add().qpm = qpm
sequence.ticks_per_quarter = STANDARD_PPQ
sequence_start_time += (self.start_step * seconds_per_step)
current_sequence_note = None
for (step, note) in enumerate(self):
if (MIN_MIDI_PITCH <= note <= MAX_MIDI_PITCH):
if (current_sequence_note is not None):
current_sequence_note.end_time = ((step * seconds_per_step) + sequence_start_time)
current_sequence_note = sequence.notes.add()
current_sequence_note.start_time = ((step * seconds_per_step) + sequence_start_time)
current_sequence_note.pitch = note
current_sequence_note.velocity = velocity
current_sequence_note.instrument = instrument
current_sequence_note.program = program
elif (note == MELODY_NOTE_OFF):
if (current_sequence_note is not None):
current_sequence_note.end_time = ((step * seconds_per_step) + sequence_start_time)
current_sequence_note = None
if (current_sequence_note is not None):
current_sequence_note.end_time = ((len(self) * seconds_per_step) + sequence_start_time)
if sequence.notes:
sequence.total_time = sequence.notes[(-1)].end_time
return sequence
|
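The timing arithmetic above in isolation: each step lasts 60 / qpm / steps_per_quarter seconds, so at the defaults (qpm=120, 4 steps per quarter note) a step is 0.125 seconds:

    qpm, steps_per_quarter = 120.0, 4
    seconds_per_step = 60.0 / qpm / steps_per_quarter
    assert seconds_per_step == 0.125  # step 8 starts at 1.0 s plus sequence_start_time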
'Transpose notes in this Melody.
All notes are transposed the specified amount. Additionally, all notes
are octave shifted to lie within the [min_note, max_note) range.
Args:
transpose_amount: The number of half steps to transpose this Melody.
Positive values transpose up. Negative values transpose down.
min_note: Minimum pitch (inclusive) that the resulting notes will take on.
max_note: Maximum pitch (exclusive) that the resulting notes will take on.'
| def transpose(self, transpose_amount, min_note=0, max_note=128):
| for i in range(len(self)):
if (self._events[i] >= MIN_MIDI_PITCH):
self._events[i] += transpose_amount
if (self._events[i] < min_note):
self._events[i] = (min_note + ((self._events[i] - min_note) % NOTES_PER_OCTAVE))
elif (self._events[i] >= max_note):
self._events[i] = ((max_note - NOTES_PER_OCTAVE) + ((self._events[i] - max_note) % NOTES_PER_OCTAVE))
|
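A worked example of the octave folding in `transpose`, with transpose_amount=7, min_note=60, max_note=72 and NOTES_PER_OCTAVE = 12:

    pitch = 68 + 7                        # 75, at or above max_note
    pitch = (72 - 12) + (75 - 72) % 12    # fold down an octave: 60 + 3
    assert pitch == 63                    # back inside [60, 72)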
'Transpose and octave shift the notes in this Melody.
The key center of this melody is computed with a heuristic, and the notes
are transposed to be in the given key. The melody is also octave shifted
to be centered in the given range. Additionally, all notes are octave
shifted to lie within a given range.
Args:
min_note: Minimum pitch (inclusive) that the resulting notes will take on.
max_note: Maximum pitch (exclusive) that the resulting notes will take on.
transpose_to_key: The key to transpose the melody into, or None if the
melody should not be transposed. 0 = C Major.
Returns:
The number of half steps the notes were transposed by.'
| def squash(self, min_note, max_note, transpose_to_key=None):
| if (transpose_to_key is None):
transpose_amount = 0
else:
melody_key = self.get_major_key()
key_diff = (transpose_to_key - melody_key)
midi_notes = [note for note in self._events if (MIN_MIDI_PITCH <= note <= MAX_MIDI_PITCH)]
if (not midi_notes):
return 0
melody_min_note = min(midi_notes)
melody_max_note = max(midi_notes)
melody_center = ((melody_min_note + melody_max_note) / 2)
target_center = (((min_note + max_note) - 1) / 2)
center_diff = (target_center - (melody_center + key_diff))
transpose_amount = (key_diff + (NOTES_PER_OCTAVE * int(round((center_diff / float(NOTES_PER_OCTAVE))))))
self.transpose(transpose_amount, min_note, max_note)
return transpose_amount
|
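A worked example of the `squash` arithmetic: a melody detected as D Major (key 2) spanning pitches 62..74, squashed into [48, 84) and transposed to C Major (key 0):

    key_diff = 0 - 2                      # transpose_to_key - melody_key
    melody_center = (62 + 74) / 2         # 68.0
    target_center = (48 + 84 - 1) / 2     # 65.5
    center_diff = target_center - (melody_center + key_diff)  # -0.5
    transpose_amount = key_diff + 12 * int(round(center_diff / 12.0))
    assert transpose_amount == -2         # down a whole step, no octave shift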
'Sets the length of the melody to the specified number of steps.
If the melody is not long enough, ends any sustained notes and adds NO_EVENT
steps for padding. If it is too long, it will be truncated to the requested
length.
Args:
steps: How many steps long the melody should be.
from_left: Whether to add/remove from the left instead of right.'
| def set_length(self, steps, from_left=False):
| old_len = len(self)
super(Melody, self).set_length(steps, from_left=from_left)
if ((steps > old_len) and (not from_left)):
for i in reversed(range(old_len)):
if (self._events[i] == MELODY_NOTE_OFF):
break
elif (self._events[i] != MELODY_NO_EVENT):
self._events[old_len] = MELODY_NOTE_OFF
break
|
'Increase the resolution of a Melody.
Increases the resolution of a Melody object by a factor of `k`. This uses
MELODY_NO_EVENT to extend each event in the melody to be `k` steps long.
Args:
k: An integer, the factor by which to increase the resolution of the
melody.'
| def increase_resolution(self, k):
| super(Melody, self).increase_resolution(k, fill_event=MELODY_NO_EVENT)
|
'Constructs a BaseSequenceGenerator.
Args:
model: An instance of BaseModel.
details: A generator_pb2.GeneratorDetails for this generator.
checkpoint: Where to look for the most recent model checkpoint. Either a
directory to be used with tf.train.latest_checkpoint or the path to a
single checkpoint file. Or None if a bundle should be used.
bundle: A generator_pb2.GeneratorBundle object that contains both a
checkpoint and a metagraph. Or None if a checkpoint should be used.
Raises:
SequenceGeneratorException: if neither or both of `checkpoint` and
`bundle` are set.'
| def __init__(self, model, details, checkpoint, bundle):
| self._model = model
self._details = details
self._checkpoint = checkpoint
self._bundle = bundle
if ((self._checkpoint is None) and (self._bundle is None)):
raise SequenceGeneratorException('Either checkpoint or bundle must be set')
if ((self._checkpoint is not None) and (self._bundle is not None)):
raise SequenceGeneratorException('Checkpoint and bundle cannot both be set')
if self._bundle:
if (self._bundle.generator_details.id != self._details.id):
raise SequenceGeneratorException(("Generator id in bundle (%s) does not match this generator's id (%s)" % (self._bundle.generator_details.id, self._details.id)))
self._initialized = False
|
'Returns a GeneratorDetails description of this generator.'
| @property
def details(self):
| return self._details
|
'Returns the BundleDetails or None if checkpoint was used.'
| @property
def bundle_details(self):
| if (self._bundle is None):
return None
return self._bundle.bundle_details
|
'Implementation for sequence generation based on sequence and options.
The implementation can assume that _initialize has been called before this
method is called.
Args:
input_sequence: An input NoteSequence to base the generation on.
generator_options: A GeneratorOptions proto with options to use for
generation.
Returns:
The generated NoteSequence proto.'
| @abc.abstractmethod
def _generate(self, input_sequence, generator_options):
| pass
|
'Builds the TF graph and loads the checkpoint.
If the graph has already been initialized, this is a no-op.
Raises:
SequenceGeneratorException: If the checkpoint cannot be found.'
| def initialize(self):
| if self._initialized:
return
if (self._checkpoint is not None):
if (not _checkpoint_file_exists(self._checkpoint)):
raise SequenceGeneratorException(('Checkpoint path does not exist: %s' % self._checkpoint))
checkpoint_file = self._checkpoint
if tf.gfile.IsDirectory(checkpoint_file):
checkpoint_file = tf.train.latest_checkpoint(checkpoint_file)
if (checkpoint_file is None):
raise SequenceGeneratorException(('No checkpoint file found in directory: %s' % self._checkpoint))
if ((not _checkpoint_file_exists(checkpoint_file)) or tf.gfile.IsDirectory(checkpoint_file)):
raise SequenceGeneratorException(('Checkpoint path is not a file: %s (supplied path: %s)' % (checkpoint_file, self._checkpoint)))
self._model.initialize_with_checkpoint(checkpoint_file)
else:
tempdir = None
try:
tempdir = tempfile.mkdtemp()
checkpoint_filename = os.path.join(tempdir, 'model.ckpt')
with tf.gfile.Open(checkpoint_filename, 'wb') as f:
f.write(self._bundle.checkpoint_file[0])
metagraph_filename = os.path.join(tempdir, 'model.ckpt.meta')
with tf.gfile.Open(metagraph_filename, 'wb') as f:
f.write(self._bundle.metagraph_file)
self._model.initialize_with_checkpoint_and_metagraph(checkpoint_filename, metagraph_filename)
finally:
if (tempdir is not None):
tf.gfile.DeleteRecursively(tempdir)
self._initialized = True
|
'Closes the TF session.
If the session was already closed, this is a no-op.'
| def close(self):
| if self._initialized:
self._model.close()
self._initialized = False
|
'When used as a context manager, initializes the TF session.'
| def __enter__(self):
| self.initialize()
return self
|
'When used as a context manager, closes the TF session.'
| def __exit__(self, *args):
| self.close()
|
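A usage sketch of the context-manager protocol defined by `__enter__`/`__exit__` (`MySequenceGenerator` is a hypothetical subclass):

    generator = MySequenceGenerator(model, details, checkpoint='/tmp/ckpt', bundle=None)
    with generator:                        # __enter__ calls initialize()
        output = generator.generate(input_sequence, generator_options)
    # __exit__ calls close(), releasing the TF session.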
'Generates a sequence from the model based on sequence and options.
Also initializes the TF graph if not yet initialized.
Args:
input_sequence: An input NoteSequence to base the generation on.
generator_options: A GeneratorOptions proto with options to use for
generation.
Returns:
The generated NoteSequence proto.'
| def generate(self, input_sequence, generator_options):
| self.initialize()
return self._generate(input_sequence, generator_options)
|
'Writes a generator_pb2.GeneratorBundle file in the specified location.
Saves the checkpoint, metagraph, and generator id in one file.
Args:
bundle_file: Location to write the bundle file.
bundle_description: A short, human-readable string description of this
bundle.
Raises:
SequenceGeneratorException: if there is an error creating the bundle file.'
| def create_bundle_file(self, bundle_file, bundle_description=None):
| if (not bundle_file):
raise SequenceGeneratorException('Bundle file location not specified.')
if (not self.details.id):
raise SequenceGeneratorException('Generator id must be included in GeneratorDetails when creating a bundle file.')
if (not self.details.description):
tf.logging.warn('Writing bundle file with no generator description.')
if (not bundle_description):
tf.logging.warn('Writing bundle file with no bundle description.')
self.initialize()
tempdir = None
try:
tempdir = tempfile.mkdtemp()
checkpoint_filename = os.path.join(tempdir, 'model.ckpt')
self._model.write_checkpoint_with_metagraph(checkpoint_filename)
if (not os.path.isfile(checkpoint_filename)):
raise SequenceGeneratorException(('Could not read checkpoint file: %s' % checkpoint_filename))
metagraph_filename = (checkpoint_filename + '.meta')
if (not os.path.isfile(metagraph_filename)):
raise SequenceGeneratorException(('Could not read metagraph file: %s' % metagraph_filename))
bundle = generator_pb2.GeneratorBundle()
bundle.generator_details.CopyFrom(self.details)
if bundle_description:
bundle.bundle_details.description = bundle_description
with tf.gfile.Open(checkpoint_filename, 'rb') as f:
bundle.checkpoint_file.append(f.read())
with tf.gfile.Open(metagraph_filename, 'rb') as f:
bundle.metagraph_file = f.read()
with tf.gfile.Open(bundle_file, 'wb') as f:
f.write(bundle.SerializeToString())
finally:
if (tempdir is not None):
tf.gfile.DeleteRecursively(tempdir)
|
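A sketch of reading a bundle file back with the standard protobuf API, mirroring the fields written above:

    bundle = generator_pb2.GeneratorBundle()
    with tf.gfile.Open(bundle_file, 'rb') as f:
        bundle.ParseFromString(f.read())
    print(bundle.generator_details.id)     # matches self.details.id
    print(bundle.bundle_details.description)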
'Returns the input vector for the given position in the chord progression.
Indices [0, 36]:
[0]: Whether or not this chord is "no chord".
[1, 12]: A one-hot encoding of the chord root pitch class.
[13, 24]: Whether or not each pitch class is present in the chord.
[25, 36]: A one-hot encoding of the chord bass pitch class.
Args:
events: A magenta.music.ChordProgression object.
position: An integer event position in the chord progression.
Returns:
An input vector, a self.input_size length list of floats.'
| def events_to_input(self, events, position):
| chord = events[position]
input_ = ([0.0] * self.input_size)
if (chord == NO_CHORD):
input_[0] = 1.0
return input_
root = chord_symbols_lib.chord_symbol_root(chord)
input_[(1 + root)] = 1.0
pitches = chord_symbols_lib.chord_symbol_pitches(chord)
for pitch in pitches:
input_[((1 + NOTES_PER_OCTAVE) + pitch)] = 1.0
bass = chord_symbols_lib.chord_symbol_bass(chord)
input_[((1 + (2 * NOTES_PER_OCTAVE)) + bass)] = 1.0
return input_
|
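A worked example for the chord symbol "C" (root C, pitch classes {0, 4, 7}, bass C), with NOTES_PER_OCTAVE = 12 and input_size = 1 + 12 + 12 + 12 = 37:

    input_ = [0.0] * 37
    input_[1 + 0] = 1.0                    # root pitch class 0 (C)
    for pc in (0, 4, 7):                   # chord pitch classes C, E, G
        input_[1 + 12 + pc] = 1.0
    input_[1 + 2 * 12 + 0] = 1.0           # bass pitch class 0 (C)
    assert [i for i, v in enumerate(input_) if v] == [1, 13, 17, 20, 25]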
'Initializes the MultiDrumOneHotEncoding.
Args:
drum_type_pitches: A Python list of the MIDI pitch values for each drum
type. If None, `DEFAULT_DRUM_TYPE_PITCHES` will be used.
ignore_unknown_drums: If True, unknown drum pitches will not be encoded.
If False, a DrumsEncodingException will be raised when unknown drum
pitches are encountered.'
| def __init__(self, drum_type_pitches=None, ignore_unknown_drums=True):
| if (drum_type_pitches is None):
drum_type_pitches = DEFAULT_DRUM_TYPE_PITCHES
self._drum_map = dict(enumerate(drum_type_pitches))
self._inverse_drum_map = dict(((pitch, index) for (index, pitches) in self._drum_map.items() for pitch in pitches))
self._ignore_unknown_drums = ignore_unknown_drums
|
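A sketch of the two maps, using hypothetical drum-type pitches (the real DEFAULT_DRUM_TYPE_PITCHES is longer):

    drum_type_pitches = [[36, 35], [38, 40]]      # e.g. bass drums, snares
    enc = MultiDrumOneHotEncoding(drum_type_pitches)
    # _drum_map:         {0: [36, 35], 1: [38, 40]}
    # _inverse_drum_map: {36: 0, 35: 0, 38: 1, 40: 1}  (pitch -> drum type)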
'Initializes a MelodyOneHotEncoding object.
Args:
min_note: The minimum midi pitch the encoded melody events can have.
max_note: The maximum midi pitch (exclusive) the encoded melody events
can have.
Raises:
ValueError: If `min_note` or `max_note` is outside the midi range, or if
`max_note` is not greater than `min_note`.'
| def __init__(self, min_note, max_note):
| if (min_note < MIN_MIDI_PITCH):
raise ValueError(('min_note must be >= 0. min_note is %d.' % min_note))
if (max_note > (MAX_MIDI_PITCH + 1)):
raise ValueError(('max_note must be <= 128. max_note is %d.' % max_note))
if (max_note <= min_note):
raise ValueError('max_note must be greater than min_note')
self._min_note = min_note
self._max_note = max_note
|
'Collapses a melody event value into a zero-based index range.
Args:
event: A Melody event value. -2 = no event, -1 = note-off event,
[0, 127] = note-on event for that midi pitch.
Returns:
An int in the range [0, self.num_classes). 0 = no event,
1 = note-off event, [2, self.num_classes) = note-on event for
that pitch relative to the [self._min_note, self._max_note) range.
Raises:
ValueError: If `event` is a MIDI note not between self._min_note and
self._max_note, or an invalid special event value.'
| def encode_event(self, event):
| if (event < (- NUM_SPECIAL_MELODY_EVENTS)):
raise ValueError(('invalid melody event value: %d' % event))
if ((event >= 0) and (event < self._min_note)):
raise ValueError(('melody event less than min note: %d < %d' % (event, self._min_note)))
if (event >= self._max_note):
raise ValueError(('melody event greater than max note: %d >= %d' % (event, self._max_note)))
if (event < 0):
return (event + NUM_SPECIAL_MELODY_EVENTS)
return ((event - self._min_note) + NUM_SPECIAL_MELODY_EVENTS)
|
'Expands a zero-based index value to its equivalent melody event value.
Args:
index: An int in the range [0, self._num_model_events).
0 = no event, 1 = note-off event,
[2, self._num_model_events) = note-on event for that pitch relative
to the [self._min_note, self._max_note) range.
Returns:
A Melody event value. -2 = no event, -1 = note-off event,
[0, 127] = note-on event for that midi pitch.'
| def decode_event(self, index):
| if (index < NUM_SPECIAL_MELODY_EVENTS):
return (index - NUM_SPECIAL_MELODY_EVENTS)
return ((index - NUM_SPECIAL_MELODY_EVENTS) + self._min_note)
|
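A round-trip sketch with min_note=48, max_note=84, and NUM_SPECIAL_MELODY_EVENTS = 2 (the two special events, no-event and note-off):

    enc = MelodyOneHotEncoding(min_note=48, max_note=84)
    assert enc.encode_event(-2) == 0     # no event
    assert enc.encode_event(-1) == 1     # note-off
    assert enc.encode_event(60) == 14    # 60 - 48 + 2
    assert enc.decode_event(14) == 60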
'Initializes the KeyMelodyEncoderDecoder.
Args:
min_note: The minimum midi pitch the encoded melody events can have.
max_note: The maximum midi pitch (exclusive) the encoded melody events can
have.
lookback_distances: A list of step intervals to look back in history to
encode both the following event and whether the current step is a
repeat. If None, use default lookback distances.
binary_counter_bits: The number of input bits to use as a counter for the
metric position of the next note.'
| def __init__(self, min_note, max_note, lookback_distances=None, binary_counter_bits=7):
| self._lookback_distances = (lookback_distances if (lookback_distances is not None) else DEFAULT_LOOKBACK_DISTANCES)
self._binary_counter_bits = binary_counter_bits
self._min_note = min_note
self._note_range = (max_note - min_note)
|
'Returns the input vector for the given position in the melody.
Returns a self.input_size length list of floats. Assuming
self._min_note = 48, self._note_range = 36, two lookback distances, and
seven binary counters, then self.input_size = 74. Each index represents a
different input signal to the model.
Indices [0, 73]:
[0, 35]: A note is playing at that pitch [48, 84).
36: Any note is playing.
37: Silence is playing.
38: The current event is the note-on event of the currently playing note.
39: Whether the melody is currently ascending or descending.
40: The last event is repeating (first lookback distance).
41: The last event is repeating (second lookback distance).
[42, 48]: Time keeping toggles.
49: The next event is the start of a bar.
[50, 61]: The keys the current melody is in.
[62, 73]: The keys the last 3 notes are in.
Args:
events: A magenta.music.Melody object.
position: An integer event position in the melody.
Returns:
An input vector, a self.input_size length list of floats.'
| def events_to_input(self, events, position):
| current_note = None
is_attack = False
is_ascending = None
last_3_notes = collections.deque(maxlen=3)
sub_melody = melodies_lib.Melody(events[:(position + 1)])
for note in sub_melody:
if (note == MELODY_NO_EVENT):
is_attack = False
elif (note == MELODY_NOTE_OFF):
current_note = None
else:
is_attack = True
current_note = note
if last_3_notes:
if (note > last_3_notes[(-1)]):
is_ascending = True
if (note < last_3_notes[(-1)]):
is_ascending = False
if (note in last_3_notes):
last_3_notes.remove(note)
last_3_notes.append(note)
input_ = ([0.0] * self.input_size)
offset = 0
if current_note:
input_[((offset + current_note) - self._min_note)] = 1.0
input_[(offset + self._note_range)] = 1.0
else:
input_[((offset + self._note_range) + 1)] = 1.0
offset += (self._note_range + 2)
if is_attack:
input_[offset] = 1.0
offset += 1
if (is_ascending is not None):
input_[offset] = (1.0 if is_ascending else (-1.0))
offset += 1
for (i, lookback_distance) in enumerate(self._lookback_distances):
lookback_position = (position - lookback_distance)
if ((lookback_position >= 0) and (events[position] == events[lookback_position])):
input_[offset] = 1.0
offset += 1
n = len(sub_melody)
for i in range(self._binary_counter_bits):
input_[offset] = (1.0 if ((n // (2 ** i)) % 2) else (-1.0))
offset += 1
if ((len(sub_melody) % DEFAULT_STEPS_PER_BAR) == 0):
input_[offset] = 1.0
offset += 1
key_histogram = sub_melody.get_major_key_histogram()
max_val = max(key_histogram)
for (i, key_val) in enumerate(key_histogram):
if (key_val == max_val):
input_[offset] = 1.0
offset += 1
last_3_note_melody = melodies_lib.Melody(list(last_3_notes))
key_histogram = last_3_note_melody.get_major_key_histogram()
max_val = max(key_histogram)
for (i, key_val) in enumerate(key_histogram):
if (key_val == max_val):
input_[offset] = 1.0
offset += 1
assert (offset == self.input_size)
return input_
|
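A size check matching the index map in the docstring above (min_note=48, max_note=84, two lookback distances, seven binary counter bits):

    note_range = 84 - 48      # 36
    input_size = (note_range  # [0, 35]  one-hot of the playing pitch
                  + 2         # 36, 37   note playing / silence
                  + 1         # 38       current event is an attack
                  + 1         # 39       ascending (+1) or descending (-1)
                  + 2         # 40, 41   lookback repeats
                  + 7         # [42, 48] binary counters
                  + 1         # 49       next event starts a bar
                  + 12        # [50, 61] keys of the whole melody
                  + 12)       # [62, 73] keys of the last 3 notes
    assert input_size == 74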
'Returns the label for the given position in the melody.
Returns an int in the range [0, self.num_classes). Assuming
self._min_note = 48, self._note_range = 36, and two lookback distances,
then self.num_classes = 40.
Values [0, 39]:
[0, 35]: Note-on event for midi pitch [48, 84).
36: No event.
37: Note-off event.
38: Repeat first lookback (takes precedence over above values).
39: Repeat second lookback (takes precedence over above values).
Args:
events: A magenta.music.Melody object.
position: An integer event position in the melody.
Returns:
A label, an integer.'
| def events_to_label(self, events, position):
| if ((position < self._lookback_distances[(-1)]) and (events[position] == MELODY_NO_EVENT)):
return ((self._note_range + len(self._lookback_distances)) + 1)
for (i, lookback_distance) in reversed(list(enumerate(self._lookback_distances))):
lookback_position = (position - lookback_distance)
if ((lookback_position >= 0) and (events[position] == events[lookback_position])):
return ((self._note_range + 2) + i)
if (events[position] == MELODY_NOTE_OFF):
return (self._note_range + 1)
if (events[position] == MELODY_NO_EVENT):
return self._note_range
return (events[position] - self._min_note)
|
'Returns the melody event for the given class index.
This is the reverse process of the self.events_to_label method.
Args:
class_index: An int in the range [0, self.num_classes).
events: The magenta.music.Melody events list of the current melody.
Returns:
A magenta.music.Melody event value.'
| def class_index_to_event(self, class_index, events):
| for (i, lookback_distance) in reversed(list(enumerate(self._lookback_distances))):
if (class_index == ((self._note_range + 2) + i)):
if (len(events) < lookback_distance):
return MELODY_NO_EVENT
return events[(- lookback_distance)]
if (class_index == (self._note_range + 1)):
return MELODY_NOTE_OFF
if (class_index == self._note_range):
return MELODY_NO_EVENT
return (self._min_note + class_index)
|
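A round-trip sketch of the label layout for note_range = 36 and two lookback distances (num_classes = 36 + 2 + 2 = 40), assuming the default lookback distances are nonzero:

    enc = KeyMelodyEncoderDecoder(min_note=48, max_note=84)
    assert enc.class_index_to_event(14, []) == 62   # note-on at 48 + 14
    assert enc.class_index_to_event(36, []) == -2   # MELODY_NO_EVENT
    assert enc.class_index_to_event(37, []) == -1   # MELODY_NOTE_OFF
    assert enc.class_index_to_event(38, []) == -2   # lookback on a too-short melody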
'Compares a MusicXMLDocument object against a sequence proto.
Args:
musicxml: A MusicXMLDocument object.
sequence_proto: A tensorflow.magenta.Sequence proto.'
| def checkmusicxmlandsequence(self, musicxml, sequence_proto):
| self.assertEqual(len(musicxml.get_time_signatures()), len(sequence_proto.time_signatures))
for (musicxml_time, sequence_time) in zip(musicxml.get_time_signatures(), sequence_proto.time_signatures):
self.assertEqual(musicxml_time.numerator, sequence_time.numerator)
self.assertEqual(musicxml_time.denominator, sequence_time.denominator)
self.assertAlmostEqual(musicxml_time.time_position, sequence_time.time)
self.assertEqual(len(musicxml.get_key_signatures()), len(sequence_proto.key_signatures))
for (musicxml_key, sequence_key) in zip(musicxml.get_key_signatures(), sequence_proto.key_signatures):
if (musicxml_key.mode == 'major'):
mode = 0
elif (musicxml_key.mode == 'minor'):
mode = 1
music_proto_keys = [11, 6, 1, 8, 3, 10, 5, 0, 7, 2, 9, 4, 11, 6, 1]
key = music_proto_keys[(musicxml_key.key + 7)]
self.assertEqual(key, sequence_key.key)
self.assertEqual(mode, sequence_key.mode)
self.assertAlmostEqual(musicxml_key.time_position, sequence_key.time)
musicxml_tempos = musicxml.get_tempos()
self.assertEqual(len(musicxml_tempos), len(sequence_proto.tempos))
for (musicxml_tempo, sequence_tempo) in zip(musicxml_tempos, sequence_proto.tempos):
self.assertAlmostEqual(musicxml_tempo.qpm, sequence_tempo.qpm)
self.assertAlmostEqual(musicxml_tempo.time_position, sequence_tempo.time)
seq_parts = defaultdict(list)
for seq_note in sequence_proto.notes:
seq_parts[seq_note.part].append(seq_note)
self.assertEqual(len(musicxml.parts), len(seq_parts))
for (musicxml_part, seq_part_id) in zip(musicxml.parts, sorted(seq_parts.keys())):
seq_instrument_notes = seq_parts[seq_part_id]
musicxml_notes = []
for musicxml_measure in musicxml_part.measures:
for musicxml_note in musicxml_measure.notes:
if (not musicxml_note.is_rest):
musicxml_notes.append(musicxml_note)
self.assertEqual(len(musicxml_notes), len(seq_instrument_notes))
for (musicxml_note, sequence_note) in zip(musicxml_notes, seq_instrument_notes):
self.assertEqual(musicxml_note.pitch[1], sequence_note.pitch)
self.assertEqual(musicxml_note.velocity, sequence_note.velocity)
self.assertAlmostEqual(musicxml_note.note_duration.time_position, sequence_note.start_time)
self.assertAlmostEqual((musicxml_note.note_duration.time_position + musicxml_note.note_duration.seconds), sequence_note.end_time)
self.assertAlmostEqual(musicxml_note.note_duration.duration, ((musicxml_note.state.divisions * 4) * musicxml_note.note_duration.duration_float()), delta=1)
|
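A worked example of the music_proto_keys lookup above: MusicXML encodes a key signature as a count of sharps (positive) or flats (negative), so index = fifths + 7 maps [-7, 7] onto the table:

    music_proto_keys = [11, 6, 1, 8, 3, 10, 5, 0, 7, 2, 9, 4, 11, 6, 1]
    assert music_proto_keys[0 + 7] == 0    # no accidentals -> C
    assert music_proto_keys[1 + 7] == 7    # one sharp      -> G
    assert music_proto_keys[-1 + 7] == 5   # one flat       -> F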
'Test the translation from MusicXML to Sequence proto.'
| def checkmusicxmltosequence(self, filename):
| source_musicxml = musicxml_parser.MusicXMLDocument(filename)
sequence_proto = musicxml_reader.musicxml_to_sequence_proto(source_musicxml)
self.checkmusicxmlandsequence(source_musicxml, sequence_proto)
|
'Verify MusicXML scale file.
Verify that it contains the correct pitches (sounding pitch) and durations.
Args:
filename: file to test.
part_name: name of the part the sequence is expected to contain.'
| def checkFMajorScale(self, filename, part_name):
| expected_ns = common_testing_lib.parse_test_proto(music_pb2.NoteSequence, '\n ticks_per_quarter: 220\n source_info: {\n source_type: SCORE_BASED\n encoding_type: MUSIC_XML\n parser: MAGENTA_MUSIC_XML\n }\n key_signatures {\n key: F\n time: 0\n }\n time_signatures {\n numerator: 4\n denominator: 4\n }\n tempos {\n qpm: 120.0\n }\n total_time: 4.0\n ')
part_info = expected_ns.part_infos.add()
part_info.name = part_name
expected_pitches = [65, 67, 69, 70, 72, 74, 76, 77]
time = 0
for pitch in expected_pitches:
note = expected_ns.notes.add()
note.part = 0
note.voice = 1
note.pitch = pitch
note.start_time = time
time += 0.5
note.end_time = time
note.velocity = 64
note.numerator = 1
note.denominator = 4
source_musicxml = musicxml_parser.MusicXMLDocument(filename)
sequence_proto = musicxml_reader.musicxml_to_sequence_proto(source_musicxml)
self.assertProtoEquals(expected_ns, sequence_proto)
|
'Test the simple flute scale MusicXML file.'
| def testsimplemusicxmltosequence(self):
| self.checkmusicxmltosequence(self.flute_scale_filename)
self.checkFMajorScale(self.flute_scale_filename, 'Flute')
|
'Test the complex band score MusicXML file.'
| def testcomplexmusicxmltosequence(self):
| self.checkmusicxmltosequence(self.band_score_filename)
|
'Test the translation from transposed MusicXML to Sequence proto.
Compare a transposed MusicXML file (clarinet) to an identical untransposed
sequence (flute).'
| def testtransposedxmltosequence(self):
| untransposed_musicxml = musicxml_parser.MusicXMLDocument(self.flute_scale_filename)
transposed_musicxml = musicxml_parser.MusicXMLDocument(self.clarinet_scale_filename)
untransposed_proto = musicxml_reader.musicxml_to_sequence_proto(untransposed_musicxml)
self.checkmusicxmlandsequence(transposed_musicxml, untransposed_proto)
self.checkFMajorScale(self.clarinet_scale_filename, 'Clarinet in Bb')
|
'Test an MXL file containing a unicode filename within its zip archive.'
| def testcompressedmxlunicodefilename(self):
| unicode_filename = os.path.join(tf.resource_loader.get_data_files_path(), 'testdata/unicode_filename.mxl')
sequence = musicxml_reader.musicxml_file_to_sequence_proto(unicode_filename)
self.assertEqual(len(sequence.notes), 8)
|
'Test the translation from compressed MusicXML to Sequence proto.
Compare a compressed MusicXML file to an identical uncompressed sequence.'
| def testcompressedxmltosequence(self):
| uncompressed_musicxml = musicxml_parser.MusicXMLDocument(self.flute_scale_filename)
compressed_musicxml = musicxml_parser.MusicXMLDocument(self.compressed_filename)
uncompressed_proto = musicxml_reader.musicxml_to_sequence_proto(uncompressed_musicxml)
self.checkmusicxmlandsequence(compressed_musicxml, uncompressed_proto)
self.checkFMajorScale(self.flute_scale_filename, 'Flute')
|
'Test the translation from compressed MusicXML with multiple rootfiles.
The example MXL file contains a MusicXML file of the Flute F Major scale,
as well as the PNG rendering of the score contained within the single MXL
file.'
| def testmultiplecompressedxmltosequence(self):
| uncompressed_musicxml = musicxml_parser.MusicXMLDocument(self.flute_scale_filename)
compressed_musicxml = musicxml_parser.MusicXMLDocument(self.multiple_rootfile_compressed_filename)
uncompressed_proto = musicxml_reader.musicxml_to_sequence_proto(uncompressed_musicxml)
self.checkmusicxmlandsequence(compressed_musicxml, uncompressed_proto)
self.checkFMajorScale(self.flute_scale_filename, 'Flute')
|
'Test the rhythm durations MusicXML file.'
| def testrhythmdurationsxmltosequence(self):
| self.checkmusicxmltosequence(self.rhythm_durations_filename)
|
'Verify properties of the flute scale.'
| def testFluteScale(self):
| ns = musicxml_reader.musicxml_file_to_sequence_proto(self.flute_scale_filename)
expected_ns = common_testing_lib.parse_test_proto(music_pb2.NoteSequence, '\n ticks_per_quarter: 220\n time_signatures: {\n numerator: 4\n denominator: 4\n }\n tempos: {\n qpm: 120\n }\n key_signatures: {\n key: F\n }\n source_info: {\n source_type: SCORE_BASED\n encoding_type: MUSIC_XML\n parser: MAGENTA_MUSIC_XML\n }\n part_infos {\n part: 0\n name: "Flute"\n }\n total_time: 4.0\n ')
expected_pitches = [65, 67, 69, 70, 72, 74, 76, 77]
time = 0
for pitch in expected_pitches:
note = expected_ns.notes.add()
note.part = 0
note.voice = 1
note.pitch = pitch
note.start_time = time
time += 0.5
note.end_time = time
note.velocity = 64
note.numerator = 1
note.denominator = 4
self.assertProtoEquals(expected_ns, ns)
|
'Test that transposition works when changing instrument transposition.
This can occur within a single part in a score where the score
has no key signature / is atonal. Examples include changing from a
non-transposing instrument to a transposing one (ex. Flute to Bb Clarinet)
or vice versa, or changing among transposing instruments (ex. Bb Clarinet
to Eb Alto Saxophone).'
| def test_atonal_transposition(self):
| ns = musicxml_reader.musicxml_file_to_sequence_proto(self.atonal_transposition_filename)
expected_ns = common_testing_lib.parse_test_proto(music_pb2.NoteSequence, '\n ticks_per_quarter: 220\n time_signatures: {\n numerator: 4\n denominator: 4\n }\n tempos: {\n qpm: 120\n }\n key_signatures: {\n }\n part_infos {\n part: 0\n name: "Flute"\n }\n source_info: {\n source_type: SCORE_BASED\n encoding_type: MUSIC_XML\n parser: MAGENTA_MUSIC_XML\n }\n total_time: 4.0\n ')
expected_pitches = [72, 74, 76, 77, 79, 77, 76, 74]
time = 0
for pitch in expected_pitches:
note = expected_ns.notes.add()
note.pitch = pitch
note.start_time = time
time += 0.5
note.end_time = time
note.velocity = 64
note.numerator = 1
note.denominator = 4
note.voice = 1
self.maxDiff = None
self.assertProtoEquals(expected_ns, ns)
|
'Test that incomplete measures have the correct time signature.
This can occur in pickup bars or incomplete measures. For example,
if the time signature in the MusicXML is 4/4, but the measure only
contains one quarter note, Magenta expects this pickup measure to have
a time signature of 1/4.'
| def test_incomplete_measures(self):
| ns = musicxml_reader.musicxml_file_to_sequence_proto(self.time_signature_filename)
self.assertEqual(len(ns.time_signatures), 6)
self.assertEqual(len(ns.key_signatures), 1)
self.assertEqual(len(ns.notes), 112)
|
'Test that time signatures are inserted for music without time signatures.
MusicXML does not require the use of time signatures. Music without
time signatures occurs in medieval chant, cadenzas, and contemporary music.'
| def test_unmetered_music(self):
| ns = musicxml_reader.musicxml_file_to_sequence_proto(self.unmetered_filename)
expected_ns = common_testing_lib.parse_test_proto(music_pb2.NoteSequence, '\n ticks_per_quarter: 220\n time_signatures: {\n numerator: 11\n denominator: 8\n }\n tempos: {\n qpm: 120\n }\n key_signatures: {\n }\n notes {\n pitch: 72\n velocity: 64\n end_time: 0.5\n numerator: 1\n denominator: 4\n voice: 1\n }\n notes {\n pitch: 74\n velocity: 64\n start_time: 0.5\n end_time: 0.75\n numerator: 1\n denominator: 8\n voice: 1\n }\n notes {\n pitch: 76\n velocity: 64\n start_time: 0.75\n end_time: 1.25\n numerator: 1\n denominator: 4\n voice: 1\n }\n notes {\n pitch: 77\n velocity: 64\n start_time: 1.25\n end_time: 1.75\n numerator: 1\n denominator: 4\n voice: 1\n }\n notes {\n pitch: 79\n velocity: 64\n start_time: 1.75\n end_time: 2.75\n numerator: 1\n denominator: 2\n voice: 1\n }\n part_infos {\n name: "Flute"\n }\n source_info: {\n source_type: SCORE_BASED\n encoding_type: MUSIC_XML\n parser: MAGENTA_MUSIC_XML\n }\n total_time: 2.75\n ')
self.maxDiff = None
self.assertProtoEquals(expected_ns, ns)
|
'Verify properties of the St. Anne file.
The file contains 2 parts and 4 voices.'
| def test_st_anne(self):
| ns = musicxml_reader.musicxml_file_to_sequence_proto(self.st_anne_filename)
expected_ns = common_testing_lib.parse_test_proto(music_pb2.NoteSequence, '\n ticks_per_quarter: 220\n time_signatures {\n numerator: 1\n denominator: 4\n }\n time_signatures {\n time: 0.5\n numerator: 4\n denominator: 4\n }\n time_signatures {\n time: 6.5\n numerator: 3\n denominator: 4\n }\n time_signatures {\n time: 8.0\n numerator: 1\n denominator: 4\n }\n time_signatures {\n time: 8.5\n numerator: 4\n denominator: 4\n }\n time_signatures {\n time: 14.5\n numerator: 3\n denominator: 4\n }\n tempos: {\n qpm: 120\n }\n key_signatures: {\n key: C\n }\n source_info: {\n source_type: SCORE_BASED\n encoding_type: MUSIC_XML\n parser: MAGENTA_MUSIC_XML\n }\n part_infos {\n part: 0\n name: "Harpsichord"\n }\n part_infos {\n part: 1\n name: "Piano"\n }\n total_time: 16.0\n ')
pitches_0_1 = [(67, 0.5), (64, 0.5), (69, 0.5), (67, 0.5), (72, 0.5), (72, 0.5), (71, 0.5), (72, 0.5), (67, 0.5), (72, 0.5), (67, 0.5), (69, 0.5), (66, 0.5), (67, 1.5), (71, 0.5), (72, 0.5), (69, 0.5), (74, 0.5), (71, 0.5), (72, 0.5), (69, 0.5), (71, 0.5), (67, 0.5), (69, 0.5), (72, 0.5), (74, 0.5), (71, 0.5), (72, 1.5)]
pitches_0_2 = [(60, 0.5), (60, 0.5), (60, 0.5), (60, 0.5), (64, 0.5), (62, 0.5), (62, 0.5), (64, 0.5), (64, 0.5), (64, 0.5), (64, 0.5), (64, 0.5), (62, 0.5), (62, 1.5), (62, 0.5), (64, 0.5), (60, 0.5), (65, 0.5), (62, 0.5), (64, 0.75), (62, 0.25), (59, 0.5), (60, 0.5), (65, 0.5), (64, 0.5), (62, 0.5), (62, 0.5), (64, 1.5)]
pitches_1_1 = [(52, 0.5), (55, 0.5), (57, 0.5), (60, 0.5), (60, 0.5), (57, 0.5), (55, 0.5), (55, 0.5), (60, 0.5), (60, 0.5), (59, 0.5), (57, 0.5), (57, 0.5), (59, 1.5), (55, 0.5), (55, 0.5), (57, 0.5), (57, 0.5), (55, 0.5), (55, 0.5), (57, 0.5), (56, 0.5), (55, 0.5), (53, 0.5), (55, 0.5), (57, 0.5), (55, 0.5), (55, 1.5)]
pitches_1_2 = [(48, 0.5), (48, 0.5), (53, 0.5), (52, 0.5), (57, 0.5), (53, 0.5), (55, 0.5), (48, 0.5), (48, 0.5), (45, 0.5), (52, 0.5), (48, 0.5), (50, 0.5), (43, 1.5), (55, 0.5), (48, 0.5), (53, 0.5), (50, 0.5), (55, 0.5), (48, 0.5), (53, 0.5), (52, 0.5), (52, 0.5), (50, 0.5), (48, 0.5), (53, 0.5), (55, 0.5), (48, 1.5)]
part_voice_instrument_program_pitches = [(0, 1, 1, 7, pitches_0_1), (0, 2, 1, 7, pitches_0_2), (1, 1, 2, 1, pitches_1_1), (1, 2, 2, 1, pitches_1_2)]
for (part, voice, instrument, program, pitches) in part_voice_instrument_program_pitches:
time = 0
for (pitch, duration) in pitches:
note = expected_ns.notes.add()
note.part = part
note.voice = voice
note.pitch = pitch
note.start_time = time
time += duration
note.end_time = time
note.velocity = 64
note.instrument = instrument
note.program = program
if (duration == 0.5):
note.numerator = 1
note.denominator = 4
if (duration == 0.25):
note.numerator = 1
note.denominator = 8
if (duration == 0.75):
note.numerator = 3
note.denominator = 8
if (duration == 1.5):
note.numerator = 3
note.denominator = 4
expected_ns.notes.sort(key=(lambda note: (note.part, note.voice, note.start_time)))
ns.notes.sort(key=(lambda note: (note.part, note.voice, note.start_time)))
self.assertProtoEquals(expected_ns, ns)
|
'Verify that a part with an empty name can be parsed.'
| def test_empty_part_name(self):
| xml = '<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n <!DOCTYPE score-partwise PUBLIC\n "-//Recordare//DTD MusicXML 3.0 Partwise//EN"\n "http://www.musicxml.org/dtds/partwise.dtd">\n <score-partwise version="3.0">\n <part-list>\n <score-part id="P1">\n <part-name/>\n </score-part>\n </part-list>\n <part id="P1">\n </part>\n </score-partwise>\n '
with tempfile.NamedTemporaryFile(mode='w') as temp_file:
temp_file.write(xml)
temp_file.flush()
ns = musicxml_reader.musicxml_file_to_sequence_proto(temp_file.name)
expected_ns = common_testing_lib.parse_test_proto(music_pb2.NoteSequence, '\n ticks_per_quarter: 220\n source_info: {\n source_type: SCORE_BASED\n encoding_type: MUSIC_XML\n parser: MAGENTA_MUSIC_XML\n }\n key_signatures {\n key: C\n time: 0\n }\n tempos {\n qpm: 120.0\n }\n part_infos {\n part: 0\n }\n total_time: 0.0\n ')
self.assertProtoEquals(expected_ns, ns)
|
'Verify that a part without a corresponding score-part can be parsed.'
| def test_empty_part_list(self):
| xml = '<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n <!DOCTYPE score-partwise PUBLIC\n "-//Recordare//DTD MusicXML 3.0 Partwise//EN"\n "http://www.musicxml.org/dtds/partwise.dtd">\n <score-partwise version="3.0">\n <part id="P1">\n </part>\n </score-partwise>\n '
with tempfile.NamedTemporaryFile(mode='w') as temp_file:
temp_file.write(xml)
temp_file.flush()
ns = musicxml_reader.musicxml_file_to_sequence_proto(temp_file.name)
expected_ns = common_testing_lib.parse_test_proto(music_pb2.NoteSequence, '\n ticks_per_quarter: 220\n source_info: {\n source_type: SCORE_BASED\n encoding_type: MUSIC_XML\n parser: MAGENTA_MUSIC_XML\n }\n key_signatures {\n key: C\n time: 0\n }\n tempos {\n qpm: 120.0\n }\n part_infos {\n part: 0\n }\n total_time: 0.0\n ')
self.assertProtoEquals(expected_ns, ns)
|
'Verify that an empty doc can be parsed.'
| def test_empty_doc(self):
| xml = '<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n <!DOCTYPE score-partwise PUBLIC\n "-//Recordare//DTD MusicXML 3.0 Partwise//EN"\n "http://www.musicxml.org/dtds/partwise.dtd">\n <score-partwise version="3.0">\n </score-partwise>\n '
with tempfile.NamedTemporaryFile(mode='w') as temp_file:
temp_file.write(xml)
temp_file.flush()
ns = musicxml_reader.musicxml_file_to_sequence_proto(temp_file.name)
expected_ns = common_testing_lib.parse_test_proto(music_pb2.NoteSequence, '\n ticks_per_quarter: 220\n source_info: {\n source_type: SCORE_BASED\n encoding_type: MUSIC_XML\n parser: MAGENTA_MUSIC_XML\n }\n key_signatures {\n key: C\n time: 0\n }\n tempos {\n qpm: 120.0\n }\n total_time: 0.0\n ')
self.assertProtoEquals(expected_ns, ns)
|
'Test that a whole measure rest can be encoded using <forward>.
A whole measure rest is usually encoded as a <note> with a duration
equal to that of a whole measure. An alternative encoding is to
use the <forward> element to advance the time cursor by a duration
equal to that of a whole measure, which implies a whole measure rest
when the measure contains no <note> elements.'
| def test_whole_measure_rest_forward(self):
| ns = musicxml_reader.musicxml_file_to_sequence_proto(self.whole_measure_rest_forward_filename)
expected_ns = common_testing_lib.parse_test_proto(music_pb2.NoteSequence, '\n ticks_per_quarter: 220\n time_signatures {\n numerator: 4\n denominator: 4\n }\n time_signatures {\n time: 6.0\n numerator: 2\n denominator: 4\n }\n key_signatures {\n }\n tempos {\n qpm: 120\n }\n notes {\n pitch: 72\n velocity: 64\n end_time: 2.0\n numerator: 1\n denominator: 1\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 4.0\n end_time: 6.0\n numerator: 1\n denominator: 1\n voice: 1\n }\n notes {\n pitch: 60\n velocity: 64\n start_time: 6.0\n end_time: 7.0\n numerator: 1\n denominator: 2\n voice: 1\n }\n notes {\n pitch: 60\n velocity: 64\n start_time: 8.0\n end_time: 9.0\n numerator: 1\n denominator: 2\n voice: 1\n }\n total_time: 9.0\n part_infos {\n name: "Flute"\n }\n source_info {\n source_type: SCORE_BASED\n encoding_type: MUSIC_XML\n parser: MAGENTA_MUSIC_XML\n }\n ')
self.assertProtoEquals(expected_ns, ns)
|
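For concreteness, here is a hypothetical MusicXML fragment as a Python string (an assumption for illustration, not the elided test asset) showing the <forward> encoding of a whole measure rest:

# Hypothetical MusicXML fragment (not the test asset): with
# <divisions>1</divisions>, one division equals a quarter note, so a
# <forward> of duration 4 spans the full 4/4 measure and implies a rest.
WHOLE_MEASURE_REST_FORWARD = (
    '<measure number="1">'
    '<attributes>'
    '<divisions>1</divisions>'
    '<time><beats>4</beats><beat-type>4</beat-type></time>'
    '</attributes>'
    '<forward><duration>4</duration></forward>'
    '</measure>'
)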
'Test that meters are encoded properly.
Musical meters are expressed as a ratio of beats to beat type (e.g. 6/8).
The MusicXML parser reduces this ratio to lowest terms for timing
purposes. However, the meter should appear in its original, unreduced
form in the NoteSequence.'
| def test_meter(self):
| ns = musicxml_reader.musicxml_file_to_sequence_proto(self.meter_test_filename)
expected_ns = common_testing_lib.parse_test_proto(music_pb2.NoteSequence, '\n ticks_per_quarter: 220\n time_signatures {\n numerator: 1\n denominator: 4\n }\n time_signatures {\n time: 0.5\n numerator: 2\n denominator: 4\n }\n time_signatures {\n time: 1.5\n numerator: 3\n denominator: 4\n }\n time_signatures {\n time: 3.0\n numerator: 4\n denominator: 4\n }\n time_signatures {\n time: 5.0\n numerator: 5\n denominator: 4\n }\n time_signatures {\n time: 7.5\n numerator: 6\n denominator: 4\n }\n time_signatures {\n time: 10.5\n numerator: 7\n denominator: 4\n }\n time_signatures {\n time: 14.0\n numerator: 1\n denominator: 8\n }\n time_signatures {\n time: 14.25\n numerator: 2\n denominator: 8\n }\n time_signatures {\n time: 14.75\n numerator: 3\n denominator: 8\n }\n time_signatures {\n time: 15.5\n numerator: 4\n denominator: 8\n }\n time_signatures {\n time: 16.5\n numerator: 5\n denominator: 8\n }\n time_signatures {\n time: 17.75\n numerator: 6\n denominator: 8\n }\n time_signatures {\n time: 19.25\n numerator: 7\n denominator: 8\n }\n time_signatures {\n time: 21.0\n numerator: 8\n denominator: 8\n }\n time_signatures {\n time: 23.0\n numerator: 9\n denominator: 8\n }\n time_signatures {\n time: 25.25\n numerator: 10\n denominator: 8\n }\n time_signatures {\n time: 27.75\n numerator: 11\n denominator: 8\n }\n time_signatures {\n time: 30.5\n numerator: 12\n denominator: 8\n }\n time_signatures {\n time: 33.5\n numerator: 2\n denominator: 2\n }\n time_signatures {\n time: 35.5\n numerator: 3\n denominator: 2\n }\n time_signatures {\n time: 38.5\n numerator: 4\n denominator: 2\n }\n time_signatures {\n time: 42.5\n numerator: 4\n denominator: 4\n }\n time_signatures {\n time: 44.5\n numerator: 2\n denominator: 2\n }\n key_signatures {\n }\n tempos {\n qpm: 120\n }\n notes {\n pitch: 72\n velocity: 64\n end_time: 0.5\n numerator: 1\n denominator: 4\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 0.5\n end_time: 1.5\n numerator: 1\n denominator: 2\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 1.5\n end_time: 3.0\n numerator: 3\n denominator: 4\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 3.0\n end_time: 5.0\n numerator: 1\n denominator: 1\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 5.0\n end_time: 6.5\n numerator: 3\n denominator: 4\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 6.5\n end_time: 7.5\n numerator: 1\n denominator: 2\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 7.5\n end_time: 9.0\n numerator: 3\n denominator: 4\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 9.0\n end_time: 10.5\n numerator: 3\n denominator: 4\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 10.5\n end_time: 12.0\n numerator: 3\n denominator: 4\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 12.0\n end_time: 13.5\n numerator: 3\n denominator: 4\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 13.5\n end_time: 14.0\n numerator: 1\n denominator: 4\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 14.0\n end_time: 14.25\n numerator: 1\n denominator: 8\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 14.25\n end_time: 14.75\n numerator: 1\n denominator: 4\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 14.75\n end_time: 15.5\n numerator: 3\n denominator: 8\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 15.5\n end_time: 16.0\n numerator: 1\n denominator: 4\n 
voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 16.0\n end_time: 16.5\n numerator: 1\n denominator: 4\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 16.5\n end_time: 17.0\n numerator: 1\n denominator: 4\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 17.0\n end_time: 17.5\n numerator: 1\n denominator: 4\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 17.5\n end_time: 17.75\n numerator: 1\n denominator: 8\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 17.75\n end_time: 18.5\n numerator: 3\n denominator: 8\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 18.5\n end_time: 19.25\n numerator: 3\n denominator: 8\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 19.25\n end_time: 20.0\n numerator: 3\n denominator: 8\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 20.0\n end_time: 20.5\n numerator: 1\n denominator: 4\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 20.5\n end_time: 21.0\n numerator: 1\n denominator: 4\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 21.0\n end_time: 21.75\n numerator: 3\n denominator: 8\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 21.75\n end_time: 22.5\n numerator: 3\n denominator: 8\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 22.5\n end_time: 23.0\n numerator: 1\n denominator: 4\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 23.0\n end_time: 24.5\n numerator: 3\n denominator: 4\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 24.5\n end_time: 25.25\n numerator: 3\n denominator: 8\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 25.25\n end_time: 26.75\n numerator: 3\n denominator: 4\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 26.75\n end_time: 27.25\n numerator: 1\n denominator: 4\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 27.25\n end_time: 27.75\n numerator: 1\n denominator: 4\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 27.75\n end_time: 29.25\n numerator: 3\n denominator: 4\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 29.25\n end_time: 30.0\n numerator: 3\n denominator: 8\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 30.0\n end_time: 30.5\n numerator: 1\n denominator: 4\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 30.5\n end_time: 32.0\n numerator: 3\n denominator: 4\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 32.0\n end_time: 33.5\n numerator: 3\n denominator: 4\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 33.5\n end_time: 34.5\n numerator: 1\n denominator: 2\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 34.5\n end_time: 35.5\n numerator: 1\n denominator: 2\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 35.5\n end_time: 36.5\n numerator: 1\n denominator: 2\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 36.5\n end_time: 37.5\n numerator: 1\n denominator: 2\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 37.5\n end_time: 38.5\n numerator: 1\n denominator: 2\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 38.5\n end_time: 40.5\n numerator: 1\n denominator: 1\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 40.5\n end_time: 42.5\n numerator: 1\n denominator: 1\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n 
start_time: 42.5\n end_time: 44.5\n numerator: 1\n denominator: 1\n voice: 1\n }\n notes {\n pitch: 72\n velocity: 64\n start_time: 44.5\n end_time: 46.5\n numerator: 1\n denominator: 1\n voice: 1\n }\n total_time: 46.5\n part_infos {\n name: "Flute"\n }\n source_info {\n source_type: SCORE_BASED\n encoding_type: MUSIC_XML\n parser: MAGENTA_MUSIC_XML\n }\n ')
self.assertProtoEquals(expected_ns, ns)
|
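The reduced-versus-written distinction this test exercises can be sketched with the standard library; `reduced_and_written` is a hypothetical helper name, not part of the parser:

# Timing math can use the meter reduced to lowest terms, while the
# NoteSequence keeps the meter exactly as written in the score.
from fractions import Fraction

def reduced_and_written(numerator, denominator):
  reduced = Fraction(numerator, denominator)  # e.g. 2/2 -> 1/1 for timing
  as_written = (numerator, denominator)       # e.g. (2, 2) for the proto
  return reduced, as_written

assert reduced_and_written(2, 2) == (Fraction(1, 1), (2, 2))
assert reduced_and_written(6, 8) == (Fraction(3, 4), (6, 8))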
'Construct a ChordProgression.'
| def __init__(self, events=None, **kwargs):
| if ('pad_event' in kwargs):
del kwargs['pad_event']
super(ChordProgression, self).__init__(pad_event=NO_CHORD, events=events, **kwargs)
|
'Adds the given chord to the `events` list.
Every step from `start_step` through `end_step` - 1 is set to the given
chord figure. `events` is first resized (truncated, or padded with
NO_CHORD) so that the last event has index `end_step` - 1.
Args:
figure: Chord symbol figure. A string like "Cm9" representing the chord.
start_step: A non-negative integer step that the chord begins on.
end_step: An integer step that the chord ends on. The chord is considered
to end at the onset of the end step. `end_step` must be greater than
`start_step`.
Raises:
BadChordException: If `start_step` does not precede `end_step`.'
| def _add_chord(self, figure, start_step, end_step):
| if (start_step >= end_step):
raise BadChordException(('Start step does not precede end step: start=%d, end=%d' % (start_step, end_step)))
self.set_length(end_step)
for i in range(start_step, end_step):
self._events[i] = figure
|
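A minimal standalone sketch of `_add_chord`'s fill semantics, using a plain Python list in place of the sequence object (an illustration, not the library code):

NO_CHORD = 'N.C.'

def add_chord(events, figure, start_step, end_step):
  if start_step >= end_step:
    raise ValueError('start step does not precede end step')
  # Resize so the last event has index end_step - 1, padding with NO_CHORD.
  del events[end_step:]
  events.extend([NO_CHORD] * (end_step - len(events)))
  for i in range(start_step, end_step):
    events[i] = figure
  return events

assert add_chord([], 'Cm9', 2, 4) == [NO_CHORD, NO_CHORD, 'Cm9', 'Cm9']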
'Populate self with the chords from the given quantized NoteSequence.
A chord progression is extracted from the given sequence starting at time
step `start_step` and ending at time step `end_step`.
The number of time steps per bar is computed from the time signature in
`quantized_sequence`.
Args:
quantized_sequence: A quantized NoteSequence instance.
start_step: Start populating chords at this time step.
end_step: Stop populating chords at this time step.
Raises:
NonIntegerStepsPerBarException: If `quantized_sequence`\'s bar length
(derived from its time signature) is not an integer number of time
steps.
CoincidentChordsException: If any of the chords start on the same step.'
| def from_quantized_sequence(self, quantized_sequence, start_step, end_step):
| sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
self._reset()
steps_per_bar_float = sequences_lib.steps_per_bar_in_quantized_sequence(quantized_sequence)
if ((steps_per_bar_float % 1) != 0):
raise events_lib.NonIntegerStepsPerBarException(('There are %f timesteps per bar. Time signature: %d/%d' % (steps_per_bar_float, quantized_sequence.time_signature.numerator, quantized_sequence.time_signature.denominator)))
self._steps_per_bar = int(steps_per_bar_float)
self._steps_per_quarter = quantized_sequence.quantization_info.steps_per_quarter
chords = sorted([a for a in quantized_sequence.text_annotations if (a.annotation_type == CHORD_SYMBOL)], key=(lambda chord: chord.quantized_step))
prev_step = None
prev_figure = NO_CHORD
for chord in chords:
if (chord.quantized_step >= end_step):
break
elif (chord.quantized_step < start_step):
prev_step = chord.quantized_step
prev_figure = chord.text
continue
if (chord.quantized_step == prev_step):
if (chord.text == prev_figure):
continue
else:
self._reset()
raise CoincidentChordsException(('chords %s and %s are coincident' % (prev_figure, chord.text)))
if (chord.quantized_step > start_step):
if (prev_step is None):
start_index = 0
else:
start_index = (max(prev_step, start_step) - start_step)
end_index = (chord.quantized_step - start_step)
self._add_chord(prev_figure, start_index, end_index)
prev_step = chord.quantized_step
prev_figure = chord.text
if ((prev_step is None) or (prev_step < end_step)):
if (prev_step is None):
start_index = 0
else:
start_index = (max(prev_step, start_step) - start_step)
end_index = (end_step - start_step)
self._add_chord(prev_figure, start_index, end_index)
self._start_step = start_step
self._end_step = end_step
|
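The fill logic above is easier to follow in isolation. Here is a standalone sketch, under the simplifying assumption that no two chords are coincident, that turns sorted (step, figure) annotations into one figure per step in [start_step, end_step):

NO_CHORD = 'N.C.'

def chords_per_step(annotations, start_step, end_step):
  events = [NO_CHORD] * (end_step - start_step)
  prev_step, prev_figure = None, NO_CHORD
  for step, figure in annotations:
    if step >= end_step:
      break
    if step < start_step:
      # Remember the most recent chord before the window; it fills the start.
      prev_step, prev_figure = step, figure
      continue
    if step > start_step:
      begin = 0 if prev_step is None else max(prev_step, start_step) - start_step
      events[begin:step - start_step] = [prev_figure] * (step - start_step - begin)
    prev_step, prev_figure = step, figure
  if prev_step is None or prev_step < end_step:
    begin = 0 if prev_step is None else max(prev_step, start_step) - start_step
    events[begin:] = [prev_figure] * (len(events) - begin)
  return events

# A chord at step 2 ('C') and step 6 ('G'), extracted over steps [0, 8):
assert chords_per_step([(2, 'C'), (6, 'G')], 0, 8) == (
    [NO_CHORD, NO_CHORD, 'C', 'C', 'C', 'C', 'G', 'G'])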
'Converts the ChordProgression to NoteSequence proto.
This doesn\'t generate actual notes, but text annotations specifying the
chord changes when they occur.
Args:
sequence_start_time: A time in seconds (float) that the first chord in
the sequence will land on.
qpm: Quarter notes per minute (float).
Returns:
A NoteSequence proto encoding the given chords as text annotations.'
| def to_sequence(self, sequence_start_time=0.0, qpm=120.0):
| seconds_per_step = ((60.0 / qpm) / self.steps_per_quarter)
sequence = music_pb2.NoteSequence()
sequence.tempos.add().qpm = qpm
sequence.ticks_per_quarter = STANDARD_PPQ
current_figure = NO_CHORD
for (step, figure) in enumerate(self):
if (figure != current_figure):
current_figure = figure
chord = sequence.text_annotations.add()
chord.time = ((step * seconds_per_step) + sequence_start_time)
chord.text = figure
chord.annotation_type = CHORD_SYMBOL
return sequence
|
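The timing arithmetic is straightforward: at 120 qpm a quarter note lasts 0.5 seconds, so with 4 steps per quarter each step lasts 0.125 seconds. A stdlib-only sketch of the annotation times (`chord_change_times` is a hypothetical name):

def chord_change_times(figures, qpm=120.0, steps_per_quarter=4, start=0.0):
  seconds_per_step = (60.0 / qpm) / steps_per_quarter
  current = 'N.C.'  # NO_CHORD: a leading no-chord span emits no annotation
  changes = []
  for step, figure in enumerate(figures):
    if figure != current:
      current = figure
      changes.append((step * seconds_per_step + start, figure))
  return changes

# Four steps of C then four of G at 120 qpm -> changes at 0.0s and 0.5s.
assert chord_change_times(['C'] * 4 + ['G'] * 4) == [(0.0, 'C'), (0.5, 'G')]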
'Transpose chords in this ChordProgression.
Args:
transpose_amount: The number of half steps to transpose this
ChordProgression. Positive values transpose up. Negative values
transpose down.
Raises:
ChordSymbolException: If a chord (other than "no chord") fails to be
interpreted by the `chord_symbols_lib` module.'
| def transpose(self, transpose_amount):
| for i in range(len(self._events)):
if (self._events[i] != NO_CHORD):
self._events[i] = chord_symbols_lib.transpose_chord_symbol(self._events[i], (transpose_amount % NOTES_PER_OCTAVE))
|
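A standalone sketch of root transposition modulo one octave for simple chord figures (this is not `chord_symbols_lib`, which handles many more spellings; flat-only roots are an assumption here):

PITCHES = ['C', 'Db', 'D', 'Eb', 'E', 'F', 'Gb', 'G', 'Ab', 'A', 'Bb', 'B']
NOTES_PER_OCTAVE = 12

def transpose_simple_figure(figure, amount):
  # Assumes a single-letter root with an optional 'b' flat, e.g. 'Eb7'.
  root_len = 2 if len(figure) > 1 and figure[1] == 'b' else 1
  root, quality = figure[:root_len], figure[root_len:]
  index = (PITCHES.index(root) + amount) % NOTES_PER_OCTAVE
  return PITCHES[index] + quality

assert transpose_simple_figure('Cm9', 2) == 'Dm9'
assert transpose_simple_figure('Bb7', 3) == 'Db7'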
'Renders the chord symbols of a NoteSequence.
This function renders chord symbol annotations in a NoteSequence as actual
notes. Notes are added to the NoteSequence object, and the chord symbol
annotations are left in place.
Args:
sequence: The NoteSequence for which to render chord symbols.'
| @abc.abstractmethod
def render(self, sequence):
| pass
|
'Initialize a BasicChordRenderer object.
Args:
velocity: The MIDI note velocity to use.
instrument: The MIDI instrument to use.
program: The MIDI program to use.
octave: The octave in which to render chord notes. If the bass note is not
otherwise part of the chord, it will not be rendered in this octave.
bass_octave: The octave in which to render chord bass notes.'
| def __init__(self, velocity=100, instrument=1, program=88, octave=4, bass_octave=3):
| self._velocity = velocity
self._instrument = instrument
self._program = program
self._octave = octave
self._bass_octave = bass_octave
|
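The octave parameters reduce to MIDI pitch arithmetic; `pitch_in_octave` below is a hypothetical helper showing the placement a renderer needs, assuming the convention where C4 = 60:

def pitch_in_octave(pitch_class, octave):
  # MIDI convention with C4 = 60: pitch = 12 * (octave + 1) + pitch class.
  return 12 * (octave + 1) + pitch_class

assert pitch_in_octave(0, 4) == 60   # C4, middle C
assert pitch_in_octave(7, 3) == 55   # G3, e.g. a bass note an octave lower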
'Construct a LeadSheet.
If `melody` and `chords` are specified, instantiate with the provided
melody and chords. Otherwise, create an empty LeadSheet.
Args:
melody: A Melody object.
chords: A ChordProgression object.
Raises:
MelodyChordsMismatchException: If the melody and chord progression differ
in temporal resolution or position in the source sequence, or if only
one of melody or chords is specified.'
| def __init__(self, melody=None, chords=None):
| if ((melody is None) != (chords is None)):
raise MelodyChordsMismatchException('melody and chords must be both specified or both unspecified')
if (melody is not None):
self._from_melody_and_chords(melody, chords)
else:
self._reset()
|
'Clear events and reset object state.'
| def _reset(self):
| self._melody = melodies_lib.Melody()
self._chords = chords_lib.ChordProgression()
|
'Initializes a LeadSheet with a given melody and chords.
Args:
melody: A Melody object.
chords: A ChordProgression object.
Raises:
MelodyChordsMismatchException: If the melody and chord progression differ
in temporal resolution or position in the source sequence.'
| def _from_melody_and_chords(self, melody, chords):
| if ((len(melody) != len(chords)) or (melody.steps_per_bar != chords.steps_per_bar) or (melody.steps_per_quarter != chords.steps_per_quarter) or (melody.start_step != chords.start_step) or (melody.end_step != chords.end_step)):
raise MelodyChordsMismatchException()
self._melody = melody
self._chords = chords
|
'Return an iterator over (melody, chord) tuples in this LeadSheet.
Returns:
Python iterator over (melody, chord) event tuples.'
| def __iter__(self):
| return itertools.izip(self._melody, self._chords)
|
'Returns the melody-chord tuple at the given index.'
| def __getitem__(self, i):
| return (self._melody[i], self._chords[i])
|
'Returns a LeadSheet object for the given slice range.'
| def __getslice__(self, i, j):
| return LeadSheet(self._melody[i:j], self._chords[i:j])
|
'How many events (melody-chord tuples) are in this LeadSheet.
Returns:
Number of events as an integer.'
| def __len__(self):
| return len(self._melody)
|
'Return the melody of the lead sheet.
Returns:
The lead sheet melody, a Melody object.'
| @property
def melody(self):
| return self._melody
|
'Return the chord progression of the lead sheet.
Returns:
The lead sheet chords, a ChordProgression object.'
| @property
def chords(self):
| return self._chords
|
'Appends event to the end of the sequence and increments the end step.
Args:
event: The event (a melody-chord tuple) to append to the end.'
| def append(self, event):
| (melody_event, chord_event) = event
self._melody.append(melody_event)
self._chords.append(chord_event)
|
'Converts the LeadSheet to NoteSequence proto.
Args:
velocity: MIDI velocity to give each melody note. Between 1 and 127
(inclusive).
instrument: MIDI instrument to give each melody note.
sequence_start_time: A time in seconds (float) that the first note (and
chord) in the sequence will land on.
qpm: Quarter notes per minute (float).
Returns:
A NoteSequence proto encoding the melody and chords from the lead sheet.'
| def to_sequence(self, velocity=100, instrument=0, sequence_start_time=0.0, qpm=120.0):
| sequence = self._melody.to_sequence(velocity=velocity, instrument=instrument, sequence_start_time=sequence_start_time, qpm=qpm)
chord_sequence = self._chords.to_sequence(sequence_start_time=sequence_start_time, qpm=qpm)
for text_annotation in chord_sequence.text_annotations:
if (text_annotation.annotation_type == CHORD_SYMBOL):
chord = sequence.text_annotations.add()
chord.CopyFrom(text_annotation)
return sequence
|
'Transpose notes and chords in this LeadSheet.
All notes and chords are transposed the specified amount. Additionally,
all notes are octave shifted to lie within the [min_note, max_note) range.
Args:
transpose_amount: The number of half steps to transpose this
LeadSheet. Positive values transpose up. Negative values
transpose down.
min_note: Minimum pitch (inclusive) that the resulting notes will take on.
max_note: Maximum pitch (exclusive) that the resulting notes will take on.'
| def transpose(self, transpose_amount, min_note=0, max_note=128):
| self._melody.transpose(transpose_amount, min_note, max_note)
self._chords.transpose(transpose_amount)
|
'Transpose and octave shift the notes and chords in this LeadSheet.
Args:
min_note: Minimum pitch (inclusive) that the resulting notes will take on.
max_note: Maximum pitch (exclusive) that the resulting notes will take on.
transpose_to_key: The lead sheet is transposed to be in this key.
Returns:
The transpose amount, in half steps.'
| def squash(self, min_note, max_note, transpose_to_key):
| transpose_amount = self._melody.squash(min_note, max_note, transpose_to_key)
self._chords.transpose(transpose_amount)
return transpose_amount
|
'Sets the length of the lead sheet to the specified number of steps.
Args:
steps: How many steps long the lead sheet should be.'
| def set_length(self, steps):
| self._melody.set_length(steps)
self._chords.set_length(steps)
|
'Increase the resolution of a LeadSheet.
Increases the resolution of a LeadSheet object by a factor of `k`. This
increases the resolution of the melody and chords separately, using
MELODY_NO_EVENT to extend each event in the melody and simply repeating
each chord event `k` times.
Args:
k: An integer, the factor by which to increase the resolution of the lead
sheet.'
| def increase_resolution(self, k):
| self._melody.increase_resolution(k)
self._chords.increase_resolution(k)
|
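A standalone sketch of the two upsampling behaviors just described (an illustration; MELODY_NO_EVENT is -2 in magenta's melodies_lib):

MELODY_NO_EVENT = -2  # the no-event marker used by magenta's melodies_lib

def increase_melody_resolution(events, k):
  out = []
  for e in events:
    out.append(e)
    out.extend([MELODY_NO_EVENT] * (k - 1))  # sustain via no-events
  return out

def increase_chord_resolution(events, k):
  return [e for e in events for _ in range(k)]  # plain repetition

assert increase_melody_resolution([60, 62], 2) == [60, -2, 62, -2]
assert increase_chord_resolution(['C', 'G'], 2) == ['C', 'C', 'G', 'G']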
'Constructs a BaseModel.'
| def __init__(self):
| self._session = None
|
'Builds and returns the model graph for generation.
Will be called before restoring a checkpoint file.
Returns:
The tf.Graph object.'
| @abc.abstractmethod
def _build_graph_for_generation(self):
| pass
|
'Builds the TF graph given a checkpoint file.
Calls into _build_graph_for_generation, which must be implemented by the
subclass, before restoring the checkpoint.
Args:
checkpoint_file: The path to the checkpoint file that should be used.'
| def initialize_with_checkpoint(self, checkpoint_file):
| graph = self._build_graph_for_generation()
with graph.as_default():
saver = tf.train.Saver()
self._session = tf.Session()
tf.logging.info('Checkpoint used: %s', checkpoint_file)
saver.restore(self._session, checkpoint_file)
|
'Builds the TF graph with a checkpoint and metagraph.
Args:
checkpoint_filename: The path to the checkpoint file that should be used.
metagraph_filename: The path to the metagraph file that should be used.'
| def initialize_with_checkpoint_and_metagraph(self, checkpoint_filename, metagraph_filename):
| with tf.Graph().as_default():
self._session = tf.Session()
new_saver = tf.train.import_meta_graph(metagraph_filename)
new_saver.restore(self._session, checkpoint_filename)
|
'Writes the checkpoint and metagraph.
Args:
checkpoint_filename: Path to the checkpoint file.'
| def write_checkpoint_with_metagraph(self, checkpoint_filename):
| with self._session.graph.as_default():
saver = tf.train.Saver(sharded=False, write_version=tf.train.SaverDef.V1)
saver.save(self._session, checkpoint_filename, meta_graph_suffix='meta', write_meta_graph=True)
|
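With TF1's Saver, `save(..., meta_graph_suffix='meta', write_meta_graph=True)` writes the metagraph next to the checkpoint, so the output pairs directly with `initialize_with_checkpoint_and_metagraph`. A hedged usage sketch (paths are assumptions):

# Hypothetical round trip between the two methods above:
# model.write_checkpoint_with_metagraph('/tmp/model.ckpt')
#   -> writes '/tmp/model.ckpt' and '/tmp/model.ckpt.meta'
# model.initialize_with_checkpoint_and_metagraph(
#     '/tmp/model.ckpt', '/tmp/model.ckpt.meta')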
'Closes the TF session.'
| def close(self):
| self._session.close()
self._session = None
|
'Appends event to the end of the sequence.
Args:
event: The event to append to the end.'
| @abc.abstractmethod
def append(self, event):
| pass
|
'Sets the length of the sequence to the specified number of steps.
If the event sequence is not long enough, it will be padded to the
specified length. If it is too long, it will be truncated to the
requested length.
Args:
steps: How many steps long the event sequence should be.
from_left: Whether to add/remove from the left instead of right.'
| @abc.abstractmethod
def set_length(self, steps, from_left=False):
| pass
|
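A standalone sketch of the documented pad/truncate semantics for a list-backed sequence (an illustration, not SimpleEventSequence's implementation):

def set_length(events, steps, pad_event, from_left=False):
  if len(events) < steps:
    padding = [pad_event] * (steps - len(events))
    return padding + events if from_left else events + padding
  # Too long: drop events from the chosen side.
  return events[len(events) - steps:] if from_left else events[:steps]

assert set_length([1, 2, 3], 5, 0) == [1, 2, 3, 0, 0]
assert set_length([1, 2, 3], 2, 0, from_left=True) == [2, 3]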
'Returns the event at the given index.'
| @abc.abstractmethod
def __getitem__(self, i):
| pass
|
'Returns an iterator over the events.'
| @abc.abstractmethod
def __iter__(self):
| pass
|
'How many events are in this EventSequence.
Returns:
Number of events as an integer.'
| @abc.abstractmethod
def __len__(self):
| pass
|
'Construct a SimpleEventSequence.
If `events` is specified, instantiate with the provided event list.
Otherwise, create an empty SimpleEventSequence.
Args:
pad_event: Event value to use when padding sequences.
events: List of events to instantiate with.
start_step: The integer starting step offset.
steps_per_bar: The number of steps in a bar.
steps_per_quarter: The number of steps in a quarter note.'
| def __init__(self, pad_event, events=None, start_step=0, steps_per_bar=DEFAULT_STEPS_PER_BAR, steps_per_quarter=DEFAULT_STEPS_PER_QUARTER):
| self._pad_event = pad_event
if (events is not None):
self._from_event_list(events, start_step=start_step, steps_per_bar=steps_per_bar, steps_per_quarter=steps_per_quarter)
else:
self._events = []
self._steps_per_bar = steps_per_bar
self._steps_per_quarter = steps_per_quarter
self._start_step = start_step
self._end_step = start_step
|