'Clear events and reset object state.'
def _reset(self):
self._events = []
self._steps_per_bar = DEFAULT_STEPS_PER_BAR
self._steps_per_quarter = DEFAULT_STEPS_PER_QUARTER
self._start_step = 0
self._end_step = 0
'Initializes with a list of event values and sets attributes.'
def _from_event_list(self, events, start_step=0, steps_per_bar=DEFAULT_STEPS_PER_BAR, steps_per_quarter=DEFAULT_STEPS_PER_QUARTER):
self._events = list(events)
self._start_step = start_step
self._end_step = start_step + len(self)
self._steps_per_bar = steps_per_bar
self._steps_per_quarter = steps_per_quarter
'Return an iterator over the events in this SimpleEventSequence. Returns: Python iterator over events.'
def __iter__(self):
return iter(self._events)
'Returns the event at the given index.'
def __getitem__(self, i):
return self._events[i]
'Returns this sequence restricted to events in the given slice range.'
def __getslice__(self, i, j):
i = min(max(i, 0), len(self))
return type(self)(pad_event=self._pad_event,
                  events=self._events[i:j],
                  start_step=self.start_step + i,
                  steps_per_bar=self.steps_per_bar,
                  steps_per_quarter=self.steps_per_quarter)
'How many events are in this SimpleEventSequence. Returns: Number of events as an integer.'
def __len__(self):
return len(self._events)
'Appends event to the end of the sequence and increments the end step. Args: event: The event to append to the end.'
def append(self, event):
self._events.append(event)
self._end_step += 1
'Sets the length of the sequence to the specified number of steps. If the event sequence is not long enough, pads to make the sequence the specified length. If it is too long, it will be truncated to the requested length. Args: steps: How many steps long the event sequence should be. from_left: Whether to add/remove from the left instead of right.'
def set_length(self, steps, from_left=False):
if steps > len(self):
  if from_left:
    self._events[:0] = [self._pad_event] * (steps - len(self))
  else:
    self._events.extend([self._pad_event] * (steps - len(self)))
elif from_left:
  del self._events[0:-steps]
else:
  del self._events[steps:]

if from_left:
  self._start_step = self._end_step - steps
else:
  self._end_step = self._start_step + steps
'Increase the resolution of an event sequence. Increases the resolution of a SimpleEventSequence object by a factor of `k`. Args: k: An integer, the factor by which to increase the resolution of the event sequence. fill_event: Event value to use to extend each low-resolution event. If None, each low-resolution event value will be repeated `k` times.'
def increase_resolution(self, k, fill_event=None):
if fill_event is None:
  fill = lambda event: [event] * k
else:
  fill = lambda event: [event] + [fill_event] * (k - 1)

new_events = []
for event in self._events:
  new_events += fill(event)
self._events = new_events

self._start_step *= k
self._end_step *= k
self._steps_per_bar *= k
self._steps_per_quarter *= k
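To make the fill behavior concrete, here is a tiny standalone sketch of the same logic (the helper name `_fill` is illustrative, not part of the library):

def _fill(event, k, fill_event=None):
  if fill_event is None:
    return [event] * k
  return [event] + [fill_event] * (k - 1)

events = ['a', 'b']
print([x for e in events for x in _fill(e, 2)])       # ['a', 'a', 'b', 'b']
print([x for e in events for x in _fill(e, 2, '-')])  # ['a', '-', 'b', '-']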
'Compares PrettyMIDI object against a sequence proto. Args: midi: A pretty_midi.PrettyMIDI object. sequence_proto: A tensorflow.magenta.Sequence proto.'
def CheckPrettyMidiAndSequence(self, midi, sequence_proto):
self.assertEqual(len(midi.time_signature_changes),
                 len(sequence_proto.time_signatures))
for midi_time, sequence_time in zip(midi.time_signature_changes,
                                    sequence_proto.time_signatures):
  self.assertEqual(midi_time.numerator, sequence_time.numerator)
  self.assertEqual(midi_time.denominator, sequence_time.denominator)
  self.assertAlmostEqual(midi_time.time, sequence_time.time)

self.assertEqual(len(midi.key_signature_changes),
                 len(sequence_proto.key_signatures))
for midi_key, sequence_key in zip(midi.key_signature_changes,
                                  sequence_proto.key_signatures):
  self.assertEqual(midi_key.key_number % 12, sequence_key.key)
  # Floor division, so the Python 2 integer-division behavior is preserved.
  self.assertEqual(midi_key.key_number // 12, sequence_key.mode)
  self.assertAlmostEqual(midi_key.time, sequence_key.time)

midi_times, midi_qpms = midi.get_tempo_changes()
self.assertEqual(len(midi_times), len(sequence_proto.tempos))
self.assertEqual(len(midi_qpms), len(sequence_proto.tempos))
for midi_time, midi_qpm, sequence_tempo in zip(
    midi_times, midi_qpms, sequence_proto.tempos):
  self.assertAlmostEqual(midi_qpm, sequence_tempo.qpm)
  self.assertAlmostEqual(midi_time, sequence_tempo.time)

seq_instruments = defaultdict(lambda: defaultdict(list))
for seq_note in sequence_proto.notes:
  seq_instruments[
      (seq_note.instrument, seq_note.program, seq_note.is_drum)][
          'notes'].append(seq_note)
for seq_bend in sequence_proto.pitch_bends:
  seq_instruments[
      (seq_bend.instrument, seq_bend.program, seq_bend.is_drum)][
          'bends'].append(seq_bend)
for seq_control in sequence_proto.control_changes:
  seq_instruments[
      (seq_control.instrument, seq_control.program, seq_control.is_drum)][
          'controls'].append(seq_control)

# Keys are already (instrument, program, is_drum) tuples, so the default
# tuple ordering gives the sort we want.
sorted_seq_instrument_keys = sorted(seq_instruments.keys())

if seq_instruments:
  self.assertEqual(len(midi.instruments), len(seq_instruments))
else:
  self.assertEqual(1, len(midi.instruments))
  self.assertEqual(0, len(midi.instruments[0].notes))
  self.assertEqual(0, len(midi.instruments[0].pitch_bends))

for midi_instrument, seq_instrument_key in zip(
    midi.instruments, sorted_seq_instrument_keys):
  seq_instrument_notes = seq_instruments[seq_instrument_key]['notes']
  self.assertEqual(len(midi_instrument.notes), len(seq_instrument_notes))
  for midi_note, sequence_note in zip(midi_instrument.notes,
                                      seq_instrument_notes):
    self.assertEqual(midi_note.pitch, sequence_note.pitch)
    self.assertEqual(midi_note.velocity, sequence_note.velocity)
    self.assertAlmostEqual(midi_note.start, sequence_note.start_time)
    self.assertAlmostEqual(midi_note.end, sequence_note.end_time)

  seq_instrument_pitch_bends = seq_instruments[seq_instrument_key]['bends']
  self.assertEqual(len(midi_instrument.pitch_bends),
                   len(seq_instrument_pitch_bends))
  for midi_pitch_bend, sequence_pitch_bend in zip(
      midi_instrument.pitch_bends, seq_instrument_pitch_bends):
    self.assertEqual(midi_pitch_bend.pitch, sequence_pitch_bend.bend)
    self.assertAlmostEqual(midi_pitch_bend.time, sequence_pitch_bend.time)
'Test the translation from PrettyMIDI to Sequence proto.'
def CheckMidiToSequence(self, filename):
source_midi = pretty_midi.PrettyMIDI(filename)
sequence_proto = midi_io.midi_to_sequence_proto(source_midi)
self.CheckPrettyMidiAndSequence(source_midi, sequence_proto)
'Test the translation from Sequence proto to PrettyMIDI.'
def CheckSequenceToPrettyMidi(self, filename):
source_midi = pretty_midi.PrettyMIDI(filename)
sequence_proto = midi_io.midi_to_sequence_proto(source_midi)
translated_midi = midi_io.sequence_proto_to_pretty_midi(sequence_proto)
self.CheckPrettyMidiAndSequence(translated_midi, sequence_proto)
'Test writing to a MIDI file and comparing it to the original Sequence.'
def CheckReadWriteMidi(self, filename):
with tempfile.NamedTemporaryFile(prefix='MidiIoTest') as rewrite_file:
  original_midi = pretty_midi.PrettyMIDI(filename)
  original_midi.write(rewrite_file.name)
  source_midi = pretty_midi.PrettyMIDI(rewrite_file.name)
  sequence_proto = midi_io.midi_to_sequence_proto(source_midi)
  with tempfile.NamedTemporaryFile(prefix='MidiIoTest') as temp_file:
    midi_io.sequence_proto_to_midi_file(sequence_proto, temp_file.name)
    created_midi = pretty_midi.PrettyMIDI(temp_file.name)
    self.CheckPrettyMidiAndSequence(created_midi, sequence_proto)
'Verify that is_drum instruments are properly tracked. self.midi_is_drum_filename is a MIDI file containing two tracks set to channel 9 (is_drum == True). Each contains one NoteOn. This test is designed to catch a bug where the second track would lose is_drum, remapping the drum track to an instrument track.'
def testIsDrumDetection(self):
sequence_proto = midi_io.midi_file_to_sequence_proto(
    self.midi_is_drum_filename)
with tempfile.NamedTemporaryFile(prefix='MidiDrumTest') as temp_file:
  midi_io.sequence_proto_to_midi_file(sequence_proto, temp_file.name)
  midi_data1 = mido.MidiFile(filename=self.midi_is_drum_filename)
  midi_data2 = mido.MidiFile(filename=temp_file.name)
  channel_counts = [0, 0]
  for index, midi_data in enumerate([midi_data1, midi_data2]):
    for event in midi_data:
      if (event.type == 'note_on' and event.velocity > 0 and
          event.channel == 9):
        channel_counts[index] += 1
  self.assertEqual(channel_counts, [2, 2])
'Initialize a PianorollEncoderDecoder object. Args: input_size: The size of the input vector.'
def __init__(self, input_size=88):
self._input_size = input_size
'Returns the input vector for the given position in the event sequence. Args: events: A list-like sequence of PianorollSequence events. position: An integer event position in the event sequence. Returns: An input vector, a list of floats.'
def events_to_input(self, events, position):
return self._event_to_input(events[position])
'Returns the label for the given position in the event sequence. Args: events: A list-like sequence of PianorollSequence events. position: An integer event position in the event sequence. Returns: A label, an integer.'
def events_to_label(self, events, position):
return self._event_to_label(events[position])
'Returns the event for the given class index. This is the reverse process of the self.events_to_label method. Args: class_index: An integer in the range [0, self.num_classes). events: A list-like sequence of events. This object is not used in this implementation. Returns: A PianorollSequence event value.'
def class_index_to_event(self, class_index, events):
assert class_index < self.num_classes
event = []
for i in range(self.input_size):
  if class_index % 2:
    event.append(i)
  class_index >>= 1
assert class_index == 0
return tuple(event)
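The loop above reads `class_index` as a bitmask over pitch offsets: bit i set means pitch i is active. A standalone sketch of the decoding (the helper name is illustrative):

def _index_to_pitches(class_index, input_size):
  pitches = []
  for i in range(input_size):
    if class_index % 2:
      pitches.append(i)
    class_index >>= 1
  return tuple(pitches)

print(_index_to_pitches(0b101, 88))  # (0, 2): bits 0 and 2 are set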
'Extends the event sequences by adding the new samples. Args: pianoroll_seqs: A collection of PianorollSequences to append `samples` to. samples: A collection of binary arrays with active pitches set to 1 and inactive pitches set to 0, which will be added to the corresponding `pianoroll_seqs`. Raises: ValueError: if inputs are not of equal length.'
def extend_event_sequences(self, pianoroll_seqs, samples):
if len(pianoroll_seqs) != len(samples):
  raise ValueError('`pianoroll_seqs` and `samples` must have equal lengths.')
for pianoroll_seq, sample in zip(pianoroll_seqs, samples):
  event = tuple(np.where(sample)[0])
  pianoroll_seq.append(event)
'Construct a PianorollSequence. Exactly one of `quantized_sequence` or `steps_per_quarter` must be supplied. At most one of `quantized_sequence` and `events_list` may be supplied. Args: quantized_sequence: an optional quantized NoteSequence proto to base PianorollSequence on. events_list: an optional list of Pianoroll events to base PianorollSequence on. steps_per_quarter: how many steps a quarter note represents. Must be provided if `quantized_sequence` not given. start_step: The offset of this sequence relative to the beginning of the source sequence. If a quantized sequence is used as input, only notes starting after this step will be considered. min_pitch: The minimum valid pitch value, inclusive. max_pitch: The maximum valid pitch value, inclusive. split_repeats: Whether to force repeated notes to have a 0-state step between them when initializing from a quantized NoteSequence. shift_range: If True, assume that the given events_list is in the full MIDI pitch range and needs to be shifted and filtered based on `min_pitch` and `max_pitch`.'
def __init__(self, quantized_sequence=None, events_list=None, steps_per_quarter=None, start_step=0, min_pitch=MIN_MIDI_PITCH, max_pitch=MAX_MIDI_PITCH, split_repeats=True, shift_range=False):
assert (quantized_sequence, steps_per_quarter).count(None) == 1
assert (quantized_sequence, events_list).count(None) >= 1

self._min_pitch = min_pitch
self._max_pitch = max_pitch

if quantized_sequence:
  sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
  self._events = self._from_quantized_sequence(
      quantized_sequence, start_step, min_pitch, max_pitch, split_repeats)
  self._steps_per_quarter = (
      quantized_sequence.quantization_info.steps_per_quarter)
else:
  self._events = []
  self._steps_per_quarter = steps_per_quarter
  if events_list:
    for e in events_list:
      self.append(e, shift_range)

self._start_step = start_step
'Sets the length of the sequence to the specified number of steps. If the event sequence is not long enough, pads with silence to make the sequence the specified length. If it is too long, it will be truncated to the requested length. Note that this will append a STEP_END event to the end of the sequence if there is an unfinished step. Args: steps: How many quantized steps long the event sequence should be. from_left: Whether to add/remove from the left instead of right.'
def set_length(self, steps, from_left=False):
if from_left:
  raise NotImplementedError('from_left is not supported')

if self.num_steps < steps:
  self._events += [()] * (steps - self.num_steps)
elif self.num_steps > steps:
  del self._events[steps:]

assert self.num_steps == steps
'Appends the event to the end of the sequence. Args: event: The polyphonic event to append to the end. shift_range: If True, assume that the given event is in the full MIDI pitch range and needs to be shifted and filtered based on `min_pitch` and `max_pitch`. Raises: ValueError: If `event` is not a valid polyphonic event.'
def append(self, event, shift_range=False):
if shift_range:
  event = tuple(p - self._min_pitch for p in event
                if self._min_pitch <= p <= self._max_pitch)
self._events.append(event)
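A quick standalone illustration of the shift/filter step above, with hypothetical pitch bounds:

min_pitch, max_pitch = 21, 108  # e.g. the 88 piano keys
event = (20, 21, 60, 108, 109)
shifted = tuple(p - min_pitch for p in event if min_pitch <= p <= max_pitch)
print(shifted)  # (0, 39, 87): out-of-range pitches 20 and 109 are dropped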
'How many events are in this sequence. Returns: Number of events as an integer.'
def __len__(self):
return len(self._events)
'Returns the event at the given index.'
def __getitem__(self, i):
return self._events[i]
'Return an iterator over the events in this sequence.'
def __iter__(self):
return iter(self._events)
'Returns how many steps long this sequence is. Returns: Length of the sequence in quantized steps.'
@property
def num_steps(self):
return len(self)
'Populate self with events from the given quantized NoteSequence object. Args: quantized_sequence: A quantized NoteSequence instance. start_step: Start converting the sequence at this time step. Assumed to be the beginning of a bar. min_pitch: The minimum valid pitch value, inclusive. max_pitch: The maximum valid pitch value, inclusive. split_repeats: Whether to force repeated notes to have a 0-state step between them. Returns: A list of events.'
@staticmethod
def _from_quantized_sequence(quantized_sequence, start_step, min_pitch, max_pitch, split_repeats):
piano_roll = np.zeros(
    (quantized_sequence.total_quantized_steps - start_step,
     max_pitch - min_pitch + 1),
    np.bool)
for note in quantized_sequence.notes:
  if note.quantized_start_step < start_step:
    continue
  if not min_pitch <= note.pitch <= max_pitch:
    continue
  note_pitch_offset = note.pitch - min_pitch
  note_start_offset = note.quantized_start_step - start_step
  note_end_offset = note.quantized_end_step - start_step
  if split_repeats:
    piano_roll[note_start_offset - 1, note_pitch_offset] = 0
  piano_roll[note_start_offset:note_end_offset, note_pitch_offset] = 1
events = [tuple(np.where(frame)[0]) for frame in piano_roll]
return events
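To see what the final frame-to-event conversion yields, a small standalone sketch (values illustrative):

import numpy as np

piano_roll = np.zeros((3, 4), bool)
piano_roll[0:2, 1] = 1  # pitch offset 1 is active on steps 0 and 1
piano_roll[1, 3] = 1    # pitch offset 3 is active on step 1

print([tuple(np.where(frame)[0]) for frame in piano_roll])
# [(1,), (1, 3), ()]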
'Converts the PianorollSequence to NoteSequence proto. Args: velocity: Midi velocity to give each note. Between 1 and 127 (inclusive). instrument: Midi instrument to give each note. program: Midi program to give each note. qpm: Quarter notes per minute (float). base_note_sequence: A NoteSequence to use as a starting point. Must match the specified qpm. Raises: ValueError: if an unknown event is encountered. Returns: A NoteSequence proto.'
def to_sequence(self, velocity=100, instrument=0, program=0, qpm=constants.DEFAULT_QUARTERS_PER_MINUTE, base_note_sequence=None):
seconds_per_step = 60.0 / qpm / self._steps_per_quarter
sequence_start_time = self.start_step * seconds_per_step

if base_note_sequence:
  sequence = copy.deepcopy(base_note_sequence)
  if sequence.tempos[0].qpm != qpm:
    raise ValueError(
        'Supplied QPM (%d) does not match QPM of base_note_sequence (%d)' %
        (qpm, sequence.tempos[0].qpm))
else:
  sequence = music_pb2.NoteSequence()
  sequence.tempos.add().qpm = qpm
  sequence.ticks_per_quarter = STANDARD_PPQ

step = 0
open_notes = {}
for step, event in enumerate(self):
  frame_pitches = set(event)
  open_pitches = set(open_notes)

  for pitch_to_close in open_pitches - frame_pitches:
    note_to_close = open_notes[pitch_to_close]
    note_to_close.end_time = step * seconds_per_step + sequence_start_time
    del open_notes[pitch_to_close]

  for pitch_to_open in frame_pitches - open_pitches:
    new_note = sequence.notes.add()
    new_note.start_time = step * seconds_per_step + sequence_start_time
    new_note.pitch = pitch_to_open + self._min_pitch
    new_note.velocity = velocity
    new_note.instrument = instrument
    new_note.program = program
    open_notes[pitch_to_open] = new_note

final_step = step + (len(open_notes) > 0)
for note_to_close in open_notes.values():
  note_to_close.end_time = final_step * seconds_per_step + sequence_start_time

sequence.total_time = seconds_per_step * final_step + sequence_start_time
if sequence.notes:
  assert sequence.total_time >= sequence.notes[-1].end_time

return sequence
'Construct a DrumTrack.'
def __init__(self, events=None, **kwargs):
if 'pad_event' in kwargs:
  del kwargs['pad_event']
super(DrumTrack, self).__init__(pad_event=frozenset(), events=events, **kwargs)
'Initializes with a list of event values and sets attributes. Args: events: List of drum events to set drum track to. start_step: The integer starting step offset. steps_per_bar: The number of steps in a bar. steps_per_quarter: The number of steps in a quarter note. Raises: ValueError: If `events` contains an event that is not a valid drum event.'
def _from_event_list(self, events, start_step=0, steps_per_bar=DEFAULT_STEPS_PER_BAR, steps_per_quarter=DEFAULT_STEPS_PER_QUARTER):
for event in events:
  if not isinstance(event, frozenset):
    raise ValueError('Invalid drum event: %s' % event)
  if not all(MIN_MIDI_PITCH <= drum <= MAX_MIDI_PITCH for drum in event):
    raise ValueError('Drum event contains invalid note: %s' % event)
super(DrumTrack, self)._from_event_list(
    events, start_step=start_step, steps_per_bar=steps_per_bar,
    steps_per_quarter=steps_per_quarter)
'Appends the event to the end of the drums and increments the end step. Args: event: The drum event to append to the end. Raises: ValueError: If `event` is not a valid drum event.'
def append(self, event):
if not isinstance(event, frozenset):
  raise ValueError('Invalid drum event: %s' % event)
if not all(MIN_MIDI_PITCH <= drum <= MAX_MIDI_PITCH for drum in event):
  raise ValueError('Drum event contains invalid note: %s' % event)
super(DrumTrack, self).append(event)
'Populate self with drums from the given quantized NoteSequence object. A drum track is extracted from the given quantized sequence starting at time step `start_step`. `start_step` can be used to drive extraction of multiple drum tracks from the same quantized sequence. The end step of the extracted drum track will be stored in `self._end_step`. 0 velocity notes are ignored. The drum extraction is ended when there are no drums for a time stretch of `gap_bars` in bars (measures) of music. The number of time steps per bar is computed from the time signature in `quantized_sequence`. Each drum event is a Python frozenset of simultaneous (after quantization) drum "pitches", or an empty frozenset to indicate no drums are played. Args: quantized_sequence: A quantized NoteSequence instance. search_start_step: Start searching for drums at this time step. Assumed to be the beginning of a bar. gap_bars: If this many bars or more follow a non-empty drum event, the drum track is ended. pad_end: If True, the end of the drums will be padded with empty events so that it will end at a bar boundary. ignore_is_drum: Whether to accept notes where `is_drum` is False. Raises: NonIntegerStepsPerBarException: If `quantized_sequence`\'s bar length (derived from its time signature) is not an integer number of time steps.'
def from_quantized_sequence(self, quantized_sequence, search_start_step=0, gap_bars=1, pad_end=False, ignore_is_drum=False):
sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
self._reset()

steps_per_bar_float = sequences_lib.steps_per_bar_in_quantized_sequence(
    quantized_sequence)
if steps_per_bar_float % 1 != 0:
  raise events_lib.NonIntegerStepsPerBarException(
      'There are %f timesteps per bar. Time signature: %d/%d' %
      (steps_per_bar_float, quantized_sequence.time_signatures[0].numerator,
       quantized_sequence.time_signatures[0].denominator))
self._steps_per_bar = steps_per_bar = int(steps_per_bar_float)
self._steps_per_quarter = (
    quantized_sequence.quantization_info.steps_per_quarter)

# Group all drum notes that start on the same step.
all_notes = [note for note in quantized_sequence.notes
             if ((note.is_drum or ignore_is_drum) and note.velocity and
                 note.quantized_start_step >= search_start_step)]
grouped_notes = collections.defaultdict(list)
for note in all_notes:
  grouped_notes[note.quantized_start_step].append(note)
notes = sorted(grouped_notes.items(), key=operator.itemgetter(0))

if not notes:
  return

gap_start_index = 0
track_start_step = (
    notes[0][0] - (notes[0][0] - search_start_step) % steps_per_bar)
for start, group in notes:
  start_index = start - track_start_step
  pitches = frozenset(note.pitch for note in group)

  # If a gap of `gap_bars` or more bars is found, end the drum track there.
  note_distance = start_index - gap_start_index
  if len(self) and note_distance >= gap_bars * steps_per_bar:
    break

  # Add a drum event, padding with empty events if necessary.
  self.set_length(start_index + 1)
  self._events[start_index] = pitches
  gap_start_index = start_index + 1

if not self._events:
  return

self._start_step = track_start_step

length = len(self)
if pad_end:
  # Round up to a whole number of bars.
  length += -len(self) % steps_per_bar
self.set_length(length)
'Converts the DrumTrack to NoteSequence proto. Args: velocity: Midi velocity to give each note. Between 1 and 127 (inclusive). instrument: Midi instrument to give each note. program: Midi program to give each note. sequence_start_time: A time in seconds (float) that the first event in the sequence will land on. qpm: Quarter notes per minute (float). Returns: A NoteSequence proto encoding the given drum track.'
def to_sequence(self, velocity=100, instrument=9, program=0, sequence_start_time=0.0, qpm=120.0):
seconds_per_step = 60.0 / qpm / self.steps_per_quarter
sequence = music_pb2.NoteSequence()
sequence.tempos.add().qpm = qpm
sequence.ticks_per_quarter = STANDARD_PPQ

sequence_start_time += self.start_step * seconds_per_step
for step, event in enumerate(self):
  for pitch in event:
    note = sequence.notes.add()
    note.start_time = step * seconds_per_step + sequence_start_time
    note.end_time = (step + 1) * seconds_per_step + sequence_start_time
    note.pitch = pitch
    note.velocity = velocity
    note.instrument = instrument
    note.program = program
    note.is_drum = True

if sequence.notes:
  sequence.total_time = sequence.notes[-1].end_time

return sequence
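The time arithmetic above is just 60 / qpm / steps_per_quarter seconds per step. A worked check:

qpm, steps_per_quarter = 120.0, 4
seconds_per_step = 60.0 / qpm / steps_per_quarter
print(seconds_per_step)      # 0.125, i.e. a sixteenth note at 120 BPM
print(7 * seconds_per_step)  # 0.875: step 7 begins at 0.875 seconds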
'Increase the resolution of a DrumTrack. Increases the resolution of a DrumTrack object by a factor of `k`. This uses empty events to extend each event in the drum track to be `k` steps long. Args: k: An integer, the factor by which to increase the resolution of the drum track.'
def increase_resolution(self, k):
super(DrumTrack, self).increase_resolution(k, fill_event=frozenset())
'Returns a message using the signal\'s specifications, if possible.'
def to_message(self):
if self._msg:
  return self._msg
if not self._type:
  raise MidiHubException('Cannot build message if type is not inferrable.')
return mido.Message(self._type, **self._kwargs)
'Returns a regex pattern for matching against a mido.Message string.'
def __str__(self):
if self._msg is not None:
  regex_pattern = (
      '^' + mido.messages.format_as_string(self._msg, include_time=False) +
      r' time=\d+.\d+$')
else:
  parts = ['.*' if self._type is None else self._type]
  for name in mido.messages.SPEC_BY_TYPE[
      self._inferred_types[0]]['value_names']:
    if name in self._kwargs:
      parts.append('%s=%d' % (name, self._kwargs[name]))
    else:
      parts.append(r'%s=\d+' % name)
  regex_pattern = '^' + ' '.join(parts) + r' time=\d+.\d+$'
return regex_pattern
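For intuition, the pattern produced for a signal constructed with type='note_on' and note=60 would look roughly like the following (mido lists the note_on value names as channel, note, velocity; treat the exact layout as illustrative):

import re

pattern = r'^note_on channel=\d+ note=60 velocity=\d+ time=\d+.\d+$'
msg_str = 'note_on channel=0 note=60 velocity=64 time=1517.341'
print(bool(re.match(pattern, msg_str)))  # True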
'Updates Metronome options.'
def update(self, qpm, start_time, stop_time=None, program=_DEFAULT_METRONOME_PROGRAM, signals=None, duration=_DEFAULT_METRONOME_TICK_DURATION, channel=None):
self._channel = _DEFAULT_METRONOME_CHANNEL if channel is None else channel
self._outport.send(
    mido.Message(type='program_change', program=program,
                 channel=self._channel))
self._period = 60.0 / qpm
self._start_time = start_time
self._stop_time = stop_time
self._messages = (
    _DEFAULT_METRONOME_MESSAGES if signals is None else
    [s.to_message() if s else None for s in signals])
self._duration = duration
'Sends message on the qpm interval until stop signal received.'
def run(self):
sleeper = concurrency.Sleeper()
while True:
  now = time.time()
  tick_number = max(0, int((now - self._start_time) // self._period) + 1)
  tick_time = tick_number * self._period + self._start_time

  if self._stop_time is not None and self._stop_time < tick_time:
    break

  sleeper.sleep_until(tick_time)

  metric_position = tick_number % len(self._messages)
  tick_message = self._messages[metric_position]

  if tick_message is None:
    continue

  tick_message.channel = self._channel
  self._outport.send(tick_message)

  if tick_message.type == 'note_on':
    sleeper.sleep(self._duration)
    end_tick_message = mido.Message(
        'note_off', note=tick_message.note, channel=self._channel)
    self._outport.send(end_tick_message)
'Signals for the metronome to stop. Args: stop_time: The float wall time in seconds after which the metronome should stop. By default, stops at next tick. block: If true, blocks until thread terminates.'
def stop(self, stop_time=0, block=True):
self._stop_time = stop_time
if block:
  self.join()
'Updates sequence being played by the MidiPlayer. Adds events to close any notes that are no longer being closed by the new sequence using the times when they would have been closed by the previous sequence. Args: sequence: The NoteSequence to play back. start_time: The float time before which to strip events. Defaults to call time. Raises: MidiHubException: If called when _allow_updates is False.'
@concurrency.serialized
def update_sequence(self, sequence, start_time=None):
if start_time is None:
  start_time = time.time()

if not self._allow_updates:
  raise MidiHubException(
      'Attempted to update a MidiPlayer sequence with updates disabled.')

new_message_list = []
# The set of pitches that are already playing and will be closed without
# first being reopened in the new sequence.
closed_notes = set()
for note in sequence.notes:
  if note.start_time >= start_time:
    new_message_list.append(
        mido.Message(type='note_on', note=note.pitch,
                     velocity=note.velocity, time=note.start_time))
    new_message_list.append(
        mido.Message(type='note_off', note=note.pitch, time=note.end_time))
  elif note.end_time >= start_time and note.pitch in self._open_notes:
    new_message_list.append(
        mido.Message(type='note_off', note=note.pitch, time=note.end_time))
    closed_notes.add(note.pitch)

# Close remaining open notes at the next event time to avoid abruptly
# ending notes.
notes_to_close = self._open_notes - closed_notes
if notes_to_close:
  next_event_time = (
      min(msg.time for msg in new_message_list) if new_message_list else 0)
  for note in notes_to_close:
    new_message_list.append(
        mido.Message(type='note_off', note=note, time=next_event_time))

for msg in new_message_list:
  msg.channel = self._channel
  msg.time += self._offset

self._message_queue = deque(
    sorted(new_message_list, key=lambda msg: (msg.time, msg.note)))
self._update_cv.notify()
'Plays messages in the queue until empty and _allow_updates is False.'
@concurrency.serialized
def run(self):
while self._message_queue and self._message_queue[0].time < time.time():
  self._message_queue.popleft()

while True:
  while self._message_queue:
    delta = self._message_queue[0].time - time.time()
    if delta > 0:
      self._update_cv.wait(timeout=delta)
    else:
      msg = self._message_queue.popleft()
      if msg.type == 'note_on':
        self._open_notes.add(msg.note)
      elif msg.type == 'note_off':
        self._open_notes.discard(msg.note)
      self._outport.send(msg)

  # Either keep the player alive waiting for a sequence update, or return.
  if self._allow_updates:
    self._update_cv.wait()
  else:
    break
'Signals for the playback to stop and ends all open notes. Args: block: If true, blocks until thread terminates.'
def stop(self, block=True):
with self._lock:
  if not self._stop_signal.is_set():
    self._stop_signal.set()
    self._allow_updates = False
    self._message_queue.clear()
    for note in self._open_notes:
      self._message_queue.append(
          mido.Message(type='note_off', note=note, time=time.time()))
    self._update_cv.notify()
if block:
  self.join()
'Updates the start time, removing any notes that started before it.'
@start_time.setter
@concurrency.serialized
def start_time(self, value):
self._start_time = value
i = 0
for note in self._captured_sequence.notes:
  if note.start_time >= self._start_time:
    break
  i += 1
del self._captured_sequence.notes[:i]
'Adds received mido.Message to the queue for capture. Args: msg: The incoming mido.Message object to add to the queue for capture. The time attribute is assumed to be pre-set with the wall time when the message was received. Raises: MidiHubException: When the received message has an empty time attribute.'
def receive(self, msg):
if not msg.time:
  raise MidiHubException(
      'MidiCaptor received message with empty time attribute: %s' % msg)
self._receive_queue.put(msg)
'Handles a single incoming MIDI message during capture. Must be serialized in children. Args: msg: The incoming mido.Message object to capture. The time field is assumed to be pre-filled with the wall time when the message was received.'
@abc.abstractmethod
def _capture_message(self, msg):
pass
'Adds and returns a new open note based on the MIDI message.'
def _add_note(self, msg):
new_note = self._captured_sequence.notes.add()
new_note.start_time = msg.time
new_note.pitch = msg.note
new_note.velocity = msg.velocity
new_note.is_drum = (msg.channel == _DRUM_CHANNEL)
return new_note
'Captures incoming messages until stop time or signal received.'
def run(self):
while True:
  timeout = None
  stop_time = self._stop_time
  if stop_time is not None:
    timeout = stop_time - time.time()
    if timeout <= 0:
      break
  try:
    msg = self._receive_queue.get(block=True, timeout=timeout)
  except Queue.Empty:
    continue

  if msg is MidiCaptor._WAKE_MESSAGE:
    continue

  if msg.time <= self._start_time:
    continue

  if self._stop_regex.match(str(msg)) is not None:
    break

  with self._lock:
    msg_str = str(msg)
    for regex, queue in self._iter_signals:
      if regex.match(msg_str) is not None:
        queue.put(msg.copy())

  self._capture_message(msg)

stop_time = self._stop_time
end_time = stop_time if stop_time is not None else msg.time

with self._lock:
  # Set the final captured sequence and wake up all iterators.
  self._captured_sequence = self.captured_sequence(end_time)
  for regex, queue in self._iter_signals:
    queue.put(MidiCaptor._WAKE_MESSAGE)
'Ends capture and truncates the captured sequence at `stop_time`. Args: stop_time: The float time in seconds to stop the capture, or None if it should be stopped now. May be in the past, in which case the captured sequence will be truncated appropriately. block: If True, blocks until the thread terminates. Raises: MidiHubException: When called multiple times with a `stop_time`.'
def stop(self, stop_time=None, block=True):
with self._lock:
  if self._stop_signal.is_set():
    if stop_time is not None:
      raise MidiHubException(
          '`stop` must not be called multiple times with a `stop_time` on '
          'MidiCaptor.')
  else:
    self._stop_signal.set()
    self._stop_time = time.time() if stop_time is None else stop_time
    self._receive_queue.put(MidiCaptor._WAKE_MESSAGE)
if block:
  self.join()
'Returns a copy of the current captured sequence. If called before the thread terminates, `end_time` is required and any open notes will have their end time set to it, any notes starting after it will be removed, and any notes ending after it will be truncated. `total_time` will also be set to `end_time`. Args: end_time: The float time in seconds to close any open notes and after which to close or truncate notes, if the thread is still alive. Otherwise, must be None. Returns: A copy of the current captured NoteSequence proto with open notes closed at and later notes removed or truncated to `end_time`. Raises: MidiHubException: When the thread is alive and `end_time` is None or the thread is terminated and `end_time` is not None.'
def captured_sequence(self, end_time=None):
current_captured_sequence = music_pb2.NoteSequence()
with self._lock:
  current_captured_sequence.CopyFrom(self._captured_sequence)

if self.is_alive():
  if end_time is None:
    raise MidiHubException(
        '`end_time` must be provided when capture thread is still running.')
  for i, note in enumerate(current_captured_sequence.notes):
    if note.start_time >= end_time:
      del current_captured_sequence.notes[i:]
      break
    if not note.end_time or note.end_time > end_time:
      note.end_time = end_time
  current_captured_sequence.total_time = end_time
elif end_time is not None:
  raise MidiHubException(
      '`end_time` must not be provided when capture is complete.')

return current_captured_sequence
'Yields the captured sequence at every signal message or time period. Exactly one of `signal` or `period` must be specified. Continues until the captor terminates, at which point the final captured sequence is yielded before returning. If consecutive calls to iterate are longer than the period, immediately yields and logs a warning. Args: signal: A MidiSignal to use as a signal to yield, or None. period: A float period in seconds, or None. Yields: The captured NoteSequence at event time. Raises: MidiHubException: If neither or both of `signal` and `period` are specified.'
def iterate(self, signal=None, period=None):
if (signal, period).count(None) != 1:
  raise MidiHubException(
      'Exactly one of `signal` or `period` must be provided to `iterate` '
      'call.')

if signal is None:
  sleeper = concurrency.Sleeper()
  next_yield_time = time.time() + period
else:
  regex = re.compile(str(signal))
  queue = Queue.Queue()
  with self._lock:
    self._iter_signals.append((regex, queue))

while self.is_alive():
  if signal is None:
    skipped_periods = (time.time() - next_yield_time) // period
    if skipped_periods > 0:
      tf.logging.warn(
          'Skipping %d %.3fs period(s) to catch up on iteration.',
          skipped_periods, period)
      next_yield_time += skipped_periods * period
    else:
      sleeper.sleep_until(next_yield_time)
    end_time = next_yield_time
    next_yield_time += period
  else:
    signal_msg = queue.get()
    if signal_msg is MidiCaptor._WAKE_MESSAGE:
      self.join()
      break
    end_time = signal_msg.time
  with self._lock:
    if not self.is_alive():
      break
    captured_sequence = self.captured_sequence(end_time)
  yield captured_sequence
yield self.captured_sequence()
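A hedged usage sketch of period-based iteration (assumes `hub` is an already-constructed MidiHub; names mirror the methods in this section):

captor = hub.start_capture(qpm=120, start_time=time.time())
for seq in captor.iterate(period=1.0):  # yields roughly once per second
  print('captured %d notes so far' % len(seq.notes))
  if len(seq.notes) > 16:
    captor.stop()  # the loop ends after the final sequence is yielded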
'Calls `fn` at every signal message or time period. The callback function must take exactly one argument, which will be the current captured NoteSequence. Exactly one of `signal` or `period` must be specified. Continues until the captor thread terminates, at which point the callback is called with the final sequence, or `cancel_callback` is called. If callback execution is longer than a period, immediately calls upon completion and logs a warning. Args: fn: The callback function to call, passing in the captured sequence. signal: A MidiSignal to use as a signal to call `fn` on the current captured sequence, or None. period: A float period in seconds to specify how often to call `fn`, or None. Returns: The unique name of the callback thread to enable cancellation. Raises: MidiHubException: If neither or both of `signal` and `period` are specified.'
def register_callback(self, fn, signal=None, period=None):
class IteratorCallback(threading.Thread):
  """A thread for executing a callback on each iteration."""

  def __init__(self, iterator, fn):
    self._iterator = iterator
    self._fn = fn
    self._stop_signal = threading.Event()
    super(IteratorCallback, self).__init__()

  def run(self):
    """Calls the callback function for each iterator value."""
    for captured_sequence in self._iterator:
      if self._stop_signal.is_set():
        break
      self._fn(captured_sequence)

  def stop(self):
    """Stops the thread on next iteration, without blocking."""
    self._stop_signal.set()

t = IteratorCallback(self.iterate(signal, period), fn)
t.start()

with self._lock:
  assert t.name not in self._callbacks
  self._callbacks[t.name] = t

return t.name
'Cancels the callback with the given name. While the thread may continue to run until the next iteration, the callback function will not be executed. Args: name: The unique name of the callback thread to cancel.'
@concurrency.serialized
def cancel_callback(self, name):
self._callbacks[name].stop()
del self._callbacks[name]
'Handles a single incoming MIDI message during capture. If the message is a note_on event, ends the previous note (if applicable) and opens a new note in the capture sequence. Ignores repeated note_on events. If the message is a note_off event matching the current open note in the capture sequence, ends that note. Args: msg: The mido.Message MIDI message to handle.'
@concurrency.serialized
def _capture_message(self, msg):
if msg.type == 'note_off' or (msg.type == 'note_on' and msg.velocity == 0):
  if self._open_note is None or msg.note != self._open_note.pitch:
    return
  self._open_note.end_time = msg.time
  self._open_note = None
elif msg.type == 'note_on':
  if self._open_note:
    if self._open_note.pitch == msg.note:
      return
    self._open_note.end_time = msg.time
  self._open_note = self._add_note(msg)
'Handles a single incoming MIDI message during capture. Args: msg: The mido.Message MIDI message to handle.'
@concurrency.serialized
def _capture_message(self, msg):
if msg.type == 'note_off' or (msg.type == 'note_on' and msg.velocity == 0):
  if msg.note not in self._open_notes:
    return
  self._open_notes[msg.note].end_time = msg.time
  del self._open_notes[msg.note]
elif msg.type == 'note_on':
  if msg.note in self._open_notes:
    return
  new_note = self._add_note(msg)
  self._open_notes[new_note.pitch] = new_note
'Stops all running threads and waits for them to terminate.'
def __del__(self):
for captor in self._captors:
  captor.stop(block=False)
for player in self._players:
  player.stop(block=False)
self.stop_metronome()
for captor in self._captors:
  captor.join()
for player in self._players:
  player.join()
'Sets passthrough value, closing all open notes if being disabled.'
@passthrough.setter
@concurrency.serialized
def passthrough(self, value):
if self._passthrough == value:
  return
while self._open_notes:
  self._outport.send(mido.Message('note_off', note=self._open_notes.pop()))
self._passthrough = value
'Stamps message with current time and passes it to the handler.'
def _timestamp_and_handle_message(self, msg):
if msg.type == 'program_change':
  return
if not msg.time:
  msg.time = time.time()
self._handle_message(msg)
'Handles a single incoming MIDI message. -If the message is being used as a signal, notifies threads waiting on the appropriate condition variable. -Adds the message to any capture queues. -Passes the message through to the output port, if appropriate. Args: msg: The mido.Message MIDI message to handle.'
@concurrency.serialized
def _handle_message(self, msg):
msg_str = str(msg)

# Notify any threads waiting for this message.
for regex in list(self._signals):
  if regex.match(msg_str) is not None:
    self._signals[regex].notify_all()
    del self._signals[regex]

# Call any callbacks waiting for this message.
for regex in list(self._callbacks):
  if regex.match(msg_str) is not None:
    for fn in self._callbacks[regex]:
      threading.Thread(target=fn, args=(msg,)).start()
    del self._callbacks[regex]

# Remove any captors that are no longer alive and pass a copy of the
# message to the remaining ones.
self._captors[:] = [t for t in self._captors if t.is_alive()]
for t in self._captors:
  t.receive(msg.copy())

# Update control values if this is a control change message.
if msg.type == 'control_change':
  if self._control_values.get(msg.control, None) != msg.value:
    tf.logging.debug('Control change %d: %d', msg.control, msg.value)
  self._control_values[msg.control] = msg.value

# Pass the message through to the output port, if appropriate.
if not self._passthrough:
  pass
elif self._texture_type == TextureType.POLYPHONIC:
  if msg.type == 'note_on' and msg.velocity > 0:
    self._open_notes.add(msg.note)
  elif (msg.type == 'note_off' or
        (msg.type == 'note_on' and msg.velocity == 0)):
    self._open_notes.discard(msg.note)
  self._outport.send(msg)
elif self._texture_type == TextureType.MONOPHONIC:
  assert len(self._open_notes) <= 1
  if msg.type not in ['note_on', 'note_off']:
    self._outport.send(msg)
  elif ((msg.type == 'note_off' or
         (msg.type == 'note_on' and msg.velocity == 0)) and
        msg.note in self._open_notes):
    self._outport.send(msg)
    self._open_notes.remove(msg.note)
  elif msg.type == 'note_on' and msg.velocity > 0:
    if self._open_notes:
      self._outport.send(
          mido.Message('note_off', note=self._open_notes.pop()))
    self._outport.send(msg)
    self._open_notes.add(msg.note)
'Starts a MidiCaptor to compile incoming messages into a NoteSequence. If neither `stop_time` nor `stop_signal` is provided, the caller must explicitly stop the returned capture thread. If both are specified, the one that occurs first will stop the capture. Args: qpm: The integer quarters per minute to use for the captured sequence. start_time: The float wall time in seconds to start the capture. May be in the past. Used for beat alignment. stop_time: The optional float wall time in seconds to stop the capture. stop_signal: The optional mido.Message to use as a signal to use to stop the capture. Returns: The MidiCaptor thread.'
def start_capture(self, qpm, start_time, stop_time=None, stop_signal=None):
captor_class = (MonophonicMidiCaptor
                if self._texture_type == TextureType.MONOPHONIC
                else PolyphonicMidiCaptor)
captor = captor_class(qpm, start_time, stop_time, stop_signal)
with self._lock:
  self._captors.append(captor)
captor.start()
return captor
'Compiles and returns incoming messages into a NoteSequence. Blocks until capture stops. At least one of `stop_time` or `stop_signal` must be specified. If both are specified, the one that occurs first will stop the capture. Args: qpm: The integer quarters per minute to use for the captured sequence. start_time: The float wall time in seconds to start the capture. May be in the past. Used for beat alignment. stop_time: The optional float wall time in seconds to stop the capture. stop_signal: The optional mido.Message to use as a signal to use to stop the capture. Returns: The captured NoteSequence proto. Raises: MidiHubException: When neither `stop_time` nor `stop_signal` are provided.'
def capture_sequence(self, qpm, start_time, stop_time=None, stop_signal=None):
if stop_time is None and stop_signal is None:
  raise MidiHubException(
      'At least one of `stop_time` and `stop_signal` must be provided to '
      '`capture_sequence` call.')
captor = self.start_capture(qpm, start_time, stop_time, stop_signal)
captor.join()
return captor.captured_sequence()
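For example, a blocking five-second capture might look like this (illustrative; assumes an initialized `hub`):

start = time.time()
sequence = hub.capture_sequence(qpm=120, start_time=start,
                                stop_time=start + 5.0)
print('captured %d notes in 5 seconds' % len(sequence.notes))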
'Blocks until a matching mido.Message arrives or the timeout occurs. Exactly one of `signal` or `timeout` must be specified. Using a timeout with a threading.Condition object causes additional delays when notified. Args: signal: A MidiSignal to use as a signal to stop waiting, or None. timeout: A float timeout in seconds, or None. Raises: MidiHubException: If neither or both of `signal` and `timeout` are specified.'
@concurrency.serialized
def wait_for_event(self, signal=None, timeout=None):
if (signal, timeout).count(None) != 1:
  raise MidiHubException(
      'Exactly one of `signal` or `timeout` must be provided to '
      '`wait_for_event` call.')

if signal is None:
  concurrency.Sleeper().sleep(timeout)
  return

signal_pattern = str(signal)
cond_var = None
for regex, cond_var in self._signals.items():
  if regex.pattern == signal_pattern:
    break
if cond_var is None:
  cond_var = threading.Condition(self._lock)
  self._signals[re.compile(signal_pattern)] = cond_var

cond_var.wait()
'Wakes all threads waiting on a signal event. Args: signal: The MidiSignal to wake threads waiting on, or None to wake all.'
@concurrency.serialized
def wake_signal_waiters(self, signal=None):
for regex in list(self._signals):
  if signal is None or regex.pattern == str(signal):
    self._signals[regex].notify_all()
    del self._signals[regex]
for captor in self._captors:
  captor.wake_signal_waiters(signal)
'Starts or updates the metronome with the given arguments. Args: qpm: The quarter notes per minute to use. start_time: The wall time in seconds that the metronome is started on for synchronization and beat alignment. May be in the past. signals: An ordered collection of MidiSignals whose underlying messages are to be output on the metronome\'s tick, cyclically. A None value can be used in place of a MidiSignal to output nothing on a given tick. channel: The MIDI channel to output ticks on.'
@concurrency.serialized
def start_metronome(self, qpm, start_time, signals=None, channel=None):
if self._metronome is not None and self._metronome.is_alive():
  self._metronome.update(qpm, start_time, signals=signals, channel=channel)
else:
  self._metronome = Metronome(self._outport, qpm, start_time,
                              signals=signals, channel=channel)
  self._metronome.start()
'Stops the metronome at the given time if it is currently running. Args: stop_time: The float wall time in seconds after which the metronome should stop. By default, stops at next tick. block: If true, blocks until metronome is stopped.'
@concurrency.serialized
def stop_metronome(self, stop_time=0, block=True):
if self._metronome is None:
  return
self._metronome.stop(stop_time, block)
self._metronome = None
'Plays the notes in a NoteSequence via the MIDI output port. Args: sequence: The NoteSequence to play, with times based on the wall clock. start_time: The float time before which to strip events. Defaults to call time. Events before this time will be sent immediately on start. allow_updates: A boolean specifying whether or not the player should allow the sequence to be updated and stay alive until `stop` is called. Returns: The MidiPlayer thread handling playback to enable updating.'
def start_playback(self, sequence, start_time=None, allow_updates=False):
if start_time is None:
  # Evaluate the default at call time, as the docstring promises, rather
  # than once at function definition time.
  start_time = time.time()
player = MidiPlayer(self._outport, sequence, start_time, allow_updates,
                    self._playback_channel, self._playback_offset)
with self._lock:
  self._players.append(player)
player.start()
return player
'Returns the most recently received value for the given control number. Args: control_number: The integer control number to return the value for, or None. Returns: The most recently received integer value for the given control number, or None if no values have been received for that control.'
@concurrency.serialized
def control_value(self, control_number):
if control_number is None:
  return None
return self._control_values.get(control_number)
'Sends the specified control change message on the output port.'
def send_control_change(self, control_number, value):
self._outport.send(mido.Message(type='control_change', control=control_number, value=value))
'Calls `fn` at the next signal message. The callback function must take exactly one argument, which will be the message triggering the signal. Survives until signal is called or the MidiHub is destroyed. Args: fn: The callback function to call, passing in the triggering message. signal: A MidiSignal to use as a signal to call `fn` on the triggering message.'
@concurrency.serialized
def register_callback(self, fn, signal):
self._callbacks[re.compile(str(signal))].append(fn)
'Prints instructions for mapping control changes.'
def _print_instructions(self):
print 'Enter the index of a signal to set the control change for, or `q` when done.'
fmt = '{:>6}\t{:<20}\t{:>6}'
print fmt.format('Index', 'Control', 'Current')
for i, signal in enumerate(self._signals):
  print fmt.format(i + 1, signal, self._cc_map.get(signal))
print
'Updates mapping for the signal to the message\'s control change. Args: signal: The name of the signal to update the control change for. msg: The mido.Message whose control change the signal should be set to.'
def _update_signal(self, signal, msg):
if msg.control in self._cc_map.values():
  print ('Control number %d is already assigned. Ignoring.' % msg.control)
else:
  self._cc_map[signal] = msg.control
  print ('Assigned control number %d to `%s`.' % (msg.control, signal))
self._update_event.set()
'Enters a loop that receives user input to set signal controls.'
def update_map(self):
while True:
  print
  self._print_instructions()
  response = raw_input('Selection: ')
  if response == 'q':
    return
  try:
    signal = self._signals[int(response) - 1]
  except (ValueError, IndexError):
    print 'Invalid response:', response
    continue
  self._update_event.clear()
  self._midi_hub.register_callback(
      partial(self._update_signal, signal),
      midi_hub.MidiSignal(type='control_change'))
  print ('Send a control signal using the control number you wish to '
         'associate with `%s`.' % signal)
  self._update_event.wait()
'Returns the SequenceGenerator selected by the current control value.'
@property
def _sequence_generator(self):
if len(self._sequence_generators) == 1:
  return self._sequence_generators[0]
val = self._midi_hub.control_value(self._generator_select_control_number)
val = 0 if val is None else val
return self._sequence_generators[val % len(self._sequence_generators)]
'Returns the qpm based on the current tempo control value.'
@property
def _qpm(self):
val = self._midi_hub.control_value(self._tempo_control_number)
return self._default_qpm if val is None else val + self._BASE_QPM
'Returns the temperature based on the current control value. Linearly interpolates between `min_temp` and `max_temp`. Args: min_temp: The minimum temperature, which will be returned when value is 0. max_temp: The maximum temperature, which will be returned when value is 127. default: The temperature to return if control value is None. Returns: A float temperature value based on the 7-bit MIDI control value.'
@property
def _temperature(self, min_temp=0.1, max_temp=2.0, default=1.0):
val = self._midi_hub.control_value(self._temperature_control_number)
if val is None:
  return default
return min_temp + (val / 127.0) * (max_temp - min_temp)
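A quick check of the linear interpolation above:

min_temp, max_temp = 0.1, 2.0
for val in (0, 64, 127):
  print(min_temp + (val / 127.0) * (max_temp - min_temp))
# 0 -> 0.1, 64 -> ~1.057, 127 -> 2.0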
'The main loop for the interaction. Must exit shortly after `self._stop_signal` is set.'
@abc.abstractmethod
def run(self):
pass
'Stops the main loop, and blocks until the interaction is stopped.'
def stop(self):
self._stop_signal.set()
self.join()
'Logs and sends a control change with the state.'
def _update_state(self, state):
if self._state_control_number is not None:
  self._midi_hub.send_control_change(self._state_control_number, state)
tf.logging.info('State: %s', self.State.to_string(state))
'Method to use as a callback for setting the end call signal.'
def _end_call_callback(self, unused_captured_seq):
self._end_call.set()
tf.logging.info('End call signal received.')
'Method to use as a callback for setting the panic signal.'
def _panic_callback(self, unused_captured_seq):
self._panic.set()
tf.logging.info('Panic signal received.')
'Method to use as a callback for setting the mutate signal.'
def _mutate_callback(self, unused_captured_seq):
self._mutate.set()
tf.logging.info('Mutate signal received.')
'Returns the min listen ticks based on the current control value.'
@property
def _min_listen_ticks(self):
val = self._midi_hub.control_value(self._min_listen_ticks_control_number)
return 0 if val is None else val
'Returns the max listen ticks based on the current control value.'
@property
def _max_listen_ticks(self):
val = self._midi_hub.control_value(self._max_listen_ticks_control_number)
return float('inf') if not val else val
'Generates a response sequence with the currently-selected generator. Args: input_sequence: The NoteSequence to use as a generation seed. zero_time: The float time in seconds to treat as the start of the input. response_start_time: The float time in seconds for the start of generation. response_end_time: The float time in seconds for the end of generation. Returns: The generated NoteSequence.'
def _generate(self, input_sequence, zero_time, response_start_time, response_end_time):
response_start_time -= zero_time
response_end_time -= zero_time

generator_options = generator_pb2.GeneratorOptions()
generator_options.input_sections.add(
    start_time=0, end_time=response_start_time)
generator_options.generate_sections.add(
    start_time=response_start_time, end_time=response_end_time)
generator_options.args['temperature'].float_value = self._temperature

tf.logging.info("Generating sequence using '%s' generator.",
                self._sequence_generator.details.id)
tf.logging.debug('Generator Details: %s', self._sequence_generator.details)
tf.logging.debug('Bundle Details: %s', self._sequence_generator.bundle_details)
tf.logging.debug('Generator Options: %s', generator_options)
response_sequence = self._sequence_generator.generate(
    adjust_sequence_times(input_sequence, -zero_time), generator_options)
response_sequence = magenta.music.trim_note_sequence(
    response_sequence, response_start_time, response_end_time)
return adjust_sequence_times(response_sequence, zero_time)
'The main loop for a real-time call and response interaction.'
def run(self):
start_time = time.time()
self._captor = self._midi_hub.start_capture(self._qpm, start_time)

if not self._clock_signal and self._metronome_channel is not None:
  self._midi_hub.start_metronome(
      self._qpm, start_time, channel=self._metronome_channel)

# Register callbacks for the optional control signals.
if self._end_call_signal is not None:
  self._captor.register_callback(
      self._end_call_callback, signal=self._end_call_signal)
if self._panic_signal is not None:
  self._captor.register_callback(
      self._panic_callback, signal=self._panic_signal)
if self._mutate_signal is not None:
  self._captor.register_callback(
      self._mutate_callback, signal=self._mutate_signal)

last_tick_time = time.time()
listen_ticks = 0

# Start with an empty response sequence.
response_sequence = music_pb2.NoteSequence()
response_start_time = 0
response_duration = 0
player = self._midi_hub.start_playback(response_sequence, allow_updates=True)

for captured_sequence in self._captor.iterate(
    signal=self._clock_signal, period=self._tick_duration):
  if self._stop_signal.is_set():
    break
  if self._panic.is_set():
    response_sequence = music_pb2.NoteSequence()
    player.update_sequence(response_sequence)
    self._panic.clear()

  tick_time = captured_sequence.total_time

  if not self._clock_signal and self._metronome_channel is not None:
    self._midi_hub.start_metronome(
        self._qpm, tick_time, channel=self._metronome_channel)
  captured_sequence.tempos[0].qpm = self._qpm

  tick_duration = tick_time - last_tick_time
  last_end_time = (max(note.end_time for note in captured_sequence.notes)
                   if captured_sequence.notes else 0.0)

  # True if there was no input captured during the last tick.
  silent_tick = last_end_time <= last_tick_time

  if not silent_tick:
    listen_ticks += 1

  if not captured_sequence.notes:
    # Idling: reset the captured sequence.
    if response_sequence.total_time <= tick_time:
      self._update_state(self.State.IDLE)
    if self._captor.start_time < tick_time:
      self._captor.start_time = tick_time
    self._end_call.clear()
    listen_ticks = 0
  elif (self._end_call.is_set() or silent_tick or
        listen_ticks >= self._max_listen_ticks):
    if listen_ticks < self._min_listen_ticks:
      tf.logging.info('Input too short (%d vs %d). Skipping.',
                      listen_ticks, self._min_listen_ticks)
      self._captor.start_time = tick_time
    else:
      # Create a response and start playback.
      self._update_state(self.State.RESPONDING)
      capture_start_time = self._captor.start_time

      if silent_tick:
        # Move the sequence forward one tick in time.
        captured_sequence = adjust_sequence_times(
            captured_sequence, tick_duration)
        captured_sequence.total_time = tick_time
        capture_start_time += tick_duration

      # Compute the duration of the response.
      num_ticks = self._midi_hub.control_value(
          self._response_ticks_control_number)
      if num_ticks:
        response_duration = num_ticks * tick_duration
      else:
        # Use the capture duration.
        response_duration = tick_time - capture_start_time

      response_start_time = tick_time
      response_sequence = self._generate(
          captured_sequence, capture_start_time, response_start_time,
          response_start_time + response_duration)

      # If generation took too long, push the response back whole ticks.
      if (time.time() - response_start_time) >= tick_duration / 4:
        push_ticks = (
            (time.time() - response_start_time) // tick_duration + 1)
        response_start_time += push_ticks * tick_duration
        response_sequence = adjust_sequence_times(
            response_sequence, push_ticks * tick_duration)
        tf.logging.warn('Response too late. Pushing back %d ticks.',
                        push_ticks)

      player.update_sequence(
          response_sequence, start_time=response_start_time)

      # Optionally capture during playback.
      if self._allow_overlap:
        self._captor.start_time = response_start_time
      else:
        self._captor.start_time = response_start_time + response_duration

    self._end_call.clear()
    listen_ticks = 0
  else:
    # Continue listening.
    self._update_state(self.State.LISTENING)

  # Potentially loop or mutate the previous response.
  if self._mutate.is_set() and not response_sequence.notes:
    self._mutate.clear()
    tf.logging.warn('Ignoring mutate request with nothing to mutate.')

  if (response_sequence.total_time <= tick_time and
      (self._should_loop or self._mutate.is_set())):
    if self._mutate.is_set():
      new_start_time = response_start_time + response_duration
      new_end_time = new_start_time + response_duration
      response_sequence = self._generate(
          response_sequence, response_start_time, new_start_time,
          new_end_time)
      response_start_time = new_start_time
      self._mutate.clear()

    response_sequence = adjust_sequence_times(
        response_sequence, tick_time - response_start_time)
    response_start_time = tick_time

    player.update_sequence(response_sequence, start_time=tick_time)

  last_tick_time = tick_time

player.stop()
'Create a cell with attention. Args: cell: an RNNCell; attention is added to it. attn_states: External attention states, typically the encoder output, in the form [batch_size, time steps, hidden size]. attn_vec_size: integer, the number of convolutional features calculated on the attention state and the size of the hidden layer built from the base cell state. Equal to attn_size by default. input_size: integer, the size of a hidden linear layer built from the inputs and attention. Derived from the input tensor by default. state_is_tuple: If True, accepted and returned states are n-tuples, where `n = len(cells)`. Must be set to True; a ValueError is raised otherwise. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. Raises: TypeError: if cell is not an RNNCell. ValueError: if `state_is_tuple` is `False`, if `attn_states` is not rank 3, or if its innermost dimension (hidden size) is None.'
def __init__(self, cell, attn_states, attn_vec_size=None, input_size=None, state_is_tuple=True, reuse=None):
super(ExternalAttentionCellWrapper, self).__init__(_reuse=reuse) if (not state_is_tuple): raise ValueError('Only tuple state is supported') self._cell = cell self._input_size = input_size attn_shape = attn_states.get_shape() if ((not attn_shape) or (len(attn_shape) != 3)): raise ValueError('attn_shape must be rank 3') self._attn_states = attn_states self._attn_size = attn_shape[2].value if (self._attn_size is None): raise ValueError('Hidden size of attn_states cannot be None') self._attn_vec_size = attn_vec_size if (self._attn_vec_size is None): self._attn_vec_size = self._attn_size self._reuse = reuse
'Combines the previous state (e.g. from the encoder) with the internal attention values. You must use this function to derive the initial state passed into this cell, since it expects a named tuple (AttentionTuple). Args: previous_state: State from another block that will be fed into this cell; must have the same structure as the state of the cell wrapped by this one. Returns: Combined state (AttentionTuple).'
def combine_state(self, previous_state):
batch_size = self._attn_states.get_shape()[0].value if (batch_size is None): batch_size = tf.shape(self._attn_states)[0] zeroed_state = self.zero_state(batch_size, self._attn_states.dtype) return AttentionTuple(previous_state, zeroed_state.attention)
'Long short-term memory cell with attention (LSTMA).'
def call(self, inputs, state):
if (not isinstance(state, AttentionTuple)): raise TypeError('State must be of type AttentionTuple') (state, attns) = state attn_states = self._attn_states attn_length = attn_states.get_shape()[1].value if (attn_length is None): attn_length = tf.shape(attn_states)[1] input_size = self._input_size if (input_size is None): input_size = inputs.get_shape().as_list()[1] if (attns is not None): inputs = tf.layers.dense(tf.concat([inputs, attns], axis=1), input_size) (lstm_output, new_state) = self._cell(inputs, state) new_state_cat = tf.concat(nest.flatten(new_state), 1) new_attns = self._attention(new_state_cat, attn_states, attn_length) with tf.variable_scope('attn_output_projection'): output = tf.layers.dense(tf.concat([lstm_output, new_attns], axis=1), self._attn_size) new_state = AttentionTuple(new_state, new_attns) return (output, new_state)
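A hedged usage sketch of the wrapper defined above, assuming TF 1.x and that ExternalAttentionCellWrapper is importable from this module; the batch and shape values are illustrative:

import tensorflow as tf

batch_size, enc_steps, hidden_size = 32, 20, 128

# Encoder output used as the external attention states; must be rank 3:
# [batch_size, time steps, hidden size].
attn_states = tf.placeholder(tf.float32, [batch_size, enc_steps, hidden_size])

base_cell = tf.nn.rnn_cell.LSTMCell(hidden_size, state_is_tuple=True)
attn_cell = ExternalAttentionCellWrapper(base_cell, attn_states, attn_vec_size=64)

# combine_state pairs a previous (e.g. encoder-derived) state with a zeroed
# attention vector; here a zero state stands in for the encoder's final state.
previous_state = base_cell.zero_state(batch_size, tf.float32)
initial_state = attn_cell.combine_state(previous_state)

inputs = tf.placeholder(tf.float32, [batch_size, hidden_size])
output, next_state = attn_cell(inputs, initial_state)  # one decoder step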
'An inference method; see T2TModel.'
def infer(self, features=None, decode_length=50, beam_size=1, top_beams=1, last_position_only=False, alpha=0.0):
if (not features): features = {} inputs_old = None if (('inputs' in features) and (len(features['inputs'].shape) < 4)): inputs_old = features['inputs'] features['inputs'] = tf.expand_dims(features['inputs'], 2) if ('partial_targets' in features): initial_output = tf.convert_to_tensor(features['partial_targets']) else: batch_size = tf.shape(features['inputs'])[0] initial_output = tf.zeros((batch_size, 1, 1, 1), dtype=tf.int64) features['targets'] = initial_output (sharded_logits, _) = self.model_fn(features, False, last_position_only=last_position_only) sharded_samples = self._data_parallelism(tf.argmax, sharded_logits, 4) samples = tf.concat(sharded_samples, 0) with tf.variable_scope(tf.get_variable_scope(), reuse=True): features['targets'] = samples (sharded_logits, _) = self.model_fn(features, False, last_position_only=last_position_only) sharded_samples = self._data_parallelism(tf.argmax, sharded_logits, 4) samples = tf.concat(sharded_samples, 0) if (inputs_old is not None): features['inputs'] = inputs_old return samples
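The method above decodes in two passes: it runs the model on a zero (or partial) target to get logits, takes greedy samples, then re-runs the model with those samples as targets. A minimal sketch of the greedy step in isolation, assuming the t2t logit layout [batch, length, 1, 1, vocab_size]:

import tensorflow as tf

logits = tf.random_normal([8, 50, 1, 1, 32000])  # stand-in for model logits
samples = tf.argmax(logits, axis=4)  # int64 token ids, shape [8, 50, 1, 1]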
'Create a Problem. Args: was_reversed: bool, whether to reverse inputs and targets. was_copy: bool, whether to copy inputs to targets. Can be composed with was_reversed so that if both are true, the targets become the inputs, which are then copied to targets so that the task is targets->targets.'
def __init__(self, was_reversed=False, was_copy=False):
self._was_reversed = was_reversed self._was_copy = was_copy self._encoders = None
'Returns problem_hparams.'
def internal_hparams(self, model_hparams):
if (self._encoders is None): self.internal_build_encoders(model_hparams.data_dir) hp = _default_hparams() ret = self.hparams(hp, model_hparams) if (ret is not None): raise ValueError('The Problem subclass hparams function should mutate the defaults passed in and return None.') hp.add_hparam('vocabulary', self._encoders) hp.add_hparam('was_reversed', self._was_reversed) hp.add_hparam('was_copy', self._was_copy) if self._was_reversed: _reverse_problem_hparams(hp) if self._was_copy: _copy_problem_hparams(hp) return hp
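A sketch of the contract internal_hparams enforces above: a Problem subclass's hparams must mutate the passed-in defaults in place and return None. The subclass name and field names below are illustrative, not taken from the source:

class MyTranslateProblem(Problem):  # hypothetical subclass

  def hparams(self, defaults, model_hparams):
    # Mutate the defaults in place...
    defaults.input_modality = {'inputs': ('symbol', 32000)}
    defaults.target_modality = ('symbol', 32000)
    # ...and return None; returning anything else raises the ValueError above.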
'Generator of the training data.'
def train_generator(self, data_dir, tmp_dir, is_training):
raise NotImplementedError()
'Generator of the development data.'
def dev_generator(self, data_dir, tmp_dir):
return self.train_generator(data_dir, tmp_dir, False)
'Generator; takes 3 args: nbr_symbols, max_length, nbr_cases.'
@property def train_generator(self):
raise NotImplementedError()
'Constructor. Args: symbol: The character which represents this operation, such as \'+\' for addition. precedence: Operator precedence. This will determine where parentheses are used. associative: If true, the order of the operands does not matter.'
def __init__(self, symbol, precedence, associative=False):
self.symbol = symbol self.precedence = precedence self.associative = associative
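Illustrative constructions, assuming this class is named Operation as in the surrounding module:

add = Operation('+', precedence=1, associative=True)   # a + b == b + a
sub = Operation('-', precedence=1)                     # operand order matters
mul = Operation('*', precedence=2, associative=True)   # binds tighter than +/-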
'Returns True if `expr` is a subtree of this expression.'
def is_in(self, expr):
if (expr == self): return True is_in_left = is_in_expr(self.left, expr) is_in_right = is_in_expr(self.right, expr) return (is_in_left or is_in_right)
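For illustration, reusing the add and mul operations from the previous sketch, and assuming expression nodes are built as ExprOp(left, right, op) with is_in_expr handling leaf values (both assumptions, not shown in this excerpt):

inner = ExprOp('a', 'b', add)    # a + b
outer = ExprOp(inner, 'c', mul)  # (a + b) * c
outer.is_in(inner)  # True: (a + b) is a subtree of the product
outer.is_in('d')    # False: 'd' appears nowhere in the tree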
'Check that C++ source code is correctly preprocessed.'
def testCppPreprocess(self):
cpp_pb = desc2code.Desc2CodeCppProblem() self.assertEqual(cpp_pb.preprocess_target('firstline//comm1\nsecondline//comm2\n'), 'firstline secondline') self.assertEqual(cpp_pb.preprocess_target(CODE_CPP_IN), CODE_CPP_OUT) self.assertEqual(cpp_pb.preprocess_target(' not removed //abcd '), 'not removed //abcd')
'Apply some preprocessing to the target. For instance, remove spaces/tabs. Args: target (str): source code content Returns: the preprocessed string content'
def preprocess_target(self, target):
return target
'Simple tab to space replacement.'
def preprocess_target(self, target):
return target.replace('\t', ' ')
'Pre-process Cpp files.'
def preprocess_target(self, target):
target = re.sub(_RE_CPP_INLINE_COMMENT, ' ', target) target = ' '.join(target.split()) return target
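The pattern _RE_CPP_INLINE_COMMENT is defined elsewhere in desc2code; a plausible reconstruction that satisfies the test above (including keeping a trailing comment that has no closing newline):

import re

_RE_CPP_INLINE_COMMENT = re.compile(r'//.*?\n')  # assumed pattern

def preprocess_target(target):
  target = re.sub(_RE_CPP_INLINE_COMMENT, ' ', target)  # drop // comments
  return ' '.join(target.split())  # collapse all whitespace runs

assert preprocess_target('firstline//comm1\nsecondline//comm2\n') == 'firstline secondline'
assert preprocess_target(' not removed //abcd ') == 'not removed //abcd'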
'Transform a human-readable string into a sequence of int ids. The ids should be in the range [num_reserved_ids, vocab_size). Ids [0, num_reserved_ids) are reserved. EOS is not appended. Args: s: human-readable string to be converted. Returns: ids: list of integers'
def encode(self, s):
return [(int(w) + self._num_reserved_ids) for w in s.split()]
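A worked example of the id shift, assuming the base encoder can be constructed with num_reserved_ids (e.g. ids 0=PAD and 1=EOS reserved); the constructor signature is an assumption:

encoder = TextEncoder(num_reserved_ids=2)
encoder.encode('0 1 2')  # -> [2, 3, 4]: each token id shifted past the reserved range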