desc (stringlengths 3-26.7k) | decl (stringlengths 11-7.89k) | bodies (stringlengths 8-553k)
---|---|---|
'Reward function that rewards the model for playing within a given key.
Any note within the key is given equal reward, which can cause the model to
learn random-sounding compositions.
Args:
action: One-hot encoding of the chosen action.
key: The numeric values of notes belonging to this key. Defaults to C
Major if not provided.
Returns:
Float reward value.'
| def reward_key_distribute_prob(self, action, key=None):
| if (key is None):
key = rl_tuner_ops.C_MAJOR_KEY
reward = 0
action_note = np.argmax(action)
if (action_note in key):
num_notes_in_key = len(key)
extra_prob = (1.0 / num_notes_in_key)
reward = extra_prob
return reward
|
'Applies a penalty for playing notes not in a specific key.
Args:
action: One-hot encoding of the chosen action.
penalty_amount: The amount the model will be penalized if it plays
a note outside the key.
key: The numeric values of notes belonging to this key. Defaults to
C-major if not provided.
Returns:
Float reward value.'
| def reward_key(self, action, penalty_amount=(-1.0), key=None):
| if (key is None):
key = rl_tuner_ops.C_MAJOR_KEY
reward = 0
action_note = np.argmax(action)
if (action_note not in key):
reward = penalty_amount
return reward
|
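Both key-based rewards above reduce to a membership test on the argmax of the one-hot action. Below is a minimal standalone sketch of the same idea in plain NumPy; the two-octave C-major note list is an assumption standing in for rl_tuner_ops.C_MAJOR_KEY, which is not shown in this excerpt.

```python
# Hedged sketch of the key-membership penalty; the key list is a stand-in
# assumption for rl_tuner_ops.C_MAJOR_KEY.
import numpy as np

C_MAJOR_KEY_SKETCH = [0, 2, 4, 5, 7, 9, 11, 12, 14, 16, 17, 19, 21, 23]

def reward_key_sketch(action, penalty_amount=-1.0, key=C_MAJOR_KEY_SKETCH):
    """Return 0.0 for an in-key note and penalty_amount otherwise."""
    action_note = int(np.argmax(action))   # one-hot action -> integer note index
    return penalty_amount if action_note not in key else 0.0

one_hot = np.zeros(24)
one_hot[13] = 1.0                          # index 13 is not in the key list above
print(reward_key_sketch(one_hot))          # -> -1.0, the out-of-key penalty
```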
'Rewards for playing the tonic note at the right times.
Rewards for playing the tonic as the first note of the first bar, and the
first note of the final bar.
Args:
action: One-hot encoding of the chosen action.
tonic_note: The tonic/1st note of the desired key.
reward_amount: The amount the model will be awarded if it plays the
tonic note at the right time.
Returns:
Float reward value.'
| def reward_tonic(self, action, tonic_note=rl_tuner_ops.C_MAJOR_TONIC, reward_amount=3.0):
| action_note = np.argmax(action)
first_note_of_final_bar = (self.num_notes_in_melody - 4)
if ((self.beat == 0) or (self.beat == first_note_of_final_bar)):
if (action_note == tonic_note):
return reward_amount
elif (self.beat == (first_note_of_final_bar + 1)):
if (action_note == NO_EVENT):
return reward_amount
elif (self.beat > (first_note_of_final_bar + 1)):
if ((action_note == NO_EVENT) or (action_note == NOTE_OFF)):
return reward_amount
return 0.0
|
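The beat arithmetic above is easiest to read as a schedule: the tonic is expected at beat 0 and at the first beat of the final bar (num_notes_in_melody - 4), and the melody is expected to hold or rest afterwards. A small worked example for a hypothetical 32-beat melody:

```python
# Worked example of the tonic-timing rule for a hypothetical 32-beat melody.
num_notes_in_melody = 32
first_note_of_final_bar = num_notes_in_melody - 4   # beat 28
for beat in (0, 1, 27, 28, 29, 30, 31):
    if beat in (0, first_note_of_final_bar):
        rule = 'reward if the tonic note is played'
    elif beat == first_note_of_final_bar + 1:
        rule = 'reward if the note is held (NO_EVENT)'
    elif beat > first_note_of_final_bar + 1:
        rule = 'reward if held or rested (NO_EVENT / NOTE_OFF)'
    else:
        rule = 'no tonic reward at this beat'
    print(beat, rule)
```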
'Rewards the model for not playing the same note over and over.
Penalizes the model for playing the same note repeatedly, although more
repetitions are allowed if it occasionally holds the note or rests in
between. Reward is uniform when there is no penalty.
Args:
action: One-hot encoding of the chosen action.
Returns:
Float reward value.'
| def reward_non_repeating(self, action):
| penalty = self.reward_penalize_repeating(action)
if (penalty >= 0):
return 0.1
else:
return 0.0
|
'Detects whether the note played is repeating previous notes excessively.
Args:
action_note: An integer representing the note just played.
Returns:
True if the note just played is excessively repeated, False otherwise.'
| def detect_repeating_notes(self, action_note):
| num_repeated = 0
contains_held_notes = False
contains_breaks = False
for i in range((len(self.composition) - 1), (-1), (-1)):
if (self.composition[i] == action_note):
num_repeated += 1
elif (self.composition[i] == NOTE_OFF):
contains_breaks = True
elif (self.composition[i] == NO_EVENT):
contains_held_notes = True
else:
break
if ((action_note == NOTE_OFF) and (num_repeated > 1)):
return True
elif ((not contains_held_notes) and (not contains_breaks)):
if (num_repeated > 4):
return True
elif (contains_held_notes or contains_breaks):
if (num_repeated > 6):
return True
elif (num_repeated > 8):
return True
return False
|
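The thresholds above are easier to see on plain lists. The sketch below assumes the special actions NOTE_OFF and NO_EVENT occupy indices 0 and 1 (pitches start at 2, as suggested by c_notes = [2, 14, 26] later in this file); it reproduces the main thresholds but omits the final, unreachable > 8 branch.

```python
# Standalone sketch of the repeat thresholds; NOTE_OFF/NO_EVENT indices are an
# assumption based on the pitch numbering used elsewhere in this file.
NOTE_OFF_SKETCH, NO_EVENT_SKETCH = 0, 1

def detect_repeating_notes_sketch(composition, action_note):
    num_repeated, holds, breaks = 0, False, False
    for event in reversed(composition):          # scan back from the newest note
        if event == action_note:
            num_repeated += 1
        elif event == NOTE_OFF_SKETCH:
            breaks = True
        elif event == NO_EVENT_SKETCH:
            holds = True
        else:
            break                                # a different pitch ends the run
    if action_note == NOTE_OFF_SKETCH:
        return num_repeated > 1                  # more than one rest in a row
    if not holds and not breaks:
        return num_repeated > 4                  # plain repetition trips sooner
    return num_repeated > 6                      # holds/rests buy some extra slack

print(detect_repeating_notes_sketch([2, 9, 9, 9, 9, 9], 9))   # True: five plain repeats
print(detect_repeating_notes_sketch([2, 9, 1, 9, 9, 9], 9))   # False: a hold loosens the rule
```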
'Applies a large penalty if the same note is played repeatedly.
Allows more repeated notes if there are held notes or rests in between. If
the note is not excessively repeated, no penalty is applied.
Args:
action: One-hot encoding of the chosen action.
penalty_amount: The amount the model will be penalized if it plays
repeating notes.
Returns:
0.0, or \'penalty_amount\' if the note is excessively repeated.'
| def reward_penalize_repeating(self, action, penalty_amount=(-100.0)):
| action_note = np.argmax(action)
is_repeating = self.detect_repeating_notes(action_note)
if is_repeating:
return penalty_amount
else:
return 0.0
|
'Applies a penalty if the composition is highly autocorrelated.
Penalizes the model for creating a composition that is highly correlated
with itself at lags of 1, 2, and 3 beats previous. This is meant to
encourage variety in compositions.
Args:
action: One-hot encoding of the chosen action.
penalty_weight: The weight which will be multiplied by the sum of the
absolute autocorrelation coefficients to compute the penalty.
Returns:
Float reward value (zero or negative).'
| def reward_penalize_autocorrelation(self, action, penalty_weight=3.0):
| composition = (self.composition + [np.argmax(action)])
lags = [1, 2, 3]
sum_penalty = 0
for lag in lags:
coeff = rl_tuner_ops.autocorrelate(composition, lag=lag)
if (not np.isnan(coeff)):
if (np.abs(coeff) > 0.15):
sum_penalty += (np.abs(coeff) * penalty_weight)
return (- sum_penalty)
|
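rl_tuner_ops.autocorrelate is not shown in this excerpt; a plain Pearson correlation at a given lag is a reasonable stand-in, and makes the penalty rule easy to check on a toy melody:

```python
# Hedged sketch: a plain-NumPy lag autocorrelation stands in for
# rl_tuner_ops.autocorrelate, followed by the penalty rule used above.
import numpy as np

def autocorrelate_sketch(signal, lag=1):
    s = np.asarray(signal, dtype=float)
    if len(s) <= lag:
        return np.nan
    a, b = s[:-lag], s[lag:]
    if a.std() == 0 or b.std() == 0:
        return np.nan                       # undefined for constant segments
    return float(np.corrcoef(a, b)[0, 1])

composition = [2, 9, 2, 9, 2, 9, 2, 9]      # strongly periodic toy melody
penalty = 0.0
for lag in (1, 2, 3):
    coeff = autocorrelate_sketch(composition, lag)
    if not np.isnan(coeff) and abs(coeff) > 0.15:
        penalty += abs(coeff) * 3.0          # penalty_weight = 3.0
print(-penalty)                              # large negative reward for the repetitive line
```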
'Detects if a motif was just played and if so, returns it.
A motif should contain at least three distinct notes that are not note_on
or note_off, and occur within the course of one bar.
Args:
composition: The composition in which the function will look for a
recent motif. Defaults to the model\'s composition.
bar_length: The number of notes in one bar.
Returns:
A tuple of (motif, num_unique_notes). The motif is None if no motif was
detected, otherwise it is given in the same format as the composition.'
| def detect_last_motif(self, composition=None, bar_length=8):
| if (composition is None):
composition = self.composition
if (len(composition) < bar_length):
return (None, 0)
last_bar = composition[(- bar_length):]
actual_notes = [a for a in last_bar if ((a != NO_EVENT) and (a != NOTE_OFF))]
num_unique_notes = len(set(actual_notes))
if (num_unique_notes >= 3):
return (last_bar, num_unique_notes)
else:
return (None, num_unique_notes)
|
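In other words, a motif here is simply "at least three distinct pitches within the last bar". A toy run of the same check on plain lists, again assuming the special actions sit at indices 0 and 1:

```python
# Toy version of the motif check; the NO_EVENT/NOTE_OFF indices (0 and 1) are
# an assumption consistent with the pitch numbering used in this file.
def detect_last_motif_sketch(composition, bar_length=8):
    if len(composition) < bar_length:
        return None, 0
    last_bar = composition[-bar_length:]
    pitches = [a for a in last_bar if a not in (0, 1)]
    num_unique = len(set(pitches))
    return (last_bar, num_unique) if num_unique >= 3 else (None, num_unique)

print(detect_last_motif_sketch([2, 1, 5, 1, 9, 0, 2, 1]))   # motif: pitches {2, 5, 9}
print(detect_last_motif_sketch([2, 1, 2, 1, 2, 0, 2, 1]))   # no motif: only pitch 2
```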
'Rewards the model for playing any motif.
Motif must have at least three distinct notes in the course of one bar.
There is a bonus for playing more complex motifs; that is, ones that involve
a greater number of notes.
Args:
action: One-hot encoding of the chosen action.
reward_amount: The amount that will be returned if the last note belongs
to a motif.
Returns:
Float reward value.'
| def reward_motif(self, action, reward_amount=3.0):
| composition = (self.composition + [np.argmax(action)])
(motif, num_notes_in_motif) = self.detect_last_motif(composition=composition)
if (motif is not None):
motif_complexity_bonus = max(((num_notes_in_motif - 3) * 0.3), 0)
return (reward_amount + motif_complexity_bonus)
else:
return 0.0
|
'Detects whether the last motif played repeats an earlier motif played.
Args:
action: One-hot encoding of the chosen action.
bar_length: The number of beats in one bar. This determines how many beats
the model has in which to play the motif.
Returns:
A tuple of (is_repeated, motif). is_repeated is True if the note just
played belongs to a motif that repeats an earlier motif, False otherwise;
motif is the repeated motif, or None.'
| def detect_repeated_motif(self, action, bar_length=8):
| composition = (self.composition + [np.argmax(action)])
if (len(composition) < bar_length):
return (False, None)
(motif, _) = self.detect_last_motif(composition=composition, bar_length=bar_length)
if (motif is None):
return (False, None)
prev_composition = self.composition[:(- (bar_length - 1))]
for i in range(((len(prev_composition) - len(motif)) + 1)):
for j in range(len(motif)):
if (prev_composition[(i + j)] != motif[j]):
break
else:
return (True, motif)
return (False, None)
|
'Adds a big bonus to previous reward if the model plays a repeated motif.
Checks if the model has just played a motif that repeats an earlier motif in
the composition.
There is also a bonus for repeating more complex motifs.
Args:
action: One-hot encoding of the chosen action.
bar_length: The number of notes in one bar.
reward_amount: The amount that will be added to the reward if the last
note belongs to a repeated motif.
Returns:
Float reward value.'
| def reward_repeated_motif(self, action, bar_length=8, reward_amount=4.0):
| (is_repeated, motif) = self.detect_repeated_motif(action, bar_length)
if is_repeated:
actual_notes = [a for a in motif if ((a != NO_EVENT) and (a != NOTE_OFF))]
num_notes_in_motif = len(set(actual_notes))
motif_complexity_bonus = max((num_notes_in_motif - 3), 0)
return (reward_amount + motif_complexity_bonus)
else:
return 0.0
|
'Finds the melodic interval between the action and the last note played.
Uses constants to represent special intervals like rests.
Args:
action: One-hot encoding of the chosen action.
key: The numeric values of notes belonging to this key. Defaults to
C-major if not provided.
Returns:
A tuple of (interval, action_note, prev_note), where the interval is an
integer or a constant value for special intervals such as rests.'
| def detect_sequential_interval(self, action, key=None):
| if (not self.composition):
return (0, None, None)
prev_note = self.composition[(-1)]
action_note = np.argmax(action)
c_major = False
if (key is None):
key = rl_tuner_ops.C_MAJOR_KEY
c_notes = [2, 14, 26]
g_notes = [9, 21, 33]
e_notes = [6, 18, 30]
c_major = True
tonic_notes = [2, 14, 26]
fifth_notes = [9, 21, 33]
prev_note_index = (len(self.composition) - 1)
while (((prev_note == NO_EVENT) or (prev_note == NOTE_OFF)) and (prev_note_index >= 0)):
prev_note = self.composition[prev_note_index]
prev_note_index -= 1
if ((prev_note == NOTE_OFF) or (prev_note == NO_EVENT)):
tf.logging.debug('Action_note: %s, prev_note: %s', action_note, prev_note)
return (0, action_note, prev_note)
tf.logging.debug('Action_note: %s, prev_note: %s', action_note, prev_note)
if (action_note == NO_EVENT):
if ((prev_note in tonic_notes) or (prev_note in fifth_notes)):
return (rl_tuner_ops.HOLD_INTERVAL_AFTER_THIRD_OR_FIFTH, action_note, prev_note)
else:
return (rl_tuner_ops.HOLD_INTERVAL, action_note, prev_note)
elif (action_note == NOTE_OFF):
if ((prev_note in tonic_notes) or (prev_note in fifth_notes)):
return (rl_tuner_ops.REST_INTERVAL_AFTER_THIRD_OR_FIFTH, action_note, prev_note)
else:
return (rl_tuner_ops.REST_INTERVAL, action_note, prev_note)
interval = abs((action_note - prev_note))
if (c_major and (interval == rl_tuner_ops.FIFTH) and ((prev_note in c_notes) or (prev_note in g_notes))):
return (rl_tuner_ops.IN_KEY_FIFTH, action_note, prev_note)
if (c_major and (interval == rl_tuner_ops.THIRD) and ((prev_note in c_notes) or (prev_note in e_notes))):
return (rl_tuner_ops.IN_KEY_THIRD, action_note, prev_note)
return (interval, action_note, prev_note)
|
'Dispenses reward based on the melodic interval just played.
Args:
action: One-hot encoding of the chosen action.
scaler: This value will be multiplied by all rewards in this function.
key: The numeric values of notes belonging to this key. Defaults to
C-major if not provided.
Returns:
Float reward value.'
| def reward_preferred_intervals(self, action, scaler=5.0, key=None):
| (interval, _, _) = self.detect_sequential_interval(action, key)
tf.logging.debug('Interval: %s', interval)
if (interval == 0):
tf.logging.debug('No interval or uninteresting.')
return 0.0
reward = 0.0
if (interval == rl_tuner_ops.REST_INTERVAL):
reward = 0.05
tf.logging.debug('Rest interval.')
if (interval == rl_tuner_ops.HOLD_INTERVAL):
reward = 0.075
if (interval == rl_tuner_ops.REST_INTERVAL_AFTER_THIRD_OR_FIFTH):
reward = 0.15
tf.logging.debug('Rest interval after 1st or 5th.')
if (interval == rl_tuner_ops.HOLD_INTERVAL_AFTER_THIRD_OR_FIFTH):
reward = 0.3
if (interval == rl_tuner_ops.SEVENTH):
reward = (-0.3)
tf.logging.debug('7th')
if (interval > rl_tuner_ops.OCTAVE):
reward = (-1.0)
tf.logging.debug('More than octave.')
if (interval == rl_tuner_ops.IN_KEY_FIFTH):
reward = 0.1
tf.logging.debug('In key 5th')
if (interval == rl_tuner_ops.IN_KEY_THIRD):
reward = 0.15
tf.logging.debug('In key 3rd')
if (interval == rl_tuner_ops.THIRD):
reward = 0.09
tf.logging.debug('3rd')
if (interval == rl_tuner_ops.SECOND):
reward = 0.08
tf.logging.debug('2nd')
if (interval == rl_tuner_ops.FOURTH):
reward = 0.07
tf.logging.debug('4th')
if (interval == rl_tuner_ops.SIXTH):
reward = 0.05
tf.logging.debug('6th')
if (interval == rl_tuner_ops.FIFTH):
reward = 0.02
tf.logging.debug('5th')
tf.logging.debug('Interval reward: %s', (reward * scaler))
return (reward * scaler)
|
'Checks a composition to see if its highest note occurs only once.
Args:
composition: A list of integers representing the notes in the piece.
Returns:
True if the highest note was unique, False otherwise.'
| def detect_high_unique(self, composition):
| max_note = max(composition)
if (list(composition).count(max_note) == 1):
return True
else:
return False
|
'Checks a composition to see if its lowest note occurs only once.
Args:
composition: A list of integers representing the notes in the piece.
Returns:
True if the lowest note was unique, False otherwise.'
| def detect_low_unique(self, composition):
| no_special_events = [x for x in composition if ((x != NO_EVENT) and (x != NOTE_OFF))]
if no_special_events:
min_note = min(no_special_events)
if (list(composition).count(min_note) == 1):
return True
return False
|
'Evaluates if highest and lowest notes in composition occurred once.
Args:
action: One-hot encoding of the chosen action.
reward_amount: Amount of reward that will be given for the highest note
being unique, and again for the lowest note being unique.
Returns:
Float reward value.'
| def reward_high_low_unique(self, action, reward_amount=3.0):
| if ((len(self.composition) + 1) != self.num_notes_in_melody):
return 0.0
composition = np.array(self.composition)
composition = np.append(composition, np.argmax(action))
reward = 0.0
if self.detect_high_unique(composition):
reward += reward_amount
if self.detect_low_unique(composition):
reward += reward_amount
return reward
|
'Detects when the composition takes a musical leap, and if it is resolved.
When the composition jumps up or down by an interval of a fifth or more,
it is a \'leap\'. The model then remembers that it has a \'leap direction\'. The
function detects if it then takes another leap in the same direction, if it
leaps back, or if it gradually resolves the leap.
Args:
action: One-hot encoding of the chosen action.
steps_between_leaps: Leaping back immediately does not constitute a
satisfactory resolution of a leap. Therefore the composition must wait
\'steps_between_leaps\' beats before leaping back.
Returns:
0 if there is no leap, \'LEAP_RESOLVED\' if an existing leap has been
resolved, \'LEAP_DOUBLED\' if 2 leaps in the same direction were made.'
| def detect_leap_up_back(self, action, steps_between_leaps=6):
| if (not self.composition):
return 0
outcome = 0
(interval, action_note, prev_note) = self.detect_sequential_interval(action)
if ((action_note == NOTE_OFF) or (action_note == NO_EVENT)):
self.steps_since_last_leap += 1
tf.logging.debug('Rest, adding to steps since last leap. It is now: %s', self.steps_since_last_leap)
return 0
if ((interval >= rl_tuner_ops.FIFTH) or (interval == rl_tuner_ops.IN_KEY_FIFTH)):
if (action_note > prev_note):
leap_direction = rl_tuner_ops.ASCENDING
tf.logging.debug('Detected an ascending leap')
else:
leap_direction = rl_tuner_ops.DESCENDING
tf.logging.debug('Detected a descending leap')
if (self.composition_direction != 0):
if (self.composition_direction != leap_direction):
tf.logging.debug('Detected a resolved leap')
tf.logging.debug('Num steps since last leap: %s', self.steps_since_last_leap)
if (self.steps_since_last_leap > steps_between_leaps):
outcome = rl_tuner_ops.LEAP_RESOLVED
tf.logging.debug('Sufficient steps before leap resolved, awarding bonus')
self.composition_direction = 0
self.leapt_from = None
else:
tf.logging.debug('Detected a double leap')
outcome = rl_tuner_ops.LEAP_DOUBLED
else:
tf.logging.debug('There was no previous leap direction')
self.composition_direction = leap_direction
self.leapt_from = prev_note
self.steps_since_last_leap = 0
else:
self.steps_since_last_leap += 1
tf.logging.debug('No leap, adding to steps since last leap. It is now: %s', self.steps_since_last_leap)
if (((self.composition_direction == rl_tuner_ops.ASCENDING) and (action_note <= self.leapt_from)) or ((self.composition_direction == rl_tuner_ops.DESCENDING) and (action_note >= self.leapt_from))):
tf.logging.debug('detected a gradually resolved leap')
outcome = rl_tuner_ops.LEAP_RESOLVED
self.composition_direction = 0
self.leapt_from = None
return outcome
|
'Applies punishment and reward based on the principle leap up leap back.
Large interval jumps (more than a fifth) should be followed by moving back
in the opposite direction to resolve the leap.
Args:
action: One-hot encoding of the chosen action.
resolving_leap_bonus: Amount of reward dispensed for resolving a previous
leap.
leaping_twice_punishment: Amount of reward received for leaping twice in
the same direction.
Returns:
Float reward value.'
| def reward_leap_up_back(self, action, resolving_leap_bonus=5.0, leaping_twice_punishment=(-5.0)):
| leap_outcome = self.detect_leap_up_back(action)
if (leap_outcome == rl_tuner_ops.LEAP_RESOLVED):
tf.logging.debug('Leap resolved, awarding %s', resolving_leap_bonus)
return resolving_leap_bonus
elif (leap_outcome == rl_tuner_ops.LEAP_DOUBLED):
tf.logging.debug('Leap doubled, awarding %s', leaping_twice_punishment)
return leaping_twice_punishment
else:
return 0.0
|
'Generates a music sequence with the current model, and saves it to MIDI.
The resulting MIDI file is saved to the model\'s output_dir directory. The
sequence is generated by sampling from the output probabilities at each
timestep, and feeding the resulting note back in as input to the model.
Args:
title: The name that will be used to save the output MIDI file.
visualize_probs: If True, the function will plot the softmax
probabilities of the model for each note that occurs throughout the
sequence. Useful for debugging.
prob_image_name: The name of a file in which to save the softmax
probability image. If None, the image will simply be displayed.
length: The length of the sequence to be generated. Defaults to the
num_notes_in_melody parameter of the model.
most_probable: If True, instead of sampling each note in the sequence,
the model will always choose the argmax, most probable note.'
| def generate_music_sequence(self, title='rltuner_sample', visualize_probs=False, prob_image_name=None, length=None, most_probable=False):
| if (length is None):
length = self.num_notes_in_melody
self.reset_composition()
next_obs = self.prime_internal_models()
tf.logging.info('Priming with note %s', np.argmax(next_obs))
lengths = np.full(self.q_network.batch_size, 1, dtype=int)
if visualize_probs:
prob_image = np.zeros((self.input_size, length))
generated_seq = ([0] * length)
for i in range(length):
input_batch = np.reshape(next_obs, (self.q_network.batch_size, 1, self.num_actions))
if (self.algorithm == 'g'):
(softmax, self.q_network.state_value, self.reward_rnn.state_value) = self.session.run([self.action_softmax, self.q_network.state_tensor, self.reward_rnn.state_tensor], {self.q_network.melody_sequence: input_batch, self.q_network.initial_state: self.q_network.state_value, self.q_network.lengths: lengths, self.reward_rnn.melody_sequence: input_batch, self.reward_rnn.initial_state: self.reward_rnn.state_value, self.reward_rnn.lengths: lengths})
else:
(softmax, self.q_network.state_value) = self.session.run([self.action_softmax, self.q_network.state_tensor], {self.q_network.melody_sequence: input_batch, self.q_network.initial_state: self.q_network.state_value, self.q_network.lengths: lengths})
softmax = np.reshape(softmax, self.num_actions)
if visualize_probs:
prob_image[:, i] = softmax
if most_probable:
sample = np.argmax(softmax)
else:
sample = rl_tuner_ops.sample_softmax(softmax)
generated_seq[i] = sample
next_obs = np.array(rl_tuner_ops.make_onehot([sample], self.num_actions)).flatten()
tf.logging.info('Generated sequence: %s', generated_seq)
print('Generated sequence:', generated_seq)
melody = mlib.Melody(rl_tuner_ops.decoder(generated_seq, self.q_network.transpose_amount))
sequence = melody.to_sequence(qpm=rl_tuner_ops.DEFAULT_QPM)
filename = rl_tuner_ops.get_next_file_name(self.output_dir, title, 'mid')
midi_io.sequence_proto_to_midi_file(sequence, filename)
tf.logging.info('Wrote a melody to %s', self.output_dir)
if visualize_probs:
tf.logging.info('Visualizing note selection probabilities:')
plt.figure()
plt.imshow(prob_image, interpolation='none', cmap='Reds')
plt.ylabel('Note probability')
plt.xlabel('Time (beat)')
plt.gca().invert_yaxis()
if (prob_image_name is not None):
plt.savefig(((self.output_dir + '/') + prob_image_name))
else:
plt.show()
|
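generate_music_sequence either takes the argmax of the softmax (most_probable=True) or samples a note from it. rl_tuner_ops.sample_softmax is not shown here; a plain NumPy stand-in makes the difference between the two strategies concrete:

```python
# Minimal sketch of the two note-selection strategies; the sampling helper is
# an assumed stand-in for rl_tuner_ops.sample_softmax.
import numpy as np

def sample_softmax_sketch(softmax, rng=np.random):
    """Draw one action index with probability given by the softmax."""
    return int(rng.choice(len(softmax), p=softmax))

softmax = np.array([0.05, 0.15, 0.60, 0.20])
print(int(np.argmax(softmax)))           # greedy: always index 2
print(sample_softmax_sketch(softmax))    # stochastic: usually 2, sometimes a neighbour
```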
'Computes statistics about music theory rule adherence.
Args:
num_compositions: How many compositions should be randomly generated
for computing the statistics.
key: The numeric values of notes belonging to this key. Defaults to C
Major if not provided.
tonic_note: The tonic/1st note of the desired key.
Returns:
A dictionary containing the statistics.'
| def evaluate_music_theory_metrics(self, num_compositions=10000, key=None, tonic_note=rl_tuner_ops.C_MAJOR_TONIC):
| stat_dict = rl_tuner_eval_metrics.compute_composition_stats(self, num_compositions=num_compositions, composition_length=self.num_notes_in_melody, key=key, tonic_note=tonic_note)
return stat_dict
|
'Saves a checkpoint of the model and a .npz file with stored rewards.
Args:
name: String name to use for the checkpoint and rewards files.
directory: Path to directory where the data will be saved. Defaults to
self.output_dir if None is provided.'
| def save_model(self, name, directory=None):
| if (directory is None):
directory = self.output_dir
save_loc = os.path.join(directory, name)
self.saver.save(self.session, save_loc, global_step=(len(self.rewards_batched) * self.output_every_nth))
self.save_stored_rewards(name)
|
'Saves the models stored rewards over time in a .npz file.
Args:
file_name: Name of the file that will be saved.'
| def save_stored_rewards(self, file_name):
| training_epochs = (len(self.rewards_batched) * self.output_every_nth)
filename = os.path.join(self.output_dir, ((file_name + '-') + str(training_epochs)))
np.savez(filename, train_rewards=self.rewards_batched, train_music_theory_rewards=self.music_theory_rewards_batched, train_note_rnn_rewards=self.note_rnn_rewards_batched, eval_rewards=self.eval_avg_reward, eval_music_theory_rewards=self.eval_avg_music_theory_reward, eval_note_rnn_rewards=self.eval_avg_note_rnn_reward, target_val_list=self.target_val_list)
|
'Saves the model checkpoint, .npz file, and reward plots.
Args:
name: Name of the model that will be used on the images,
checkpoint, and .npz files.
directory: Path to directory where files will be saved.
If None defaults to self.output_dir.'
| def save_model_and_figs(self, name, directory=None):
| self.save_model(name, directory=directory)
self.plot_rewards(image_name=(('TrainRewards-' + name) + '.eps'), directory=directory)
self.plot_evaluation(image_name=(('EvaluationRewards-' + name) + '.eps'), directory=directory)
self.plot_target_vals(image_name=(('TargetVals-' + name) + '.eps'), directory=directory)
|
'Plots the cumulative rewards received as the model was trained.
If image_name is None, the plot is displayed rather than saved, which is
best done from a Jupyter notebook. If called outside of Jupyter, execution
of the program will halt and a pop-up with the graph will appear. Execution
will not continue until the pop-up is closed.
Args:
image_name: Name to use when saving the plot to a file. If not
provided, image will be shown immediately.
directory: Path to directory where figure should be saved. If
None, defaults to self.output_dir.'
| def plot_rewards(self, image_name=None, directory=None):
| if (directory is None):
directory = self.output_dir
reward_batch = self.output_every_nth
x = [(reward_batch * i) for i in np.arange(len(self.rewards_batched))]
plt.figure()
plt.plot(x, self.rewards_batched)
plt.plot(x, self.music_theory_rewards_batched)
plt.plot(x, self.note_rnn_rewards_batched)
plt.xlabel('Training epoch')
plt.ylabel((('Cumulative reward for last ' + str(reward_batch)) + ' steps'))
plt.legend(['Total', 'Music theory', 'Note RNN'], loc='best')
if (image_name is not None):
plt.savefig(((directory + '/') + image_name))
else:
plt.show()
|
'Plots the rewards received as the model was evaluated during training.
If image_name is None, the plot is displayed rather than saved, which is
best done from a Jupyter notebook. If called outside of Jupyter, execution
of the program will halt and a pop-up with the graph will appear. Execution
will not continue until the pop-up is closed.
Args:
image_name: Name to use when saving the plot to a file. If not
provided, image will be shown immediately.
directory: Path to directory where figure should be saved. If
None, defaults to self.output_dir.
start_at_epoch: Training epoch where the plot should begin.'
| def plot_evaluation(self, image_name=None, directory=None, start_at_epoch=0):
| if (directory is None):
directory = self.output_dir
reward_batch = self.output_every_nth
x = [(reward_batch * i) for i in np.arange(len(self.eval_avg_reward))]
start_index = int(start_at_epoch / self.output_every_nth)
plt.figure()
plt.plot(x[start_index:], self.eval_avg_reward[start_index:])
plt.plot(x[start_index:], self.eval_avg_music_theory_reward[start_index:])
plt.plot(x[start_index:], self.eval_avg_note_rnn_reward[start_index:])
plt.xlabel('Training epoch')
plt.ylabel('Average reward')
plt.legend(['Total', 'Music theory', 'Note RNN'], loc='best')
if (image_name is not None):
plt.savefig(((directory + '/') + image_name))
else:
plt.show()
|
'Plots the target values used to train the model over time.
If image_name is None, the plot is displayed rather than saved, which is
best done from a Jupyter notebook. If called outside of Jupyter, execution
of the program will halt and a pop-up with the graph will appear. Execution
will not continue until the pop-up is closed.
Args:
image_name: Name to use when saving the plot to a file. If not
provided, image will be shown immediately.
directory: Path to directory where figure should be saved. If
None, defaults to self.output_dir.'
| def plot_target_vals(self, image_name=None, directory=None):
| if (directory is None):
directory = self.output_dir
reward_batch = self.output_every_nth
x = [(reward_batch * i) for i in np.arange(len(self.target_val_list))]
plt.figure()
plt.plot(x, self.target_val_list)
plt.xlabel('Training epoch')
plt.ylabel('Target value')
if (image_name is not None):
plt.savefig(((directory + '/') + image_name))
else:
plt.show()
|
'Primes both internal models based on self.priming_mode.
Returns:
A one-hot encoding of the note output by the q_network to be used as
the initial observation.'
| def prime_internal_models(self):
| self.prime_internal_model(self.target_q_network)
self.prime_internal_model(self.reward_rnn)
next_obs = self.prime_internal_model(self.q_network)
return next_obs
|
'Restores this model from a saved checkpoint.
Args:
directory: Path to directory where checkpoint is located. If
None, defaults to self.output_dir.
checkpoint_name: The name of the checkpoint within the
directory.
reward_file_name: The name of the .npz file where the stored
rewards are saved. If None, will not attempt to load stored
rewards.'
| def restore_from_directory(self, directory=None, checkpoint_name=None, reward_file_name=None):
| if (directory is None):
directory = self.output_dir
if (checkpoint_name is not None):
checkpoint_file = os.path.join(directory, checkpoint_name)
else:
tf.logging.info('Directory %s.', directory)
checkpoint_file = tf.train.latest_checkpoint(directory)
if (checkpoint_file is None):
tf.logging.fatal('Error! Cannot locate checkpoint in the directory')
return
print('Attempting to restore from checkpoint', checkpoint_file)
tf.logging.info('Attempting to restore from checkpoint %s', checkpoint_file)
self.saver.restore(self.session, checkpoint_file)
if (reward_file_name is not None):
npz_file_name = os.path.join(directory, reward_file_name)
print('Attempting to load saved reward values from file', npz_file_name)
tf.logging.info('Attempting to load saved reward values from file %s', npz_file_name)
npz_file = np.load(npz_file_name)
self.rewards_batched = npz_file['train_rewards']
self.music_theory_rewards_batched = npz_file['train_music_theory_rewards']
self.note_rnn_rewards_batched = npz_file['train_note_rnn_rewards']
self.eval_avg_reward = npz_file['eval_rewards']
self.eval_avg_music_theory_reward = npz_file['eval_music_theory_rewards']
self.eval_avg_note_rnn_reward = npz_file['eval_note_rnn_rewards']
self.target_val_list = npz_file['target_val_list']
|
'Initialize by building the graph and loading a previous checkpoint.
Args:
graph: A tensorflow graph where the MelodyRNN\'s graph will be added.
scope: The tensorflow scope where this network will be saved.
checkpoint_dir: Path to the directory where the checkpoint file is saved.
checkpoint_file: Path to a checkpoint file to be used if none can be
found in the checkpoint_dir
midi_primer: Path to a single midi file that can be used to prime the
model.
training_file_list: List of paths to tfrecord files containing melody
training data.
hparams: A tf_lib.HParams object. Must match the hparams used to create
the checkpoint file.
note_rnn_type: If \'default\', will use the basic LSTM described in the
research paper. If \'basic_rnn\', will assume the checkpoint is from a
Magenta basic_rnn model.
checkpoint_scope: The scope in which the model was originally defined
when it was first trained.'
| def __init__(self, graph, scope, checkpoint_dir, checkpoint_file=None, midi_primer=None, training_file_list=None, hparams=None, note_rnn_type='default', checkpoint_scope='rnn_model'):
| self.graph = graph
self.session = None
self.scope = scope
self.batch_size = 1
self.midi_primer = midi_primer
self.checkpoint_scope = checkpoint_scope
self.note_rnn_type = note_rnn_type
self.training_file_list = training_file_list
self.checkpoint_dir = checkpoint_dir
self.checkpoint_file = checkpoint_file
if (hparams is not None):
tf.logging.info('Using custom hparams')
self.hparams = hparams
else:
tf.logging.info('Empty hparams string. Using defaults')
self.hparams = rl_tuner_ops.default_hparams()
self.build_graph()
self.state_value = self.get_zero_state()
if (midi_primer is not None):
self.load_primer()
self.variable_names = rl_tuner_ops.get_variable_names(self.graph, self.scope)
self.transpose_amount = 0
|
'Gets an initial state of zeros of the appropriate size.
Required size is based on the model\'s internal RNN cell.
Returns:
A matrix of batch_size x cell size zeros.'
| def get_zero_state(self):
| return np.zeros((self.batch_size, self.cell.state_size))
|
'Saves the session, restores variables from checkpoint, primes model.
Model is primed with its default midi file.
Args:
session: A tensorflow session.'
| def restore_initialize_prime(self, session):
| self.session = session
self.restore_vars_from_checkpoint(self.checkpoint_dir)
self.prime_model()
|
'Saves the session, restores variables from checkpoint.
Args:
session: A tensorflow session.'
| def initialize_and_restore(self, session):
| self.session = session
self.restore_vars_from_checkpoint(self.checkpoint_dir)
|
'Saves the session, initializes all variables to random values.
Args:
session: A tensorflow session.'
| def initialize_new(self, session=None):
| with self.graph.as_default():
if (session is None):
self.session = tf.Session(graph=self.graph)
else:
self.session = session
self.session.run(tf.initialize_all_variables())
|
'Constructs a dict mapping the checkpoint variables to those in new graph.
Returns:
A dict mapping variable names in the checkpoint to variables in the graph.'
| def get_variable_name_dict(self):
| var_dict = dict()
for var in self.variables():
inner_name = rl_tuner_ops.get_inner_scope(var.name)
inner_name = rl_tuner_ops.trim_variable_postfixes(inner_name)
if ('/Adam' in var.name):
pass
elif (self.note_rnn_type == 'basic_rnn'):
var_dict[inner_name] = var
else:
var_dict[((self.checkpoint_scope + '/') + inner_name)] = var
return var_dict
|
'Constructs the portion of the graph that belongs to this model.'
| def build_graph(self):
| tf.logging.info('Initializing melody RNN graph for scope %s', self.scope)
with self.graph.as_default():
with tf.device((lambda op: '')):
with tf.variable_scope(self.scope):
if (self.note_rnn_type == 'basic_rnn'):
self.cell = events_rnn_graph.make_rnn_cell(self.hparams.rnn_layer_sizes)
else:
self.cell = rl_tuner_ops.make_rnn_cell(self.hparams.rnn_layer_sizes)
self.melody_sequence = tf.placeholder(tf.float32, [None, None, self.hparams.one_hot_length], name='melody_sequence')
self.lengths = tf.placeholder(tf.int32, [None], name='lengths')
self.initial_state = tf.placeholder(tf.float32, [None, self.cell.state_size], name='initial_state')
if (self.training_file_list is not None):
(self.train_sequence, self.train_labels, self.train_lengths) = sequence_example_lib.get_padded_batch(self.training_file_list, self.hparams.batch_size, self.hparams.one_hot_length)
def run_network_on_melody(m_seq, lens, initial_state, swap_memory=True, parallel_iterations=1):
'Internal function that defines the RNN network structure.\n\n Args:\n m_seq: A batch of melody sequences of one-hot notes.\n lens: Lengths of the melody_sequences.\n initial_state: Vector representing the initial state of the RNN.\n swap_memory: Uses more memory and is faster.\n parallel_iterations: Argument to tf.nn.dynamic_rnn.\n Returns:\n Output of network (either softmax or logits) and RNN state.\n '
(outputs, final_state) = tf.nn.dynamic_rnn(self.cell, m_seq, sequence_length=lens, initial_state=initial_state, swap_memory=swap_memory, parallel_iterations=parallel_iterations)
outputs_flat = tf.reshape(outputs, [(-1), self.hparams.rnn_layer_sizes[(-1)]])
linear_layer = (tf.contrib.layers.linear if (self.note_rnn_type == 'basic_rnn') else tf.contrib.layers.legacy_linear)
logits_flat = linear_layer(outputs_flat, self.hparams.one_hot_length)
return (logits_flat, final_state)
(self.logits, self.state_tensor) = run_network_on_melody(self.melody_sequence, self.lengths, self.initial_state)
self.softmax = tf.nn.softmax(self.logits)
self.run_network_on_melody = run_network_on_melody
if (self.training_file_list is not None):
with tf.variable_scope(self.scope, reuse=True):
zero_state = self.cell.zero_state(batch_size=self.hparams.batch_size, dtype=tf.float32)
(self.train_logits, self.train_state) = run_network_on_melody(self.train_sequence, self.train_lengths, zero_state)
self.train_softmax = tf.nn.softmax(self.train_logits)
|
'Loads model weights from a saved checkpoint.
Args:
checkpoint_dir: Directory which contains a saved checkpoint of the
model.'
| def restore_vars_from_checkpoint(self, checkpoint_dir):
| tf.logging.info('Restoring variables from checkpoint')
var_dict = self.get_variable_name_dict()
with self.graph.as_default():
saver = tf.train.Saver(var_list=var_dict)
tf.logging.info('Checkpoint dir: %s', checkpoint_dir)
checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)
if (checkpoint_file is None):
tf.logging.warn("Can't find checkpoint file, using %s", self.checkpoint_file)
checkpoint_file = self.checkpoint_file
tf.logging.info('Checkpoint file: %s', checkpoint_file)
saver.restore(self.session, checkpoint_file)
|
'Loads default MIDI primer file.
Also assigns the steps per bar of this file to be the model\'s defaults.'
| def load_primer(self):
| if (not os.path.exists(self.midi_primer)):
tf.logging.warn('ERROR! No such primer file exists! %s', self.midi_primer)
return
self.primer_sequence = midi_io.midi_file_to_sequence_proto(self.midi_primer)
quantized_seq = sequences_lib.quantize_note_sequence(self.primer_sequence, steps_per_quarter=4)
(extracted_melodies, _) = melodies_lib.extract_melodies(quantized_seq, min_bars=0, min_unique_pitches=1)
self.primer = extracted_melodies[0]
self.steps_per_bar = self.primer.steps_per_bar
|
'Primes the model with its default midi primer.'
| def prime_model(self):
| with self.graph.as_default():
tf.logging.debug('Priming the model with MIDI file %s', self.midi_primer)
encoder = magenta.music.OneHotEventSequenceEncoderDecoder(magenta.music.MelodyOneHotEncoding(min_note=rl_tuner_ops.MIN_NOTE, max_note=rl_tuner_ops.MAX_NOTE))
seq = encoder.encode(self.primer)
features = seq.feature_lists.feature_list['inputs'].feature
primer_input = [list(i.float_list.value) for i in features]
primer_input_batch = np.tile([primer_input], (self.batch_size, 1, 1))
(self.state_value, softmax) = self.session.run([self.state_tensor, self.softmax], feed_dict={self.initial_state: self.state_value, self.melody_sequence: primer_input_batch, self.lengths: np.full(self.batch_size, len(self.primer), dtype=int)})
priming_output = softmax[(-1), :]
self.priming_note = self.get_note_from_softmax(priming_output)
|
'Extracts a one-hot encoding of the most probable note.
Args:
softmax: Softmax probabilities over possible next notes.
Returns:
One-hot encoding of most probable note.'
| def get_note_from_softmax(self, softmax):
| note_idx = np.argmax(softmax)
note_enc = rl_tuner_ops.make_onehot([note_idx], rl_tuner_ops.NUM_CLASSES)
return np.reshape(note_enc, rl_tuner_ops.NUM_CLASSES)
|
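rl_tuner_ops.make_onehot is used in several of the methods above but is not shown in this excerpt. Its assumed behaviour, and the round trip back through argmax used by get_note_from_softmax, in plain NumPy:

```python
# Assumed behaviour of rl_tuner_ops.make_onehot: each integer index becomes a
# one-hot row of length num_classes; the class count here is illustrative only.
import numpy as np

def make_onehot_sketch(indices, num_classes):
    onehot = np.zeros((len(indices), num_classes))
    onehot[np.arange(len(indices)), indices] = 1.0
    return onehot

enc = make_onehot_sketch([5], 38)
print(int(np.argmax(enc[0])))            # -> 5, inverting the encoding
```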
'Allows the network to be called, as in the following code snippet!
q_network = MelodyRNN(...)
q_network()
The q_network() operation can then be placed into a larger graph as a tf op.
Note that to get actual values from call, must do session.run and feed in
melody_sequence, lengths, and initial_state in the feed dict.
Returns:
Either softmax probabilities over notes, or raw logit scores.'
| def __call__(self):
| with self.graph.as_default():
with tf.variable_scope(self.scope, reuse=True):
(logits, self.state_tensor) = self.run_network_on_melody(self.melody_sequence, self.lengths, self.initial_state)
return logits
|
'Runs one batch of training data through the model.
Uses a queue runner to pull one batch of data from the training files
and run it through the model.
Returns:
A batch of softmax probabilities and model state vectors.'
| def run_training_batch(self):
| if (self.training_file_list is None):
tf.logging.warn('No training file path was provided, cannot run training batch')
return
coord = tf.train.Coordinator()
tf.train.start_queue_runners(sess=self.session, coord=coord)
(softmax, state, lengths) = self.session.run([self.train_softmax, self.train_state, self.train_lengths])
coord.request_stop()
return (softmax, state, lengths)
|
'Given a note, uses the model to predict the most probable next note.
Args:
note: A one-hot encoding of the note.
Returns:
Next note in the same format.'
| def get_next_note_from_note(self, note):
| with self.graph.as_default():
with tf.variable_scope(self.scope, reuse=True):
singleton_lengths = np.full(self.batch_size, 1, dtype=int)
input_batch = np.reshape(note, (self.batch_size, 1, rl_tuner_ops.NUM_CLASSES))
(softmax, self.state_value) = self.session.run([self.softmax, self.state_tensor], {self.melody_sequence: input_batch, self.initial_state: self.state_value, self.lengths: singleton_lengths})
return self.get_note_from_softmax(softmax)
|
'Gets all the variables in the graph belonging to this model.
Returns:
List of variables.'
| def variables(self):
| with self.graph.as_default():
return [v for v in tf.global_variables() if v.name.startswith(self.scope)]
|
'Initialize dataset using a subset.'
| def __init__(self, subset):
| assert (subset in self.available_subsets()), self.available_subsets()
self.subset = subset
|
'Returns the number of classes in the data set.'
| def num_classes(self):
| return 1000
|
'Returns the number of examples in the data set.'
| def num_examples_per_epoch(self):
| if (self.subset == 'train'):
return 1281167
if (self.subset == 'validation'):
return 50000
|
'Prints instructions for downloading and preparing the ImageNet data.'
| def download_message(self):
| print(('Failed to find any ImageNet %s files' % self.subset))
print('')
print('If you have already downloaded and processed the data, then make sure to set --imagenet_data_dir to point to the directory containing the location of the sharded TFRecords.\n')
print('If you have not downloaded and prepared the ImageNet data in the TFRecord format, you will need to do this at least once. This process could take several hours depending on the speed of your computer and network connection\n')
print('Please see https://github.com/tensorflow/models/blob/master/inception for instructions on how to build the ImageNet dataset using download_and_preprocess_imagenet.\n')
print('Note that the raw data size is 300 GB and the processed data size is 150 GB. Please ensure you have at least 500GB disk space.')
|
'Returns the list of available subsets.'
| def available_subsets(self):
| return ['train', 'validation']
|
'Returns a python list of all (sharded) data subset files.
Returns:
python list of all (sharded) data set files.
Raises:
ValueError: if there are no data_files matching the subset.'
| def data_files(self):
| imagenet_data_dir = os.path.expanduser(FLAGS.imagenet_data_dir)
tf_record_pattern = os.path.join(imagenet_data_dir, ('%s-*' % self.subset))
data_files = tf.gfile.Glob(tf_record_pattern)
if (not data_files):
print(('No files found for dataset ImageNet/%s at %s' % (self.subset, imagenet_data_dir)))
self.download_message()
exit((-1))
return data_files
|
'Return a reader for a single entry from the data set.
See io_ops.py for details of Reader class.
Returns:
Reader object that reads the data set.'
| def reader(self):
| return tf.TFRecordReader()
|
'Creates a PianorollRnnNadeSequenceGenerator.
Args:
model: Instance of PianorollRnnNadeModel.
details: A generator_pb2.GeneratorDetails for this generator.
steps_per_quarter: What precision to use when quantizing the sequence. How
many steps per quarter note.
checkpoint: Where to search for the most recent model checkpoint. Mutually
exclusive with `bundle`.
bundle: A GeneratorBundle object that includes both the model checkpoint
and metagraph. Mutually exclusive with `checkpoint`.'
| def __init__(self, model, details, steps_per_quarter=4, checkpoint=None, bundle=None):
| super(PianorollRnnNadeSequenceGenerator, self).__init__(model, details, checkpoint, bundle)
self.steps_per_quarter = steps_per_quarter
|
'The number of hidden units for each input/output of the NADE.'
| @property
def num_hidden(self):
| return self._num_hidden
|
'The number of input/output dimensions of the NADE.'
| @property
def num_dims(self):
| return self._num_dims
|
'Computes the log probability and conditionals for observations.
Args:
x: A batch of observations to compute the log probability of, sized
`[batch_size, num_dims]`.
b_enc: External encoder bias terms (`b` in [1]), sized
`[batch_size, num_hidden]`.
b_dec: External decoder bias terms (`c` in [1]), sized
`[batch_size, num_dims]`.
Returns:
log_prob: The log probabilities of each observation in the batch, sized
`[batch_size, 1]`.
cond_probs: The conditional probabilities at each index for every batch,
sized `[batch_size, num_dims]`.'
| def log_prob(self, x, b_enc, b_dec):
| batch_size = tf.shape(x)[0]
a_0 = b_enc
log_p_0 = tf.zeros([batch_size, 1])
cond_p_0 = []
x_arr = tf.unstack(tf.reshape(tf.transpose(x), [self.num_dims, batch_size, 1]))
w_enc_arr = tf.unstack(self.w_enc)
w_dec_arr = tf.unstack(self.w_dec_t)
b_dec_arr = tf.unstack(tf.reshape(tf.transpose(b_dec), [self.num_dims, batch_size, 1]))
def loop_body(i, a, log_p, cond_p):
'Accumulate hidden state, log_p, and cond_p for index i.'
w_enc_i = w_enc_arr[i]
w_dec_i = w_dec_arr[i]
b_dec_i = b_dec_arr[i]
v_i = x_arr[i]
cond_p_i = self._cond_prob(a, w_dec_i, b_dec_i)
log_p_i = ((v_i * safe_log(cond_p_i)) + ((1 - v_i) * safe_log((1 - cond_p_i))))
log_p_new = (log_p + log_p_i)
cond_p_new = (cond_p + [cond_p_i])
a_new = (a + tf.matmul(v_i, w_enc_i))
return (a_new, log_p_new, cond_p_new)
(a, log_p, cond_p) = (a_0, log_p_0, cond_p_0)
for i in range(self.num_dims):
(a, log_p, cond_p) = loop_body(i, a, log_p, cond_p)
return (tf.squeeze(log_p, squeeze_dims=[1]), tf.transpose(tf.squeeze(tf.stack(cond_p), [2])))
|
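The quantity accumulated in loop_body is a per-dimension Bernoulli log-likelihood, v_i * log(p_i) + (1 - v_i) * log(1 - p_i). safe_log is not defined in this excerpt; it is presumably a log clipped away from zero so saturated conditionals do not produce -inf. A NumPy sketch of the term, with the clipping as an assumption:

```python
# Sketch of the per-dimension Bernoulli log-likelihood accumulated in loop_body.
# The clipped log below is an assumption about what safe_log does.
import numpy as np

def safe_log_sketch(x, eps=1e-12):
    return np.log(np.clip(x, eps, 1.0))

v_i = np.array([1.0, 0.0, 1.0])          # observed binary values for one dimension
cond_p_i = np.array([0.9, 0.2, 1.0])     # conditionals from _cond_prob, one saturated
log_p_i = v_i * safe_log_sketch(cond_p_i) + (1 - v_i) * safe_log_sketch(1 - cond_p_i)
print(log_p_i)                            # finite even where cond_p_i == 1.0
```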
'Generate samples for the batch from the NADE.
Args:
b_enc: External encoder bias terms (`b` in [1]), sized
`[batch_size, num_hidden]`.
b_dec: External decoder bias terms (`c` in [1]), sized
`[batch_size, num_dims]`.
Returns:
sample: The generated samples, sized `[batch_size, num_dims]`.
log_prob: The log probabilities of each observation in the batch, sized
`[batch_size, 1]`.'
| def sample(self, b_enc, b_dec):
| batch_size = tf.shape(b_enc)[0]
a_0 = b_enc
sample_0 = []
log_p_0 = tf.zeros([batch_size, 1])
w_enc_arr = tf.unstack(self.w_enc)
w_dec_arr = tf.unstack(self.w_dec_t)
b_dec_arr = tf.unstack(tf.reshape(tf.transpose(b_dec), [self.num_dims, batch_size, 1]))
def loop_body(i, a, sample, log_p):
'Accumulate hidden state, sample, and log probability for index i.'
w_enc_i = w_enc_arr[i]
w_dec_i = w_dec_arr[i]
b_dec_i = b_dec_arr[i]
cond_p_i = self._cond_prob(a, w_dec_i, b_dec_i)
bernoulli = tf.contrib.distributions.Bernoulli(probs=cond_p_i, dtype=tf.float32)
v_i = bernoulli.sample()
sample_new = (sample + [v_i])
log_p_i = ((v_i * safe_log(cond_p_i)) + ((1 - v_i) * safe_log((1 - cond_p_i))))
log_p_new = (log_p + log_p_i)
a_new = (a + tf.matmul(v_i, w_enc_i))
return (a_new, sample_new, log_p_new)
(a, sample, log_p) = (a_0, sample_0, log_p_0)
for i in range(self.num_dims):
(a, sample, log_p) = loop_body(i, a, sample, log_p)
return (tf.transpose(tf.squeeze(tf.stack(sample), [2])), log_p)
|
'Gets the conditional probability for a single dimension.
Args:
a: Model\'s hidden state, sized `[batch_size, num_hidden]`.
w_dec_i: The decoder weight terms for the dimension, sized
`[num_hidden, 1]`.
b_dec_i: The decoder bias terms, sized `[batch_size, 1]`.
Returns:
The conditional probability of the dimension, sized `[batch_size, 1]`.'
| def _cond_prob(self, a, w_dec_i, b_dec_i):
| h = tf.sigmoid(a)
p_cond_i = tf.sigmoid((b_dec_i + tf.matmul(h, w_dec_i)))
return p_cond_i
|
'Return a tensor or tuple of tensors for an initial rnn state.'
| def _get_rnn_zero_state(self, batch_size):
| return self._rnn_cell.zero_state(batch_size, tf.float32)
|
'Computes the state of the RNN-NADE (NADE bias parameters and RNN state).
Args:
inputs: A batch of sequences to compute the state from, sized
`[batch_size, max(lengths), num_dims]` or `[batch_size, num_dims]`.
lengths: The length of each sequence, sized `[batch_size]`.
initial_state: An RnnNadeStateTuple, the initial state of the RNN-NADE, or
None if the zero state should be used.
Returns:
final_state: An RnnNadeStateTuple, the final state of the RNN-NADE.'
| def _get_state(self, inputs, lengths=None, initial_state=None):
| batch_size = inputs.shape[0].value
lengths = (tf.tile(tf.shape(inputs)[1:2], [batch_size]) if (lengths is None) else lengths)
initial_rnn_state = (self._get_rnn_zero_state(batch_size) if (initial_state is None) else initial_state.rnn_state)
helper = tf.contrib.seq2seq.TrainingHelper(inputs=inputs, sequence_length=lengths)
decoder = tf.contrib.seq2seq.BasicDecoder(cell=self._rnn_cell, helper=helper, initial_state=initial_rnn_state, output_layer=self._fc_layer)
(final_outputs, final_rnn_state) = tf.contrib.seq2seq.dynamic_decode(decoder)[0:2]
final_outputs_flat = magenta.common.flatten_maybe_padded_sequences(final_outputs.rnn_output, lengths)
(b_enc, b_dec) = tf.split(final_outputs_flat, [self._nade.num_hidden, self._nade.num_dims], axis=1)
return RnnNadeStateTuple(b_enc, b_dec, final_rnn_state)
|
'Computes the log probability of a sequence of values.
Flattens the time dimension.
Args:
sequences: A batch of sequences to compute the log probabilities of,
sized `[batch_size, max(lengths), num_dims]`.
lengths: The length of each sequence, sized `[batch_size]` or None if
all are equal.
Returns:
log_prob: The log probability of each sequence value, sized
`[sum(lengths), 1]`.
cond_prob: The conditional probabilities at each non-padded value for
every batch, sized `[sum(lengths), num_dims]`.'
| def log_prob(self, sequences, lengths=None):
| assert (self._num_dims == sequences.shape[2].value)
inputs = sequences[:, 0:(-1), :]
inputs = tf.pad(inputs, [[0, 0], [1, 0], [0, 0]])
state = self._get_state(inputs, lengths=lengths)
labels_flat = magenta.common.flatten_maybe_padded_sequences(sequences, lengths)
return self._nade.log_prob(labels_flat, state.b_enc, state.b_dec)
|
'Computes the new RNN-NADE state from a batch of inputs.
Args:
inputs: A batch of values to compute the log probabilities of,
sized `[batch_size, length, num_dims]`.
state: An RnnNadeStateTuple containing the RNN-NADE for each value, sized
`([batch_size, self._nade.num_hidden], [batch_size, num_dims],
[batch_size, self._rnn_cell.state_size]`).
Returns:
new_state: The updated RNN-NADE state tuple given the new inputs.'
| def steps(self, inputs, state):
| return self._get_state(inputs, initial_state=state)
|
'Computes a sample and its probability from each of a batch of states.
Args:
state: An RnnNadeStateTuple containing the state of the RNN-NADE for each
sample, sized
`([batch_size, self._nade.num_hidden], [batch_size, num_dims],
[batch_size, self._rnn_cell.state_size]`).
Returns:
sample: A sample for each input state, sized `[batch_size, num_dims]`.
log_prob: The log probability of each sample, sized `[batch_size, 1]`.'
| def sample_single(self, state):
| (sample, log_prob) = self._nade.sample(state.b_enc, state.b_dec)
return (sample, log_prob)
|
'Create an RnnNadeStateTuple of zeros.
Args:
batch_size: batch size.
Returns:
An RnnNadeStateTuple of zeros.'
| def zero_state(self, batch_size):
| with tf.name_scope('RnnNadeZeroState', values=[batch_size]):
zero_state = self._get_rnn_zero_state(batch_size)
return RnnNadeStateTuple(tf.zeros((batch_size, self._nade.num_hidden), name='b_enc'), tf.zeros((batch_size, self._num_dims), name='b_dec'), zero_state)
|
'Extends a batch of event sequences by a single step each.
This method modifies the event sequences in place.
Args:
pianoroll_sequences: A list of PianorollSequences. The list of event
sequences should have length equal to `self._batch_size()`.
inputs: A Python list of model inputs, with length equal to
`self._batch_size()`.
initial_state: A numpy array containing the initial RNN-NADE state, where
`initial_state.shape[0]` is equal to `self._batch_size()`.
temperature: Unused.
Returns:
final_state: The final RNN-NADE state, the same size as `initial_state`.
loglik: The log-likelihood of the sampled value for each event
sequence, a 1-D numpy array of length
`self._batch_size()`. If `inputs` is a full-length inputs batch, the
log-likelihood of each entire sequence up to and including the
generated step will be computed and returned.'
| def _generate_step_for_batch(self, pianoroll_sequences, inputs, initial_state, temperature):
| assert (len(pianoroll_sequences) == self._batch_size())
graph_inputs = self._session.graph.get_collection('inputs')[0]
graph_initial_state = tuple(self._session.graph.get_collection('initial_state'))
graph_final_state = tuple(self._session.graph.get_collection('final_state'))
graph_sample = self._session.graph.get_collection('sample')[0]
graph_log_prob = self._session.graph.get_collection('log_prob')[0]
(sample, loglik, final_state) = self._session.run([graph_sample, graph_log_prob, graph_final_state], {graph_inputs: inputs, graph_initial_state: initial_state})
self._config.encoder_decoder.extend_event_sequences(pianoroll_sequences, sample)
return (final_state, loglik[:, 0])
|
'Generate a pianoroll track from a primer pianoroll track.
Args:
num_steps: The integer length in steps of the final track, after
generation. Includes the primer.
primer_sequence: The primer sequence, a PianorollSequence object.
beam_size: An integer, beam size to use when generating tracks via
beam search.
branch_factor: An integer, beam search branch factor to use.
steps_per_iteration: The number of steps to take per beam search
iteration.
Returns:
The generated PianorollSequence object (which begins with the provided
primer track).'
| def generate_pianoroll_sequence(self, num_steps, primer_sequence, beam_size=1, branch_factor=1, steps_per_iteration=1):
| return self._generate_events(num_steps=num_steps, primer_events=primer_sequence, temperature=None, beam_size=beam_size, branch_factor=branch_factor, steps_per_iteration=steps_per_iteration)
|
'Remove entries from strokes having > max_seq_length points.'
| def preprocess(self, strokes):
| raw_data = []
seq_len = []
count_data = 0
for i in range(len(strokes)):
data = strokes[i]
if (len(data) <= self.max_seq_length):
count_data += 1
data = np.minimum(data, self.limit)
data = np.maximum(data, (- self.limit))
data = np.array(data, dtype=np.float32)
data[:, 0:2] /= self.scale_factor
raw_data.append(data)
seq_len.append(len(data))
seq_len = np.array(seq_len)
idx = np.argsort(seq_len)
self.strokes = []
for i in range(len(seq_len)):
self.strokes.append(raw_data[idx[i]])
print(('total images <= max_seq_len is %d' % count_data))
self.num_batches = int((count_data / self.batch_size))
|
'Return a random sample, in stroke-3 format as used by draw_strokes.'
| def random_sample(self):
| sample = np.copy(random.choice(self.strokes))
return sample
|
'Augment data by stretching x and y axis randomly [1-e, 1+e].'
| def random_scale(self, data):
| x_scale_factor = ((((np.random.random() - 0.5) * 2) * self.random_scale_factor) + 1.0)
y_scale_factor = ((((np.random.random() - 0.5) * 2) * self.random_scale_factor) + 1.0)
result = np.copy(data)
result[:, 0] *= x_scale_factor
result[:, 1] *= y_scale_factor
return result
|
'Calculate the normalizing factor explained in appendix of sketch-rnn.'
| def calculate_normalizing_scale_factor(self):
| data = []
for i in range(len(self.strokes)):
if (len(self.strokes[i]) > self.max_seq_length):
continue
for j in range(len(self.strokes[i])):
data.append(self.strokes[i][(j, 0)])
data.append(self.strokes[i][(j, 1)])
data = np.array(data)
return np.std(data)
|
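The scale factor is simply the standard deviation of every delta-x and delta-y value across the sequences that fit within max_seq_length. The same computation in vectorised NumPy, on illustrative toy data:

```python
# Vectorised equivalent of the scale-factor computation, on toy stroke-3 data
# (columns: delta_x, delta_y, pen_lifted).
import numpy as np

strokes = [np.array([[5.0, -3.0, 0], [2.0, 1.0, 1]]),
           np.array([[-4.0, 6.0, 0], [0.5, -0.5, 0], [1.0, 2.0, 1]])]
deltas = np.concatenate([s[:, 0:2].ravel() for s in strokes])
print(np.std(deltas))                    # the normalising scale factor for this toy set
```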
'Normalize entire dataset (delta_x, delta_y) by the scaling factor.'
| def normalize(self, scale_factor=None):
| if (scale_factor is None):
scale_factor = self.calculate_normalizing_scale_factor()
self.scale_factor = scale_factor
for i in range(len(self.strokes)):
self.strokes[i][:, 0:2] /= self.scale_factor
|
'Given a list of indices, return the potentially augmented batch.'
| def _get_batch_from_indices(self, indices):
| x_batch = []
seq_len = []
for idx in range(len(indices)):
i = indices[idx]
data = self.random_scale(self.strokes[i])
data_copy = np.copy(data)
if (self.augment_stroke_prob > 0):
data_copy = augment_strokes(data_copy, self.augment_stroke_prob)
x_batch.append(data_copy)
length = len(data_copy)
seq_len.append(length)
seq_len = np.array(seq_len, dtype=int)
return (x_batch, self.pad_batch(x_batch, self.max_seq_length), seq_len)
|
'Return a randomised portion of the training data.'
| def random_batch(self):
| idx = np.random.permutation(range(0, len(self.strokes)))[0:self.batch_size]
return self._get_batch_from_indices(idx)
|
'Get the idx\'th batch from the dataset.'
| def get_batch(self, idx):
| assert (idx >= 0), 'idx must be non negative'
assert (idx < self.num_batches), 'idx must be less than the number of batches'
start_idx = (idx * self.batch_size)
indices = range(start_idx, (start_idx + self.batch_size))
return self._get_batch_from_indices(indices)
|
'Pad the batch to the bigger stroke-5 format as described in the paper.'
| def pad_batch(self, batch, max_len):
| result = np.zeros((self.batch_size, (max_len + 1), 5), dtype=float)
assert (len(batch) == self.batch_size)
for i in range(self.batch_size):
l = len(batch[i])
assert (l <= max_len)
result[i, 0:l, 0:2] = batch[i][:, 0:2]
result[i, 0:l, 3] = batch[i][:, 2]
result[i, 0:l, 2] = (1 - result[i, 0:l, 3])
result[i, l:, 4] = 1
result[i, 1:, :] = result[i, :(-1), :]
result[i, 0, :] = 0
result[(i, 0, 2)] = self.start_stroke_token[2]
result[(i, 0, 3)] = self.start_stroke_token[3]
result[(i, 0, 4)] = self.start_stroke_token[4]
return result
|
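pad_batch converts stroke-3 points (dx, dy, pen_lifted) into the stroke-5 format (dx, dy, pen_down, pen_lifted, end_of_sketch), pads to max_len, shifts everything right by one step, and writes the start token into step 0. A single-example walk-through, assuming the usual sketch-rnn start token S_0 = [0, 0, 1, 0, 0]:

```python
# Single-example sketch of the stroke-3 -> stroke-5 conversion above; the
# start token value is an assumption taken from the sketch-rnn paper.
import numpy as np

stroke3 = np.array([[5.0, -3.0, 0],      # (dx, dy, pen_lifted_after_this_point)
                    [2.0, 1.0, 1]])
max_len = 4
l = len(stroke3)

stroke5 = np.zeros((max_len + 1, 5))
stroke5[0:l, 0:2] = stroke3[:, 0:2]      # copy the offsets
stroke5[0:l, 3] = stroke3[:, 2]          # p2: pen lifted after this point
stroke5[0:l, 2] = 1 - stroke5[0:l, 3]    # p1: pen touching paper
stroke5[l:, 4] = 1                       # p3: sketch finished (padding)
stroke5[1:, :] = stroke5[:-1, :]         # shift right to make room for S_0
stroke5[0, :] = [0, 0, 1, 0, 0]          # start-of-sequence token
print(stroke5)
```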
'Initialize the Layer Norm LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (default 1.0).
use_recurrent_dropout: Whether to use Recurrent Dropout (default False)
dropout_keep_prob: float, dropout keep probability (default 0.90)'
| def __init__(self, num_units, forget_bias=1.0, use_recurrent_dropout=False, dropout_keep_prob=0.9):
| self.num_units = num_units
self.forget_bias = forget_bias
self.use_recurrent_dropout = use_recurrent_dropout
self.dropout_keep_prob = dropout_keep_prob
|
'Initialize the Layer Norm HyperLSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (default 1.0).
use_recurrent_dropout: Whether to use Recurrent Dropout (default False)
dropout_keep_prob: float, dropout keep probability (default 0.90)
use_layer_norm: boolean. (default True)
Controls whether we use LayerNorm layers in main LSTM & HyperLSTM cell.
hyper_num_units: int, number of units in HyperLSTM cell.
(default is 256, recommend experimenting with larger values for larger tasks)
hyper_embedding_size: int, size of signals emitted from HyperLSTM cell.
(default is 32, recommend trying larger values for large datasets)
hyper_use_recurrent_dropout: boolean. (default False)
Controls whether HyperLSTM cell also uses recurrent dropout.
Recommend turning this on only if hyper_num_units becomes large (>= 512)'
| def __init__(self, num_units, forget_bias=1.0, use_recurrent_dropout=False, dropout_keep_prob=0.9, use_layer_norm=True, hyper_num_units=256, hyper_embedding_size=32, hyper_use_recurrent_dropout=False):
| self.num_units = num_units
self.forget_bias = forget_bias
self.use_recurrent_dropout = use_recurrent_dropout
self.dropout_keep_prob = dropout_keep_prob
self.use_layer_norm = use_layer_norm
self.hyper_num_units = hyper_num_units
self.hyper_embedding_size = hyper_embedding_size
self.hyper_use_recurrent_dropout = hyper_use_recurrent_dropout
self.total_num_units = (self.num_units + self.hyper_num_units)
if self.use_layer_norm:
cell_fn = LayerNormLSTMCell
else:
cell_fn = LSTMCell
self.hyper_cell = cell_fn(hyper_num_units, use_recurrent_dropout=hyper_use_recurrent_dropout, dropout_keep_prob=dropout_keep_prob)
|
'Initializer for the SketchRNN model.
Args:
hps: a HParams object containing model hyperparameters
gpu_mode: a boolean that, when True, uses GPU mode.
reuse: a boolean that, when True, attempts to reuse variables.'
| def __init__(self, hps, gpu_mode=True, reuse=False):
| self.hps = hps
with tf.variable_scope('vector_rnn', reuse=reuse):
if (not gpu_mode):
with tf.device('/cpu:0'):
tf.logging.info('Model using cpu.')
self.build_model(hps)
else:
tf.logging.info('Model using gpu.')
self.build_model(hps)
|
'Define the bi-directional encoder module of sketch-rnn.'
| def encoder(self, batch, sequence_lengths):
| (unused_outputs, last_states) = tf.nn.bidirectional_dynamic_rnn(self.enc_cell_fw, self.enc_cell_bw, batch, sequence_length=sequence_lengths, time_major=False, swap_memory=True, dtype=tf.float32, scope='ENC_RNN')
(last_state_fw, last_state_bw) = last_states
last_h_fw = self.enc_cell_fw.get_output(last_state_fw)
last_h_bw = self.enc_cell_bw.get_output(last_state_bw)
last_h = tf.concat([last_h_fw, last_h_bw], 1)
mu = rnn.super_linear(last_h, self.hps.z_size, input_size=(self.hps.enc_rnn_size * 2), scope='ENC_RNN_mu', init_w='gaussian', weight_start=0.001)
presig = rnn.super_linear(last_h, self.hps.z_size, input_size=(self.hps.enc_rnn_size * 2), scope='ENC_RNN_sigma', init_w='gaussian', weight_start=0.001)
return (mu, presig)
|
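The (mu, presig) pair returned here is turned into a latent sample and a KL penalty in build_model below; a NumPy sketch of that reparameterization with placeholder values:

import numpy as np

batch_size, z_size = 2, 4
mu = np.zeros((batch_size, z_size))       # encoder mean
presig = np.zeros((batch_size, z_size))   # encoder log-variance

sigma = np.exp(presig / 2.0)
eps = np.random.normal(0.0, 1.0, size=(batch_size, z_size))
z = mu + sigma * eps                      # reparameterized latent sample

# KL divergence to a unit Gaussian, matching the kl_cost expression below.
kl = -0.5 * np.mean(1 + presig - np.square(mu) - np.exp(presig))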
'Define model architecture.'
| def build_model(self, hps):
| if hps.is_training:
self.global_step = tf.Variable(0, name='global_step', trainable=False)
if (hps.dec_model == 'lstm'):
cell_fn = rnn.LSTMCell
elif (hps.dec_model == 'layer_norm'):
cell_fn = rnn.LayerNormLSTMCell
elif (hps.dec_model == 'hyper'):
cell_fn = rnn.HyperLSTMCell
else:
assert False, 'please choose a respectable cell'
if (hps.enc_model == 'lstm'):
enc_cell_fn = rnn.LSTMCell
elif (hps.enc_model == 'layer_norm'):
enc_cell_fn = rnn.LayerNormLSTMCell
elif (hps.enc_model == 'hyper'):
enc_cell_fn = rnn.HyperLSTMCell
else:
assert False, 'please choose a respectable cell'
use_recurrent_dropout = self.hps.use_recurrent_dropout
use_input_dropout = self.hps.use_input_dropout
use_output_dropout = self.hps.use_output_dropout
if (hps.dec_model == 'hyper'):
cell = cell_fn(hps.dec_rnn_size, use_recurrent_dropout=use_recurrent_dropout, dropout_keep_prob=self.hps.recurrent_dropout_prob)
else:
cell = cell_fn(hps.dec_rnn_size, use_recurrent_dropout=use_recurrent_dropout, dropout_keep_prob=self.hps.recurrent_dropout_prob)
if hps.conditional:
if (hps.enc_model == 'hyper'):
self.enc_cell_fw = enc_cell_fn(hps.enc_rnn_size, use_recurrent_dropout=use_recurrent_dropout, dropout_keep_prob=self.hps.recurrent_dropout_prob)
self.enc_cell_bw = enc_cell_fn(hps.enc_rnn_size, use_recurrent_dropout=use_recurrent_dropout, dropout_keep_prob=self.hps.recurrent_dropout_prob)
else:
self.enc_cell_fw = enc_cell_fn(hps.enc_rnn_size, use_recurrent_dropout=use_recurrent_dropout, dropout_keep_prob=self.hps.recurrent_dropout_prob)
self.enc_cell_bw = enc_cell_fn(hps.enc_rnn_size, use_recurrent_dropout=use_recurrent_dropout, dropout_keep_prob=self.hps.recurrent_dropout_prob)
tf.logging.info('Input dropout mode = %s.', use_input_dropout)
tf.logging.info('Output dropout mode = %s.', use_output_dropout)
tf.logging.info('Recurrent dropout mode = %s.', use_recurrent_dropout)
if use_input_dropout:
tf.logging.info('Dropout to input w/ keep_prob = %4.4f.', self.hps.input_dropout_prob)
cell = tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=self.hps.input_dropout_prob)
if use_output_dropout:
tf.logging.info('Dropout to output w/ keep_prob = %4.4f.', self.hps.output_dropout_prob)
cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=self.hps.output_dropout_prob)
self.cell = cell
self.sequence_lengths = tf.placeholder(dtype=tf.int32, shape=[self.hps.batch_size])
self.input_data = tf.placeholder(dtype=tf.float32, shape=[self.hps.batch_size, (self.hps.max_seq_len + 1), 5])
self.output_x = self.input_data[:, 1:(self.hps.max_seq_len + 1), :]
self.input_x = self.input_data[:, :self.hps.max_seq_len, :]
if hps.conditional:
(self.mean, self.presig) = self.encoder(self.output_x, self.sequence_lengths)
self.sigma = tf.exp((self.presig / 2.0))
eps = tf.random_normal((self.hps.batch_size, self.hps.z_size), 0.0, 1.0, dtype=tf.float32)
self.batch_z = (self.mean + tf.multiply(self.sigma, eps))
self.kl_cost = ((-0.5) * tf.reduce_mean((((1 + self.presig) - tf.square(self.mean)) - tf.exp(self.presig))))
self.kl_cost = tf.maximum(self.kl_cost, self.hps.kl_tolerance)
pre_tile_y = tf.reshape(self.batch_z, [self.hps.batch_size, 1, self.hps.z_size])
overlay_x = tf.tile(pre_tile_y, [1, self.hps.max_seq_len, 1])
actual_input_x = tf.concat([self.input_x, overlay_x], 2)
self.initial_state = tf.nn.tanh(rnn.super_linear(self.batch_z, cell.state_size, init_w='gaussian', weight_start=0.001, input_size=self.hps.z_size))
else:
self.batch_z = tf.zeros((self.hps.batch_size, self.hps.z_size), dtype=tf.float32)
self.kl_cost = tf.zeros([], dtype=tf.float32)
actual_input_x = self.input_x
self.initial_state = cell.zero_state(batch_size=hps.batch_size, dtype=tf.float32)
self.num_mixture = hps.num_mixture
n_out = (3 + (self.num_mixture * 6))
with tf.variable_scope('RNN'):
output_w = tf.get_variable('output_w', [self.hps.dec_rnn_size, n_out])
output_b = tf.get_variable('output_b', [n_out])
(output, last_state) = tf.nn.dynamic_rnn(cell, actual_input_x, initial_state=self.initial_state, time_major=False, swap_memory=True, dtype=tf.float32, scope='RNN')
output = tf.reshape(output, [(-1), hps.dec_rnn_size])
output = tf.nn.xw_plus_b(output, output_w, output_b)
self.final_state = last_state
def tf_2d_normal(x1, x2, mu1, mu2, s1, s2, rho):
'Returns result of eq # 24 of http://arxiv.org/abs/1308.0850.'
norm1 = tf.subtract(x1, mu1)
norm2 = tf.subtract(x2, mu2)
s1s2 = tf.multiply(s1, s2)
z = ((tf.square(tf.div(norm1, s1)) + tf.square(tf.div(norm2, s2))) - (2 * tf.div(tf.multiply(rho, tf.multiply(norm1, norm2)), s1s2)))
neg_rho = (1 - tf.square(rho))
result = tf.exp(tf.div((- z), (2 * neg_rho)))
denom = ((2 * np.pi) * tf.multiply(s1s2, tf.sqrt(neg_rho)))
result = tf.div(result, denom)
return result
def get_lossfunc(z_pi, z_mu1, z_mu2, z_sigma1, z_sigma2, z_corr, z_pen_logits, x1_data, x2_data, pen_data):
'Returns a loss fn based on eq #26 of http://arxiv.org/abs/1308.0850.'
result0 = tf_2d_normal(x1_data, x2_data, z_mu1, z_mu2, z_sigma1, z_sigma2, z_corr)
epsilon = 1e-06
result1 = tf.multiply(result0, z_pi)
result1 = tf.reduce_sum(result1, 1, keep_dims=True)
result1 = (- tf.log((result1 + epsilon)))
fs = (1.0 - pen_data[:, 2])
fs = tf.reshape(fs, [(-1), 1])
result1 = tf.multiply(result1, fs)
result2 = tf.nn.softmax_cross_entropy_with_logits(labels=pen_data, logits=z_pen_logits)
result2 = tf.reshape(result2, [(-1), 1])
if (not self.hps.is_training):
result2 = tf.multiply(result2, fs)
result = (result1 + result2)
return result
def get_mixture_coef(output):
'Returns the tf slices containing mdn dist params.'
z = output
z_pen_logits = z[:, 0:3]
(z_pi, z_mu1, z_mu2, z_sigma1, z_sigma2, z_corr) = tf.split(z[:, 3:], 6, 1)
z_pi = tf.nn.softmax(z_pi)
z_pen = tf.nn.softmax(z_pen_logits)
z_sigma1 = tf.exp(z_sigma1)
z_sigma2 = tf.exp(z_sigma2)
z_corr = tf.tanh(z_corr)
r = [z_pi, z_mu1, z_mu2, z_sigma1, z_sigma2, z_corr, z_pen, z_pen_logits]
return r
out = get_mixture_coef(output)
[o_pi, o_mu1, o_mu2, o_sigma1, o_sigma2, o_corr, o_pen, o_pen_logits] = out
self.pi = o_pi
self.mu1 = o_mu1
self.mu2 = o_mu2
self.sigma1 = o_sigma1
self.sigma2 = o_sigma2
self.corr = o_corr
self.pen_logits = o_pen_logits
self.pen = o_pen
target = tf.reshape(self.output_x, [(-1), 5])
[x1_data, x2_data, eos_data, eoc_data, cont_data] = tf.split(target, 5, 1)
pen_data = tf.concat([eos_data, eoc_data, cont_data], 1)
lossfunc = get_lossfunc(o_pi, o_mu1, o_mu2, o_sigma1, o_sigma2, o_corr, o_pen_logits, x1_data, x2_data, pen_data)
self.r_cost = tf.reduce_mean(lossfunc)
if self.hps.is_training:
self.lr = tf.Variable(self.hps.learning_rate, trainable=False)
optimizer = tf.train.AdamOptimizer(self.lr)
self.kl_weight = tf.Variable(self.hps.kl_weight_start, trainable=False)
self.cost = (self.r_cost + (self.kl_cost * self.kl_weight))
gvs = optimizer.compute_gradients(self.cost)
g = self.hps.grad_clip
capped_gvs = [(tf.clip_by_value(grad, (- g), g), var) for (grad, var) in gvs]
self.train_op = optimizer.apply_gradients(capped_gvs, global_step=self.global_step, name='train_step')
|
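For reference, a NumPy re-statement of the bivariate normal density used in tf_2d_normal (eq. 24 of http://arxiv.org/abs/1308.0850), checked at the mean:

import numpy as np

def np_2d_normal(x1, x2, mu1, mu2, s1, s2, rho):
    # Mirrors tf_2d_normal above, element-wise on scalars or arrays.
    norm1, norm2 = x1 - mu1, x2 - mu2
    z = ((norm1 / s1) ** 2 + (norm2 / s2) ** 2
         - 2.0 * rho * norm1 * norm2 / (s1 * s2))
    neg_rho = 1.0 - rho ** 2
    return np.exp(-z / (2.0 * neg_rho)) / (2.0 * np.pi * s1 * s2 * np.sqrt(neg_rho))

# At the mean, with unit scales and no correlation, the density is 1/(2*pi).
print(np_2d_normal(0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0))  # ~0.15915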
'Generate a performance track from a primer performance track.
Args:
num_steps: The integer length in steps of the final track, after
generation. Includes the primer.
primer_sequence: The primer sequence, a Performance object.
temperature: A float specifying how much to divide the logits by
before computing the softmax. Greater than 1.0 makes tracks more
random, less than 1.0 makes tracks less random.
beam_size: An integer, beam size to use when generating tracks via
beam search.
branch_factor: An integer, beam search branch factor to use.
steps_per_iteration: An integer, number of steps to take per beam search
iteration.
note_density: Desired note density of generated performance. If None,
don\'t condition on note density.
pitch_histogram: Desired pitch class histogram of generated performance.
If None, don\'t condition on pitch class histogram.
Returns:
The generated Performance object (which begins with the provided primer
track).
Raises:
ValueError: If both `note_density` and `pitch_histogram` are provided as
conditioning variables.'
| def generate_performance(self, num_steps, primer_sequence, temperature=1.0, beam_size=1, branch_factor=1, steps_per_iteration=1, note_density=None, pitch_histogram=None):
| if ((note_density is not None) and (pitch_histogram is not None)):
control_events = ([(note_density, pitch_histogram)] * num_steps)
elif (note_density is not None):
control_events = ([note_density] * num_steps)
elif (pitch_histogram is not None):
control_events = ([pitch_histogram] * num_steps)
else:
control_events = None
return self._generate_events(num_steps, primer_sequence, temperature, beam_size, branch_factor, steps_per_iteration, control_events=control_events)
|
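A hypothetical usage sketch, commented out because it needs a trained model; `model` and `primer` are assumptions, not objects defined in this code:

# generated = model.generate_performance(
#     num_steps=3000, primer_sequence=primer, temperature=1.0,
#     beam_size=1, branch_factor=1, steps_per_iteration=1,
#     note_density=2.0)   # condition on roughly two notes per second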
'Evaluate the log likelihood of a performance.
Args:
sequence: The Performance object for which to evaluate the log likelihood.
note_density: Control note density on which performance is conditioned. If
None, don\'t condition on note density.
pitch_histogram: Control pitch class histogram on which performance is
conditioned. If None, don\'t condition on pitch class histogram.
Returns:
The log likelihood of `sequence` under this model.
Raises:
ValueError: If both `note_density` and `pitch_histogram` are provided as
conditioning variables.'
| def performance_log_likelihood(self, sequence, note_density=None, pitch_histogram=None):
| if ((note_density is not None) and (pitch_histogram is not None)):
control_events = ([(note_density, pitch_histogram)] * len(sequence))
elif (note_density is not None):
control_events = ([note_density] * len(sequence))
elif (pitch_histogram is not None):
control_events = ([pitch_histogram] * len(sequence))
else:
control_events = None
return self._evaluate_log_likelihood([sequence], control_events=control_events)[0]
|
'Initialize a NoteDensityOneHotEncoding.
Args:
density_bin_ranges: List of note density (notes per second) bin boundaries
to use when quantizing. The number of bins will be one larger than the
list length.'
| def __init__(self, density_bin_ranges):
| self._density_bin_ranges = density_bin_ranges
|
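A sketch of the bin lookup this docstring implies (the actual encoding method is not shown in this row; the boundaries are hypothetical):

import bisect

density_bin_ranges = [1.0, 2.0, 4.0, 8.0]  # notes per second; 5 bins total

def density_bin(notes_per_second):
    # N boundaries define N + 1 bins: bin 0 is below the first boundary,
    # bin N is at or above the last one.
    return bisect.bisect_right(density_bin_ranges, notes_per_second)

print(density_bin(0.5))  # 0
print(density_bin(3.0))  # 2
print(density_bin(9.0))  # 4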
'Constructs an EncoderPipeline.
Args:
config: A PerformanceRnnConfig that specifies the encoder/decoder and
note density conditioning behavior.
name: A unique pipeline name.'
| def __init__(self, config, name):
| super(EncoderPipeline, self).__init__(input_type=performance_lib.Performance, output_type=tf.train.SequenceExample, name=name)
self._encoder_decoder = config.encoder_decoder
self._density_bin_ranges = config.density_bin_ranges
self._density_window_size = config.density_window_size
self._pitch_histogram_window_size = config.pitch_histogram_window_size
|
'Creates a PerformanceRnnSequenceGenerator.
Args:
model: Instance of PerformanceRnnModel.
details: A generator_pb2.GeneratorDetails for this generator.
steps_per_second: Number of quantized steps per second.
num_velocity_bins: Number of quantized velocity bins. If 0, don\'t use
velocity.
note_density_conditioning: If True, generate conditional on note density.
pitch_histogram_conditioning: If True, generate conditional on pitch class
histogram.
max_note_duration: The maximum note duration in seconds to allow during
generation. This model often forgets to release notes; specifying a
maximum duration can force it to do so.
fill_generate_section: If True, the model will generate RNN steps until
the entire generate section has been filled. If False, the model will
estimate the number of RNN steps needed and then generate that many
events, even if the generate section isn\'t completely filled.
checkpoint: Where to search for the most recent model checkpoint. Mutually
exclusive with `bundle`.
bundle: A GeneratorBundle object that includes both the model checkpoint
and metagraph. Mutually exclusive with `checkpoint`.
Raises:
ValueError: If both `note_density_conditioning` and
`pitch_histogram_conditioning` are enabled.'
| def __init__(self, model, details, steps_per_second=performance_lib.DEFAULT_STEPS_PER_SECOND, num_velocity_bins=0, note_density_conditioning=False, pitch_histogram_conditioning=False, max_note_duration=MAX_NOTE_DURATION_SECONDS, fill_generate_section=True, checkpoint=None, bundle=None):
| super(PerformanceRnnSequenceGenerator, self).__init__(model, details, checkpoint, bundle)
self.steps_per_second = steps_per_second
self.num_velocity_bins = num_velocity_bins
self.note_density_conditioning = note_density_conditioning
self.pitch_histogram_conditioning = pitch_histogram_conditioning
self.max_note_duration = max_note_duration
self.fill_generate_section = fill_generate_section
|
'Construct a Performance.
Exactly one of quantized_sequence or steps_per_second should be supplied.
Args:
quantized_sequence: A quantized NoteSequence proto.
steps_per_second: Number of quantized time steps per second.
start_step: The offset of this sequence relative to the
beginning of the source sequence. If a quantized sequence is used as
input, only notes starting after this step will be considered.
num_velocity_bins: Number of velocity bins to use. If 0, velocity events
will not be included at all.
Raises:
ValueError: If `num_velocity_bins` is larger than the number of MIDI
velocity values.'
| def __init__(self, quantized_sequence=None, steps_per_second=None, start_step=0, num_velocity_bins=0):
| if ((quantized_sequence, steps_per_second).count(None) != 1):
raise ValueError('Must specify exactly one of quantized_sequence or steps_per_second')
if (num_velocity_bins > ((MAX_MIDI_VELOCITY - MIN_MIDI_VELOCITY) + 1)):
raise ValueError(('Number of velocity bins is too large: %d' % num_velocity_bins))
if quantized_sequence:
sequences_lib.assert_is_absolute_quantized_sequence(quantized_sequence)
self._events = self._from_quantized_sequence(quantized_sequence, start_step, num_velocity_bins)
self._steps_per_second = quantized_sequence.quantization_info.steps_per_second
else:
self._events = []
self._steps_per_second = steps_per_second
self._start_step = start_step
self._num_velocity_bins = num_velocity_bins
|
'Adds steps to the end of the sequence.'
| def _append_steps(self, num_steps):
| if (self._events and (self._events[(-1)].event_type == PerformanceEvent.TIME_SHIFT) and (self._events[(-1)].event_value < MAX_SHIFT_STEPS)):
added_steps = min(num_steps, (MAX_SHIFT_STEPS - self._events[(-1)].event_value))
self._events[(-1)] = PerformanceEvent(PerformanceEvent.TIME_SHIFT, (self._events[(-1)].event_value + added_steps))
num_steps -= added_steps
while (num_steps >= MAX_SHIFT_STEPS):
self._events.append(PerformanceEvent(event_type=PerformanceEvent.TIME_SHIFT, event_value=MAX_SHIFT_STEPS))
num_steps -= MAX_SHIFT_STEPS
if (num_steps > 0):
self._events.append(PerformanceEvent(event_type=PerformanceEvent.TIME_SHIFT, event_value=num_steps))
|
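A standalone sketch of the same chunking, assuming MAX_SHIFT_STEPS is 100 (its usual value): appending 230 steps emits shifts of 100, 100, and 30.

MAX_SHIFT_STEPS = 100  # assumed value

def shift_chunks(num_steps):
    # Split a long time shift into maximal chunks, as _append_steps does.
    chunks = []
    while num_steps >= MAX_SHIFT_STEPS:
        chunks.append(MAX_SHIFT_STEPS)
        num_steps -= MAX_SHIFT_STEPS
    if num_steps > 0:
        chunks.append(num_steps)
    return chunks

print(shift_chunks(230))  # [100, 100, 30]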
'Trims a given number of steps from the end of the sequence.'
| def _trim_steps(self, num_steps):
| steps_trimmed = 0
while (self._events and (steps_trimmed < num_steps)):
if (self._events[(-1)].event_type == PerformanceEvent.TIME_SHIFT):
if ((steps_trimmed + self._events[(-1)].event_value) > num_steps):
self._events[(-1)] = PerformanceEvent(event_type=PerformanceEvent.TIME_SHIFT, event_value=((self._events[(-1)].event_value - num_steps) + steps_trimmed))
steps_trimmed = num_steps
else:
steps_trimmed += self._events[(-1)].event_value
self._events.pop()
else:
self._events.pop()
|
'Sets the length of the sequence to the specified number of steps.
If the event sequence is not long enough, pads with time shifts to make the
sequence the specified length. If it is too long, it will be truncated to
the requested length.
Args:
steps: How many quantized steps long the event sequence should be.
from_left: Whether to add/remove from the left instead of right.'
| def set_length(self, steps, from_left=False):
| if from_left:
raise NotImplementedError('from_left is not supported')
if (self.num_steps < steps):
self._append_steps((steps - self.num_steps))
elif (self.num_steps > steps):
self._trim_steps((self.num_steps - steps))
assert (self.num_steps == steps)
|
'Appends the event to the end of the sequence.
Args:
event: The performance event to append to the end.
Raises:
ValueError: If `event` is not a valid performance event.'
| def append(self, event):
| if (not isinstance(event, PerformanceEvent)):
raise ValueError(('Invalid performance event: %s' % event))
self._events.append(event)
|
'Truncates this Performance to the specified number of events.
Args:
num_events: The number of events to which this performance will be
truncated.'
| def truncate(self, num_events):
| self._events = self._events[:num_events]
|
'How many events are in this sequence.
Returns:
Number of events as an integer.'
| def __len__(self):
| return len(self._events)
|
'Returns the event at the given index.'
| def __getitem__(self, i):
| return self._events[i]
|
'Return an iterator over the events in this sequence.'
| def __iter__(self):
| return iter(self._events)
|
'Returns how many steps long this sequence is.
Returns:
Length of the sequence in quantized steps.'
| @property
def num_steps(self):
| steps = 0
for event in self:
if (event.event_type == PerformanceEvent.TIME_SHIFT):
steps += event.event_value
return steps
|
'Populate self with events from the given quantized NoteSequence object.
Within a step, new pitches are started with NOTE_ON and existing pitches are
ended with NOTE_OFF. TIME_SHIFT shifts the current step forward in time.
VELOCITY changes the current velocity value that will be applied to all
NOTE_ON events.
Args:
quantized_sequence: A quantized NoteSequence instance.
start_step: Start converting the sequence at this time step.
num_velocity_bins: Number of velocity bins to use. If 0, velocity events
will not be included at all.
Returns:
A list of events.'
| @staticmethod
def _from_quantized_sequence(quantized_sequence, start_step=0, num_velocity_bins=0):
| notes = [note for note in quantized_sequence.notes if ((not note.is_drum) and (note.quantized_start_step >= start_step))]
sorted_notes = sorted(notes, key=(lambda note: note.start_time))
onsets = [(note.quantized_start_step, idx, False) for (idx, note) in enumerate(sorted_notes)]
offsets = [(note.quantized_end_step, idx, True) for (idx, note) in enumerate(sorted_notes)]
note_events = sorted((onsets + offsets))
if num_velocity_bins:
velocity_bin_size = int(math.ceil((((MAX_MIDI_VELOCITY - MIN_MIDI_VELOCITY) + 1) / num_velocity_bins)))
velocity_to_bin = (lambda v: (((v - MIN_MIDI_VELOCITY) // velocity_bin_size) + 1))
current_step = start_step
current_velocity_bin = 0
performance_events = []
for (step, idx, is_offset) in note_events:
if (step > current_step):
while (step > (current_step + MAX_SHIFT_STEPS)):
performance_events.append(PerformanceEvent(event_type=PerformanceEvent.TIME_SHIFT, event_value=MAX_SHIFT_STEPS))
current_step += MAX_SHIFT_STEPS
performance_events.append(PerformanceEvent(event_type=PerformanceEvent.TIME_SHIFT, event_value=int((step - current_step))))
current_step = step
if num_velocity_bins:
velocity_bin = velocity_to_bin(sorted_notes[idx].velocity)
if ((not is_offset) and (velocity_bin != current_velocity_bin)):
current_velocity_bin = velocity_bin
performance_events.append(PerformanceEvent(event_type=PerformanceEvent.VELOCITY, event_value=current_velocity_bin))
event_type = (PerformanceEvent.NOTE_OFF if is_offset else PerformanceEvent.NOTE_ON)
performance_events.append(PerformanceEvent(event_type=event_type, event_value=sorted_notes[idx].pitch))
return performance_events
|
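A worked example of the resulting encoding and of the velocity quantization, using hypothetical inputs and the usual MIDI velocity range 1..127:

import math

# Pitch 60 sounding from step 0 to 10, then pitch 64 from step 10 to 20,
# with num_velocity_bins=0, encodes as:
#   NOTE_ON(60), TIME_SHIFT(10), NOTE_OFF(60),
#   NOTE_ON(64), TIME_SHIFT(10), NOTE_OFF(64)

# Velocity quantization with 32 bins:
MIN_MIDI_VELOCITY, MAX_MIDI_VELOCITY = 1, 127
num_velocity_bins = 32
velocity_bin_size = int(math.ceil(
    (MAX_MIDI_VELOCITY - MIN_MIDI_VELOCITY + 1) / float(num_velocity_bins)))
print(velocity_bin_size)                                    # 4
print((100 - MIN_MIDI_VELOCITY) // velocity_bin_size + 1)   # velocity 100 -> bin 25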
'Converts the Performance to NoteSequence proto.
Args:
velocity: MIDI velocity to give each note. Between 1 and 127 (inclusive).
If the performance contains velocity events, those will be used
instead.
instrument: MIDI instrument to give each note.
program: MIDI program to give each note.
max_note_duration: Maximum note duration in seconds to allow. Notes longer
than this will be truncated. If None, notes can be any length.
Raises:
ValueError: if an unknown event is encountered.
Returns:
A NoteSequence proto.'
| def to_sequence(self, velocity=100, instrument=0, program=0, max_note_duration=None):
| seconds_per_step = (1.0 / self._steps_per_second)
sequence_start_time = (self.start_step * seconds_per_step)
sequence = music_pb2.NoteSequence()
sequence.ticks_per_quarter = STANDARD_PPQ
step = 0
if self._num_velocity_bins:
velocity_bin_size = int(math.ceil((((MAX_MIDI_VELOCITY - MIN_MIDI_VELOCITY) + 1) / self._num_velocity_bins)))
pitch_start_steps_and_velocities = collections.defaultdict(list)
for (i, event) in enumerate(self):
if (event.event_type == PerformanceEvent.NOTE_ON):
pitch_start_steps_and_velocities[event.event_value].append((step, velocity))
elif (event.event_type == PerformanceEvent.NOTE_OFF):
if (not pitch_start_steps_and_velocities[event.event_value]):
tf.logging.debug(('Ignoring NOTE_OFF at position %d with no previous NOTE_ON' % i))
else:
(pitch_start_step, pitch_velocity) = pitch_start_steps_and_velocities[event.event_value][0]
pitch_start_steps_and_velocities[event.event_value] = pitch_start_steps_and_velocities[event.event_value][1:]
if (step == pitch_start_step):
tf.logging.debug(('Ignoring note with zero duration at step %d' % step))
continue
note = sequence.notes.add()
note.start_time = ((pitch_start_step * seconds_per_step) + sequence_start_time)
note.end_time = ((step * seconds_per_step) + sequence_start_time)
if (max_note_duration and ((note.end_time - note.start_time) > max_note_duration)):
note.end_time = (note.start_time + max_note_duration)
note.pitch = event.event_value
note.velocity = pitch_velocity
note.instrument = instrument
note.program = program
if (note.end_time > sequence.total_time):
sequence.total_time = note.end_time
elif (event.event_type == PerformanceEvent.TIME_SHIFT):
step += event.event_value
elif (event.event_type == PerformanceEvent.VELOCITY):
assert self._num_velocity_bins
velocity = (MIN_MIDI_VELOCITY + ((event.event_value - 1) * velocity_bin_size))
else:
raise ValueError(('Unknown event type: %s' % event.event_type))
for pitch in pitch_start_steps_and_velocities:
for (pitch_start_step, pitch_velocity) in pitch_start_steps_and_velocities[pitch]:
if (step == pitch_start_step):
tf.logging.debug(('Ignoring note with zero duration at step %d' % step))
continue
note = sequence.notes.add()
note.start_time = ((pitch_start_step * seconds_per_step) + sequence_start_time)
note.end_time = ((step * seconds_per_step) + sequence_start_time)
if (max_note_duration and ((note.end_time - note.start_time) > max_note_duration)):
note.end_time = (note.start_time + max_note_duration)
note.pitch = pitch
note.velocity = pitch_velocity
note.instrument = instrument
note.program = program
if (note.end_time > sequence.total_time):
sequence.total_time = note.end_time
return sequence
|
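A timing sketch with hypothetical values, following the seconds_per_step conversion above:

steps_per_second = 100
seconds_per_step = 1.0 / steps_per_second
pitch_start_step, end_step = 150, 400
print(pitch_start_step * seconds_per_step)  # note starts at 1.5 s
print(end_step * seconds_per_step)          # and ends at 4.0 s (before any max_note_duration clipping)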
'Threadsafe accessor for offset attribute.'
| @property
@serialized
def offset(self):
| return self._offset
|
'Threadsafe mutator for offset attribute.'
| @offset.setter
@serialized
def offset(self, value):
| self._offset = value
|
'Sleeps the requested number of seconds.'
| def sleep(self, seconds):
| wake_time = (time.time() + seconds)
self.sleep_until(wake_time)
|
'Sleeps until the requested time.'
| def sleep_until(self, wake_time):
| delta = (wake_time - time.time())
if (delta <= 0):
return
offset_ = self.offset
if (delta > offset_):
time.sleep((delta - offset_))
remaining_time = (time.time() - wake_time)
with self._lock:
if (self.offset == offset_):
offset_delta = ((offset_ - Sleeper._MIN_OFFSET) / 2)
if (remaining_time > 0):
self.offset -= offset_delta
elif (remaining_time < (- Sleeper._MIN_OFFSET)):
self.offset += offset_delta
while (time.time() < wake_time):
pass
|
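A standalone sketch of the same idea, not the Sleeper implementation (which instead learns `offset` adaptively): sleep coarsely, then spin until the deadline.

import time

def precise_sleep_until(wake_time, spin_margin=0.001):
    # Coarse sleep up to spin_margin seconds before the deadline...
    coarse = wake_time - time.time() - spin_margin
    if coarse > 0:
        time.sleep(coarse)
    # ...then busy-wait for the remainder, as sleep_until does at the end.
    while time.time() < wake_time:
        pass

precise_sleep_until(time.time() + 0.01)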
'Constructs a `Statistic`.
Subclass constructors are expected to call this constructor.
Args:
name: The string name for this `Statistic`. Any two `Statistic` objects
with the same name will be merged together. The name should also
describe what this Statistic is measuring.'
| def __init__(self, name):
| self.name = name
|