Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -136,47 +136,111 @@ def render_midi_output(final_composition):
 def load_midi(input_midi):
     """Process the input MIDI file and create a token sequence using without velocity logic."""
     raw_score = TMIDIX.midi2single_track_ms_score(input_midi.name)
-    escore_notes = TMIDIX.advanced_score_processor(
-        raw_score, return_enhanced_score_notes=True, apply_sustain=True
-    )[0]
-    sp_escore_notes = TMIDIX.solo_piano_escore_notes(escore_notes)
-    zscore = TMIDIX.recalculate_score_timings(sp_escore_notes)
-    zscore = TMIDIX.augment_enhanced_score_notes(zscore, timings_divider=32)
-    fscore = TMIDIX.fix_escore_notes_durations(zscore)
-    cscore = TMIDIX.chordify_score([1000, fscore])
+
+    escore_notes = TMIDIX.advanced_score_processor(raw_score, return_enhanced_score_notes=True, apply_sustain=True)
+
+    escore_notes = TMIDIX.augment_enhanced_score_notes(escore_notes[0], sort_drums_last=True)
+
+    dscore = TMIDIX.delta_score_notes(escore_notes)
+
+    dcscore = TMIDIX.chordify_score([d[1:] for d in dscore])
+
+    melody_chords = [18816]
+
+    #=======================================================
+    # MAIN PROCESSING CYCLE
+    #=======================================================
+
+    for i, c in enumerate(dcscore):
+
+        delta_time = c[0][0]
+
+        melody_chords.append(delta_time)
+
+        for e in c:
+
+            #=======================================================
+
+            # Durations
+            dur = max(1, min(255, e[1]))
+
+            # Patches
+            pat = max(0, min(128, e[5]))
+
+            # Pitches
+            ptc = max(1, min(127, e[3]))
+
+            # Velocities
+            # Calculating octo-velocity
+            vel = max(8, min(127, e[4]))
+            velocity = round(vel / 15)-1
+
+            #=======================================================
+            # FINAL NOTE SEQ
+            #=======================================================
+
+            # Writing final note
+            pat_ptc = (128 * pat) + ptc
+            dur_vel = (8 * dur) + velocity
+
+            melody_chords.extend([pat_ptc+256, dur_vel+16768])
+
+    return melody_chords
 
 def save_midi(tokens, batch_number=None):
     """Convert token sequence back to a MIDI score and write it using TMIDIX (without velocity).
     The output MIDI file name incorporates a date-time stamp.
     """
-    pitch =
+    time = 0
+    dur = 1
+    vel = 90
+    pitch = 60
+    channel = 0
+    patch = 0
+
+    patches = [-1] * 16
+
+    channels = [0] * 16
+    channels[9] = 1
+
+    for ss in tokens:
+
+        if 0 <= ss < 256:
+
+            time += ss * 16
+
+        if 256 <= ss < 16768:
+
+            patch = (ss-256) // 128
+
+            if patch < 128:
+
+                if patch not in patches:
+                    if 0 in channels:
+                        cha = channels.index(0)
+                        channels[cha] = 1
+                    else:
+                        cha = 15
+
+                    patches[cha] = patch
+                    channel = patches.index(patch)
+                else:
+                    channel = patches.index(patch)
+
+            if patch == 128:
+                channel = 9
+
+            pitch = (ss-256) % 128
+
+
+        if 16768 <= ss < 18816:
+
+            dur = ((ss-16768) // 8) * 16
+            vel = (((ss-16768) % 8)+1) * 15
+
+            song_f.append(['note', time, dur, channel, pitch, vel ])
+
+    patches = [0 if x==-1 else x for x in patches]
 
     # Generate a time stamp using the PDT timezone.
     timestamp = datetime.datetime.now(PDT).strftime("%Y%m%d_%H%M%S")
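
For reference, the token layout introduced above uses three bands plus a start marker: delta-time tokens 0-255, patch/pitch tokens 256-16767 (128 * patch + pitch + 256, with patch 128 reserved for drums), duration/velocity tokens 16768-18815 (8 * duration + octo-velocity + 16768), and 18816 as the start token. Below is a minimal, self-contained round-trip sketch of that arithmetic; the constants come from the diff, but the helper names and example values are illustrative only and are not part of app.py.

# Illustrative sketch of the token arithmetic used by the new load_midi/save_midi (not the app's code).
START_TOKEN = 18816  # sequence start marker, also used as the fallback prime token

def encode_note(delta_time, dur, pat, ptc, vel):
    """Pack one note into [delta-time, pat_ptc, dur_vel] tokens, mirroring load_midi."""
    delta_time = max(0, min(255, delta_time))
    dur = max(1, min(255, dur))
    pat = max(0, min(128, pat))                       # 128 = drums
    ptc = max(1, min(127, ptc))
    velocity = round(max(8, min(127, vel)) / 15) - 1  # octo-velocity, 0..7
    return [delta_time, (128 * pat) + ptc + 256, (8 * dur) + velocity + 16768]

def decode_note(tokens):
    """Invert encode_note using the same bands save_midi checks (256..16767, 16768..18815)."""
    delta_time, pat_ptc, dur_vel = tokens
    pat, ptc = divmod(pat_ptc - 256, 128)
    dur = ((dur_vel - 16768) // 8) * 16               # save_midi rescales time and duration by 16
    vel = (((dur_vel - 16768) % 8) + 1) * 15
    return delta_time * 16, dur, pat, ptc, vel

toks = encode_note(delta_time=12, dur=20, pat=0, ptc=60, vel=95)
print(toks)               # [12, 316, 16933]
print(decode_note(toks))  # (192, 320, 0, 60, 90)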
@@ -186,17 +250,17 @@ def save_midi(tokens, batch_number=None):
     else:
         fname = f"Orpheus-Music-Transformer-Music-Composition_{timestamp}_Batch_{batch_number}"'''
 
-    fname = f"Orpheus-Music-Transformer-
+    fname = f"Orpheus-Music-Transformer-Composition"
 
     TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(
-
+        song_f,
         output_signature='Orpheus Music Transformer',
         output_file_name=fname,
         track_name='Project Los Angeles',
         list_of_MIDI_patches=patches,
         verbose=False
     )
-    return fname,
+    return fname, song_f
 
 # -----------------------------
 # MUSIC GENERATION FUNCTION (Combined)
@@ -204,7 +268,7 @@ def save_midi(tokens, batch_number=None):
 @spaces.GPU
 def generate_music(prime, num_gen_tokens, num_mem_tokens, num_gen_batches, model_temperature):
     """Generate music tokens given prime tokens and parameters."""
-    inputs = prime[-num_mem_tokens:] if prime else [
+    inputs = prime[-num_mem_tokens:] if prime else [18816]
     print("Generating...")
     inp = torch.LongTensor([inputs] * num_gen_batches).cuda()
     with ctx:
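
The corrected line above falls back to the start token 18816 when no prime is supplied (an empty list is falsy) and otherwise keeps only the last num_mem_tokens tokens as model memory before the batch is replicated for the GPU. A minimal, model-free sketch of that trimming, with a hypothetical helper name and invented values:

# Illustrative only: the same trim/fallback logic on plain lists, without torch or a model.
START_TOKEN = 18816

def build_batched_inputs(prime, num_mem_tokens, num_gen_batches):
    inputs = prime[-num_mem_tokens:] if prime else [START_TOKEN]
    # The app then does: inp = torch.LongTensor([inputs] * num_gen_batches).cuda()
    return [list(inputs) for _ in range(num_gen_batches)]

print(build_batched_inputs([], 8192, 2))            # [[18816], [18816]]
print(build_batched_inputs(list(range(10)), 4, 2))  # [[6, 7, 8, 9], [6, 7, 8, 9]]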
@@ -258,7 +322,7 @@ def generate_music_and_state(input_midi, num_prime_tokens, num_gen_tokens, num_m
     plot_kwargs = {'plot_title': f'Batch # {i}', 'return_plt': True}
 
     if len(final_composition) > PREVIEW_LENGTH:
-        plot_kwargs['preview_length_in_notes'] = len([t for t in preview_tokens if t
+        plot_kwargs['preview_length_in_notes'] = len([t for t in preview_tokens if 256 <= t < 16768])
 
     midi_plot = TMIDIX.plot_ms_SONG(midi_score, **plot_kwargs)
     midi_audio = midi_to_colab_audio(midi_fname + '.mid',
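
The repaired preview filter counts only patch/pitch tokens (the 256 to 16767 band), i.e. one token per note, so the preview length is expressed in notes rather than raw tokens. A small self-contained check of that counting, with invented token values:

# Count note tokens the way the fixed line does: one pat_ptc token per note (values invented).
preview_tokens = [18816, 12, 256 + (128 * 0) + 60, 16768 + (8 * 20) + 5,
                  4, 256 + (128 * 40) + 64, 16768 + (8 * 16) + 7]
preview_length_in_notes = len([t for t in preview_tokens if 256 <= t < 16768])
print(preview_length_in_notes)  # 2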
@@ -391,9 +455,9 @@ with gr.Blocks() as demo:
     [final_composition, generated_batches, block_lines])
 
     gr.Markdown("## Generate")
-    num_prime_tokens = gr.Slider(
-    num_gen_tokens = gr.Slider(
-    num_mem_tokens = gr.Slider(
+    num_prime_tokens = gr.Slider(16, 7168, value=7168, step=1, label="Number of prime tokens")
+    num_gen_tokens = gr.Slider(16, 1024, value=512, step=1, label="Number of tokens to generate")
+    num_mem_tokens = gr.Slider(16, 8192, value=8192, step=1, label="Number of memory tokens")
     model_temperature = gr.Slider(0.1, 1, value=0.9, step=0.01, label="Model temperature")
     generate_btn = gr.Button("Generate", variant="primary")
 