thecollabagepatch committed on
Commit 8ae81eb · Parent: ff54f69

cmon claude we got this

Files changed (1)
app.py +4 −7
app.py CHANGED
@@ -121,19 +121,16 @@ def generate_midi(seed, use_chords, chord_progression, bpm):
 
 @spaces.GPU(duration=120)
 def generate_music(midi_audio, prompt_duration, musicgen_model, num_iterations, bpm):
-    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as temp_file:
-        temp_filename = temp_file.name
-        torchaudio.save(temp_filename, midi_audio, sample_rate=44100)
+    audio_data, sample_rate = midi_audio
 
-    # Load the generated audio
-    song, sr = torchaudio.load(temp_filename)
-    song = song.to(device)
+    # Convert the audio data to a PyTorch tensor
+    song = torch.from_numpy(audio_data).to(device)
 
     # Use the user-provided BPM value for duration calculation
     duration = calculate_duration(bpm)
 
     # Create slices from the song using the user-provided BPM value
-    slices = create_slices(song, sr, 35, bpm, num_slices=5)
+    slices = create_slices(song, sample_rate, 35, bpm, num_slices=5)
 
     # Load the model
     model_name = musicgen_model.split(" ")[0]
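
In short, the commit drops the temp-file round trip (writing the MIDI render to a .wav with torchaudio and loading it back) and instead unpacks the in-memory (audio_data, sample_rate) tuple, converting it straight to a tensor on the target device. Below is a minimal sketch of that conversion path; the load_song name, the float32 cast, and the mono-to-(channels, samples) reshape are illustrative assumptions and are not part of this commit.

import numpy as np
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"

def load_song(midi_audio):
    # Assumed: midi_audio arrives as an in-memory (audio_data, sample_rate)
    # tuple, mirroring the unpacking introduced in this commit.
    audio_data, sample_rate = midi_audio

    # Cast to float32 so downstream code sees a floating-point waveform,
    # then build the tensor and move it to the target device.
    song = torch.from_numpy(np.asarray(audio_data, dtype=np.float32)).to(device)

    # Assumption: if the array is 1-D (mono), add a channel dimension so code
    # written for torchaudio-style (channels, samples) tensors keeps working.
    if song.dim() == 1:
        song = song.unsqueeze(0)

    return song, sample_rate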