ZhifengKong committed on
Commit
ff11d1b
·
1 Parent(s): 052dec1

comment space.gpu

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -4,7 +4,7 @@
4
  import os
5
  import yaml
6
 
7
- import spaces
8
  import gradio as gr
9
 
10
  import librosa
@@ -24,7 +24,7 @@ else:
24
  device = 'cpu'
25
 
26
 
27
- @spaces.GPU
28
  def load_laionclap():
29
  model = laion_clap.CLAP_Module(enable_fusion=True, amodel='HTSAT-tiny').to(device)
30
  model.load_ckpt(ckpt='630k-audioset-fusion-best.pt')
@@ -91,7 +91,7 @@ def load_audio(file_path, target_sr=44100, duration=33.25, start=0.0):
91
  return data
92
 
93
 
94
- @spaces.GPU
95
  @torch.no_grad()
96
  def compute_laionclap_text_audio_sim(audio_file, laionclap_model, outputs):
97
  try:
@@ -141,7 +141,7 @@ model = prepare_model(
141
  )
142
 
143
 
144
- @spaces.GPU
145
  def inference_item(name, prompt):
146
  item = {
147
  'name': str(name),
 
4
  import os
5
  import yaml
6
 
7
+ # import spaces
8
  import gradio as gr
9
 
10
  import librosa
 
24
  device = 'cpu'
25
 
26
 
27
+ # @spaces.GPU
28
  def load_laionclap():
29
  model = laion_clap.CLAP_Module(enable_fusion=True, amodel='HTSAT-tiny').to(device)
30
  model.load_ckpt(ckpt='630k-audioset-fusion-best.pt')
 
91
  return data
92
 
93
 
94
+ # @spaces.GPU
95
  @torch.no_grad()
96
  def compute_laionclap_text_audio_sim(audio_file, laionclap_model, outputs):
97
  try:
 
141
  )
142
 
143
 
144
+ # @spaces.GPU
145
  def inference_item(name, prompt):
146
  item = {
147
  'name': str(name),