Groundlight committed
Commit 687b4a7 · 1 Parent(s): 75bf27f

make run_inference to request GPUs

Files changed (1)
  1. app.py +1 -1
app.py CHANGED
@@ -204,6 +204,7 @@ def prepare_for_inference():
     )
 
 
+@spaces.GPU
 def run_inference(word, image, mapping):
     """Main inference function, now focused just on generation"""
     if not word or not image or not mapping:
@@ -230,7 +231,6 @@ def run_inference(word, image, mapping):
     )
 
     # Start generation in a separate thread with torch.no_grad()
-    @spaces.GPU
     def generate_with_no_grad():
         with torch.no_grad():
             model.generate(**generation_kwargs)
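
The change moves the @spaces.GPU decorator off the inner generate_with_no_grad thread target and onto the top-level run_inference function, so the GPU allocation is requested for the whole inference call rather than from inside the worker thread. Below is a minimal sketch of the resulting shape, for orientation only: the decorator placement, the function names, and the no_grad generate call come from the diff, while the stub model, the empty generation_kwargs, and the thread start-up are placeholders standing in for app.py's real setup.

# Sketch only: _StubModel and the empty generation_kwargs are placeholders,
# not code from app.py.
import threading

import spaces
import torch


class _StubModel:
    """Placeholder for the real model loaded elsewhere in app.py."""

    def generate(self, **kwargs):
        pass


model = _StubModel()


@spaces.GPU  # GPU is now requested when run_inference itself is called
def run_inference(word, image, mapping):
    """Main inference function, now focused just on generation"""
    if not word or not image or not mapping:
        return None

    generation_kwargs = {}  # built from word/image/mapping in the real app

    # Start generation in a separate thread with torch.no_grad()
    def generate_with_no_grad():
        with torch.no_grad():
            model.generate(**generation_kwargs)

    # The real app presumably streams or collects the output; here we just
    # launch the worker thread to show the structure.
    thread = threading.Thread(target=generate_with_no_grad)
    thread.start()
    return thread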