MohamedRashad committed
Commit 453686a · 1 parent: 7f22dce

Upload code

Files changed (1): app.py (+3 -3)
app.py CHANGED
@@ -111,14 +111,14 @@ def resize_without_crop(image, target_width, target_height):
 
 
 @torch.inference_mode()
-@spaces.GPU(duration=240)
+@spaces.GPU(duration=360)
 def interrogator_process(x):
     image_description = wd14tagger.default_interrogator(x)
     return image_description, image_description
 
 
 @torch.inference_mode()
-@spaces.GPU(duration=240)
+@spaces.GPU(duration=360)
 def process(input_fg, prompt, input_undo_steps, image_width, image_height, seed, steps, n_prompt, cfg,
             progress=gr.Progress()):
     rng = torch.Generator(device=memory_management.gpu).manual_seed(int(seed))
@@ -215,7 +215,7 @@ def process_video_inner(image_1, image_2, prompt, seed=123, steps=25, cfg_scale=
 
 
 @torch.inference_mode()
-@spaces.GPU(duration=240)
+@spaces.GPU(duration=360)
 def process_video(keyframes, prompt, steps, cfg, fps, seed, progress=gr.Progress()):
     result_frames = []
     cropped_images = []
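
The commit raises the requested GPU allocation for all three inference entry points from 240 to 360 seconds, presumably to give longer runs (such as the video pass) more headroom before the allocation expires. The sketch below shows the decorator pattern being edited, assuming a Hugging Face ZeroGPU Space with the spaces package installed; the function name and body are placeholders, not the actual pipeline code from app.py.

import torch
import spaces


@torch.inference_mode()      # run the call without autograd bookkeeping
@spaces.GPU(duration=360)    # request a ZeroGPU slot of up to 360 seconds per call
def describe(x: torch.Tensor) -> torch.Tensor:
    # Placeholder computation standing in for the real model call in app.py.
    return x * 2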