vilarin committed
Commit 0c24dd1 · verified · 1 parent: a0d1812

Update app.py

Files changed (1):
  1. app.py +76 -31
app.py CHANGED
@@ -54,8 +54,6 @@ JS = """function () {
 }"""
 
 
-
-
 # Ensure model and scheduler are initialized in GPU-enabled function
 if torch.cuda.is_available():
     model_manager = ModelManager(
@@ -99,7 +97,12 @@ def change_media(image_in, video_in, selected):
     elif selected == "Diffutoon":
         return gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)
 
-
+def update_frames(video_in):
+    up_video = VideoData(
+        video_file=video_in,
+        height=shape_height, width=shape_width)
+    frame_len = len(up_video)
+    return gr.update(maximum=frame_len)
 
 @spaces.GPU(duration=120)
 def generate(
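Note: as committed, update_frames reads shape_height and shape_width from module scope, where they are bound to gr.Slider components rather than plain integers, so VideoData may be handed component objects instead of pixel sizes. A minimal sketch of a safer wiring, assuming the two sliders are passed to the callback as explicit inputs (the extra parameters and the wiring line are illustrative, not part of this commit):

    # Hypothetical variant: take the resize sliders as inputs so the callback
    # receives their integer values rather than the Slider components.
    def update_frames(video_in, shape_height, shape_width):
        up_video = VideoData(
            video_file=video_in,
            height=shape_height, width=shape_width)
        # Cap the frame slider at the length of the uploaded video.
        return gr.update(maximum=len(up_video))

    # video_in.upload(update_frames, inputs=[video_in, shape_height, shape_width], outputs=[num_frames])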
@@ -108,15 +111,19 @@ def generate(
     selected,
     prompt: str = "best quality",
     seed: Optional[int] = -1,
-    num_inference_steps: int = 5,
+    num_inference_steps: int = 10,
+    num_frames: int = 30,
+    shape_height: int = 512,
+    shape_width: int = 512,
+    height: int = 512,
+    width: int = 512,
     animatediff_batch_size: int = 32,
     animatediff_stride: int = 16,
     motion_bucket_id: int = 127,
     fps_id: int = 25,
-    num_frames: int = 50,
     output_folder: str = "outputs",
     progress=gr.Progress(track_tqdm=True)):
-
+    video = ""
     if seed == -1:
         seed = random.randint(0, MAX_SEED)
 
@@ -125,15 +132,15 @@ def generate(
     os.makedirs(output_folder, exist_ok=True)
     base_count = len(glob(os.path.join(output_folder, "*.mp4")))
     video_path = os.path.join(output_folder, f"{base_count:06d}.mp4")
-
+
     if selected == "ExVideo" and image_in:
         image = Image.open(image_in)
         video = pipe(
-            input_image=image.resize((512, 512)),
+            input_image=image.resize((shape_height, shape_width)),
             num_frames=num_frames,
             fps=fps_id,
-            height=512,
-            width=512,
+            height=height,
+            width=width,
             motion_bucket_id=motion_bucket_id,
             num_inference_steps=num_inference_steps,
             min_cfg_scale=2,
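Note: PIL's Image.resize takes a (width, height) tuple, so image.resize((shape_height, shape_width)) swaps the two axes whenever the sliders differ; at the 512x512 defaults the swap is invisible. A minimal sketch of the presumably intended call, keeping the commit's variable names:

    # PIL expects (width, height); pass width first so non-square sizes behave.
    input_image = image.resize((shape_width, shape_height))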
@@ -144,18 +151,19 @@ def generate(
     elif selected == "Diffutoon" and video_in:
         up_video = VideoData(
             video_file=video_in,
-            height=512, width=512)
-        input_video = [up_video[i] for i in range(1, 30)]
+            height=shape_height, width=shape_width)
+        frame_len = min(num_frames, len(up_video))
+        input_video = [up_video[i] for i in range(1, frame_len)]
 
         video = pipe2(
             prompt=prompt,
             negative_prompt="verybadimagenegative_v1.3",
             cfg_scale=3,
             clip_skip=2,
-            controlnet_frames=input_video, num_frames=len(input_video),
+            controlnet_frames=input_video, num_frames=num_frames,
             num_inference_steps=num_inference_steps,
-            height=512,
-            width=512,
+            height=height,
+            width=width,
             animatediff_batch_size=animatediff_batch_size,
             animatediff_stride=animatediff_stride,
             vram_limit_level=0,
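Note: input_video is built from range(1, frame_len), so it skips frame 0 and holds at most frame_len - 1 frames, while pipe2 is asked for num_frames frames; whenever the uploaded clip is shorter than the slider value, controlnet_frames and num_frames disagree. A minimal sketch that keeps the two consistent (assuming pipe2 accepts any matching pair, which the commit does not state):

    # Keep the ControlNet frame list and the requested frame count in sync.
    frame_len = min(num_frames, len(up_video))
    input_video = [up_video[i] for i in range(frame_len)]  # start at 0, not 1
    # ...then pass num_frames=frame_len (not the raw slider value) to pipe2.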
@@ -182,7 +190,11 @@ examples = [
 
 with gr.Blocks(css=CSS, js=JS, theme="soft") as demo:
     gr.HTML("<h1><center>Exvideo📽️Diffutoon</center></h1>")
-    gr.HTML("<p><center>Exvideo and Diffutoon video generation<br><b>Update</b>: first version<br><b>Note</b>: ZeroGPU limited, Set the parameters appropriately.</center></p>")
+    gr.HTML("""
+    <p><center>Exvideo and Diffutoon video generation
+    <br><b>Update</b>: Origin and Output resize, Frames control.
+    <br><b>Note</b>: ZeroGPU limited, Set the parameters appropriately.</center></p>
+    """)
     with gr.Row():
         video_in = gr.Video(label='Upload Video', height=600, scale=2)
         image_in = gr.Image(label='Upload Image', height=600, scale=2, image_mode="RGB", type="filepath", visible=False)
@@ -204,10 +216,50 @@ with gr.Blocks(css=CSS, js=JS, theme="soft") as demo:
                 label="Inference steps",
                 info="Inference steps",
                 step=1,
-                value=5,
+                value=10,
                 minimum=1,
-                maximum=50
+                maximum=50,
             )
+            num_frames = gr.Slider(
+                label="Num frames",
+                info="Output Frames",
+                step=1,
+                value=30,
+                minimum=1,
+                maximum=128,
+            )
+            with gr.Row():
+                shape_height = gr.Slider(
+                    label="Shape Height",
+                    info="Resize Height",
+                    step=8,
+                    value=512,
+                    minimum=256,
+                    maximum=2560,
+                )
+                shape_width = gr.Slider(
+                    label="Shape Width",
+                    info="Resize Width",
+                    step=8,
+                    value=512,
+                    minimum=256,
+                    maximum=2560,
+                )
+            with gr.Row():
+                height = gr.Slider(
+                    label="Output Height",
+                    step=8,
+                    value=512,
+                    minimum=256,
+                    maximum=2560,
+                )
+                width = gr.Slider(
+                    label="Output Width",
+                    step=8,
+                    value=512,
+                    minimum=256,
+                    maximum=2560,
+                )
         with gr.Accordion("Diffutoon Options", open=False):
             animatediff_batch_size = gr.Slider(
                 label="Animatediff batch size",
@@ -222,7 +274,7 @@ with gr.Blocks(css=CSS, js=JS, theme="soft") as demo:
                 maximum=50,
                 step=1,
                 value=16,
-            )
+            )
         with gr.Accordion("ExVideo Options", open=False):
             motion_bucket_id = gr.Slider(
                 label="Motion bucket id",
@@ -230,7 +282,7 @@ with gr.Blocks(css=CSS, js=JS, theme="soft") as demo:
                 value=127,
                 step=1,
                 minimum=1,
-                maximum=255
+                maximum=255,
             )
             fps_id = gr.Slider(
                 label="Frames per second",
@@ -238,17 +290,9 @@ with gr.Blocks(css=CSS, js=JS, theme="soft") as demo:
                 value=6,
                 step=1,
                 minimum=5,
-                maximum=30
-            )
-            num_frames = gr.Slider(
-                label="Frames num",
-                info="Frames num",
-                step=1,
-                value=50,
-                minimum=1,
-                maximum=128
+                maximum=30,
             )
-        prompt = gr.Textbox(label="Prompt")
+        prompt = gr.Textbox(label="Prompt", value="best quality")
         with gr.Row():
             submit_btn = gr.Button(value="Generate")
             #stop_btn = gr.Button(value="Stop", variant="stop")
@@ -263,7 +307,8 @@ with gr.Blocks(css=CSS, js=JS, theme="soft") as demo:
         examples_per_page=4,
     )
     selected.change(change_media, inputs=[image_in, video_in, selected], outputs=[image_in, video_in, prompt])
-    submit_event = submit_btn.click(fn=generate, inputs=[video_in, image_in, selected, prompt, seed, num_inference_steps, animatediff_batch_size, animatediff_stride, motion_bucket_id, fps_id, num_frames], outputs=[video, seed], api_name="video")
+    video_in.upload(update_frames, inputs=[video_in], outputs=[num_frames])
+    submit_event = submit_btn.click(fn=generate, inputs=[video_in, image_in, selected, prompt, seed, num_inference_steps, num_frames, shape_height, shape_width, height, width, animatediff_batch_size, animatediff_stride, motion_bucket_id, fps_id], outputs=[video, seed], api_name="video")
     #stop_btn.click(fn=None, inputs=None, outputs=None, cancels=[submit_event])
 
 demo.queue().launch()
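With api_name="video", the generate endpoint is also reachable over the Gradio client API. A hedged usage sketch: the Space id below is assumed, not given in the commit; positional arguments follow the inputs list wired to submit_btn, and newer gradio_client releases may want handle_file() around file inputs:

    from gradio_client import Client

    client = Client("vilarin/exvideo-diffutoon")  # assumed Space id
    result = client.predict(
        None,            # video_in (unused for ExVideo)
        "cat.png",       # image_in (local file path)
        "ExVideo",       # selected
        "best quality",  # prompt
        -1,              # seed (-1 = random)
        10,              # num_inference_steps
        30,              # num_frames
        512, 512,        # shape_height, shape_width
        512, 512,        # height, width
        32, 16,          # animatediff_batch_size, animatediff_stride
        127,             # motion_bucket_id
        6,               # fps_id
        api_name="/video",
    )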
 