seawolf2357 committed on
Commit
71aff80
·
verified ·
1 Parent(s): a48705b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +68 -22
app.py CHANGED
@@ -197,9 +197,52 @@ css = """
197
  .tab-nav button:nth-child(3) { border-top: 3px solid #f7b731; }
198
  """
199
 
 
 
 
 
 
 
 
 
 
 
 
 
 
200
 
201
  with gr.Blocks(analytics_enabled=False, css=css) as dynamicrafter_iface:
202
- gr.Markdown("슈퍼퍼 스튜디오")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
203
  with gr.Tab(label='Image+Text to Video'):
204
  with gr.Column():
205
  with gr.Row():
@@ -233,32 +276,35 @@ with gr.Blocks(analytics_enabled=False, css=css) as dynamicrafter_iface:
233
  )
234
 
235
 
236
- with gr.Tab(label='Text to Image'):
237
  with gr.Column():
 
238
  with gr.Row():
239
- t2i_input_text = gr.Text(label='Prompt')
 
240
  with gr.Row():
241
- t2i_seed = gr.Slider(label='Seed', minimum=0, maximum=MAX_SEED, step=1, value=42)
242
- t2i_randomize_seed = gr.Checkbox(label='Randomize seed', value=False)
243
  with gr.Row():
244
- t2i_width = gr.Slider(label='Width', minimum=256, maximum=MAX_IMAGE_SIZE, step=64, value=1024)
245
- t2i_height = gr.Slider(label='Height', minimum=256, maximum=MAX_IMAGE_SIZE, step=64, value=576)
246
  with gr.Row():
247
- t2i_guidance_scale = gr.Slider(label='Guidance Scale', minimum=1.0, maximum=20.0, step=0.1, value=5.0)
248
- t2i_num_inference_steps = gr.Slider(label='Inference Steps', minimum=1, maximum=100, step=1, value=28)
249
- # t2i_generate_btn = gr.Button("Generate")
250
- # t2i_output_image = gr.Image(label="Generated Image", elem_id="t2i_output_img")
251
- # t2i_output_seed = gr.Number(label="Used Seed", elem_id="t2i_output_seed")
 
 
 
 
 
252
 
253
- t2i_generate_btn = gr.Button("Generate")
254
- t2i_output_image = gr.Image(label="Generated Image", elem_id="t2i_output_img")
255
- t2i_output_seed = gr.Number(label="Used Seed", elem_id="t2i_output_seed")
256
- t2i_translated_prompt = gr.Text(label="Translated Prompt (if applicable)", elem_id="t2i_translated_prompt")
 
 
257
 
258
- t2i_generate_btn.click(
259
- fn=infer_t2i,
260
- inputs=[t2i_input_text, t2i_seed, t2i_randomize_seed, t2i_width, t2i_height, t2i_guidance_scale, t2i_num_inference_steps],
261
- outputs=[t2i_output_image, t2i_output_seed, t2i_translated_prompt]
262
- )
263
-
264
  dynamicrafter_iface.queue(max_size=12).launch(show_api=True)
 
197
  .tab-nav button:nth-child(3) { border-top: 3px solid #f7b731; }
198
  """
199
 
200
+ # 먼저 text-to-video 함수를 정의합니다.
201
+ @spaces.GPU(duration=300)
202
+ def infer_t2v(prompt, seed=42, randomize_seed=False, width=1024, height=576, guidance_scale=5.0, num_inference_steps=28,
203
+ video_steps=50, video_cfg_scale=7.5, video_eta=1.0, video_fps=3, video_length=2):
204
+ # 텍스트로 이미지 생성
205
+ image, _, translated_prompt = infer_t2i(prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps)
206
+
207
+ # ์ƒ์„ฑ๋œ ์ด๋ฏธ์ง€๋กœ ๋น„๋””์˜ค ์ƒ์„ฑ
208
+ video_path = infer(np.array(image), translated_prompt, video_steps, video_cfg_scale, video_eta, video_fps, seed, video_length)
209
+
210
+ return video_path, translated_prompt
211
+
212
+
213
 
214
  with gr.Blocks(analytics_enabled=False, css=css) as dynamicrafter_iface:
215
+ gr.Markdown("슈퍼 스튜디오")
216
+
217
+ with gr.Tab(label='Text to Image'):
218
+ with gr.Column():
219
+ with gr.Row():
220
+ t2i_input_text = gr.Text(label='Prompt')
221
+ with gr.Row():
222
+ t2i_seed = gr.Slider(label='Seed', minimum=0, maximum=MAX_SEED, step=1, value=42)
223
+ t2i_randomize_seed = gr.Checkbox(label='Randomize seed', value=False)
224
+ with gr.Row():
225
+ t2i_width = gr.Slider(label='Width', minimum=256, maximum=MAX_IMAGE_SIZE, step=64, value=1024)
226
+ t2i_height = gr.Slider(label='Height', minimum=256, maximum=MAX_IMAGE_SIZE, step=64, value=576)
227
+ with gr.Row():
228
+ t2i_guidance_scale = gr.Slider(label='Guidance Scale', minimum=1.0, maximum=20.0, step=0.1, value=5.0)
229
+ t2i_num_inference_steps = gr.Slider(label='Inference Steps', minimum=1, maximum=100, step=1, value=28)
230
+ # t2i_generate_btn = gr.Button("Generate")
231
+ # t2i_output_image = gr.Image(label="Generated Image", elem_id="t2i_output_img")
232
+ # t2i_output_seed = gr.Number(label="Used Seed", elem_id="t2i_output_seed")
233
+
234
+ t2i_generate_btn = gr.Button("Generate")
235
+ t2i_output_image = gr.Image(label="Generated Image", elem_id="t2i_output_img")
236
+ t2i_output_seed = gr.Number(label="Used Seed", elem_id="t2i_output_seed")
237
+ t2i_translated_prompt = gr.Text(label="Translated Prompt (if applicable)", elem_id="t2i_translated_prompt")
238
+
239
+ t2i_generate_btn.click(
240
+ fn=infer_t2i,
241
+ inputs=[t2i_input_text, t2i_seed, t2i_randomize_seed, t2i_width, t2i_height, t2i_guidance_scale, t2i_num_inference_steps],
242
+ outputs=[t2i_output_image, t2i_output_seed, t2i_translated_prompt]
243
+ )
244
+
245
+
246
  with gr.Tab(label='Image+Text to Video'):
247
  with gr.Column():
248
  with gr.Row():
 
276
  )
277
 
278
 
279
+ with gr.Tab(label='Text to Video'):
280
  with gr.Column():
281
+ t2v_input_text = gr.Text(label='Prompt')
282
  with gr.Row():
283
+ t2v_seed = gr.Slider(label='Seed', minimum=0, maximum=MAX_SEED, step=1, value=42)
284
+ t2v_randomize_seed = gr.Checkbox(label='Randomize seed', value=False)
285
  with gr.Row():
286
+ t2v_width = gr.Slider(label='Width', minimum=256, maximum=MAX_IMAGE_SIZE, step=64, value=1024)
287
+ t2v_height = gr.Slider(label='Height', minimum=256, maximum=MAX_IMAGE_SIZE, step=64, value=576)
288
  with gr.Row():
289
+ t2v_guidance_scale = gr.Slider(label='Image Guidance Scale', minimum=1.0, maximum=20.0, step=0.1, value=5.0)
290
+ t2v_num_inference_steps = gr.Slider(label='Image Inference Steps', minimum=1, maximum=100, step=1, value=28)
291
  with gr.Row():
292
+ t2v_video_steps = gr.Slider(label='Video Steps', minimum=1, maximum=50, step=1, value=30)
293
+ t2v_video_cfg_scale = gr.Slider(label='Video CFG Scale', minimum=1.0, maximum=15.0, step=0.5, value=3.5)
294
+ with gr.Row():
295
+ t2v_video_eta = gr.Slider(label='Video ETA', minimum=0.0, maximum=1.0, step=0.1, value=1.0)
296
+ t2v_video_fps = gr.Slider(label='Video FPS', minimum=5, maximum=20, step=1, value=8)
297
+ t2v_video_length = gr.Slider(label='Video Length (seconds)', minimum=2, maximum=8, step=1, value=2)
298
+
299
+ t2v_generate_btn = gr.Button("Generate Video")
300
+ t2v_output_video = gr.Video(label="Generated Video", elem_id="t2v_output_vid", autoplay=True, show_share_button=True)
301
+ t2v_translated_prompt = gr.Text(label="Translated Prompt (if applicable)", elem_id="t2v_translated_prompt")
302
 
303
+ t2v_generate_btn.click(
304
+ fn=infer_t2v,
305
+ inputs=[t2v_input_text, t2v_seed, t2v_randomize_seed, t2v_width, t2v_height, t2v_guidance_scale, t2v_num_inference_steps,
306
+ t2v_video_steps, t2v_video_cfg_scale, t2v_video_eta, t2v_video_fps, t2v_video_length],
307
+ outputs=[t2v_output_video, t2v_translated_prompt]
308
+ )
309
 
 
 
 
 
 
 
310
  dynamicrafter_iface.queue(max_size=12).launch(show_api=True)