Spaces:
Running
Running
daquanzhou
committed on
Commit
•
196ce97
1
Parent(s):
a263b49
update app.py
Browse files
app.py
CHANGED
@@ -4,6 +4,7 @@ import sys
|
|
4 |
from typing import Sequence, Mapping, Any, Union
|
5 |
import torch
|
6 |
import gradio as gr
|
|
|
7 |
|
8 |
import logging.config
|
9 |
LOGGING_CONFIG = {
|
@@ -149,6 +150,12 @@ from nodes import (
|
|
149 |
|
150 |
class MagicMeController:
|
151 |
def __init__(self):
|
|
|
|
|
|
|
|
|
|
|
|
|
152 |
with torch.inference_mode():
|
153 |
vaeloader = VAELoader()
|
154 |
self.vaeloader_2 = vaeloader.load_vae(
|
@@ -218,15 +225,32 @@ class MagicMeController:
|
|
218 |
self.ultimatesdupscale = NODE_CLASS_MAPPINGS["UltimateSDUpscale"]()
|
219 |
self.imagecasharpening = NODE_CLASS_MAPPINGS["ImageCASharpening+"]()
|
220 |
|
221 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
222 |
with torch.inference_mode():
|
223 |
cliptextencode = CLIPTextEncode()
|
224 |
cliptextencode_6 = cliptextencode.encode(
|
225 |
-
text=
|
226 |
clip=get_value_at_index(self.checkpointloadersimple_32, 1),
|
227 |
)
|
228 |
cliptextencode_274 = cliptextencode.encode(
|
229 |
-
text=
|
230 |
clip=get_value_at_index(self.checkpointloadersimple_32, 1),
|
231 |
)
|
232 |
ade_animatediffloaderwithcontext_261 = (
|
@@ -425,6 +449,18 @@ class MagicMeController:
|
|
425 |
unique_id=5059112282155244564,
|
426 |
)
|
427 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
428 |
import_custom_nodes()
|
429 |
c = MagicMeController()
|
430 |
|
@@ -509,14 +545,16 @@ def ui():
|
|
509 |
)
|
510 |
with gr.Row():
|
511 |
with gr.Column():
|
512 |
-
#
|
513 |
# motion_module_dropdown = gr.Dropdown( label="Motion Module", choices=c.motion_module_list, value=c.motion_module_list[0], interactive=True )
|
514 |
|
515 |
-
#
|
516 |
# motion_module_dropdown.change(fn=c.update_motion_module, inputs=[motion_module_dropdown], outputs=[motion_module_dropdown])
|
|
|
|
|
517 |
|
518 |
-
prompt_textbox = gr.Textbox( label="Prompt", lines=3 )
|
519 |
-
negative_prompt_textbox = gr.Textbox( label="Negative Prompt", lines=3, value="worst quality, low quality,
|
520 |
|
521 |
# with gr.Accordion("Advance", open=False):
|
522 |
# with gr.Row():
|
@@ -533,7 +571,7 @@ def ui():
|
|
533 |
result_video = gr.Video( label="Generated Animation", interactive=False )
|
534 |
json_config = gr.Json( label="Config", value=None )
|
535 |
|
536 |
-
inputs = [prompt_textbox, negative_prompt_textbox]
|
537 |
outputs = [result_video, json_config]
|
538 |
|
539 |
generate_button.click( fn=c.run_once, inputs=inputs, outputs=outputs )
|
|
|
4 |
from typing import Sequence, Mapping, Any, Union
|
5 |
import torch
|
6 |
import gradio as gr
|
7 |
+
from glob import glob
|
8 |
|
9 |
import logging.config
|
10 |
LOGGING_CONFIG = {
|
|
|
150 |
|
151 |
class MagicMeController:
|
152 |
def __init__(self):
|
153 |
+
self.id_embed_dir = "models/embeddings"
|
154 |
+
self.save_dir = "output"
|
155 |
+
self.id_embed_list = []
|
156 |
+
self.woman_id_embed_list = ["beyonce", "hermione", "lifeifei", "lisa", "mona", "monroe", "taylor", "scarlett"]
|
157 |
+
self.refresh_id_embed()
|
158 |
+
self.update_id_embed(self.id_embed_list[0])
|
159 |
with torch.inference_mode():
|
160 |
vaeloader = VAELoader()
|
161 |
self.vaeloader_2 = vaeloader.load_vae(
|
|
|
225 |
self.ultimatesdupscale = NODE_CLASS_MAPPINGS["UltimateSDUpscale"]()
|
226 |
self.imagecasharpening = NODE_CLASS_MAPPINGS["ImageCASharpening+"]()
|
227 |
|
228 |
+
|
229 |
+
|
230 |
+
def refresh_id_embed(self):
    """Rescan the ID-embedding directory and cache the names of the *.pt files found.

    Populates ``self.id_embed_list`` with basenames (extension included); the
    UI dropdown and prompt construction refer to embeddings by these names.
    """
    matches = glob(os.path.join(self.id_embed_dir, "*.pt"))
    # Keep only the file names — the directory prefix is implied by id_embed_dir.
    self.id_embed_list = [os.path.basename(match) for match in matches]
|
233 |
+
|
234 |
+
def update_id_embed(self, id_embed_dropdown):
    """Remember which ID embedding the user selected in the dropdown.

    Returns a gradio ``Dropdown.update()`` object carrying no property
    changes, so the component itself is left as-is.
    """
    chosen = id_embed_dropdown
    self.selected_id_embed = chosen
    return gr.Dropdown.update()
|
237 |
+
|
238 |
+
|
239 |
+
def run_once(self, prompt_text_box, negative_prompt_text_box, id_embed_dropdown):
|
240 |
+
if self.selected_id_embed != id_embed_dropdown: self.update_id_embed(id_embed_dropdown)
|
241 |
+
|
242 |
+
category = "woman" if self.selected_id_embed in self.woman_id_embed_list else "man"
|
243 |
+
prompt = f"a photo of embedding:{self.selected_id_embed} {category} " + prompt_text_box
|
244 |
+
print("prompt:", prompt)
|
245 |
+
print("negative_prompt_text_box:", negative_prompt_text_box)
|
246 |
with torch.inference_mode():
|
247 |
cliptextencode = CLIPTextEncode()
|
248 |
cliptextencode_6 = cliptextencode.encode(
|
249 |
+
text=negative_prompt_text_box,
|
250 |
clip=get_value_at_index(self.checkpointloadersimple_32, 1),
|
251 |
)
|
252 |
cliptextencode_274 = cliptextencode.encode(
|
253 |
+
text=prompt,
|
254 |
clip=get_value_at_index(self.checkpointloadersimple_32, 1),
|
255 |
)
|
256 |
ade_animatediffloaderwithcontext_261 = (
|
|
|
449 |
unique_id=5059112282155244564,
|
450 |
)
|
451 |
|
452 |
+
|
453 |
+
save_sample_path = sorted(glob(self.save_dir, 'SR*.mp4'))[-1]
|
454 |
+
|
455 |
+
json_config = {
|
456 |
+
"prompt": prompt,
|
457 |
+
"n_prompt": negative_prompt_text_box,
|
458 |
+
"id_embed_dropdown": id_embed_dropdown,
|
459 |
+
}
|
460 |
+
return gr.Video.update(value=save_sample_path), gr.Json.update(value=json_config)
|
461 |
+
|
462 |
+
|
463 |
+
|
464 |
import_custom_nodes()
|
465 |
c = MagicMeController()
|
466 |
|
|
|
545 |
)
|
546 |
with gr.Row():
|
547 |
with gr.Column():
|
548 |
+
# id_embed_dropdown = gr.Dropdown( label="Base DreamBooth Model", choices=c.base_model_list, value=c.base_model_list[0], interactive=True )
|
549 |
# motion_module_dropdown = gr.Dropdown( label="Motion Module", choices=c.motion_module_list, value=c.motion_module_list[0], interactive=True )
|
550 |
|
551 |
+
# id_embed_dropdown.change(fn=c.update_base_model, inputs=[id_embed_dropdown], outputs=[id_embed_dropdown])
|
552 |
# motion_module_dropdown.change(fn=c.update_motion_module, inputs=[motion_module_dropdown], outputs=[motion_module_dropdown])
|
553 |
+
id_embed_dropdown = gr.Dropdown( label="ID Embedding", choices=c.id_embed_list, value=c.id_embed_list[0], interactive=True )
|
554 |
+
id_embed_dropdown.change(fn=c.update_id_embed, inputs=[id_embed_dropdown], outputs=[id_embed_dropdown])
|
555 |
|
556 |
+
prompt_textbox = gr.Textbox( label="Prompt", info="a photo of <V*> man/woman ", lines=3, value="in superman costume in the outer space, stars in the background" )
|
557 |
+
negative_prompt_textbox = gr.Textbox( label="Negative Prompt", lines=3, value="(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime), text, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, UnrealisticDream")
|
558 |
|
559 |
# with gr.Accordion("Advance", open=False):
|
560 |
# with gr.Row():
|
|
|
571 |
result_video = gr.Video( label="Generated Animation", interactive=False )
|
572 |
json_config = gr.Json( label="Config", value=None )
|
573 |
|
574 |
+
inputs = [prompt_textbox, negative_prompt_textbox, id_embed_dropdown]
|
575 |
outputs = [result_video, json_config]
|
576 |
|
577 |
generate_button.click( fn=c.run_once, inputs=inputs, outputs=outputs )
|