# emi-2-demo / app.py
# Ref: https://huggingface.co/spaces/multimodalart/cosxl
import spaces
import gradio as gr
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
import torch
import os
from compel import Compel, ReturnedEmbeddingsType
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
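# The Emi 2.5 weights are fetched with an HF access token read from the TOKEN environment variable.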
model_id = "aipicasso/emi-2-5"
token = os.environ["TOKEN"]

scheduler = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler", token=token)
pipe_normal = StableDiffusionXLPipeline.from_pretrained(model_id, scheduler=scheduler, torch_dtype=torch.bfloat16, token=token)
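# Negative textual-inversion embeddings used in the negative prompt below; each safetensors file
# holds separate "clip_g" / "clip_l" tensors for SDXL's two text encoders.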
negative_ti_file = hf_hub_download(repo_id="Aikimi/unaestheticXL_Negative_TI", filename="unaestheticXLv31.safetensors")
state_dict = load_file(negative_ti_file)
pipe_normal.load_textual_inversion(state_dict["clip_g"], token="unaestheticXLv31", text_encoder=pipe_normal.text_encoder_2, tokenizer=pipe_normal.tokenizer_2)
pipe_normal.load_textual_inversion(state_dict["clip_l"], token="unaestheticXLv31", text_encoder=pipe_normal.text_encoder, tokenizer=pipe_normal.tokenizer)
state_dict = load_file("unaestheticXL_Alb2.safetensors")
pipe_normal.load_textual_inversion(state_dict["clip_g"], token="unaestheticXL_Alb2", text_encoder=pipe_normal.text_encoder_2, tokenizer=pipe_normal.tokenizer_2)
pipe_normal.load_textual_inversion(state_dict["clip_l"], token="unaestheticXL_Alb2", text_encoder=pipe_normal.text_encoder, tokenizer=pipe_normal.tokenizer)
pipe_normal.load_lora_weights("fix_hands.pt")
pipe_normal.fuse_lora(lora_scale=1.0)
pipe_normal.to("cuda")
pipe_normal.enable_freeu(s1=1.2, s2=0.7, b1=1.1, b2=1.3)
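# Compel parses the ()++ prompt-weighting syntax and returns embeddings for both SDXL text
# encoders (non-normalized penultimate hidden states, plus the pooled output of the second encoder).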
compel = Compel(
    tokenizer=[pipe_normal.tokenizer, pipe_normal.tokenizer_2],
    text_encoder=[pipe_normal.text_encoder, pipe_normal.text_encoder_2],
    returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
    requires_pooled=[False, True],
)
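# On ZeroGPU Spaces, @spaces.GPU allocates a GPU only for the duration of each call.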
@spaces.GPU
def run_normal(prompt, negative_prompt="", guidance_scale=7.5, progress=gr.Progress(track_tqdm=True)):
    # Filter to prevent users' prompts from infringing copyright
    words = ["pokemon", "pikachu", "picachu", "mario", "sonic", "genshin"]
    for word in words:
        prompt = prompt.replace(word, "")

    # Fall back to a default prompt if the (filtered) prompt is empty
    if prompt == "":
        conditioning, pooled = compel("1girl, (upper body)++, black long hair, hime cut, black eyes, looking at viewer, blue and purple hydrangea")
    else:
        conditioning, pooled = compel(prompt)

    negative_conditioning, negative_pooled = compel("(unaestheticXLv31)++++, (unaestheticXL_Alb2)++++, bad hands, bad anatomy, low quality, 3d, photo, realism, text, sign, " + negative_prompt)

    result = pipe_normal(
        prompt_embeds=conditioning,
        pooled_prompt_embeds=pooled,
        negative_prompt_embeds=negative_conditioning,
        negative_pooled_prompt_embeds=negative_pooled,
        num_inference_steps=25,
        guidance_scale=guidance_scale,
        width=768,
        height=1344,
    )
    return result.images[0]
css = '''
.gradio-container{
    max-width: 768px !important;
    margin: 0 auto;
}
'''
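# Example prompts shown under the UI; with cache_examples=True their outputs are pre-generated.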
normal_examples = [
"1girl, (upper body)++, black long hair, hime cut, black eyes, looking at viewer, blue and purple hydrangea",
"1girl, (full body)++, black long hair, hime cut, black eyes, looking at viewer, school uniform, blue and purple hydrangea",
"no humans, manga, black and white, monochrome, Mt. fuji, 4k, highly detailed",
"no humans, manga, black and white, monochrome, Shibuya street, 4k, highly detailed",
"anime, 1boy++, (upper body)++, silver very short hair, blue eyes, looking at viewer, white background",
"anime, 1boy++, (full body)++, silver very short hair, blue eyes, looking at viewer, white background",
]
with gr.Blocks(css=css) as demo:
    gr.Markdown('''# Emi 2.5
    Official demo for [Emi 2.5](https://huggingface.co/aipicasso/emi-2-5). Click the generate button!<br>
    Please handle this model's outputs in accordance with applicable laws and regulations.
    ''')
    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
    with gr.Group():
        with gr.Row():
            prompt_normal = gr.Textbox(show_label=False, scale=4, placeholder="Your prompt, e.g.: 1girl, (upper body)++, brown bob short hair, brown eyes, looking at viewer, cherry blossom")
            button_normal = gr.Button("Generate", min_width=120)
        output_normal = gr.Image(label="Your result image", interactive=False)
        with gr.Accordion("Advanced Settings", open=False):
            negative_prompt_normal = gr.Textbox(label="Negative Prompt")
            guidance_scale_normal = gr.Number(label="Guidance Scale", value=7.5)

    gr.Examples(examples=normal_examples, fn=run_normal, inputs=[prompt_normal], outputs=[output_normal], cache_examples=True)

    # Run generation on both the button click and pressing Enter in the prompt box.
    gr.on(
        triggers=[
            button_normal.click,
            prompt_normal.submit,
        ],
        fn=run_normal,
        inputs=[prompt_normal, negative_prompt_normal, guidance_scale_normal],
        outputs=[output_normal],
    )
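# Launch the Gradio app when run as a script.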
if __name__ == "__main__":
    demo.launch(share=True)