# Ref: https://huggingface.co/spaces/multimodalart/cosxl
import spaces
import gradio as gr
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
import torch 
import os

from compel import Compel, ReturnedEmbeddingsType

from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

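# Model repo on the Hugging Face Hub; the access token is read from the TOKEN environment variable.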
model_id = "aipicasso/emi-2-5"
token = os.environ["TOKEN"]

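# Load the Euler Ancestral scheduler and the SDXL pipeline in bfloat16.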
scheduler = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler", token=token)
pipe_normal = StableDiffusionXLPipeline.from_pretrained(model_id, scheduler=scheduler, torch_dtype=torch.bfloat16, token=token)

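# Download the unaestheticXLv31 negative textual-inversion embedding and register it
# with both SDXL text encoders under the trigger token "unaestheticXLv31".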
negative_ti_file = hf_hub_download(repo_id="Aikimi/unaestheticXL_Negative_TI", filename="unaestheticXLv31.safetensors")
state_dict = load_file(negative_ti_file)
pipe_normal.load_textual_inversion(state_dict["clip_g"], token="unaestheticXLv31", text_encoder=pipe_normal.text_encoder_2, tokenizer=pipe_normal.tokenizer_2)
pipe_normal.load_textual_inversion(state_dict["clip_l"], token="unaestheticXLv31", text_encoder=pipe_normal.text_encoder, tokenizer=pipe_normal.tokenizer)

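# Register a second negative embedding from a file bundled with the Space.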
state_dict = load_file("unaestheticXL_Alb2.safetensors")
pipe_normal.load_textual_inversion(state_dict["clip_g"], token="unaestheticXL_Alb2", text_encoder=pipe_normal.text_encoder_2, tokenizer=pipe_normal.tokenizer_2)
pipe_normal.load_textual_inversion(state_dict["clip_l"], token="unaestheticXL_Alb2", text_encoder=pipe_normal.text_encoder, tokenizer=pipe_normal.tokenizer)

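# Load a hand-fixing LoRA and fuse it into the base weights.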
pipe_normal.load_lora_weights("fix_hands.pt")
pipe_normal.fuse_lora(lora_scale=1.0)

pipe_normal.to("cuda")

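# FreeU re-weights the UNet backbone and skip features to improve sample quality.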
pipe_normal.enable_freeu(s1=1.2, s2=0.7, b1=1.1, b2=1.3)

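# Compel parses the (word)++ weighting syntax and returns SDXL prompt embeddings:
# penultimate hidden states from both text encoders plus the pooled embedding.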
compel = Compel(tokenizer=[pipe_normal.tokenizer, pipe_normal.tokenizer_2] , 
                text_encoder=[pipe_normal.text_encoder, pipe_normal.text_encoder_2], 
                returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED, 
                requires_pooled=[False, True])

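# Runs a single text-to-image generation on the GPU allocated to the Space.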
@spaces.GPU
def run_normal(prompt, negative_prompt="", guidance_scale=7.5, progress=gr.Progress(track_tqdm=True)):
    # Filter to keep users from generating copyrighted characters.
    words = ["pokemon", "pikachu", "picachu", "mario", "sonic", "genshin"]
    for word in words:
        prompt = prompt.replace(word, "")
    # Fall back to a default prompt if the prompt is empty after filtering.
    if prompt == "":
        conditioning, pooled = compel("1girl, (upper body)++, black long hair, hime cut, black eyes, looking at viewer, blue and purple hydrangea")
    else:
        conditioning, pooled = compel(prompt)
    negative_conditioning, negative_pooled = compel("(unaestheticXLv31)++++, (unaestheticXL_Alb2)++++, bad hands, bad anatomy, low quality, 3d, photo, realism, text, sign, "+negative_prompt)
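    # Generate a 768x1344 image with 25 sampling steps.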
    result = pipe_normal(
        prompt_embeds=conditioning,
        pooled_prompt_embeds=pooled, 
        negative_prompt_embeds=negative_conditioning, 
        negative_pooled_prompt_embeds=negative_pooled,
        num_inference_steps=25,
        guidance_scale=guidance_scale,
        width=768,
        height=1344)
    
    return result.images[0]

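# Keep the Gradio container narrow and centered.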
css = '''
.gradio-container{
max-width: 768px !important;
margin: 0 auto;
}
'''

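# Example prompts shown below the input box.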
normal_examples = [
    "1girl, (upper body)++, black long hair, hime cut, black eyes, looking at viewer, blue and purple hydrangea",
    "1girl, (full body)++, black long hair, hime cut, black eyes, looking at viewer, school uniform, blue and purple hydrangea",
    "no humans, manga, black and white, monochrome, Mt. fuji, 4k, highly detailed",
    "no humans, manga, black and white, monochrome, Shibuya street, 4k, highly detailed",
    "anime, 1boy++, (upper body)++, silver very short hair, blue eyes, looking at viewer, white background",
    "anime, 1boy++, (full body)++, silver very short hair, blue eyes, looking at viewer, white background",
]

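# Build the demo UI.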
with gr.Blocks(css=css) as demo:
    gr.Markdown('''# Emi 2.5
    Official demo for [Emi 2.5](https://huggingface.co/aipicasso/emi-2-5). Click the generate button!<br>
    Please handle images generated by this model in accordance with applicable laws and regulations.
    ''')
    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
    with gr.Group():
        with gr.Row():
          prompt_normal = gr.Textbox(show_label=False, scale=4, placeholder="Your prompt, e.g.: 1girl, (upper body)++, brown bob short hair, brown eyes, looking at viewer, cherry blossom")
          button_normal = gr.Button("Generate", min_width=120)
        output_normal = gr.Image(label="Your result image", interactive=False)
        with gr.Accordion("Advanced Settings", open=False):
          negative_prompt_normal = gr.Textbox(label="Negative Prompt")
          guidance_scale_normal = gr.Number(label="Guidance Scale", value=7.5)
    gr.Examples(examples=normal_examples, fn=run_normal, inputs=[prompt_normal], outputs=[output_normal], cache_examples=True) 
    
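    # Generate when the button is clicked or Enter is pressed in the prompt box.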
    gr.on(
        triggers=[
            button_normal.click,
            prompt_normal.submit
        ],
        fn=run_normal,
        inputs=[prompt_normal, negative_prompt_normal, guidance_scale_normal],
        outputs=[output_normal],
    )
if __name__ == "__main__":
    demo.launch(share=True)