showee committed
Commit f8b17b0 · 1 Parent(s): 94a6ee2

Update app.py

Files changed (1)
  1. app.py +116 -7
app.py CHANGED
@@ -1,11 +1,81 @@
-from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler
+from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler, AutoencoderKL
 import gradio as gr
 import torch
 from PIL import Image
+from huggingface_hub import hf_hub_download
+from safetensors.torch import load_file
+
+
+def convert_safetensors_to_bin(pipeline, state_dict, alpha=0.4):
+    # Merge LoRA weights from a safetensors state dict into the pipeline, scaled by alpha.
+    LORA_PREFIX_UNET = 'lora_unet'
+    LORA_PREFIX_TEXT_ENCODER = 'lora_te'
+
+    visited = []
+
+    # directly update the weights of the diffusers model
+    for key in state_dict:
+        # if in doubt, print the key; it usually looks like
+        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
+
+        # the alpha argument replaces the stored per-key alpha values, so skip them
+        if '.alpha' in key or key in visited:
+            continue
+
+        # route text-encoder keys and UNet keys to the right submodule
+        if 'text' in key:
+            layer_infos = key.split('.')[0].split(LORA_PREFIX_TEXT_ENCODER + '_')[-1].split('_')
+            curr_layer = pipeline.text_encoder
+        else:
+            layer_infos = key.split('.')[0].split(LORA_PREFIX_UNET + '_')[-1].split('_')
+            curr_layer = pipeline.unet
+
+        # find the target layer, reassembling attribute names that themselves contain underscores
+        temp_name = layer_infos.pop(0)
+        while len(layer_infos) > -1:
+            try:
+                curr_layer = curr_layer.__getattr__(temp_name)
+                if len(layer_infos) > 0:
+                    temp_name = layer_infos.pop(0)
+                elif len(layer_infos) == 0:
+                    break
+            except Exception:
+                if len(temp_name) > 0:
+                    temp_name += '_' + layer_infos.pop(0)
+                else:
+                    temp_name = layer_infos.pop(0)
+
+        # org_forward(x) + lora_up(lora_down(x)) * multiplier
+        pair_keys = []
+        if 'lora_down' in key:
+            pair_keys.append(key.replace('lora_down', 'lora_up'))
+            pair_keys.append(key)
+        else:
+            pair_keys.append(key)
+            pair_keys.append(key.replace('lora_up', 'lora_down'))
+
+        # update the weight; conv weights carry two trailing singleton dims
+        if len(state_dict[pair_keys[0]].shape) == 4:
+            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
+            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
+            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
+        else:
+            weight_up = state_dict[pair_keys[0]].to(torch.float32)
+            weight_down = state_dict[pair_keys[1]].to(torch.float32)
+            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)
+
+        # mark both halves of the pair as processed
+        for item in pair_keys:
+            visited.append(item)
+
+    return pipeline
+
 
 model_id = 'andite/anything-v4.0'
 prefix = ''
-
+lora_path = hf_hub_download(
+    "showee/showee-lora-v1.0", "showee-any4.0.safetensors"
+)
+vae_path = "./anything-v4.0-vae/diffusion_pytorch_model.bin"
+
 scheduler = DPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
 
 pipe = StableDiffusionPipeline.from_pretrained(
@@ -13,11 +83,23 @@ pipe = StableDiffusionPipeline.from_pretrained(
     torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
     scheduler=scheduler)
 
+pipe.vae.load_state_dict(torch.load(vae_path))
+
+state_dict = load_file(lora_path)
+pipe = convert_safetensors_to_bin(pipe, state_dict, 0.3)
+
+
 pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(
     model_id,
     torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
     scheduler=scheduler)
 
+pipe_i2i.vae.load_state_dict(torch.load(vae_path))
+
+state_dict_i2i = load_file(lora_path)
+pipe_i2i = convert_safetensors_to_bin(pipe_i2i, state_dict_i2i, 0.3)
+
+
 if torch.cuda.is_available():
     pipe = pipe.to("cuda")
     pipe_i2i = pipe_i2i.to("cuda")
@@ -76,14 +158,14 @@ with gr.Blocks(css=css) as demo:
         f"""
         <div class="main-div">
           <div>
-            <h1>Anything V4.0</h1>
+            <h1>Showee V1.0</h1>
           </div>
           <p>
-            Demo for <a href="https://huggingface.co/andite/anything-v4.0">Anything V4.0</a> Stable Diffusion model.<br>
+            Demo for <a href="https://huggingface.co/showee/showee-lora-v1.0">Showee V1.0</a>, LoRA adaptation weights fine-tuned from the <a href="https://huggingface.co/andite/anything-v4.0">Anything V4.0</a> Stable Diffusion model.<br>
             {"Add the following tokens to your prompts for the model to work properly: <b>prefix</b>" if prefix else ""}
           </p>
-          Running on {"<b>GPU 🔥</b>" if torch.cuda.is_available() else f"<b>CPU 🥶</b>. For faster inference it is recommended to <b>upgrade to GPU in <a href='https://huggingface.co/spaces/akhaliq/anything-v4.0/settings'>Settings</a></b>"} after duplicating the space<br><br>
-          <a style="display:inline-block" href="https://huggingface.co/spaces/akhaliq/anything-v4.0?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
+          Running on {"<b>GPU 🔥</b>" if torch.cuda.is_available() else f"<b>CPU 🥶</b>. For faster inference it is recommended to <b>upgrade to GPU in <a href='https://huggingface.co/spaces/showee/showee-v1.0/settings'>Settings</a></b>"} after duplicating the space<br><br>
+          <a style="display:inline-block" href="https://huggingface.co/spaces/showee/showee-v1.0?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
         </div>
         """
     )
@@ -101,7 +183,15 @@
         with gr.Column(scale=45):
            with gr.Tab("Options"):
                with gr.Group():
-                    neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
+                    neg_prompt = gr.Textbox(label="Negative prompt",
+                                            placeholder="What to exclude from the image",
+                                            value="NSFW, lowres, ((bad anatomy)), ((bad hands)), text, missing finger, "
+                                                  "extra digits, fewer digits, blurry, ((mutated hands and fingers)), "
+                                                  "(poorly drawn face), ((mutation)), ((deformed face)), (ugly), "
+                                                  "((bad proportions)), ((extra limbs)), extra face, (double head), "
+                                                  "(extra head), ((extra feet)), monster, logo, cropped, worst quality, "
+                                                  "low quality, normal quality, jpeg, humpbacked, long body, long neck, "
+                                                  "((jpeg artifacts))")
                     auto_prefix = gr.Checkbox(label="Prefix styling tokens automatically ()", value=prefix, visible=prefix)
 
                     with gr.Row():
@@ -119,6 +209,25 @@
                     image = gr.Image(label="Image", height=256, tool="editor", type="pil")
                     strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
 
+    gr.Examples(
+        [[
+            "masterpiece, best quality, ultra-detailed, illustration, portrait, 1girl, solo, white hair, green eyes, "
+            "aqua_eyes, cat_ears, :3, ahoge, dress, red_jacket, long_sleeves, bangs, black_legwear, hair_ornament, "
+            "hairclip", 8, 25, 768, 1024, 909198616
+        ],
+        [
+            "masterpiece, best quality, ultra-detailed, illustration, portrait, 1girl, :3, animal_ears, aqua_eyes, ahoge, "
+            "asymmetrical_legwear, bangs, black_footwear, black_skirt, breasts, cleavage, hair_ornament, hairclip, "
+            "long_hair, navel, thighhighs, smile", 7.5, 25, 512, 768, 9
+        ],
+        [
+            "masterpiece, best quality, ultra-detailed, illustration, portrait, 1girl, :3, animal_ears, aqua_eyes, ahoge, seaside, "
+            "asymmetrical_legwear, bangs, black_footwear, black_skirt, breasts, cleavage, hair_ornament, hairclip, "
+            "long_hair, navel, thighhighs", 7.5, 25, 512, 512, 353573117
+        ]],
+        [prompt, guidance, steps, width, height, seed],
+    )
+
     auto_prefix.change(lambda x: gr.update(placeholder=f"{prefix} [your prompt]" if x else "[Your prompt]"), inputs=auto_prefix, outputs=prompt, queue=False)
 
     inputs = [prompt, guidance, steps, width, height, seed, image, strength, neg_prompt, auto_prefix]
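
For reference, the per-layer update that convert_safetensors_to_bin applies is the standard LoRA merge W' = W + alpha * (up @ down): the low-rank branch is folded into the base weights once, so inference afterwards costs nothing extra. A minimal sketch of that identity; the layer sizes (d_out, d_in, rank) and the batch of inputs are made up for illustration, not taken from the model:

import torch

d_out, d_in, rank, alpha = 320, 320, 4, 0.3

W = torch.randn(d_out, d_in)         # base layer weight
lora_up = torch.randn(d_out, rank)   # "lora_up" factor
lora_down = torch.randn(rank, d_in)  # "lora_down" factor

# the merge performed once per layer by convert_safetensors_to_bin
W_merged = W + alpha * torch.mm(lora_up, lora_down)

# equivalent to evaluating the LoRA branch on every forward pass:
# org_forward(x) + lora_up(lora_down(x)) * multiplier
x = torch.randn(8, d_in)
y_branch = x @ W.T + alpha * ((x @ lora_down.T) @ lora_up.T)
y_merged = x @ W_merged.T
print(torch.allclose(y_branch, y_merged, atol=1e-4))  # True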
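After the merge, both pipelines behave like stock diffusers pipelines, and the Gradio UI is a thin wrapper around them. A minimal sketch of calling the merged text-to-image pipeline directly, reusing the prompt, guidance, steps, size, and seed from the first bundled example; it assumes the setup code above has already run (so pipe exists), and the negative prompt here is abbreviated from the UI default:

generator = torch.Generator("cuda" if torch.cuda.is_available() else "cpu").manual_seed(909198616)
image = pipe(
    "masterpiece, best quality, ultra-detailed, illustration, portrait, 1girl, solo, white hair, green eyes, "
    "aqua_eyes, cat_ears, :3, ahoge, dress, red_jacket, long_sleeves, bangs, black_legwear, hair_ornament, "
    "hairclip",
    negative_prompt="NSFW, lowres, ((bad anatomy)), ((bad hands)), blurry",  # abbreviated
    guidance_scale=8,
    num_inference_steps=25,
    width=768,
    height=1024,
    generator=generator,
).images[0]
image.save("sample.png")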