TempleX committed on
Commit 0686264 · 1 Parent(s): 946d1b8

Update README

Files changed (1)
  1. app.py +31 -25
app.py CHANGED
@@ -8,6 +8,7 @@ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionS
 from ip_adapter import EasyRef
 from huggingface_hub import hf_hub_download
 import gradio as gr
+import os
 import cv2
 import pillow_avif
 
@@ -25,6 +26,29 @@ def adaptive_resize(w, h, size=1024):
 def res2string(w, h):
     return str(w)+"x"+str(h)
 
+def get_image_path_list(folder_name):
+    image_basename_list = os.listdir(folder_name)
+    image_path_list = sorted([os.path.join(folder_name, basename) for basename in image_basename_list])
+    return image_path_list
+
+def get_example():
+    case = [
+        [
+            get_image_path_list('./assets/aragaki_identity'),
+            "An oil painting of a smiling woman.",
+            "A collage of images, monochrome, lowres, bad anatomy, worst quality, low quality",
+        ],
+        [
+            get_image_path_list('./assets/blindbox_style'),
+            "Donald Trump",
+            "A collage of images, monochrome, lowres, bad anatomy, worst quality, low quality",
+        ],
+    ]
+    return case
+
+def upload_example_to_gallery(images, prompt, negative_prompt):
+    return gr.update(value=images, visible=True), gr.update(visible=True), gr.update(visible=False)
+
 base_model_path = "stabilityai/stable-diffusion-xl-base-1.0"
 multimodal_llm_path = "Qwen/Qwen2-VL-2B-Instruct"
 ip_ckpt = hf_hub_download(repo_id="zongzhuofan/EasyRef", filename="pytorch_model.bin", repo_type="model")
@@ -116,31 +140,13 @@ with gr.Blocks(css=css) as demo:
     submit.click(fn=generate_image,
                  inputs=[files,prompt,negative_prompt,height, width, scale, num_inference_steps, seed],
                  outputs=gallery)
-    examples = gr.Examples(
-        examples=[
-            [
-                [
-                    "assets/aragaki_identity/1.jpg",
-                    "assets/aragaki_identity/2.webp",
-                    "assets/aragaki_identity/3.webp",
-                    "assets/aragaki_identity/4.jpeg",
-                    "assets/aragaki_identity/5.webp",
-                ],
-                "An oil painting of a smiling woman.",
-            ],
-            [
-                [
-                    "assets/blindbox_style/1.jpg",
-                    "assets/blindbox_style/2.jpg",
-                    "assets/blindbox_style/3.jpg",
-                    "assets/blindbox_style/4.jpg",
-                    "assets/blindbox_style/5.jpg",
-                ],
-                "Donald Trump"
-            ],
-        ],
-        inputs=[files, prompt],
-    )
+    gr.Examples(
+        examples=get_example(),
+        inputs=[files, prompt, negative_prompt],
+        run_on_click=True,
+        fn=upload_example_to_gallery,
+        outputs=[uploaded_files, clear_button, files],
+    )
 
     gr.Markdown("We release our checkpoints for research purposes only. Users are granted the freedom to create images using this tool, but they are expected to comply with local laws and utilize it in a responsible manner. The developers do not assume any responsibility for potential misuse by users.")
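
For context, the new example wiring relies on Gradio's run_on_click option: when a sample row in gr.Examples is clicked, fn is called with that row's inputs and its return values are routed to outputs, which is how the Space pushes the example images into the gallery and toggles the upload widgets. A minimal self-contained sketch of the same pattern (the component names below are illustrative, not the ones in app.py):

import gradio as gr

# Illustrative names only; app.py uses files / prompt / uploaded_files / clear_button instead.
def show_example(images, prompt):
    # Reveal the preview gallery with the example images and hide the raw file input.
    return gr.update(value=images, visible=True), gr.update(visible=False)

with gr.Blocks() as demo:
    file_input = gr.Files(label="Reference images", file_types=["image"])
    prompt_box = gr.Textbox(label="Prompt")
    preview = gr.Gallery(label="Your images", visible=False)

    gr.Examples(
        examples=[[["assets/aragaki_identity/1.jpg"], "An oil painting of a smiling woman."]],
        inputs=[file_input, prompt_box],
        fn=show_example,
        outputs=[preview, file_input],
        run_on_click=True,  # clicking a row runs fn instead of only filling the inputs
    )

demo.launch()

Because fn here only rearranges the UI, the actual generation still goes through the existing submit.click handler.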