Update app.py
Browse files
app.py
CHANGED
@@ -1,198 +1,64 @@
|
|
1 |
import gradio as gr
|
2 |
-
import
|
3 |
-
import random
|
4 |
import os
|
5 |
-
|
6 |
-
# import spaces #[uncomment to use ZeroGPU]
|
7 |
-
from diffusers import DiffusionPipeline
|
8 |
import torch
|
9 |
|
10 |
-
|
11 |
-
|
12 |
-
app = FastAPI()
|
13 |
-
|
14 |
-
#----------Start of theme----------
|
15 |
-
theme = gr.themes.Soft(
|
16 |
-
primary_hue="zinc",
|
17 |
-
secondary_hue="stone",
|
18 |
-
font=[gr.themes.GoogleFont('Kavivanar'), gr.themes.GoogleFont('Kavivanar'), 'system-ui', 'sans-serif'],
|
19 |
-
font_mono=[gr.themes.GoogleFont('Source Code Pro'), gr.themes.GoogleFont('Inconsolata'), gr.themes.GoogleFont('Inconsolata'), 'monospace'],
|
20 |
-
).set(
|
21 |
-
body_background_fill='*primary_100',
|
22 |
-
body_text_color='secondary_600',
|
23 |
-
body_text_color_subdued='*primary_500',
|
24 |
-
body_text_weight='500',
|
25 |
-
background_fill_primary='*primary_100',
|
26 |
-
background_fill_secondary='*secondary_200',
|
27 |
-
color_accent='*primary_300',
|
28 |
-
border_color_accent_subdued='*primary_400',
|
29 |
-
border_color_primary='*primary_400',
|
30 |
-
block_background_fill='*primary_300',
|
31 |
-
block_border_width='*panel_border_width',
|
32 |
-
block_info_text_color='*primary_700',
|
33 |
-
block_info_text_size='*text_md',
|
34 |
-
panel_background_fill='*primary_200',
|
35 |
-
accordion_text_color='*primary_600',
|
36 |
-
slider_color='*primary_500',
|
37 |
-
table_text_color='*primary_600',
|
38 |
-
input_background_fill='*primary_50',
|
39 |
-
input_background_fill_focus='*primary_100',
|
40 |
-
button_primary_background_fill='*primary_500',
|
41 |
-
button_primary_background_fill_hover='*primary_400',
|
42 |
-
button_primary_text_color='*primary_50',
|
43 |
-
button_primary_text_color_hover='*primary_100',
|
44 |
-
button_cancel_background_fill='*primary_500',
|
45 |
-
button_cancel_background_fill_hover='*primary_400'
|
46 |
-
)
|
47 |
-
#----------End of theme----------
|
48 |
-
|
49 |
-
API_TOKEN = os.getenv("HF_READ_TOKEN")
|
50 |
-
headers = {"Authorization": f"Bearer {API_TOKEN}"}
|
51 |
-
timeout = 100
|
52 |
-
|
53 |
-
device = "cuda" if torch.cuda.is_available() else "cpu"
|
54 |
-
model_repo_id = "stabilityai/sdxl-turbo" # Replace to the model you would like to use
|
55 |
-
|
56 |
-
if torch.cuda.is_available():
|
57 |
-
torch_dtype = torch.float16
|
58 |
-
else:
|
59 |
-
torch_dtype = torch.float32
|
60 |
-
|
61 |
-
pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
|
62 |
-
pipe = pipe.to(device)
|
63 |
-
|
64 |
-
MAX_SEED = np.iinfo(np.int32).max
|
65 |
-
MAX_IMAGE_SIZE = 1024
|
66 |
-
|
67 |
-
|
68 |
-
# @spaces.GPU #[uncomment to use ZeroGPU]
|
69 |
-
def infer(
|
70 |
-
prompt,
|
71 |
-
negative_prompt,
|
72 |
-
seed,
|
73 |
-
randomize_seed,
|
74 |
-
width,
|
75 |
-
height,
|
76 |
-
guidance_scale,
|
77 |
-
num_inference_steps,
|
78 |
-
progress=gr.Progress(track_tqdm=True),
|
79 |
-
):
|
80 |
-
if randomize_seed:
|
81 |
-
seed = random.randint(0, MAX_SEED)
|
82 |
-
|
83 |
-
generator = torch.Generator().manual_seed(seed)
|
84 |
|
85 |
-
|
86 |
-
|
87 |
-
negative_prompt=negative_prompt,
|
88 |
-
guidance_scale=guidance_scale,
|
89 |
-
num_inference_steps=num_inference_steps,
|
90 |
-
width=width,
|
91 |
-
height=height,
|
92 |
-
generator=generator,
|
93 |
-
).images[0]
|
94 |
|
95 |
-
|
96 |
|
|
|
|
|
97 |
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
|
|
103 |
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
}
|
109 |
-
"""
|
110 |
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
|
115 |
-
|
116 |
-
prompt = gr.Text(
|
117 |
-
label="Prompt",
|
118 |
-
show_label=False,
|
119 |
-
max_lines=5,
|
120 |
-
placeholder="Enter your prompt",
|
121 |
-
container=False,
|
122 |
-
)
|
123 |
|
124 |
-
|
|
|
|
|
|
|
125 |
|
126 |
-
|
127 |
-
|
128 |
-
|
129 |
-
negative_prompt = gr.Text(
|
130 |
-
label="Negative prompt",
|
131 |
-
max_lines=5,
|
132 |
-
placeholder="(visible hand:1.3), (ugly:1.3), (duplicate:1.2), (morbid:1.1), (mutilated:1.1), out of frame, bad face, extra fingers, mutated hands, (poorly drawn hands:1.1), (poorly drawn face:1.3), (mutation:1.3), (deformed:1.3), blurry, (bad anatomy:1.1), (bad proportions:1.2), (extra limbs:1.1), cloned face, (disfigured:1.2), gross proportions, malformed limbs, (missing arms:1.1), (missing legs:1.1), (extra arms:1.2), (extra legs:1.2), fused fingers, too many fingers, (long neck:1.2), sketched by bad-artist, (bad-image-v2-39000:1.3)",
|
133 |
-
visible=False,
|
134 |
-
)
|
135 |
-
|
136 |
-
seed = gr.Slider(
|
137 |
-
label="Seed",
|
138 |
-
minimum=0,
|
139 |
-
maximum=MAX_SEED,
|
140 |
-
step=1,
|
141 |
-
value=0,
|
142 |
-
)
|
143 |
-
|
144 |
-
randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
|
145 |
-
|
146 |
-
with gr.Row():
|
147 |
-
width = gr.Slider(
|
148 |
-
label="Width",
|
149 |
-
minimum=512,
|
150 |
-
maximum=MAX_IMAGE_SIZE,
|
151 |
-
step=32,
|
152 |
-
value=896, # Replace with defaults that work for your model
|
153 |
-
)
|
154 |
-
|
155 |
-
height = gr.Slider(
|
156 |
-
label="Height",
|
157 |
-
minimum=512,
|
158 |
-
maximum=MAX_IMAGE_SIZE,
|
159 |
-
step=32,
|
160 |
-
value=1150, # Replace with defaults that work for your model
|
161 |
-
)
|
162 |
-
|
163 |
-
with gr.Row():
|
164 |
-
guidance_scale = gr.Slider(
|
165 |
-
label="Guidance scale",
|
166 |
-
minimum=0.0,
|
167 |
-
maximum=10.0,
|
168 |
-
step=0.1,
|
169 |
-
value=5.0, # Replace with defaults that work for your model
|
170 |
-
)
|
171 |
-
|
172 |
-
num_inference_steps = gr.Slider(
|
173 |
-
label="Number of inference steps",
|
174 |
-
minimum=1,
|
175 |
-
maximum=50,
|
176 |
-
step=1,
|
177 |
-
value=30, # Replace with defaults that work for your model
|
178 |
-
)
|
179 |
-
|
180 |
-
gr.Examples(examples=examples, inputs=[prompt])
|
181 |
-
gr.on(
|
182 |
-
triggers=[run_button.click, prompt.submit],
|
183 |
-
fn=infer,
|
184 |
-
inputs=[
|
185 |
-
prompt,
|
186 |
-
negative_prompt,
|
187 |
-
seed,
|
188 |
-
randomize_seed,
|
189 |
-
width,
|
190 |
-
height,
|
191 |
-
guidance_scale,
|
192 |
-
num_inference_steps,
|
193 |
-
],
|
194 |
-
outputs=[result, seed],
|
195 |
)
|
196 |
-
|
197 |
-
|
198 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# OMP_NUM_THREADS is read by the OpenMP runtime when it initializes, which
# happens the first time torch is imported -- so it must be exported BEFORE
# the torch import below, not after (setting it afterwards has no effect).
import os

_cpu_count = os.cpu_count() or 1  # os.cpu_count() can return None
os.environ["OMP_NUM_THREADS"] = str(_cpu_count)

import threading

import gradio as gr
import torch

# Use every available core for intra-op parallelism on this CPU-only Space.
torch.set_num_threads(_cpu_count)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
8 |
|
9 |
# Text-to-image models loaded through Gradio's model loader.  Each loaded
# model is callable with a prompt string (see generate_images below);
# presumably each call returns a generated image -- confirm against the
# gr.load docs for "models/..." sources.
model1 = gr.load("models/prithivMLmods/SD3.5-Turbo-Realism-2.0-LoRA")
model2 = gr.load("models/Purz/face-projection")

# Cooperative cancellation flag: generate_images() checks it between
# generations, stop_generation() sets it.
stop_event = threading.Event()
13 |
|
14 |
def generate_images(text, selected_model):
    """Generate three variations of *text* with the selected model.

    Returns a list of three results (one per prompt variation).  If the
    selection is unknown, or the stop flag is raised mid-run, returns a
    list of three identical status strings instead.
    """
    # A fresh run always starts with the cancellation flag lowered.
    stop_event.clear()

    if selected_model == "Model 1 (Turbo Realism)":
        chosen = model1
    elif selected_model == "Model 2 (Face Projection)":
        chosen = model2
    else:
        return ["Invalid model selection."] * 3

    outputs = []
    for idx in range(3):
        # Check between generations so a user-requested stop takes effect
        # before the next (slow) model call.
        if stop_event.is_set():
            return ["Image generation stopped by user."] * 3
        outputs.append(chosen(f"{text} variation {idx + 1}"))

    return outputs
|
|
|
|
|
|
|
|
|
|
|
|
|
|
34 |
|
35 |
def stop_generation():
    """Stops the ongoing image generation by setting the stop_event flag."""
    stop_event.set()
    # Replace all three image slots with the same status message.
    message = "Generation stopped."
    return [message, message, message]
39 |
|
40 |
# Gradio UI: prompt box + model picker, generate/stop buttons, and three
# image slots that both callbacks write into.
with gr.Blocks() as interface:
    gr.Markdown(
        "### ⚠ Sorry for the inconvenience. The Space is currently running on the CPU, which might affect performance. We appreciate your understanding."
    )

    text_input = gr.Textbox(label="Type here your imagination:", placeholder="Type your prompt...")
    model_selector = gr.Radio(
        ["Model 1 (Turbo Realism)", "Model 2 (Face Projection)"],
        label="Select Model",
        value="Model 1 (Turbo Realism)"
    )

    with gr.Row():
        generate_button = gr.Button("Generate 3 Images 🎨")
        stop_button = gr.Button("Stop Image Generation")

    with gr.Row():
        output1 = gr.Image(label="Generated Image 1")
        output2 = gr.Image(label="Generated Image 2")
        output3 = gr.Image(label="Generated Image 3")

    # Both callbacks target the same three outputs: generate fills them with
    # images, stop overwrites them with a status message.
    generate_button.click(generate_images, inputs=[text_input, model_selector], outputs=[output1, output2, output3])
    stop_button.click(stop_generation, inputs=[], outputs=[output1, output2, output3])

interface.launch()