DigiP-AI committed on
Commit 743fbf0 · verified · 1 Parent(s): 90c1c1d

Update app.py

Files changed (1)
  1. app.py +191 -47
app.py CHANGED
@@ -1,64 +1,208 @@
  import gradio as gr
- import threading
  import os
- import torch

- os.environ["OMP_NUM_THREADS"] = str(os.cpu_count())
- torch.set_num_threads(os.cpu_count())

- model1 = gr.load("models/prithivMLmods/SD3.5-Turbo-Realism-2.0-LoRA")
- model2 = gr.load("models/Purz/face-projection")

- stop_event = threading.Event()

- def generate_images(text, selected_model):
-     stop_event.clear()

-     if selected_model == "Model 1 (Turbo Realism)":
-         model = model1
-     elif selected_model == "Model 2 (Face Projection)":
-         model = model2
-     else:
-         return ["Invalid model selection."] * 3

-     results = []
-     for i in range(3):
-         if stop_event.is_set():
-             return ["Image generation stopped by user."] * 3

-         modified_text = f"{text} variation {i+1}"
-         result = model(modified_text)
-         results.append(result)

-     return results

- def stop_generation():
-     """Stops the ongoing image generation by setting the stop_event flag."""
-     stop_event.set()
-     return ["Generation stopped."] * 3

- with gr.Blocks() as interface:  # ...
-     gr.Markdown(
-         "### ⚠ Sorry for the inconvenience. The Space is currently running on the CPU, which might affect performance. We appreciate your understanding."
-     )

-     text_input = gr.Textbox(label="Type here your imagination:", placeholder="Type your prompt...")
-     model_selector = gr.Radio(
-         ["Model 1 (Turbo Realism)", "Model 2 (Face Projection)"],
-         label="Select Model",
-         value="Model 1 (Turbo Realism)"
-     )

-     with gr.Row():
-         generate_button = gr.Button("Generate 3 Images 🎨")
-         stop_button = gr.Button("Stop Image Generation")

-     with gr.Row():
-         output1 = gr.Image(label="Generated Image 1")
-         output2 = gr.Image(label="Generated Image 2")
-         output3 = gr.Image(label="Generated Image 3")

-     generate_button.click(generate_images, inputs=[text_input, model_selector], outputs=[output1, output2, output3])
-     stop_button.click(stop_generation, inputs=[], outputs=[output1, output2, output3])

- interface.launch()
  import gradio as gr
+ import requests
+ import io
+ import random
  import os
+ import time
+ from PIL import Image
+ from deep_translator import GoogleTranslator
+ import json

+ from datetime import datetime
+ from fastapi import FastAPI

+ app = FastAPI()

+ #----------Start of theme----------
+ theme = gr.themes.Ocean(
+     primary_hue="zinc",
+     secondary_hue="slate",
+     neutral_hue="neutral",
+     font=[gr.themes.GoogleFont('Kavivanar'), 'system-ui', 'sans-serif'],
+     font_mono=[gr.themes.GoogleFont('Source Code Pro'), gr.themes.GoogleFont('Inconsolata'), 'monospace'],
+ ).set(
+     # Body settings
+     body_background_fill='linear-gradient(10deg, *primary_200, *secondary_50)',
+     body_text_color='*secondary_600',
+     body_text_color_subdued='*primary_500',
+     body_text_weight='500',
+
+     # Background settings
+     background_fill_primary='*primary_100',
+     background_fill_secondary='*secondary_200',
+     color_accent='*primary_300',
+
+     # Border settings
+     border_color_accent_subdued='*primary_400',
+     border_color_primary='*primary_400',
+
+     # Block settings
+     block_radius='*radius_md',
+     block_background_fill='*primary_200',
+     block_border_color='*primary_500',
+     block_border_width='*panel_border_width',
+     block_info_text_color='*primary_700',
+     block_info_text_size='*text_md',
+     container_radius='*radius_xl',
+     panel_background_fill='*primary_200',
+     accordion_text_color='*primary_600',
+     checkbox_border_radius='*radius_xl',
+     slider_color='*primary_500',
+     table_text_color='*primary_600',
+     input_background_fill='*primary_50',
+     input_background_fill_focus='*primary_100',
+
+     # Button settings
+     button_border_width='1px',
+     button_transform_hover='scale(1.01)',
+     button_transition='all 0.1s ease-in-out',
+     button_transform_active='scale(0.9)',
+     button_large_radius='*radius_xl',
+     button_medium_radius='*radius_xl',
+     button_small_radius='*radius_xl',
+     button_primary_border_color='*primary_500',
+     button_secondary_border_color='*primary_400',
+     button_primary_background_fill_hover='linear-gradient(90deg, *primary_400, *secondary_200, *primary_400)',
+     button_primary_background_fill='linear-gradient(90deg, *secondary_300, *primary_500, *secondary_300)',
+     button_primary_text_color='*primary_100',
+     button_primary_text_color_hover='*primary_700',
+     button_cancel_background_fill='*primary_500',
+     button_cancel_background_fill_hover='*primary_400'
+ )
+ #----------End of theme----------

+ # Project by Nymbo

+ API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3.5-large"
+ API_TOKEN = os.getenv("HF_READ_TOKEN")
+ headers = {"Authorization": f"Bearer {API_TOKEN}"}
+ timeout = 100
+
+ # Function to clear the image outputs
+ def clear():
+     return None, None
+
+ # Function to query the API and return the generated image
+ def query(prompt, is_negative=False, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=896, height=1152):
+     if prompt == "" or prompt is None:
+         return None
+
+     key = random.randint(0, 999)
+
+     API_TOKEN = random.choice([os.getenv("HF_READ_TOKEN")])
+     headers = {"Authorization": f"Bearer {API_TOKEN}"}
+
+     # Translate the prompt from Russian to English if necessary
+     prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
+     print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')
+
+     # Add some extra flair to the prompt
+     prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
+     print(f'\033[1mGeneration {key}:\033[0m {prompt}')
+
+     # Prepare the payload for the API call, including width and height
+     payload = {
+         "inputs": prompt,
+         "is_negative": is_negative,
+         "steps": steps,
+         "cfg_scale": cfg_scale,
+         "seed": seed if seed != -1 else random.randint(1, 1000000000),
+         "strength": strength,
+         "parameters": {
+             "width": width,   # Pass the width to the API
+             "height": height  # Pass the height to the API
+         }
+     }
+
+     # Send the request to the API and handle the response
+     response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
+     if response.status_code != 200:
+         print(f"Error: Failed to get image. Response status: {response.status_code}")
+         print(f"Response content: {response.text}")
+         if response.status_code == 503:
+             raise gr.Error(f"{response.status_code} : The model is being loaded")
+         raise gr.Error(f"{response.status_code}")
+
+     try:
+         # Convert the response content into an image
+         image_bytes = response.content
+         image = Image.open(io.BytesIO(image_bytes))
+         print(f'\033[1mGeneration {key} completed!\033[0m ({prompt})')
+         return image
+     except Exception as e:
+         print(f"Error when trying to open the image: {e}")
+         return None
+
+ examples = [
+     "a beautiful woman with blonde hair and blue eyes",
+     "a beautiful woman with brown hair and grey eyes",
+     "a beautiful woman with black hair and brown eyes",
+ ]
+
+ # CSS to style the app
+ css = """
+ #app-container {
+     max-width: 930px;
+     margin-left: auto;
+     margin-right: auto;
+     background-image: url("https://drive.google.com/file/d/1Kz2pi93EfsEHw90fil6XJBoSq9f-BlkJ");
+     background-repeat: repeat;
+ }
+ .gradio-container {
+     background: url('file/abstract.png');
+ }
+ """
+
+ # Build the Gradio UI with Blocks
+ with gr.Blocks(theme=theme, css=css) as app:
+     # Add a title to the app
+     gr.HTML("<center><h1>🎨 Stable Diffusion 3.5 🇬🇧</h1></center>")
+
+     # Container for all the UI elements
+     with gr.Column(elem_id="app-container"):
+         # Add a text input for the main prompt
+         with gr.Row():
+             with gr.Column(elem_id="prompt-container"):
+                 with gr.Row():
+                     text_prompt = gr.Textbox(label="Image Prompt", placeholder="Enter a prompt here", lines=2, show_copy_button=True, elem_id="prompt-text-input")
+
+         # Accordion for advanced settings
+         with gr.Row():
+             with gr.Accordion("Advanced Settings", open=False):
+                 negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What should not be in the image", value="((visible hand:1.3), (ugly:1.3), (duplicate:1.2), (morbid:1.1), (mutilated:1.1), out of frame, bad face, extra fingers, mutated hands, (poorly drawn hands:1.1), (poorly drawn face:1.3), (mutation:1.3), (deformed:1.3), blurry, (bad anatomy:1.1), (bad proportions:1.2), (extra limbs:1.1), cloned face, (disfigured:1.2), gross proportions, malformed limbs, (missing arms:1.1), (missing legs:1.1), (extra arms:1.2), (extra legs:1.2), fused fingers, too many fingers, (long neck:1.2), sketched by bad-artist, (bad-image-v2-39000:1.3)", lines=5, elem_id="negative-prompt-text-input")
+                 with gr.Row():
+                     width = gr.Slider(label="Image Width", value=896, minimum=64, maximum=1216, step=32)
+                     height = gr.Slider(label="Image Height", value=1152, minimum=64, maximum=1216, step=32)
+                     steps = gr.Slider(label="Sampling steps", value=50, minimum=1, maximum=100, step=1)
+                     cfg = gr.Slider(label="CFG Scale", value=3.5, minimum=1, maximum=20, step=1)
+                     strength = gr.Slider(label="Prompt Strength", value=100, minimum=0, maximum=100, step=1)
+                     seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)  # Setting the seed to -1 will make it random
+                     method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "DEIS", "LMS", "DPM Adaptive", "DPM++ 2M", "DPM2 Ancestral", "DPM++ S", "DPM++ SDE", "DDPM", "DPM Fast", "dpmpp_2s_ancestral", "Euler", "Euler CFG PP", "Euler a", "Euler Ancestral", "Euler+beta", "Heun", "Heun PP2", "DDIM", "PLMS", "UniPC", "UniPC BH2"])
+
+         # Add a button to trigger the image generation
+         with gr.Row():
+             text_button = gr.Button("Generate Image", variant='primary', elem_id="gen-button")
+             clr_button = gr.Button("Clear Prompt", variant="primary", elem_id="clear_button")
+             clr_button.click(lambda: gr.Textbox(value=""), None, text_prompt)
+
+         # Image output area to display the generated images
+         with gr.Row():
+             image_output1 = gr.Image(type="pil", label="Image Output 1", format="png", elem_id="gallery")
+             image_output2 = gr.Image(type="pil", label="Image Output 2", format="png", elem_id="gallery")
+
+         with gr.Row():
+             clear_btn = gr.Button(value="Clear Image", variant="primary", elem_id="clear_button")
+             clear_btn.click(clear, inputs=[], outputs=[image_output1, image_output2])
+
+         gr.Examples(
+             examples=examples,
+             inputs=[text_prompt],
+         )
+
+         # Run the query once per output slot so both image components receive
+         # an image (with seed=-1, each call draws its own random seed)
+         def generate_two(*args):
+             return query(*args), query(*args)
+
+         # Bind the button to the generation function with the added width and height inputs
+         text_button.click(generate_two, inputs=[text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height], outputs=[image_output1, image_output2])

+ if __name__ == "__main__":
+     # Launch the Gradio app
+     app.launch(show_api=False, share=False)
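
For context, the heart of the new app.py is the single POST request that query() sends to the hosted Inference API. Below is a minimal standalone sketch of that request under the same assumptions the app makes: HF_READ_TOKEN is set in the environment, the prompt and the 896x1152 size come from the app's own examples and slider defaults, and "sample.png" is just an illustrative output filename. The sketch only forwards "inputs" and "parameters"; the extra top-level keys the app includes (steps, cfg_scale, etc.) are passed through unchanged by query() and are not required for a basic call.

import io
import os

import requests
from PIL import Image

# Same endpoint and auth header as the app
API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3.5-large"
headers = {"Authorization": f"Bearer {os.getenv('HF_READ_TOKEN')}"}  # assumes HF_READ_TOKEN is set

# Prompt taken from the app's examples; width/height match the app's slider defaults
payload = {
    "inputs": "a beautiful woman with blonde hair and blue eyes",
    "parameters": {"width": 896, "height": 1152},
}

response = requests.post(API_URL, headers=headers, json=payload, timeout=100)
response.raise_for_status()  # surfaces 503 "model is loading" and other HTTP errors

# The API returns raw image bytes; decode and save them ("sample.png" is a hypothetical name)
Image.open(io.BytesIO(response.content)).save("sample.png")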