DigiP-AI committed on
Commit
fcb9503
·
verified ·
1 Parent(s): 7c81c80

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +256 -0
app.py ADDED
@@ -0,0 +1,256 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import requests
3
+ import io
4
+ import random
5
+ import os
6
+ import time
7
+ import json
8
+ import base64
9
+ from io import BytesIO
10
+ from datetime import datetime
11
+ from PIL import Image
12
+ from mistralai import Mistral
13
+ from deep_translator import GoogleTranslator
14
+ import json
15
+ from theme import theme
16
+ from fastapi import FastAPI
17
+
18
+ app = FastAPI()
19
+
20
+
21
+ API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
22
+ API_TOKEN = os.getenv("HF_READ_TOKEN")
23
+ headers = {"Authorization": f"Bearer {API_TOKEN}"}
24
+ timeout = 100
25
+
26
+ api_key = os.getenv("MISTRAL_API_KEY")
27
+ Mistralclient = Mistral(api_key=api_key)
28
+
29
+ # Function to query the API and return the generated image
30
+ def query(prompt, is_negative=False, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024):
31
+ if prompt == "" or prompt is None:
32
+ return None
33
+
34
+ key = random.randint(0, 999)
35
+
36
+ API_TOKEN = random.choice([os.getenv("HF_READ_TOKEN")])
37
+ headers = {"Authorization": f"Bearer {API_TOKEN}"}
38
+
39
+ # Translate the prompt from Russian to English if necessary
40
+ prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
41
+ print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')
42
+
43
+ # Add some extra flair to the prompt
44
+ prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
45
+ print(f'\033[1mGeneration {key}:\033[0m {prompt}')
46
+
47
+ # If seed is -1, generate a random seed and use it
48
+ if seed == -1:
49
+ seed = random.randint(1, 1000000000)
50
+
51
+ # Prepare the payload for the API call, including width and height
52
+ payload = {
53
+ "inputs": prompt,
54
+ "is_negative": is_negative,
55
+ "steps": steps,
56
+ "cfg_scale": cfg_scale,
57
+ "seed": seed if seed != -1 else random.randint(1, 1000000000),
58
+ "strength": strength,
59
+ "parameters": {
60
+ "width": width, # Pass the width to the API
61
+ "height": height # Pass the height to the API
62
+ }
63
+ }
64
+
65
+ # Send the request to the API and handle the response
66
+ response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
67
+ if response.status_code != 200:
68
+ print(f"Error: Failed to get image. Response status: {response.status_code}")
69
+ print(f"Response content: {response.text}")
70
+ if response.status_code == 503:
71
+ raise gr.Error(f"{response.status_code} : The model is being loaded")
72
+ raise gr.Error(f"{response.status_code}")
73
+
74
+ try:
75
+ # Convert the response content into an image
76
+ image_bytes = response.content
77
+ image = Image.open(io.BytesIO(image_bytes))
78
+ print(f'\033[1mGeneration {key} completed!\033[0m ({prompt})')
79
+ return image
80
+ except Exception as e:
81
+ print(f"Error when trying to open the image: {e}")
82
+ return None
83
+
84
+ def encode_image(image_path):
85
+ """Encode the image to base64."""
86
+ try:
87
+ # Open the image file
88
+ image = Image.open(image_path).convert("RGB")
89
+
90
+ # Resize the image to a height of 512 while maintaining the aspect ratio
91
+ base_height = 512
92
+ h_percent = (base_height / float(image.size[1]))
93
+ w_size = int((float(image.size[0]) * float(h_percent)))
94
+ image = image.resize((w_size, base_height), Image.LANCZOS)
95
+
96
+ # Convert the image to a byte stream
97
+ buffered = BytesIO()
98
+ image.save(buffered, format="JPEG")
99
+ img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
100
+
101
+ return img_str
102
+ except FileNotFoundError:
103
+ print(f"Error: The file {image_path} was not found.")
104
+ return None
105
+ except Exception as e: # Add generic exception handling
106
+ print(f"Error: {e}")
107
+ return None
108
+
109
+ def feifeichat(image):
110
+ try:
111
+ model = "pixtral-12b-2409"
112
+ # Define the messages for the chat
113
+ base64_image = encode_image(image)
114
+ messages = [{
115
+ "role":
116
+ "user",
117
+ "content": [
118
+ {
119
+ "type": "text",
120
+ "text": "Please provide a detailed description of this photo"
121
+ },
122
+ {
123
+ "type": "image_url",
124
+ "image_url": f"data:image/jpeg;base64,{base64_image}"
125
+ },
126
+ ],
127
+ "stream": False,
128
+ }]
129
+
130
+ partial_message = ""
131
+ for chunk in Mistralclient.chat.stream(model=model, messages=messages):
132
+ if chunk.data.choices[0].delta.content is not None:
133
+ partial_message = partial_message + chunk.data.choices[
134
+ 0].delta.content
135
+ yield partial_message
136
+ except Exception as e: # 添加通用异常处理
137
+ print(f"Error: {e}")
138
+ return "Please upload a photo"
139
+
140
+
141
+ examples = [
142
+ "a beautiful woman with blonde hair and blue eyes",
143
+ "a beautiful woman with brown hair and grey eyes",
144
+ "a beautiful woman with black hair and brown eyes",
145
+ ]
146
+
147
+ # CSS to style the app
148
+ css = """
149
+ .gradio-container {background-color: MediumAquaMarine}
150
+ #app-container {
151
+ max-width: 930px;
152
+ margin-left: auto;
153
+ margin-right: auto;
154
+ }
155
+ footer {
156
+ visibility: hidden;
157
+ }
158
+ """
159
+
160
+ # Gradio Interface
161
+
162
+ with gr.Blocks(css=css, theme=theme) as app:
163
+ gr.HTML("<h1><center>Flux Dev Lab</center></h1>")
164
+ with gr.Tab(label="Image To Flux Prompt"):
165
+ with gr.Row():
166
+ with gr.Column(scale=4):
167
+ input_img = gr.Image(label="Input Picture",height=520, type="filepath")
168
+
169
+ with gr.Column(scale=3):
170
+ output_text = gr.Textbox(label="Flux Prompt", lines=2, scale=6, show_copy_button = True)
171
+ submit_btn = gr.Button(value="Generate Pompt", scale=4, variant='primary')
172
+ clear_prompt =gr.Button("Clear 🗑️",variant="primary", elem_id="clear_button")
173
+ clear_prompt.click(lambda: (None, None), None, [input_img, output_text], queue=False, show_api=False)
174
+
175
+ submit_btn.click(feifeichat, [input_img], [output_text])
176
+
177
+ with gr.Tab(label="Generate Image"):
178
+ with gr.Row():
179
+ with gr.Column(scale=4):
180
+ with gr.Row():
181
+ img_output = gr.Image(type="pil", label="Image Output", show_share_button=False, format="png", elem_id="gallery")
182
+ with gr.Row():
183
+ text_prompt = gr.Textbox(label="Image Prompt ✍️", placeholder="Enter prompt...", lines=2, scale=6, show_copy_button = True, elem_id="prompt-text-input")
184
+ text_button = gr.Button("Generate Image",scale=1, variant='primary', elem_id="gen-button")
185
+ clear_prompt =gr.Button("Clear 🗑️",variant="primary", elem_id="clear_button")
186
+ clear_prompt.click(lambda: (None, None), None, [text_prompt, img_output], queue=False, show_api=False)
187
+ with gr.Accordion("Advanced Options", open=True):
188
+ with gr.Column(scale=1):
189
+ negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What should not be in the image", value="( (((hands:-1.25))), physical-defects:2, unhealthy-deformed-joints:2, unhealthy-hands:2, out of frame, (((bad face))), (bad-image-v2-39000:1.3), (((out of frame))), deformed body features, (((poor facial details))), (poorly drawn face:1.3), jpeg artifacts, (missing arms:1.1), (missing legs:1.1), (extra arms:1.2), (extra legs:1.2), [asymmetrical features], warped expressions, distorted eyes ", lines=6, elem_id="negative-prompt-text-input")
190
+
191
+ width = gr.Slider(
192
+ label="Width",
193
+ minimum=512,
194
+ maximum=1280,
195
+ step=8,
196
+ value=896,
197
+ )
198
+ height = gr.Slider(
199
+ label="Height",
200
+ minimum=512,
201
+ maximum=1280,
202
+ step=8,
203
+ value=1152,
204
+ )
205
+ method = gr.Dropdown(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ 2S a Karras", "DPM2 a Karras", "DPM2 Karras", "DPM++ SDE Karras", "DEIS", "LMS", "DPM Adaptive", "DPM++ 2M", "DPM2 Ancestral", "DPM++ S", "DPM++ SDE", "DDPM", "DPM Fast", "dpmpp_2s_ancestral", "Euler", "Euler CFG PP", "Euler a", "Euler Ancestral", "Euler+beta", "Heun", "Heun PP2", "DDIM", "LMS Karras", "PLMS", "UniPC", "UniPC BH2"])
206
+ steps = gr.Slider(
207
+ label="Sampling steps",
208
+ minimum=1,
209
+ maximum=100,
210
+ step=1,
211
+ value=24,
212
+ )
213
+ cfg = gr.Slider(
214
+ label="CFG Scale",
215
+ minimum=3.5,
216
+ maximum=7,
217
+ step=0.1,
218
+ value=3.5,
219
+ )
220
+ strength = gr.Slider(label="Strength", value=90, minimum=0, maximum=100, step=10)
221
+ seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
222
+
223
+ gr.Examples(
224
+ examples = examples,
225
+ inputs = [text_prompt],
226
+ )
227
+
228
+ # Bind the button to the query function with the added width and height inputs
229
+ text_button.click(query, inputs=[text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height], outputs=img_output)
230
+
231
+ with gr.Tab("ℹ️ Tips"):
232
+ with gr.Row():
233
+ with gr.Column():
234
+ gr.Markdown(
235
+ """
236
+ <div style="max-width: 650px; margin: 2rem auto; padding: 1rem; border-radius: 10px; background-color: #f0f0f0;">
237
+ <h2 style="float: left; font-size: 1.5rem; margin-bottom: 1rem;">How to Use</h2>
238
+ <ol style="padding-left: 1.5rem;">
239
+ <li>Add an image to generate a prompt, this is optional.</li>
240
+ <li>If using an image to prompt, copy the prompt and paste into the prompt on tab 2</li>
241
+ <li>Enter a detailed description of the image you want to create.</li>
242
+ <li>Adjust advanced settings if desired (tap to expand).</li>
243
+ <li>Tap "Generate Image" and wait for your creation!</li>
244
+ </ol>
245
+ <p style="margin-top: 1rem; font-style: italic;">Tip: Be specific in your description for best results!</p>
246
+ <p style="margin-top: 1rem; font-style: italic;">*Note: Some LoRA models will not work every time (not sure why), refresh the page and try again</p>
247
+ <p style="margin-top: 1rem; font-style: italic;">*I'm still playing around to try to sort the issue, feel free to let me know if you find a fix</p>
248
+ </div>
249
+ """
250
+ )
251
+
252
+
253
+ app.queue(default_concurrency_limit=200, max_size=200) # <-- Sets up a queue with default parameters
254
+ if __name__ == "__main__":
255
+ app.launch(show_api=False, share=False)
256
+