greendra committed on
Commit
92cbf13
·
verified ·
1 Parent(s): fc9e72f

Update app.py

Files changed (1)
  1. app.py +278 -66
app.py CHANGED
@@ -1,3 +1,9 @@
  import gradio as gr
  import requests
  import io
@@ -7,113 +13,319 @@ import time
  from PIL import Image
  from deep_translator import GoogleTranslator
  import json

  # Project by Nymbo

- API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
- API_TOKEN = os.getenv("HF_READ_TOKEN")
- headers = {"Authorization": f"Bearer {API_TOKEN}"}
  timeout = 100

  # Function to query the API and return the generated image
- def query(prompt, is_negative=False, steps=30, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024):
-     if prompt == "" or prompt is None:
          return None

-     key = random.randint(0, 999)
-
-     API_TOKEN = random.choice([os.getenv("HF_READ_TOKEN")])
-     headers = {"Authorization": f"Bearer {API_TOKEN}"}
-
-     # Translate the prompt from Russian to English if necessary
-     prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
-     print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')

      # Add some extra flair to the prompt
-     prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
-     print(f'\033[1mGeneration {key}:\033[0m {prompt}')
-
      # Prepare the payload for the API call, including width and height
      payload = {
-         "inputs": prompt,
-         "is_negative": is_negative,
-         "steps": steps,
-         "cfg_scale": cfg_scale,
-         "seed": seed if seed != -1 else random.randint(1, 1000000000),
-         "strength": strength,
          "parameters": {
-             "width": width,  # Pass the width to the API
-             "height": height  # Pass the height to the API
          }
      }

      # Send the request to the API and handle the response
-     response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
-     if response.status_code != 200:
-         print(f"Error: Failed to get image. Response status: {response.status_code}")
-         print(f"Response content: {response.text}")
-         if response.status_code == 503:
-             raise gr.Error(f"{response.status_code} : The model is being loaded")
-         raise gr.Error(f"{response.status_code}")
-
      try:
          # Convert the response content into an image
          image_bytes = response.content
          image = Image.open(io.BytesIO(image_bytes))
-         print(f'\033[1mGeneration {key} completed!\033[0m ({prompt})')
          return image
-     except Exception as e:
-         print(f"Error when trying to open the image: {e}")
-         return None

  # CSS to style the app
  css = """
  #app-container {
-   max-width: 800px;
    margin-left: auto;
    margin-right: auto;
  }
- textarea:focus {
-     background: #0d1117 !important;
- }
  """

  # Build the Gradio UI with Blocks
- with gr.Blocks(theme='Nymbo/Nymbo_Theme', css=css) as app:
      # Add a title to the app
-     gr.HTML("<center><h1>FLUX.1-Schnell</h1></center>")
-
      # Container for all the UI elements
      with gr.Column(elem_id="app-container"):
          # Add a text input for the main prompt
          with gr.Row():
-             with gr.Column(elem_id="prompt-container"):
-                 with gr.Row():
-                     text_prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt here", lines=2, elem_id="prompt-text-input")
-
-         # Accordion for advanced settings
-         with gr.Row():
-             with gr.Accordion("Advanced Settings", open=False):
-                 negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What should not be in the image", value="(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, misspellings, typos", lines=3, elem_id="negative-prompt-text-input")
-                 with gr.Row():
-                     width = gr.Slider(label="Width", value=1024, minimum=64, maximum=1216, step=32)
-                     height = gr.Slider(label="Height", value=1024, minimum=64, maximum=1216, step=32)
-                     steps = gr.Slider(label="Sampling steps", value=4, minimum=1, maximum=100, step=1)
-                     cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=1)
-                     strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.001)
-                     seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)  # Setting the seed to -1 will make it random
-                     method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])

          # Add a button to trigger the image generation
          with gr.Row():
-             text_button = gr.Button("Run", variant='primary', elem_id="gen-button")
-
          # Image output area to display the generated image
          with gr.Row():
-             image_output = gr.Image(type="pil", label="Image Output", elem_id="gallery")
-
-         # Bind the button to the query function with the added width and height inputs
-         text_button.click(query, inputs=[text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height], outputs=image_output)

  # Launch the Gradio app
- app.launch(show_api=False, share=False)
+ Okay, let's integrate gr.Examples into your script with some diverse, high-quality Stable Diffusion-style prompts and enable caching.
+
+ Caching examples means that when the Space first builds (or rebuilds after changes), it will run the query function once for each example and store the resulting image. When a user clicks that example later, the cached image is shown instantly instead of running the model again.
+
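In isolation, the caching hook is just gr.Examples pointed at the generation function with cache_examples=True. A minimal, self-contained sketch of that mechanism (not part of this commit; the generate function and the example prompt below are placeholders):

import gradio as gr

# Hypothetical stand-in for the Space's real inference function.
def generate(prompt: str) -> str:
    return f"(would render an image for: {prompt})"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    output = gr.Textbox(label="Output")
    gr.Examples(
        examples=[["a medieval knight in a misty forest"]],  # one inner list per example, one value per input
        inputs=[prompt],
        outputs=output,
        fn=generate,          # used to pre-compute each example's result at build time
        cache_examples=True,  # store the results; clicking an example replays the cached output instantly
    )

demo.launch()
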
+ Here's the modified app.py:
+
  import gradio as gr
  import requests
  import io
  from PIL import Image
  from deep_translator import GoogleTranslator
  import json
+ from typing import Callable, List, Any, Literal  # Added for type hinting Examples

  # Project by Nymbo

+ API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
+ API_TOKEN = os.getenv("HF_READ_TOKEN")  # Your main token for primary use
+ # Consider adding more read tokens if you have them for resilience
+ ADDITIONAL_TOKENS = [t for t in os.getenv("HF_EXTRA_TOKENS", "").split(',') if t]  # Example: HF_EXTRA_TOKENS="token1,token2"
+ ALL_TOKENS = [t for t in [API_TOKEN] + ADDITIONAL_TOKENS if t]  # Drop unset/empty tokens
+ if not API_TOKEN:
+     print("Warning: HF_READ_TOKEN is not set. API calls may fail.")
+     # Optional: raise an error or use a dummy token if needed
+     # raise ValueError("HF_READ_TOKEN environment variable is required.")
+
  timeout = 100

  # Function to query the API and return the generated image
+ # Added type hints for clarity
+ def query(
+     prompt: str,
+     negative_prompt: str = "(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, misspellings, typos",
+     steps: int = 35,
+     cfg_scale: float = 7.0,
+     sampler: str = "DPM++ 2M Karras",  # Matched default in UI
+     seed: int = -1,
+     strength: float = 0.7,
+     width: int = 1024,
+     height: int = 1024
+ ) -> Image.Image | None:
+
+     if not prompt:  # Simplified check
+         gr.Warning("Prompt cannot be empty.")
+         return None
+     if not ALL_TOKENS:
+         raise gr.Error("No Hugging Face API tokens available.")
          return None

+     key = random.randint(0, 9999)
+     start_time = time.time()
+
+     # Rotate through available tokens
+     selected_api_token = random.choice(ALL_TOKENS)
+     headers = {"Authorization": f"Bearer {selected_api_token}"}
+
+     translated_prompt = prompt
+     try:
+         # Simple check if likely Russian (Cyrillic) - adjust if needed
+         if any('\u0400' <= char <= '\u04FF' for char in prompt):
+             translated_prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
+             print(f'\033[1mGeneration {key} translation:\033[0m {translated_prompt}')
+         else:
+             # Assume English or other non-Russian if no Cyrillic detected
+             pass
+     except Exception as e:
+         print(f"Translation failed: {e}. Using original prompt.")
+         # Decide if you want to raise an error or just proceed
+         # gr.Warning(f"Translation failed: {e}. Using original prompt.")

      # Add some extra flair to the prompt
+     enhanced_prompt = f"{translated_prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
+     print(f'\033[1mGeneration {key} starting:\033[0m {enhanced_prompt}')
+
      # Prepare the payload for the API call, including width and height
      payload = {
+         "inputs": enhanced_prompt,
+         "negative_prompt": negative_prompt,  # Use the negative prompt input
+         # Note: The underlying API might not support all parameters directly.
+         # Check the specific model card for supported parameters.
+         # We send common ones; the model API will use what it understands.
          "parameters": {
+             "num_inference_steps": steps,
+             "guidance_scale": cfg_scale,
+             "seed": seed if seed != -1 else random.randint(1, 2**32 - 1),  # Use larger seed range
+             "strength": strength,  # Often used for img2img, may not apply here
+             "width": width,
+             "height": height,
+             # Sampler might not be directly controllable via this endpoint structure.
+             # The model often uses its default or recommended sampler.
+             # Sending it anyway in case future API versions support it.
+             "scheduler": sampler,
          }
      }
+     # Clean up payload if values are None or default where the API expects omission
+     if seed == -1:
+         del payload["parameters"]["seed"]  # Let the API choose a random seed if -1
+     if not negative_prompt:
+         # If empty string, some APIs prefer omitting the key entirely
+         del payload["negative_prompt"]
+
+     print(f"Payload for {key}: {json.dumps(payload, indent=2)}")

      # Send the request to the API and handle the response
      try:
+         response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
+         response.raise_for_status()  # Raises HTTPError for bad responses (4xx or 5xx)
+
          # Convert the response content into an image
          image_bytes = response.content
          image = Image.open(io.BytesIO(image_bytes))
+         end_time = time.time()
+         print(f'\033[1mGeneration {key} completed in {end_time - start_time:.2f}s!\033[0m')
          return image
+
+     except requests.exceptions.Timeout:
+         print(f"Error: Request timed out after {timeout} seconds.")
+         raise gr.Error(f"Request timed out ({timeout}s). Model might be busy. Try again later.")
+     except requests.exceptions.RequestException as e:
+         status_code = e.response.status_code if e.response is not None else "N/A"
+         error_content = e.response.text if e.response is not None else str(e)
+         print(f"Error: API request failed. Status: {status_code}, Content: {error_content}")
+         if status_code == 503:
+             # Check for specific error messages if available
+             if "is currently loading" in error_content:
+                 raise gr.Error("Model is loading. Please wait a moment and try again.")
+             else:
+                 raise gr.Error("Model service unavailable (503). It might be overloaded or down. Try again later.")
+         elif status_code == 400:
+             raise gr.Error(f"Bad Request (400). Check parameters. API response: {error_content[:200]}")
+         elif status_code == 429:
+             raise gr.Error("Too many requests (429). Rate limit hit. Please wait.")
+         else:
+             raise gr.Error(f"API Error ({status_code}). Response: {error_content[:200]}")
+     except (OSError, json.JSONDecodeError, IOError) as e:
+         # Catch potential issues with image decoding or other file errors
+         print(f"Error processing response or image: {e}")
+         raise gr.Error(f"Failed to process the image response: {e}")
+     except Exception as e:  # Catch any other unexpected errors
+         print(f"An unexpected error occurred: {e}")
+         import traceback
+         traceback.print_exc()  # Print full traceback to console logs
+         raise gr.Error(f"An unexpected error occurred: {e}")
+
+
+ # --- Gradio UI ---

  # CSS to style the app
  css = """
  #app-container {
+   max-width: 960px; /* Slightly wider */
    margin-left: auto;
    margin-right: auto;
  }
+ /* Add more styling if desired */
  """

+ # Default negative prompt can be reused
+ default_negative_prompt = "(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, misspellings, typos"
+
+ # Define the examples
+ # Each inner list corresponds to the inputs of the 'query' function IN ORDER:
+ # [prompt, negative_prompt, steps, cfg_scale, sampler, seed, strength, width, height]
+ example_list = [
+     [
+         "Epic cinematic shot of a medieval knight kneeling in a misty forest, volumetric lighting, hyperrealistic photo, 8k",
+         default_negative_prompt, 40, 7.5, "DPM++ 2M Karras", 12345, 0.7, 1024, 1024
+     ],
+     [
+         "Studio Ghibli style illustration of a cozy bakery storefront on a rainy day, warm lighting, detailed",
+         default_negative_prompt, 30, 6.0, "Euler a", 54321, 0.7, 1024, 1024
+     ],
+     [
+         "Macro photograph of a dewdrop on a spider web, intricate details, shallow depth of field, natural lighting",
+         "blurry, unfocused, cartoon", 50, 8.0, "DPM++ 2M Karras", -1, 0.7, 1024, 1024  # Random seed
+     ],
+     [
+         "Steampunk astronaut exploring an alien jungle landscape, brass and copper details, vibrant bioluminescent plants, wide angle",
+         default_negative_prompt, 35, 7.0, "DPM++ SDE Karras", 98765, 0.7, 1216, 832  # Different aspect ratio
+     ],
+     [
+         "Abstract geometric art, vibrant contrasting colors, sharp edges, minimalistic design, 4k wallpaper",
+         "photorealistic, noisy, cluttered", 25, 5.0, "Euler", -1, 0.7, 1024, 1024
+     ],
+     [
+         "Кот в очках читает книгу у камина",  # Russian example ("A cat in glasses reads a book by the fireplace") for translation testing
+         default_negative_prompt, 35, 7.0, "DPM++ 2M Karras", 11223, 0.7, 1024, 1024
+     ]
+ ]
+
+
  # Build the Gradio UI with Blocks
+ # Use the custom theme if it's available, otherwise fall back to the default
+ try:
+     theme = gr.themes.Base.load('Nymbo/Nymbo_Theme')
+ except Exception:
+     print("Could not load Nymbo/Nymbo_Theme, using default theme.")
+     theme = gr.themes.Default()
+
+ with gr.Blocks(theme=theme, css=css) as app:
      # Add a title to the app
+     gr.HTML("<center><h1>FLUX.1-Dev Image Generator</h1></center>")
+
      # Container for all the UI elements
      with gr.Column(elem_id="app-container"):
          # Add a text input for the main prompt
          with gr.Row():
+             with gr.Column(scale=3):  # Give prompt more width
+                 text_prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt here (English or Russian)", lines=3, elem_id="prompt-text-input")  # Increased lines
+
+             with gr.Column(scale=1):  # Negative prompt column
+                 negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What to avoid...", value=default_negative_prompt, lines=3, elem_id="negative-prompt-text-input")
+
+         # Accordion for advanced settings
+         with gr.Accordion("Advanced Settings", open=False):
+             with gr.Row():
+                 width = gr.Slider(label="Width", value=1024, minimum=256, maximum=1216, step=64)  # Adjusted steps/min
+                 height = gr.Slider(label="Height", value=1024, minimum=256, maximum=1216, step=64)  # Adjusted steps/min
+             with gr.Row():
+                 steps = gr.Slider(label="Sampling steps", value=35, minimum=10, maximum=100, step=1)  # Adjusted min
+                 cfg = gr.Slider(label="CFG Scale", value=7.0, minimum=1.0, maximum=20.0, step=0.5)  # Added step
+                 strength = gr.Slider(label="Strength (Img2Img)", value=0.7, minimum=0.0, maximum=1.0, step=0.01, info="Primarily for Image-to-Image tasks, may have limited effect here.")  # Added step and info
+             with gr.Row():
+                 seed = gr.Slider(label="Seed (-1 for random)", value=-1, minimum=-1, maximum=2**32 - 1, step=1)  # Increased max seed
+                 method = gr.Radio(
+                     label="Sampling method (Scheduler)",
+                     value="DPM++ 2M Karras",
+                     choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"],
+                     info="Note: Model API might use its default scheduler regardless of selection."
+                 )

          # Add a button to trigger the image generation
          with gr.Row():
+             text_button = gr.Button("Generate Image", variant='primary', elem_id="gen-button")
+
          # Image output area to display the generated image
          with gr.Row():
+             image_output = gr.Image(type="pil", label="Generated Image", elem_id="gallery")  # Changed label
+
+         # --- Add Examples Component Here ---
+         with gr.Row():
+             gr.Examples(
+                 examples=example_list,
+                 # List ALL components that correspond to the example list order
+                 inputs=[text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height],
+                 outputs=image_output,      # The component to display the output
+                 fn=query,                  # The function used to generate the examples for caching
+                 cache_examples=True,       # << ENABLE CACHING HERE
+                 label="Examples (Click to Run & View Cached Result)",
+                 examples_per_page=6        # Adjust how many show per page
+                 # run_on_click=True        # Optional: re-run 'query' on click even if cached. Usually False when caching.
+             )
+
+         # Bind the main button to the query function
+         # Ensure the input order matches the query function definition
+         text_button.click(
+             query,
+             inputs=[text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height],
+             outputs=image_output,
+             api_name="generate_image"  # Optional: set an API name for external calls
+         )
+

  # Launch the Gradio app
+ # share=False is correct for Hugging Face Spaces deployment
+ # show_api=False is fine unless you specifically need to expose the API endpoint documentation
+ # debug=True can be useful for development but remove it for production/sharing
+ app.launch(show_api=False, share=False)
+
+
+ Key Changes:
+
+ - Import typing: Added Callable, List, Any, Literal for better type hinting, though not strictly required.
+ - Error Handling in query: Made error handling more robust, checking for token availability, using response.raise_for_status(), catching specific request exceptions (like Timeout and 503), and providing clearer Gradio errors (gr.Error, gr.Warning). Also added basic translation error handling.
+ - API Payload: Adjusted the payload structure slightly based on common inference API patterns (e.g., num_inference_steps, guidance_scale). Added notes that the specific model might ignore some parameters. Handles a seed of -1 better.
+ - Default Negative Prompt: Stored the default negative prompt in a variable for reuse in the examples.
+ - example_list: Defined a list of lists. Each inner list contains values for all the inputs to the query function, in the correct order. Includes diverse prompts, some parameter variations, and a Russian example.
+ - gr.Examples Instantiation:
+   - Placed gr.Examples(...) within the gr.Blocks context, after the main input/output components.
+   - examples=example_list: Passed the defined list.
+   - inputs=[...]: Listed all the input components (gr.Textbox, gr.Slider, etc.) in the exact order corresponding to the data in example_list.
+   - outputs=image_output: Specified the output component.
+   - fn=query: Crucially, provided the query function. This tells Gradio how to generate the results for caching.
+   - cache_examples=True: This enables the caching mechanism.
+   - Added label and examples_per_page for better UI.
+   - run_on_click is typically False or omitted when cache_examples=True, as the point is to show the pre-computed result. Set it to True only if you want clicking an example to re-run the generation even if it's cached (useful if you want users to easily try variations from an example starting point).
+ - UI Tweaks: Increased the prompt textbox lines, adjusted slider steps/ranges, and added info text to some sliders/radios.
+ - Theme Loading: Added a try...except block for loading the custom theme to fall back gracefully if it's not found.
+ - API Token Handling: Added basic handling for multiple tokens via an environment variable HF_EXTRA_TOKENS (comma-separated) and rotation.
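
One way to confirm that the token and payload changes actually reach the endpoint is a standalone smoke test that mirrors the app's request. This is a hypothetical check, not part of the commit: it avoids importing app.py (which launches the UI at import time) and assumes HF_READ_TOKEN is exported and the FLUX.1-dev endpoint is reachable.

import io
import os

import requests
from PIL import Image

token = os.getenv("HF_READ_TOKEN")
assert token, "set HF_READ_TOKEN before running this check"

# Same endpoint, headers, and payload shape as the app's query() function.
response = requests.post(
    "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev",
    headers={"Authorization": f"Bearer {token}"},
    json={
        "inputs": "A lighthouse on a cliff at sunset | ultra detail, ultra quality",
        "parameters": {"width": 512, "height": 512},
    },
    timeout=100,
)
response.raise_for_status()
Image.open(io.BytesIO(response.content)).save("smoke_test.png")
print("OK: image saved to smoke_test.png")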
+
+ Before Running:
+
+ Update requirements.txt: Ensure gradio (version >= 4.x recommended for the latest features/fixes), requests, pillow, and deep-translator are listed. You likely don't need langdetect anymore if you removed it.
+
+     requests
+     pillow
+     deep-translator
+     gradio>=4.44.1  # Use the version suggested or newer
+
+ Set Environment Variables: Make sure HF_READ_TOKEN is set in your Space secrets. Optionally set HF_EXTRA_TOKENS if you have more tokens.
+
+ Commit and Push: Save app.py and requirements.txt, commit, and push to your Space.
+
+ The first time the Space builds after these changes, it will take longer as it runs query for each example to build the cache. Subsequent loads will be faster, and clicking examples will show results instantly.