greendra commited on
Commit
c84bbeb
·
verified ·
1 Parent(s): 87106fd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +65 -213
app.py CHANGED
@@ -7,261 +7,113 @@ import time
7
  from PIL import Image
8
  from deep_translator import GoogleTranslator
9
  import json
10
- from typing import Callable, List, Any, Literal # Added for type hinting Examples
11
 
12
  # Project by Nymbo
13
 
14
- API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
15
- API_TOKEN = os.getenv("HF_READ_TOKEN") # Your main token for primary use
16
- # Consider adding more read tokens if you have them for resilience
17
- ADDITIONAL_TOKENS = [t for t in os.getenv("HF_EXTRA_TOKENS", "").split(',') if t] # Example: HF_EXTRA_TOKENS="token1,token2"
18
- ALL_TOKENS = [API_TOKEN] + ADDITIONAL_TOKENS
19
- if not API_TOKEN:
20
- print("Warning: HF_READ_TOKEN is not set. API calls may fail.")
21
- # Optional: raise an error or use a dummy token if needed
22
- # raise ValueError("HF_READ_TOKEN environment variable is required.")
23
-
24
  timeout = 100
25
 
26
  # Function to query the API and return the generated image
27
- # Added type hints for clarity
28
- def query(
29
- prompt: str,
30
- negative_prompt: str = "(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, misspellings, typos",
31
- steps: int = 35,
32
- cfg_scale: float = 7.0,
33
- sampler: str = "DPM++ 2M Karras", # Matched default in UI
34
- seed: int = -1,
35
- strength: float = 0.7,
36
- width: int = 1024,
37
- height: int = 1024
38
- ) -> Image.Image | None:
39
-
40
- if not prompt: # Simplified check
41
- gr.Warning("Prompt cannot be empty.")
42
  return None
43
- if not ALL_TOKENS:
44
- gr.Error("No Hugging Face API tokens available.")
45
- return None
46
-
47
- key = random.randint(0, 9999)
48
- start_time = time.time()
49
 
50
- # Rotate through available tokens
51
- selected_api_token = random.choice(ALL_TOKENS)
52
- headers = {"Authorization": f"Bearer {selected_api_token}"}
53
-
54
- translated_prompt = prompt
55
- try:
56
- # Simple check if likely Russian (Cyrillic) - adjust if needed
57
- if any('\u0400' <= char <= '\u04FF' for char in prompt):
58
- translated_prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
59
- print(f'\033[1mGeneration {key} translation:\033[0m {translated_prompt}')
60
- else:
61
- # Assume English or other non-Russian if no Cyrillic detected
62
- pass
63
- except Exception as e:
64
- print(f"Translation failed: {e}. Using original prompt.")
65
- # Decide if you want to raise an error or just proceed
66
- # gr.Warning(f"Translation failed: {e}. Using original prompt.")
67
 
68
  # Add some extra flair to the prompt
69
- enhanced_prompt = f"{translated_prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
70
- print(f'\033[1mGeneration {key} starting:\033[0m {enhanced_prompt}')
71
-
72
  # Prepare the payload for the API call, including width and height
73
  payload = {
74
- "inputs": enhanced_prompt,
75
- "negative_prompt": negative_prompt, # Use the negative prompt input
76
- # Note: The underlying API might not support all parameters directly.
77
- # Check the specific model card for supported parameters.
78
- # We send common ones; the model API will use what it understands.
 
79
  "parameters": {
80
- "num_inference_steps": steps,
81
- "guidance_scale": cfg_scale,
82
- "seed": seed if seed != -1 else random.randint(1, 2**32 - 1), # Use larger seed range
83
- "strength": strength, # Often used for img2img, may not apply here
84
- "width": width,
85
- "height": height,
86
- # Sampler might not be directly controllable via this endpoint structure
87
- # The model often uses its default or recommended sampler.
88
- # Sending it anyway in case future API versions support it.
89
- "scheduler": sampler,
90
  }
91
  }
92
- # Clean up payload if values are None or default where API expects omission
93
- if seed == -1:
94
- del payload["parameters"]["seed"] # Let API choose random if -1
95
- if not negative_prompt:
96
- # If empty string, some APIs prefer omitting the key entirely
97
- del payload["negative_prompt"]
98
-
99
-
100
- print(f"Payload for {key}: {json.dumps(payload, indent=2)}")
101
 
102
  # Send the request to the API and handle the response
 
 
 
 
 
 
 
 
103
  try:
104
- response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
105
- response.raise_for_status() # Raises HTTPError for bad responses (4xx or 5xx)
106
-
107
  # Convert the response content into an image
108
  image_bytes = response.content
109
  image = Image.open(io.BytesIO(image_bytes))
110
- end_time = time.time()
111
- print(f'\033[1mGeneration {key} completed in {end_time - start_time:.2f}s!\033[0m')
112
  return image
113
-
114
- except requests.exceptions.Timeout:
115
- print(f"Error: Request timed out after {timeout} seconds.")
116
- raise gr.Error(f"Request timed out ({timeout}s). Model might be busy. Try again later.")
117
- except requests.exceptions.RequestException as e:
118
- status_code = e.response.status_code if e.response is not None else "N/A"
119
- error_content = e.response.text if e.response is not None else str(e)
120
- print(f"Error: API request failed. Status: {status_code}, Content: {error_content}")
121
- if status_code == 503:
122
- # Check for specific error messages if available
123
- if "is currently loading" in error_content:
124
- raise gr.Error("Model is loading. Please wait a moment and try again.")
125
- else:
126
- raise gr.Error("Model service unavailable (503). It might be overloaded or down. Try again later.")
127
- elif status_code == 400:
128
- raise gr.Error(f"Bad Request (400). Check parameters. API response: {error_content[:200]}")
129
- elif status_code == 429:
130
- raise gr.Error("Too many requests (429). Rate limit hit. Please wait.")
131
- else:
132
- raise gr.Error(f"API Error ({status_code}). Response: {error_content[:200]}")
133
- except (OSError, json.JSONDecodeError, IOError) as e:
134
- # Catch potential issues with image decoding or other file errors
135
- print(f"Error processing response or image: {e}")
136
- raise gr.Error(f"Failed to process the image response: {e}")
137
- except Exception as e: # Catch any other unexpected errors
138
- print(f"An unexpected error occurred: {e}")
139
- import traceback
140
- traceback.print_exc() # Print full traceback to console logs
141
- raise gr.Error(f"An unexpected error occurred: {e}")
142
-
143
-
144
- # --- Gradio UI ---
145
 
146
  # CSS to style the app
147
  css = """
148
  #app-container {
149
- max-width: 960px; /* Slightly wider */
150
  margin-left: auto;
151
  margin-right: auto;
152
  }
153
- /* Add more styling if desired */
 
 
154
  """
155
 
156
- # Default negative prompt can be reused
157
- default_negative_prompt = "(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, misspellings, typos"
158
-
159
- # Define the examples
160
- # Each inner list corresponds to the inputs of the 'query' function IN ORDER:
161
- # [prompt, negative_prompt, steps, cfg_scale, sampler, seed, strength, width, height]
162
- example_list = [
163
- [
164
- "Epic cinematic shot of a medieval knight kneeling in a misty forest, volumetric lighting, hyperrealistic photo, 8k",
165
- default_negative_prompt, 40, 7.5, "DPM++ 2M Karras", 12345, 0.7, 1024, 1024
166
- ],
167
- [
168
- "Studio Ghibli style illustration of a cozy bakery storefront on a rainy day, warm lighting, detailed",
169
- default_negative_prompt, 30, 6.0, "Euler a", 54321, 0.7, 1024, 1024
170
- ],
171
- [
172
- "Macro photograph of a dewdrop on a spider web, intricate details, shallow depth of field, natural lighting",
173
- "blurry, unfocused, cartoon", 50, 8.0, "DPM++ 2M Karras", -1, 0.7, 1024, 1024 # Random seed
174
- ],
175
- [
176
- "Steampunk astronaut exploring an alien jungle landscape, brass and copper details, vibrant bioluminescent plants, wide angle",
177
- default_negative_prompt, 35, 7.0, "DPM++ SDE Karras", 98765, 0.7, 1216, 832 # Different aspect ratio
178
- ],
179
- [
180
- "Abstract geometric art, vibrant contrasting colors, sharp edges, minimalistic design, 4k wallpaper",
181
- "photorealistic, noisy, cluttered", 25, 5.0, "Euler", -1, 0.7, 1024, 1024
182
- ],
183
- [
184
- "Кот в очках читает книгу у камина", # Example in Russian for translation testing
185
- default_negative_prompt, 35, 7.0, "DPM++ 2M Karras", 11223, 0.7, 1024, 1024
186
- ]
187
- ]
188
-
189
-
190
  # Build the Gradio UI with Blocks
191
- # Use the custom theme if it's available, otherwise fallback to default
192
- try:
193
- theme = gr.themes.Base.load('Nymbo/Nymbo_Theme')
194
- except Exception:
195
- print("Could not load Nymbo/Nymbo_Theme, using default theme.")
196
- theme = gr.themes.Default()
197
-
198
- with gr.Blocks(theme=theme, css=css) as app:
199
  # Add a title to the app
200
- gr.HTML("<center><h1>FLUX.1-Dev Image Generator</h1></center>")
201
-
202
  # Container for all the UI elements
203
  with gr.Column(elem_id="app-container"):
204
  # Add a text input for the main prompt
205
  with gr.Row():
206
- with gr.Column(scale=3): # Give prompt more width
207
- text_prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt here (English or Russian)", lines=3, elem_id="prompt-text-input") # Increased lines
208
-
209
- with gr.Column(scale=1): # Negative prompt column
210
- negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What to avoid...", value=default_negative_prompt, lines=3, elem_id="negative-prompt-text-input")
211
-
212
- # Accordion for advanced settings
213
- with gr.Accordion("Advanced Settings", open=False):
214
- with gr.Row():
215
- width = gr.Slider(label="Width", value=1024, minimum=256, maximum=1216, step=64) # Adjusted steps/min
216
- height = gr.Slider(label="Height", value=1024, minimum=256, maximum=1216, step=64) # Adjusted steps/min
217
- with gr.Row():
218
- steps = gr.Slider(label="Sampling steps", value=35, minimum=10, maximum=100, step=1) # Adjusted min
219
- cfg = gr.Slider(label="CFG Scale", value=7.0, minimum=1.0, maximum=20.0, step=0.5) # Added step
220
- strength = gr.Slider(label="Strength (Img2Img)", value=0.7, minimum=0.0, maximum=1.0, step=0.01, info="Primarily for Image-to-Image tasks, may have limited effect here.") # Added step and info
221
- with gr.Row():
222
- seed = gr.Slider(label="Seed (-1 for random)", value=-1, minimum=-1, maximum=2**32 - 1, step=1) # Increased max seed
223
- method = gr.Radio(
224
- label="Sampling method (Scheduler)",
225
- value="DPM++ 2M Karras",
226
- choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"],
227
- info="Note: Model API might use its default scheduler regardless of selection."
228
- )
229
 
230
  # Add a button to trigger the image generation
231
  with gr.Row():
232
- text_button = gr.Button("Generate Image", variant='primary', elem_id="gen-button")
233
-
234
  # Image output area to display the generated image
235
  with gr.Row():
236
- image_output = gr.Image(type="pil", label="Generated Image", elem_id="gallery") # Changed label
237
-
238
-
239
- # --- Add Examples Component Here ---
240
- with gr.Row():
241
- gr.Examples(
242
- examples=example_list,
243
- # List ALL components that correspond to the example list order
244
- inputs=[text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height],
245
- outputs=image_output, # The component to display the output
246
- fn=query, # The function to run TO GENERATE the examples for caching
247
- cache_examples=True, # << ENABLE CACHING HERE
248
- label="Examples (Click to Run & View Cached Result)", # Customize label
249
- examples_per_page=6 # Adjust how many show per page
250
- # run_on_click=True # Optional: Set True if you want clicking to trigger 'query' again, even if cached. Usually False when caching.
251
- )
252
-
253
- # Bind the main button to the query function
254
- # Ensure the order matches the function definition (excluding self if it were a class method)
255
- text_button.click(
256
- query,
257
- inputs=[text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height],
258
- outputs=image_output,
259
- api_name="generate_image" # Optional: Set API name for potential external calls
260
- )
261
-
262
 
263
  # Launch the Gradio app
264
- # share=False is correct for Hugging Face Spaces deployment
265
- # show_api=False is fine unless you specifically need to expose the API endpoint documentation
266
- # debug=True can be useful for development but remove for production/sharing
267
  app.launch(show_api=False, share=False)
 
7
  from PIL import Image
8
  from deep_translator import GoogleTranslator
9
  import json
 
10
 
11
  # Project by Nymbo
12
 
13
# Hugging Face Inference API endpoint for the FLUX.1-schnell text-to-image model
API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"

# Read the access token from the environment; never hard-code credentials.
API_TOKEN = os.getenv("HF_READ_TOKEN")
if not API_TOKEN:
    # Don't abort startup, but make the misconfiguration visible: without a
    # token every API call will be rejected (401) with a confusing error.
    print("Warning: HF_READ_TOKEN is not set; API requests will likely fail.")
headers = {"Authorization": f"Bearer {API_TOKEN}"}

# Per-request timeout (seconds) for the image-generation HTTP call
timeout = 100
17
 
18
# Function to query the API and return the generated image
def query(prompt, is_negative=False, steps=30, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024):
    """Generate an image for `prompt` via the HF Inference API.

    Returns a PIL.Image on success, or None when the prompt is empty or the
    response body cannot be decoded as an image. Raises gr.Error on HTTP
    failures so the Gradio UI surfaces a message to the user.
    """
    # Reject empty prompts early instead of wasting an API round-trip.
    if not prompt:
        return None

    # Short random id used only to correlate the log lines of one generation.
    key = random.randint(0, 999)

    # Build auth headers per call so a token rotated in the environment is
    # picked up without restarting the app.
    headers = {"Authorization": f"Bearer {os.getenv('HF_READ_TOKEN')}"}

    # Translate the prompt from Russian to English if necessary. Only attempt
    # translation when Cyrillic characters are present, and fall back to the
    # original prompt if the translation service is unreachable — a failed
    # translation must not abort the whole generation.
    try:
        if any('\u0400' <= ch <= '\u04FF' for ch in prompt):
            prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
            print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')
    except Exception as e:
        print(f"Translation failed ({e}); using the original prompt.")

    # Add some extra flair to the prompt
    prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    print(f'\033[1mGeneration {key}:\033[0m {prompt}')

    # Prepare the payload for the API call, including width and height.
    # NOTE(review): top-level keys other than "inputs"/"parameters" are not
    # part of the documented HF Inference API contract and may be ignored by
    # the server — confirm against the model card before relying on them.
    payload = {
        "inputs": prompt,
        "is_negative": is_negative,
        "steps": steps,
        "cfg_scale": cfg_scale,
        # seed == -1 means "random": draw a concrete seed so the value that
        # was actually sent is reproducible from the logs.
        "seed": seed if seed != -1 else random.randint(1, 1000000000),
        "strength": strength,
        "parameters": {
            "width": width,   # Pass the width to the API
            "height": height  # Pass the height to the API
        }
    }

    # Send the request to the API and handle the response
    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
    if response.status_code != 200:
        print(f"Error: Failed to get image. Response status: {response.status_code}")
        print(f"Response content: {response.text}")
        if response.status_code == 503:
            raise gr.Error(f"{response.status_code} : The model is being loaded")
        raise gr.Error(f"{response.status_code}")

    try:
        # Convert the response content into an image
        image = Image.open(io.BytesIO(response.content))
        print(f'\033[1mGeneration {key} completed!\033[0m ({prompt})')
        return image
    except Exception as e:
        # Non-image payloads (e.g. a JSON error body with HTTP 200) land here.
        print(f"Error when trying to open the image: {e}")
        return None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
 
69
# CSS to style the app: center the main column at a fixed max width and
# darken the background of a focused textarea to match the dark theme.
css = """
#app-container {
    max-width: 800px;
    margin-left: auto;
    margin-right: auto;
}
textarea:focus {
    background: #0d1117 !important;
}
"""
80
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
# Build the Gradio UI with Blocks
with gr.Blocks(theme='Nymbo/Nymbo_Theme', css=css) as app:
    # Add a title to the app
    gr.HTML("<center><h1>FLUX.1-Schnell</h1></center>")

    # Container for all the UI elements
    with gr.Column(elem_id="app-container"):
        # Add a text input for the main prompt
        with gr.Row():
            with gr.Column(elem_id="prompt-container"):
                with gr.Row():
                    text_prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt here", lines=2, elem_id="prompt-text-input")

        # Accordion for advanced settings (collapsed by default)
        with gr.Row():
            with gr.Accordion("Advanced Settings", open=False):
                negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What should not be in the image", value="(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, misspellings, typos", lines=3, elem_id="negative-prompt-text-input")
                with gr.Row():
                    width = gr.Slider(label="Width", value=1024, minimum=64, maximum=1216, step=32)
                    height = gr.Slider(label="Height", value=1024, minimum=64, maximum=1216, step=32)
                    steps = gr.Slider(label="Sampling steps", value=4, minimum=1, maximum=100, step=1)
                    cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=1)
                    strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.001)
                    seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)  # Setting the seed to -1 will make it random
                    method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])

        # Add a button to trigger the image generation
        with gr.Row():
            text_button = gr.Button("Run", variant='primary', elem_id="gen-button")

        # Image output area to display the generated image
        with gr.Row():
            image_output = gr.Image(type="pil", label="Image Output", elem_id="gallery")

        # Bind the button to the query function with the added width and height inputs.
        # NOTE(review): the negative_prompt textbox is wired into query's
        # `is_negative` parameter (a string into a boolean-named slot) —
        # confirm the API interprets it as a negative prompt as intended.
        text_button.click(query, inputs=[text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height], outputs=image_output)

# Launch the Gradio app
app.launch(show_api=False, share=False)