|
Okay, let's integrate gr.Examples into your script with some diverse, high-quality Stable Diffusion-style prompts and enable caching. |
|
|
|
Caching examples means that when the Space first builds (or rebuilds after changes), it will run the query function once for each example and store the resulting image. When a user clicks that example later, the cached image is shown instantly instead of running the model again. |
|
|
|
Here's the modified app.py: |
|
|
|
import gradio as gr |
|
import requests |
|
import io |
|
import random |
|
import os |
|
import time |
|
from PIL import Image |
|
from deep_translator import GoogleTranslator |
|
import json |
|
from typing import Callable, List, Any, Literal |
|
|
|
|
|
|
|
API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
API_TOKEN = os.getenv("HF_READ_TOKEN")

# Optional extra tokens (comma-separated in HF_EXTRA_TOKENS) let the app
# rotate credentials to spread rate limits across several accounts.
ADDITIONAL_TOKENS = [t for t in os.getenv("HF_EXTRA_TOKENS", "").split(',') if t]
# Filter out falsy entries so a missing HF_READ_TOKEN does not leave a
# `None` in the pool: the old `[API_TOKEN] + ADDITIONAL_TOKENS` kept the
# None, defeating the `if not ALL_TOKENS` guard in query() and letting
# random.choice() produce an "Authorization: Bearer None" header.
ALL_TOKENS = [t for t in [API_TOKEN, *ADDITIONAL_TOKENS] if t]
if not API_TOKEN:
    print("Warning: HF_READ_TOKEN is not set. API calls may fail.")

# Seconds to wait for the inference API before aborting a request.
timeout = 100
|
|
|
|
|
|
|
def query(
    prompt: str,
    negative_prompt: str = "(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, misspellings, typos",
    steps: int = 35,
    cfg_scale: float = 7.0,
    sampler: str = "DPM++ 2M Karras",
    seed: int = -1,
    strength: float = 0.7,
    width: int = 1024,
    height: int = 1024
) -> Image.Image | None:
    """Generate an image for *prompt* via the FLUX.1-dev inference API.

    Prompts containing any Cyrillic character are machine-translated from
    Russian to English first (best effort); every prompt then gets a
    quality-boosting suffix appended before being sent.

    Args:
        prompt: User prompt. Empty prompts abort with a gr.Warning.
        negative_prompt: Concepts to steer away from; omitted from the
            payload entirely when empty.
        steps: Number of inference steps.
        cfg_scale: Classifier-free guidance scale.
        sampler: Scheduler name forwarded to the API (the backend may
            ignore it and use its own default).
        seed: RNG seed; -1 leaves seeding to the API.
        strength: Img2img strength; may have no effect for text-to-image.
        width: Output width in pixels.
        height: Output height in pixels.

    Returns:
        The generated PIL image, or None for invalid input / missing tokens.

    Raises:
        gr.Error: On timeouts, HTTP errors, or an unreadable response body.
    """
    if not prompt:
        gr.Warning("Prompt cannot be empty.")
        return None
    if not ALL_TOKENS:
        gr.Error("No Hugging Face API tokens available.")
        return None

    # Random id so concurrent generations can be told apart in the logs.
    key = random.randint(0, 9999)
    start_time = time.time()

    # Rotate across all configured tokens to spread rate limits.
    selected_api_token = random.choice(ALL_TOKENS)
    headers = {"Authorization": f"Bearer {selected_api_token}"}

    translated_prompt = prompt
    try:
        # Any character in the Cyrillic Unicode block triggers ru->en.
        if any('\u0400' <= char <= '\u04FF' for char in prompt):
            translated_prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
            print(f'\033[1mGeneration {key} translation:\033[0m {translated_prompt}')
    except Exception as e:
        # Best effort: translation failure falls back to the raw prompt.
        print(f"Translation failed: {e}. Using original prompt.")

    enhanced_prompt = f"{translated_prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    print(f'\033[1mGeneration {key} starting:\033[0m {enhanced_prompt}')

    parameters = {
        "num_inference_steps": steps,
        "guidance_scale": cfg_scale,
        "strength": strength,
        "width": width,
        "height": height,
        # NOTE(review): the backend may ignore "scheduler" and use its
        # default — the UI radio carries the same caveat.
        "scheduler": sampler,
    }
    # Fix: the previous code computed `random.randint(...)` for seed == -1
    # and then immediately deleted the key again, so the random value was
    # dead code. Only send an explicit seed; -1 leaves seeding to the API
    # (same net payload as before, without the contradiction).
    if seed != -1:
        parameters["seed"] = seed

    payload = {
        "inputs": enhanced_prompt,
        "parameters": parameters,
    }
    # NOTE(review): negative_prompt is kept at the payload top level to
    # match the original shape; some inference backends expect it inside
    # "parameters" instead — confirm against the FLUX.1-dev API.
    if negative_prompt:
        payload["negative_prompt"] = negative_prompt

    print(f"Payload for {key}: {json.dumps(payload, indent=2)}")

    try:
        response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
        response.raise_for_status()

        # Successful responses carry the raw image bytes in the body.
        image = Image.open(io.BytesIO(response.content))
        end_time = time.time()
        print(f'\033[1mGeneration {key} completed in {end_time - start_time:.2f}s!\033[0m')
        return image

    except requests.exceptions.Timeout:
        print(f"Error: Request timed out after {timeout} seconds.")
        raise gr.Error(f"Request timed out ({timeout}s). Model might be busy. Try again later.")
    except requests.exceptions.RequestException as e:
        # e.response is None for connection-level failures.
        status_code = e.response.status_code if e.response is not None else "N/A"
        error_content = e.response.text if e.response is not None else str(e)
        print(f"Error: API request failed. Status: {status_code}, Content: {error_content}")
        if status_code == 503:
            if "is currently loading" in error_content:
                raise gr.Error("Model is loading. Please wait a moment and try again.")
            raise gr.Error("Model service unavailable (503). It might be overloaded or down. Try again later.")
        elif status_code == 400:
            raise gr.Error(f"Bad Request (400). Check parameters. API response: {error_content[:200]}")
        elif status_code == 429:
            raise gr.Error("Too many requests (429). Rate limit hit. Please wait.")
        else:
            raise gr.Error(f"API Error ({status_code}). Response: {error_content[:200]}")
    except (OSError, json.JSONDecodeError) as e:
        # IOError was dropped from the tuple: it is an alias of OSError.
        # PIL raises OSError/UnidentifiedImageError for non-image bodies.
        print(f"Error processing response or image: {e}")
        raise gr.Error(f"Failed to process the image response: {e}")
    except Exception as e:
        print(f"An unexpected error occurred: {e}")
        import traceback
        traceback.print_exc()
        raise gr.Error(f"An unexpected error occurred: {e}")
|
|
|
|
|
|
|
|
|
|
|
# Minimal page CSS; #app-container is attached to the app's main column.
css = """
#app-container {
max-width: 960px; /* Slightly wider */
margin-left: auto;
margin-right: auto;
}
/* Add more styling if desired */
"""

# Default negative prompt, shared by the UI textbox default value and by
# most rows of the examples list so both stay in sync.
default_negative_prompt = "(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, misspellings, typos"
|
|
|
|
|
|
|
|
|
# Example rows for gr.Examples. Column order must match the `inputs` list
# passed to gr.Examples below:
# [prompt, negative_prompt, steps, cfg_scale, sampler, seed, strength,
#  width, height]. A seed of -1 means "random" (no cached reproducibility).
example_list = [
    [
        "Epic cinematic shot of a medieval knight kneeling in a misty forest, volumetric lighting, hyperrealistic photo, 8k",
        default_negative_prompt, 40, 7.5, "DPM++ 2M Karras", 12345, 0.7, 1024, 1024
    ],
    [
        "Studio Ghibli style illustration of a cozy bakery storefront on a rainy day, warm lighting, detailed",
        default_negative_prompt, 30, 6.0, "Euler a", 54321, 0.7, 1024, 1024
    ],
    [
        "Macro photograph of a dewdrop on a spider web, intricate details, shallow depth of field, natural lighting",
        "blurry, unfocused, cartoon", 50, 8.0, "DPM++ 2M Karras", -1, 0.7, 1024, 1024
    ],
    [
        # Non-square (landscape) resolution example.
        "Steampunk astronaut exploring an alien jungle landscape, brass and copper details, vibrant bioluminescent plants, wide angle",
        default_negative_prompt, 35, 7.0, "DPM++ SDE Karras", 98765, 0.7, 1216, 832
    ],
    [
        "Abstract geometric art, vibrant contrasting colors, sharp edges, minimalistic design, 4k wallpaper",
        "photorealistic, noisy, cluttered", 25, 5.0, "Euler", -1, 0.7, 1024, 1024
    ],
    [
        # Russian prompt — query() translates Cyrillic prompts to English.
        "Кот в очках читает книгу у камина",
        default_negative_prompt, 35, 7.0, "DPM++ 2M Karras", 11223, 0.7, 1024, 1024
    ]
]
|
|
|
|
|
|
|
|
|
# Load the custom theme from the Hub, falling back to the default theme if
# it cannot be fetched (e.g. offline build).
try:
    theme = gr.themes.Base.load('Nymbo/Nymbo_Theme')
except Exception:
    print("Could not load Nymbo/Nymbo_Theme, using default theme.")
    theme = gr.themes.Default()

with gr.Blocks(theme=theme, css=css) as app:

    gr.HTML("<center><h1>FLUX.1-Dev Image Generator</h1></center>")

    with gr.Column(elem_id="app-container"):

        # Prompt / negative-prompt inputs side by side.
        with gr.Row():
            with gr.Column(scale=3):
                text_prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt here (English or Russian)", lines=3, elem_id="prompt-text-input")

            with gr.Column(scale=1):
                negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What to avoid...", value=default_negative_prompt, lines=3, elem_id="negative-prompt-text-input")

        # Generation parameters, collapsed by default.
        with gr.Accordion("Advanced Settings", open=False):
            with gr.Row():
                width = gr.Slider(label="Width", value=1024, minimum=256, maximum=1216, step=64)
                height = gr.Slider(label="Height", value=1024, minimum=256, maximum=1216, step=64)
            with gr.Row():
                steps = gr.Slider(label="Sampling steps", value=35, minimum=10, maximum=100, step=1)
                cfg = gr.Slider(label="CFG Scale", value=7.0, minimum=1.0, maximum=20.0, step=0.5)
                strength = gr.Slider(label="Strength (Img2Img)", value=0.7, minimum=0.0, maximum=1.0, step=0.01, info="Primarily for Image-to-Image tasks, may have limited effect here.")
            with gr.Row():
                seed = gr.Slider(label="Seed (-1 for random)", value=-1, minimum=-1, maximum=2**32 - 1, step=1)
                method = gr.Radio(
                    label="Sampling method (Scheduler)",
                    value="DPM++ 2M Karras",
                    choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"],
                    info="Note: Model API might use its default scheduler regardless of selection."
                )

        with gr.Row():
            text_button = gr.Button("Generate Image", variant='primary', elem_id="gen-button")

        with gr.Row():
            image_output = gr.Image(type="pil", label="Generated Image", elem_id="gallery")

        # Cached examples: each row of example_list fills the `inputs`
        # components below, in order. cache_examples=True pre-runs `fn`
        # (query) for every row at build time so clicking an example shows
        # the stored image instantly instead of re-generating.
        with gr.Row():
            gr.Examples(
                examples=example_list,
                inputs=[text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height],
                outputs=image_output,
                fn=query,
                cache_examples=True,
                label="Examples (Click to Run & View Cached Result)",
                examples_per_page=6
            )

    # Wire the generate button. The `inputs` order must match query()'s
    # positional parameter order exactly.
    text_button.click(
        query,
        inputs=[text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height],
        outputs=image_output,
        api_name="generate_image"
    )

# show_api=False hides the API docs page; share=False keeps the app local
# to the Space (no public gradio.live tunnel).
app.launch(show_api=False, share=False)
|
|
|
|
|
Key Changes: |
|
|
|
Import typing: Added Callable, List, Any, Literal for better type hinting, though not strictly required. |
|
|
|
Error Handling in query: Made error handling slightly more robust, checking for token availability, using response.raise_for_status(), catching specific request exceptions (like Timeout, 503), and providing clearer Gradio errors (gr.Error, gr.Warning). Also added basic translation error handling. |
|
|
|
API Payload: Adjusted the payload structure slightly based on common inference API patterns (e.g., num_inference_steps, guidance_scale). Added notes that the specific model might ignore some parameters. Handles -1 seed better. |
|
|
|
Default Negative Prompt: Stored the default negative prompt in a variable for reuse in examples. |
|
|
|
example_list: Defined a list of lists. Each inner list contains values for all the inputs to the query function, in the correct order. Includes diverse prompts and some parameter variations. Added a Russian example. |
|
|
|
gr.Examples Instantiation: |
|
|
|
Placed gr.Examples(...) within the gr.Blocks context, after the main input/output components. |
|
|
|
examples=example_list: Passed the defined list. |
|
|
|
inputs=[...]: Listed all the input components (gr.Textbox, gr.Slider, etc.) in the exact order corresponding to the data in example_list. |
|
|
|
outputs=image_output: Specified the output component. |
|
|
|
fn=query: Crucially, provided the query function. This tells Gradio how to generate the results for caching. |
|
|
|
cache_examples=True: This enables the caching mechanism. |
|
|
|
Added label and examples_per_page for better UI. |
|
|
|
run_on_click is typically False or omitted when cache_examples=True, as the point is to show the pre-computed result. Set it to True only if you want clicking an example to re-run the generation even if it's cached (useful if you want users to easily try variations from an example starting point). |
|
|
|
UI Tweaks: Increased prompt textbox lines, adjusted slider steps/ranges, added info text to some sliders/radios. |
|
|
|
Theme Loading: Added a try...except block for loading the custom theme to fall back gracefully if it's not found. |
|
|
|
API Token Handling: Added basic handling for multiple tokens via an environment variable HF_EXTRA_TOKENS (comma-separated) and rotation. |
|
|
|
Before Running: |
|
|
|
Update requirements.txt: Ensure gradio (version >= 4.x recommended for latest features/fixes), requests, pillow, deep-translator are listed. You likely don't need langdetect anymore if you removed it. |
|
|
|
requests |
|
pillow |
|
deep-translator |
|
gradio>=4.44.1 # Use the version suggested or newer |
|
|
|
|
Set Environment Variables: Make sure HF_READ_TOKEN is set in your Space secrets. Optionally set HF_EXTRA_TOKENS if you have more tokens. |
|
|
|
Commit and Push: Save app.py and requirements.txt, commit, and push to your Space. |
|
|
|
The first time the Space builds after these changes, it will take longer as it runs query for each example to build the cache. Subsequent loads will be faster, and clicking examples will show results instantly. |