import io
import json
import os
import random
import time

import gradio as gr
import requests
from PIL import Image
from deep_translator import GoogleTranslator
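
# Gradio front end for the black-forest-labs/FLUX.1-dev text-to-image model,
# called through the Hugging Face serverless Inference API.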

API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
API_TOKEN = os.getenv("HF_READ_TOKEN")

# Optional extra tokens (comma-separated in HF_EXTRA_TOKENS) let requests be
# spread across several accounts.
ADDITIONAL_TOKENS = [t for t in os.getenv("HF_EXTRA_TOKENS", "").split(',') if t]
# Keep only tokens that are actually set, so the availability check in query()
# works even when HF_READ_TOKEN is missing.
ALL_TOKENS = [t for t in [API_TOKEN] + ADDITIONAL_TOKENS if t]

if not API_TOKEN:
    print("Warning: HF_READ_TOKEN is not set. API calls may fail.")

# Per-request timeout for the inference call, in seconds.
TIMEOUT = 100


def query(
    prompt: str,
    negative_prompt: str = "(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, misspellings, typos",
    steps: int = 35,
    cfg_scale: float = 7.0,
    sampler: str = "DPM++ 2M Karras",
    seed: int = -1,
    strength: float = 0.7,
    width: int = 1024,
    height: int = 1024,
) -> Image.Image | None:
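    """Generate an image with FLUX.1-dev via the Hugging Face Inference API.

    Returns a PIL image on success, or None if the prompt is empty. API and
    image-processing failures are surfaced to the UI as gr.Error exceptions.
    """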
    if not prompt:
        gr.Warning("Prompt cannot be empty.")
        return None
    if not ALL_TOKENS:
        # gr.Error is an exception and must be raised to reach the UI.
        raise gr.Error("No Hugging Face API tokens available.")

    # Short random id used to correlate the log lines of this generation.
    key = random.randint(0, 9999)
    start_time = time.time()

    # Pick one of the configured tokens at random to spread the request load.
    selected_api_token = random.choice(ALL_TOKENS)
    headers = {"Authorization": f"Bearer {selected_api_token}"}

    # Prompts containing Cyrillic characters are translated to English first.
    translated_prompt = prompt
    try:
        if any('\u0400' <= char <= '\u04FF' for char in prompt):
            translated_prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
            print(f'\033[1mGeneration {key} translation:\033[0m {translated_prompt}')
    except Exception as e:
        print(f"Translation failed: {e}. Using original prompt.")

    enhanced_prompt = f"{translated_prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    print(f'\033[1mGeneration {key} starting:\033[0m {enhanced_prompt}')
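
    # Build the request body following the serverless Inference API convention
    # for text-to-image models: the prompt goes under "inputs" and generation
    # options under "parameters". Keys the backend does not recognise (e.g.
    # "scheduler" or "strength" for pure text-to-image) are typically ignored,
    # though that is not guaranteed for every model.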
    payload = {
        "inputs": enhanced_prompt,
        "negative_prompt": negative_prompt,
        "parameters": {
            "num_inference_steps": steps,
            "guidance_scale": cfg_scale,
            "strength": strength,
            "width": width,
            "height": height,
            "scheduler": sampler,
        },
    }
    # A seed of -1 means "random": omit the key and let the API choose one.
    if seed != -1:
        payload["parameters"]["seed"] = seed
    if not negative_prompt:
        del payload["negative_prompt"]

    print(f"Payload for {key}: {json.dumps(payload, indent=2)}")

    try:
        response = requests.post(API_URL, headers=headers, json=payload, timeout=TIMEOUT)
        response.raise_for_status()

        # A successful response carries the raw image bytes in the body.
        image_bytes = response.content
        image = Image.open(io.BytesIO(image_bytes))
        end_time = time.time()
        print(f'\033[1mGeneration {key} completed in {end_time - start_time:.2f}s!\033[0m')
        return image

    except requests.exceptions.Timeout:
        print(f"Error: Request timed out after {TIMEOUT} seconds.")
        raise gr.Error(f"Request timed out ({TIMEOUT}s). Model might be busy. Try again later.")
    except requests.exceptions.RequestException as e:
        status_code = e.response.status_code if e.response is not None else "N/A"
        error_content = e.response.text if e.response is not None else str(e)
        print(f"Error: API request failed. Status: {status_code}, Content: {error_content}")
        if status_code == 503:
            if "is currently loading" in error_content:
                raise gr.Error("Model is loading. Please wait a moment and try again.")
            else:
                raise gr.Error("Model service unavailable (503). It might be overloaded or down. Try again later.")
        elif status_code == 400:
            raise gr.Error(f"Bad Request (400). Check parameters. API response: {error_content[:200]}")
        elif status_code == 429:
            raise gr.Error("Too many requests (429). Rate limit hit. Please wait.")
        else:
            raise gr.Error(f"API Error ({status_code}). Response: {error_content[:200]}")
    except (OSError, json.JSONDecodeError) as e:
        # Raised when the response body is not a decodable image (IOError is an
        # alias of OSError, so it does not need to be listed separately).
        print(f"Error processing response or image: {e}")
        raise gr.Error(f"Failed to process the image response: {e}")
    except Exception as e:
        print(f"An unexpected error occurred: {e}")
        import traceback
        traceback.print_exc()
        raise gr.Error(f"An unexpected error occurred: {e}")
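
# Example of calling query() directly, outside the Gradio UI (assumes
# HF_READ_TOKEN is set in the environment):
#
#   image = query("A watercolor painting of a lighthouse at dawn")
#   if image is not None:
#       image.save("lighthouse.png")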


css = """
#app-container {
    max-width: 960px; /* Slightly wider */
    margin-left: auto;
    margin-right: auto;
}
/* Add more styling if desired */
"""

# Default negative prompt shared by the UI textbox and the examples below.
default_negative_prompt = "(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, misspellings, typos"

# Each example row matches the inputs list of gr.Examples below:
# [prompt, negative_prompt, steps, cfg, method, seed, strength, width, height]
example_list = [
    [
        "Epic cinematic shot of a medieval knight kneeling in a misty forest, volumetric lighting, hyperrealistic photo, 8k",
        default_negative_prompt, 40, 7.5, "DPM++ 2M Karras", 12345, 0.7, 1024, 1024
    ],
    [
        "Studio Ghibli style illustration of a cozy bakery storefront on a rainy day, warm lighting, detailed",
        default_negative_prompt, 30, 6.0, "Euler a", 54321, 0.7, 1024, 1024
    ],
    [
        "Macro photograph of a dewdrop on a spider web, intricate details, shallow depth of field, natural lighting",
        "blurry, unfocused, cartoon", 50, 8.0, "DPM++ 2M Karras", -1, 0.7, 1024, 1024
    ],
    [
        "Steampunk astronaut exploring an alien jungle landscape, brass and copper details, vibrant bioluminescent plants, wide angle",
        default_negative_prompt, 35, 7.0, "DPM++ SDE Karras", 98765, 0.7, 1216, 832
    ],
    [
        "Abstract geometric art, vibrant contrasting colors, sharp edges, minimalistic design, 4k wallpaper",
        "photorealistic, noisy, cluttered", 25, 5.0, "Euler", -1, 0.7, 1024, 1024
    ],
    [
        # Russian prompt ("A cat in glasses reading a book by the fireplace")
        # that exercises the automatic RU -> EN translation in query().
        "Кот в очках читает книгу у камина",
        default_negative_prompt, 35, 7.0, "DPM++ 2M Karras", 11223, 0.7, 1024, 1024
    ]
]


# Try to pull the shared "Nymbo/Nymbo_Theme" from the Hugging Face Hub and fall
# back to the default Gradio theme if it cannot be loaded (e.g. offline).
try:
    theme = gr.Theme.from_hub('Nymbo/Nymbo_Theme')
except Exception:
    print("Could not load Nymbo/Nymbo_Theme, using default theme.")
    theme = gr.themes.Default()


with gr.Blocks(theme=theme, css=css) as app:
    gr.HTML("<center><h1>FLUX.1-Dev Image Generator</h1></center>")

    with gr.Column(elem_id="app-container"):
        with gr.Row():
            with gr.Column(scale=3):
                text_prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt here (English or Russian)", lines=3, elem_id="prompt-text-input")
            with gr.Column(scale=1):
                negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What to avoid...", value=default_negative_prompt, lines=3, elem_id="negative-prompt-text-input")

        with gr.Accordion("Advanced Settings", open=False):
            with gr.Row():
                width = gr.Slider(label="Width", value=1024, minimum=256, maximum=1216, step=64)
                height = gr.Slider(label="Height", value=1024, minimum=256, maximum=1216, step=64)
            with gr.Row():
                steps = gr.Slider(label="Sampling steps", value=35, minimum=10, maximum=100, step=1)
                cfg = gr.Slider(label="CFG Scale", value=7.0, minimum=1.0, maximum=20.0, step=0.5)
                strength = gr.Slider(label="Strength (Img2Img)", value=0.7, minimum=0.0, maximum=1.0, step=0.01, info="Primarily for Image-to-Image tasks, may have limited effect here.")
            with gr.Row():
                seed = gr.Slider(label="Seed (-1 for random)", value=-1, minimum=-1, maximum=2**32 - 1, step=1)
                method = gr.Radio(
                    label="Sampling method (Scheduler)",
                    value="DPM++ 2M Karras",
                    choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"],
                    info="Note: Model API might use its default scheduler regardless of selection."
                )

        with gr.Row():
            text_button = gr.Button("Generate Image", variant='primary', elem_id="gen-button")

        with gr.Row():
            image_output = gr.Image(type="pil", label="Generated Image", elem_id="gallery")

        with gr.Row():
            gr.Examples(
                examples=example_list,
                inputs=[text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height],
                outputs=image_output,
                fn=query,
                cache_examples=True,
                label="Examples (Click to Run & View Cached Result)",
                examples_per_page=6
            )
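
        # Note: cache_examples=True above makes Gradio run query() on each
        # example once at startup and reuse the stored results; set it to
        # False to avoid spending API calls at launch.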

    text_button.click(
        query,
        inputs=[text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height],
        outputs=image_output,
        api_name="generate_image"
    )
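
# show_api=False hides the auto-generated API docs page; share=False keeps the
# app local instead of creating a temporary public Gradio link.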
app.launch(show_api=False, share=False)