import io
import os
import random

import gradio as gr
import numpy as np
import requests
from deep_translator import GoogleTranslator
from PIL import Image

from model import models
from theme import theme

API_TOKEN = os.getenv("HF_READ_TOKEN")
headers = {"Authorization": f"Bearer {API_TOKEN}"}
timeout = 100   # seconds to wait for the inference API before giving up
max_images = 6

# The model dropdown below reads from `loaded_models`. Assuming `models` exported by
# model.py is an iterable of model ids (or a mapping keyed by them), build a simple
# name -> id mapping here so the dropdown choices match what query() expects.
loaded_models = {name: name for name in models}
def flip_image(x):
    return np.fliplr(x)


def clear():
    return None
def query(lora_id, prompt, is_negative=False, steps=28, cfg_scale=3.5, sampler="DPM++ 2M Karras", seed=-1, strength=100, width=896, height=1152):
    if prompt is None or prompt.strip() == "":
        return None, seed
    if lora_id is None or lora_id.strip() == "":
        lora_id = "black-forest-labs/FLUX.1-dev"
    key = random.randint(0, 999)
    API_URL = "https://api-inference.huggingface.co/models/" + lora_id.strip()
    headers = {"Authorization": f"Bearer {os.getenv('HF_READ_TOKEN')}"}

    # Translate the prompt (Russian -> English) and append quality tags
    prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
    print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')
    prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    print(f'\033[1mGeneration {key}:\033[0m {prompt}')

    # If seed is -1, generate a random seed and use it
    if seed == -1:
        seed = random.randint(1, 1000000000)

    # Prepare the payload for the API call, including width and height
    payload = {
        "inputs": prompt,
        "is_negative": is_negative,
        "steps": steps,
        "cfg_scale": cfg_scale,
        "seed": seed,
        "strength": strength,
        "parameters": {
            "width": width,    # Pass the width to the API
            "height": height   # Pass the height to the API
        }
    }

    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
    if response.status_code != 200:
        print(f"Error: Failed to get image. Response status: {response.status_code}")
        print(f"Response content: {response.text}")
        if response.status_code == 503:
            raise gr.Error(f"{response.status_code} : The model is being loaded")
        raise gr.Error(f"{response.status_code}")

    try:
        image = Image.open(io.BytesIO(response.content))
        print(f'\033[1mGeneration {key} completed!\033[0m ({prompt})')
        return image, seed
    except Exception as e:
        print(f"Error when trying to open the image: {e}")
        return None, seed
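
# A minimal sketch of calling query() directly, outside the Gradio UI. It assumes
# HF_READ_TOKEN is set and that the chosen repository is served by the Hugging Face
# inference API; the output file name below is illustrative only.
#
#   image, used_seed = query(
#       "black-forest-labs/FLUX.1-dev",
#       "a beautiful woman with blonde hair and blue eyes",
#       steps=28, cfg_scale=3.5, seed=-1, width=896, height=1152,
#   )
#   if image is not None:
#       image.save(f"sample_{used_seed}.png")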
# Example prompts shown below the output image
examples = [
    "a beautiful woman with blonde hair and blue eyes",
    "a beautiful woman with brown hair and grey eyes",
    "a beautiful woman with black hair and brown eyes",
]
css = """ | |
.title { font-size: 3em; align-items: center; text-align: center; } | |
.info { align-items: center; text-align: center; } | |
.model_info { text-align: center; } | |
.output { width=112px; height=112px; max_width=112px; max_height=112px; !important; } | |
.gallery { min_width=512px; min_height=512px; max_height=1024px; !important; } | |
""" | |
with gr.Blocks(theme=theme, fill_width=True, css=css) as app:
    with gr.Tab("Image Generator"):
        with gr.Row():
            with gr.Column(scale=10, elem_id="prompt-container"):
                with gr.Group():
                    with gr.Row(equal_height=True):
                        text_prompt = gr.Textbox(label="Image Prompt ✍️", placeholder="Enter a prompt here", lines=2, show_copy_button=True, elem_id="prompt-text-input")
                with gr.Row():
                    with gr.Accordion("🎨 Lora trigger words", open=False):
                        gr.Markdown("""
- **Canopus-Pencil-Art-LoRA**: Pencil Art
- **Flux-Realism-FineDetailed**: Fine Detailed
- **Fashion-Hut-Modeling-LoRA**: Modeling
- **SD3.5-Large-Turbo-HyperRealistic-LoRA**: hyper realistic
- **Flux-Fine-Detail-LoRA**: Super Detail
- **SD3.5-Turbo-Realism-2.0-LoRA**: Turbo Realism
- **Canopus-LoRA-Flux-UltraRealism-2.0**: Ultra realistic
- **SD3.5-Large-Photorealistic-LoRA**: photorealistic
- **Flux.1-Dev-LoRA-HDR-Realism**: HDR
- **prithivMLmods/Ton618-Epic-Realism-Flux-LoRA**: Epic Realism
- **john-singer-sargent-style**: John Singer Sargent Style
- **alphonse-mucha-style**: Alphonse Mucha Style
- **ultra-realistic-illustration**: ultra realistic illustration
- **eye-catching**: eye-catching
- **john-constable-style**: John Constable Style
- **film-noir**: in the style of FLMNR
- **flux-lora-pro-headshot**: PROHEADSHOT
""")
                with gr.Row():
                    custom_lora = gr.Dropdown(label="Select Model", choices=list(loaded_models.keys()), value=list(loaded_models.keys())[0], allow_custom_value=True)
                with gr.Accordion("Advanced options", open=False):
                    negative_prompt = gr.Textbox(label="Negative Prompt", lines=5, placeholder="What should not be in the image", value="(((hands:-1.25))), physical-defects:2, unhealthy-deformed-joints:2, unhealthy-hands:2, out of frame, (((bad face))), (bad-image-v2-39000:1.3), (((out of frame))), deformed body features, (((poor facial details))), (poorly drawn face:1.3), jpeg artifacts, (missing arms:1.1), (missing legs:1.1), (extra arms:1.2), (extra legs:1.2), [asymmetrical features], warped expressions, distorted eyes")
                    with gr.Row(equal_height=True):
                        width = gr.Slider(label="Image Width", value=896, minimum=64, maximum=1216, step=32)
                        height = gr.Slider(label="Image Height", value=1152, minimum=64, maximum=1216, step=32)
                        strength = gr.Slider(label="Prompt Strength", value=100, minimum=0, maximum=100, step=1)
                        steps = gr.Slider(label="Sampling steps", value=50, minimum=1, maximum=100, step=1)
                        cfg = gr.Slider(label="CFG Scale", value=3.5, minimum=1, maximum=20, step=0.5)
                        seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
                        method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ 2S a Karras", "DPM2 Karras", "DPM2 a Karras", "DPM++ SDE Karras", "DPM Adaptive", "DPM++ 2M", "DPM2 Ancestral", "DPM++ S", "DPM++ SDE", "DDPM", "DPM Fast", "dpmpp_2s_ancestral", "DEIS", "DDIM", "Euler CFG PP", "Euler", "Euler a", "Euler Ancestral", "Euler+beta", "Heun", "Heun PP2", "LMS", "LMS Karras", "PLMS", "UniPC", "UniPC BH2"])
                with gr.Row(equal_height=True):
                    with gr.Accordion("Seed", open=False):
                        seed_output = gr.Textbox(label="Seed Used", elem_id="seed-output")
                with gr.Row(equal_height=True):
                    image_num = gr.Slider(label="Number of images", minimum=1, maximum=max_images, value=1, step=1, interactive=True, scale=2)
                # Buttons to trigger image generation and to clear the prompt
                with gr.Row(equal_height=True):
                    text_button = gr.Button("Generate Image 🎨", variant="primary", elem_id="gen-button")
                    clear_prompt = gr.Button("Clear Prompt 🗑️", variant="primary", elem_id="clear_button")
                    clear_prompt.click(lambda: None, None, [text_prompt], queue=False, show_api=False)
            with gr.Column(scale=10):
                with gr.Group():
                    with gr.Row():
                        image_output = gr.Image(type="pil", label="Image Output", format="png", show_share_button=False, elem_id="gallery")
                with gr.Group():
                    with gr.Row():
                        gr.Examples(
                            examples=examples,
                            inputs=[text_prompt],
                        )
                with gr.Group():
                    with gr.Row():
                        clear_results = gr.Button(value="Clear Image 🗑️", variant="primary", elem_id="clear_button")
                        clear_results.click(lambda: None, None, [image_output], queue=False, show_api=False)

        text_button.click(query, inputs=[custom_lora, text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height], outputs=[image_output, seed_output])
app.queue(default_concurrency_limit=200, max_size=200)  # queue with a high concurrency limit

if __name__ == "__main__":
    app.launch(show_api=False, share=False)