# SelamGPT — SDXL-Turbo text-to-image generator (Gradio app).
import gradio as gr
import numpy as np
import random
from PIL import Image, ImageDraw, ImageFont
import torch
from diffusers import DiffusionPipeline
# ===== CONFIG =====
# Prefer GPU with half precision; fall back to CPU with full float32.
device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if device == "cuda" else torch.float32
model_repo_id = "stabilityai/sdxl-turbo"
# NOTE(review): downloads the checkpoint on first run; the "fp16" variant
# is requested only on GPU — confirm the repo publishes that variant.
pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype, variant="fp16" if device == "cuda" else None)
pipe.to(device)
MAX_SEED = np.iinfo(np.int32).max  # upper bound for user-visible seeds (fits int32)
IMAGE_SIZE = 1024  # square output resolution passed as width and height
WATERMARK_TEXT = "SelamGPT"  # stamped onto every generated image
# ===== WATERMARK FUNCTION =====
def add_watermark(image, text=None):
    """Stamp a watermark into the bottom-right corner of *image*.

    Args:
        image: PIL image to draw on (modified in place).
        text: Watermark string; defaults to the module-level WATERMARK_TEXT.

    Returns:
        The same image object, for call-chaining convenience.
    """
    if text is None:
        text = WATERMARK_TEXT
    draw = ImageDraw.Draw(image)
    font_size = 24
    try:
        font = ImageFont.truetype("Roboto-Bold.ttf", font_size)
    except OSError:
        # Font file missing/unreadable — fall back to PIL's built-in bitmap
        # font instead of crashing. (Was a bare `except:`, which also
        # swallowed KeyboardInterrupt/SystemExit and real bugs.)
        font = ImageFont.load_default()
    text_width = draw.textlength(text, font=font)
    # Bottom-right placement with a 10px right margin.
    x = image.width - text_width - 10
    y = image.height - 34
    # Dark offset pass first so the white text stays readable on light
    # backgrounds (the alpha component is ignored on plain RGB images).
    draw.text((x + 1, y + 1), text, font=font, fill=(0, 0, 0, 128))
    draw.text((x, y), text, font=font, fill=(255, 255, 255))
    return image
# ===== INFERENCE FUNCTION =====
def generate(
    prompt,
    negative_prompt,
    seed,
    randomize_seed,
    guidance_scale,
    num_inference_steps,
    progress=gr.Progress(track_tqdm=True),
):
    """Run one text-to-image generation and return (image, seed_used).

    On an empty prompt, returns (None, warning_message) so the UI can
    surface the problem in the seed textbox instead of crashing.
    """
    # Guard against None as well as whitespace-only prompts — Gradio can
    # deliver None when the textbox was never touched.
    if not prompt or not prompt.strip():
        return None, "⚠️ Please enter a prompt"
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    # Slider values arrive as floats; seeding requires an int.
    seed = int(seed)
    generator = torch.manual_seed(seed)
    result = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=IMAGE_SIZE,
        height=IMAGE_SIZE,
        guidance_scale=guidance_scale,
        num_inference_steps=int(num_inference_steps),
        generator=generator,
    ).images[0]
    image = add_watermark(result)
    return image, seed
# ===== EXAMPLES =====
# Clickable sample prompts shown under the prompt box via gr.Examples.
examples = [
    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
    "An astronaut riding a green horse",
    "A delicious ceviche cheesecake slice",
]
# ===== INTERFACE =====
css = "#container { max-width: 700px; margin: auto; }"
# Declarative UI: prompt row + trigger button, output image, seed readout,
# and an accordion of advanced knobs, all wired to generate() on click.
with gr.Blocks(css=css, title="SelamGPT Turbo Generator") as demo:
    with gr.Column(elem_id="container"):
        gr.Markdown("# 🖼️ SelamGPT Image Generator")
        # Prompt input sits next to the Generate button in one row.
        with gr.Row():
            prompt = gr.Textbox(
                label="Prompt",
                show_label=False,
                placeholder="Enter your prompt",
                lines=1,
                scale=3
            )
            generate_btn = gr.Button("Generate", variant="primary")
        output_image = gr.Image(label="Generated Image", type="pil", format="png")
        # Shows the seed actually used (or the empty-prompt warning string).
        seed_display = gr.Textbox(label="Seed Used", interactive=False)
        with gr.Accordion("⚙️ Advanced Settings", open=False):
            negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What to avoid (optional)", max_lines=1)
            randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
            seed = gr.Slider(0, MAX_SEED, step=1, label="Seed", value=0)
            # Defaults (guidance 0.0, 2 steps) match the sdxl-turbo model.
            guidance_scale = gr.Slider(0.0, 10.0, step=0.1, label="Guidance Scale", value=0.0)
            num_inference_steps = gr.Slider(1, 50, step=1, label="Inference Steps", value=2)
        gr.Examples(examples=examples, inputs=[prompt])
    # Input order must match generate()'s positional parameters exactly.
    generate_btn.click(
        fn=generate,
        inputs=[
            prompt,
            negative_prompt,
            seed,
            randomize_seed,
            guidance_scale,
            num_inference_steps
        ],
        outputs=[output_image, seed_display]
    )
if __name__ == "__main__":
    demo.launch()