# Wom_test / app.py
import gradio as gr
import torch
from diffusers import DiffusionPipeline

# Device settings for CPU-only use
device = "cpu"
model_repo_id = "stabilityai/stable-diffusion-xl-base-1.0"  # Continuing with the specified model

# Load the pipeline configured for CPU
pipe = DiffusionPipeline.from_pretrained(model_repo_id)
pipe = pipe.to(device)
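
# Optional (suggestion, not in the original app): attention slicing trades a little
# speed for lower peak memory, which can help on CPU-only machines.
# pipe.enable_attention_slicing()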
print("Detalhes do dataset:", dataset)
# Carrega o dataset de imagens
# Definir parâmetros padrão para geração rápida
DEFAULT_PROMPT = "A beautiful brunette woman wearing blue leather pants"
DEFAULT_INFERENCE_STEPS = 6
IMAGE_WIDTH = 512
IMAGE_HEIGHT = 816
GUIDANCE_SCALE = 5.5
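
# Few inference steps and a moderate guidance scale keep CPU generation time
# manageable; more steps generally improve quality at a proportional time cost.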

def resize_to_divisible_by_8(image):
    # Round each dimension up to the next multiple of 8
    width, height = image.size
    new_width = width + (8 - width % 8) if width % 8 != 0 else width
    new_height = height + (8 - height % 8) if height % 8 != 0 else height
    return image.resize((new_width, new_height))
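
# Example: a 510x815 image becomes 512x816, while an image already sized 512x816
# is returned unchanged (both dimensions are multiples of 8).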

# Simple function to generate an image
def infer_simple(prompt):
    # Generate the image
    image = pipe(
        prompt=prompt,
        num_inference_steps=DEFAULT_INFERENCE_STEPS,
        guidance_scale=GUIDANCE_SCALE,
        height=IMAGE_HEIGHT,
        width=IMAGE_WIDTH,
    ).images[0]
    # Resize the image to be divisible by 8
    image = resize_to_divisible_by_8(image)
    return image
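
# Optional (suggestion, not in the original app): the SDXL pipeline call also accepts
# a negative_prompt argument (e.g. "blurry, low quality") to steer results away from
# unwanted traits.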

# Gradio interface
with gr.Blocks() as demo:
    with gr.Row():
        gr.Markdown("## Text-to-Image Wom Test - Quick CPU Version")
    prompt = gr.Textbox(
        label="Prompt",
        value=DEFAULT_PROMPT,
        placeholder="Describe the image you want to generate",
    )
    generate_button = gr.Button("Generate")
    result = gr.Image(label="Generated Image")
    generate_button.click(
        fn=infer_simple,
        inputs=prompt,
        outputs=result,
    )
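
# Optional (suggestion, not in the original app): queuing serializes requests, which
# helps when each CPU generation takes a long time.
# demo.queue()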

if __name__ == "__main__":
    demo.launch()