|
import streamlit as st |
|
from diffusers import DiffusionPipeline |
|
from peft import PeftModel |
|
from PIL import Image |
|
import torch |
|
|
|
|
|
@st.cache_resource
def load_pipeline():
    """Load the Stable Diffusion v1.5 pipeline with fine-tuned LoRA weights.

    Cached via ``st.cache_resource`` so the model is loaded once per server
    process rather than on every Streamlit rerun.

    Returns:
        DiffusionPipeline: the pipeline, moved to CUDA when a GPU is available.

    Raises:
        ValueError: if LoRA loading fails (e.g. the PEFT backend is missing).
    """
    pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

    try:
        pipe.load_lora_weights("Melonie/text_to_image_finetuned")
    except ValueError:
        st.error("PEFT backend is required but not properly set up.")
        # Bare `raise` re-raises the active exception with its original
        # traceback intact (clearer than `raise e`).
        raise

    if torch.cuda.is_available():
        pipe.to("cuda")
    return pipe
|
|
|
# Build (or fetch from cache) the generation pipeline once per session.
pipe = load_pipeline()

# Page chrome and prompt entry widget.
st.title("Text-to-Image Generation App")

default_prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
user_prompt = st.text_input(
    label="Enter your image prompt",
    value=default_prompt,
    help="Provide a detailed description of the image you'd like to generate.",
)
|
|
|
|
|
# Run one generation when the user clicks the button.
if st.button("Generate Image"):
    # Reject empty or whitespace-only prompts before invoking the model.
    if user_prompt.strip():
        with st.spinner("Generating image..."):
            try:
                # inference_mode() skips autograd bookkeeping entirely —
                # identical output, lower memory use during generation.
                with torch.inference_mode():
                    image = pipe(user_prompt).images[0]
                # use_container_width replaces the deprecated
                # use_column_width keyword in recent Streamlit releases.
                st.image(image, caption="Generated Image", use_container_width=True)
            except Exception as e:
                # Surface the failure in the UI rather than crashing the app.
                st.error(f"Error generating image: {str(e)}")
    else:
        st.error("Please enter a valid prompt.")
|
|