import streamlit as st
from PIL import Image
from diffusers import DiffusionPipeline
from huggingface_hub import login
import os

# Set a custom directory to cache the downloaded model
MODEL_DIR = "./saved_models"

# Hugging Face login and model loading
def authenticate_and_load_model(hf_token):
    """
    Log in to Hugging Face, then download the FLUX.1-dev base model and the
    lehenga LoRA weights, caching them under MODEL_DIR for reuse.
    (See the note after this function on persisting a fully local copy.)
    """
    try:
        # Log in to Hugging Face
        login(token=hf_token)

        # Load the base model and LoRA weights, caching them in MODEL_DIR.
        # `token` is the current name of the auth kwarg; login() above already
        # authenticates the session, so passing it here is belt-and-braces.
        pipe = DiffusionPipeline.from_pretrained(
            "black-forest-labs/FLUX.1-dev",
            cache_dir=MODEL_DIR,
            token=hf_token,
        )
        pipe.load_lora_weights(
            "tryonlabs/FLUX.1-dev-LoRA-Lehenga-Generator",
            cache_dir=MODEL_DIR,
            token=hf_token,
        )
        return pipe
    except Exception as e:
        st.error(f"Error during login or model loading: {e}")
        return None

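# Note (a sketch, not part of the app above): cache_dir only controls where the Hub
# download is cached. If the goal is a fully self-contained local copy that can be
# reloaded without re-downloading, the pipeline could also be exported explicitly
# with standard diffusers calls, e.g.:
#
#     pipe.save_pretrained(MODEL_DIR)                      # write all pipeline components to ./saved_models
#     pipe = DiffusionPipeline.from_pretrained(MODEL_DIR)  # reload from the local folder later
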
# Streamlit app
st.title("Lehenga Dress Image Generator")
st.write("Enter a description to generate an image of a lehenga dress.")

# Hugging Face token input
hf_token = st.text_input("Enter your Hugging Face Token:", type="password")

pipe = None
if hf_token:
    # Cache the loaded pipeline in session state so Streamlit reruns don't reload it;
    # retry loading if a previous attempt failed and stored None (e.g. a bad token).
    if "pipe" not in st.session_state or st.session_state.pipe is None:
        with st.spinner("Authenticating and loading the model..."):
            st.session_state.pipe = authenticate_and_load_model(hf_token)
    pipe = st.session_state.pipe

# Input prompt
prompt = st.text_area(
    "Enter your prompt:",
    "A flat-lay image of a lehenga with a traditional style and a fitted waistline is elegantly crafted from stretchy silk material, ensuring a comfortable and flattering fit. The long hemline adds a touch of grace and sophistication to the ensemble. Adorned in a solid blue color, it features a sleeveless design that complements its sweetheart neckline. The solid pattern and the luxurious silk fabric together create a timeless and chic look that is perfect for special occasions."
)

# Generate button
if st.button("Generate Image"):
    if not hf_token:
        st.error("Please enter your Hugging Face token.")
    elif not pipe:
        st.error("Model not loaded. Please check your Hugging Face token.")
    elif prompt.strip():
        with st.spinner("Generating image..."):
            try:
                # Generate the image
                result = pipe(prompt).images[0]

                # Display the image
                st.image(result, caption="Generated Lehenga Image", use_column_width=True)
            except Exception as e:
                st.error(f"An error occurred during image generation: {e}")
    else:
        st.warning("Please enter a valid prompt.")

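# Optional extension (a hedged sketch, not in the original app): offer the generated
# PIL image as a download via Streamlit's built-in download button.
#
#     import io
#     buf = io.BytesIO()
#     result.save(buf, format="PNG")
#     st.download_button("Download image", data=buf.getvalue(),
#                        file_name="lehenga.png", mime="image/png")
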
st.write("This app uses AI to generate images of lehenga dresses based on your input description.") | |