import os
import streamlit as st
from dotenv import load_dotenv
from huggingface_hub import InferenceApi
from PIL import Image
from io import BytesIO
import requests
import base64
# Load environment variables from the .env file
load_dotenv()
# Hugging Face API token
HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")
# Initialize the Hugging Face Inference API
inference = InferenceApi(repo_id="stabilityai/stable-diffusion-3.5-large", token=HUGGINGFACE_API_TOKEN)
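# Note: InferenceApi is the legacy client in huggingface_hub; recent releases
# deprecate it in favor of InferenceClient (whose text_to_image() returns a PIL
# image directly), so this initialization may need updating depending on the
# installed huggingface_hub version.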
# Streamlit App UI
st.set_page_config(page_title="Stable Diffusion Demo", page_icon="🖼️")
st.title("Stable Diffusion 3.5 - Text-to-Image")
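# Surface a hint in the UI if the API token was not found, since requests to the
# hosted model may otherwise fail with an authentication error.
if not HUGGINGFACE_API_TOKEN:
    st.warning("HUGGINGFACE_API_TOKEN is not set; add it to your .env file.")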
# Text input for the prompt
prompt = st.text_input("Enter a prompt for the image:")
# Button to generate the image
if st.button("Generate Image"):
    if prompt:
        try:
            # Make request to the Hugging Face model
            output = inference(inputs=prompt)
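            # The returned object's shape may vary (a PIL image, a dict with a
            # base64 payload, or a dict with an image URL), so check each case below.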
            # Check if the output is a valid PIL image (already in image format)
            if isinstance(output, Image.Image):
                image = output
            # Check if the output contains a base64-encoded string
            elif isinstance(output, dict) and 'generated_image_base64' in output:
                # Decode the base64 string to bytes
                image_data = base64.b64decode(output['generated_image_base64'])
                image = Image.open(BytesIO(image_data))
            # If output contains an image URL
            elif isinstance(output, dict) and 'generated_image_url' in output:
                response = requests.get(output['generated_image_url'])
                image = Image.open(BytesIO(response.content))
            else:
                st.error("Unexpected output format from the inference API.")
                image = None
            # Display the image
            if image:
                st.image(image, caption="Generated Image", use_column_width=True)
        except Exception as e:
            st.error(f"Error: {str(e)}")
    else:
        st.warning("Please enter a prompt.")