# MTSS.ai VisionTexts — Streamlit app that generates accessibility alt text
# for uploaded images via a Hugging Face vision-language model.
# Third-party dependencies: Streamlit UI and the Hugging Face Inference API client.
import base64

import streamlit as st
from huggingface_hub import InferenceClient
def encode_image(image_file) -> str:
    """Return the file's bytes as a base64-encoded ASCII string.

    Parameters
    ----------
    image_file:
        Any object with a ``getvalue() -> bytes`` method (e.g. a Streamlit
        ``UploadedFile`` or an ``io.BytesIO`` buffer).
    """
    return base64.b64encode(image_file.getvalue()).decode("utf-8")
# Streamlit page setup
st.set_page_config(page_title="MTSS Image Accessibility Alt Text Generator", layout="centered", initial_sidebar_state="auto")

# Branding header: logo image rendered at a fixed pixel width.
image_width = 300  # Desired logo width in pixels
st.image('MTSS.ai_Logo.png', width=image_width)

st.header('VisionTexts™ | Accessibility')
st.subheader('Image Alt Text Creator')

# Hugging Face Inference client, authenticated with the API key stored in
# Streamlit secrets (".streamlit/secrets.toml" or the Space's secret store).
client = InferenceClient(api_key=st.secrets["huggingface_api_key"])
# File uploader — accepts a single JPEG or PNG image.
uploaded_file = st.file_uploader("Upload an image", type=["jpg", "png", "jpeg"])

if uploaded_file:
    # Preview the uploaded image inside a collapsible expander.
    image_width = 200  # Desired preview width in pixels
    with st.expander("Image", expanded=True):
        st.image(uploaded_file, caption=uploaded_file.name, width=image_width, use_column_width=False)

# Toggle for showing additional details input
show_details = st.checkbox("Add details about the image.", value=False)

if show_details:
    # Free-text context the user wants reflected in the generated alt text.
    additional_details = st.text_area(
        "The details could include specific information that is important to include in the alt text or reflect why the image is being used:",
        disabled=not show_details
    )

# Toggle for modifying the prompt for complex images
complex_image = st.checkbox("Is this a complex image?", value=False)

if complex_image:
    # Explain that the complex-image mode produces a long description instead
    # of a 125-character alt text.
    st.caption(
        "By clicking this toggle, it will instruct the app to create a description that exceeds the 125-character limit. "
        "Add the description in a placeholder behind the image and 'Description in the content placeholder' in the alt text box."
    )

# Button to trigger the analysis
analyze_button = st.button("Analyze the Image", type="secondary")
# Prompt used when the user flags the image as complex: asks the model for a
# longer (up to 500 characters) narrative description instead of short alt text.
complex_image_prompt_text = (
    "As an expert in image accessibility and alternative text, thoroughly describe the image provided. "
    "Provide a brief description using not more than 500 characters that conveys the essential information in eight or fewer clear and concise sentences. "
    "Skip phrases like 'image of' or 'picture of.' "
    "Your description should form a clear, well-structured, and factual paragraph that avoids bullet points, focusing on creating a seamless narrative."
)
# Run the analysis once an image is uploaded and the button is pressed.
if uploaded_file is not None and analyze_button:
    with st.spinner("Analyzing the image ..."):
        # Encode the image as base64 so it can travel in the JSON payload.
        base64_image = encode_image(uploaded_file)

        # Choose the prompt based on whether the image was flagged as complex.
        if complex_image:
            prompt_text = complex_image_prompt_text
        else:
            prompt_text = (
                "As an expert in image accessibility and alternative text, succinctly describe the image provided in less than 125 characters. "
                "Provide a brief description using not more than 125 characters that conveys the essential information in three or fewer clear and concise sentences for use as alt text. "
                "Skip phrases like 'image of' or 'picture of.' "
                "Your description should form a clear, well-structured, and factual paragraph that avoids bullet points and newlines, focusing on creating a seamless narrative for accessibility purposes."
            )

        # Fold in any user-supplied context. `show_details` short-circuits, so
        # `additional_details` is only read when the text area was rendered.
        if show_details and additional_details:
            prompt_text += (
                f"\n\nInclude the additional context provided by the user in your description:\n{additional_details}"
            )

        # OpenAI-style multimodal chat payload: the image is passed as a
        # base64 data URI under "image_url", which is what the Hugging Face
        # chat-completion API expects (raw bytes are not JSON-serializable).
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt_text},
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{base64_image}"
                        },
                    },
                ],
            }
        ]

        try:
            # InferenceClient exposes `chat_completion` (not `chat_completions`);
            # the token limit parameter for chat is `max_tokens`.
            completion = client.chat_completion(
                model="meta-llama/Llama-3.2-11B-Vision-Instruct",
                messages=messages,
                max_tokens=1200,
            )

            # The response is a ChatCompletionOutput object, accessed by
            # attribute rather than dict `.get()`.
            assistant_response = completion.choices[0].message.content

            # Display the generated description.
            st.markdown(assistant_response)
            st.success('Powered by MTSS GPT. AI can make mistakes. Consider checking important information.')
        except Exception as e:
            # Surface API/network failures to the user instead of crashing.
            st.error(f"An error occurred: {e}")
else:
    # Prompt the user to upload an image if they pressed the button too early.
    if not uploaded_file and analyze_button:
        st.warning("Please upload an image.")