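"""VisionText™ | MTSS.ai image alt-text generator (Streamlit app).

Lets the user upload an image, optionally add context, and asks an OpenAI
vision model for accessibility-focused alt text, streamed back to the page.
Run locally with `streamlit run <this file>`.
"""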
import streamlit as st
import base64
import openai

# Encode an uploaded image to base64 (UploadedFile.getvalue() returns the raw bytes)
def encode_image(image_file):
    return base64.b64encode(image_file.getvalue()).decode("utf-8")

# Streamlit page setup
st.set_page_config(page_title="MTSS Image Accessibility Alt Text Generator", layout="centered", initial_sidebar_state="collapsed")

# Add the logo image at a specified width
image_width = 300  # Desired display width in pixels
st.image('MTSS.ai_Logo.png', width=image_width)

st.title('VisionText™ | Accessibility')
st.subheader(':green[_Image Alt Text Generator_]')

# Retrieve the OpenAI API key from Streamlit secrets
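# (st.secrets reads .streamlit/secrets.toml locally; a hosted app must supply
# the same key through the host's secrets settings.)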
openai.api_key = st.secrets["openai_api_key"]

# File uploader allows the user to add their own image
uploaded_file = st.file_uploader("Upload an image", type=["jpg", "png", "jpeg"])

if uploaded_file:
    # Display the uploaded image at a specified width
    image_width = 150  # Desired display width in pixels
    with st.expander("Image", expanded=True):
        st.image(uploaded_file, caption=uploaded_file.name, width=image_width, use_column_width=False)

# Toggle for showing the additional-details input
show_details = st.toggle("Add details about the image.", value=False)

# Toggle for modifying the prompt for complex images
complex_image = st.toggle("Is this a complex image?", value=False)

if show_details:
    # Text input for additional details about the image, shown only if the toggle is on
    additional_details = st.text_area(
        "The details could include specific information that is important to include in the alt text or reflect why the image is being used:",
        disabled=not show_details,
    )

# Button to trigger the analysis
analyze_button = st.button("Analyze the Image", type="secondary")

# Optimized prompt for complex images
complex_image_prompt_text = (
    "As an expert in image accessibility and alternative text, thoroughly describe the image provided. "
    "Provide a description of no more than 500 characters that conveys the essential information in eight or fewer clear, concise sentences. "
    "Skip phrases like 'image of' or 'picture of.' "
    "Your description should form a clear, well-structured, and factual paragraph that avoids bullet points, focusing on creating a seamless narrative."
)

# Run the analysis once an image has been uploaded and the button has been pressed
if uploaded_file is not None and analyze_button:
    with st.spinner("Analyzing the image ..."):
        # Encode the image
        base64_image = encode_image(uploaded_file)

        # Choose the prompt based on the complexity of the image
        if complex_image:
            prompt_text = complex_image_prompt_text
        else:
            prompt_text = (
                "As an expert in image accessibility and alternative text, succinctly describe the image provided. "
                "Provide a description of no more than 125 characters that conveys the essential information in three or fewer clear, concise sentences for use as alt text. "
                "Skip phrases like 'image of' or 'picture of.' "
                "Your description should form a clear, well-structured, and factual paragraph that avoids bullet points and newlines, focusing on creating a seamless narrative that serves as effective alternative text for accessibility purposes."
            )

        # Append any user-provided context to the prompt
        if show_details and additional_details:
            prompt_text += (
                f"\n\nAdditional Context Provided by the User:\n{additional_details}"
            )

        # Create the payload for the completion request; the image travels inline
        # as a base64 data URL rather than a hosted URL
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt_text},
                    {
                        "type": "image_url",
                        "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
                    },
                ],
            }
        ]
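
        # Note: the data URL above hardcodes image/jpeg even for PNG uploads. The API
        # generally tolerates this, but `uploaded_file.type` carries the actual MIME
        # type if an exact data URL is preferred (optional hardening, not in the original).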

        # Make the request to the OpenAI API
        try:
            # Non-streaming alternative:
            # response = openai.chat.completions.create(
            #     model="gpt-4-vision-preview", messages=messages, max_tokens=250, stream=False
            # )

            # Stream the response into a placeholder as it arrives
            full_response = ""
            message_placeholder = st.empty()
            for completion in openai.chat.completions.create(
                model="gpt-4-vision-preview", messages=messages,
                max_tokens=250, stream=True
            ):
                # Append any new content from this chunk and show a typing cursor
                if completion.choices[0].delta.content is not None:
                    full_response += completion.choices[0].delta.content
                    message_placeholder.markdown(full_response + "▌")

            # Replace the streaming placeholder with the final response in a text area
            message_placeholder.empty()
            st.text_area('Response:', value=full_response, height=400, key="response_text_area")
            st.success('Powered by MTSS GPT. AI can make mistakes. Consider checking important information.')
        except Exception as e:
            st.error(f"An error occurred: {e}")
else:
    # Warn if the button was pressed without an image
    if not uploaded_file and analyze_button:
        st.warning("Please upload an image.")
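
# Assumed packaging, not part of the original: a requirements.txt listing
# `streamlit` and `openai` covers this app's third-party dependencies.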