import streamlit as st
from transformers import pipeline
from huggingface_hub import InferenceClient
from PIL import Image
# Streamlit page setup
st.set_page_config(page_title="MTSS Image Accessibility Alt Text Generator", layout="centered", initial_sidebar_state="auto")
# Add the image with a specified width
image_width = 300 # Set the desired width in pixels
st.image('MTSS.ai_Logo.png', width=image_width)
st.header('VisionTexts™ | Accessibility')
st.subheader('Image Alt Text Creator')
# Retrieve the Hugging Face API Key from secrets
huggingface_api_key = st.secrets["huggingface_api_key"]
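# The lookup above assumes a matching entry in .streamlit/secrets.toml (or, on a
# Hugging Face Space, a secret configured in the Space settings). A minimal
# sketch with a placeholder token value:
#
#   huggingface_api_key = "hf_xxxxxxxxxxxxxxxxxxxx"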
# Initialize the image captioning pipeline
image_captioner = pipeline(
    "image-to-text",
    model="Salesforce/blip-image-captioning-large",
    token=huggingface_api_key,  # `use_auth_token` is deprecated in recent transformers releases
)
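# For reference: an image-to-text pipeline returns a list of dicts such as
# [{'generated_text': 'a dog sitting on a couch'}]; the caption extraction
# further below relies on that shape. Since Streamlit reruns the whole script
# on every interaction, wrapping this pipeline in a function decorated with
# @st.cache_resource would avoid reloading the model each time.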
# Initialize the language model client
client = InferenceClient(token=huggingface_api_key)
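# Note: meta-llama/Llama-2-7b-chat-hf is a gated model, so the token above must
# belong to an account that has accepted the Llama 2 license; the client calls
# the hosted Inference API rather than running the model locally.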
# File uploader allows user to add their own image
uploaded_file = st.file_uploader("Upload an image", type=["jpg", "png", "jpeg"])
if uploaded_file:
    # Display the uploaded image
    image = Image.open(uploaded_file)
    image_width = 200  # Set the desired width in pixels
    with st.expander("Image", expanded=True):
        st.image(image, caption=uploaded_file.name, width=image_width)
else:
    st.warning("Please upload an image.")
# Toggle for showing additional details input
show_details = st.checkbox("Add details about the image.", value=False)
if show_details:
    # Text input for additional details about the image
    additional_details = st.text_area(
        "The details could include specific information that is important to include in the alt text or reflect why the image is being used:"
    )
else:
    additional_details = ""
# Toggle for modifying the prompt for complex images
complex_image = st.checkbox("Is this a complex image?", value=False)
# Button to trigger the analysis
analyze_button = st.button("Analyze the Image", type="secondary")
# Optimized prompt for complex images
complex_image_prompt_text = (
    "As an expert in image accessibility and alternative text, thoroughly describe the image caption provided. "
    "Provide a brief description using not more than 500 characters that conveys the essential information in eight or fewer clear and concise sentences. "
    "Skip phrases like 'image of' or 'picture of.' "
    "Your description should form a clear, well-structured, and factual paragraph that avoids bullet points, focusing on creating a seamless narrative."
)
# Check if an image has been uploaded and if the button has been pressed
if uploaded_file is not None and analyze_button:
    with st.spinner("Analyzing the image..."):
        # Get a caption for the image from the image captioning model
        caption_response = image_captioner(image)
        image_caption = caption_response[0]['generated_text']
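        # The language model below only ever sees this short caption, not the
        # image itself, so the generated alt text can only be as detailed as
        # the caption plus any user-provided context.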
        # Determine which prompt to use based on the complexity of the image
        if complex_image:
            prompt_text = complex_image_prompt_text
        else:
            prompt_text = (
                "As an expert in image accessibility and alternative text, succinctly describe the image caption provided in less than 125 characters. "
                "Provide a brief description using not more than 125 characters that conveys the essential information in three or fewer clear and concise sentences for use as alt text. "
                "Skip phrases like 'image of' or 'picture of.' "
                "Your description should form a clear, well-structured, and factual paragraph that avoids bullet points and newlines, focusing on creating a seamless narrative for accessibility purposes."
            )
        # Include additional details if provided
        if additional_details:
            prompt_text += f"\n\nInclude the additional context provided by the user in your description:\n{additional_details}"
        # Create the prompt for the language model
        full_prompt = f"{prompt_text}\n\nImage Caption: {image_caption}"
        # Prepare messages for the chat interface
        messages = [
            {"role": "user", "content": full_prompt}
        ]
        # Use the language model to generate the alt text description
        try:
            # Stream the response from the language model; InferenceClient
            # provides chat-style generation through chat_completion()
            stream = client.chat_completion(
                model="meta-llama/Llama-2-7b-chat-hf",
                messages=messages,
                max_tokens=512,  # assumed cap so descriptions are not truncated at the server default
                stream=True,
            )
            # Accumulate the streamed tokens and update the placeholder as they arrive
            full_response = ""
            message_placeholder = st.empty()
            for chunk in stream:
                content = chunk.choices[0].delta.content
                if content:
                    full_response += content
                    message_placeholder.markdown(full_response + "▌")
            # Final update after the stream ends
            message_placeholder.markdown(full_response)
            st.success('Powered by MTSS GPT. AI can make mistakes. Consider checking important information.')
        except Exception as e:
            st.error(f"An error occurred: {e}")
else:
    st.write("Please upload an image and click 'Analyze the Image' to generate alt text.")
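# To run locally, a sketch assuming this file is saved as app.py, the logo file
# sits alongside it, and the secret above is configured:
#
#   pip install streamlit transformers torch huggingface_hub pillow
#   streamlit run app.py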