ProfessorLeVesseur committed on
Commit 4c77641 · verified · 1 Parent(s): d5a2742

Update app.py

Files changed (1)
  1. app.py +41 -25
app.py CHANGED
@@ -1,14 +1,18 @@
 import streamlit as st
 import base64
 import json
-from huggingface_hub import InferenceClient
+import requests
 
-# Function to read the image file and return a base64-encoded string
-def get_image_base64(image_file):
-    return base64.b64encode(image_file.read()).decode('utf-8')
+# Function to read the image file
+def get_image_bytes(image_file):
+    return image_file.read()
 
 # Streamlit page setup
-st.set_page_config(page_title="MTSS Image Accessibility Alt Text Generator", layout="centered", initial_sidebar_state="auto")
+st.set_page_config(
+    page_title="MTSS Image Accessibility Alt Text Generator",
+    layout="centered",
+    initial_sidebar_state="auto"
+)
 
 # Add the image with a specified width
 image_width = 300  # Set the desired width in pixels
@@ -17,8 +21,8 @@ st.image('MTSS.ai_Logo.png', width=image_width)
 st.header('VisionTexts™ | Accessibility')
 st.subheader('Image Alt Text Creator')
 
-# Initialize the Hugging Face InferenceClient with the API key from secrets
-client = InferenceClient(api_key=st.secrets["huggingface_api_key"])
+# Initialize the API key from Streamlit secrets
+api_key = st.secrets["huggingface_api_key"]
 
 # File uploader
 uploaded_file = st.file_uploader("Upload an image", type=["jpg", "png", "jpeg"])
@@ -63,9 +67,8 @@ complex_image_prompt_text = (
 if uploaded_file is not None and analyze_button:
 
     with st.spinner("Analyzing the image ..."):
-        # Get base64-encoded image string
-        image_bytes = uploaded_file.read()
-        base64_image_string = base64.b64encode(image_bytes).decode('utf-8')
+        # Read the image bytes
+        image_bytes = get_image_bytes(uploaded_file)
 
         # Detect the image content type
         import imghdr
@@ -99,32 +102,45 @@ if uploaded_file is not None and analyze_button:
             }
         ]
 
-        # Attachments array containing the image
-        attachments = [
-            {
-                "type": "image",
-                "content": base64_image_string,
-                "content_type": content_type,
-            }
-        ]
+        # Prepare headers and endpoint
+        headers = {
+            "Authorization": f"Bearer {api_key}"
+        }
+        api_url = "https://api-inference.huggingface.co/v1/chat/completions"
+
+        # Prepare the data payload
+        payload = {
+            "model": "meta-llama/Llama-3.2-11B-Vision-Instruct",
+            "messages": messages,
+            "max_tokens": 500
+        }
 
         # Make the request to the Hugging Face API
         try:
-            # Send the request to the model
-            completion = client.chat.completions.create(
-                model="meta-llama/Llama-3.2-11B-Vision-Instruct",
-                messages=messages,
-                attachments=attachments,
-                max_tokens=500
+            # Send the request with the image file in the 'files' parameter
+            response = requests.post(
+                api_url,
+                headers=headers,
+                data={"data": json.dumps(payload)},
+                files={"file": ("image", image_bytes, content_type)},
+                timeout=60  # Optional: increase timeout if needed
             )
 
+            # Check for errors
+            response.raise_for_status()
+
+            # Parse the response
+            completion = response.json()
+
             # Extract the assistant's response
-            assistant_response = completion.choices[0].message['content']
+            assistant_response = completion['choices'][0]['message']['content']
 
             # Display the response
             st.markdown(assistant_response)
 
             st.success('Powered by MTSS GPT. AI can make mistakes. Consider checking important information.')
+        except requests.exceptions.HTTPError as http_err:
+            st.error(f"HTTP error occurred: {http_err}")
         except Exception as e:
             st.error(f"An error occurred: {e}")
 else:
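
The commit replaces the huggingface_hub.InferenceClient call with a raw requests.post to the Inference API chat-completions route, sending the JSON payload as a form field and the raw image bytes as a multipart file part. Below is a minimal sketch of exercising that same request path outside Streamlit, useful for checking the token and endpoint in isolation. The local file name, token value, and prompt text are placeholders, not values from the app, and whether the chat-completions route accepts this multipart form is taken from the commit as-is, not verified here.

import json
import mimetypes
import requests

API_URL = "https://api-inference.huggingface.co/v1/chat/completions"  # endpoint used in the commit
HF_TOKEN = "hf_..."  # placeholder; the app reads this from st.secrets["huggingface_api_key"]

image_path = "example.png"  # placeholder local image
content_type = mimetypes.guess_type(image_path)[0] or "image/png"

payload = {
    "model": "meta-llama/Llama-3.2-11B-Vision-Instruct",
    "messages": [{"role": "user", "content": "Describe this image for alt text."}],  # stand-in prompt
    "max_tokens": 500,
}

with open(image_path, "rb") as f:
    response = requests.post(
        API_URL,
        headers={"Authorization": f"Bearer {HF_TOKEN}"},
        data={"data": json.dumps(payload)},          # JSON payload as a form field, as in the commit
        files={"file": ("image", f, content_type)},  # raw image bytes as a multipart part
        timeout=60,
    )

response.raise_for_status()
print(response.json()["choices"][0]["message"]["content"])  # same parsing as the updated app.py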