Update app.py

app.py CHANGED

@@ -1,7 +1,7 @@
 import streamlit as st
 from google.cloud import vision
 import os
-from PIL import Image, ImageDraw
+from PIL import Image, ImageDraw, ImageFont
 import io
 import numpy as np
 from streamlit_option_menu import option_menu
@@ -141,7 +141,7 @@ def display_results(annotated_img, labels, objects, text):

     with col1:
         st.markdown('<div class="subheader">Analyzed Image</div>', unsafe_allow_html=True)
-        st.image(annotated_img,
+        st.image(annotated_img, use_container_width=True)

     with col2:
         st.markdown('<div class="subheader">Analysis Results</div>', unsafe_allow_html=True)
@@ -168,6 +168,94 @@ def display_results(annotated_img, labels, objects, text):
         st.markdown('<div class="result-container">', unsafe_allow_html=True)
         st.markdown(f'<div class="text-item">{text}</div>', unsafe_allow_html=True)
         st.markdown('</div>', unsafe_allow_html=True)
+
+    # Add Download Summary Image button
+    summary_img = create_summary_image(annotated_img, labels, objects, text)
+    buf = io.BytesIO()
+    summary_img.save(buf, format="JPEG", quality=90)
+    byte_im = buf.getvalue()
+
+    st.download_button(
+        label="📥 Download Complete Results Summary",
+        data=byte_im,
+        file_name="analysis_summary.jpg",
+        mime="image/jpeg",
+        help="Download a complete image showing the analyzed image and all detected features"
+    )
+
+def create_summary_image(annotated_img, labels, objects, text):
+    """Create a downloadable summary image with analysis results"""
+    # Create a new image with space for results
+    img_width, img_height = annotated_img.size
+    # Make room for text results (adjust height based on content)
+    result_height = 400  # Space for results
+    summary_img = Image.new('RGB', (img_width, img_height + result_height), color=(255, 255, 255))
+
+    # Paste the annotated image at the top
+    summary_img.paste(annotated_img, (0, 0))
+
+    # Create a drawing object
+    draw = ImageDraw.Draw(summary_img)
+
+    # Try to get a font - use default if not available
+    try:
+        font = ImageFont.truetype("arial.ttf", 16)
+        title_font = ImageFont.truetype("arial.ttf", 20)
+    except IOError:
+        font = ImageFont.load_default()
+        title_font = ImageFont.load_default()
+
+    # Draw title
+    draw.text((20, img_height + 20), "Cosmick Cloud AI Analyzer Results", fill=(65, 105, 225), font=title_font)
+
+    # Draw divider line
+    draw.line([(0, img_height + 50), (img_width, img_height + 50)], fill=(200, 200, 200), width=2)
+
+    # Current Y position for drawing text
+    y_pos = img_height + 60
+
+    # Draw labels
+    if labels:
+        draw.text((20, y_pos), "🏷️ Labels Detected:", fill=(0, 0, 0), font=title_font)
+        y_pos += 30
+
+        for i, (label, confidence) in enumerate(sorted(labels.items(), key=lambda x: x[1], reverse=True)):
+            if i < 8:  # Limit to top 8 labels to avoid overcrowding
+                draw.text((40, y_pos), f"{label}: {confidence}%", fill=(0, 100, 0), font=font)
+                y_pos += 25
+
+    # Draw a column divider
+    mid_point = img_width // 2
+    draw.line([(mid_point - 20, img_height + 60), (mid_point - 20, img_height + result_height - 20)],
+              fill=(200, 200, 200), width=1)
+
+    # Reset Y position for second column
+    y_pos = img_height + 60
+
+    # Draw objects in second column
+    if objects:
+        draw.text((mid_point, y_pos), "📦 Objects Detected:", fill=(0, 0, 0), font=title_font)
+        y_pos += 30
+
+        for i, (obj, confidence) in enumerate(sorted(objects.items(), key=lambda x: x[1], reverse=True)):
+            if i < 8:  # Limit to top 8 objects
+                draw.text((mid_point + 20, y_pos), f"{obj}: {confidence}%", fill=(0, 0, 128), font=font)
+                y_pos += 25
+
+    # Add text detection summary at the bottom
+    if text:
+        bottom_y = img_height + result_height - 80
+        draw.text((20, bottom_y), "📝 Text Detected:", fill=(0, 0, 0), font=title_font)
+        # Truncate text if too long
+        display_text = text if len(text) < 100 else text[:97] + "..."
+        draw.text((20, bottom_y + 30), display_text, fill=(128, 0, 0), font=font)
+
+    # Add timestamp
+    timestamp = time.strftime("%Y-%m-%d %H:%M:%S")
+    draw.text((img_width - 200, img_height + result_height - 30),
+              f"Generated: {timestamp}", fill=(100, 100, 100), font=font)
+
+    return summary_img

 class VideoProcessor(VideoProcessorBase):
     """Process video frames for real-time analysis"""
@@ -802,7 +890,7 @@ def main():

         # Show original image
         st.markdown('<div class="subheader">Original Image</div>', unsafe_allow_html=True)
-        st.image(image,
+        st.image(image, use_container_width=True)

         # Add analyze button
         if st.button("Analyze Image"):
@@ -846,7 +934,7 @@ def main():
         for i, uploaded_file in enumerate(uploaded_files):
             with cols[i]:
                 image = Image.open(uploaded_file)
-                st.image(image, caption=f"Image {i+1}",
+                st.image(image, caption=f"Image {i+1}", use_container_width=True)

         # Add analyze button for batch processing
         if st.button("Analyze All Images"):
@@ -1116,7 +1204,7 @@ def main():

         # If it's an image file, display it
         if uploaded_file.type.startswith('image/'):
-            st.image(uploaded_file, caption="Uploaded Document",
+            st.image(uploaded_file, caption="Uploaded Document", use_container_width=True)
         else:
             st.info("PDF document uploaded (preview not available)")

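
For reference, the new create_summary_image helper can be previewed outside Streamlit to check the layout of the downloadable summary. The snippet below is a minimal sketch, assuming it runs in the same module as app.py (so time, Image, ImageDraw, and ImageFont are already available); the sample image and the dummy label/object dictionaries are illustrative stand-ins for the analyzer's real output, not part of the app.

# Minimal sketch (illustrative inputs): render the summary image that the
# new download button serves, without launching the Streamlit app.
from PIL import Image

sample_annotated = Image.new("RGB", (640, 480), color=(40, 40, 40))  # stand-in for the annotated image
sample_labels = {"Cat": 97.4, "Pet": 91.2}   # label -> confidence (%), dummy values
sample_objects = {"Cat": 95.0}               # object -> confidence (%), dummy values
sample_text = "Sample detected text"

preview = create_summary_image(sample_annotated, sample_labels, sample_objects, sample_text)
preview.save("analysis_summary_preview.jpg", quality=90)  # same JPEG settings as the download button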