Update app.py
app.py CHANGED
@@ -1,15 +1,17 @@
 import streamlit as st
-from transformers import DetrImageProcessor, DetrForObjectDetection
+from transformers import DetrImageProcessor, DetrForObjectDetection, pipeline
 import torch
 from PIL import Image
-import requests
 
-# Load the DETR model
+# Load the DETR model for object detection
 processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50", revision="no_timm")
-
+detr_model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50", revision="no_timm")
 
-
-
+# Load an NLP model for summarization (T5-small used as an example)
+summarizer = pipeline("summarization", model="t5-small")
+
+st.title("DETR Object Detection with NLP Summary")
+st.write("Upload an image to detect objects and get a summary of what is detected.")
 
 # File uploader in Streamlit
 uploaded_file = st.file_uploader("Choose an image...", type="jpg")
@@ -21,14 +23,28 @@ if uploaded_file is not None:
 
     # Process the image and perform object detection
     inputs = processor(images=image, return_tensors="pt")
-    outputs =
+    outputs = detr_model(**inputs)
 
-    # Post-process the results to get bounding boxes and labels with confidence
+    # Post-process the results to get bounding boxes and labels with a lower confidence threshold
    target_sizes = torch.tensor([image.size[::-1]])
-    results = processor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.
+    results = processor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.5)[0]
 
-    #
+    # Generate descriptions for detected objects
+    descriptions = []
     st.write("Detected objects:")
     for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
         box = [round(i, 2) for i in box.tolist()]
-
+        label_text = detr_model.config.id2label[label.item()]
+        description = f"Detected {label_text} with confidence {round(score.item(), 2)} at location {box}."
+        descriptions.append(description)
+        st.write(description)  # Display each detected object
+
+    # Combine descriptions into a single text input for the summarizer
+    description_text = " ".join(descriptions)
+
+    # Generate a summary using the NLP model
+    summary = summarizer(description_text, max_length=50, min_length=10, do_sample=False)[0]['summary_text']
+
+    # Display the summary
+    st.subheader("Summary")
+    st.write(summary)
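The two hunks skip the unchanged block between the file uploader and the detection code (old lines 16-20 / new lines 18-22). Judging from the hunk context `if uploaded_file is not None:` and the later use of `image` and `image.size`, that block presumably opens the upload with PIL and displays it. A minimal sketch of what it likely looks like; the exact calls and caption are assumptions, not part of this commit:

import streamlit as st
from PIL import Image

uploaded_file = st.file_uploader("Choose an image...", type="jpg")

if uploaded_file is not None:
    # Open the upload as a PIL image; this is the `image` the DETR processor consumes (assumed)
    image = Image.open(uploaded_file).convert("RGB")
    # Echo the image back to the user; the caption text is an assumption
    st.image(image, caption="Uploaded image")

With that block in place, the app's flow after this commit is: DETR produces detections, each detection becomes one sentence, the sentences are joined into a single string, and t5-small summarizes that string, which is shown under the "Summary" subheader.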