Update app.py
app.py CHANGED
@@ -1,15 +1,14 @@
-import streamlit as st
-from PIL import Image
 import torch
-from
+from PIL import Image
 import numpy as np
+import streamlit as st
+from ultralytics import YOLO
 
-# Load
-
-model = YOLO(model_path)
+# Load YOLOv10 model
+model = YOLO('best.pt')  # Load the pre-trained model
 
 # Streamlit App
-st.title("YOLO Object Detection")
+st.title("YOLO Object Detection with Confidence Threshold")
 
 st.sidebar.title("Options")
 st.sidebar.markdown("Upload an image to detect objects.")
@@ -18,34 +17,24 @@ uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
 
 if uploaded_file:
     # Load the image
-    image = Image.open(uploaded_file)
+    image = Image.open(uploaded_file).convert('RGB')
     st.image(image, caption="Uploaded Image", use_column_width=True)
 
-    #
-
-
-    # Perform inference
-    st.write("Detecting objects...")
-    results = model.predict(image_np)
+    # Perform inference with a confidence threshold of 0.25
+    st.write("Detecting objects with confidence threshold of 0.25...")
+    results = model.predict(source=image, conf=0.25, save=False)  # Directly pass PIL image
 
-    #
+    # Annotate and display the image
     annotated_image = results[0].plot()  # Get annotated image with bounding boxes
-
-    # Display the results
     st.image(annotated_image, caption="Detected Objects", use_column_width=True)
 
     # Show raw predictions
    st.write("Detection Results:")
     for result in results:
         for box in result.boxes:
-            class_id = int(box.cls
-            confidence = float(box.conf
-
-            # Ensure bbox is processed correctly
-            if isinstance(box.xyxy, torch.Tensor):
-                bbox = box.xyxy.tolist()  # Convert tensor to list
-            else:
-                bbox = box.xyxy  # Already in list format
+            class_id = int(box.cls)  # Convert to Python int
+            confidence = float(box.conf)  # Convert to Python float
+            bbox = box.xyxy.tolist()  # Bounding box coordinates as a list
 
             st.write(
                 f"Class: {class_id}, Confidence: {confidence:.2f}, Box: {bbox}"