Create app.py
app.py
ADDED
@@ -0,0 +1,77 @@
import gradio as gr
import torch
import cv2
import numpy as np
from ultralytics import YOLO
from PIL import Image
import os

# Load the YOLOv8 model ONCE (faster processing)
MODEL_PATH = "yolov8l.pt"
model = YOLO(MODEL_PATH)

# Get sample images from Space directory
valid_extensions = (".jpg", ".jpeg", ".png")
preloaded_images = [img for img in os.listdir() if img.lower().endswith(valid_extensions)]

# Object detection function
def predict(image):
    if isinstance(image, str):  # Sample image selected
        image = Image.open(image).convert("RGB")  # convert() guards against RGBA/palette PNGs
    else:  # Uploaded image
        image = Image.fromarray(image)

    results = model(image)  # YOLO detection

    # Convert image for OpenCV processing
    image_cv = np.array(image)
    image_cv = cv2.cvtColor(image_cv, cv2.COLOR_RGB2BGR)
    overlay = image_cv.copy()

    for result in results:
        for box in result.boxes:
            x1, y1, x2, y2 = map(int, box.xyxy[0])
            label = model.names[int(box.cls)]
            confidence = float(box.conf)

            # Translucent blue overlay
            cv2.rectangle(overlay, (x1, y1), (x2, y2), (255, 0, 0), -1)
            cv2.rectangle(image_cv, (x1, y1), (x2, y2), (255, 0, 0), 2)
            cv2.putText(image_cv, f"{label}: {confidence:.2f}", (x1, y1 - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)

    # Blend overlay with transparency
    alpha = 0.4
    image_masked = cv2.addWeighted(overlay, alpha, image_cv, 1 - alpha, 0)

    return Image.fromarray(cv2.cvtColor(image_masked, cv2.COLOR_BGR2RGB))

# Processing function: an uploaded image takes priority over a selected sample
def process_image(image, sample_name):
    if image is not None:  # Prioritize uploaded image
        return predict(image)
    elif sample_name:  # Otherwise, use the selected sample
        return predict(sample_name)
    return None  # No input provided

# Gradio Interface
with gr.Blocks() as interface:
    gr.Markdown("# 🪨 Moon Rock Detection")
    gr.Markdown("Upload a moon surface image or select a sample.")

    with gr.Row():
        with gr.Column(scale=1):
            image_input = gr.Image(type="numpy", label="Upload Image")
            sample_dropdown = gr.Dropdown(
                choices=preloaded_images, label="Or Select a Sample Image", interactive=True
            )

        with gr.Column(scale=2):
            output_image = gr.Image(type="pil", label="Detection Result")

    # Automatically detect when an image is uploaded or selected
    image_input.change(process_image, inputs=[image_input, sample_dropdown], outputs=output_image)
    sample_dropdown.change(process_image, inputs=[image_input, sample_dropdown], outputs=output_image)

if __name__ == "__main__":
    interface.launch()
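
For reference, predict() can also be exercised outside the Gradio UI. The snippet below is a minimal local smoke test, not part of this commit; the file name sample_moon.jpg is hypothetical, and it assumes ultralytics downloads the yolov8l.pt weights automatically the first time YOLO() loads them.

# Hypothetical smoke test: run from the same directory as app.py.
# Importing app builds the Gradio Blocks but does not launch the server,
# because interface.launch() is guarded by the __main__ check above.
from app import predict

annotated = predict("sample_moon.jpg")       # returns a PIL.Image with boxes and overlay drawn
annotated.save("sample_moon_detected.jpg")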