Create app.py
app.py
ADDED
@@ -0,0 +1,52 @@
import gradio as gr
from transformers import DetrImageProcessor, DetrForObjectDetection
import torch
import cv2
import numpy as np
from PIL import Image

# Load Hugging Face object detection model
processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")

def detect_intrusion(video_path):
    cap = cv2.VideoCapture(video_path)
    alerts = []
    count = 0
    while cap.isOpened() and count < 20:
        ret, frame = cap.read()
        if not ret:
            break
        image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        inputs = processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        target_sizes = torch.tensor([image.size[::-1]])
        results = processor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.9)[0]
        for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
            label_name = model.config.id2label[label.item()]
            if label_name == "person":
                alerts.append(f"Frame {count}: 🔴 Person Detected!")
                break
        count += 1
    cap.release()
    return "\n".join(alerts) if alerts else "✅ No intrusion detected."

def detect_overheat(temp, humidity, solar_output):
    if temp > 75:
        return "🔥 Overheat Fault!"
    elif humidity < 20 and solar_output < 300:
        return "🌫️ Dust/Shade Fault!"
    else:
        return "✅ All Good"

video_tab = gr.Interface(fn=detect_intrusion,
                         inputs=gr.Video(label="Upload Video"),
                         outputs=gr.Textbox(label="Intrusion Detection Alerts"))

sensor_tab = gr.Interface(fn=detect_overheat,
                          inputs=[gr.Number(label="Temperature (°C)"),
                                  gr.Number(label="Humidity (%)"),
                                  gr.Number(label="Solar Output (W)")],
                          outputs=gr.Textbox(label="Sensor Fault Detection"))

gr.TabbedInterface([video_tab, sensor_tab], ["Intrusion (Video)", "Sensor (Input)"]).launch()
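For the Space to build, its Python dependencies also need to be declared. A minimal requirements.txt sketch inferred from the imports in app.py (the timm and opencv-python-headless entries are assumptions: transformers' DETR models rely on a timm backbone, and the headless OpenCV build is the usual choice on a server without a display):

gradio
transformers
torch
timm
opencv-python-headless
numpy
Pillow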