import gradio as gr
import torch
from PIL import Image
from transformers import DetrImageProcessor, DetrForObjectDetection
import cv2
import numpy as np

# Load the pretrained DETR model and its image processor
processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50", revision="no_timm")
model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50", revision="no_timm")

# Define the object detection function
def detect_objects(frame):
    # OpenCV delivers frames in BGR order; convert to RGB before building the PIL image
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    image = Image.fromarray(rgb_frame)

    # Preprocess the image
    inputs = processor(images=image, return_tensors="pt")

    # Perform object detection (no gradients needed for inference)
    with torch.no_grad():
        outputs = model(**inputs)

    # Convert outputs to COCO API format; target_sizes expects (height, width),
    # while PIL's image.size is (width, height), hence the reversal
    target_sizes = torch.tensor([image.size[::-1]])
    results = processor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.9)[0]

    # Draw bounding boxes and labels on the original BGR frame
    for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
        box = [round(i, 2) for i in box.tolist()]
        frame = cv2.rectangle(frame, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 255, 0), 2)
        frame = cv2.putText(frame, f'{model.config.id2label[label.item()]}: {round(score.item(), 3)}',
                            (int(box[0]), int(box[1]) - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2, cv2.LINE_AA)

    # Return the annotated frame in BGR so cv2.imshow displays correct colors
    return frame

# Function to capture video frames and process them
def capture_frames():
    cap = cv2.VideoCapture(0)  # 0 selects the default camera; change the index if needed
    while True:
        ret, frame = cap.read()
        if not ret:
            break

        # Process the frame
        processed_frame = detect_objects(frame)

        # Display the processed frame
        cv2.imshow("Object Detection", processed_frame)

        # Check for exit key (press 'q' to exit)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release the camera and close all windows
    cap.release()
    cv2.destroyAllWindows()

# Run the video capturing and processing function
if __name__ == "__main__":
    capture_frames()
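
# ---------------------------------------------------------------------------
# Optional browser front end (sketch): gradio is imported above, but the script
# drives an OpenCV window instead. Below is a minimal, assumed way to reuse
# detect_objects behind a Gradio interface; the wrapper name detect_objects_rgb
# and the component settings are illustrative assumptions. Uncomment and call
# this instead of capture_frames() to try it.
#
# def detect_objects_rgb(image_rgb):
#     # Gradio passes and expects RGB arrays, while detect_objects works in BGR,
#     # so convert on the way in and back out.
#     annotated_bgr = detect_objects(cv2.cvtColor(image_rgb, cv2.COLOR_RGB2BGR))
#     return cv2.cvtColor(annotated_bgr, cv2.COLOR_BGR2RGB)
#
# demo = gr.Interface(
#     fn=detect_objects_rgb,
#     inputs=gr.Image(type="numpy"),
#     outputs=gr.Image(type="numpy"),
#     title="DETR Object Detection",
# )
# demo.launch()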