import streamlit as st
import cv2
import numpy as np
from ultralytics import YOLO

model = YOLO('best.pt')


def track(source, iou, conf):
    # Run YOLOv8 tracking on the given source with the selected thresholds
    # result = model.track(source=source, device='mps')
    result = model.track(source=source, iou=iou, conf=conf, device='cpu')
    return result


def inferir_camara(camera1, iou, conf, st_frame, st_cantidad, stop_button_pressed):
    # Read frames from the camera, run tracking, and update the Streamlit placeholders
    while camera1.isOpened() and not stop_button_pressed:
        ret, frame = camera1.read()
        if not ret:
            st.write("Video Capture Ended")
            break
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        resultado = track(frame, iou, conf)

        # Count the boxes detected in this frame and update the counter placeholder
        boxes = resultado[0].boxes
        object_count = len(boxes)
        st_cantidad.write(f'Objects detected: {object_count}')

        # Render the annotated frame and show it in the frame placeholder
        res_plotted = resultado[0].plot()
        st_frame.image(res_plotted,
                       caption='Detected Video',
                       channels="RGB",
                       # use_column_width=True
                       )
        if cv2.waitKey(1) & 0xFF == ord("q") or stop_button_pressed:
            break


# Display frames from two cameras side by side in an OpenCV window
def display_two_cameras(camera1, camera2):
    while True:
        # Capture frames from both cameras
        success1, frame1 = camera1.read()
        success2, frame2 = camera2.read()

        # Check that both captures were successful
        if success1 and success2:
            # Concatenate the frames horizontally and display the combined image
            frames = np.hstack((frame1, frame2))
            cv2.imshow('Two Cameras', frames)

            # Check for keyboard input to quit
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            # Handle camera failure
            print("Error capturing frames from one or both cameras.")
            break

    # Release cameras
    camera1.release()
    camera2.release()
    cv2.destroyAllWindows()


# Try to open two cameras
try:
    camera1 = cv2.VideoCapture(0)  # Open camera 0
    camera2 = cv2.VideoCapture(1)  # Open camera 1

    # Check if both cameras opened successfully
    if camera1.isOpened() and camera2.isOpened():
        display_two_cameras(camera1, camera2)
    else:
        print("Failed to open both cameras. Showing single camera feed.")
        # Open and display only the available camera
        camera = cv2.VideoCapture(0 if not camera1.isOpened() else 1)
        if camera.isOpened():
            while True:
                success, frame = camera.read()
                if success:
                    cv2.imshow('Single Camera', frame)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break
                else:
                    print("Error capturing frame.")
                    break
            camera.release()
            cv2.destroyAllWindows()
except Exception as e:
    print(f"Error accessing cameras: {e}")

# Interface
st.set_page_config(page_title="Tracking YOLOv8")
st.title("Tracking YOLOv8")
play_button_pressed = st.empty()
stop_button_pressed = st.button("Stop")  # True only on the rerun triggered by the click
iou = float(st.sidebar.slider("NMS IoU threshold", 30, 100, 80)) / 100      # Intersection-over-union (IoU) threshold for NMS
conf = float(st.sidebar.slider("Confidence threshold", 30, 100, 80)) / 100  # Model confidence threshold
st_frame_camara_1 = st.empty()
st_frame_camara_2 = st.empty()
st_cantidad_1 = st.empty()
st_cantidad_2 = st.empty()
# Pass the callback and its arguments separately; calling inferir_camara(...) inline
# would run it on every script rerun instead of when the button is clicked.
play_button_1 = st.button("Start camera 1",
                          on_click=inferir_camara,
                          args=(camera1, iou, conf, st_frame_camara_1, st_cantidad_1, stop_button_pressed))
# play_button_2 = st.button("Start camera 2",
#                           on_click=inferir_camara,
#                           args=(camera2, iou, conf, st_frame_camara_2, st_cantidad_2, stop_button_pressed))
# camera1.release()
# camera2.release()
# cv2.destroyAllWindows()
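

# --- Optional sketch, not wired into the UI above ---------------------------------
# inferir_camara() calls model.track() on one frame at a time, so the tracker starts
# fresh on every call. For this frame-by-frame pattern, Ultralytics' track() accepts
# persist=True, which keeps the tracker state (and track IDs) across successive
# frames. The variant below is only an illustrative sketch: the function name
# inferir_camara_con_ids is hypothetical and it is never called; the arguments mirror
# the placeholders used by the app above.
def inferir_camara_con_ids(camera, iou, conf, st_frame, st_cantidad, stop_button_pressed):
    while camera.isOpened() and not stop_button_pressed:
        ret, frame = camera.read()
        if not ret:
            st.write("Video Capture Ended")
            break
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # persist=True reuses the tracker state from the previous frame
        resultado = model.track(source=frame, iou=iou, conf=conf, persist=True, device='cpu')
        st_cantidad.write(f'Objects detected: {len(resultado[0].boxes)}')
        st_frame.image(resultado[0].plot(), caption='Detected Video', channels="RGB")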