import os
import sys
import threading

import cv2
import torch
from PIL import Image  # noqa: F401  (kept from the original script; not used directly)

sys.path.insert(0, 'E:/studio Dropbox/studio/ai/libs/notes')
print(sys.executable)

# Import variables from config.py
from dataset_prep.config import (
    INPUT_FOLDER,
    OUTPUT_FOLDER,
    YOLO_DETECTED_FOLDER,
    ASPECT_RATIOS,
    SAVE_TO_YOLO_DETECTED_FOLDER,
)

# Locks and a counter shared between worker threads
counter_lock = threading.Lock()
model_lock = threading.Lock()
image_processed_counter = 0


def resize_bbox_to_dimensions(bbox, target_width, target_height, img_width, img_height):
    """Expand a bounding box to match the target aspect ratio, then clamp it to the image."""
    x1, y1, x2, y2 = bbox
    current_width = x2 - x1
    current_height = y2 - y1
    desired_aspect_ratio = target_width / target_height
    current_aspect_ratio = current_width / current_height

    print(f"Original bbox: {bbox}")
    print(f"Current aspect ratio: {current_aspect_ratio}")
    print(f"Desired aspect ratio: {desired_aspect_ratio}")

    if current_aspect_ratio < desired_aspect_ratio:
        # Box is too narrow: widen it symmetrically around its centre.
        new_width = desired_aspect_ratio * current_height
        x1 -= (new_width - current_width) / 2
        x2 += (new_width - current_width) / 2
    elif current_aspect_ratio > desired_aspect_ratio:
        # Box is too wide: grow it vertically instead.
        new_height = current_width / desired_aspect_ratio
        y1 -= (new_height - current_height) / 2
        y2 += (new_height - current_height) / 2

    # Clamp to the image bounds.
    x1 = max(x1, 0)
    y1 = max(y1, 0)
    x2 = min(x2, img_width)
    y2 = min(y2, img_height)

    new_bbox = [int(x1), int(y1), int(x2), int(y2)]
    print(f"New bbox: {new_bbox}")
    return new_bbox


def process_files(filelist):
    """Detect the first person in each image and save crops for every configured aspect ratio."""
    global image_processed_counter

    # Load YOLOv7 once per worker; guard the hub load so threads don't clone the repo concurrently.
    with model_lock:
        model = torch.hub.load('WongKinYiu/yolov7', 'custom', 'yolov7-e6e.pt',
                               force_reload=False, trust_repo=True)

    for filename in filelist:
        try:
            img_path = os.path.join(INPUT_FOLDER, filename)
            image = cv2.imread(img_path)
            if image is None:
                raise ValueError(f"Could not read image {filename}")
            img_width, img_height = image.shape[1], image.shape[0]

            # Serialize inference so the shared model is used by one thread at a time.
            with model_lock:
                results = model(img_path)

            detections = results.pandas().xyxy[0]
            person_detected = detections[detections['name'] == 'person']
            print(f"Person detected: {not person_detected.empty}")

            if not person_detected.empty:
                # Use the first (highest-confidence) person detection.
                x1, y1, x2, y2 = person_detected.iloc[0][['xmin', 'ymin', 'xmax', 'ymax']].astype(int)

                for target_width, target_height in ASPECT_RATIOS:
                    new_x1, new_y1, new_x2, new_y2 = resize_bbox_to_dimensions(
                        [x1, y1, x2, y2], target_width, target_height, img_width, img_height)
                    new_x1, new_y1 = max(new_x1, 0), max(new_y1, 0)
                    new_x2, new_y2 = min(new_x2, img_width), min(new_y2, img_height)
                    cropped_img = image[new_y1:new_y2, new_x1:new_x2]

                    # Create a folder for each aspect ratio if it doesn't exist.
                    aspect_ratio_folder = f"{target_width}_{target_height}"
                    aspect_ratio_path = os.path.join(OUTPUT_FOLDER, aspect_ratio_folder)
                    os.makedirs(aspect_ratio_path, exist_ok=True)

                    # Save the cropped image to the corresponding folder.
                    output_filename = os.path.join(aspect_ratio_path, f"cropped_{filename}")
                    cv2.imwrite(output_filename, cropped_img)

            # Track progress under the counter lock so multiple workers can update it safely.
            with counter_lock:
                image_processed_counter += 1
        except Exception as e:
            print(f"An error occurred while processing file {filename}: {e}")


if __name__ == "__main__":
    filelist = os.listdir(INPUT_FOLDER)
    process_files(filelist)
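
# ---------------------------------------------------------------------------
# Illustrative sketch of dataset_prep/config.py (not part of this script).
# The real config.py is not shown here; the values below are assumptions that
# only demonstrate the names and types this script expects to import.
# ---------------------------------------------------------------------------
# INPUT_FOLDER = "E:/datasets/raw"                      # source images to scan
# OUTPUT_FOLDER = "E:/datasets/cropped"                 # per-aspect-ratio crops are written here
# YOLO_DETECTED_FOLDER = "E:/datasets/yolo_detected"    # optional copy of frames with detections
# SAVE_TO_YOLO_DETECTED_FOLDER = False                  # toggle for the folder above
# ASPECT_RATIOS = [(512, 512), (768, 512), (512, 768)]  # (width, height) pairs used for cropping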