import sys
import os
import math
from typing import Tuple, Union

sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))

import cv2
import numpy as np
import pandas as pd
from paddleocr import PaddleOCR
from doctr.models import ocr_predictor
from doctr.io import DocumentFile
from torch.utils.data import DataLoader
from deskew import determine_skew

print(sys.version)

ocr = PaddleOCR(lang='en')
model = ocr_predictor(pretrained=True)


def find_surr_keys(unassigned_key, known_keys):
    # Sort known keys
    print(known_keys)
    known_keys = sorted(known_keys)
    # Collect (distance, key) pairs for every known key
    closest_keys = []
    for k in known_keys:
        closest_keys.append((abs(int(k) - int(unassigned_key)), k))
    # Sort by distance
    closest_keys.sort()
    # Return the two closest known keys (the nearest known key should differ from the unassigned key)
    if closest_keys[0][1] != unassigned_key:
        return [closest_keys[0][1], closest_keys[1][1]]
    else:
        raise ValueError(f"No closest keys found for unassigned key: {unassigned_key}")


def label_text(text):
    # Characters typical of "TRUE" and of "FALSE"
    list1 = ['t', 'r', 'u', 'T', 'R', 'U']
    list2 = ['f', 'a', 'l', 's', 'F', 'A', 'L', 'S']
    # Count the matches for each list
    count1 = sum(text.count(char) for char in list1)
    count2 = sum(text.count(char) for char in list2)
    # Determine the label based on the counts (returns None when there is no match at all)
    if count1 > count2:
        return True
    elif count1 != 0 or count2 != 0:
        return False


def percentMatch(text1, text2):
    tf_chars = ['t', 'r', 'u', 'T', 'R', 'U', 'f', 'a', 'l', 's', 'F', 'A', 'L', 'S']
    if text1:
        count1 = sum(text1.count(char) for char in tf_chars)
        count2 = sum(text2.count(char) for char in tf_chars)
        # If one text says true and the other says false, priority is given to the second (incoming) text
        if (count1 == 3 and count2 == 4) or (count1 == 4 and count2 == 3):
            print("true and false collision, so priority is given to text2, which is the incoming text")
            return 2
        if count1 > count2:
            print("text1, i.e. the previous text, is the winner")
            return 1
        else:
            print("text2, i.e. the incoming text, is the winner")
            return 2
    else:
        print("text1 is not there, so text2 is the winner")
        return 2


def count_true_false(d):
    # Check how many True/False values a dictionary holds
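    # Illustrative example (values assumed for this comment): count_true_false({'1': True, '2': False, '3': 'NULL'})
    # returns (1, 1); markers such as 'NULL' or 'UNASSIGNED' are not counted as either value.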
    true_count = sum(1 for v in d.values() if v is True)
    false_count = sum(1 for v in d.values() if v is False)
    return true_count, false_count


def merge_dicts(dict1, dict2):
    true_count1, false_count1 = count_true_false(dict1)
    true_count2, false_count2 = count_true_false(dict2)
    if (true_count1 + false_count1) >= (true_count2 + false_count2):
        final_dict = dict1.copy()
        y_dirn_gap = False
    else:
        final_dict = dict2.copy()
        y_dirn_gap = True
    return final_dict, y_dirn_gap


def assign_true_false_or_unknown(true_list, false_list, question_dict, total_questions):
    # Initialize the final dictionary
    final_dict = {str(i): 'UNASSIGNED' for i in range(1, total_questions + 1)}
    unassigned_keys = []
    assigned_keys = []
    # Iterate over each question and its y-coordinate
    for question, y in question_dict.items():
        # compute diff with true list such that we sub t/f box from s/n box
        # true_differences = [y - ty for ty in true_list]
        # Compute absolute differences with true list
        true_abs_differences = [abs(y - ty) for ty in true_list]
        # Compute absolute differences with false list
        # false_differences = [y - ty for ty in false_list]
        false_abs_differences = [abs(y - fy) for fy in false_list]
        # Find the minimum differences
        # min_true_diff = min((diff for diff in true_differences if diff > 0), default=float('inf'))
        # min_false_diff = min((diff for diff in false_differences if diff > 0), default=float('inf'))
        min_true_abs_diff = min(true_abs_differences) if true_abs_differences else float('inf')
        min_false_abs_diff = min(false_abs_differences) if false_abs_differences else float('inf')
        # Determine the smallest difference
        # min_diff = min(min_true_diff, min_false_diff)
        min_abs_diff = min(min_true_abs_diff, min_false_abs_diff)
        # Assign the value based on the smallest difference
        # if min_diff < 360:
        #     if min_true_diff < min_false_diff:
        #         final_dict[question] = True
        #         true_list.pop(true_differences.index(min_true_diff))
        #     else:
        #         final_dict[question] = False
        #         false_list.pop(false_differences.index(min_false_diff))
        # else:
        # checking the abs diff option if nothing can find in positive diff option
        if min_abs_diff < 300:
            if min_true_abs_diff < min_false_abs_diff:
                final_dict[question] = True
                true_list.pop(true_abs_differences.index(min_true_abs_diff))
            else:
                final_dict[question] = False
                false_list.pop(false_abs_differences.index(min_false_abs_diff))
        else:
            final_dict[question] = 'NULL'
    return final_dict


def assign_true_false_or_unknown_rotated(true_list, false_list, true_list_x, false_list_x, question_dict, question_dict_x, total_questions):
    final_dict = {str(i): 'UNASSIGNED' for i in range(1, total_questions + 1)}
    unassigned_keys = []
    assigned_keys = []
    final_dict_y = {str(i): 'UNASSIGNED' for i in range(1, total_questions + 1)}
    final_dict_x = {str(i): 'UNASSIGNED' for i in range(1, total_questions + 1)}
    # Iterate over each question and its y-coordinate
    for question, y in question_dict.items():
        # Compute absolute differences with true list
        true_differences = [y - ty for ty in true_list]
        true_abs_differences = [abs(y - ty) for ty in true_list]
        # Compute absolute differences with false list
        false_differences = [y - fy for fy in false_list]
        false_abs_differences = [abs(y - fy) for fy in false_list]
        # Find the minimum differences
        min_true_diff = min((diff for diff in true_differences if diff > 0), default=float('inf'))
        min_false_diff = min((diff for diff in false_differences if diff > 0), default=float('inf'))
        min_true_abs_diff = min(true_abs_differences) if true_abs_differences else float('inf')
        min_false_abs_diff = min(false_abs_differences) if false_abs_differences else float('inf')
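        # Note: both signed differences (question y minus answer y) and absolute differences are
        # computed; the absolute values tolerate rotated or upside-down sheets, while the signed
        # values only accept answer boxes lying on one side of the question number.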
        # Determine the smallest difference
        min_diff = min(min_true_diff, min_false_diff)
        min_abs_diff = min(min_true_abs_diff, min_false_abs_diff)
        # print("the question number is :", question)
        # print("the min dist is :", min_diff)
        # print("the min abs_diff is :", min_abs_diff)
        # print("the false abs diff", false_abs_differences)
        # Assign the value based on the smallest difference, going with the abs diff first,
        # since an upside-down image favours the absolute difference
        if min_abs_diff < 310:
            if min_true_abs_diff < min_false_abs_diff:
                final_dict_y[question] = True
                true_list.pop(true_abs_differences.index(min_true_abs_diff))
            else:
                final_dict_y[question] = False
                false_list.pop(false_abs_differences.index(min_false_abs_diff))
        else:
            # check the positive-diff option if nothing was found with the abs-diff option
            if min_diff < 310:
                print(question)
                if min_true_diff < min_false_diff:
                    final_dict_y[question] = True
                    true_list.pop(true_differences.index(min_true_diff))
                else:
                    final_dict_y[question] = False
                    false_list.pop(false_differences.index(min_false_diff))
            else:
                final_dict_y[question] = 'NULL'
    for question, x in question_dict_x.items():
        # Compute differences with true list
        true_differences = [x - tx for tx in true_list_x]
        true_abs_differences = [abs(x - tx) for tx in true_list_x]
        # Compute differences with false list
        false_differences = [x - fy for fy in false_list_x]
        false_abs_differences = [abs(x - fy) for fy in false_list_x]
        # Find the minimum differences
        min_true_diff = min((diff for diff in true_differences if diff > 0), default=float('inf'))
        min_false_diff = min((diff for diff in false_differences if diff > 0), default=float('inf'))
        min_true_abs_diff = min(true_abs_differences) if true_abs_differences else float('inf')
        min_false_abs_diff = min(false_abs_differences) if false_abs_differences else float('inf')
        # Determine the smallest difference
        min_diff = min(min_true_diff, min_false_diff)
        min_abs_diff = min(min_true_abs_diff, min_false_abs_diff)
        if min_diff < 310:
            if min_true_diff < min_false_diff:
                final_dict_x[question] = True
                true_list_x.pop(true_differences.index(min_true_diff))
            else:
                final_dict_x[question] = False
                false_list_x.pop(false_differences.index(min_false_diff))
        else:
            # check the abs-diff option if nothing was found with the positive-diff option
            if min_abs_diff < 310:
                if min_true_abs_diff < min_false_abs_diff:
                    final_dict_x[question] = True
                    true_list_x.pop(true_abs_differences.index(min_true_abs_diff))
                else:
                    final_dict_x[question] = False
                    false_list_x.pop(false_abs_differences.index(min_false_abs_diff))
            else:
                final_dict_x[question] = 'NULL'
    print("the final dict for y is: ")
    print(final_dict_y)
    print("the final dict for x is: ")
    print(final_dict_x)
    final_dict, y_dirn_gap = merge_dicts(final_dict_x, final_dict_y)
    if 'L' in final_dict:
        final_dict['7'] = final_dict['L']
        del final_dict['L']
    if 'I' in final_dict:
        final_dict['1'] = final_dict['I']
        del final_dict['I']
    if y_dirn_gap and '6' in final_dict and '9' in final_dict:
        # the image is inverted, so the True/False values of 6 and 9 need to be swapped
        temp = final_dict['6']
        final_dict['6'] = final_dict['9']
        final_dict['9'] = temp
    return final_dict


def process_using_paddleocr(image_path, output_folder, output_folder1, total_questions):
    ocr = PaddleOCR(lang='en')
    base_name = os.path.basename(image_path)
    image_cv = cv2.imread(image_path)
    print("!------------------------------start with paddleocr-----------------------------------!")
    print("Started processing of the image :", base_name)
    output = ocr.ocr(image_path)[0]
    texts = [line[1][0] for line in output]
    print("OCR detection done")
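    # Each entry of the PaddleOCR result is [corner_points, (text, confidence)], where corner_points
    # holds the four corners of the detected text box (top-left first, bottom-right third); only the
    # corners and the recognised text are used below, the confidence score is ignored.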
    boxes = [line[0] for line in output]
    # probabilities = [line[1][1] for line in output]
    image_boxes = image_cv.copy()
    # print("!------------------------------all coordinates-----------------------------------!")
    for box, text in zip(boxes, texts):
        # cv2.rectangle needs the top-left and bottom-right corners to draw the bounding box
        cv2.rectangle(image_boxes, (int(box[0][0]), int(box[0][1])), (int(box[2][0]), int(box[2][1])), (0, 0, 255), 5)
        cv2.putText(image_boxes, text, (int(box[0][0]), int(box[0][1])), cv2.FONT_HERSHEY_SIMPLEX, 4, (222, 0, 0), 3)
    alldet_file_name = f'detect_{base_name}'
    alldet_file_path = os.path.join(output_folder1, alldet_file_name)
    # Save the processed image
    cv2.imwrite(alldet_file_path, image_boxes)
    for box, text in zip(boxes, texts):
        if text == "SN" or text == "NS":
            num_l_x1 = box[0][0]
            num_r_x1 = box[2][0] + 140
            num_l_y1 = box[0][1]
            num_r_y1 = box[2][1] + 140
            print("left top x of SN:", num_l_x1)
            print("bottom right x of SN:", num_r_x1)
            print("left top y of SN:", num_l_y1)
            print("bottom right y of SN:", num_r_y1)
    cons_boxes_image = image_cv.copy()
    true_list = []
    false_list = []
    true_list_x = []
    false_list_x = []
    numbers_dict = {}
    numbers_dict_x = {}
    c = 0
    prev_x = 0
    prev_y = 0
    # this is for the S/N column
    try:
        for box, text in zip(boxes, texts):
            # print(f"the text is : {text}")
            box_top_left_x = int(box[0][0])
            box_top_left_y = int(box[0][1])
            box_bottom_right_x = int(box[2][0])
            box_bottom_right_y = int(box[2][1])
            box_width_x = box_bottom_right_x - box_top_left_x
            box_width_y = box_bottom_right_y - box_top_left_y
            if (num_l_x1 <= box_bottom_right_x <= num_r_x1 or num_l_y1 <= box_bottom_right_y <= num_r_y1) and box_width_x <= 200 and box_width_y <= 200 and text != "SN" and text != "NS":
                # print("entered in the S/N column ")
                # print(text)
                # print(box)
                numbers_dict[text] = int(box[0][1])
                numbers_dict_x[text] = int(box[0][0])
                cv2.rectangle(cons_boxes_image, (int(box[0][0]), int(box[0][1])), (int(box[2][0]), int(box[2][1])), (0, 0, 255), 5)
                cv2.putText(cons_boxes_image, text, (int(box[0][0]), int(box[0][1])), cv2.FONT_HERSHEY_SIMPLEX, 4, (222, 0, 0), 1)
    # error in detection of the S/N column
    except NameError:
        print("can't detect the S/N column either, so going with detection over all boxes using the box width")
        c = 0
        for box, text in zip(boxes, texts):
            box_top_left_x = int(box[0][0])
            box_top_left_y = int(box[0][1])
            box_bottom_right_x = int(box[2][0])
            box_bottom_right_y = int(box[2][1])
            box_width_x = box_bottom_right_x - box_top_left_x
            box_width_y = box_bottom_right_y - box_top_left_y
            if box_width_x <= 80 and box_width_y <= 80:
                if text.isdigit():
                    number = int(text)
                    if 1 <= number <= total_questions + 1:
                        # Store in the dictionaries only if the number is within the question range
                        numbers_dict[text] = int(box[0][1])
                        numbers_dict_x[text] = int(box[0][0])
                        # Visualize the rectangle and text on the image (optional)
                        cv2.rectangle(cons_boxes_image, (int(box[0][0]), int(box[0][1])), (int(box[1][0]), int(box[1][1])), (0, 0, 255), 5)
                        cv2.putText(cons_boxes_image, text, (int(box[0][0]), int(box[0][1])), cv2.FONT_HERSHEY_SIMPLEX, 4, (222, 0, 0), 1)
            if (box_width_x <= 300 and box_width_y <= 300) and ' ' not in text and label_text(text) == True:
                if c == 0:
                    print("first t/f detection")
                    print(text)
                    print(box)
                    prev_y = box[0][1]
                    prev_x = box[0][0]
                    true_list.append(int(box[0][1]))
                    true_list_x.append(int(box[0][0]))
                else:
                    if (abs(box[0][0] - prev_x) > 160) or abs(box[0][1] - prev_y) > 160:
                        print(text)
                        print(box)
                        true_list.append(int(box[0][1]))
                        true_list_x.append(int(box[0][0]))
                        prev_y = box[0][1]
                        prev_x = box[0][0]
                c += 1
                cv2.rectangle(cons_boxes_image, (int(box[0][0]), int(box[0][1])), (int(box[2][0]), int(box[2][1])), (0, 0, 255), 5)
                cv2.putText(cons_boxes_image, text, (int(box[0][0]), int(box[0][1])), cv2.FONT_HERSHEY_SIMPLEX, 4, (222, 0, 0), 1)
            if (box_width_x <= 300 and box_width_y <= 300) and ' ' not in text and label_text(text) == False:
                if c == 0:
                    print("first t/f detection")
                    print(text)
                    print(box)
                    prev_y = box[0][1]
                    prev_x = box[0][0]
                    false_list.append(int(box[0][1]))
                    false_list_x.append(int(box[0][0]))
                else:
                    if (abs(box[0][0] - prev_x) > 160) or abs(box[0][1] - prev_y) > 160:
                        print(text)
                        print(box)
                        false_list.append(int(box[0][1]))
                        false_list_x.append(int(box[0][0]))
                        prev_y = box[0][1]
                        prev_x = box[0][0]
                c += 1
                cv2.rectangle(cons_boxes_image, (int(box[0][0]), int(box[0][1])), (int(box[2][0]), int(box[2][1])), (0, 0, 255), 5)
                cv2.putText(cons_boxes_image, text, (int(box[0][0]), int(box[0][1])), cv2.FONT_HERSHEY_SIMPLEX, 4, (222, 0, 0), 1)
    print("the number dict is: ", numbers_dict)
    print("the number dict x is: ", numbers_dict_x)
    print("the true list is ", true_list)
    print("the false list is ", false_list)
    print("the true list for x dirn ", true_list_x)
    print("the false list for x dirn ", false_list_x)
    final_dict = assign_true_false_or_unknown_rotated(true_list, false_list, true_list_x, false_list_x, numbers_dict, numbers_dict_x, total_questions)
    # Create a unique output file name
    output_file_name = f'final_tf_{base_name}'
    output_file_path = os.path.join(output_folder, output_file_name)
    # Save the processed image
    cv2.imwrite(output_file_path, cons_boxes_image)
    return final_dict


def rotate(
    image: np.ndarray, angle: float, background: Union[int, Tuple[int, int, int]]
) -> np.ndarray:
    old_width, old_height = image.shape[:2]
    angle_radian = math.radians(angle)
    width = abs(np.sin(angle_radian) * old_height) + abs(np.cos(angle_radian) * old_width)
    height = abs(np.sin(angle_radian) * old_width) + abs(np.cos(angle_radian) * old_height)
    image_center = tuple(np.array(image.shape[1::-1]) / 2)
    rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
    rot_mat[1, 2] += (width - old_width) / 2
    rot_mat[0, 2] += (height - old_height) / 2
    return cv2.warpAffine(image, rot_mat, (int(round(height)), int(round(width))), borderValue=background)


def process_using_doctr_less_row_gap(boxes, texts, numbers_dict, num_l_x2, num_r_x2, image_path, total_questions):
    print("the number dict in low gap", numbers_dict)
    cons_boxes_image = cv2.imread(image_path)
    true_list = []
    false_list = []
    c = 0
    print("starting with low row gap")
    try:
        for box, text in zip(boxes, texts):
            box_bottom_right_x = int(box[1][0])
            # Draw the adjusted bounding box
            if num_l_x2 <= box_bottom_right_x <= num_r_x2:
                # print("entered in the t/f column ")
                if label_text(text) == True and text != 'TRUE/FALSE':
                    if c == 0:
                        print("first t/f detection")
                        print(text)
                        print(box)
                        prev = box[0][1]
                        prev_text = text
                        true_list.append(int(box[0][1]))
                    else:
                        if abs(box[0][1] - prev) > 20:  # to avoid boxes in the same row overlapping
                            print(text)
                            print(box)
                            true_list.append(int(box[0][1]))
                            prev = box[0][1]
                            prev_text = text
                        else:
                            print(f"collision happened with box:{prev} and text:{prev_text}, solving on the basis of the percent match of the boxes")
                            print("the current box specifications are")
                            print(text)
                            print(box)
                            ans = percentMatch(prev_text, text)
                            if ans == 2:
                                if label_text(prev_text) == False:
                                    false_list.pop()
                                elif label_text(prev_text) == True:
                                    true_list.pop()
                                prev = box[0][1]
                                prev_text = text
                                true_list.append(int(prev))
                    c += 1
                elif label_text(text) == False and text != 'TRUE/FALSE':
                    if c == 0:
                        print("first t/f detection")
                        print(text)
                        print(box)
                        prev = box[0][1]
                        prev_text = text
                        false_list.append(int(box[0][1]))
                    else:
                        if abs(box[0][1] - prev) > 20:
                            print(text)
                            print(box)
                            false_list.append(int(box[0][1]))
                            prev = box[0][1]
                            prev_text = text
                        else:
                            print(f"collision happened with box:{prev} and text:{prev_text}, solving on the basis of the percent match of the boxes")
                            print("the current box specifications are")
                            print(text)
                            print(box)
                            ans = percentMatch(prev_text, text)
                            if ans == 2:
                                if label_text(prev_text) == False:
                                    false_list.pop()
                                elif label_text(prev_text) == True:
                                    true_list.pop()
                                prev = box[0][1]
                                prev_text = text
                                false_list.append(int(prev))
                    c += 1
                cv2.rectangle(cons_boxes_image, (int(box[0][0]), int(box[0][1])), (int(box[1][0]), int(box[1][1])), (0, 0, 255), 5)
                cv2.putText(cons_boxes_image, text, (int(box[0][0]), int(box[0][1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (222, 0, 0), 1)
        final_dict = assign_true_false_or_unknown(true_list, false_list, numbers_dict, total_questions)
        return cons_boxes_image, final_dict
    except Exception as e:
        print("error occurred")
        print(e)


def process_and_save_image(image_path, actual_ans_csv, output_folder, output_folder1):
    base_name = os.path.basename(image_path)
    image_cv = cv2.imread(image_path)
    height = image_cv.shape[0]
    width = image_cv.shape[1]
    print("!------------------------------starting detection using doctr-----------------------------------!")
    print("Started processing of the image :", base_name)
    # print(image_width)
    # output = ocr.ocr(image_path)[0]
    # check whether the answer-key CSV has a header row
    with open(actual_ans_csv, 'r') as file:
        first_line = file.readline().strip()
    # Check if the first column of the first line is numeric
    first_column_numeric = False
    try:
        first_value = float(first_line.split(',')[0])  # Assuming comma-separated values
        first_column_numeric = True
    except ValueError:
        pass  # If the first column cannot be converted to a float, it's not numeric
    # Read the CSV file based on the condition
    if first_column_numeric:
        actualAns_df = pd.read_csv(actual_ans_csv, header=None)
    else:
        actualAns_df = pd.read_csv(actual_ans_csv)
    total_questions = len(actualAns_df)
    # check skewness and deskew the page
    grayscale = cv2.cvtColor(image_cv, cv2.COLOR_BGR2GRAY)
    angle = determine_skew(grayscale)
    image_cv = rotate(image_cv, angle, (0, 0, 0))
    cv2.imwrite(image_path, image_cv)
    single_img_doc = DocumentFile.from_images(image_path)
    result = model(single_img_doc)
    texts = []
    for page in result.pages:
        for block in page.blocks:
            for line in block.lines:
                for word in line.words:
                    text = word.value
                    texts.append(text)
    # check for rotation
    r_count = 0
    while 'TRUE/FALSE' not in texts:
        image_cv = cv2.rotate(image_cv, cv2.ROTATE_90_CLOCKWISE)
        print("rotation started")
        # Save the rotated image to a temporary path
        # temp_image_path = 'temp_rotated_image.jpg'
        cv2.imwrite(image_path, image_cv)
        # output = ocr.ocr(temp_image_path)[0]
        single_img_doc = DocumentFile.from_images(image_path)
        result = model(single_img_doc)
        texts = []
        for page in result.pages:
            for block in page.blocks:
                for line in block.lines:
                    for word in line.words:
                        text = word.value
                        texts.append(text)
        print(texts)
        r_count += 1
        if r_count == 4:  # back at the original orientation
            break
    if r_count > 0 and r_count != 4:
        # cv2.imwrite(image_path, image_cv)
        print("rotation done for: ", base_name)
        print("Number of times rotation done:", r_count)
        height = image_cv.shape[0]
        width = image_cv.shape[1]
    print("OCR detection done with doctr")
    boxes = []
    # boxes = [line[0] for line in output]
    for page in result.pages:
        for block in page.blocks:
            for line in block.lines:
                for word in line.words:
                    (x_min, y_min), (x_max, y_max) = word.geometry
                    x_min_px = x_min * width
                    y_min_px = y_min * height
                    x_max_px = x_max * width
                    y_max_px = y_max * height
                    bbox = (x_min_px, y_min_px), (x_max_px, y_max_px)
                    boxes.append(bbox)
    image_boxes = image_cv.copy()
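    # docTR reports each word's geometry as relative coordinates in the range 0-1
    # ((x_min, y_min), (x_max, y_max)), so the values above were scaled by the page width and
    # height to get pixel coordinates that can be drawn and compared like the PaddleOCR boxes.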
print("!------------------------------all coordinates-----------------------------------!") for box,text in zip(boxes,texts): # print(text) # print(box) cv2.rectangle(image_boxes,(int(box[0][0]),int(box[0][1])),(int(box[1][0]),int(box[1][1])),(0,0,255),5) #needs top left and bottom right to draw bounding box cv2.putText(image_boxes,text,(int(box[0][0]),int(box[0][1])),cv2.FONT_HERSHEY_SIMPLEX,4,(222,0,0),3) # print("!------------------------------done with all coordinates-----------------------------------!") alldet_file_name = f'detect_{base_name}' alldet_file_path = os.path.join(output_folder1, alldet_file_name) # Save the processed image cv2.imwrite(alldet_file_path, image_boxes) for box, text in zip(boxes, texts): if text=="SN" or text=="NS": num_l_x1=box[0][0]-100 num_r_x1=box[1][0]+140 print("left top x of SN:",num_l_x1) print("bottom right x of SN:",num_r_x1) if text=="TRUE/FALSE": num_l_x2=box[0][0]-10 num_r_x2=box[1][0]+200 print("left top x of T/F:",num_l_x2) print("bottom right x of T/F:",num_r_x2) # Draw OCR bounding boxes within the final rectangle cons_boxes_image=image_cv.copy() true_list=[] false_list=[] numbers_dict={} numbers_dict_x={} c=0 no_of_collisions=0 try: # this is for s/n column for box, text in zip(boxes, texts): # print(f"the text is : {text}") box_top_left_x = int(box[0][0]) box_top_left_y=int(box[0][1]) box_bottom_right_x = int(box[1][0]) box_bottom_right_y = int(box[1][1]) # print(box_bottom_right_x) # print(box_bottom_right_y) # print(box_width_x) # print(box_width_y) if (num_l_x1 <= box_bottom_right_x <= num_r_x1 ): if text.isdigit(): number = int(text) if 1 <= number <= total_questions+1: # Store in dictionaries only if the number is between 1 and 10 numbers_dict[text] = int(box[0][1]) print(text) print(box) # Visualize the rectangle and text on the image (optional) cv2.rectangle(cons_boxes_image, (int(box[0][0]), int(box[0][1])), (int(box[1][0]), int(box[1][1])), (0, 0, 255), 5) cv2.putText(cons_boxes_image, text, (int(box[0][0]), int(box[0][1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (222, 0, 0), 1) prev=0 for box, text in zip(boxes, texts): box_bottom_right_x = int(box[1][0]) if(no_of_collisions>4): break # Draw the adjusted bounding box if (num_l_x2 <= box_bottom_right_x <= num_r_x2): # print("entered in the t/f column ") if label_text(text)==True and text!='TRUE/FALSE': if(c==0): print("first t/f detection") print(text) print(box) prev=box[0][1] prev_text=text true_list.append(int(box[0][1])) else: if(abs(box[0][1]-prev)>200): #to avoid boxes in same row to overlap print(text) print(box) true_list.append(int(box[0][1])) prev=box[0][1] prev_text=text else: print(f"collision happend with box:{prev} and text:{prev_text} solving on the basis of percent match boxes") print("the current box specification are") print(text) print(box) no_of_collisions+=1 ans=percentMatch(prev_text,text) if(ans==2): if(label_text(prev_text)==False): false_list.pop() elif(label_text(prev_text)==True): true_list.pop() prev=box[0][1] prev_text=text true_list.append(int(prev)) c+=1 elif label_text(text)==False and text!='TRUE/FALSE': if(c==0): print("first t/f detection") print(text) print(box) prev=box[0][1] prev_text=text false_list.append(int(box[0][1])) else: if(abs(box[0][1]-prev)>200): print(text) print(box) false_list.append(int(box[0][1])) prev=box[0][1] prev_text=text else: print(f"collision happend with box:{prev} and text:{prev_text} solving on the basis of percent match boxes") print("the current box specification are") print(text) print(box) no_of_collisions+=1 
                            ans = percentMatch(prev_text, text)
                            if ans == 2:
                                if label_text(prev_text) == False:
                                    false_list.pop()
                                elif label_text(prev_text) == True:
                                    true_list.pop()
                                prev = box[0][1]
                                prev_text = text
                                false_list.append(int(prev))
                    c += 1
                cv2.rectangle(cons_boxes_image, (int(box[0][0]), int(box[0][1])), (int(box[1][0]), int(box[1][1])), (0, 0, 255), 5)
                cv2.putText(cons_boxes_image, text, (int(box[0][0]), int(box[0][1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (222, 0, 0), 1)
        if no_of_collisions <= 4:
            final_dict = assign_true_false_or_unknown(true_list, false_list, numbers_dict, total_questions)
        else:
            print("going with doctr less gap")
            cons_boxes_image, final_dict = process_using_doctr_less_row_gap(boxes, texts, numbers_dict, num_l_x2, num_r_x2, image_path, total_questions)
        # Create a unique output file name
        output_file_name = f'final_tf_{base_name}'
        output_file_path = os.path.join(output_folder, output_file_name)
        # Save the processed image
        cv2.imwrite(output_file_path, cons_boxes_image)
        print("printing the number dict y_coordinate")
        print(numbers_dict)
    except NameError:
        print("TRUE/FALSE not detected. Skipping this part of processing.")
        print("going with paddleocr")
        final_dict = process_using_paddleocr(image_path, output_folder, output_folder1, total_questions)
    print("--------- Printing the final dict ------------")
    print(final_dict)
    df = pd.DataFrame(final_dict.items(), columns=['Q_No.', 'True/False'])
    # predcsv_file_name = f'answers_{base_name}.csv'
    # predcsv_file_path = os.path.join(output_folder, predcsv_file_name)
    # df.to_csv(predcsv_file_path, index=False)
    # print(f'DataFrame saved to {predcsv_file_path}')
    # predictions_file_path = 'pred_output.csv'
    # read the answer key and evaluate the predictions
    marks = 0
    w_ans = []
    m_ans = []
    for index, row in actualAns_df.iterrows():
        question_number = str(row.iloc[0])  # Accessing the first column by index
        answer = row.iloc[1]  # Accessing the second column by index
        # print(answer)
        if final_dict[question_number] == answer:
            marks += 1
        elif final_dict[question_number] not in ("NULL", "UNASSIGNED"):
            w_ans.append(question_number)
        else:
            m_ans.append(question_number)
    print("Total Marks:", marks)
    image_name = base_name  # Replace this with the actual image name
    marks_df = pd.DataFrame({"Filename": [image_name], "Marks": [marks]})
    # Append the marks DataFrame to the predictions file
    # marks_df.to_csv(predictions_file_path, mode='a', header=False, index=False)
    output_text = f"Marks: {marks} out of {total_questions}"
    if w_ans:
        output_text += f" and the following were wrong answers: {w_ans}"
    if m_ans and w_ans:
        output_text += f" and missed questions: {m_ans}"
    if m_ans and len(w_ans) == 0:
        output_text += f" and the following were missed answers: {m_ans}"
    print(output_text)
    return output_text


import gradio as gr

output_folder = "test_gradio/output"
output_folder1 = "test_gradio/detection"
# actual_ans_csv = "test_gradio/ModelAnswer.csv"
demo_image_paths = [
    "test_gradio/samples/1zHXQVK.jpg",
    "test_gradio/samples/9X9qVWN.jpg",
    "test_gradio/samples/LRccyJJ.jpg"
]
demo_csv_path = "test_gradio/answerKey.csv"

# Define the Gradio interface
demo = gr.Interface(
    fn=lambda img_path, csv_path: process_and_save_image(img_path, csv_path, output_folder, output_folder1),
    inputs=[
        gr.Image(type='filepath', label="Upload an image of your answer sheet"),
        gr.File(type='filepath', label="Upload the answer key as a CSV file")
    ],
    outputs=[gr.Textbox(label="Predicted Marks")],
    title="AutoEval for True/False AnswerSheet",
    examples=[
        [demo_image_paths[0], demo_csv_path],
        [demo_image_paths[1], demo_csv_path],
        [demo_image_paths[2], demo_csv_path]
    ]
)

# Launch the Gradio app
demo.launch()
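# --- Optional: batch evaluation without the Gradio UI (illustrative sketch, kept commented out). ---
# The sample folder and answer-key path below are assumptions taken from the demo paths above;
# adjust them to your own data before uncommenting.
#
# for fname in sorted(os.listdir("test_gradio/samples")):
#     if fname.lower().endswith((".jpg", ".jpeg", ".png")):
#         summary = process_and_save_image(
#             os.path.join("test_gradio/samples", fname),
#             demo_csv_path, output_folder, output_folder1)
#         print(fname, "->", summary)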