import gradio as gr
import face_recognition
import cv2
import numpy as np
from PIL import Image
import pickle
import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
from firebase_admin import storage

# Initialize Firebase
cred = credentials.Certificate("serviceAccountKey.json")  # Update with your credentials path
firebase_app = firebase_admin.initialize_app(cred, {
    'databaseURL': 'https://faceantendancerealtime-default-rtdb.firebaseio.com/',
    'storageBucket': 'faceantendancerealtime.appspot.com'
})
bucket = storage.bucket()


# Function to download face encodings from Firebase Storage
def download_encodings():
    blob = bucket.blob('EncodeFile.p')
    blob.download_to_filename('EncodeFile.p')
    with open('EncodeFile.p', 'rb') as file:
        return pickle.load(file)


encodeListKnownWithIds = download_encodings()
encodeListKnown, studentsIds = encodeListKnownWithIds


def recognize_face(input_image):
    # Convert the PIL image (RGB) to a NumPy array; keep an RGB copy for
    # face_recognition and a BGR copy for OpenCV drawing
    rgb_img = np.array(input_image)
    img = cv2.cvtColor(rgb_img, cv2.COLOR_RGB2BGR)

    # Detect face locations and compute encodings (face_recognition expects RGB input)
    face_locations = face_recognition.face_locations(rgb_img)
    face_encodings = face_recognition.face_encodings(rgb_img, face_locations)

    # Initialize the database reference
    ref = db.reference('Students')

    # Recognize faces and fetch student data from the database
    results = []
    for face_encoding in face_encodings:
        matches = face_recognition.compare_faces(encodeListKnown, face_encoding)
        face_distances = face_recognition.face_distance(encodeListKnown, face_encoding)
        best_match_index = np.argmin(face_distances)

        student_info = None
        if matches[best_match_index]:
            student_id = studentsIds[best_match_index]
            student_info = ref.child(student_id).get()

        if student_info:
            results.append(student_info)
        else:
            results.append({'name': 'Unknown'})

    # Draw a rectangle and name label around each detected face
    for (top, right, bottom, left), student_info in zip(face_locations, results):
        name = student_info.get('name', 'Unknown')
        cv2.rectangle(img, (left, top), (right, bottom), (0, 0, 255), 2)
        cv2.putText(img, name, (left + 6, bottom - 6),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 1)

    # Convert back to a PIL Image
    pil_img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    return pil_img, results


# Gradio interface
iface = gr.Interface(
    fn=recognize_face,
    inputs=gr.Image(type="pil"),
    outputs=[gr.Image(type="pil"), gr.JSON(label="Student Information")],
    title="Face Recognition Attendance System",
    description="Upload an image to identify individuals."
)

if __name__ == "__main__":
    iface.launch(debug=True, inline=False)