LovnishVerma committed on
Commit
132c799
·
verified ·
1 Parent(s): c4479c0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -107
app.py CHANGED
@@ -3,119 +3,56 @@ import cv2
3
  import numpy as np
4
  import streamlit as st
5
  from datetime import datetime
6
- from tensorflow.keras.models import load_model
7
-
8
- # Directories
9
- KNOWN_FACES_DIR = "known_faces"
10
- EMOTION_MODEL_PATH = "CNN_Model_acc_75.h5"
11
- CASCADE_PATH = "haarcascade_frontalface_default.xml"
12
 
13
  # Constants
 
14
  IMG_SIZE = (200, 200)
15
 
16
- # Load models
17
- emotion_model = load_model(EMOTION_MODEL_PATH)
18
- face_cascade = cv2.CascadeClassifier(CASCADE_PATH)
19
- face_recognizer = cv2.face.LBPHFaceRecognizer_create()
20
-
21
- # Helper Functions
22
- def load_emotion_labels():
23
- return ["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"]
24
-
25
- def train_recognizer():
26
- faces = []
27
- labels = []
28
- label_map = {}
29
- for idx, person_name in enumerate(os.listdir(KNOWN_FACES_DIR)):
30
- person_path = os.path.join(KNOWN_FACES_DIR, person_name)
31
- if not os.path.isdir(person_path):
32
- continue
33
- label_map[idx] = person_name
34
- for filename in os.listdir(person_path):
35
- filepath = os.path.join(person_path, filename)
36
- if filepath.lower().endswith(('.jpg', '.jpeg', '.png')):
37
- img = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
38
- if img is not None:
39
- faces.append(img)
40
- labels.append(idx)
41
- if len(faces) == 0:
42
- st.warning("No valid training data found. Add faces first.")
43
- return {}
44
- face_recognizer.train(faces, np.array(labels))
45
- return {v: k for k, v in label_map.items()}
46
-
47
- def detect_faces(image):
48
- gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
49
- faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
50
- return gray, faces
51
-
52
- def detect_emotions(face_img):
53
- resized_face = cv2.resize(face_img, (48, 48))
54
- normalized_face = resized_face / 255.0
55
- reshaped_face = np.expand_dims(normalized_face, axis=(0, -1))
56
- emotion_probabilities = emotion_model.predict(reshaped_face)
57
- emotion_idx = np.argmax(emotion_probabilities)
58
- return load_emotion_labels()[emotion_idx]
59
 
60
  # Streamlit App
61
- st.title("Face Recognition and Emotion Detection")
62
  st.sidebar.title("Options")
63
- option = st.sidebar.selectbox("Choose an action", ["Home", "Register New Face", "Recognize Faces"])
64
-
65
- # Train the recognizer initially
66
- if option != "Register New Face":
67
- label_map = train_recognizer()
68
 
69
  if option == "Home":
70
- st.write("Use the sidebar to register new faces or recognize them.")
71
-
72
- elif option == "Register New Face":
73
- person_name = st.text_input("Enter the person's name")
74
- capture_mode = st.radio("Select input method", ["Use Camera", "Upload Image(s)"])
75
-
76
- if person_name and st.button("Register Face"):
77
- person_dir = os.path.join(KNOWN_FACES_DIR, person_name)
78
- os.makedirs(person_dir, exist_ok=True)
79
-
80
- if capture_mode == "Use Camera":
81
- st.warning("Ensure you are running this locally to access the camera.")
82
- # Camera logic (only available locally)
83
- cap = cv2.VideoCapture(0)
84
- if not cap.isOpened():
85
- st.error("Could not access the camera. Make sure it's connected and permissions are granted.")
86
- else:
87
- # Capture a frame from the camera
88
- ret, frame = cap.read()
89
- if ret:
90
- st.image(frame, channels="BGR")
91
- cap.release()
92
-
93
- elif capture_mode == "Upload Image(s)":
94
- uploaded_files = st.file_uploader("Upload images", type=["jpg", "jpeg", "png"], accept_multiple_files=True)
95
- if uploaded_files:
96
- for uploaded_file in uploaded_files:
97
- img = cv2.imdecode(np.frombuffer(uploaded_file.read(), np.uint8), cv2.IMREAD_COLOR)
98
- gray, faces = detect_faces(img)
99
- for (x, y, w, h) in faces:
100
- face_img = gray[y:y+h, x:x+w]
101
- resized_img = cv2.resize(face_img, IMG_SIZE)
102
- timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
103
- filepath = os.path.join(person_dir, f"{timestamp}.jpg")
104
- cv2.imwrite(filepath, resized_img)
105
- st.success(f"Faces registered successfully for {person_name}!")
106
- label_map = train_recognizer()
107
-
108
- elif option == "Recognize Faces":
109
- uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
110
- if uploaded_file:
111
- img = cv2.imdecode(np.frombuffer(uploaded_file.read(), np.uint8), cv2.IMREAD_COLOR)
112
- gray, faces = detect_faces(img)
113
- for (x, y, w, h) in faces:
114
- face_img = gray[y:y+h, x:x+w]
115
- resized_img = cv2.resize(face_img, IMG_SIZE)
116
- label, confidence = face_recognizer.predict(resized_img)
117
- name = label_map.get(label, "Unknown")
118
- emotion = detect_emotions(face_img)
119
- cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
120
- cv2.putText(img, f"{name}, {emotion}", (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
121
- st.image(img, channels="BGR")
 
3
  import numpy as np
4
  import streamlit as st
5
  from datetime import datetime
6
+ from huggingface_hub import HfApi
 
 
 
 
 
7
 
8
# Constants
KNOWN_FACES_DIR = "known_faces"  # NOTE(review): kept from the previous face-recognition version; not referenced in this upload-only version — confirm before removing
IMG_SIZE = (200, 200)  # NOTE(review): former face-crop size; also unused after the rewrite — confirm

# Initialize Hugging Face API client (authentication comes from the ambient
# environment, e.g. a cached token — TODO confirm deployment provides one)
api = HfApi()
14
+
15
# Helper function to upload a local image file to a Hugging Face repository.
def upload_to_huggingface(image_path, repo_id="LovnishVerma/face__emotion_detection"):
    """Upload a local image file to a Hugging Face dataset repository.

    Parameters
    ----------
    image_path : str
        Path of the local image; its basename becomes the file name in the repo.
    repo_id : str
        Target repository id on the Hugging Face Hub.

    Returns
    -------
    bool
        True on success, False on failure. (Previously the function returned
        None; callers that ignore the return value are unaffected.)

    Side effects: shows a Streamlit success or error message. Assumes the
    environment holds valid Hub credentials — TODO confirm.
    """
    try:
        api.upload_file(
            path_or_fileobj=image_path,
            path_in_repo=os.path.basename(image_path),  # name of the image in the repo
            repo_id=repo_id,
            repo_type="dataset",  # use "model" instead when targeting a model repo
        )
    except Exception as e:
        # Broad catch is deliberate: any Hub/network failure is surfaced in the
        # UI instead of crashing the Streamlit app.
        st.error(f"Error uploading photo: {e}")
        return False
    else:
        st.success(f"Photo uploaded to Hugging Face repository: {repo_id}")
        return True
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
 
28
# Streamlit App: capture a webcam photo and optionally push it to the Hub.
st.title("Webcam Photo Capture and Upload to Hugging Face")
st.sidebar.title("Options")
option = st.sidebar.selectbox("Choose an action", ["Home", "Capture Photo"])

if option == "Home":
    st.write("Capture a photo using your webcam and upload it to Hugging Face.")

elif option == "Capture Photo":
    # Ask the user to capture a photo using the webcam widget.
    photo = st.camera_input("Capture a photo")

    if photo is not None:
        # Decode the captured JPEG bytes into a BGR image array.
        img = cv2.imdecode(np.frombuffer(photo.getvalue(), np.uint8), cv2.IMREAD_COLOR)
        if img is not None:
            # Save the photo to a timestamped temporary file.
            # NOTE(review): a new file is written on every Streamlit rerun while
            # the photo is present, so files can accumulate between uploads —
            # consider tempfile.NamedTemporaryFile or a fixed name; confirm intent.
            timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
            photo_path = f"temp_photo_{timestamp}.jpg"
            cv2.imwrite(photo_path, img)

            # Display the photo back to the user.
            st.image(img, caption="Captured Photo", channels="BGR")

            # Upload only on explicit user request.
            if st.button("Upload Photo to Hugging Face"):
                # Fix: the previous code passed the placeholder
                # repo_id="your-username/your-repo", which does not exist;
                # rely on the function's real default repository instead.
                upload_to_huggingface(photo_path)
                # Delete the temporary photo file after the upload attempt;
                # guard against it having already been removed on a rerun.
                if os.path.exists(photo_path):
                    os.remove(photo_path)