LovnishVerma committed
Commit bb6896d · verified · 1 Parent(s): 4270d9a

Update app.py

Files changed (1)
  1. app.py +181 -129
app.py CHANGED
@@ -1,136 +1,188 @@
  import streamlit as st
  import cv2
  import numpy as np
- import time
  from keras.models import load_model
  from PIL import Image
- from huggingface_hub import HfApi, Repository
- import os
- import tempfile
-
- # Page configuration
- st.set_page_config(page_title="Emotion Detection", layout="centered")
-
- # Title and Subtitle
- st.markdown("<h1 style='text-align: center;'>Emotion Detection</h1>", unsafe_allow_html=True)
- st.markdown("<h3 style='text-align: center;'>angry, fear, happy, neutral, sad, surprise</h3>", unsafe_allow_html=True)
-
- # Load Model
- @st.cache_resource
- def load_emotion_model():
-     model = load_model('CNN_Model_acc_75.h5')
-     return model
-
- start_time = time.time()
- model = load_emotion_model()
- st.write(f"Model loaded in {time.time() - start_time:.2f} seconds.")
-
- # Emotion labels and constants
- emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']
- img_shape = 48
- face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
-
- def process_frame(frame):
-     """Detect faces and predict emotions."""
-     gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-     faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
-     for (x, y, w, h) in faces:
-         roi_gray = gray_frame[y:y+h, x:x+w]
-         roi_color = frame[y:y+h, x:x+w]
-         face_roi = cv2.resize(roi_color, (img_shape, img_shape))
-         face_roi = np.expand_dims(face_roi, axis=0)
-         face_roi = face_roi / float(img_shape)
-         predictions = model.predict(face_roi)
-         emotion = emotion_labels[np.argmax(predictions[0])]
-
-         # Draw rectangle and emotion label
-         cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
-         cv2.putText(frame, emotion, (x, y + h + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
-     return frame
-
- # Sidebar for input selection
- st.sidebar.title("Choose Input Source")
- upload_choice = st.sidebar.radio("Select:", ["Camera", "Upload Video", "Upload Image", "Upload to Hugging Face"])
-
- if upload_choice == "Camera":
-     # Use Streamlit's camera input widget
-     st.sidebar.info("Click a picture to analyze emotion.")
-     picture = st.camera_input("Take a picture")
-     if picture:
-         image = Image.open(picture)
-         frame = np.array(image)
-         frame = process_frame(frame)
-         st.image(frame, caption="Processed Image", use_column_width=True)
-
- elif upload_choice == "Upload Video":
-     uploaded_video = st.file_uploader("Upload Video", type=["mp4", "mov", "avi", "mkv", "webm"])
-     if uploaded_video:
-         with tempfile.NamedTemporaryFile(delete=False) as tfile:
-             tfile.write(uploaded_video.read())
-         video_source = cv2.VideoCapture(tfile.name)
-         frame_placeholder = st.empty()
-         while video_source.isOpened():
-             ret, frame = video_source.read()
-             if not ret:
-                 break
-             frame = process_frame(frame)
-             frame_placeholder.image(frame, channels="BGR", use_column_width=True)
-         video_source.release()
-
- elif upload_choice == "Upload Image":
-     uploaded_image = st.camera_input("Take a picture")
-     if uploaded_image:
-         image = Image.open(uploaded_image)
-         frame = np.array(image)
-         frame = process_frame(frame)
-         st.image(frame, caption="Processed Image", use_column_width=True)
-
- elif upload_choice == "Upload to Hugging Face":
-     st.sidebar.info("Upload images to the 'known_faces' directory in the Hugging Face repository.")
-
-     # Configure Hugging Face Repository
-     REPO_NAME = "face_and_emotion_detection"
-     REPO_ID = "LovnishVerma/" + REPO_NAME
-     hf_token = os.getenv("upload")  # Set your Hugging Face token as an environment variable
-
-     if not hf_token:
-         st.error("Hugging Face token not found. Please set it as an environment variable named 'HF_TOKEN'.")
-         st.stop()
-
-     # Initialize Hugging Face API
-     api = HfApi()
-
-     def create_hugging_face_repo():
-         """Create or verify the Hugging Face repository."""
-         try:
-             api.create_repo(repo_id=REPO_ID, repo_type="dataset", token=hf_token, exist_ok=True)
-             st.success(f"Repository '{REPO_NAME}' is ready on Hugging Face!")
-         except Exception as e:
-             st.error(f"Error creating Hugging Face repository: {e}")
-
-     def upload_to_hugging_face(file):
-         """Upload a file to the Hugging Face repository."""
          try:
-             with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_file:
-                 temp_file.write(file.read())
-                 temp_file_path = temp_file.name
-
-             api.upload_file(
-                 path_or_fileobj=temp_file_path,
-                 path_in_repo=f"known_faces/{os.path.basename(temp_file_path)}",
-                 repo_id=REPO_ID,
-                 token=hf_token,
-             )
-             st.success("File uploaded successfully to Hugging Face!")
          except Exception as e:
-             st.error(f"Error uploading file to Hugging Face: {e}")
-
-     # Create the repository if it doesn't exist
-     create_hugging_face_repo()
-
-     # Upload image file
-     hf_uploaded_image = st.file_uploader("Upload Image to Hugging Face", type=["png", "jpg", "jpeg"])
-     if hf_uploaded_image:
-         upload_to_hugging_face(hf_uploaded_image)
-
- st.sidebar.write("Emotion Labels: Angry, Fear, Happy, Neutral, Sad, Surprise")
  import streamlit as st
  import cv2
+ import os
  import numpy as np
  from keras.models import load_model
  from PIL import Image
+ import sqlite3
+ from huggingface_hub import HfApi
+ from datetime import datetime
+
+ # Constants
+ KNOWN_FACES_DIR = "known_faces"  # Directory to save user images
+ DATABASE = "students.db"  # SQLite database file to store student information
+ EMOTION_MODEL_FILE = "CNN_Model_acc_75.h5"
+ EMOTION_LABELS = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"]
+ REPO_NAME = "face_and_emotion_detection"
+ REPO_ID = f"LovnishVerma/{REPO_NAME}"
+
+ # Ensure the directories exist
+ os.makedirs(KNOWN_FACES_DIR, exist_ok=True)
+
+ # Retrieve Hugging Face token from environment variable
+ hf_token = os.getenv("upload")  # Replace with your actual Hugging Face token
+ if not hf_token:
+     st.error("Hugging Face token not found. Please set the environment variable.")
+     st.stop()
+
+ # Initialize Hugging Face API
+ api = HfApi()
+ try:
+     api.create_repo(repo_id=REPO_ID, repo_type="space", space_sdk="streamlit", token=hf_token, exist_ok=True)
+     st.success(f"Repository '{REPO_NAME}' is ready on Hugging Face!")
+ except Exception as e:
+     st.error(f"Error creating Hugging Face repository: {e}")
+
+ # Load the emotion detection model
+ try:
+     emotion_model = load_model(EMOTION_MODEL_FILE)
+ except Exception as e:
+     st.error(f"Error loading emotion model: {e}")
+     st.stop()
+
+ # Database Functions
+ def initialize_database():
+     """ Initializes the SQLite database by creating the students table if it doesn't exist. """
+     conn = sqlite3.connect(DATABASE)
+     cursor = conn.cursor()
+     cursor.execute("""
+         CREATE TABLE IF NOT EXISTS students (
+             id INTEGER PRIMARY KEY AUTOINCREMENT,
+             name TEXT NOT NULL,
+             roll_no TEXT NOT NULL UNIQUE,
+             image_path TEXT NOT NULL,
+             timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
+         )
+     """)
+     conn.commit()
+     conn.close()
+
+ def save_to_database(name, roll_no, image_path):
+     """ Saves the student's data to the database. """
+     conn = sqlite3.connect(DATABASE)
+     cursor = conn.cursor()
+     try:
+         cursor.execute("""
+             INSERT INTO students (name, roll_no, image_path)
+             VALUES (?, ?, ?)
+         """, (name, roll_no, image_path))
+         conn.commit()
+         st.success("Data saved successfully!")
+     except sqlite3.IntegrityError:
+         st.error("Roll number already exists!")
+     finally:
+         conn.close()
+
+ def save_image_to_hugging_face(image, name, roll_no):
+     """ Saves the image locally and uploads it to Hugging Face. """
+     filename = f"{name}_{roll_no}.jpg"
+     local_path = os.path.join(KNOWN_FACES_DIR, filename)
+     image.save(local_path)
+
+     try:
+         api.upload_file(path_or_fileobj=local_path, path_in_repo=filename, repo_id=REPO_ID, repo_type="space", token=hf_token)
+         st.success(f"Image uploaded to Hugging Face: {filename}")
+     except Exception as e:
+         st.error(f"Error uploading image to Hugging Face: {e}")
+
+     return local_path
+
+ # Initialize the database when the app starts
+ initialize_database()
+
+ # Streamlit user interface (UI)
+ st.title("Student Registration with Hugging Face Image Upload")
+
+ # Input fields for student details
+ name = st.text_input("Enter your name")
+ roll_no = st.text_input("Enter your roll number")
+
+ # Choose input method for the image (webcam or file upload)
+ capture_mode = st.radio("Choose an option to upload your image", ["Use Webcam", "Upload File"])
+
+ # Handle webcam capture or file upload
+ if capture_mode == "Use Webcam":
+     picture = st.camera_input("Take a picture")  # Capture image using webcam
+ elif capture_mode == "Upload File":
+     picture = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])  # Upload image from file system
+
+ # Save data and process image on button click
+ if st.button("Register"):
+     if not name or not roll_no:
+         st.error("Please fill in both name and roll number.")
+     elif not picture:
+         st.error("Please upload or capture an image.")
+     else:
          try:
+             # Open the image based on capture mode
+             if capture_mode == "Use Webcam" and picture:
+                 image = Image.open(picture)
+             elif capture_mode == "Upload File" and picture:
+                 image = Image.open(picture)
+
+             # Save the image locally and upload it to Hugging Face
+             image_path = save_image_to_hugging_face(image, name, roll_no)
+             save_to_database(name, roll_no, image_path)
          except Exception as e:
+             st.error(f"An error occurred: {e}")
+
+ # Display registered student data
+ if st.checkbox("Show registered students"):
+     conn = sqlite3.connect(DATABASE)
+     cursor = conn.cursor()
+     cursor.execute("SELECT name, roll_no, image_path, timestamp FROM students")
+     rows = cursor.fetchall()
+     conn.close()
+
+     st.write("### Registered Students")
+     for row in rows:
+         name, roll_no, image_path, timestamp = row
+         st.write(f"**Name:** {name}, **Roll No:** {roll_no}, **Timestamp:** {timestamp}")
+         st.image(image_path, caption=f"{name} ({roll_no})", use_column_width=True)
+
+ # Face and Emotion Detection Function
+ def detect_faces_and_emotions(image):
+     gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+     face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
+     faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.3, minNeighbors=5)
+
+     for (x, y, w, h) in faces:
+         face = gray_image[y:y+h, x:x+w]
+         resized_face = cv2.resize(face, (48, 48))  # Resize face to 48x48
+         rgb_face = cv2.cvtColor(resized_face, cv2.COLOR_GRAY2RGB)
+         normalized_face = rgb_face / 255.0
+         reshaped_face = np.reshape(normalized_face, (1, 48, 48, 3))
+
+         # Predict the emotion
+         emotion_prediction = emotion_model.predict(reshaped_face)
+         emotion_label = np.argmax(emotion_prediction)
+         return EMOTION_LABELS[emotion_label]
+     return None
+
+ # UI for Emotion Detection
+ if st.sidebar.selectbox("Menu", ["Register Student", "Face Recognition and Emotion Detection", "View Attendance"]) == "Face Recognition and Emotion Detection":
+     st.subheader("Recognize Faces and Detect Emotions")
+     action = st.radio("Choose Action", ["Upload Image", "Use Webcam"])
+
+     if action == "Upload Image":
+         uploaded_file = st.file_uploader("Upload Image", type=["jpg", "jpeg", "png"])
+         if uploaded_file:
+             img = Image.open(uploaded_file)
+             img_array = np.array(img)
+             emotion_label = detect_faces_and_emotions(img_array)
+             if emotion_label:
+                 st.success(f"Emotion Detected: {emotion_label}")
+             else:
+                 st.warning("No face detected.")
+
+     elif action == "Use Webcam":
+         st.info("Use the camera input widget to capture an image.")
+         camera_image = st.camera_input("Take a picture")
+         if camera_image:
+             img = Image.open(camera_image)
+             img_array = np.array(img)
+             emotion_label = detect_faces_and_emotions(img_array)
+             if emotion_label:
+                 st.success(f"Emotion Detected: {emotion_label}")
+             else:
+                 st.warning("No face detected.")
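
For quick local testing outside the Streamlit UI, the prediction path added in this commit can be exercised on its own. The sketch below is a minimal standalone version of the new detect_faces_and_emotions(): the model filename and the EMOTION_LABELS order are taken from the diff, while the test image path is hypothetical. Note that the previous version of app.py used six labels, so it is worth confirming that the model's output size really matches the new seven-entry list before relying on the mapping.

# Standalone sketch (assumes CNN_Model_acc_75.h5 is in the working directory
# and that its output order matches EMOTION_LABELS from the new app.py).
import cv2
import numpy as np
from keras.models import load_model

EMOTION_LABELS = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"]

def predict_emotion(image_path):
    """Detect the first face in an image file and return its predicted emotion."""
    model = load_model("CNN_Model_acc_75.h5")
    cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")

    image = cv2.imread(image_path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)

    for (x, y, w, h) in faces:
        face = gray[y:y + h, x:x + w]
        face = cv2.resize(face, (48, 48))              # model expects 48x48 input
        face = cv2.cvtColor(face, cv2.COLOR_GRAY2RGB)  # replicate grayscale into 3 channels
        face = face.astype("float32") / 255.0          # scale pixel values to [0, 1]
        face = np.reshape(face, (1, 48, 48, 3))        # add batch dimension
        probs = model.predict(face)
        return EMOTION_LABELS[int(np.argmax(probs))]
    return None  # no face found

if __name__ == "__main__":
    print(predict_emotion("known_faces/sample.jpg"))  # hypothetical test image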