#############################################################################################################################
# Filename   : app.py
# Description: A Streamlit application to detect facial expressions from images and provide responses.
# Author     : Lucas Yao
#
# Copyright © 2024 by Lucas Yao
#############################################################################################################################
# Import libraries.
import os                        # Access environment variables.
import numpy as np               # Convert images to arrays for the FER detector.
import streamlit as st           # Build the GUI of the application.
from PIL import Image            # Handle image operations.
from dotenv import load_dotenv   # Load environment variables from a .env file.
from fer import FER              # Facial expression recognition model.
import openai                    # OpenAI API for generating text responses.
#############################################################################################################################
# Load environment variable(s).
load_dotenv()
# Set up the OpenAI API key.
openai.api_key = os.getenv('OPENAI_API_KEY')
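# Expected .env entry (the value below is a placeholder, not a real key):
# OPENAI_API_KEY=sk-your-key-here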
#############################################################################################################################
# Function to query the facial expression recognition model using FER.
def query_emotion(image):
    detector = FER()
    # FER expects a NumPy array, so convert the PIL image (dropping any alpha channel) first.
    emotions = detector.detect_emotions(np.array(image.convert("RGB")))
    if emotions:
        # Get the emotion with the highest score for the first detected face.
        dominant_emotion = max(emotions[0]['emotions'], key=emotions[0]['emotions'].get)
        return dominant_emotion
    else:
        st.error("Could not detect any emotion.")
        return None
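# For reference, detect_emotions() returns one entry per detected face, roughly shaped like
# (illustrative values only):
# [{'box': [x, y, w, h],
#   'emotions': {'angry': 0.02, 'disgust': 0.0, 'fear': 0.05, 'happy': 0.81,
#                'sad': 0.03, 'surprise': 0.06, 'neutral': 0.03}}]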
#############################################################################################################################
# Function to generate a response using OpenAI based on the detected emotion.
def generate_text_based_on_mood(emotion, response_type):
    try:
        if response_type == "Joke":
            prompt = f"Generate a light-hearted joke for someone who is feeling {emotion}."
        else:  # Motivational Message
            prompt = f"Generate a motivational message for someone who is feeling {emotion}."
        # Call the Chat Completions API with GPT-4 (this uses the pre-1.0 openai interface).
        response = openai.ChatCompletion.create(
            model="gpt-4",  # Specify the GPT-4 model.
            messages=[
                {"role": "user", "content": prompt}
            ]
        )
        # Extract the generated text.
        generated_text = response['choices'][0]['message']['content']
        return generated_text.strip()
    except Exception as e:
        st.error(f"Error generating text: {e}")
        return "Sorry, I couldn't come up with a message at this moment."
#############################################################################################################################
# Function to convert text to speech using gTTS.
def text_to_speech(text):
    from gtts import gTTS
    try:
        tts = gTTS(text, lang='en')
        audio_file = "output.mp3"
        tts.save(audio_file)  # Save the audio file.
        return audio_file
    except Exception as e:
        st.error(f"Error with TTS: {e}")
        return None
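# Note: gTTS synthesizes speech via an online service, so this step needs network access;
# output.mp3 is written to the working directory and overwritten on each request.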
#############################################################################################################################
# Main function to create the Streamlit web application.
def main():
    st.title("Facial Expression Mood Detector")
    st.write("Upload an image of a face to detect mood and receive a response.")
    # Upload image.
    uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
    if uploaded_file is not None:
        # Load and display the image.
        image = Image.open(uploaded_file)
        st.image(image, caption='Uploaded Image', use_column_width=True)
        # Detect facial expression.
        emotion = query_emotion(image)
        if emotion:
            st.write(f"Detected emotion: {emotion}")
            # Dropdown for selecting the response type.
            response_type = st.selectbox("Select the type of response:", ["Joke", "Motivational Message"])
            # Generate text based on the detected emotion and user preference.
            if st.button("Get Response"):
                message = generate_text_based_on_mood(emotion, response_type)
                st.write("Here's your response:")
                st.write(message)
                # Convert the generated message to audio.
                audio_file = text_to_speech(message)
                # Provide an audio player in the Streamlit app if the audio file exists.
                if audio_file:
                    st.audio(audio_file)  # Streamlit handles playback.
#############################################################################################################################
# Run the application.
if __name__ == "__main__":
    main()
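#############################################################################################################################
# Run locally with: streamlit run app.py
# Dependencies inferred from the imports above (an assumption, not a pinned requirements list):
#   streamlit, pillow, python-dotenv, fer, openai, gtts, numpy
#   (fer additionally pulls in TensorFlow and OpenCV for face detection.)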