# Streamlit app: detect facial expression from an uploaded photo, then
# generate and speak a mood-appropriate message.
import os
import time

import requests
import openai
import streamlit as st
from PIL import Image
from dotenv import load_dotenv

# Load environment variables (HUGGINGFACE_API_KEY, OPENAI_API_KEY) from .env
load_dotenv()

# Hugging Face Inference API endpoint for the facial-expression classifier.
emotion_model_url = "https://api-inference.huggingface.co/models/trpakov/vit-face-expression"
headers = {"Authorization": f"Bearer {os.getenv('HUGGINGFACE_API_KEY')}"}

# Configure the OpenAI client from the environment.
openai.api_key = os.getenv('OPENAI_API_KEY')
# Function to query the facial expression recognition model | |
def query_emotion(filename):
    """Classify the facial expression in an image via the Hugging Face API.

    Parameters
    ----------
    filename : str
        Path of the image file to send to the inference endpoint.

    Returns
    -------
    list | dict | None
        The decoded JSON response on success, or ``None`` on a non-200
        response (the error text is also shown in the Streamlit UI).
    """
    with open(filename, "rb") as f:
        data = f.read()
    # Explicit timeout: requests waits indefinitely by default, which would
    # freeze the whole Streamlit app if the inference endpoint hangs.
    response = requests.post(emotion_model_url, headers=headers, data=data, timeout=30)
    if response.status_code == 200:
        return response.json()
    st.error("Error detecting facial expression: " + response.text)
    return None
# Function to generate a response using OpenAI based on detected emotion | |
def generate_text_based_on_mood(emotion):
    """Generate a short, mood-appropriate message with OpenAI GPT-4.

    Parameters
    ----------
    emotion : str
        Emotion label reported by the expression model (e.g. "happy", "sad").

    Returns
    -------
    str
        The generated message, or a fixed apology string if the API call fails.
    """
    # Prompt per recognized emotion label.  The original if/elif chain left
    # `prompt` unbound for any other label (e.g. the model's "disgust" or
    # "surprise" outputs), raising NameError; a lookup with a generic default
    # handles every label.
    prompts = {
        "happy": "Give a motivational quote to celebrate happiness.",
        "sad": "Provide a comforting message for someone feeling sad.",
        "angry": "Suggest a way to calm down someone feeling angry.",
        "fear": "Give an encouraging message for someone feeling fearful.",
        "surprised": "Offer a fun fact or light-hearted comment for someone feeling surprised.",
        "neutral": "Provide a general motivational quote.",
    }
    try:
        prompt = prompts.get(emotion, "Provide a general motivational quote.")
        # Call OpenAI's chat completion API.
        response = openai.ChatCompletion.create(
            model="gpt-4",  # Specify the GPT-4 model
            messages=[
                {"role": "user", "content": prompt}
            ]
        )
        # Extract the generated text from the first choice.
        generated_text = response['choices'][0]['message']['content']
        return generated_text.strip()
    except Exception as e:
        # Best-effort UX: surface the error in the UI and fall back to a
        # canned message rather than crashing the app.
        st.error(f"Error generating text: {e}")
        return "Sorry, I couldn't come up with a message at this moment."
# Function to convert text to speech using gTTS | |
def text_to_speech(text):
    """Synthesize *text* to speech with gTTS and write it to ``output.mp3``.

    Returns the saved audio filename, or ``None`` when synthesis fails
    (the failure is reported through a Streamlit error banner).
    """
    from gtts import gTTS
    try:
        speech = gTTS(text, lang='en')
        out_path = "output.mp3"
        # Persist the synthesized audio so st.audio can play it back.
        speech.save(out_path)
        return out_path
    except Exception as exc:
        st.error(f"Error with TTS: {exc}")
        return None
# Streamlit UI | |
# ---------------------------------------------------------------------------
# Streamlit UI: upload -> detect emotion -> generate message -> play audio
# ---------------------------------------------------------------------------
st.title("Facial Expression Mood Detector")
st.write("Upload an image of a face to detect mood and receive uplifting messages or jokes.")

# Accept a single face photo from the user.
uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])

if uploaded_file is not None:
    # Show the user what was uploaded.
    picture = Image.open(uploaded_file)
    st.image(picture, caption='Uploaded Image', use_column_width=True)

    # Persist the upload to disk; the HF query helper reads from a file path.
    with open("uploaded_image.jpg", "wb") as out:
        out.write(uploaded_file.getbuffer())

    # Run facial-expression detection on the saved image.
    expression_output = query_emotion("uploaded_image.jpg")
    if expression_output:
        # The first entry's 'label' field holds the detected emotion
        # (adjust if the API response structure changes).
        emotion = expression_output[0]['label']
        st.write(f"Detected emotion: {emotion}")

        # Produce a mood-appropriate message and display it.
        message = generate_text_based_on_mood(emotion)
        st.write("Here's something to cheer you up:")
        st.write(message)

        # Narrate the message; only offer playback if synthesis succeeded.
        audio_file = text_to_speech(message)
        if audio_file:
            st.audio(audio_file)  # Streamlit will handle playback