Lucasstranger1 committed a1b1b7f
Parent(s): c884652
update

app.py CHANGED
@@ -4,6 +4,7 @@ import openai
 import streamlit as st
 from PIL import Image
 from dotenv import load_dotenv
+import time
 
 # Load environment variables from .env file
 load_dotenv()
@@ -27,15 +28,26 @@ def query_emotion(filename):
         st.error("Error detecting facial expression: " + response.text)
         return None
 
-# Function to generate a
+# Function to generate a response using OpenAI based on detected emotion
 def generate_text_based_on_mood(emotion):
     try:
-        # Create a prompt
-
-
+        # Create a dynamic prompt based on the detected emotion
+        if emotion == "happy":
+            prompt = "Give a motivational quote to celebrate happiness."
+        elif emotion == "sad":
+            prompt = "Provide a comforting message for someone feeling sad."
+        elif emotion == "angry":
+            prompt = "Suggest a way to calm down someone who is feeling angry."
+        elif emotion == "fear":
+            prompt = "Give an encouraging message for someone feeling fearful."
+        elif emotion == "surprised":
+            prompt = "Offer a fun fact or light-hearted comment for someone feeling surprised."
+        elif emotion == "neutral":
+            prompt = "Provide a general motivational quote."
+
         # Call OpenAI's API
         response = openai.ChatCompletion.create(
-            model="gpt-
+            model="gpt-4",  # Use GPT-4 model
             messages=[
                 {"role": "user", "content": prompt}
             ]
@@ -47,7 +59,7 @@ def generate_text_based_on_mood(emotion):
 
     except Exception as e:
         st.error(f"Error generating text: {e}")
-        return "Sorry, I couldn't come up with a
+        return "Sorry, I couldn't come up with a message at this moment."
 
 # Function to convert text to speech using gTTS
 def text_to_speech(text):
@@ -63,7 +75,7 @@ def text_to_speech(text):
 
 # Streamlit UI
 st.title("Facial Expression Mood Detector")
-st.write("Upload an image of a face to detect mood and receive uplifting messages or
+st.write("Upload an image of a face to detect mood and receive uplifting messages or motivations.")
 
 # Upload image
 uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
@@ -85,12 +97,12 @@ if uploaded_file is not None:
     st.write(f"Detected emotion: {emotion}")
 
     # Generate text based on detected emotion
-
+    message = generate_text_based_on_mood(emotion)
     st.write("Here's something to cheer you up:")
-    st.write(
+    st.write(message)
 
-    # Convert the generated
-    audio_file = text_to_speech(
+    # Convert the generated message to audio
+    audio_file = text_to_speech(message)
 
     # Provide an audio player in the Streamlit app if audio file exists
     if audio_file:
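Note on the new if/elif chain: if query_emotion() returns a label outside the six handled cases (e.g. "disgust"), prompt is never assigned, so the ChatCompletion call raises a NameError that falls through to the generic except. A minimal sketch of an equivalent mapping with a fallback; build_prompt is a hypothetical helper, not part of this commit:

# Sketch only: same prompts as the commit, but with a default so 'prompt'
# is always defined even for emotion labels the if/elif chain does not cover.
EMOTION_PROMPTS = {
    "happy": "Give a motivational quote to celebrate happiness.",
    "sad": "Provide a comforting message for someone feeling sad.",
    "angry": "Suggest a way to calm down someone who is feeling angry.",
    "fear": "Give an encouraging message for someone feeling fearful.",
    "surprised": "Offer a fun fact or light-hearted comment for someone feeling surprised.",
    "neutral": "Provide a general motivational quote.",
}

def build_prompt(emotion):
    # Fall back to the neutral prompt for any unrecognised label.
    return EMOTION_PROMPTS.get(emotion, EMOTION_PROMPTS["neutral"])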
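openai.ChatCompletion.create with a messages list is the pre-1.0 interface of the openai Python package, so the commit assumes openai<1.0. The lines that unpack the response are unchanged and therefore not shown in the diff; under that older SDK the reply text would typically be read as sketched below (generate_message is a hypothetical wrapper, and the unpacking line is an assumption):

import openai  # pre-1.0 SDK, matching openai.ChatCompletion.create in app.py

def generate_message(prompt):
    # Hypothetical wrapper around the call added in this commit; the response
    # unpacking below is an assumption, since those lines are outside the diff.
    response = openai.ChatCompletion.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt}],
    )
    return response["choices"][0]["message"]["content"]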
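The body of text_to_speech() is untouched by this commit and not visible in the diff; only its comment says it uses gTTS. Assuming it saves an MP3 and returns the file path for the st.audio() player, a stand-in could look roughly like this (the function name and the "speech.mp3" filename are assumptions):

from gtts import gTTS

def text_to_speech_sketch(text, path="speech.mp3"):
    # Hypothetical stand-in for the unchanged text_to_speech() in app.py:
    # synthesise the message with gTTS and return the saved file's path,
    # or None if synthesis fails (the caller checks "if audio_file:").
    try:
        tts = gTTS(text=text, lang="en")
        tts.save(path)
        return path
    except Exception:
        return None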