Lucasstranger1 committed on
Commit 3e57df9
1 Parent(s): 77aaaf6
Files changed (1)
  1. app.py +37 -45
app.py CHANGED
@@ -4,12 +4,13 @@ import openai
  import streamlit as st
  from PIL import Image
  from dotenv import load_dotenv
- import time
+ import torch
+ from transformers import AutoProcessor, AutoModelForImageClassification
 
  # Load environment variables from .env file
  load_dotenv()
 
- # Set up the Hugging Face API URL and your API key
+ # Set up the Hugging Face API for emotion detection
  emotion_model_url = "https://api-inference.huggingface.co/models/trpakov/vit-face-expression"
  headers = {"Authorization": f"Bearer {os.getenv('HUGGINGFACE_API_KEY')}"}
 
@@ -17,35 +18,33 @@ headers = {"Authorization": f"Bearer {os.getenv('HUGGINGFACE_API_KEY')}"}
  openai.api_key = os.getenv('OPENAI_API_KEY')
 
  # Function to query the facial expression recognition model
- def query_emotion(filename):
-     with open(filename, "rb") as f:
-         data = f.read()
-     response = requests.post(emotion_model_url, headers=headers, data=data)
+ def query_emotion(image):
+     # Load the processor and model
+     processor = AutoProcessor.from_pretrained("trpakov/vit-face-expression")
+     model = AutoModelForImageClassification.from_pretrained("trpakov/vit-face-expression")
+
+     # Preprocess the image
+     inputs = processor(images=image, return_tensors="pt")
+
+     # Perform inference
+     with torch.no_grad():
+         outputs = model(**inputs)
 
-     if response.status_code == 200:
-         return response.json()
-     else:
-         st.error("Error detecting facial expression: " + response.text)
-         return None
+     # Get predicted class index
+     logits = outputs.logits
+     predicted_class_idx = torch.argmax(logits, dim=-1).item()
+
+     # Look up the predicted label in the model's id2label mapping
+     predicted_label = model.config.id2label[predicted_class_idx]
+     return predicted_label
 
  # Function to generate a response using OpenAI based on detected emotion
  def generate_text_based_on_mood(emotion):
      try:
          # Create a dynamic prompt based on the detected emotion
-         if emotion == "happy":
-             prompt = "Give a motivational quote to celebrate happiness."
-         elif emotion == "sad":
-             prompt = "Provide a comforting message for someone feeling sad."
-         elif emotion == "angry":
-             prompt = "Suggest a way to calm down someone feeling angry."
-         elif emotion == "fear":
-             prompt = "Give an encouraging message for someone feeling fearful."
-         elif emotion == "surprised":
-             prompt = "Offer a fun fact or light-hearted comment for someone feeling surprised."
-         elif emotion == "neutral":
-             prompt = "Provide a general motivational quote."
+         prompt = f"Generate a light-hearted joke or motivational message for someone who is feeling {emotion}."
 
-         # Call OpenAI's API using the new interface
+         # Call OpenAI's API using GPT-4
          response = openai.ChatCompletion.create(
              model="gpt-4",  # Specify the GPT-4 model
              messages=[
@@ -85,25 +84,18 @@ if uploaded_file is not None:
      image = Image.open(uploaded_file)
      st.image(image, caption='Uploaded Image', use_column_width=True)
 
-     # Save the uploaded file temporarily
-     with open("uploaded_image.jpg", "wb") as f:
-         f.write(uploaded_file.getbuffer())
-
      # Detect facial expression
-     expression_output = query_emotion("uploaded_image.jpg")
-     if expression_output:
-         # Assuming the response has a 'label' field with the detected emotion
-         emotion = expression_output[0]['label']  # Adjust based on response structure
-         st.write(f"Detected emotion: {emotion}")
-
-         # Generate text based on detected emotion
-         message = generate_text_based_on_mood(emotion)
-         st.write("Here's something to remind you :")
-         st.write(message)
-
-         # Convert the generated message to audio
-         audio_file = text_to_speech(message)
-
-         # Provide an audio player in the Streamlit app if audio file exists
-         if audio_file:
-             st.audio(audio_file)  # Streamlit will handle playback
+     emotion = query_emotion(image)
+     st.write(f"Detected emotion: {emotion}")
+
+     # Generate text based on detected emotion
+     message = generate_text_based_on_mood(emotion)
+     st.write("Here's something to cheer you up:")
+     st.write(message)
+
+     # Convert the generated message to audio
+     audio_file = text_to_speech(message)
+
+     # Provide an audio player in the Streamlit app if audio file exists
+     if audio_file:
+         st.audio(audio_file)  # Streamlit will handle playback
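
Note that the new query_emotion reloads the ViT processor and weights from the Hub on every uploaded image. A minimal sketch of the same local-inference path with the load cached across Streamlit reruns; this helper is not part of the commit, and it assumes a Streamlit release that provides st.cache_resource:

import streamlit as st
import torch
from transformers import AutoProcessor, AutoModelForImageClassification

@st.cache_resource  # load the model once per session instead of once per upload
def load_expression_model():
    processor = AutoProcessor.from_pretrained("trpakov/vit-face-expression")
    model = AutoModelForImageClassification.from_pretrained("trpakov/vit-face-expression")
    return processor, model

def query_emotion(image):
    processor, model = load_expression_model()
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    # id2label maps the argmax index back to an emotion string such as "happy"
    return model.config.id2label[logits.argmax(-1).item()]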
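
generate_text_based_on_mood keeps using openai.ChatCompletion.create, which only exists in the pre-1.0 openai package. If the project ever moves to the 1.x SDK, the equivalent call looks roughly like the sketch below; the messages payload is a plausible stand-in, since the diff truncates the original list, and it assumes OPENAI_API_KEY is set in the environment as the app already expects:

from openai import OpenAI

client = OpenAI()  # picks up OPENAI_API_KEY from the environment

def generate_text_based_on_mood(emotion):
    # Same prompt as in the commit, sent through the 1.x chat completions API
    prompt = f"Generate a light-hearted joke or motivational message for someone who is feeling {emotion}."
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt}],
    )
    return response.choices[0].message.content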
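
text_to_speech is defined elsewhere in app.py and is untouched by this commit, so only its call site appears above. A purely illustrative stand-in with a compatible signature, assuming gTTS, could look like:

from gtts import gTTS

def text_to_speech(text, filename="response.mp3"):
    # Render the message to an MP3 file and return its path for st.audio()
    try:
        gTTS(text=text, lang="en").save(filename)
        return filename
    except Exception as exc:
        print(f"Text-to-speech failed: {exc}")
        return None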