Update app.py

app.py CHANGED

@@ -57,26 +57,32 @@ emotions_target = pd.Categorical(df['emotion']).codes
 emotion_classes = pd.Categorical(df['emotion']).categories
 
 # Load pre-trained BERT model for emotion prediction
-emotion_prediction_model =
-emotion_prediction_tokenizer =
-
-# Load pre-trained large language model and tokenizer for response generation
-
-response_tokenizer =
-
-
-
-
+emotion_prediction_model = None
+emotion_prediction_tokenizer = None
+
+# Load pre-trained large language model and tokenizer for response generation
+response_model = None
+response_tokenizer = None
+
+def load_models():
+    global emotion_prediction_model, emotion_prediction_tokenizer, response_model, response_tokenizer
+    if emotion_prediction_model is None or response_model is None:
+        emotion_prediction_model = AutoModelForSequenceClassification.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion")
+        emotion_prediction_tokenizer = AutoTokenizer.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion")
+        response_model_name = "gpt2-xl"
+        response_tokenizer = AutoTokenizer.from_pretrained(response_model_name)
+        response_model = AutoModelForCausalLM.from_pretrained(response_model_name)
+        response_tokenizer.pad_token = response_tokenizer.eos_token
 
 # Enhanced Emotional States
 emotions = {
-    'joy': {'percentage':
-    'sadness': {'percentage':
-    'anger': {'percentage':
-    'fear': {'percentage': 10, 'motivation': 'cautious and protective', 'intensity':
-    'love': {'percentage':
-    'surprise': {'percentage': 10, 'motivation': 'curious and intrigued', 'intensity':
-    'neutral': {'percentage':
+    'joy': {'percentage': 20, 'motivation': 'positive and uplifting', 'intensity': 8},
+    'sadness': {'percentage': 15, 'motivation': 'reflective and introspective', 'intensity': 6},
+    'anger': {'percentage': 15, 'motivation': 'passionate and driven', 'intensity': 7},
+    'fear': {'percentage': 10, 'motivation': 'cautious and protective', 'intensity': 5},
+    'love': {'percentage': 15, 'motivation': 'affectionate and caring', 'intensity': 7},
+    'surprise': {'percentage': 10, 'motivation': 'curious and intrigued', 'intensity': 6},
+    'neutral': {'percentage': 15, 'motivation': 'balanced and composed', 'intensity': 4},
 }
 
 total_percentage = 100
@@ -106,6 +112,9 @@ def update_emotion(emotion, percentage, intensity):
     for e in emotions:
         emotions[e]['percentage'] = (emotions[e]['percentage'] / total) * 100
 
+def normalize_context(context):
+    return context.lower().strip()
+
 creator.create("FitnessMulti", base.Fitness, weights=(-1.0, -0.5, -0.2))
 creator.create("Individual", list, fitness=creator.FitnessMulti)
 
@@ -138,7 +147,7 @@ def evolve_emotions():
 
     best_individual = tools.selBest(population, k=1)[0]
     emotion_values = best_individual[:len(emotions)]
-
+    intensities = best_individual[len(emotions):]
 
     for i, (emotion, data) in enumerate(emotions.items()):
         data['percentage'] = emotion_values[i]
@@ -150,6 +159,7 @@ def evolve_emotions():
         emotions[e]['percentage'] = (emotions[e]['percentage'] / total) * 100
 
 def predict_emotion(context):
+    load_models()
     inputs = emotion_prediction_tokenizer(context, return_tensors="pt", truncation=True, max_length=512)
     with torch.no_grad():
         outputs = emotion_prediction_model(**inputs)
@@ -208,14 +218,12 @@ def generate_emotion_visualization(ai_emotion, ai_emotion_percentage, ai_emotion
         emotion_visualization_path = None
     return emotion_visualization_path
 
-def generate_response(
-
-
-
-    # Add conversation history to the prompt
-    for entry in conversation_history[-100:]:  # Use last 100 entries for context
-        prompt = f"Human: {entry['user']}\nAI: {entry['response']}\n" + prompt
+def generate_response(ai_emotion, input_text):
+    load_models()
+    # Prepare a prompt based on the current emotion
+    prompt = f"As an AI assistant, I am currently feeling {ai_emotion}. My response will reflect this emotional state."
 
+    # Generate the response
     inputs = response_tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=8192)
 
     # Adjust generation parameters based on emotion
@@ -251,7 +259,7 @@ def interactive_interface(input_text):
     text_complexity = analyze_text_complexity(input_text)
     ai_emotion, ai_emotion_percentage, ai_emotion_intensity = get_ai_emotion(input_text)
     emotion_visualization = generate_emotion_visualization(ai_emotion, ai_emotion_percentage, ai_emotion_intensity)
-    response = generate_response(
+    response = generate_response(ai_emotion, input_text)
 
     # Update conversation history
     conversation_history.append({'user': input_text, 'response': response})
@@ -268,7 +276,7 @@ def interactive_interface(input_text):
         gr.Textbox(value=f"{ai_emotion_percentage:.2f}%", label="AI Emotion Percentage"),
         gr.Textbox(value=f"{ai_emotion_intensity:.2f}", label="AI Emotion Intensity"),
         gr.Image(value=emotion_visualization, label="Emotion Visualization"),
-        gr.Textbox(value=
+        gr.Textbox(value=response, label="AI Response")
     )
 
     # 443 additional features
@@ -305,8 +313,7 @@ def visualize_emotions():
 
 # Create the Gradio interface
 iface = gr.Interface(
-    fn=interactive_interface,
-    inputs=gr.Textbox(label="Input Text"),
+    fn=interactive_interface,inputs=gr.Textbox(label="Input Text"),
     outputs=[
         gr.Textbox(label="Predicted Emotion"),
         gr.Textbox(label="Sentiment Scores"),
@@ -322,4 +329,4 @@ iface = gr.Interface(
     description="An AI assistant that can analyze the emotional content of text and generate responses based on its emotional state.",
 )
 
-iface.launch()
+iface.launch()
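
The substance of this change is lazy model initialization: the transformer objects start out as None at module level, and load_models() builds them once, on the first call from predict_emotion() or generate_response(). A minimal, self-contained sketch of that pattern follows; the stand-in _build_model() is a hypothetical placeholder, not the Space's actual from_pretrained() calls.

_model = None  # module-level cache, playing the role of emotion_prediction_model / response_model

def _build_model():
    # Hypothetical placeholder for AutoModelForSequenceClassification.from_pretrained(...)
    return {"name": "placeholder-model"}

def load_model():
    # Build the heavyweight object only once; later calls are cheap no-ops.
    global _model
    if _model is None:
        _model = _build_model()
    return _model

def predict(text):
    model = load_model()  # safe to call on every request, as predict_emotion() does with load_models()
    return f"{model['name']} processed {len(text)} characters"

print(predict("hello"))  # first call triggers the (placeholder) load
print(predict("world"))  # second call reuses the cached object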