Update app.py
app.py
CHANGED
@@ -60,7 +60,7 @@ emotion_prediction_model = AutoModelForSequenceClassification.from_pretrained("b
 emotion_prediction_tokenizer = AutoTokenizer.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion")
 
 # Load pre-trained LLM model and tokenizer for response generation with increased context window
-response_model_name = "microsoft/DialoGPT-
+response_model_name = "microsoft/DialoGPT-large"
 response_tokenizer = AutoTokenizer.from_pretrained(response_model_name)
 response_model = AutoModelForCausalLM.from_pretrained(response_model_name)
 
@@ -82,7 +82,7 @@ total_percentage = 100
 emotion_history_file = 'emotion_history.json'
 global conversation_history
 conversation_history = []
-max_history_length =
+max_history_length = 100  # Increase the maximum history length
 
 def load_historical_data(file_path=emotion_history_file):
     if os.path.exists(file_path):
@@ -151,6 +151,7 @@ def evolve_emotions():
     total = sum(e['percentage'] for e in emotions.values())
     for e in emotions:
         emotions[e]['percentage'] = (emotions[e]['percentage'] / total) * 100
+
 def update_emotion_history(emotion, percentage, intensity, context):
     entry = {
         'emotion': emotion,
@@ -172,16 +173,15 @@ def feature_transformations():
     for feature in additional_features:
         additional_features[feature] += random.uniform(-1, 1)
 
-def generate_response(input_text, ai_emotion):
-    global conversation_history
+def generate_response(input_text, ai_emotion, conversation_history):
     # Prepare a prompt based on the current emotion and input
     prompt = f"You are an AI assistant currently feeling {ai_emotion}. Your response should reflect this emotion. Human: {input_text}\nAI:"
 
     # Add conversation history to the prompt
-    for entry in conversation_history[-
+    for entry in conversation_history[-20:]:  # Use last 20 entries for context
         prompt = f"Human: {entry['user']}\nAI: {entry['response']}\n" + prompt
 
-    inputs = response_tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=
+    inputs = response_tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=2048)
 
     # Adjust generation parameters based on emotion
     temperature = 0.7
@@ -194,7 +194,7 @@ def generate_response(input_text, ai_emotion):
     response_ids = response_model.generate(
         inputs.input_ids,
         attention_mask=inputs.attention_mask,
-        max_length=
+        max_length=2048,
         num_return_sequences=1,
         no_repeat_ngram_size=2,
         do_sample=True,
@@ -271,7 +271,7 @@ def interactive_interface(input_text):
         ai_emotion = max(emotions, key=lambda e: emotions[e]['percentage'])
 
         # Generate response based on AI's emotion
-        response = generate_response(input_text, ai_emotion)
+        response = generate_response(input_text, ai_emotion, conversation_history)
 
         # Update conversation history
         conversation_history.append({
@@ -294,39 +294,36 @@ def interactive_interface(input_text):
             'sentiment_scores': sentiment_scores,
             'entities': entities,
             'text_complexity': text_complexity,
-            'current_emotional_state': emotions,
             'response': response,
             'emotion_visualization': emotion_visualization
         }
 
         return analysis_result
     except Exception as e:
-        print(f"
-        return
-
-
-
-
-
-
-
-
-        f"AI Emotion: {response['ai_emotion']}\n"
-        f"AI Response: {response['response']}\n\n"
-        f"Sentiment: {response['sentiment_scores']}\n"
-        f"Entities: {response['entities']}\n"
-        f"Text Complexity: {response['text_complexity']}\n",
-        response['emotion_visualization']
-        )
+        print(f"Error: {e}")
+        return {
+            'predicted_user_emotion': 'unknown',
+            'ai_emotion': 'neutral',
+            'sentiment_scores': {'compound': 0, 'neg': 0, 'neu': 1, 'pos': 0},
+            'entities': [],
+            'text_complexity': {'word_count': 0, 'sentence_count': 0, 'average_sentence_length': 0, 'polarity': 0, 'subjectivity': 0},
+            'response': "I'm sorry, but I encountered an error and was unable to generate a response.",
+            'emotion_visualization': 'emotional_state.png'
+        }
 
-# Create Gradio interface
-
-    fn=
-    inputs="
-    outputs=[
-
-
-)
-
-
-
+# Create a Gradio interface
+gr.Interface(
+    fn=interactive_interface,
+    inputs=gr.Textbox(label="Your Message"),
+    outputs=[
+        gr.Textbox(label="Predicted User Emotion"),
+        gr.Textbox(label="AI Emotion"),
+        gr.Textbox(label="Sentiment Scores"),
+        gr.Textbox(label="Extracted Entities"),
+        gr.Textbox(label="Text Complexity"),
+        gr.Textbox(label="AI Response"),
+        gr.Image(label="Emotional State Visualization")
+    ],
+    title="Emotion-Aware AI Assistant",
+    description="Interact with an AI assistant that responds based on its emotional state."
+).launch()
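
For reference, a minimal standalone sketch of the pattern this commit settles on: generate_response takes the conversation history as an explicit parameter instead of a global, folds the last 20 exchanges into the prompt, and samples a reply. The model choice and the {'user', 'response'} entry shape mirror the diff; the 1024-token cap and the explicit pad_token_id are adjustments on my part, since DialoGPT inherits GPT-2's 1024-position limit and its tokenizer ships without a pad token.

from transformers import AutoModelForCausalLM, AutoTokenizer

response_tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
response_model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")

def generate_response(input_text, ai_emotion, conversation_history):
    # Prompt reflecting the AI's current emotion, as in the diff
    prompt = f"You are an AI assistant currently feeling {ai_emotion}. Your response should reflect this emotion. Human: {input_text}\nAI:"
    # Prepend the last 20 exchanges, oldest first
    for entry in conversation_history[-20:]:
        prompt = f"Human: {entry['user']}\nAI: {entry['response']}\n" + prompt
    # 1024 rather than 2048: DialoGPT's position embeddings stop at 1024
    inputs = response_tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1024)
    response_ids = response_model.generate(
        inputs.input_ids,
        attention_mask=inputs.attention_mask,
        max_length=1024,
        num_return_sequences=1,
        no_repeat_ngram_size=2,
        do_sample=True,
        temperature=0.7,
        pad_token_id=response_tokenizer.eos_token_id,  # GPT-2 tokenizer has no pad token
    )
    # Return only the newly generated tokens, not the echoed prompt
    return response_tokenizer.decode(
        response_ids[0][inputs.input_ids.shape[-1]:], skip_special_tokens=True
    )

# Example call; history entries use the shape the diff appends to conversation_history
history = [{'user': "Hi there!", 'response': "Hello! How can I help?"}]
print(generate_response("What's new?", "joy", history))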
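
One wiring detail worth flagging: gr.Interface expects fn to return one value per component in outputs, while interactive_interface returns a single dict. A thin adapter along these lines (a hypothetical helper, assuming the result keys visible in the diff) would keep the seven outputs in sync:

# Hypothetical adapter: unpack the dict returned by interactive_interface
# (defined earlier in app.py) into the 7-tuple the outputs list expects
def interface_fn(input_text):
    result = interactive_interface(input_text)
    return (
        result['predicted_user_emotion'],
        result['ai_emotion'],
        str(result['sentiment_scores']),   # dicts rendered as text for gr.Textbox
        str(result['entities']),
        str(result['text_complexity']),
        result['response'],
        result['emotion_visualization'],   # image path for gr.Image
    )

Passing fn=interface_fn instead of fn=interactive_interface would leave the rest of the gr.Interface(...) call unchanged.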