Sephfox committed · Commit 064bce5 (verified) · Parent: f02e70c

Update app.py

Files changed (1): app.py (+28 -25)
app.py CHANGED
@@ -59,8 +59,8 @@ emotion_classes = pd.Categorical(df['emotion']).categories
 emotion_prediction_model = AutoModelForSequenceClassification.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion")
 emotion_prediction_tokenizer = AutoTokenizer.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion")
 
-# Load pre-trained LLM model and tokenizer for response generation with increased context window
-response_model_name = "microsoft/DialoGPT-large"
+# Load pre-trained large language model and tokenizer for response generation with increased context window
+response_model_name = "facebook/opt-5b"
 response_tokenizer = AutoTokenizer.from_pretrained(response_model_name)
 response_model = AutoModelForCausalLM.from_pretrained(response_model_name)
 
@@ -82,7 +82,7 @@ total_percentage = 100
 emotion_history_file = 'emotion_history.json'
 global conversation_history
 conversation_history = []
-max_history_length = 100  # Increase the maximum history length
+max_history_length = 1000  # Increase the maximum history length
 
 def load_historical_data(file_path=emotion_history_file):
     if os.path.exists(file_path):
@@ -178,10 +178,10 @@ def generate_response(input_text, ai_emotion, conversation_history):
     prompt = f"You are an AI assistant currently feeling {ai_emotion}. Your response should reflect this emotion. Human: {input_text}\nAI:"
 
     # Add conversation history to the prompt
-    for entry in conversation_history[-20:]:  # Use last 20 entries for context
+    for entry in conversation_history[-100:]:  # Use last 100 entries for context
         prompt = f"Human: {entry['user']}\nAI: {entry['response']}\n" + prompt
 
-    inputs = response_tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=2048)
+    inputs = response_tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=8192)
 
     # Adjust generation parameters based on emotion
     temperature = 0.7
@@ -194,7 +194,7 @@ def generate_response(input_text, ai_emotion, conversation_history):
     response_ids = response_model.generate(
         inputs.input_ids,
         attention_mask=inputs.attention_mask,
-        max_length=2048,
+        max_length=8192,
         num_return_sequences=1,
         no_repeat_ngram_size=2,
         do_sample=True,
@@ -254,6 +254,7 @@ def visualize_emotions():
     plt.close()
 
     return 'emotional_state.png'
+
 def interactive_interface(input_text):
     global conversation_history
     try:
@@ -287,26 +288,28 @@ def interactive_interface(input_text):
 
         emotion_visualization = visualize_emotions()
 
-        return (
-            predicted_emotion,
-            ai_emotion,
-            str(sentiment_scores),
-            str(entities),
-            str(text_complexity),
-            response,
-            emotion_visualization
-        )
+        analysis_result = {
+            'predicted_user_emotion': predicted_emotion,
+            'ai_emotion': ai_emotion,
+            'sentiment_scores': sentiment_scores,
+            'entities': entities,
+            'text_complexity': text_complexity,
+            'response': response,
+            'emotion_visualization': emotion_visualization
+        }
+
+        return analysis_result
     except Exception as e:
         print(f"Error: {e}")
-        return (
-            'unknown',
-            'neutral',
-            str({'compound': 0, 'neg': 0, 'neu': 1, 'pos': 0}),
-            '[]',
-            str({'word_count': 0, 'sentence_count': 0, 'average_sentence_length': 0, 'polarity': 0, 'subjectivity': 0}),
-            "I'm sorry, but I encountered an error and was unable to generate a response.",
-            'emotional_state.png'
-        )
+        return {
+            'predicted_user_emotion': 'unknown',
+            'ai_emotion': 'neutral',
+            'sentiment_scores': {'compound': 0, 'neg': 0, 'neu': 1, 'pos': 0},
+            'entities': [],
+            'text_complexity': {'word_count': 0, 'sentence_count': 0, 'average_sentence_length': 0, 'polarity': 0, 'subjectivity': 0},
+            'response': "I'm sorry, but I encountered an error and was unable to generate a response.",
+            'emotion_visualization': 'emotional_state.png'
+        }
 
 # Create a Gradio interface
 gr.Interface(
@@ -322,5 +325,5 @@ gr.Interface(
         gr.Image(label="Emotional State Visualization")
     ],
     title="Emotion-Aware AI Assistant",
-    description="Interact with an AI assistant that responds based on its emotional state."
+    description="Interact with an AI assistant that responds based on its emotional state.",
 ).launch()
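
One caveat worth noting: interactive_interface now returns a dictionary, while the gr.Interface call still declares seven separate output components, which Gradio ordinarily fills from a tuple or list returned in component order. Below is a minimal sketch of one way to bridge the two, assuming the output components keep the order listed in the interface definition; the wrapper name interface_outputs is illustrative and not part of the commit.

def interface_outputs(input_text):
    # Hypothetical adapter: unpack the dictionary returned by interactive_interface
    # into the positional order expected by the seven Gradio output components.
    result = interactive_interface(input_text)
    return (
        result['predicted_user_emotion'],
        result['ai_emotion'],
        str(result['sentiment_scores']),
        str(result['entities']),
        str(result['text_complexity']),
        result['response'],
        result['emotion_visualization'],
    )

Passing fn=interface_outputs to gr.Interface would keep the existing output list working; a gr.Blocks layout could instead map the dictionary onto components explicitly. Separately, the new 8192-token limits assume the response model accepts a context that long; OPT-family checkpoints are typically limited to 2048 positions, so clamping to response_model.config.max_position_embeddings may be the safer choice.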