Sephfox committed on
Commit
2dfa2da
·
verified ·
1 Parent(s): 7783422

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -25
app.py CHANGED
@@ -62,7 +62,7 @@ emotion_prediction_tokenizer = AutoTokenizer.from_pretrained("bhadresh-savani/di
62
  # Load pre-trained large language model and tokenizer for response generation with increased context window
63
  response_model_name = "gpt2-xl"
64
  response_tokenizer = AutoTokenizer.from_pretrained(response_model_name)
65
- response_model = AutoModelForCausalLM.from_pretrained(response_model_name)
66
 
67
  # Set the pad token
68
  response_tokenizer.pad_token = response_tokenizer.eos_token
@@ -147,10 +147,10 @@ def evolve_emotions():
147
  data['percentage'] = emotion_values[i]
148
  data['intensity'] = intensities[i]
149
 
150
- # Normalize percentages
151
  total = sum(e['percentage'] for e in emotions.values())
152
  for e in emotions:
153
- emotions[e]['percentage'] = (emotions[e]['percentage'] / total) * 100
154
 
155
  def update_emotion_history(emotion, percentage, intensity, context):
156
  entry = {
@@ -175,7 +175,7 @@ def feature_transformations():
175
 
176
  def generate_response(input_text, ai_emotion, conversation_history):
177
  # Prepare a prompt based on the current emotion and input
178
- prompt = f"You are an AI assistant currently feeling {ai_emotion}. Your response should reflect this emotion. Human: {input_text}\nAI:"
179
 
180
  # Add conversation history to the prompt
181
  for entry in conversation_history[-100:]: # Use last 100 entries for context
@@ -190,7 +190,7 @@ def generate_response(input_text, ai_emotion, conversation_history):
190
  elif ai_emotion == 'joy':
191
  temperature = 0.5 # More focused responses for joyful state
192
 
193
- with torch.no_grad():
194
  response_ids = response_model.generate(
195
  inputs.input_ids,
196
  attention_mask=inputs.attention_mask,
@@ -288,26 +288,28 @@ def interactive_interface(input_text):
288
 
289
  emotion_visualization = visualize_emotions()
290
 
291
- return (
292
- predicted_emotion,
293
- ai_emotion,
294
- str(sentiment_scores),
295
- str(entities),
296
- str(text_complexity),
297
- response,
298
- emotion_visualization
299
- )
 
 
300
  except Exception as e:
301
  print(f"Error: {e}")
302
- return (
303
- 'unknown',
304
- 'neutral',
305
- str({'compound': 0, 'neg': 0, 'neu': 1, 'pos': 0}),
306
- '[]',
307
- str({'word_count': 0, 'sentence_count': 0, 'average_sentence_length': 0, 'polarity': 0, 'subjectivity': 0}),
308
- "I'm sorry, but I encountered an error and was unable to generate a response.",
309
- 'emotional_state.png'
310
- )
311
 
312
  # Create a Gradio interface
313
  gr.Interface(
@@ -322,6 +324,6 @@ gr.Interface(
322
  gr.Textbox(label="AI Response"),
323
  gr.Image(label="Emotional State Visualization")
324
  ],
325
- title="Emotion-Aware AI Assistant",
326
- description="Interact with an AI assistant that responds based on its emotional state.",
327
  ).launch()
 
62
  # Load pre-trained large language model and tokenizer for response generation with increased context window
63
  response_model_name = "gpt2-xl"
64
  response_tokenizer = AutoTokenizer.from_pretrained(response_model_name)
65
+ response_model = AutoModelForCausalLM.from_pretrained(response_model_name, torch_dtype=torch.float16, device_map="auto")
66
 
67
  # Set the pad token
68
  response_tokenizer.pad_token = response_tokenizer.eos_token
 
147
  data['percentage'] = emotion_values[i]
148
  data['intensity'] = intensities[i]
149
 
150
+ # Normalize percentages
151
  total = sum(e['percentage'] for e in emotions.values())
152
  for e in emotions:
153
+ emotions[e]['percentage'] = (emotions[e]['percentage'] /total) * 100
154
 
155
  def update_emotion_history(emotion, percentage, intensity, context):
156
  entry = {
 
175
 
176
  def generate_response(input_text, ai_emotion, conversation_history):
177
  # Prepare a prompt based on the current emotion and input
178
+ prompt = f"You are an AI assistant created by Sephiroth Baptiste, and you are currently feeling {ai_emotion}. Your response should reflect this emotion. Human: {input_text}\nAI:"
179
 
180
  # Add conversation history to the prompt
181
  for entry in conversation_history[-100:]: # Use last 100 entries for context
 
190
  elif ai_emotion == 'joy':
191
  temperature = 0.5 # More focused responses for joyful state
192
 
193
+ with torch.no_grad(), torch.cuda.amp.autocast():
194
  response_ids = response_model.generate(
195
  inputs.input_ids,
196
  attention_mask=inputs.attention_mask,
 
288
 
289
  emotion_visualization = visualize_emotions()
290
 
291
+ analysis_result = {
292
+ 'predicted_user_emotion': predicted_emotion,
293
+ 'ai_emotion': ai_emotion,
294
+ 'sentiment_scores': sentiment_scores,
295
+ 'entities': entities,
296
+ 'text_complexity': text_complexity,
297
+ 'response': response,
298
+ 'emotion_visualization': emotion_visualization
299
+ }
300
+
301
+ return analysis_result
302
  except Exception as e:
303
  print(f"Error: {e}")
304
+ return {
305
+ 'predicted_user_emotion': 'unknown',
306
+ 'ai_emotion': 'neutral',
307
+ 'sentiment_scores': {'compound': 0, 'neg': 0, 'neu': 1, 'pos': 0},
308
+ 'entities': [],
309
+ 'text_complexity': {'word_count': 0, 'sentence_count': 0, 'average_sentence_length': 0, 'polarity': 0, 'subjectivity': 0},
310
+ 'response': "I'm sorry, but I encountered an error and was unable to generate a response.",
311
+ 'emotion_visualization': 'emotional_state.png'
312
+ }
313
 
314
  # Create a Gradio interface
315
  gr.Interface(
 
324
  gr.Textbox(label="AI Response"),
325
  gr.Image(label="Emotional State Visualization")
326
  ],
327
+ title="Emotion-Aware AI Assistant by Sephfox",
328
+ description="Interact with an AI assistant created by Sephfox that responds based on its emotional state.",
329
  ).launch()