Sephfox committed
Commit 93bbf6a · verified · 1 Parent(s): c0ba949

Update app.py

Files changed (1): app.py (+7 -2)
app.py CHANGED
@@ -77,6 +77,7 @@ emotions = {
 
 total_percentage = 100
 emotion_history_file = 'emotion_history.json'
+global conversation_history
 conversation_history = []
 max_history_length = 30
 
@@ -170,6 +171,7 @@ def feature_transformations():
         additional_features[feature] += random.uniform(-1, 1)
 
 def generate_response(input_text, ai_emotion):
+    global conversation_history
     # Prepare a prompt based on the current emotion and input
     prompt = f"You are an AI assistant currently feeling {ai_emotion}. Your response should reflect this emotion. Human: {input_text}\nAI:"
 
@@ -177,7 +179,7 @@ def generate_response(input_text, ai_emotion):
     for entry in conversation_history[-5:]:  # Use last 5 entries for context
         prompt = f"Human: {entry['user']}\nAI: {entry['response']}\n" + prompt
 
-    inputs = response_tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1024)
+    inputs = response_tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=1024)
 
     # Adjust generation parameters based on emotion
     temperature = 0.7
@@ -189,9 +191,11 @@ def generate_response(input_text, ai_emotion):
     with torch.no_grad():
         response_ids = response_model.generate(
             inputs.input_ids,
+            attention_mask=inputs.attention_mask,
             max_length=1024,
             num_return_sequences=1,
             no_repeat_ngram_size=2,
+            do_sample=True,
             top_k=50,
             top_p=0.95,
             temperature=temperature
@@ -249,6 +253,7 @@ def visualize_emotions():
     return 'emotional_state.png'
 
 def interactive_interface(input_text):
+    global conversation_history
     try:
         evolve_emotions()
         predicted_emotion = predict_emotion(input_text)
@@ -321,4 +326,4 @@ iface = gr.Interface(
 )
 
 if __name__ == "__main__":
-    iface.launch()
+    iface.launch(share=True)
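
Note on the generation changes: `padding=True` plus an explicit `attention_mask` make the tokenizer/attention handling unambiguous, and `do_sample=True` is what actually makes `top_k`, `top_p`, and `temperature` take effect (without it, `generate` decodes greedily and ignores them). A minimal standalone sketch of the updated path, assuming `response_model`/`response_tokenizer` are a Hugging Face causal LM pair such as GPT-2 (the actual checkpoint is not shown in this diff):

    # Minimal sketch of the updated generation path, not the full app.py.
    # Assumption: response_tokenizer / response_model are a causal LM pair such as GPT-2.
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    response_tokenizer = AutoTokenizer.from_pretrained("gpt2")
    response_model = AutoModelForCausalLM.from_pretrained("gpt2")
    # GPT-2 defines no pad token; padding=True needs one, so reuse EOS (assumption, not in the diff).
    response_tokenizer.pad_token = response_tokenizer.eos_token

    prompt = "You are an AI assistant currently feeling joy. Human: hello\nAI:"
    inputs = response_tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=1024)

    with torch.no_grad():
        response_ids = response_model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,  # explicit mask, as added in the diff
            max_length=1024,
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            do_sample=True,  # enables sampling so top_k / top_p / temperature apply
            top_k=50,
            top_p=0.95,
            temperature=0.7,
            pad_token_id=response_tokenizer.eos_token_id,  # not in the diff; avoids a runtime warning
        )

    print(response_tokenizer.decode(response_ids[0], skip_special_tokens=True))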