Update app.py
app.py CHANGED
@@ -17,7 +17,6 @@ from nltk.chunk import ne_chunk
 from textblob import TextBlob
 import matplotlib.pyplot as plt
 import seaborn as sns
-from accelerate import init_empty_weights, load_checkpoint_and_dispatch
 
 warnings.filterwarnings('ignore', category=FutureWarning, module='huggingface_hub.file_download')
 
@@ -63,8 +62,7 @@ emotion_prediction_tokenizer = AutoTokenizer.from_pretrained("bhadresh-savani/di
 # Load pre-trained large language model and tokenizer for response generation with increased context window
 response_model_name = "gpt2-xl"
 response_tokenizer = AutoTokenizer.from_pretrained(response_model_name)
-
-response_model = load_checkpoint_and_dispatch(AutoModelForCausalLM, response_model_name, device_map="auto")
+response_model = AutoModelForCausalLM.from_pretrained(response_model_name)
 
 # Set the pad token
 response_tokenizer.pad_token = response_tokenizer.eos_token
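Review note on this hunk: the removed call passed the AutoModelForCausalLM class itself to load_checkpoint_and_dispatch, which expects an already-instantiated model, so the old line could not have run as written; the plain from_pretrained load is the straightforward fix. If sharded or multi-device placement is still wanted, a minimal sketch (assuming the accelerate package stays installed; this is not part of the commit):

    from transformers import AutoModelForCausalLM

    # device_map="auto" lets transformers/accelerate spread gpt2-xl across
    # the available GPUs and CPU instead of loading it on a single device.
    response_model = AutoModelForCausalLM.from_pretrained("gpt2-xl", device_map="auto")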
@@ -98,7 +96,6 @@ def save_historical_data(historical_data, file_path=emotion_history_file):
 
 emotion_history = load_historical_data()
 
-
 def update_emotion(emotion, percentage, intensity):
     emotions[emotion]['percentage'] += percentage
     emotions[emotion]['intensity'] = intensity
@@ -150,10 +147,10 @@ def evolve_emotions():
         data['percentage'] = emotion_values[i]
         data['intensity'] = intensities[i]
 
-
+    # Normalize percentages
     total = sum(e['percentage'] for e in emotions.values())
     for e in emotions:
-        emotions[e]['percentage'] = (emotions[e]['percentage'] /total) * 100
+        emotions[e]['percentage'] = (emotions[e]['percentage'] / total) * 100
 
 def update_emotion_history(emotion, percentage, intensity, context):
     entry = {
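Review note: the normalization divides by the summed percentages, so if every emotion's percentage happens to be zero, total is 0 and evolve_emotions() raises ZeroDivisionError. A minimal guard, sketched as a suggestion rather than part of the commit:

    total = sum(e['percentage'] for e in emotions.values())
    if total > 0:  # avoid ZeroDivisionError when all percentages are zero
        for e in emotions:
            emotions[e]['percentage'] = (emotions[e]['percentage'] / total) * 100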
@@ -178,114 +175,155 @@ def feature_transformations():
 
 def generate_response(input_text, ai_emotion, conversation_history):
     # Prepare a prompt based on the current emotion and input
-    prompt = f"You are an AI assistant [rest of removed line not preserved in the page capture]
+    prompt = f"You are an AI assistant currently feeling {ai_emotion}. Your response should reflect this emotion. Human: {input_text}\nAI:"
 
     # Add conversation history to the prompt
     for entry in conversation_history[-100:]:  # Use last 100 entries for context
         prompt = f"Human: {entry['user']}\nAI: {entry['response']}\n" + prompt
 
-    inputs = response_tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=8192)
+    inputs = response_tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=8192)
 
     # Adjust generation parameters based on emotion
     temperature = 0.7
-    if [condition not preserved in the page capture]
-        temperature = 0.9  # [comment not preserved]
-    elif [condition not preserved in the page capture]
-        temperature = 0.5  # [comment not preserved]
-
-    outputs = response_model.generate(
-        inputs['input_ids'],
-        max_length=500,
-        num_return_sequences=1,
-        temperature=temperature,
-        pad_token_id=response_tokenizer.eos_token_id
-    )
-
-    response = response_tokenizer.decode(outputs[0], skip_special_tokens=True)
-    response = response.replace(prompt, "").strip()
-    conversation_history.append({'user': input_text, 'response': response})
-    return response
-
-def process_input(input_text):
-    # Predict emotion of the input text
-    inputs = emotion_prediction_tokenizer(input_text, return_tensors='pt', truncation=True, padding=True).to(device)
-    with torch.no_grad():
-    [old lines 213-291 were mostly not preserved in the page capture; the surviving fragments are "response = generate_response(input_text, predicted_emotion, conversation_history)", "return response", "plt.show()", two "def ..." headers, and "global conversation_history"]
+    if ai_emotion == 'anger':
+        temperature = 0.9  # More randomness for angry responses
+    elif ai_emotion == 'joy':
+        temperature = 0.5  # More focused responses for joyful state
+
+    with torch.no_grad():
+        response_ids = response_model.generate(
+            inputs.input_ids,
+            attention_mask=inputs.attention_mask,
+            max_length=8192,
+            num_return_sequences=1,
+            no_repeat_ngram_size=2,
+            do_sample=True,
+            top_k=50,
+            top_p=0.95,
+            temperature=temperature,
+            pad_token_id=response_tokenizer.eos_token_id
+        )
+    response = response_tokenizer.decode(response_ids[0], skip_special_tokens=True)
+
+    # Extract only the AI's response
+    response = response.split("AI:")[-1].strip()
+    return response
+
+def predict_emotion(context):
+    inputs = emotion_prediction_tokenizer(context, return_tensors="pt", truncation=True, max_length=512)
+    with torch.no_grad():
+        outputs = emotion_prediction_model(**inputs)
+    probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
+    predicted_class = torch.argmax(probabilities, dim=-1).item()
+    emotion_labels = ["sadness", "joy", "love", "anger", "fear", "surprise"]
+    return emotion_labels[predicted_class]
+
+def sentiment_analysis(text):
+    sia = SentimentIntensityAnalyzer()
+    sentiment_scores = sia.polarity_scores(text)
+    return sentiment_scores
+
+def extract_entities(text):
+    chunked = ne_chunk(pos_tag(word_tokenize(text)))
+    entities = []
+    for chunk in chunked:
+        if hasattr(chunk, 'label'):
+            entities.append(((' '.join(c[0] for c in chunk)), chunk.label()))
+    return entities
+
+def analyze_text_complexity(text):
+    blob = TextBlob(text)
+    return {
+        'word_count': len(blob.words),
+        'sentence_count': len(blob.sentences),
+        'average_sentence_length': len(blob.words) / len(blob.sentences) if len(blob.sentences) > 0 else 0,
+        'polarity': blob.sentiment.polarity,
+        'subjectivity': blob.sentiment.subjectivity
+    }
+
+def visualize_emotions():
+    emotions_df = pd.DataFrame([(e, d['percentage'], d['intensity']) for e, d in emotions.items()],
+                               columns=['Emotion', 'Percentage', 'Intensity'])
+
+    plt.figure(figsize=(12, 6))
+    sns.barplot(x='Emotion', y='Percentage', data=emotions_df)
+    plt.title('Current Emotional State')
+    plt.xticks(rotation=45, ha='right')
+    plt.tight_layout()
+    plt.savefig('emotional_state.png')
+    plt.close()
+
+    return 'emotional_state.png'
+
+def interactive_interface(input_text):
+    global conversation_history
+    try:
+        evolve_emotions()
+        predicted_emotion = predict_emotion(input_text)
+        sentiment_scores = sentiment_analysis(input_text)
+        entities = extract_entities(input_text)
+        text_complexity = analyze_text_complexity(input_text)
+
+        # Update AI's emotional state based on input
+        update_emotion(predicted_emotion, random.uniform(5, 15), random.uniform(0, 10))
+
+        # Determine AI's current dominant emotion
+        ai_emotion = max(emotions, key=lambda e: emotions[e]['percentage'])
+
+        # Generate response based on AI's emotion
+        response = generate_response(input_text, ai_emotion, conversation_history)
+
+        # Update conversation history
+        conversation_history.append({
+            'user': input_text,
+            'response': response
+        })
+
+        # Trim conversation history if it exceeds the maximum length
+        if len(conversation_history) > max_history_length:
+            conversation_history = conversation_history[-max_history_length:]
+
+        update_emotion_history(ai_emotion, emotions[ai_emotion]['percentage'], emotions[ai_emotion]['intensity'], input_text)
+        feature_transformations()
+
+        emotion_visualization = visualize_emotions()
+
+        analysis_result = {
+            'predicted_user_emotion': predicted_emotion,
+            'ai_emotion': ai_emotion,
+            'sentiment_scores': sentiment_scores,
+            'entities': entities,
+            'text_complexity': text_complexity,
+            'response': response,
+            'emotion_visualization': emotion_visualization
+        }
+
+        return analysis_result
+    except Exception as e:
+        print(f"Error: {e}")
+        return {
+            'predicted_user_emotion': 'unknown',
+            'ai_emotion': 'neutral',
+            'sentiment_scores': {'compound': 0, 'neg': 0, 'neu': 1, 'pos': 0},
+            'entities': [],
+            'text_complexity': {'word_count': 0, 'sentence_count': 0, 'average_sentence_length': 0, 'polarity': 0, 'subjectivity': 0},
+            'response': "I'm sorry, but I encountered an error and was unable to generate a response.",
+            'emotion_visualization': 'emotional_state.png'
+        }
+
+# Create a Gradio interface
+gr.Interface(
+    fn=interactive_interface,
+    inputs=gr.Textbox(label="Your Message"),
+    outputs=[
+        gr.Textbox(label="Predicted User Emotion"),
+        gr.Textbox(label="AI Emotion"),
+        gr.Textbox(label="Sentiment Scores"),
+        gr.Textbox(label="Extracted Entities"),
+        gr.Textbox(label="Text Complexity"),
+        gr.Textbox(label="AI Response"),
+        gr.Image(label="Emotional State Visualization")
+    ],
+    title="Emotion-Aware AI Assistant",
+    description="Interact with an AI assistant that responds based on its emotional state.",
+).launch()
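Review note on generate_response: gpt2-xl has a 1024-token context window (config.n_positions), so both the tokenizer call and generate with max_length=8192 exceed what the model's positional embeddings support; once a long history pushes the sequence past 1024 tokens, generation will fail. A minimal sketch of a safer pattern, reusing the surrounding names from the diff (prompt, temperature, response_tokenizer, response_model); the reply budget of 200 tokens is an assumption, not part of the commit:

    context_window = response_model.config.n_positions  # 1024 for GPT-2 models
    max_new = 200  # hypothetical budget for the reply
    inputs = response_tokenizer(prompt, return_tensors="pt", truncation=True,
                                max_length=context_window - max_new)
    with torch.no_grad():
        response_ids = response_model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,
            max_new_tokens=max_new,  # bound the reply, not the total length
            do_sample=True,
            temperature=temperature,
            pad_token_id=response_tokenizer.eos_token_id,
        )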
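Review note on predict_emotion: the hardcoded emotion_labels list matches the usual label order of the bhadresh-savani emotion checkpoint, but reading the mapping from the model config is more robust if the checkpoint is ever swapped. A sketch, not part of the commit:

    # id2label ships in the checkpoint's config, so the label order cannot
    # drift from the model's actual training setup.
    return emotion_prediction_model.config.id2label[predicted_class]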
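Review note on the Gradio wiring: interactive_interface returns a single dict, but the interface declares seven output components; gr.Interface expects one value per output component (dict returns are only matched when keyed by the component objects themselves), so this will likely error at runtime. A sketch of the expected return shape inside the try block, reusing the local names from the diff (not part of the commit):

        return (
            predicted_emotion,
            ai_emotion,
            str(sentiment_scores),
            str(entities),
            str(text_complexity),
            response,
            emotion_visualization,
        )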