Sephfox committed on
Commit c4d75ea · verified · 1 Parent(s): d8d4f16

Update app.py

Files changed (1):
  1. app.py +107 -57
app.py CHANGED
@@ -18,6 +18,14 @@ from textblob import TextBlob
 import matplotlib.pyplot as plt
 import seaborn as sns
 import ssl
+import spacy
+from spacy import displacy
+from collections import Counter
+import en_core_web_sm
+from gensim import corpora
+from gensim.models import LdaModel
+from gensim.utils import simple_preprocess
+from neuralcoref import NeuralCoref
 
 # NLTK data download
 try:
@@ -38,6 +46,13 @@ nltk.data.path.append('/home/user/nltk_data')
 
 warnings.filterwarnings('ignore', category=FutureWarning, module='huggingface_hub.file_download')
 
+# Load spaCy model
+nlp = en_core_web_sm.load()
+
+# Add NeuralCoref to spaCy pipeline
+coref = NeuralCoref(nlp.vocab)
+nlp.add_pipe(coref, name='neuralcoref')
+
 # Initialize Example Dataset (For Emotion Prediction)
 data = {
     'context': [
@@ -144,6 +159,7 @@ def evolve_emotions():
     toolbox.register("attr_float", random.uniform, 0, 100)
     toolbox.register("attr_intensity", random.uniform, 0, 10)
     toolbox.register("individual", tools.initCycle, creator.Individual,
+    toolbox.register("individual", tools.initCycle, creator.Individual,
                      (toolbox.attr_float,) * len(emotions) +
                      (toolbox.attr_intensity,) * len(emotions), n=1)
     toolbox.register("population", tools.initRepeat, list, toolbox.individual)
@@ -176,28 +192,50 @@ def sentiment_analysis(text):
     return sentiment_scores
 
 def extract_entities(text):
-    # Lazy load the necessary NLTK data
-    try:
-        nltk.data.find('tokenizers/punkt')
-    except LookupError:
-        nltk.download('punkt', quiet=True, raise_on_error=True)
-
-    try:
-        nltk.data.find('taggers/averaged_perceptron_tagger')
-    except LookupError:
-        nltk.download('averaged_perceptron_tagger', quiet=True, raise_on_error=True)
-
-    try:
-        nltk.data.find('chunkers/maxent_ne_chunker')
-    except LookupError:
-        nltk.download('maxent_ne_chunker', quiet=True, raise_on_error=True)
+    doc = nlp(text)
+
+    # Named Entity Recognition
+    named_entities = [(ent.text, ent.label_) for ent in doc.ents]
+
+    # Noun Phrases
+    noun_phrases = [chunk.text for chunk in doc.noun_chunks]
+
+    # Key Phrases (using textrank algorithm)
+    from textacy.extract import keyterms as kt
+    keyterms = kt.textrank(doc, normalize="lemma", topn=5)
+
+    # Dependency Parsing
+    dependencies = [(token.text, token.dep_, token.head.text) for token in doc]
+
+    # Part-of-Speech Tagging
+    pos_tags = [(token.text, token.pos_) for token in doc]
+
+    return {
+        "named_entities": named_entities,
+        "noun_phrases": noun_phrases,
+        "key_phrases": keyterms,
+        "dependencies": dependencies,
+        "pos_tags": pos_tags
+    }
 
-    chunked = ne_chunk(pos_tag(word_tokenize(text)))
-    entities = []
-    for chunk in chunked:
-        if hasattr(chunk, 'label'):
-            entities.append(((' '.join(c[0] for c in chunk)), chunk.label()))
-    return entities
+def analyze_context(text):
+    doc = nlp(text)
+
+    # Coreference resolution
+    resolved_text = doc._.coref_resolved
+
+    # Topic modeling
+    processed_text = simple_preprocess(resolved_text)
+    dictionary = corpora.Dictionary([processed_text])
+    corpus = [dictionary.doc2bow(processed_text)]
+
+    lda_model = LdaModel(corpus=corpus, id2word=dictionary, num_topics=3, random_state=42)
+    topics = lda_model.print_topics()
+
+    return {
+        "resolved_text": resolved_text,
+        "topics": topics
+    }
 
 def analyze_text_complexity(text):
     blob = TextBlob(text)
@@ -217,10 +255,8 @@ def get_ai_emotion(input_text):
     return ai_emotion, ai_emotion_percentage, ai_emotion_intensity
 
 def generate_emotion_visualization(ai_emotion, ai_emotion_percentage, ai_emotion_intensity):
-    # Generate an emotion visualization based on the AI's emotional state
    emotion_visualization_path = 'emotional_state.png'
    try:
-        # Generate and save the emotion visualization
        plt.figure(figsize=(8, 6))
        emotions_df = pd.DataFrame([(e, d['percentage'], d['intensity']) for e, d in emotions.items()],
                                   columns=['emotion', 'percentage', 'intensity'])
@@ -236,20 +272,20 @@ def generate_emotion_visualization(ai_emotion, ai_emotion_percentage, ai_emotion
     emotion_visualization_path = None
     return emotion_visualization_path
 
-def generate_response(ai_emotion, input_text):
+def generate_response(ai_emotion, input_text, entities, context_analysis):
     load_models()
-    # Prepare a prompt based on the current emotion
-    prompt = f"As an AI assistant, I am currently feeling {ai_emotion}. My response will reflect this emotional state."
+    prompt = f"As an AI assistant, I am currently feeling {ai_emotion}. My response will reflect this emotional state. "
+    prompt += f"The input text contains the following entities: {entities['named_entities']}. "
+    prompt += f"The main topics are: {context_analysis['topics']}. "
+    prompt += f"Considering this context, here's my response to '{input_text}': "
 
-    # Generate the response
     inputs = response_tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=8192)
 
-    # Adjust generation parameters based on emotion
     temperature = 0.7
     if ai_emotion == 'anger':
-        temperature = 0.9  # More randomness for angry responses
+        temperature = 0.9
     elif ai_emotion == 'joy':
-        temperature = 0.5  # More focused responses for joyful state
+        temperature = 0.5
 
     with torch.no_grad():
         response_ids = response_model.generate(
@@ -266,43 +302,57 @@ def generate_response(ai_emotion, input_text):
         )
     response = response_tokenizer.decode(response_ids[0], skip_special_tokens=True)
 
-    # Extract only the AI's response
     return response.strip()
 
 def interactive_interface(input_text):
-    # Perform your processing logic here
     predicted_emotion = predict_emotion(input_text)
     sentiment_scores = sentiment_analysis(input_text)
-    entities = extract_entities(input_text)
     text_complexity = analyze_text_complexity(input_text)
     ai_emotion, ai_emotion_percentage, ai_emotion_intensity = get_ai_emotion(input_text)
     emotion_visualization = generate_emotion_visualization(ai_emotion, ai_emotion_percentage, ai_emotion_intensity)
-    response = generate_response(ai_emotion, input_text)
+
+    entities = extract_entities(input_text)
+    context_analysis = analyze_context(input_text)
+
+    response = generate_response(ai_emotion, input_text, entities, context_analysis)
 
-    # Update conversation history
     conversation_history.append({'user': input_text, 'response': response})
     if len(conversation_history) > max_history_length:
         conversation_history.pop(0)
-
-    # Save conversation history to a file
-    save_historical_data(conversation_history)
-
-    # Return the expected outputs in the correct order
-    return (
-        gr.Textbox(value=predicted_emotion, label="Predicted Emotion"),
-        gr.Textbox(value=str(sentiment_scores), label="Sentiment Scores"),
-        gr.Textbox(value=str(entities), label="Extracted Entities"),
-        gr.Textbox(value=str(text_complexity), label="Text Complexity"),
-        gr.Textbox(value=response, label="AI Response"),
-        gr.Image(value=emotion_visualization, label="Emotion Visualization")
-    )
-
-# Create the Gradio interface
-iface = gr.Interface(fn=interactive_interface, inputs="text", outputs=[
-    "text", "text", "text", "text", "text", "image"
-], title="Emotion-Aware AI Assistant")
-
-# Launch the interface
-iface.launch()
+    return {
+        "emotion": predicted_emotion,
+        "sentiment": sentiment_scores,
+        "entities": entities,
+        "context_analysis": context_analysis,
+        "text_complexity": text_complexity,
+        "ai_emotion": ai_emotion,
+        "ai_emotion_percentage": ai_emotion_percentage,
+        "ai_emotion_intensity": ai_emotion_intensity,
+        "emotion_visualization": emotion_visualization,
+        "response": response
+    }
 
+# Gradio interface
+def gradio_interface(input_text):
+    result = interactive_interface(input_text)
+
+    output = f"Predicted Emotion: {result['emotion']}\n"
+    output += f"Sentiment: {result['sentiment']}\n"
+    output += f"AI Emotion: {result['ai_emotion']} ({result['ai_emotion_percentage']:.2f}%, Intensity: {result['ai_emotion_intensity']:.2f})\n"
+    output += f"Entities: {result['entities']}\n"
+    output += f"Context Analysis: {result['context_analysis']}\n"
+    output += f"Text Complexity: {result['text_complexity']}\n"
+    output += f"AI Response: {result['response']}"
+
+    return output, result['emotion_visualization']
+
+iface = gr.Interface(
+    fn=gradio_interface,
+    inputs="text",
+    outputs=["text", gr.Image(type="filepath")],
+    title="Enhanced AI Assistant",
+    description="Enter your text to interact with the AI assistant."
+)
+
+if __name__ == "__main__":
+    iface.launch()
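
A note on the new coreference setup: neuralcoref, as of its last release, targets spaCy 2.1.x and does not load under spaCy 3, so the en_core_web_sm model must be the matching 2.x build. A minimal environment check, using neuralcoref's own add_to_pipe helper; the version guard and example sentence are illustrative, not part of the commit:

import spacy
import neuralcoref

if int(spacy.__version__.split(".")[0]) != 2:
    raise RuntimeError(f"neuralcoref requires spaCy 2.1.x, found {spacy.__version__}")

nlp = spacy.load("en_core_web_sm")
neuralcoref.add_to_pipe(nlp)  # same effect as NeuralCoref(nlp.vocab) + nlp.add_pipe(...)

doc = nlp("My sister has a dog. She loves him.")
print(doc._.coref_resolved)  # -> "My sister has a dog. My sister loves a dog."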
 
 
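A sketch of the shape the rewritten extract_entities returns, assuming the spaCy pipeline above is loaded and textacy is installed (it is imported inside the function); the values shown are illustrative:

result = extract_entities("Apple is looking at buying a U.K. startup for $1 billion.")
# result["named_entities"] -> e.g. [('Apple', 'ORG'), ('U.K.', 'GPE'), ('$1 billion', 'MONEY')]
# result["noun_phrases"]   -> e.g. ['Apple', 'a U.K. startup']
# result["key_phrases"]    -> list of (term, score) pairs from textacy's TextRank
# result["dependencies"]   -> e.g. [('Apple', 'nsubj', 'looking'), ...]
# result["pos_tags"]       -> e.g. [('Apple', 'PROPN'), ('is', 'AUX'), ...]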
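
One caveat on analyze_context: the LdaModel is fit on a corpus containing a single bag-of-words, so the three topics it prints will largely echo that one document's word frequencies. If distinct topics matter, one option is to treat each sentence of the resolved text as its own document; a sketch with a hypothetical helper:

from gensim import corpora
from gensim.models import LdaModel
from gensim.utils import simple_preprocess

def topics_by_sentence(resolved_doc, num_topics=3):
    # one bag-of-words per sentence instead of one for the whole text
    texts = [simple_preprocess(sent.text) for sent in resolved_doc.sents]
    dictionary = corpora.Dictionary(texts)
    corpus = [dictionary.doc2bow(t) for t in texts]
    lda = LdaModel(corpus=corpus, id2word=dictionary, num_topics=num_topics, random_state=42)
    return lda.print_topics()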
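
One thing to verify in the elided response_model.generate(...) arguments: in transformers, temperature only takes effect when sampling is enabled, so the per-emotion temperatures above are a no-op under the default greedy decoding. A sketch of a call that would honor them; max_new_tokens and top_p are illustrative, not from the commit:

with torch.no_grad():
    response_ids = response_model.generate(
        **inputs,
        max_new_tokens=256,       # illustrative length budget
        do_sample=True,           # required for temperature to have any effect
        temperature=temperature,
        top_p=0.9,                # illustrative nucleus-sampling cutoff
        pad_token_id=response_tokenizer.eos_token_id,
    )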
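
Finally, since interactive_interface now returns a plain dict, the pipeline can be smoke-tested without the UI; the input string is illustrative:

result = interactive_interface("I'm thrilled about the new release, though the delay annoyed me.")
print(result["emotion"], result["ai_emotion"])
print(result["response"])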