Sephfox committed
Commit 979d590 · verified · 1 parent: b43a3e1

Update app.py

Files changed (1):
  1. app.py +156 -302
app.py CHANGED
@@ -1,331 +1,185 @@
- import warnings
- import numpy as np
- import pandas as pd
  import os
  import json
  import random
  import gradio as gr
  import torch
- from sklearn.preprocessing import OneHotEncoder
- from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoModelForCausalLM, pipeline
- from deap import base, creator, tools, algorithms
  import nltk
  from nltk.sentiment import SentimentIntensityAnalyzer
- from nltk.tokenize import word_tokenize
- from nltk.tag import pos_tag
- from nltk.chunk import ne_chunk
  from textblob import TextBlob
- import matplotlib.pyplot as plt
- import seaborn as sns

- warnings.filterwarnings('ignore', category=FutureWarning, module='huggingface_hub.file_download')
  # Download necessary NLTK data
  nltk.download('vader_lexicon', quiet=True)
- nltk.download('punkt', quiet=True)
- nltk.download('averaged_perceptron_tagger', quiet=True)
- nltk.download('maxent_ne_chunker', quiet=True)
- nltk.download('words', quiet=True)
-
- # Initialize Example Dataset (For Emotion Prediction)
- data = {
-     'context': [
-         'I am happy', 'I am sad', 'I am angry', 'I am excited', 'I am calm',
-         'I am feeling joyful', 'I am grieving', 'I am feeling peaceful', 'I am frustrated',
-         'I am determined', 'I feel resentment', 'I am feeling glorious', 'I am motivated',
-         'I am surprised', 'I am fearful', 'I am trusting', 'I feel disgust', 'I am optimistic',
-         'I am pessimistic', 'I feel bored', 'I am envious'
-     ],
-     'emotion': [
-         'joy', 'sadness', 'anger', 'joy', 'calmness', 'joy', 'grief', 'calmness', 'anger',
-         'determination', 'resentment', 'glory', 'motivation', 'surprise', 'fear', 'trust',
-         'disgust', 'optimism', 'pessimism', 'boredom', 'envy'
-     ]
- }
- df = pd.DataFrame(data)

- # Encoding the contexts using One-Hot Encoding (memory-efficient)
  try:
-     encoder = OneHotEncoder(handle_unknown='ignore', sparse_output=True)
- except TypeError:
-     encoder = OneHotEncoder(handle_unknown='ignore', sparse=True)
- contexts_encoded = encoder.fit_transform(df[['context']])
-
- # Encoding emotions
- emotions_target = pd.Categorical(df['emotion']).codes
- emotion_classes = pd.Categorical(df['emotion']).categories
-
- # Load pre-trained BERT model for emotion prediction
- emotion_prediction_model = AutoModelForSequenceClassification.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion")
- emotion_prediction_tokenizer = AutoTokenizer.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion")
-
- # Load pre-trained LLM model and tokenizer for response generation with increased context window
- response_model_name = "microsoft/DialoGPT-medium"
- response_tokenizer = AutoTokenizer.from_pretrained(response_model_name)
- response_model = AutoModelForCausalLM.from_pretrained(response_model_name)
-
- # Set the pad token
- response_tokenizer.pad_token = response_tokenizer.eos_token
-
- # Enhanced Emotional States
- emotions = {
-     'joy': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
-     'sadness': {'percentage': 10, 'motivation': 'negative', 'intensity': 0},
-     'anger': {'percentage': 10, 'motivation': 'traumatic or strong', 'intensity': 0},
-     'fear': {'percentage': 10, 'motivation': 'defensive', 'intensity': 0},
-     'love': {'percentage': 10, 'motivation': 'affectionate', 'intensity': 0},
-     'surprise': {'percentage': 10, 'motivation': 'unexpected', 'intensity': 0},
-     'neutral': {'percentage': 40, 'motivation': 'balanced', 'intensity': 0},
- }
-
- total_percentage = 100
- emotion_history_file = 'emotion_history.json'
- global conversation_history
- conversation_history = []
- max_history_length = 30
-
- def load_historical_data(file_path=emotion_history_file):
-     if os.path.exists(file_path):
-         with open(file_path, 'r') as file:
-             return json.load(file)
-     return []
-
- def save_historical_data(historical_data, file_path=emotion_history_file):
-     with open(file_path, 'w') as file:
-         json.dump(historical_data, file)
-
- emotion_history = load_historical_data()
-
- def update_emotion(emotion, percentage, intensity):
-     emotions[emotion]['percentage'] += percentage
-     emotions[emotion]['intensity'] = intensity
-
-     # Normalize percentages
-     total = sum(e['percentage'] for e in emotions.values())
-     for e in emotions:
-         emotions[e]['percentage'] = (emotions[e]['percentage'] / total) * 100
-
- def normalize_context(context):
-     return context.lower().strip()
-
- # Create FitnessMulti and Individual outside of evolve_emotions
- creator.create("FitnessMulti", base.Fitness, weights=(-1.0, -0.5, -0.2))
- creator.create("Individual", list, fitness=creator.FitnessMulti)
-
- def evaluate(individual):
-     emotion_values = individual[:len(emotions)]
-     intensities = individual[len(emotions):]
-
-     total_diff = abs(100 - sum(emotion_values))
-     intensity_range = max(intensities) - min(intensities)
-     emotion_balance = max(emotion_values) - min(emotion_values)
-
-     return total_diff, intensity_range, emotion_balance
-
- def evolve_emotions():
-     toolbox = base.Toolbox()
-     toolbox.register("attr_float", random.uniform, 0, 100)
-     toolbox.register("attr_intensity", random.uniform, 0, 10)
-     toolbox.register("individual", tools.initCycle, creator.Individual,
-                      (toolbox.attr_float,) * len(emotions) +
-                      (toolbox.attr_intensity,) * len(emotions), n=1)
-     toolbox.register("population", tools.initRepeat, list, toolbox.individual)
-     toolbox.register("mate", tools.cxTwoPoint)
-     toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.2)
-     toolbox.register("select", tools.selNSGA2)
-     toolbox.register("evaluate", evaluate)
-
-     population = toolbox.population(n=100)
-     algorithms.eaMuPlusLambda(population, toolbox, mu=50, lambda_=100, cxpb=0.7, mutpb=0.2, ngen=50,
-                               stats=None, halloffame=None, verbose=False)
-
-     best_individual = tools.selBest(population, k=1)[0]
-     emotion_values = best_individual[:len(emotions)]
-     intensities = best_individual[len(emotions):]
-
-     for i, (emotion, data) in enumerate(emotions.items()):
-         data['percentage'] = emotion_values[i]
-         data['intensity'] = intensities[i]
-
-     # Normalize percentages
-     total = sum(e['percentage'] for e in emotions.values())
-     for e in emotions:
-         emotions[e]['percentage'] = (emotions[e]['percentage'] / total) * 100
-
- def update_emotion_history(emotion, percentage, intensity, context):
-     entry = {
-         'emotion': emotion,
-         'percentage': percentage,
-         'intensity': intensity,
-         'context': context,
-         'timestamp': pd.Timestamp.now().isoformat()
-     }
-     emotion_history.append(entry)
-     save_historical_data(emotion_history)
-
- # Adding 443 features
- additional_features = {}
- for i in range(443):
-     additional_features[f'feature_{i+1}'] = 0
-
- def feature_transformations():
-     global additional_features
-     for feature in additional_features:
-         additional_features[feature] += random.uniform(-1, 1)
-
- def generate_response(input_text, ai_emotion):
-     global conversation_history
-     # Prepare a prompt based on the current emotion and input
-     prompt = f"You are an AI assistant currently feeling {ai_emotion}. Your response should reflect this emotion. Human: {input_text}\nAI:"
-
-     # Add conversation history to the prompt
-     for entry in conversation_history[-5:]:  # Use last 5 entries for context
-         prompt = f"Human: {entry['user']}\nAI: {entry['response']}\n" + prompt
-
-     inputs = response_tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=1024)
-
-     # Adjust generation parameters based on emotion
-     temperature = 0.7
-     if ai_emotion == 'anger':
-         temperature = 0.9  # More randomness for angry responses
-     elif ai_emotion == 'joy':
-         temperature = 0.5  # More focused responses for joyful state
-
-     with torch.no_grad():
-         response_ids = response_model.generate(
-             inputs.input_ids,
-             attention_mask=inputs.attention_mask,
-             max_length=1024,
-             num_return_sequences=1,
-             no_repeat_ngram_size=2,
-             do_sample=True,
-             top_k=50,
-             top_p=0.95,
-             temperature=temperature,
-             pad_token_id=response_tokenizer.eos_token_id
          )
-     response = response_tokenizer.decode(response_ids[0], skip_special_tokens=True)
-
-     # Extract only the AI's response
-     response = response.split("AI:")[-1].strip()
-     return response
-
- def predict_emotion(context):
-     inputs = emotion_prediction_tokenizer(context, return_tensors="pt", truncation=True, max_length=512)
-     with torch.no_grad():
-         outputs = emotion_prediction_model(**inputs)
-     probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
-     predicted_class = torch.argmax(probabilities, dim=-1).item()
-     emotion_labels = ["sadness", "joy", "love", "anger", "fear", "surprise"]
-     return emotion_labels[predicted_class]
-
- def sentiment_analysis(text):
-     sia = SentimentIntensityAnalyzer()
-     sentiment_scores = sia.polarity_scores(text)
-     return sentiment_scores
-
- def extract_entities(text):
-     chunked = ne_chunk(pos_tag(word_tokenize(text)))
-     entities = []
-     for chunk in chunked:
-         if hasattr(chunk, 'label'):
-             entities.append(((' '.join(c[0] for c in chunk)), chunk.label()))
-     return entities

- def analyze_text_complexity(text):
-     blob = TextBlob(text)
-     return {
-         'word_count': len(blob.words),
-         'sentence_count': len(blob.sentences),
-         'average_sentence_length': len(blob.words) / len(blob.sentences) if len(blob.sentences) > 0 else 0,
-         'polarity': blob.sentiment.polarity,
-         'subjectivity': blob.sentiment.subjectivity
      }

- def visualize_emotions():
-     emotions_df = pd.DataFrame([(e, d['percentage'], d['intensity']) for e, d in emotions.items()],
-                                columns=['Emotion', 'Percentage', 'Intensity'])
-
-     plt.figure(figsize=(12, 6))
-     sns.barplot(x='Emotion', y='Percentage', data=emotions_df)
-     plt.title('Current Emotional State')
-     plt.xticks(rotation=45, ha='right')
-     plt.tight_layout()
-     plt.savefig('emotional_state.png')
-     plt.close()
-
-     return 'emotional_state.png'
-
- def interactive_interface(input_text):
-     global conversation_history
-     try:
-         evolve_emotions()
-         predicted_emotion = predict_emotion(input_text)
-         sentiment_scores = sentiment_analysis(input_text)
-         entities = extract_entities(input_text)
-         text_complexity = analyze_text_complexity(input_text)
-
-         # Update AI's emotional state based on input
-         update_emotion(predicted_emotion, random.uniform(5, 15), random.uniform(0, 10))
-
-         # Determine AI's current dominant emotion
-         ai_emotion = max(emotions, key=lambda e: emotions[e]['percentage'])
-
-         # Generate response based on AI's emotion
-         response = generate_response(input_text, ai_emotion)
-
-         # Update conversation history
-         conversation_history.append({
-             'user': input_text,
-             'response': response
-         })
-
-         # Trim conversation history if it exceeds the maximum length
-         if len(conversation_history) > max_history_length:
-             conversation_history = conversation_history[-max_history_length:]
-
-         update_emotion_history(ai_emotion, emotions[ai_emotion]['percentage'], emotions[ai_emotion]['intensity'], input_text)
-         feature_transformations()
-
-         emotion_visualization = visualize_emotions()
-
-         analysis_result = {
-             'predicted_user_emotion': predicted_emotion,
-             'ai_emotion': ai_emotion,
-             'sentiment_scores': sentiment_scores,
-             'entities': entities,
-             'text_complexity': text_complexity,
-             'current_emotional_state': emotions,
-             'response': response,
-             'emotion_visualization': emotion_visualization
-         }
-
-         return analysis_result
-     except Exception as e:
-         print(f"An error occurred: {str(e)}")
-         return "I apologize, but I encountered an error while processing your input. Please try again."

  def gradio_interface(input_text):
-     response = interactive_interface(input_text)
-     if isinstance(response, str):
-         return response, None
-     else:
-         return (
-             f"User Emotion: {response['predicted_user_emotion']}\n"
-             f"AI Emotion: {response['ai_emotion']}\n"
-             f"AI Response: {response['response']}\n\n"
-             f"Sentiment: {response['sentiment_scores']}\n"
-             f"Entities: {response['entities']}\n"
-             f"Text Complexity: {response['text_complexity']}\n",
-             response['emotion_visualization']
-         )
-
- # Create Gradio interface
  iface = gr.Interface(
      fn=gradio_interface,
      inputs="text",
      outputs=["text", gr.Image(type="filepath")],
-     title="Enhanced Emotional AI Interface",
-     description="Enter text to interact with the AI and analyze emotions."
  )

  if __name__ == "__main__":
      iface.launch(share=True)

+ import warnings
  import os
  import json
  import random
  import gradio as gr
  import torch
+ import matplotlib.pyplot as plt
+ import seaborn as sns
+ import pandas as pd
  import nltk
  from nltk.sentiment import SentimentIntensityAnalyzer
  from textblob import TextBlob
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoModelForCausalLM
+
+ warnings.filterwarnings('ignore', category=FutureWarning)

  # Download necessary NLTK data
  nltk.download('vader_lexicon', quiet=True)

+ # ---------------------------
+ # Backend Support for GGUF Models
+ # ---------------------------
+ # Attempt to import a llama_cpp binding if available.
+ # Otherwise, fall back to the Hugging Face transformers interface.
  try:
+     # llama_cpp Python binding for GGUF support (provided by the llama-cpp-python package)
+     from llama_cpp import Llama
+     BACKEND = "llama_cpp"
+ except ImportError:
+     BACKEND = "transformers"
+
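+ # BACKEND names the inference engine used below: "llama_cpp" when the
+ # llama-cpp-python package is installed, "transformers" otherwise.
+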
+ # ---------------------------
+ # Emotional Analysis Module
+ # ---------------------------
+ class EmotionalAnalyzer:
+     def __init__(self):
+         # Load a pre-trained emotion classifier model and tokenizer
+         self.emotion_model = AutoModelForSequenceClassification.from_pretrained(
+             "bhadresh-savani/distilbert-base-uncased-emotion"
          )
+         self.emotion_tokenizer = AutoTokenizer.from_pretrained(
+             "bhadresh-savani/distilbert-base-uncased-emotion"
+         )
+         # Define labels according to the model card
+         self.emotion_labels = ["sadness", "joy", "love", "anger", "fear", "surprise"]
+         self.sia = SentimentIntensityAnalyzer()
+
+     def predict_emotion(self, text):
+         inputs = self.emotion_tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
+         with torch.no_grad():
+             outputs = self.emotion_model(**inputs)
+         probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
+         predicted_idx = torch.argmax(probabilities, dim=-1).item()
+         return self.emotion_labels[predicted_idx]
+
+     def sentiment_analysis(self, text):
+         return self.sia.polarity_scores(text)
+
+     def detailed_emotional_analysis(self, text):
+         """Combine VADER and TextBlob analysis for richer emotional insight."""
+         vader_scores = self.sentiment_analysis(text)
+         blob = TextBlob(text)
+         textblob_analysis = {
+             'polarity': blob.sentiment.polarity,
+             'subjectivity': blob.sentiment.subjectivity,
+             'word_count': len(blob.words),
+             'sentence_count': len(blob.sentences)
+         }
+         predicted_emotion = self.predict_emotion(text)
+         return {
+             'predicted_emotion': predicted_emotion,
+             'vader': vader_scores,
+             'textblob': textblob_analysis
+         }

+     def visualize_emotions(self, emotions_dict):
+         """Plot a bar chart of the current emotional state."""
+         emotions_df = pd.DataFrame(list(emotions_dict.items()), columns=['Emotion', 'Percentage'])
+         plt.figure(figsize=(8, 4))
+         sns.barplot(x='Emotion', y='Percentage', data=emotions_df)
+         plt.title('Current Emotional State')
+         plt.tight_layout()
+         image_path = 'emotional_state.png'
+         plt.savefig(image_path)
+         plt.close()
+         return image_path
+
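+ # Example usage (output values illustrative, assuming the classifier predicts 'joy'):
+ #   analyzer = EmotionalAnalyzer()
+ #   analyzer.detailed_emotional_analysis("I am thrilled today!")
+ #   -> {'predicted_emotion': 'joy',
+ #       'vader': {'neg': 0.0, 'neu': ..., 'pos': ..., 'compound': ...},
+ #       'textblob': {'polarity': ..., 'subjectivity': ..., 'word_count': 4, 'sentence_count': 1}}
+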
+ # ---------------------------
+ # LLM Response Generator Module
+ # ---------------------------
+ class LLMResponder:
+     def __init__(self, model_name="SicariusSicariiStuff/Impish_LLAMA_3B_GGUF"):
+         self.model_name = model_name
+         if BACKEND == "llama_cpp":
+             # Initialize using the llama_cpp backend (adjust parameters as needed)
+             self.llm = Llama(model_path="path/to/gguf/file.gguf", n_ctx=1024)
+             self.backend = "llama_cpp"
+         else:
+             # Load using the Hugging Face transformers interface
+             self.llm_tokenizer = AutoTokenizer.from_pretrained(model_name)
+             self.llm_model = AutoModelForCausalLM.from_pretrained(model_name)
+             self.backend = "transformers"
+
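+     # NOTE: model_path above is a placeholder and must point at a real GGUF file.
+     # One way to obtain it (an assumption, adjust to the repo's actual file name):
+     #     from huggingface_hub import hf_hub_download
+     #     gguf_path = hf_hub_download(repo_id=self.model_name, filename="<file>.gguf")
+     # The transformers branch assumes the repo also ships standard weights; for a
+     # GGUF-only repo, recent transformers versions need
+     # from_pretrained(model_name, gguf_file="<file>.gguf").
+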
+     def generate_response(self, prompt):
+         if self.backend == "llama_cpp":
+             # llama-cpp-python returns an OpenAI-style completion dict,
+             # so the generated text lives under choices[0]["text"]
+             result = self.llm(prompt=prompt, max_tokens=256, temperature=0.95, top_p=0.95)
+             response = result["choices"][0]["text"]
+         else:
+             inputs = self.llm_tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1024)
+             with torch.no_grad():
+                 output_ids = self.llm_model.generate(
+                     inputs.input_ids,
+                     attention_mask=inputs.attention_mask,
+                     max_new_tokens=256,
+                     do_sample=True,
+                     top_p=0.95,
+                     top_k=50,
+                     pad_token_id=self.llm_tokenizer.eos_token_id
+                 )
+             # Decode only the newly generated tokens; generate() output includes the prompt
+             response = self.llm_tokenizer.decode(output_ids[0][inputs.input_ids.shape[-1]:], skip_special_tokens=True)
+         return response
+
+ # ---------------------------
+ # Main Interactive Interface Function
+ # ---------------------------
+ def interactive_interface(input_text):
+     # Initialize modules (note: this reloads both models on every call;
+     # a deployed app would create them once at module level and reuse them)
+     emotion_analyzer = EmotionalAnalyzer()
+     llm_responder = LLMResponder()
+
+     # Perform detailed emotional analysis
+     emotional_data = emotion_analyzer.detailed_emotional_analysis(input_text)
+     # For demonstration, we simulate a dynamic emotional state dictionary update.
+     # In a real-world scenario, this could be updated based on conversation history.
+     current_emotions = {
+         'joy': random.randint(10, 30),
+         'sadness': random.randint(5, 20),
+         'anger': random.randint(10, 25),
+         'fear': random.randint(5, 15),
+         'love': random.randint(10, 30),
+         'surprise': random.randint(5, 20)
      }
+     emotion_image = emotion_analyzer.visualize_emotions(current_emotions)

+     # Create a prompt that combines the input and the detected emotion
+     prompt = (f"Input: {input_text}\n"
+               f"Detected Emotion: {emotional_data['predicted_emotion']}\n"
+               f"VADER Scores: {emotional_data['vader']}\n"
+               "Provide a thoughtful, emotionally aware response that reflects the above data:")

+     llm_response = llm_responder.generate_response(prompt)

+     # Organize the result into a dictionary
+     result = {
+         'detailed_emotional_analysis': emotional_data,
+         'llm_response': llm_response,
+         'emotion_visualization': emotion_image
+     }
+     return result

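+ # interactive_interface returns a dict of this shape (values illustrative):
+ #   {'detailed_emotional_analysis': {'predicted_emotion': 'joy',
+ #                                    'vader': {...}, 'textblob': {...}},
+ #    'llm_response': '<generated text>',
+ #    'emotion_visualization': 'emotional_state.png'}
+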
  def gradio_interface(input_text):
+     result = interactive_interface(input_text)
+     output_text = (
+         f"Detailed Emotional Analysis:\n"
+         f" - Predicted Emotion: {result['detailed_emotional_analysis']['predicted_emotion']}\n"
+         f" - VADER: {result['detailed_emotional_analysis']['vader']}\n"
+         f" - TextBlob: {result['detailed_emotional_analysis']['textblob']}\n\n"
+         f"LLM Response:\n{result['llm_response']}"
+     )
+     return output_text, result['emotion_visualization']
+
+ # ---------------------------
+ # Create Gradio Interface
+ # ---------------------------

  iface = gr.Interface(
      fn=gradio_interface,
      inputs="text",
      outputs=["text", gr.Image(type="filepath")],
+     title="Enhanced Emotional Analysis with GGUF LLM Support",
+     description="Enter text to perform detailed emotional analysis and generate an emotionally aware response using the Impish_LLAMA_3B_GGUF model."
  )

  if __name__ == "__main__":
      iface.launch(share=True)
+