Update app.py
app.py
CHANGED
@@ -10,9 +10,22 @@ from sklearn.model_selection import train_test_split
 from sklearn.preprocessing import OneHotEncoder
 from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoModelForCausalLM, pipeline
 from deap import base, creator, tools, algorithms
+import nltk
+from nltk.sentiment import SentimentIntensityAnalyzer
+from textblob import TextBlob
+import spacy
+import matplotlib.pyplot as plt
+import seaborn as sns

 warnings.filterwarnings('ignore', category=FutureWarning, module='huggingface_hub.file_download')

+# Download necessary NLTK data
+nltk.download('vader_lexicon', quiet=True)
+nltk.download('punkt', quiet=True)
+
+# Load spaCy model
+nlp = spacy.load("en_core_web_sm")
+
 # Initialize Example Dataset (For Emotion Prediction)
 data = {
     'context': [
@@ -110,6 +123,10 @@ def update_emotion(emotion, percentage, intensity):
 def normalize_context(context):
     return context.lower().strip()

+# Create FitnessMulti and Individual outside of evolve_emotions
+creator.create("FitnessMulti", base.Fitness, weights=(-1.0, -0.5, -0.2))
+creator.create("Individual", list, fitness=creator.FitnessMulti)
+
 def evaluate(individual):
     emotion_values = individual[:len(emotions) - 1]
     intensities = individual[len(emotions) - 1:-1]
@@ -120,11 +137,7 @@ def evaluate(individual):
     intensity_range = max(intensities) - min(intensities)

     return ideal_diff, sum_non_ideal, intensity_range
-
 def evolve_emotions():
-    creator.create("FitnessMulti", base.Fitness, weights=(-1.0, -0.5, -0.2))
-    creator.create("Individual", list, fitness=creator.FitnessMulti)
-
     toolbox = base.Toolbox()
     toolbox.register("attr_float", random.uniform, 0, 20)
     toolbox.register("attr_intensity", random.uniform, 0, 10)
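Note on the two hunks above: moving creator.create out of evolve_emotions() means the DEAP classes are now created once at import time. If the module body runs again in the same process (for example during an interactive reload), DEAP overwrites the existing classes and emits a RuntimeWarning. A minimal guard, shown here only as a sketch and not part of this commit:

# Hypothetical guard, not in this commit: create the DEAP classes only once
# so re-executing the module body does not trigger DEAP's
# "class has already been created" RuntimeWarning.
from deap import base, creator

if not hasattr(creator, "FitnessMulti"):
    creator.create("FitnessMulti", base.Fitness, weights=(-1.0, -0.5, -0.2))
if not hasattr(creator, "Individual"):
    creator.create("Individual", list, fitness=creator.FitnessMulti)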
@@ -147,10 +160,11 @@ def evolve_emotions():
     intensities = best_individual[len(emotions) - 1:-1]
     ideal_state = best_individual[-1]

-    for i, emotion in enumerate(emotions):
-        if
-
-
+    for i, (emotion, data) in enumerate(list(emotions.items())[:-1]):  # Exclude 'ideal_state'
+        if i < len(emotion_values):
+            data['percentage'] = emotion_values[i]
+        if i < len(intensities):
+            data['intensity'] = intensities[i]

     emotions['ideal_state']['percentage'] = ideal_state

@@ -176,41 +190,116 @@ def feature_transformations():
         additional_features[feature] += random.uniform(-1, 1)

 def generate_response(input_text):
-    inputs = response_tokenizer(input_text, return_tensors="pt")
-
-
+    inputs = response_tokenizer(input_text, return_tensors="pt", truncation=True, max_length=512)
+    with torch.no_grad():
+        response_ids = response_model.generate(
+            inputs.input_ids,
+            max_length=150,
+            num_return_sequences=1,
+            no_repeat_ngram_size=2,
+            top_k=50,
+            top_p=0.95,
+            temperature=0.7
+        )
+    response = response_tokenizer.decode(response_ids[0], skip_special_tokens=True)
     return response

 def predict_emotion(context):
-    inputs = emotion_prediction_tokenizer(context, return_tensors="pt")
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    inputs = emotion_prediction_tokenizer(context, return_tensors="pt", truncation=True, max_length=512)
+    with torch.no_grad():
+        outputs = emotion_prediction_model(**inputs)
+    probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
+    predicted_class = torch.argmax(probabilities, dim=-1).item()
+    emotion_labels = ["sadness", "joy", "love", "anger", "fear", "surprise"]
+    return emotion_labels[predicted_class]
+
+def sentiment_analysis(text):
+    sia = SentimentIntensityAnalyzer()
+    sentiment_scores = sia.polarity_scores(text)
+    return sentiment_scores
+
+def extract_entities(text):
+    doc = nlp(text)
+    entities = [(ent.text, ent.label_) for ent in doc.ents]
+    return entities
+
+def analyze_text_complexity(text):
+    blob = TextBlob(text)
+    return {
+        'word_count': len(blob.words),
+        'sentence_count': len(blob.sentences),
+        'average_sentence_length': len(blob.words) / len(blob.sentences) if len(blob.sentences) > 0 else 0,
+        'polarity': blob.sentiment.polarity,
+        'subjectivity': blob.sentiment.subjectivity
+    }
+
+def visualize_emotions():
+    emotions_df = pd.DataFrame([(e, d['percentage'], d['intensity']) for e, d in emotions.items()],
+                               columns=['Emotion', 'Percentage', 'Intensity'])
+
+    plt.figure(figsize=(12, 6))
+    sns.barplot(x='Emotion', y='Percentage', data=emotions_df)
+    plt.title('Current Emotional State')
+    plt.xticks(rotation=45, ha='right')
+    plt.tight_layout()
+    plt.savefig('emotional_state.png')
+    plt.close()
+
+    return 'emotional_state.png'
+
+def interactive_interface(input_text):
+    try:
+        evolve_emotions()
+        predicted_emotion = predict_emotion(input_text)
+        sentiment_scores = sentiment_analysis(input_text)
+        entities = extract_entities(input_text)
+        text_complexity = analyze_text_complexity(input_text)
+
+        update_emotion(predicted_emotion, random.uniform(5, 15), random.uniform(0, 10))
+        update_emotion_history(predicted_emotion, emotions[predicted_emotion]['percentage'], emotions[predicted_emotion]['intensity'], input_text)
+        feature_transformations()
+
+        response = generate_response(input_text)
+
+        emotion_visualization = visualize_emotions()
+
+        analysis_result = {
+            'predicted_emotion': predicted_emotion,
+            'sentiment_scores': sentiment_scores,
+            'entities': entities,
+            'text_complexity': text_complexity,
+            'current_emotional_state': emotions,
+            'response': response,
+            'emotion_visualization': emotion_visualization
+        }
+
+        return analysis_result
+    except Exception as e:
+        print(f"An error occurred: {str(e)}")
+        return "I apologize, but I encountered an error while processing your input. Please try again."

-# Gradio Interface
 def gradio_interface(input_text):
     response = interactive_interface(input_text)
-
+    if isinstance(response, str):
+        return response
+    else:
+        return (
+            f"Predicted Emotion: {response['predicted_emotion']}\n"
+            f"Sentiment: {response['sentiment_scores']}\n"
+            f"Entities: {response['entities']}\n"
+            f"Text Complexity: {response['text_complexity']}\n"
+            f"Response: {response['response']}\n"
+            f"Emotion Visualization: {response['emotion_visualization']}"
+        )

+# Create Gradio interface
 iface = gr.Interface(
     fn=gradio_interface,
     inputs="text",
-    outputs="text",
-    title="
-    description="
+    outputs=["text", gr.Image(type="filepath")],
+    title="Enhanced Emotional AI Interface",
+    description="Enter text to interact with the AI and analyze emotions."
 )

 if __name__ == "__main__":
-    iface.launch()
+    iface.launch()
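A caveat on the new generate_response(): in Hugging Face transformers, top_k, top_p and temperature only take effect when sampling is enabled, so the call added above still decodes greedily (recent versions also warn about the unused flags). A minimal sketch of the sampling variant, assuming a causal LM checkpoint; "gpt2" is used here only as a stand-in for whatever response_model actually loads in app.py:

# Hypothetical variant, not part of this commit: enable sampling so that
# top_k / top_p / temperature actually influence decoding, and pass the
# attention mask explicitly.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

response_tokenizer = AutoTokenizer.from_pretrained("gpt2")   # stand-in checkpoint
response_model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = response_tokenizer("Hello, how are you?", return_tensors="pt",
                            truncation=True, max_length=512)
with torch.no_grad():
    response_ids = response_model.generate(
        inputs.input_ids,
        attention_mask=inputs.attention_mask,
        do_sample=True,           # required for top_k / top_p / temperature
        max_length=150,
        no_repeat_ngram_size=2,
        top_k=50,
        top_p=0.95,
        temperature=0.7,
    )
print(response_tokenizer.decode(response_ids[0], skip_special_tokens=True))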
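For reference, a standalone sketch of the text-analysis steps the new helpers implement (sentiment_analysis, extract_entities, analyze_text_complexity), assuming nltk, textblob, and spaCy with en_core_web_sm are installed; the example sentence and entity labels are illustrative only:

# Standalone sketch of the analysis steps added in this commit.
import nltk
from nltk.sentiment import SentimentIntensityAnalyzer
from textblob import TextBlob
import spacy

nltk.download('vader_lexicon', quiet=True)
nltk.download('punkt', quiet=True)
nlp = spacy.load("en_core_web_sm")

text = "I was thrilled to visit Paris last spring."

# VADER sentiment scores: dict with 'neg', 'neu', 'pos', 'compound'
sentiment = SentimentIntensityAnalyzer().polarity_scores(text)

# Named entities as (text, label) pairs, e.g. ('Paris', 'GPE')
entities = [(ent.text, ent.label_) for ent in nlp(text).ents]

# TextBlob metrics, mirroring analyze_text_complexity()
blob = TextBlob(text)
complexity = {
    'word_count': len(blob.words),
    'sentence_count': len(blob.sentences),
    'polarity': blob.sentiment.polarity,
    'subjectivity': blob.sentiment.subjectivity,
}

print(sentiment, entities, complexity)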