import warnings
import random
import gradio as gr
import torch
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import nltk
from nltk.sentiment import SentimentIntensityAnalyzer
from textblob import TextBlob
from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoModelForCausalLM

warnings.filterwarnings('ignore', category=FutureWarning)
# --- Monkey Patch for Gradio Client JSON Schema Bug ---
import gradio_client.utils as client_utils

original_get_type = client_utils.get_type

def patched_get_type(schema):
    if not isinstance(schema, dict):
        return type(schema).__name__
    return original_get_type(schema)

client_utils.get_type = patched_get_type

if not hasattr(client_utils, "_original_json_schema_to_python_type"):
    client_utils._original_json_schema_to_python_type = client_utils._json_schema_to_python_type

def patched_json_schema_to_python_type(schema, defs=None):
    if isinstance(schema, bool):
        return "bool"
    return client_utils._original_json_schema_to_python_type(schema, defs)

client_utils._json_schema_to_python_type = patched_json_schema_to_python_type
# --- End of Monkey Patch ---
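# Why the patch exists (illustrative, based on the handling above): JSON Schema
# allows a bare boolean as a schema, e.g. the value of "additionalProperties"
# in {"type": "object", "additionalProperties": False}. Some gradio_client
# versions assume every schema is a dict and crash on such values; the wrappers
# above return a type-name string ("bool") instead of raising.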
# Download necessary NLTK data (VADER lexicon for sentiment scoring; punkt is
# needed by TextBlob's word/sentence tokenizers used further below).
nltk.download('vader_lexicon', quiet=True)
nltk.download('punkt', quiet=True)
# ---------------------------
# Backend Support for GGUF Models
# ---------------------------
try:
    from llama_cpp import Llama
    BACKEND = "llama_cpp"
except ImportError:
    BACKEND = "transformers"
# ---------------------------
# Emotional Analysis Module
# ---------------------------
class EmotionalAnalyzer:
    def __init__(self):
        self.emotion_model = AutoModelForSequenceClassification.from_pretrained(
            "bhadresh-savani/distilbert-base-uncased-emotion"
        )
        self.emotion_tokenizer = AutoTokenizer.from_pretrained(
            "bhadresh-savani/distilbert-base-uncased-emotion"
        )
        self.emotion_labels = ["sadness", "joy", "love", "anger", "fear", "surprise"]
        self.sia = SentimentIntensityAnalyzer()

    def predict_emotion(self, text):
        inputs = self.emotion_tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
        with torch.no_grad():
            outputs = self.emotion_model(**inputs)
        probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
        predicted_idx = torch.argmax(probabilities, dim=-1).item()
        return self.emotion_labels[predicted_idx]

    def sentiment_analysis(self, text):
        return self.sia.polarity_scores(text)

    def detailed_emotional_analysis(self, text):
        vader_scores = self.sentiment_analysis(text)
        blob = TextBlob(text)
        textblob_analysis = {
            'polarity': blob.sentiment.polarity,
            'subjectivity': blob.sentiment.subjectivity,
            'word_count': len(blob.words),
            'sentence_count': len(blob.sentences)
        }
        predicted_emotion = self.predict_emotion(text)
        return {
            'predicted_emotion': predicted_emotion,
            'vader': vader_scores,
            'textblob': textblob_analysis
        }

    def visualize_emotions(self, emotions_dict):
        emotions_df = pd.DataFrame(list(emotions_dict.items()), columns=['Emotion', 'Percentage'])
        plt.figure(figsize=(8, 4))
        sns.barplot(x='Emotion', y='Percentage', data=emotions_df)
        plt.title('Current Emotional State')
        plt.tight_layout()
        image_path = 'emotional_state.png'
        plt.savefig(image_path)
        plt.close()
        return image_path
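# Usage sketch (illustrative only; assumes the emotion model downloads cleanly):
#   analyzer = EmotionalAnalyzer()
#   analyzer.detailed_emotional_analysis("I love this!")
#   # -> {'predicted_emotion': 'love',
#   #     'vader': {'neg': ..., 'neu': ..., 'pos': ..., 'compound': ...},
#   #     'textblob': {'polarity': ..., 'subjectivity': ..., 'word_count': ..., 'sentence_count': 1}}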
# ---------------------------
# LLM Response Generator Module
# ---------------------------
class LLMResponder:
    def __init__(self, model_name="SicariusSicariiStuff/Impish_LLAMA_3B_GGUF"):
        self.model_name = model_name
        if BACKEND == "llama_cpp":
            # Replace with the actual path to your GGUF file.
            self.llm = Llama(model_path="path/to/your/gguf/file.gguf", n_ctx=1024)
            self.backend = "llama_cpp"
        else:
            # A GGUF-only repo usually ships no transformers-compatible weights,
            # so loading it directly will typically fail and trigger the fallback.
            try:
                self.llm_tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
                self.llm_model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
            except Exception as e:
                # Fall back to a small causal LM. (The previous fallback,
                # sentence-transformers/all-MiniLM-L6-v2, is encoder-only and
                # cannot be loaded with AutoModelForCausalLM.)
                print(f"Error loading {model_name} ({e}); using fallback model distilgpt2.")
                fallback_model = "distilgpt2"
                self.llm_tokenizer = AutoTokenizer.from_pretrained(fallback_model)
                self.llm_model = AutoModelForCausalLM.from_pretrained(fallback_model)
            self.backend = "transformers"
    def generate_response(self, prompt):
        if self.backend == "llama_cpp":
            result = self.llm(prompt=prompt, max_tokens=256, temperature=0.95, top_p=0.95)
            # llama-cpp-python returns an OpenAI-style completion dict; the
            # generated text lives under choices[0]["text"], not a "response" key.
            response = result["choices"][0]["text"]
        else:
            inputs = self.llm_tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1024)
            with torch.no_grad():
                output_ids = self.llm_model.generate(
                    inputs.input_ids,
                    attention_mask=inputs.attention_mask,
                    # max_new_tokens bounds only the generated text; the original
                    # max_length=1024 also counted prompt tokens, so a long prompt
                    # could leave no room for a reply.
                    max_new_tokens=256,
                    do_sample=True,
                    top_p=0.95,
                    top_k=50,
                    pad_token_id=self.llm_tokenizer.eos_token_id
                )
            response = self.llm_tokenizer.decode(output_ids[0], skip_special_tokens=True)
        return response
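# Usage sketch (illustrative; which backend runs depends on your environment
# and on the GGUF path configured above):
#   responder = LLMResponder()
#   responder.generate_response("Say something kind:")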
# ---------------------------
# Main Interactive Interface Function
# ---------------------------
# Instantiate the heavyweight models once at import time rather than inside the
# handler, so each request does not re-load them from disk.
emotion_analyzer = EmotionalAnalyzer()
llm_responder = LLMResponder()

def interactive_interface(input_text):
    emotional_data = emotion_analyzer.detailed_emotional_analysis(input_text)
    # Simulated emotional state for the bar chart: random placeholder values,
    # not derived from the input text.
    current_emotions = {
        'joy': random.randint(10, 30),
        'sadness': random.randint(5, 20),
        'anger': random.randint(10, 25),
        'fear': random.randint(5, 15),
        'love': random.randint(10, 30),
        'surprise': random.randint(5, 20)
    }
    emotion_image = emotion_analyzer.visualize_emotions(current_emotions)
    prompt = (
        f"Input: {input_text}\n"
        f"Detected Emotion: {emotional_data['predicted_emotion']}\n"
        f"VADER Scores: {emotional_data['vader']}\n"
        "Provide a thoughtful, emotionally aware response that reflects the above data:"
    )
    llm_response = llm_responder.generate_response(prompt)
    return {
        'detailed_emotional_analysis': emotional_data,
        'llm_response': llm_response,
        'emotion_visualization': emotion_image
    }
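# The returned dict feeds gradio_interface below: the analysis and LLM response
# are flattened into a text block, and the saved PNG path is handed to gr.Image.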
def gradio_interface(input_text):
    result = interactive_interface(input_text)
    output_text = (
        f"Detailed Emotional Analysis:\n"
        f" - Predicted Emotion: {result['detailed_emotional_analysis']['predicted_emotion']}\n"
        f" - VADER: {result['detailed_emotional_analysis']['vader']}\n"
        f" - TextBlob: {result['detailed_emotional_analysis']['textblob']}\n\n"
        f"LLM Response:\n{result['llm_response']}"
    )
    return output_text, result['emotion_visualization']
# ---------------------------
# Create Gradio Interface
# ---------------------------
iface = gr.Interface(
    fn=gradio_interface,
    inputs="text",
    outputs=["text", gr.Image(type="filepath")],
    title="Enhanced Emotional Analysis with GGUF LLM Support",
    description="Enter text to perform detailed emotional analysis and generate an emotionally aware response using the Impish_LLAMA_3B_GGUF model."
)

if __name__ == "__main__":
    iface.launch()