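"""Cain: emotion-aware chat and text-completion demo.

A Gradio app that pairs a DistilBERT emotion classifier and VADER/TextBlob
sentiment analysis with the ELN-Llama-1B causal language model, exposing an
emotion-aware response tab and a streaming text-completion tab.
"""
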
import warnings
import os
import random
import gradio as gr
import torch
import pandas as pd
import matplotlib
matplotlib.use("Agg")  # non-interactive backend so plotting works on headless servers
import matplotlib.pyplot as plt
import seaborn as sns
import nltk
from nltk.sentiment import SentimentIntensityAnalyzer
from textblob import TextBlob
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    AutoModelForSequenceClassification,
)

# Suppress noisy FutureWarnings and pre-download the VADER lexicon used by SentimentIntensityAnalyzer
warnings.filterwarnings('ignore', category=FutureWarning)
nltk.download('vader_lexicon', quiet=True)

# --- Emotion Analyzer ---
class EmotionalAnalyzer:
    def __init__(self):
        self.model = AutoModelForSequenceClassification.from_pretrained(
            "bhadresh-savani/distilbert-base-uncased-emotion"
        )
        self.tokenizer = AutoTokenizer.from_pretrained(
            "bhadresh-savani/distilbert-base-uncased-emotion"
        )
        self.labels = ["sadness", "joy", "love", "anger", "fear", "surprise"]
        self.sia = SentimentIntensityAnalyzer()

    def predict_emotion(self, text):
        # Classify the text and return the highest-probability emotion label
        inputs = self.tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
        with torch.no_grad():
            outputs = self.model(**inputs)
        probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
        return self.labels[torch.argmax(probs).item()]

    def analyze(self, text):
        vader_scores = self.sia.polarity_scores(text)
        blob = TextBlob(text)
        blob_data = {
            "polarity": blob.sentiment.polarity,
            "subjectivity": blob.sentiment.subjectivity,
            "word_count": len(blob.words),
            "sentence_count": len(blob.sentences),
        }
        return {
            "emotion": self.predict_emotion(text),
            "vader": vader_scores,
            "textblob": blob_data,
        }

    def plot_emotions(self):
        # Note: the chart shows randomly simulated percentages, not values derived from the input text
        simulated_emotions = {
            "joy": random.randint(10, 30),
            "sadness": random.randint(5, 20),
            "anger": random.randint(10, 25),
            "fear": random.randint(5, 15),
            "love": random.randint(10, 30),
            "surprise": random.randint(5, 20),
        }
        df = pd.DataFrame(list(simulated_emotions.items()), columns=["Emotion", "Percentage"])
        plt.figure(figsize=(8, 4))
        sns.barplot(x="Emotion", y="Percentage", data=df)
        plt.title("Simulated Emotional State")
        plt.tight_layout()
        path = "emotions.png"
        plt.savefig(path)
        plt.close()
        return path


# --- Text Completion LLM ---
tokenizer = AutoTokenizer.from_pretrained("diabolic6045/ELN-Llama-1B-base")
model = AutoModelForCausalLM.from_pretrained("diabolic6045/ELN-Llama-1B-base")
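
# Both tabs share this tokenizer/model pair: generate_completion() samples tokens manually
# to stream output, while emotion_aware_response() calls model.generate().
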
def generate_completion(message, temperature, max_length):
    # Sample one token at a time and stream the growing text back to the UI
    inputs = tokenizer(message, return_tensors="pt", truncation=True, max_length=512)
    input_ids = inputs["input_ids"]
    current_text = message
    yield current_text  # echo the prompt right away so the output box is never empty
    # max_length counts total tokens (prompt + completion), so long prompts leave fewer new tokens
    for _ in range(max(0, max_length - input_ids.shape[1])):
        with torch.no_grad():
            outputs = model(input_ids)
        logits = outputs.logits[:, -1, :] / temperature
        probs = torch.softmax(logits, dim=-1)
        next_token = torch.multinomial(probs, num_samples=1)
        if next_token.item() == tokenizer.eos_token_id:
            break
        input_ids = torch.cat([input_ids, next_token], dim=-1)
        new_token_text = tokenizer.decode(next_token[0], skip_special_tokens=True)
        current_text += new_token_text
        yield current_text

# --- Emotion-Aware LLM Response ---
def emotion_aware_response(input_text):
    analyzer = EmotionalAnalyzer()  # note: reloads the emotion classifier on every call
    results = analyzer.analyze(input_text)
    image_path = analyzer.plot_emotions()

    # Fold the detected emotion and VADER scores into the prompt so the LLM can condition on them
    prompt = (
        f"Input: {input_text}\n"
        f"Detected Emotion: {results['emotion']}\n"
        f"VADER Scores: {results['vader']}\n"
        f"Respond thoughtfully and emotionally aware:"
    )
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
    with torch.no_grad():
        output_ids = model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,
            max_length=512,
            do_sample=True,
            temperature=0.7,
            top_k=50,
            top_p=0.95,
            pad_token_id=tokenizer.eos_token_id,
        )
    # Decode only the newly generated tokens so the prompt is not echoed back in the reply
    response = tokenizer.decode(output_ids[0][inputs.input_ids.shape[1]:], skip_special_tokens=True)

    summary = (
        f"Emotion: {results['emotion']}\n"
        f"VADER: {results['vader']}\n"
        f"TextBlob: {results['textblob']}\n\n"
        f"LLM Response:\n{response}"
    )
    return summary, image_path

# --- Gradio Interface ---
with gr.Blocks(title="ELN LLaMA 1B Enhanced Demo") as app:
    gr.Markdown("## 🧠 ELN-LLaMA Emotion-Aware & Completion Interface")

    with gr.Tab("💬 Emotion-Aware Response"):
        with gr.Row():
            input_text = gr.Textbox(label="Input Text", lines=4, placeholder="Type something with emotion or meaning...")
        with gr.Row():
            text_output = gr.Textbox(label="Response", lines=8)
            img_output = gr.Image(label="Emotional Visualization")
        emotion_btn = gr.Button("Generate Emotion-Aware Response")
        emotion_btn.click(emotion_aware_response, inputs=input_text, outputs=[text_output, img_output])

    with gr.Tab("📝 Text Completion"):
        comp_text = gr.Textbox(label="Prompt", lines=4)
        comp_temp = gr.Slider(minimum=0.1, maximum=1.0, value=0.7, label="Temperature")
        comp_len = gr.Slider(minimum=50, maximum=500, value=200, step=50, label="Max Length")
        comp_output = gr.Textbox(label="Generated Completion", lines=8)
        comp_button = gr.Button("Complete Text")
        comp_button.click(generate_completion, inputs=[comp_text, comp_temp, comp_len], outputs=comp_output)

app.launch(share=True)