import warnings
# Suppress FutureWarnings
warnings.filterwarnings("ignore", category=FutureWarning)
# --- Monkey Patch for Gradio Schema Parsing ---
# This patch prevents APIInfoParseError by handling boolean schema values.
try:
    import gradio_client.utils as client_utils

    # Patch the helper function to handle bool types in the schema.
    original_json_schema_to_python_type = client_utils._json_schema_to_python_type

    def patched_json_schema_to_python_type(schema, defs=None):
        if isinstance(schema, bool):
            # If the schema is a boolean, simply return a generic type.
            return "Any"
        return original_json_schema_to_python_type(schema, defs)

    client_utils._json_schema_to_python_type = patched_json_schema_to_python_type

    # Also patch get_type to be extra safe.
    original_get_type = client_utils.get_type

    def patched_get_type(schema):
        if isinstance(schema, bool):
            return "Any"
        if not isinstance(schema, dict):
            return "Any"
        return original_get_type(schema)

    client_utils.get_type = patched_get_type
except Exception as e:
    print("Warning: Failed to patch gradio_client schema utils:", e)
import random
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import nltk
import gradio as gr
from nltk.sentiment import SentimentIntensityAnalyzer
from textblob import TextBlob
import torch
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    AutoModelForSequenceClassification,
)
# Download necessary NLTK data
nltk.download("vader_lexicon", quiet=True)
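# The vader_lexicon corpus is required by SentimentIntensityAnalyzer below;
# NLTK caches it locally after the first download.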
# --- Emotion Analyzer ---
class EmotionalAnalyzer:
    def __init__(self):
        try:
            self.model = AutoModelForSequenceClassification.from_pretrained(
                "bhadresh-savani/distilbert-base-uncased-emotion"
            )
            self.tokenizer = AutoTokenizer.from_pretrained(
                "bhadresh-savani/distilbert-base-uncased-emotion"
            )
        except Exception:
            self.model = None
            self.tokenizer = None
        self.labels = ["sadness", "joy", "love", "anger", "fear", "surprise"]
        self.sia = SentimentIntensityAnalyzer()
    def predict_emotion(self, text):
        try:
            if self.model is None or self.tokenizer is None:
                raise ValueError("Model or tokenizer not initialized properly.")
            inputs = self.tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
            with torch.no_grad():
                outputs = self.model(**inputs)
            probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
            return self.labels[torch.argmax(probs).item()]
        except Exception:
            return "Unknown"
    def analyze(self, text):
        try:
            vader_scores = self.sia.polarity_scores(text)
            blob = TextBlob(text)
            blob_data = {
                "polarity": blob.sentiment.polarity,
                "subjectivity": blob.sentiment.subjectivity,
                "word_count": len(blob.words),
                "sentence_count": len(blob.sentences),
            }
            return {
                "emotion": self.predict_emotion(text),
                "vader": vader_scores,
                "textblob": blob_data,
            }
        except Exception:
            return {"emotion": "Unknown", "vader": {}, "textblob": {}}
    def plot_emotions(self):
        try:
            simulated_emotions = {
                "joy": random.randint(10, 30),
                "sadness": random.randint(5, 20),
                "anger": random.randint(10, 25),
                "fear": random.randint(5, 15),
                "love": random.randint(10, 30),
                "surprise": random.randint(5, 20),
            }
            df = pd.DataFrame(list(simulated_emotions.items()), columns=["Emotion", "Percentage"])
            plt.figure(figsize=(8, 4))
            sns.barplot(x="Emotion", y="Percentage", data=df)
            plt.title("Simulated Emotional State")
            plt.tight_layout()
            path = "emotions.png"
            plt.savefig(path)
            plt.close()
            return path
        except Exception:
            return None  # Leave the Gradio image output empty if plotting fails.
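
# Standalone usage sketch for EmotionalAnalyzer (illustrative only; the sample
# text and the shape of the result are assumptions, not captured output):
#
#   analyzer = EmotionalAnalyzer()
#   analyzer.analyze("I can't believe how well this turned out!")
#   # -> {"emotion": "joy", "vader": {...}, "textblob": {...}}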
# --- Text Completion LLM ---
tokenizer = AutoTokenizer.from_pretrained("diabolic6045/ELN-Llama-1B-base")
model = AutoModelForCausalLM.from_pretrained("diabolic6045/ELN-Llama-1B-base")
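# Note: the LLM and its tokenizer are loaded once at import time, so both
# Gradio callbacks below reuse the same in-memory model.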
def generate_completion(message, temperature, max_length):
    try:
        inputs = tokenizer(message, return_tensors="pt", truncation=True, max_length=512)
        input_ids = inputs["input_ids"]
        max_length = int(max_length)  # Slider values arrive as floats; range() needs an int.
        for _ in range(max_length - input_ids.shape[1]):
            with torch.no_grad():
                outputs = model(input_ids)
            # Scale the last-position logits by temperature, then sample the next token.
            logits = outputs.logits[:, -1, :] / temperature
            probs = torch.softmax(logits, dim=-1)
            next_token = torch.multinomial(probs, num_samples=1)
            if next_token.item() == tokenizer.eos_token_id:
                break
            input_ids = torch.cat([input_ids, next_token], dim=-1)
        # Decode the full sequence once at the end; decoding token-by-token drops
        # the spacing information the tokenizer encodes in each piece.
        return tokenizer.decode(input_ids[0], skip_special_tokens=True)
    except Exception:
        return "Error generating text."
# --- Emotion-Aware LLM Response ---
def emotion_aware_response(input_text):
    try:
        analyzer = EmotionalAnalyzer()
        results = analyzer.analyze(input_text)
        image_path = analyzer.plot_emotions()  # May be None if plotting fails.
        prompt = (
            f"Input: {input_text}\n"
            f"Detected Emotion: {results['emotion']}\n"
            f"VADER Scores: {results['vader']}\n"
            f"Respond thoughtfully and emotionally aware:"
        )
        inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
        with torch.no_grad():
            output_ids = model.generate(
                inputs.input_ids,
                attention_mask=inputs.attention_mask,
                # max_new_tokens (rather than max_length) guarantees room for a
                # response even when the prompt itself fills 512 tokens.
                max_new_tokens=256,
                do_sample=True,
                temperature=0.7,
                top_k=50,
                top_p=0.95,
                pad_token_id=tokenizer.eos_token_id,
            )
        response = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        summary = (
            f"Emotion: {results['emotion']}\n"
            f"VADER: {results['vader']}\n"
            f"TextBlob: {results['textblob']}\n\n"
            f"LLM Response:\n{response}"
        )
        return summary, image_path
    except Exception:
        return "Error processing emotion-aware response", None
# --- Gradio Interface ---
with gr.Blocks(title="ELN LLaMA 1B Enhanced Demo") as app:
    gr.Markdown("## 🧠 ELN-LLaMA Emotion-Aware & Completion Interface")

    with gr.Tab("💬 Emotion-Aware Response"):
        with gr.Row():
            input_text = gr.Textbox(label="Input Text", lines=4, placeholder="Type something with emotion or meaning...")
        with gr.Row():
            text_output = gr.Textbox(label="Response", lines=8)
            img_output = gr.Image(label="Emotional Visualization")
        emotion_btn = gr.Button("Generate Emotion-Aware Response")
        emotion_btn.click(emotion_aware_response, inputs=input_text, outputs=[text_output, img_output])

    with gr.Tab("📝 Text Completion"):
        comp_text = gr.Textbox(label="Prompt", lines=4)
        comp_temp = gr.Slider(minimum=0.1, maximum=1.0, value=0.7, label="Temperature")
        comp_len = gr.Slider(minimum=50, maximum=500, value=200, step=50, label="Max Length")
        comp_output = gr.Textbox(label="Generated Completion", lines=8)
        comp_button = gr.Button("Complete Text")
        comp_button.click(generate_completion, inputs=[comp_text, comp_temp, comp_len], outputs=comp_output)
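    # Note: Gradio passes component values to callbacks positionally, so the
    # two handlers above receive their arguments in the order listed in `inputs`.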
# Launch the Gradio app (remove share=True if running in an environment that doesn't support it)
app.launch(share=True)