# app.py
import subprocess

# Install dependencies at startup (assumes a requirements.txt sits next to this script)
subprocess.run(["pip", "install", "-r", "requirements.txt"], check=True)

import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Load the tokenizer and model from the Hugging Face Model Hub
model_name = "SamLowe/roberta-base-go_emotions"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# Emotion labels used by the model (the 28 GoEmotions classes, in label-index order)
emotion_labels = ["admiration", "amusement", "anger", "annoyance", "approval",
                  "caring", "confusion", "curiosity", "desire", "disappointment",
                  "disapproval", "disgust", "embarrassment", "excitement",
                  "fear", "gratitude", "grief", "joy", "love", "nervousness",
                  "optimism", "pride", "realization", "relief", "remorse",
                  "sadness", "surprise", "neutral"]

def predict_emotion(text):
    # Tokenize the input and run it through the model
    inputs = tokenizer(text, return_tensors="pt")
    outputs = model(**inputs)
    logits = outputs.logits
    # Pick the highest-scoring class and map it to its label
    predicted_class = logits.argmax().item()
    predicted_emotion = emotion_labels[predicted_class]
    return predicted_emotion  # Return the predicted emotion label directly

# Build a simple Gradio interface around the prediction function
iface = gr.Interface(
    fn=predict_emotion,
    inputs=gr.Textbox(),
    outputs="text",
    live=True,
    title="Emotion Prediction",
    description="Enter a sentence for emotion prediction.",
)

iface.launch()
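
# Assumed requirements.txt for this app (a minimal sketch, not taken from the
# original file): the two libraries imported above plus torch, which
# transformers needs in order to return "pt" (PyTorch) tensors.
#
#   gradio
#   transformers
#   torch
#
# Quick check without launching the UI (hypothetical example; the exact label
# depends on the model's prediction):
#   predict_emotion("Thanks so much for your help!")  # likely "gratitude"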