import gradio as gr
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import torch
import re

# Load the fine-tuned disaster-tweet classifier and its tokenizer from the Hugging Face Hub.
model_name = "alperugurcan/nlp-disaster"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def clean_text(text):
    # Remove URLs and punctuation, keeping only word characters and whitespace.
    return re.sub(r'http\S+|[^\w\s]', '', text).strip()

def predict(text):
    if not text or len(text.strip()) == 0:
        return "Please enter some text"
    try:
        # Clean and tokenize the tweet, then run a forward pass without tracking gradients.
        text = clean_text(text)
        inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=128)
        with torch.no_grad():
            outputs = model(**inputs)
        # Convert logits to probabilities; class 1 means the tweet describes a real disaster.
        probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
        prediction = torch.argmax(outputs.logits, dim=-1)
        confidence = probabilities[0][prediction.item()].item()
        result = "Disaster" if prediction.item() == 1 else "Not Disaster"
        return f"{result} (Confidence: {confidence:.2%})"
    except Exception as e:
        return f"Error in prediction: {str(e)}"
# Gradio UI: a single text box in, a prediction string out.
iface = gr.Interface(
    fn=predict,
    inputs=gr.Textbox(
        label="Tweet Text",
        placeholder="Enter a tweet to analyze...",
        lines=3
    ),
    outputs=gr.Textbox(label="Prediction"),
    title="🚨 Disaster Tweet Classifier",
    description="Enter a tweet to determine if it's about a real disaster or not.",
    examples=[
        ["Just happened: Massive earthquake hits California"],
        ["I'm dying to see the new Spider-Man movie!"],
        ["Forest fire spreading rapidly near residential areas"],
        ["This game is a complete disaster lol"]
    ],
    theme=gr.themes.Base(
        primary_hue="red",
        secondary_hue="yellow",
    )
)

iface.launch(share=True)
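
# Once the Space is running, it can also be queried programmatically with gradio_client.
# A minimal sketch, assuming the Space id matches the model repo name
# ("alperugurcan/nlp-disaster") and the default api_name ("/predict") exposed by gr.Interface:
#
# from gradio_client import Client
# client = Client("alperugurcan/nlp-disaster")
# print(client.predict("Forest fire spreading rapidly near residential areas", api_name="/predict"))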