import gradio as gr
from transformers import DistilBertTokenizer, DistilBertForSequenceClassification
import torch
# Load Model & Tokenizer | |
model_name = "AventIQ-AI/distilbert-spam-detection" | |
tokenizer = DistilBertTokenizer.from_pretrained(model_name) | |
model = DistilBertForSequenceClassification.from_pretrained(model_name) | |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") | |
model.to(device) | |
def predict_spam(text):
    """Classify a single message as spam or not spam."""
    model.eval()
    inputs = tokenizer(text, return_tensors="pt", padding="max_length", truncation=True, max_length=128).to(device)
    with torch.no_grad():
        outputs = model(**inputs)
    probs = torch.softmax(outputs.logits, dim=-1)
    pred_class = torch.argmax(probs).item()
    return "🚨 Spam" if pred_class == 1 else "✅ Not Spam"
# Create Gradio Interface
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 📩 AI-Powered Spam Detector")
    gr.Markdown("Enter a message below to check whether it's spam!")
    with gr.Row():
        input_box = gr.Textbox(placeholder="Type a message here...", lines=2)
        output_label = gr.Label()
    button = gr.Button("🔍 Analyze")
    button.click(predict_spam, inputs=input_box, outputs=output_label)
# Launch
if __name__ == "__main__":
    demo.launch()