import gradio as gr
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import torch
import os
import re

# Model repo on the Hugging Face Hub; HF_TOKEN must be set in the environment
# if the repository is private or gated.
MODEL_NAME = "NeuroSpaceX/ruSpamNS"
TOKEN = os.getenv("HF_TOKEN")

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_auth_token=TOKEN)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME, use_auth_token=TOKEN)

# Run on GPU if available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)

def clean_text(text):
    # Normalize the message: trim whitespace, flatten newlines, keep only word
    # characters, spaces and basic punctuation, drop !/? and lowercase the result.
    text = text.strip()
    text = text.replace('\n', ' ')
    text = re.sub(r'[^\w\s,.!?]', '', text, flags=re.UNICODE)
    text = re.sub(r'[!?]', '', text)
    return text.lower()

def classify_text(text):
    message = clean_text(text)
    # Tokenize to a fixed length of 128 and move the tensors to the model's device.
    encoding = tokenizer(message, padding='max_length', truncation=True, max_length=128, return_tensors='pt')
    input_ids = encoding['input_ids'].to(device)
    attention_mask = encoding['attention_mask'].to(device)
    with torch.no_grad():
        outputs = model(input_ids, attention_mask=attention_mask).logits
        # Single-logit head: sigmoid of the logit gives the spam probability.
        prediction = torch.sigmoid(outputs).cpu().numpy()[0][0]
    label = "SPAM" if prediction >= 0.5 else "NOT SPAM"
    return f"{label} (probability: {prediction*100:.2f}%)"

iface = gr.Interface(
    fn=classify_text,
    inputs=gr.Textbox(lines=3, placeholder="Enter text..."),
    outputs="text",
    title="ruSpamNS - Spam Check",
    description="Enter a text to check whether it is spam."
)

iface.launch()