"""Gradio app that classifies Russian text as spam using ruSpamNS models."""
import os
import re

import emoji
import gradio as gr
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Hugging Face auth token for (possibly gated) model repos; may be None.
TOKEN = os.getenv("HF_TOKEN")

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Cache of loaded (tokenizer, model) pairs keyed by checkpoint name, so each
# model is downloaded and moved to the device once — not on every request.
_MODEL_CACHE = {}


def _get_model(model_name):
    """Return a cached ``(tokenizer, model)`` pair, loading on first use."""
    if model_name not in _MODEL_CACHE:
        # `token=` replaces the deprecated `use_auth_token=` argument.
        tokenizer = AutoTokenizer.from_pretrained(model_name, token=TOKEN)
        model = AutoModelForSequenceClassification.from_pretrained(
            model_name, token=TOKEN
        )
        model.to(device)
        model.eval()  # inference only — disable dropout etc.
        _MODEL_CACHE[model_name] = (tokenizer, model)
    return _MODEL_CACHE[model_name]


def clean_text(text):
    """Normalize *text*: drop emoji and non-letters, collapse whitespace.

    Keeps only Latin/Cyrillic letters and spaces, capitalizes the first
    character (``str.capitalize`` also lowercases the rest, so a separate
    ``lower()`` call is unnecessary), and squeezes runs of whitespace.
    """
    text = emoji.replace_emoji(text, replace='')
    text = re.sub(r'[^a-zA-Zа-яА-ЯёЁ ]', '', text, flags=re.UNICODE)
    text = text.capitalize()
    text = re.sub(r'\s+', ' ', text).strip()
    return text


def classify_text(text, model_choice):
    """Classify *text* as spam/not-spam with the chosen ruSpamNS variant.

    Parameters
    ----------
    text : str
        Raw user input.
    model_choice : str
        Model suffix, ``"small"`` or ``"big"``.

    Returns
    -------
    str
        Human-readable verdict with the spam probability in percent.
    """
    tokenizer, model = _get_model(f"NeuroSpaceX/ruSpamNS_{model_choice}")
    message = clean_text(text)
    encoding = tokenizer(
        message,
        padding='max_length',
        truncation=True,
        max_length=128,
        return_tensors='pt',
    )
    input_ids = encoding['input_ids'].to(device)
    attention_mask = encoding['attention_mask'].to(device)
    with torch.no_grad():
        logits = model(input_ids, attention_mask=attention_mask).logits
    # Single-logit head: sigmoid yields the spam probability directly.
    prediction = torch.sigmoid(logits).cpu().numpy()[0][0]
    label = "СПАМ" if prediction >= 0.5 else "НЕ СПАМ"
    return f"{label} (вероятность: {prediction*100:.2f}%)"


iface = gr.Interface(
    fn=classify_text,
    inputs=[
        gr.Textbox(lines=3, placeholder="Введите текст..."),
        gr.Radio(["small", "big"], label="Выберите модель"),
    ],
    outputs="text",
    title="ruSpamNS - Проверка на спам",
    description="Введите текст, чтобы проверить, является ли он спамом.",
)

if __name__ == "__main__":
    iface.launch()