import os

# Installing at runtime keeps this script self-contained; for a pinned, reproducible
# environment, list these packages in a requirements.txt instead.
os.system("pip install torch transformers gradio matplotlib")

import torch
import gradio as gr
import matplotlib.pyplot as plt
from transformers import AutoTokenizer, AutoModelForSequenceClassification

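# Load the fine-tuned toxicity classifier and its tokenizer from the Hugging Face Hub.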
model_name = "HyperX-Sentience/RogueBERT-Toxicity-85K"
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

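# Run inference on the GPU when available, otherwise fall back to the CPU.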
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

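# Toxicity categories, in the order the code reads them out of the model's logits.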
labels = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]


def predict_toxicity(comment):
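    """Return a dict mapping each toxicity label to its predicted probability for the comment."""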
    inputs = tokenizer([comment], truncation=True, padding="max_length", max_length=128, return_tensors="pt")
    inputs = {key: val.to(device) for key, val in inputs.items()}

    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        # Multi-label classification: apply a sigmoid to each logit independently.
        probabilities = torch.sigmoid(logits).cpu().numpy()[0]

    toxicity_scores = {label: float(probabilities[i]) for i, label in enumerate(labels)}
    return toxicity_scores


def plot_toxicity(comment):
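    """Score the comment and render the per-label probabilities as a dark-themed bar chart."""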
    toxicity_scores = predict_toxicity(comment)
    categories = list(toxicity_scores.keys())
    scores = list(toxicity_scores.values())

    plt.figure(figsize=(8, 5), facecolor='black')
    ax = plt.gca()
    ax.set_facecolor('black')
    bars = plt.bar(categories, scores, color='#20B2AA', edgecolor='white')

    plt.xticks(color='white', fontsize=12)
    plt.yticks(color='white', fontsize=12)
    plt.title("Toxicity Score Analysis", color='white', fontsize=14)
    plt.ylim(0, 1)

    # Annotate each bar with its score.
    for bar in bars:
        yval = bar.get_height()
        plt.text(bar.get_x() + bar.get_width() / 2, yval + 0.02, f'{yval:.2f}', ha='center', color='white', fontsize=10)

    plt.tight_layout()
    # Save the chart to disk so Gradio can serve it as an image file.
    plt.savefig("toxicity_chart.png", facecolor='black')
    plt.close()

    return "toxicity_chart.png"


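# Gradio UI: a single text input, with the saved chart returned as the output image.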
demo = gr.Interface(
    fn=plot_toxicity,
    inputs=gr.Textbox(label="Enter a comment"),
    outputs=gr.Image(type="filepath", label="Toxicity Analysis"),
    title="Toxicity Detector",
    description="Enter a comment to analyze its toxicity scores across different categories.",
)

if __name__ == "__main__":
    demo.launch()