import gradio as gr
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Load the fine-tuned classifier and its tokenizer from the Hugging Face Hub.
model_name = "vai0511/ai-content-classifier"
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

def classify_text(text):
    """Classify a piece of text as human-written, AI-generated, or paraphrased."""
    # Tokenize the input, truncating/padding to the model's 512-token limit.
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)

    # Run inference without tracking gradients.
    with torch.no_grad():
        outputs = model(**inputs)

    # Pick the class with the highest logit.
    logits = outputs.logits
    predicted_class = torch.argmax(logits, dim=1).item()

    # Map the predicted class index to a human-readable label.
    labels = {0: "Human-Written", 1: "AI-Generated", 2: "Paraphrased"}
    return labels[predicted_class]

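# Quick sanity check (hypothetical example input): calling classify_text directly,
# e.g. classify_text("I wrote this sentence myself."), returns one of the three
# label strings above.
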
# Build a simple Gradio interface around the classifier.
iface = gr.Interface(
    fn=classify_text,
    inputs=gr.Textbox(lines=5, placeholder="Enter your text here..."),
    outputs="text",
    title="AI-Driven Content Source Identification",
    description="Detect whether the given text is human-written, AI-generated, or paraphrased.",
)

# Launch the Gradio app (pass share=True for a temporary public link).
iface.launch()