|
import gradio as gr |
|
|
|
from transformers import RobertaForSequenceClassification, RobertaTokenizer |
|
import torch |
|
|
|
|
|
|
|
# --- One-time setup: fetch the GPTFuzz RoBERTa checkpoint from the HF Hub ---
# NOTE(review): first run downloads weights over the network; later runs hit
# the local cache. `model` and `tokenizer` are read by predict() below.
print("Loading RoBERTa Checkpoint...")
checkpoint_id = 'hubert233/GPTFuzz'
# Tokenizer and model loads are independent; order here is arbitrary.
tokenizer = RobertaTokenizer.from_pretrained(checkpoint_id)
model = RobertaForSequenceClassification.from_pretrained(checkpoint_id)
print("Loading Done!")
|
|
|
def predict(sequence):
    """Classify a single text sequence with the module-level RoBERTa model.

    Args:
        sequence: Input text to score.

    Returns:
        int: Index of the highest-probability class according to the
        loaded checkpoint (presumably a binary jailbreak/not-jailbreak
        label for GPTFuzz — confirm against the model card).
    """
    # Tokenize as a batch of one; truncate at RoBERTa's 512-token limit.
    encoded = tokenizer(
        [sequence],
        padding=True,
        truncation=True,
        max_length=512,
        return_tensors="pt",
    )

    # Inference only — skip gradient bookkeeping.
    with torch.no_grad():
        logits = model(**encoded).logits

    # Softmax is monotonic, so argmax over probabilities equals the
    # index torch.max would report on the same tensor.
    probabilities = torch.nn.functional.softmax(logits, dim=-1)
    predicted = probabilities.argmax(dim=1)

    return predicted[0].item()
|
|
|
|
|
# Wire predict() into a minimal text-in / text-out Gradio UI and serve it.
# launch() blocks, so this is effectively the script's entry point.
iface = gr.Interface(fn=predict, inputs="text", outputs="text")

iface.launch()
|
|