|
import gradio as gr

import numpy as np
import torch
from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer


MODEL_NAME = "URaBOT2024/debertaV3_FullFeature"


# Load the fine-tuned two-class sequence classifier and its tokenizer from the
# Hugging Face Hub.
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME, num_labels=2)
config = AutoConfig.from_pretrained(MODEL_NAME)
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

# Run on the GPU when one is available; the tokenized inputs are moved to the
# same device inside verify().
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model.to(device)
model.eval()
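
# For reference, the same checkpoint could also be served through the
# higher-level transformers pipeline API. This is only a sketch of an
# alternative (it returns label/score dicts rather than the custom score
# computed in verify() below), not what this app uses:
#
#     from transformers import pipeline
#     classifier = pipeline(
#         "text-classification",
#         model=MODEL_NAME,
#         device=0 if torch.cuda.is_available() else -1,
#     )
#     classifier("some tweet text")  # -> [{"label": ..., "score": ...}]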
def verify(display_name, tweet_content, is_verified, likes):
    """Classify one tweet's features and return a confidence score in [0, 1]."""
    # Concatenate the tweet features into a single sequence, joined by the
    # tokenizer's separator token.
    text = (
        tweet_content + tokenizer.sep_token
        + display_name + tokenizer.sep_token
        + is_verified + tokenizer.sep_token
        + likes
    )
    tokenized_input = tokenizer(text, return_tensors='pt', padding=True, truncation=True).to(device)
    with torch.no_grad():
        outputs = model(**tokenized_input)

    # Move the logits to the CPU before converting to NumPy (.numpy() raises on
    # CUDA tensors), then squash each logit through a sigmoid.
    logits = outputs.logits.detach().cpu().numpy()
    sigmoid = (1 / (1 + np.exp(-logits))).tolist()[0]

    # The predicted class is the index of the larger logit.
    label = np.argmax(logits, axis=-1).item()

    # Collapse the two per-class scores into a single number oriented toward
    # class 1: class 1's score directly, or the complement of class 0's score.
    if label == 0:
        return 1 - sigmoid[0]
    else:
        return sigmoid[1]
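
# Note: the per-logit sigmoid above scores the two logits independently. For a
# two-class head, the more conventional probability estimate is a softmax over
# both logits; a sketch of that alternative, inside verify(), would be:
#
#     probs = torch.softmax(outputs.logits, dim=-1)  # shape (1, 2)
#     score = probs[0, 1].item()                     # probability of class 1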
""" |
|
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface |
|
""" |
|
|
|
iface = gr.Interface(
    fn=verify,
    inputs=[
        gr.Textbox(label="Display Name"),
        gr.Textbox(label="Tweet Content"),
        gr.Textbox(label="Is Verified"),
        gr.Textbox(label="Number of Likes"),
    ],
    outputs=gr.Textbox(),
    live=True,  # re-run verify() on every input change instead of waiting for a submit
)
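
# Quick smoke test with illustrative values (hypothetical, not real tweet
# data); verify() can also be called directly, without the UI:
#
#     score = verify("Jane Doe", "Free crypto, click the link below!", "True", "12")
#     print(score)  # a float in [0, 1]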
if __name__ == "__main__":
    # share=True also exposes the app through a temporary public gradio.live link.
    iface.launch(share=True)