import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from huggingface_hub import hf_hub_download
import torch
import json


def predict(text):
    # Tokenize the description and run a single forward pass
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
    probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
    predicted_class = torch.argmax(probs, dim=-1).item()
    return id2label[predicted_class], probs[0][predicted_class].item()


if __name__ == '__main__':
    model_path = "Dunateo/roberta-cwe-classifier-kelemia-v0.2"

    # init the tokenizer and model
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = AutoModelForSequenceClassification.from_pretrained(model_path)

    # get the label dictionary shipped with the model repo
    label_dict_file = hf_hub_download(repo_id=model_path, filename="label_dict.json")
    with open(label_dict_file, "r") as f:
        label_dict = json.load(f)
    # invert it so predicted class indices can be mapped back to CWE labels
    id2label = {v: k for k, v in label_dict.items()}

    # Gradio interface definition (the user interface)
    iface = gr.Interface(
        fn=predict,
        inputs=gr.Textbox(lines=5, label="Enter vulnerability description"),
        outputs=[gr.Label(label="Predicted CWE"), gr.Number(label="Confidence")],
        title="Vulnerability CWE Classification",
        description="Enter a vulnerability description to classify it into a CWE category.",
    )
    iface.launch()
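
# A minimal sketch of querying the launched app from another process with
# gradio_client. It assumes the script above is running locally on Gradio's
# default port (7860) and that the Interface exposes its single function
# under the default "/predict" endpoint; the sample description is illustrative.
from gradio_client import Client

client = Client("http://127.0.0.1:7860/")
result = client.predict(
    "Improper neutralization of user input in the search form leads to reflected XSS.",
    api_name="/predict",
)
print(result)  # one entry per output component: predicted CWE label and confidence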