"""Interactive CLI that classifies the political bias of input text.

Loads a fine-tuned BERT sequence-classification checkpoint and runs a
simple read-eval-print loop: each line the user enters is classified as
"Left", "Center", or "Right".
"""
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Path to the fine-tuned checkpoint produced by the training run.
model_path = "./training/bert-allsides-bias-detector/checkpoint-10494"

tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForSequenceClassification.from_pretrained(model_path)

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()  # explicit: ensure inference mode (no dropout)

# Class index -> human-readable label.
# NOTE(review): assumes this matches the label order used during training
# (0=Left, 1=Center, 2=Right) — confirm against the training config.
label_mapping = {0: "Left", 1: "Center", 2: "Right"}


def predict_bias(text: str) -> str:
    """Predict the political bias of the given text.

    Args:
        text: Arbitrary input text; tokenized and truncated to 512 tokens.

    Returns:
        One of "Left", "Center", or "Right".
    """
    inputs = tokenizer(
        text,
        return_tensors="pt",
        truncation=True,
        padding=True,
        max_length=512,
    ).to(device)
    # No gradients needed for inference; saves memory and time.
    with torch.no_grad():
        outputs = model(**inputs)
    predicted_class = torch.argmax(outputs.logits, dim=-1).item()
    return label_mapping[predicted_class]


def _main() -> None:
    """Run a simple classification REPL until 'exit', EOF, or Ctrl-C."""
    while True:
        try:
            text = input("\nEnter text to classify (or type 'exit' to quit): ")
        except (EOFError, KeyboardInterrupt):
            # Exit cleanly on Ctrl-D / Ctrl-C instead of a traceback.
            break
        if text.lower() == "exit":
            break
        if not text.strip():
            # Skip blank lines rather than classifying empty input.
            continue
        bias_label = predict_bias(text)
        print(f"Predicted Bias: {bias_label}")


if __name__ == "__main__":
    _main()