import pandas as pd
from sklearn.model_selection import train_test_split
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer, BertForSequenceClassification
from sklearn.metrics import accuracy_score, classification_report
# from google.colab import drive  # Colab only: uncomment and run drive.mount('/content/drive') if the paths below live on Drive
dataset_path = "" | |
model_path = "" | |
news_df = pd.read_csv(dataset_path)
X = news_df['title']
y = news_df['labels']
# 60/20/20 train/validation/test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.25, random_state=1)  # 0.25 x 0.8 = 0.2
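# Optional sanity check: confirm the three splits have the expected 60/20/20 proportions.
print(f"train={len(X_train)}  val={len(X_val)}  test={len(X_test)}")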
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

def tokenize_data(texts, tokenizer, max_len=128):
    return tokenizer(
        list(texts),
        padding=True,
        truncation=True,
        max_length=max_len,
        return_tensors="pt"
    )
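# Illustrative sanity check (the headline string is made up): the tokenizer
# returns a dict of 'input_ids', 'token_type_ids', and 'attention_mask'
# tensors, each of shape (batch_size, seq_len).
sample = tokenize_data(["Stocks rally after earnings beat"], tokenizer)
print({k: tuple(v.shape) for k, v in sample.items()})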
# Tokenize the training and test datasets
train_encodings = tokenize_data(X_train, tokenizer)
test_encodings = tokenize_data(X_test, tokenizer)
# Custom Dataset wrapping the tokenizer output and the labels
class NewsDataset(Dataset):
    def __init__(self, encodings, labels):
        self.encodings = encodings
        self.labels = labels

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        # The encodings already hold tensors (return_tensors="pt"), so index
        # them directly instead of re-wrapping with torch.tensor()
        item = {key: val[idx] for key, val in self.encodings.items()}
        item['labels'] = torch.tensor(self.labels[idx])
        return item
train_dataset = NewsDataset(train_encodings, y_train.tolist())
test_dataset = NewsDataset(test_encodings, y_test.tolist())
# DataLoaders for batching
train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=16)
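# Optional sanity check: one training batch should yield (16, seq_len) input
# tensors plus a (16,) label tensor.
batch = next(iter(train_loader))
print({k: tuple(v.shape) for k, v in batch.items()})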
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

model = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)
# map_location keeps loading working on CPU-only machines even if the weights were saved on GPU
model.load_state_dict(torch.load(model_path, map_location=device))
model.to(device)
# Optimizer and scheduler (only needed when fine-tuning, not for evaluation).
# Note: transformers' own AdamW is deprecated; use torch.optim.AdamW instead.
# from transformers import get_scheduler
# optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
# num_training_steps = len(train_loader) * 4  # assume 4 epochs
# lr_scheduler = get_scheduler("linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps)
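# A minimal fine-tuning loop matching the commented-out optimizer/scheduler
# above. This is a sketch, not the original training code; the 4-epoch count
# is assumed from the scheduler comment. Defined here but not called, since
# this script only evaluates a saved model.
def train_model(model, train_loader, optimizer, lr_scheduler, num_epochs=4):
    model.train()
    for epoch in range(num_epochs):
        total_loss = 0.0
        for batch in train_loader:
            batch = {k: v.to(device) for k, v in batch.items()}
            outputs = model(**batch)  # BERT computes the loss itself when 'labels' is in the batch
            loss = outputs.loss
            loss.backward()
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            total_loss += loss.item()
        print(f"epoch {epoch + 1}: mean loss {total_loss / len(train_loader):.4f}")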
# Evaluate the model
def evaluate_model(model, test_loader):
    model.eval()
    y_true, y_pred = [], []
    with torch.no_grad():
        for batch in test_loader:
            batch = {k: v.to(device) for k, v in batch.items()}
            outputs = model(**batch)
            logits = outputs.logits
            predictions = torch.argmax(logits, dim=-1)
            y_true.extend(batch['labels'].tolist())
            y_pred.extend(predictions.tolist())
    return y_true, y_pred
y_true, y_pred = evaluate_model(model, test_loader)

# Print evaluation metrics
print(f"Accuracy: {accuracy_score(y_true, y_pred):.4f}")
print("Classification Report:\n", classification_report(y_true, y_pred))