import subprocess
import sys

# Install the required packages (transformers and accelerate from source);
# sys.executable ensures pip targets the current interpreter
subprocess.check_call([sys.executable, "-m", "pip", "install", "-U", "git+https://github.com/huggingface/transformers.git"])
subprocess.check_call([sys.executable, "-m", "pip", "install", "-U", "git+https://github.com/huggingface/accelerate.git"])
subprocess.check_call([sys.executable, "-m", "pip", "install", "datasets", "evaluate", "scikit-learn", "torchvision"])
model_checkpoint = "microsoft/resnet-50"
batch_size = 128
from datasets import load_dataset
from evaluate import load

metric = load("accuracy")
# Load the dataset directly from Hugging Face
dataset = load_dataset("DamarJati/Face-Mask-Detection")

# Build label <-> id mappings from the dataset's class names
labels = dataset["train"].features["label"].names
label2id = {label: i for i, label in enumerate(labels)}
id2label = {i: label for i, label in enumerate(labels)}
from transformers import AutoImageProcessor

image_processor = AutoImageProcessor.from_pretrained(model_checkpoint)
print(image_processor)  # inspect the checkpoint's normalization stats and target size
from torchvision.transforms import (
    CenterCrop,
    ColorJitter,
    Compose,
    Normalize,
    RandomHorizontalFlip,
    RandomResizedCrop,
    RandomRotation,
    Resize,
    ToTensor,
)
# Normalize with the checkpoint's own statistics and use its expected input size
normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
size = image_processor.size["shortest_edge"]

# Training pipeline: random crop/flip/rotation/jitter for augmentation
train_transforms = Compose(
    [
        RandomResizedCrop(size),
        RandomHorizontalFlip(),
        RandomRotation(degrees=15),
        ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1),
        ToTensor(),
        normalize,
    ]
)
# Evaluation pipeline: deterministic resize + center crop only
# (no random augmentation at eval time, so metrics are reproducible)
val_transforms = Compose(
    [
        Resize(size),
        CenterCrop(size),
        ToTensor(),
        normalize,
    ]
)
def preprocess_train(example_batch):
    """Apply the train-time transforms to a batch of PIL images."""
    example_batch["pixel_values"] = [
        train_transforms(image.convert("RGB")) for image in example_batch["image"]
    ]
    return example_batch

def preprocess_val(example_batch):
    """Apply the eval-time transforms to a batch of PIL images."""
    example_batch["pixel_values"] = [
        val_transforms(image.convert("RGB")) for image in example_batch["image"]
    ]
    return example_batch
# Hold out 30% of the training split for validation (seeded for reproducibility)
splits = dataset["train"].train_test_split(test_size=0.3, seed=42)
train_ds = splits["train"]
val_ds = splits["test"]

train_ds.set_transform(preprocess_train)
val_ds.set_transform(preprocess_val)
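# Optional sanity check (a minimal sketch): set_transform applies lazily, so
# indexing one example should already yield a 3 x size x size float tensor.
sample = train_ds[0]
print(sample["pixel_values"].shape)  # expected: torch.Size([3, size, size])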
from transformers import AutoModelForImageClassification, Trainer, TrainingArguments

# ignore_mismatched_sizes swaps ResNet-50's 1000-class head for our label set
model = AutoModelForImageClassification.from_pretrained(
    model_checkpoint,
    label2id=label2id,
    id2label=id2label,
    ignore_mismatched_sizes=True,
)
model_name = model_checkpoint.split("/")[-1]

args = TrainingArguments(
    f"{model_name}-finetuned",
    remove_unused_columns=False,
    eval_strategy="epoch",  # named "evaluation_strategy" on transformers < 4.41
    save_strategy="epoch",
    save_total_limit=5,
    learning_rate=1e-3,
    per_device_train_batch_size=batch_size,
    gradient_accumulation_steps=2,
    per_device_eval_batch_size=batch_size,
    num_train_epochs=2,
    warmup_ratio=0.1,
    weight_decay=0.01,
    lr_scheduler_type="cosine",
    logging_steps=10,
    load_best_model_at_end=True,
    metric_for_best_model="accuracy",
)
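# Note: with per_device_train_batch_size=128 and gradient_accumulation_steps=2,
# the effective training batch size is 128 * 2 = 256 per device.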
import numpy as np

def compute_metrics(eval_pred):
    """Computes accuracy on a batch of predictions."""
    predictions = np.argmax(eval_pred.predictions, axis=1)
    return metric.compute(predictions=predictions, references=eval_pred.label_ids)
import torch

def collate_fn(examples):
    """Stack transformed images and labels into a model-ready batch."""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["label"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
trainer = Trainer(
    model=model,
    args=args,
    train_dataset=train_ds,
    eval_dataset=val_ds,
    tokenizer=image_processor,  # saves the processor with each checkpoint; newer releases prefer processing_class=
    compute_metrics=compute_metrics,
    data_collator=collate_fn,
)
# Train, then persist the final model, training metrics, and trainer state
train_results = trainer.train()
trainer.save_model()
trainer.log_metrics("train", train_results.metrics)
trainer.save_metrics("train", train_results.metrics)
trainer.save_state()
# Evaluate on the held-out split, then log and save the metrics
metrics = trainer.evaluate()
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)

print("Evaluation Metrics:")
for key, value in metrics.items():
    print(f"{key}: {value}")