# Install required packages
# !pip install streamlit torch torchvision matplotlib datasets transformers
# Import Libraries
import streamlit as st
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision  # needed for torchvision.utils.make_grid below
from torchvision import datasets, models, transforms
from torch.utils.data import DataLoader
import numpy as np
import time
import os
import copy
import matplotlib.pyplot as plt
from transformers import Trainer, TrainingArguments
from datasets import load_dataset
# Streamlit Interface
st.title("Fine-Tuning ResNet for Custom Image Classification")
# Introduction Section
st.markdown("""
### Introduction
In this exercise, we will fine-tune a pre-trained ResNet model on a custom image classification task using PyTorch. The ResNet (Residual Network) architecture makes very deep networks trainable by adding skip connections that let gradients flow around blocks of layers, mitigating the vanishing gradient problem.
""")
# User Inputs
st.sidebar.header("Model Parameters")
input_size = st.sidebar.number_input("Input Size", value=224)
batch_size = st.sidebar.number_input("Batch Size", value=32)
num_epochs = st.sidebar.number_input("Number of Epochs", value=25)
learning_rate = st.sidebar.number_input("Learning Rate", value=0.001)
momentum = st.sidebar.number_input("Momentum", value=0.9)
# Data Preparation Section
st.markdown("""
### Data Preparation
We will use the CIFAR-10 dataset, which contains 60,000 images from 10 classes. The dataset will be split into training and validation sets, and transformations will be applied to augment the data and normalize it.
""")
transform = transforms.Compose([
    transforms.Resize(input_size),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
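# Optional variant (assumption, not wired in): CIFAR-10 training is commonly
# augmented with random crops and horizontal flips before normalization. To use
# it, pass this transform to the training dataset instead of `transform`.
train_transform_augmented = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.RandomCrop(32, padding=4),
    transforms.Resize(input_size),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])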
train_dataset = datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
val_dataset = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=4)
dataloaders = {'train': train_loader, 'val': val_loader}
dataset_sizes = {'train': len(train_dataset), 'val': len(val_dataset)}
class_names = train_dataset.classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Visualize a few training images
st.markdown("#### Sample Training Images")
def imshow(inp, title=None):
    """Unnormalize a grid of tensor images and display it in Streamlit."""
    inp = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = std * inp + mean
    inp = np.clip(inp, 0, 1)
    fig, ax = plt.subplots()
    ax.imshow(inp)
    if title is not None:
        ax.set_title(title)
    st.pyplot(fig)
inputs, classes = next(iter(dataloaders['train']))
out = torchvision.utils.make_grid(inputs)
imshow(out, title=[class_names[x] for x in classes])
# Model Preparation Section
st.markdown("""
### Model Preparation
We will use a pre-trained ResNet-18 model and fine-tune the final fully connected layer to match the number of classes in our custom dataset.
""")
# Load Pre-trained ResNet Model
model_ft = models.resnet18(weights=models.ResNet18_Weights.DEFAULT)  # `pretrained=True` is deprecated
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, len(class_names))
model_ft = model_ft.to(device)
# Define Loss Function and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer_ft = optim.SGD(model_ft.parameters(), lr=learning_rate, momentum=momentum)
exp_lr_scheduler = optim.lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
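# Note: StepLR multiplies the learning rate by gamma every step_size epochs,
# i.e. lr(epoch) = learning_rate * 0.1 ** (epoch // 7) with the values above.
#
# Optional variant (assumption, not used here): "feature extraction" freezes
# the pre-trained backbone and trains only the new fc head, which is faster
# and often sufficient on small datasets:
# for param in model_ft.parameters():
#     param.requires_grad = False
# model_ft.fc = nn.Linear(num_ftrs, len(class_names))  # new layers default to requires_grad=True
# model_ft = model_ft.to(device)
# optimizer_ft = optim.SGD(model_ft.fc.parameters(), lr=learning_rate, momentum=momentum)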
# Training Section
st.markdown("""
### Training
We will train the model using stochastic gradient descent (SGD) with momentum and a learning rate scheduler. The training and validation loss and accuracy will be plotted to monitor the training process.
""")
# Train and Evaluate the Model
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Fine-tune the model and plot the loss/accuracy curves in Streamlit."""
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    train_loss_history = []
    val_loss_history = []
    train_acc_history = []
    val_acc_history = []
    for epoch in range(num_epochs):
        st.write('Epoch {}/{}'.format(epoch + 1, num_epochs))
        st.write('-' * 10)
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()
            else:
                model.eval()
            running_loss = 0.0
            running_corrects = 0
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                optimizer.zero_grad()
                # Track gradients only during the training phase
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()
            epoch_loss = running_loss / dataset_sizes[phase]
            # .item() converts the accuracy to a Python float so it can be plotted
            epoch_acc = (running_corrects.double() / dataset_sizes[phase]).item()
            st.write('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            if phase == 'train':
                train_loss_history.append(epoch_loss)
                train_acc_history.append(epoch_acc)
            else:
                val_loss_history.append(epoch_loss)
                val_acc_history.append(epoch_acc)
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
    time_elapsed = time.time() - since
    st.write('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    st.write('Best val Acc: {:.4f}'.format(best_acc))
    # Reload the best weights seen during validation
    model.load_state_dict(best_model_wts)
    # Plot training history
    epochs_range = range(num_epochs)
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))
    ax1.plot(epochs_range, train_loss_history, label='Training Loss')
    ax1.plot(epochs_range, val_loss_history, label='Validation Loss')
    ax1.legend(loc='upper right')
    ax1.set_title('Training and Validation Loss')
    ax2.plot(epochs_range, train_acc_history, label='Training Accuracy')
    ax2.plot(epochs_range, val_acc_history, label='Validation Accuracy')
    ax2.legend(loc='lower right')
    ax2.set_title('Training and Validation Accuracy')
    st.pyplot(fig)
    return model
if st.button('Train Model'):
    model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs)
    # Save the Model
    torch.save(model_ft.state_dict(), 'fine_tuned_resnet.pth')
    st.write("Model saved as 'fine_tuned_resnet.pth'")
# Hugging Face Integration Section
st.markdown("""
### Hugging Face Integration
We will use the Hugging Face library to load the dataset and prepare it for training. This integration will allow us to leverage the benefits of Hugging Face's powerful tools and APIs.
""")
# This part is just illustrative since Hugging Face's Trainer does not natively support ResNet.
# However, you can still follow a similar approach for transformer models and NLP datasets.
if st.button('Train with Hugging Face'):
    st.write("This section is illustrative and typically used for NLP tasks with Hugging Face transformers.")