# Install required packages
# !pip install streamlit torch torchvision matplotlib datasets transformers

# Import Libraries
import streamlit as st
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision  # used below for torchvision.utils.make_grid
from torchvision import datasets, models, transforms
from torch.utils.data import DataLoader
import numpy as np
import time
import os
import copy
import matplotlib.pyplot as plt
from transformers import Trainer, TrainingArguments  # imported for the illustrative Hugging Face section at the end
from datasets import load_dataset

# Streamlit Interface
st.title("Fine-Tuning ResNet for Custom Image Classification")

# Introduction Section
st.markdown("""
### Introduction
In this exercise, we will fine-tune a pre-trained ResNet model on a custom image classification task using PyTorch. The ResNet (Residual Network) architecture helps in training very deep neural networks by using skip connections to mitigate the vanishing gradient problem.
""")

# User Inputs
st.sidebar.header("Model Parameters")
input_size = st.sidebar.number_input("Input Size", value=224, min_value=32, step=1)
batch_size = st.sidebar.number_input("Batch Size", value=32, min_value=1, step=1)
num_epochs = st.sidebar.number_input("Number of Epochs", value=25, min_value=1, step=1)
learning_rate = st.sidebar.number_input("Learning Rate", value=0.001, min_value=0.0, step=0.001, format="%.4f")
momentum = st.sidebar.number_input("Momentum", value=0.9, min_value=0.0, max_value=1.0, step=0.05)

# Data Preparation Section
st.markdown("""
### Data Preparation
We will use the CIFAR-10 dataset, which contains 60,000 32x32 color images across 10 classes (50,000 for training, 10,000 for validation). The training images get a light augmentation (random horizontal flips), and both splits are resized and normalized with the ImageNet statistics the pre-trained ResNet expects.
""")

# The training transform adds a light augmentation; the validation transform
# only resizes and normalizes.
train_transform = transforms.Compose([
    transforms.Resize(input_size),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
val_transform = transforms.Compose([
    transforms.Resize(input_size),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])

train_dataset = datasets.CIFAR10(root='./data', train=True, download=True, transform=train_transform)
val_dataset = datasets.CIFAR10(root='./data', train=False, download=True, transform=val_transform)

train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=4)

dataloaders = {'train': train_loader, 'val': val_loader}
dataset_sizes = {'train': len(train_dataset), 'val': len(val_dataset)}
class_names = train_dataset.classes

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Visualize a few training images
st.markdown("#### Sample Training Images")
def imshow(inp, title=None):
    """Undo the ImageNet normalization on a tensor image and return a figure."""
    inp = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = std * inp + mean
    inp = np.clip(inp, 0, 1)
    fig, ax = plt.subplots()
    ax.imshow(inp)
    if title is not None:
        ax.set_title(title)
    ax.axis('off')
    return fig

inputs, classes = next(iter(dataloaders['train']))
out = torchvision.utils.make_grid(inputs[:8])  # grid of the first 8 images
st.pyplot(imshow(out, title=' '.join(class_names[x] for x in classes[:8])))

# Model Preparation Section
st.markdown("""
### Model Preparation
We will use a pre-trained ResNet-18 model and fine-tune the final fully connected layer to match the number of classes in our custom dataset.
""")

# Load Pre-trained ResNet Model
model_ft = models.resnet18(weights=models.ResNet18_Weights.DEFAULT)  # `pretrained=True` is deprecated
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, len(class_names))

model_ft = model_ft.to(device)
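
# Optional alternative (sketch): for feature extraction rather than full
# fine-tuning, freeze every pre-trained layer and train only the new head.
# If you uncomment this, pass model_ft.fc.parameters() to the optimizer below.
# for name, param in model_ft.named_parameters():
#     if not name.startswith('fc.'):
#         param.requires_grad = False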

# Define Loss Function and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer_ft = optim.SGD(model_ft.parameters(), lr=learning_rate, momentum=momentum)
exp_lr_scheduler = optim.lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)  # decay LR by 10x every 7 epochs

# Training Section
st.markdown("""
### Training
We will train the model using stochastic gradient descent (SGD) with momentum and a learning rate scheduler. The training and validation loss and accuracy will be plotted to monitor the training process.
""")

# Train and Evaluate the Model
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    train_loss_history = []
    val_loss_history = []
    train_acc_history = []
    val_acc_history = []

    for epoch in range(num_epochs):
        st.write('Epoch {}/{}'.format(epoch, num_epochs - 1))
        st.write('-' * 10)

        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()
            else:
                model.eval()

            running_loss = 0.0
            running_corrects = 0

            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)

                optimizer.zero_grad()

                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)

                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

            if phase == 'train':
                scheduler.step()

            epoch_loss = running_loss / dataset_sizes[phase]
            # .item() converts the 0-d tensor to a plain float so it can be plotted later
            epoch_acc = (running_corrects.double() / dataset_sizes[phase]).item()

            st.write('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))

            if phase == 'train':
                train_loss_history.append(epoch_loss)
                train_acc_history.append(epoch_acc)
            else:
                val_loss_history.append(epoch_loss)
                val_acc_history.append(epoch_acc)

            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())

    time_elapsed = time.time() - since
    st.write('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    st.write('Best val Acc: {:.4f}'.format(best_acc))

    model.load_state_dict(best_model_wts)
    
    # Plot training history on an explicit figure (st.pyplot expects a figure,
    # and plt.show() does nothing useful inside a Streamlit app)
    epochs_range = range(num_epochs)
    fig = plt.figure(figsize=(10, 5))
    plt.subplot(1, 2, 1)
    plt.plot(epochs_range, train_loss_history, label='Training Loss')
    plt.plot(epochs_range, val_loss_history, label='Validation Loss')
    plt.legend(loc='upper right')
    plt.title('Training and Validation Loss')

    plt.subplot(1, 2, 2)
    plt.plot(epochs_range, train_acc_history, label='Training Accuracy')
    plt.plot(epochs_range, val_acc_history, label='Validation Accuracy')
    plt.legend(loc='lower right')
    plt.title('Training and Validation Accuracy')
    st.pyplot(fig)
    
    return model

if st.button('Train Model'):
    model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs)
    # Save the Model
    torch.save(model_ft.state_dict(), 'fine_tuned_resnet.pth')
    st.write("Model saved as 'fine_tuned_resnet.pth'")

# Hugging Face Integration Section
st.markdown("""
### Hugging Face Integration
The Hugging Face `datasets` library can also load CIFAR-10, and the `transformers` Trainer API can automate a training loop like the one above. Trainer is built around transformer models rather than torchvision CNNs, so this section is illustrative only.
""")

# This part is just illustrative since Hugging Face's Trainer does not natively support ResNet. 
# However, you can still follow a similar approach for transformer models and NLP datasets.
if st.button('Train with Hugging Face'):
    st.write("This section is illustrative and typically used for NLP tasks with Hugging Face transformers.")