# NOTE(review): the lines below were web-scrape residue (repository file-listing
# metadata: "Spaces:", "No application file", "File size: 10,483 Bytes",
# commit "d625688", and a rendered line-number gutter). They are not Python and
# made the module unparseable; preserved here as a comment.
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import multiprocessing as mp
from conf.config import Config
from datasets import get_datasets
import ray
import time
cfg = Config()
@ray.remote(num_gpus=cfg.NUM_GPUS)
def train_net_on_gpu(net, epochs=1):
    """Train ``net`` on the project datasets as a GPU-reserving ray task.

    Args:
        net: a ``torch.nn.Module`` classifier producing per-class logits.
        epochs: number of full passes over the training set.

    Returns:
        The summed validation loss of the final epoch (0.0 if ``epochs == 0``).
    """
    trainset, trainloader, validset, validloader = get_datasets()
    dev = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    print(f"The network will train on {dev} device")
    criterion = nn.CrossEntropyLoss()
    net = net.to(dev)
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    print("------TRAINING------")
    valid_loss = 0.0  # defined up front so the return is valid even for epochs == 0
    for epoch in range(epochs):  # loop over the dataset multiple times
        # BUG FIX: net.eval() set by the previous epoch's validation pass was
        # never undone; restore training mode (dropout/batch-norm) each epoch.
        net.train()
        running_loss = 0.0
        correct = 0.0
        for inputs, labels in trainloader:
            inputs, labels = inputs.to(dev), labels.to(dev)
            # zero the parameter gradients
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            # BUG FIX: the original compared raw logits to labels
            # ((outputs == labels)), which broadcasts (batch, classes) against
            # (batch,) and counts spurious matches; accuracy needs the argmax
            # class prediction.
            correct += (outputs.argmax(dim=1) == labels).float().sum().item()
        training_accuracy = 100 * correct / len(trainset)
        valid_loss = 0.0
        valid_correct = 0.0
        net.eval()  # inference mode for model-specific layers
        with torch.no_grad():  # no gradients needed during validation
            for data, labels in validloader:
                data, labels = data.to(dev), labels.to(dev)
                target = net(data)
                loss = criterion(target, labels)
                valid_loss += loss.item()
                # Same argmax fix as the training loop above.
                valid_correct += (target.argmax(dim=1) == labels).float().sum().item()
        valid_accuracy = 100 * valid_correct / len(validset)
        print(f'Training Epochs: {epoch}\t\t Loss: {running_loss}\t\t Train Accuracy: {training_accuracy}')
        print(f'\t\t Valid Loss: {valid_loss}\t\t Valid Accuracy: {valid_accuracy}')
    return valid_loss
@ray.remote(num_cpus=cfg.NUM_CPUS)
def train_net_on_cpu(net, epochs=1):
    """Train ``net`` on the project datasets as a CPU-reserving ray task.

    Args:
        net: a ``torch.nn.Module`` classifier producing per-class logits.
        epochs: number of full passes over the training set.

    Returns:
        The final epoch's validation loss averaged over validation batches
        (0.0 if ``epochs == 0``).
    """
    trainset, trainloader, validset, validloader = get_datasets()
    dev = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    print(f"The network will train on {dev} device")
    criterion = nn.CrossEntropyLoss()
    net = net.to(dev)
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    print("------TRAINING------")
    valid_loss = 0.0  # defined up front so the return is valid even for epochs == 0
    for epoch in range(epochs):  # loop over the dataset multiple times
        # BUG FIX: net.eval() set by the previous epoch's validation pass was
        # never undone; restore training mode (dropout/batch-norm) each epoch.
        # (Also dropped: running_loss / valid_running_loss counters whose only
        # consumers were commented-out per-2000-batch prints.)
        net.train()
        training_loss = 0.0
        correct = 0.0
        total = 0.0
        for inputs, labels in trainloader:
            inputs, labels = inputs.to(dev), labels.to(dev)
            # zero the parameter gradients
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            training_loss += loss.item()
            total += labels.size(0)
            # BUG FIX: the original compared raw logits to labels
            # ((outputs == labels)), which broadcasts and counts spurious
            # matches; accuracy needs the argmax class prediction.
            correct += (outputs.argmax(dim=1) == labels).float().sum().item()
        training_accuracy = 100 * correct / total
        valid_loss = 0.0
        valid_correct = 0.0
        val_total = 0.0
        net.eval()  # inference mode for model-specific layers
        with torch.no_grad():  # no gradients needed during validation
            for data, labels in validloader:
                data, labels = data.to(dev), labels.to(dev)
                target = net(data)
                loss = criterion(target, labels)
                valid_loss += loss.item()
                val_total += labels.size(0)
                # Same argmax fix as the training loop above.
                valid_correct += (target.argmax(dim=1) == labels).float().sum().item()
        valid_accuracy = 100 * valid_correct / val_total
        print(f'Training Epochs: {epoch}\t\t Training Loss: {training_loss / len(trainloader)}\t\t Train Accuracy: {training_accuracy}')
        print(f'\t\t Valid Loss: {valid_loss / len(validloader)}\t\t Valid Accuracy: {valid_accuracy}')
    return valid_loss / len(validloader)
@ray.remote(num_cpus=cfg.NUM_CPUS)
def forward_pass_on_cpu(net):
    """Run a full inference pass of ``net`` over the training set (CPU ray task).

    Args:
        net: a ``torch.nn.Module`` classifier producing per-class logits.

    Returns:
        Tuple of (mean per-batch loss over the training loader, wall-clock
        inference time in seconds).
    """
    trainset, trainloader, validset, validloader = get_datasets()
    dev = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    print(f"The network will train on {dev} device")
    print("------Forward passing------")
    criterion = nn.CrossEntropyLoss()
    net = net.to(dev)
    net.eval()  # inference mode; the unused SGD optimizer was removed
    running_loss = 0.0
    correct = 0.0
    start = time.time()
    with torch.no_grad():  # pure forward pass — no gradient bookkeeping
        for data, target in trainloader:
            # BUG FIX: inputs/labels were never moved to `dev`, which crashes
            # when the model lives on the GPU.
            data, target = data.to(dev), target.to(dev)
            output = net(data)
            loss = criterion(output, target)
            running_loss += loss.item()
            # BUG FIX: the original compared raw logits to labels; accuracy
            # needs the argmax class prediction.
            correct += (output.argmax(dim=1) == target).float().sum().item()
    inference_time = time.time() - start
    accuracy = 100 * correct / len(trainset)
    # BUG FIX: the loss was averaged by len(validloader) even though the loop
    # iterates the *training* loader.
    avg_loss = running_loss / len(trainloader)
    print(f'\t\t Loss: {avg_loss}\t\t Accuracy: {accuracy}\t\t Inference Time: {inference_time}')
    return avg_loss, inference_time
@ray.remote(num_gpus=cfg.NUM_GPUS)
def forward_pass_on_gpu(net):
    """Run a full inference pass of ``net`` over the training set (GPU ray task).

    Args:
        net: a ``torch.nn.Module`` classifier producing per-class logits.

    Returns:
        Tuple of (mean per-batch loss over the training loader, wall-clock
        inference time in seconds).
    """
    trainset, trainloader, validset, validloader = get_datasets()
    dev = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    print(f"The network will train on {dev} device")
    print("------Forward passing------")
    criterion = nn.CrossEntropyLoss()
    net = net.to(dev)
    net.eval()  # inference mode; the unused SGD optimizer was removed
    running_loss = 0.0
    correct = 0.0
    start = time.time()
    with torch.no_grad():  # pure forward pass — no gradient bookkeeping
        for data, target in trainloader:
            # BUG FIX: inputs/labels were never moved to `dev`, which crashes
            # when the model lives on the GPU.
            data, target = data.to(dev), target.to(dev)
            output = net(data)
            loss = criterion(output, target)
            running_loss += loss.item()
            # BUG FIX: the original compared raw logits to labels; accuracy
            # needs the argmax class prediction.
            correct += (output.argmax(dim=1) == target).float().sum().item()
    inference_time = time.time() - start
    accuracy = 100 * correct / len(trainset)
    # BUG FIX: the loss was averaged by len(validloader) even though the loop
    # iterates the *training* loader.
    avg_loss = running_loss / len(trainloader)
    print(f'\t\t Loss: {avg_loss}\t\t Accuracy: {accuracy}\t\t Inference Time: {inference_time}')
    return avg_loss, inference_time
def multiprocess_training(training_func, num_processes, *args, **kwargs):
    """Fan ``training_func`` out over a multiprocessing pool.

    Each element of ``zip(*args)`` (a tuple of one item per positional
    iterable) becomes one work item; ``training_func`` receives that tuple as
    its single argument. ``kwargs`` is accepted for interface compatibility
    but is not forwarded to the workers.

    Args:
        training_func: picklable callable applied to each argument tuple.
        num_processes: number of worker processes in the pool.
        *args: parallel iterables of per-task arguments.

    Returns:
        List of results in input order.
    """
    pool = mp.Pool(num_processes)
    # BUG FIX: the pool was leaked if map() raised; close/join now always run.
    try:
        results = pool.map(training_func, zip(*args))
    finally:
        pool.close()
        pool.join()
    return results
def transfer_weights(netA, netB, layer_type):
    """Copy weights (and biases, if present) of every ``layer_type`` layer
    from ``netA`` into the correspondingly named layer of ``netB``.

    Args:
        netA: source network.
        netB: destination network; matching layers must have identical shapes.
        layer_type: class name to match, e.g. ``"Conv2d"`` or ``"Linear"``.

    Returns:
        ``netB`` with the transferred parameters loaded.
    """
    paramsA = netA.state_dict()
    paramsB = netB.state_dict()
    for name, module in netA.named_modules():
        if type(module).__name__ == layer_type:
            # BUG FIX: the original branched on len(bias.shape) to pick a
            # slicing pattern for the *weight* tensor — bias is 1-D for
            # standard layers, so only the 1-D branch ever ran, and layers
            # created with bias=False raised KeyError. Ellipsis indexing
            # copies in place for any rank, and the bias copy is guarded.
            paramsB[name + '.weight'][...] = paramsA[name + '.weight']
            if name + '.bias' in paramsB:
                paramsB[name + '.bias'][...] = paramsA[name + '.bias']
    # Set the state dict of netB to the updated parameters
    netB.load_state_dict(paramsB)
    return netB
def detect_layers(model):
    """Return the unique class names of all layers nested inside ``model``.

    ``nn.Sequential`` containers are traversed but not reported; every other
    ``nn.Module`` child is recorded and then itself traversed. Order of the
    returned list is arbitrary.
    """
    seen = set()

    def _walk(module):
        # Recurse through the module tree, collecting non-container layer names.
        for child in module.children():
            if isinstance(child, nn.Sequential):
                _walk(child)
            elif isinstance(child, nn.Module):
                seen.add(child.__class__.__name__)
                _walk(child)

    _walk(model)
    return list(seen)