st83468 | I want to use a custom filter in a CNN. The filter has size 5*5 and each entry is a function of three variables: theta, Lambda, psi. There are two convolutional layers followed by two fully connected layers. I tested my filter on the MNIST dataset. But when I run it on the GPU, I encounter the error: RuntimeError: expected backend CUDA and dtype Float but got backend CPU and dtype Float. I guess it might be due to how I generate the filter box, but I cannot find exactly where I made the mistake. Basically I modified this example code: I only changed the network structure to use my custom filter; training and testing remained unchanged. I attach my code here. Thank you!
from __future__ import print_function
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torchvision import datasets, transforms

class Net(nn.Module):
    def __init__(self, kernel_size, in_channels, channel1, channel2):
        super(Net, self).__init__()
        self.theta1, self.Lambda1, self.psi1, self.bias1 = self.generate_parameters(channel1, in_channels)
        self.filter1 = self.whole_filter(in_channels, channel1, kernel_size, self.theta1, self.Lambda1, self.psi1).cuda()
        self.theta2, self.Lambda2, self.psi2, self.bias2 = self.generate_parameters(channel2, channel1)
        self.filter2 = self.whole_filter(channel1, channel2, kernel_size, self.theta2, self.Lambda2, self.psi2).cuda()
        self.fc1 = nn.Linear(4*4*50, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        x = F.conv2d(x, self.filter1, bias=self.bias1)
        x = F.max_pool2d(x, 2, 2)
        x = F.conv2d(x, self.filter2, bias=self.bias2)
        x = F.max_pool2d(x, 2, 2)
        x = x.view(-1, 4*4*50)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)

    def generate_parameters(self, dim_out, dim_in):
        theta = nn.Parameter(torch.randn(dim_out, dim_in))
        Lambda = nn.Parameter(torch.randn(dim_out, dim_in))
        psi = nn.Parameter(torch.randn(dim_out, dim_in))
        bias = nn.Parameter(torch.randn(dim_out))
        return theta, Lambda, psi, bias

    def whole_filter(self, in_channels, out_channels, kernel_size, theta_column, Lambda_column, psi_column):
        result = torch.zeros(out_channels, in_channels, kernel_size, kernel_size)  # (out_channels, in_channels/groups, kH, kW)
        for i in range(out_channels):
            result[i] = self.one_filter(in_channels, kernel_size, theta_column[i], Lambda_column[i], psi_column[i])
        return result

    def one_filter(self, in_channels, kernel_size, theta, Lambda, psi):
        result = torch.zeros(in_channels, kernel_size, kernel_size)
        for i in range(in_channels):
            result[i] = self.filter_fn(theta[i], Lambda[i], psi[i], kernel_size)
        return result

    def filter_fn(self, theta, Lambda, psi, kernel_size):
        # Bounding box
        half_size = (kernel_size - 1) // 2
        ymin, xmin = -half_size, -half_size
        ymax, xmax = half_size, half_size
        (y, x) = np.meshgrid(np.arange(ymin, ymax + 1), np.arange(xmin, xmax + 1))
        y = torch.FloatTensor(y)
        x = torch.FloatTensor(x)
        # Rotation
        x_theta = x * torch.cos(theta) + y * torch.sin(theta)
        y_theta = -x * torch.sin(theta) + y * torch.cos(theta)
        box = torch.cos(y_theta) * torch.sin(2 * np.pi / Lambda * x_theta + psi)
        return box

def train(args, model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward(retain_graph=True)
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))

def test(args, model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))

def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=False,
                        help='For Saving the current Model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=False, transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])),
        batch_size=args.test_batch_size, shuffle=True, **kwargs)
    model = Net(5, 1, 20, 50).to(device)
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
    for param in model.parameters():
        print(type(param.data), param.size())
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(args, model, device, test_loader)
    if (args.save_model):
        torch.save(model.state_dict(), "mnist_cnn.pt")

if __name__ == '__main__':
    main() |
st83469 | Solved by ptrblck in post #2
Try to use nn.Parameter for your return values in whole_filter and one_filter, as this will properly register these filters as internal parameters, and will thus push them also to the GPU in the model.to(device) call. |
st83470 | Try to use nn.Parameter for your return values in whole_filter and one_filter, as this will properly register these filters as internal parameters, and will thus push them also to the GPU in the model.to(device) call. |
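For illustration, a minimal, self-contained sketch (not from the thread) of why the nn.Parameter wrapping matters: a plain tensor assigned as a module attribute is not registered, so model.to(device) will not move it, while an nn.Parameter is registered and moved along with the rest of the model.

import torch
import torch.nn as nn

class Demo(nn.Module):
    def __init__(self):
        super(Demo, self).__init__()
        self.plain = torch.randn(3, 3)                      # not registered, ignored by model.to(device)
        self.registered = nn.Parameter(torch.randn(3, 3))   # registered, moved by model.to(device)

demo = Demo()
print([name for name, _ in demo.named_parameters()])  # ['registered']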
st83471 | Hi Ptrblck, I added nn.Parameter and it works! Thank you very much!! A further question: when I deleted .cuda() in self.filter1 and self.filter2, the parameters have size:
<class 'torch.Tensor'> torch.Size([20, 1])
<class 'torch.Tensor'> torch.Size([20, 1])
<class 'torch.Tensor'> torch.Size([20, 1])
<class 'torch.Tensor'> torch.Size([20])
<class 'torch.Tensor'> torch.Size([20, 1, 5, 5])
<class 'torch.Tensor'> torch.Size([50, 20])
<class 'torch.Tensor'> torch.Size([50, 20])
<class 'torch.Tensor'> torch.Size([50, 20])
<class 'torch.Tensor'> torch.Size([50])
<class 'torch.Tensor'> torch.Size([50, 20, 5, 5])
<class 'torch.Tensor'> torch.Size([500, 800])
<class 'torch.Tensor'> torch.Size([500])
<class 'torch.Tensor'> torch.Size([10, 500])
<class 'torch.Tensor'> torch.Size([10])
It’s wrong: I should not have parameters of size [20, 1, 5, 5] and [50, 20, 5, 5], since I am not using a conventional convolution kernel here. Moreover, the loss is nan during training. But when I keep .cuda() there, the parameter list is correct and training is ok:
<class 'torch.Tensor'> torch.Size([20, 1])
<class 'torch.Tensor'> torch.Size([20, 1])
<class 'torch.Tensor'> torch.Size([20, 1])
<class 'torch.Tensor'> torch.Size([20])
<class 'torch.Tensor'> torch.Size([50, 20])
<class 'torch.Tensor'> torch.Size([50, 20])
<class 'torch.Tensor'> torch.Size([50, 20])
<class 'torch.Tensor'> torch.Size([50])
<class 'torch.Tensor'> torch.Size([500, 800])
<class 'torch.Tensor'> torch.Size([500])
<class 'torch.Tensor'> torch.Size([10, 500])
<class 'torch.Tensor'> torch.Size([10])
So I am curious why I cannot delete .cuda(), since you mentioned that nn.Parameter already put the filters on GPU. Thanks a lot! |
st83472 | The .cuda() call shouldn’t change the shape of a tensor.
Could you point me to the line of code or tensor name, which has the shape [20, 1, 5, 5] now? |
st83473 | That happens when I dropped .cuda() at line 16 and 19:
self.filter1 = self.whole_filter(in_channels, channel1, kernel_size, self.theta1, self.Lambda1, self.psi1).cuda()
self.filter2 = self.whole_filter(channel1, channel2, kernel_size, self.theta2, self.Lambda2, self.psi2).cuda()
My guess is that after I dropped .cuda(), when I call x = F.conv2d(x, self.filter1, bias=self.bias1) on line 25, self.filter1 is not on the GPU, so PyTorch generates a filter of the corresponding size and does the convolution. |
st83474 | copyrightly:
so PyTorch generates a filter of the corresponding size and does the convolution.
That shouldn’t be the case, as PyTorch luckily raises a loud RuntimeError in that situation:
x = torch.randn(1, 1, 4, 4, device='cuda')
filt = torch.randn(6, 1, 3, 3, device='cuda')
output = F.conv2d(x, filt) # works
filt = filt.cpu()
output = F.conv2d(x, filt)
> RuntimeError: Input type (torch.cuda.FloatTensor) and weight type (torch.FloatTensor) should be the same |
st83475 | Thank you for your reply. I found out that when I dropped .cuda(), self.filter1 and self.filter2 appear in model.parameters(); that’s where those extra shapes come from. Furthermore, in this case self.bias1 and self.bias2 have the value nan, although self.theta1, self.Lambda1, self.psi1 have their values. This makes me quite confused. |
st83476 | copyrightly:
I found out that when I dropped .cuda(), self.filter1 and self.filter2 appear in model.parameters(); that’s where those extra shapes come from.
filter1 and filter2 shouldn’t be returned by model.parameters() regardless of the cuda() call, as they are defined as tensors, not nn.Parameters.
If you want to register them as parameters, wrap them in nn.Parameter.
copyrightly:
Further in this case, self.bias1 and self.bias2 have value nan , although self.theta1, self.Lambda1, self.psi1 have their value. This makes me quite confused.
I removed the cuda() call, and changed both filters to be parameters:
def whole_filter(self, in_channels, out_channels, kernel_size, theta_column, Lambda_column, psi_column):
    result = torch.zeros(out_channels, in_channels, kernel_size, kernel_size)  # (out_channels, in_channels/groups, kH, kW)
    for i in range(out_channels):
        result[i] = self.one_filter(in_channels, kernel_size, theta_column[i], Lambda_column[i], psi_column[i])
    return nn.Parameter(result)
It seems the model is working for at least one epoch:
...
Train Epoch: 1 [59520/60000 (99%)] Loss: 0.054967
Test set: Average loss: 0.1043, Accuracy: 9678/10000 (97%) |
st83477 | I think I did the same thing as you; however, the result is different. Could you run the following code to have a look? (I added nn.Parameter to both one_filter and whole_filter and removed cuda().) Thank you very much for your time!
from __future__ import print_function
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torchvision import datasets, transforms

class Net(nn.Module):
    def __init__(self, kernel_size, in_channels, channel1, channel2):
        super(Net, self).__init__()
        self.theta1, self.Lambda1, self.psi1, self.bias1 = self.generate_parameters(channel1, in_channels)
        self.filter1 = self.whole_filter(in_channels, channel1, kernel_size, self.theta1, self.Lambda1, self.psi1)
        self.theta2, self.Lambda2, self.psi2, self.bias2 = self.generate_parameters(channel2, channel1)
        self.filter2 = self.whole_filter(channel1, channel2, kernel_size, self.theta2, self.Lambda2, self.psi2)
        self.fc1 = nn.Linear(4*4*50, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        x = F.conv2d(x, self.filter1, bias=self.bias1)
        x = F.max_pool2d(x, 2, 2)
        x = F.conv2d(x, self.filter2, bias=self.bias2)
        x = F.max_pool2d(x, 2, 2)
        x = x.view(-1, 4*4*50)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)

    def generate_parameters(self, dim_out, dim_in):
        theta = nn.Parameter(torch.randn(dim_out, dim_in))
        Lambda = nn.Parameter(torch.randn(dim_out, dim_in))
        psi = nn.Parameter(torch.randn(dim_out, dim_in))
        bias = nn.Parameter(torch.randn(dim_out))
        return theta, Lambda, psi, bias

    def whole_filter(self, in_channels, out_channels, kernel_size, theta_column, Lambda_column, psi_column):
        result = torch.zeros(out_channels, in_channels, kernel_size, kernel_size)  # (out_channels, in_channels/groups, kH, kW)
        for i in range(out_channels):
            result[i] = self.one_filter(in_channels, kernel_size, theta_column[i], Lambda_column[i], psi_column[i])
        return nn.Parameter(result)

    def one_filter(self, in_channels, kernel_size, theta, Lambda, psi):
        result = torch.zeros(in_channels, kernel_size, kernel_size)
        for i in range(in_channels):
            result[i] = self.filter_fn(theta[i], Lambda[i], psi[i], kernel_size)
        return nn.Parameter(result)

    def filter_fn(self, theta, Lambda, psi, kernel_size):
        # Bounding box
        half_size = (kernel_size - 1) // 2
        ymin, xmin = -half_size, -half_size
        ymax, xmax = half_size, half_size
        (y, x) = np.meshgrid(np.arange(ymin, ymax + 1), np.arange(xmin, xmax + 1))
        y = torch.FloatTensor(y)
        x = torch.FloatTensor(x)
        # Rotation
        x_theta = x * torch.cos(theta) + y * torch.sin(theta)
        y_theta = -x * torch.sin(theta) + y * torch.cos(theta)
        box = torch.cos(y_theta) * torch.sin(2 * np.pi / Lambda * x_theta + psi)
        return box

def train(args, model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward(retain_graph=True)
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))

def test(args, model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))

def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=False,
                        help='For Saving the current Model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=False, transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])),
        batch_size=args.test_batch_size, shuffle=True, **kwargs)
    model = Net(5, 1, 20, 50).to(device)
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
    for param in model.parameters():
        print(type(param.data), param.size())
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(args, model, device, test_loader)
    # for param in model.parameters():
    #     print(param.size(), param.data)
    # print(model.state_dict())
    for name, param in model.state_dict().items():
        print(name, param)
    if (args.save_model):
        torch.save(model.state_dict(), "mnist_cnn.pt")

if __name__ == '__main__':
    main() |
st83478 | You are right! It seems I was just lucky to have used another seed (in fact, I ran the code line by line a few times), and your parameter initialization might be quite sensitive.
If you set the seed to 2809 you should get valid results.
However, I would recommend trying to stabilize the training. |
st83479 | Thank you! I set the seed and it works! But the old question still confuses me: why do filter1 and filter2 appear in model.parameters() when I remove cuda()? (I printed out the parameters in the code above.) |
st83480 | They won’t. If I remove cuda() and the nn.Parameter() wrapping, I’ll get:
theta1
Lambda1
psi1
bias1
theta2
Lambda2
psi2
bias2
fc1.weight
fc1.bias
fc2.weight
fc2.bias
Wrapping them in an nn.Parameter will make them appear in model.parameters().
Could you check, if you were calling nn.Parameter or self.register_parameter somewhere else in the code? |
st83481 | I added nn.Parameter to the returned values of one_filter and whole_filter as you mentioned, and I didn’t use self.register_parameter. In my code, filter1 and filter2 are the returned values of whole_filter; does this make them appear in model.parameters()? But when I added cuda(), they would not appear. You may run my code above to see it. Thanks a lot again! |
st83482 | I might have misunderstood your use case.
If you call .cuda() on an nn.Parameter, it won’t be a leaf variable anymore, which is explained here, and thus won’t appear in .parameters(). |
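A small sketch of that behavior (assuming a CUDA device is available): .cuda() on an nn.Parameter returns a new, non-leaf tensor, so assigning its result to a module attribute no longer registers a parameter, which is why the filters only show up in model.parameters() once the .cuda() call is removed.

import torch
import torch.nn as nn

p = nn.Parameter(torch.randn(2, 2))
print(type(p), p.is_leaf)   # <class 'torch.nn.parameter.Parameter'> True

q = p.cuda()                # requires a GPU
print(type(q), q.is_leaf)   # <class 'torch.Tensor'> False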
st83483 | Hi All,
How can I modify the deeplabv3_resnet101 and fcn_resnet101 models available from torchvision segmentation models to accept input images with only 1 color channel?
I have seen some examples of how I can modify resnet, but I am not sure how to do it for these models.
Thanks
Nishanth |
st83484 | You could replace the first conv layer in a similar manner as done with resnet:
model = torchvision.models.segmentation.deeplabv3_resnet101()
model.backbone.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
x = torch.randn(2, 1, 224, 224)
output = model(x) |
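Presumably the same replacement works for fcn_resnet101, since it shares the ResNet-101 backbone; a short, untested sketch along the same lines:

import torch
import torch.nn as nn
import torchvision

model = torchvision.models.segmentation.fcn_resnet101()
model.backbone.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
x = torch.randn(2, 1, 224, 224)
output = model(x)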
st83485 | Hi everyone,
Excuse me If you find my question very intuitive because I’m still new to Pytorch.
I have some confusion about the torch.nn.LSTMCell class.
According to the documentation, the inputs to the LSTM cell as follows: input and (h_0, c_0), where input is of shape (batch, input_size) which is a tensor containing input features.
The problem is that I can’t understand why in the provided example the given input has different dimensions, instead of being:
input(batch,input_size)
the given input is:
input(seq_len,input_size)
As far as I understand, the number 6 in the example represents the batch size.
Here is the example used in the documentation:
>>> rnn = nn.LSTMCell(10, 20)
>>> input = torch.randn(6, 3, 10)
>>> hx = torch.randn(3, 20)
>>> cx = torch.randn(3, 20)
>>> output = []
>>> for i in range(6):
...     hx, cx = rnn(input[i], (hx, cx))
...     output.append(hx) |
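For reference, a minimal annotated sketch of the documented shapes (an editorial addition, not part of the original post): nn.LSTMCell consumes one time step per call, so in the example above the loop over 6 iterates over time steps and 3 is the batch size.

import torch
import torch.nn as nn

rnn = nn.LSTMCell(10, 20)        # input_size=10, hidden_size=20
seq = torch.randn(6, 3, 10)      # (seq_len=6, batch=3, input_size=10)
hx = torch.randn(3, 20)          # (batch, hidden_size)
cx = torch.randn(3, 20)
for t in range(seq.size(0)):     # each step gets an input of shape (batch, input_size)
    hx, cx = rnn(seq[t], (hx, cx))
print(hx.shape)                  # torch.Size([3, 20])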
st83486 | Hello,
I am working on the Dsprites Dataset and have created a Causal Variational Auto Encoder. I am trying to answer counterfactual queries like “given this image of a heart with this orientation, position, and scale, what would it have looked like if it were a square?”
While building the Structural Causal Model and conditioning on it, I am getting the runtime error log_vml_cpu not implemented for 'Long' while running inference. This looks like some GPU-to-CPU issue in Pyro or PyTorch, but I am not sure. Here is the error:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-35-a6f1c970e088> in <module>()
13 #posterior = MCMC(kernel, num_samples=1000, warmup_steps=50)
14
---> 15 posterior = pyro.infer.Importance(conditioned_model, num_samples = 1).run(vae, mu, sigma)
16 #posterior.run(vae, mu, sigma)
17
3 frames
/usr/local/lib/python3.6/dist-packages/pyro/infer/abstract_infer.py in run(self, *args, **kwargs)
222 self._reset()
223 with poutine.block():
--> 224 for i, vals in enumerate(self._traces(*args, **kwargs)):
225 if len(vals) == 2:
226 chain_id = 0
/usr/local/lib/python3.6/dist-packages/pyro/infer/importance.py in _traces(self, *args, **kwargs)
48 model_trace = poutine.trace(
49 poutine.replay(self.model, trace=guide_trace)).get_trace(*args, **kwargs)
---> 50 log_weight = model_trace.log_prob_sum() - guide_trace.log_prob_sum()
51 yield (model_trace, log_weight)
52
/usr/local/lib/python3.6/dist-packages/pyro/poutine/trace_struct.py in log_prob_sum(self, site_filter)
189 else:
190 try:
--> 191 log_p = site["fn"].log_prob(site["value"], *site["args"], **site["kwargs"])
192 except ValueError:
193 _, exc_value, traceback = sys.exc_info()
/usr/local/lib/python3.6/dist-packages/pyro/distributions/delta.py in log_prob(self, x)
58 def log_prob(self, x):
59 v = self.v.expand(self.shape())
---> 60 log_prob = (x == v).type(x.dtype).log()
61 log_prob = sum_rightmost(log_prob, self.event_dim)
62 return log_prob + self.log_density
RuntimeError: log_vml_cpu not implemented for 'Long'
Here is the code of my SCM:
from pyro.infer.importance import Importance
from pyro.infer.mcmc import MCMC
from pyro.infer.mcmc.nuts import HMC
intervened_model = pyro.do(SCM, data={"Y_shape": torch.tensor(1)})
conditioned_model = pyro.condition(intervened_model, data={
    "X": recon_x1,
    "Y_shape": torch.tensor(0),
    "Z": z1})
#kernel = HMC(conditioned_model, step_size=0.8, num_steps=4)
#posterior = MCMC(kernel, num_samples=1000, warmup_steps=50)
posterior = pyro.infer.Importance(conditioned_model, num_samples = 1).run(vae, mu, sigma)
#posterior.run(vae, mu, sigma)
marginal = posterior.EmpiricalMarginal(posterior, )
print(type(posterior))
print(posterior)
result = []
for i in range(10):
    trace = posterior()
    x = trace.nodes['Nx']['value']
    y = trace.nodes['Ny']['value']
    z = trace.nodes['Nz']['value']
    con_obj = pyro.condition(intervened_model, data={"Nx": x, "Ny": y, "Nz": z})
    # result.append(con_obj()[2])
    # recon_x2, y2, z2 = con_obj(vae, mu, sigma)
    # print(y2)
    # recon_check(recon_x1.reshape(-1, 64, 64)[0], recon_x2.reshape(-1, 64, 64)[0])
Please let me know how to debug this or what the issue is. Highly appreciated |
st83487 | Could you post the type() of the data you are passing, i.e. recon_x1, vae, mu, etc.?
It seems the internal cast to x.dtype is creating the error, but I’m not sure, which input value corresponds to x. |
st83488 | Thanks! Could you post the recon_x1.type() etc.?
This yields more information than the built-in Python type method.
PS: It’s better to post code snippets than images. |
st83489 | Hey, I think I have resolved that particular error for now. Thank you for your support, much appreciated! |
st83490 | The issue was in dist.Delta(Nx < p.cpu()). @neerajprad helped on the pyro forum with this. https://forum.pyro.ai/t/runtimeerror-log-vml-cpu-not-implemented-for-long/1206/7?u=viralpandey |
st83491 | I often use torch.manual_seed in my code, and I also set the same seed for numpy and native Python’s random.
But I noticed that there is also torch.cuda.manual_seed. I definitely use a single GPU.
So what happens if I do not set torch.cuda.manual_seed? For example, torch.randn returns the same values without torch.cuda.manual_seed. So I want to know in what situations I should use cuda’s manual_seed.
So the following code will be better?
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed) |
st83492 | Solved by amity137 in post #4
Hey but shouldn’t torch.manual_seed take care of both as written in https://pytorch.org/docs/stable/notes/randomness.html
You can use torch.manual_seed() to seed the RNG for all devices (both CPU and CUDA) |
st83493 | The cuda manual seed should be set if you want to have reproducible results when using random generation on the gpu, for example if you do torch.cuda.FloatTensor(100).uniform_(). |
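A small sketch of the GPU-side randomness albanD mentions (assumes a CUDA device; re-seeding the CUDA generator reproduces the same draws):

import torch

torch.cuda.manual_seed(0)
a = torch.cuda.FloatTensor(100).uniform_()
torch.cuda.manual_seed(0)
b = torch.cuda.FloatTensor(100).uniform_()
print(torch.equal(a, b))  # True: same seed, same GPU-side random numbers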
st83494 | Thank you!
I usually do not write such code; however, I should still call the seed function. |
st83495 | Hey but shouldn’t torch.manual_seed take care of both as written in https://pytorch.org/docs/stable/notes/randomness.html
You can use torch.manual_seed() to seed the RNG for all devices (both CPU and CUDA) |
st83496 | Yes, the behavior was changed some time ago and was most likely different when @albanD answered in this thread. |
st83497 | I was just wondering best practice for using seeding. I’m using
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
for running experiments on a new loss function, with the changed loss and with the standard loss. Is it best to keep using a specific seed value or to vary the seed? I’m thinking some seeds may affect initialisation and therefore lead to a better solution, in the spirit of “all you need is a good init”…
I’m training two models simultaneously in the same script, so should I have the above seed lines prior to instantiating each model individually to ensure the same initialisation? For a fair comparison of one loss over the other.
Is it wise using a seed for this type of research in general? |
st83498 | To be reproducible you may try all this:
seed_value= 0
# 1. Set `PYTHONHASHSEED` environment variable at a fixed value
import os
os.environ['PYTHONHASHSEED']=str(seed_value)
# 2. Set `python` built-in pseudo-random generator at a fixed value
import random
random.seed(seed_value)
# 3. Set `numpy` pseudo-random generator at a fixed value
import numpy as np
np.random.seed(seed_value)
# 4. Set `pytorch` pseudo-random generator at a fixed value
import torch
torch.manual_seed(seed_value) |
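A possible extension of the list above (an editorial addition, in line with the reproducibility notes linked earlier in the thread): also seed the CUDA generators and, if bit-exact GPU results matter, restrict cuDNN to deterministic algorithms.

# 5. Set the `pytorch` CUDA generators (all GPUs) at a fixed value
torch.cuda.manual_seed_all(seed_value)

# 6. Optionally trade speed for determinism in cuDNN
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False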
st83499 | I have an application where I am training and discarding a large number of models (neural architecture search). After running for a while, I begin to get GPU OOMs. I have not completely ruled out the idea that the models are getting excessively large, but based on what I’ve been able to debug I do not think this is the issue (especially since the models are not particularly large…). My guess is that each model allocates a chunk of the GPU’s RAM and then does not release the memory when it is destroyed (or “releases” it, but PyTorch doesn’t actually free up the memory). My question is: is there a simple way to make sure the allocated memory is released? |
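One common pattern for this situation (a sketch only, not from this thread; search_space, build_model and train_and_score are hypothetical helpers): drop every Python reference to the discarded model before asking the caching allocator to release its blocks, since torch.cuda.empty_cache() can only return memory that is no longer referenced.

import gc
import torch

for candidate in search_space:             # hypothetical list of architectures
    model = build_model(candidate).cuda()  # hypothetical helper
    train_and_score(model)                 # hypothetical helper
    del model                              # drop the last reference to the model's tensors
    gc.collect()
    torch.cuda.empty_cache()               # return cached, unreferenced blocks to the driver
    print(torch.cuda.memory_allocated())   # should drop back down between candidates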
st83500 | I have a PyTorch tensor of shape ((1,512,16,3)).
The (16,3) part of the tensor represents 16 - (x,y,z) points.
These 16 points could be unique points or repeated.
I want to convert it to a list of 512 lists only with unique points.
Dimensions of the resulting list of lists: (1, 512, p, 3), i.e. one list which holds 512 lists, each of varying size “p”, which in turn contain lists of 3 coordinates.
So far I tried using two for loops to fetch the (16,3) points and use torch.unique to pick the unique points. But 2 for loops make my code slower.
Please let me know if there is a function to do it in PyTorch.
Thanks in advance. |
st83501 | If I understand the use case correctly, p won’t be a fixed number, but is varying for each of the 512 entries, e.g.:
x = torch.randn(1, 512, 16, 3)
# your operation
result = ...
print(result[0, 0].shape) # [4, 3]
print(result[0, 1].shape) # [7, 3]
...
If that’s the case, you won’t be able to create the result as a tensor without padding.
Let me know, if I misunderstood the use case. |
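If padding is not an option, a minimal sketch of the nested-list result (essentially the same two loops, written compactly; torch.unique(..., dim=0) deduplicates identical (x, y, z) rows):

import torch

x = torch.randn(1, 512, 16, 3)
result = [[torch.unique(x[b, i], dim=0) for i in range(x.size(1))]
          for b in range(x.size(0))]
print(result[0][0].shape)  # [p, 3] with p <= 16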
st83502 | I am trying to solve a multi-task regression problem.
Input shape: 200 * 60000, Output shape: 200*3 (here, 200 = Total number of data, 60000 = number of features)
So, for each data point, I have to predict 3 values (continuous).
Sample Code:
class Classifier(nn.Module):
    def __init__(self, input_nodes):
        super(Classifier, self).__init__()
        self.input_nodes = input_nodes
        self.sharedlayer = nn.Sequential(
            nn.Linear(input_nodes, 300),
            nn.ReLU(),
            nn.Dropout(),
            nn.Linear(300, 100),
            nn.ReLU(),
            nn.Dropout(),
        )
        self.att1 = nn.Sequential(
            nn.Linear(100, 40),
            nn.ReLU(),
            nn.Dropout(),
            nn.Linear(40, 20),
            nn.ReLU(),
            nn.Dropout(),
            nn.Linear(20, 1)
        )
        self.att2 = nn.Sequential(
            nn.Linear(100, 40),
            nn.ReLU(),
            nn.Dropout(),
            nn.Linear(40, 20),
            nn.ReLU(),
            nn.Dropout(),
            nn.Linear(20, 1)
        )
        self.att3 = nn.Sequential(
            nn.Linear(100, 40),
            nn.ReLU(),
            nn.Dropout(),
            nn.Linear(40, 20),
            nn.ReLU(),
            nn.Dropout(),
            nn.Linear(20, 1)
        )

    def forward(self, x):
        h_shared = self.sharedlayer(x)
        out1 = self.att1(h_shared)
        out2 = self.att2(h_shared)
        out3 = self.att3(h_shared)
        return out1, out2, out3
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)
for epoch in range(n_epochs):
    running_loss = 0
    i = 0
    model.train()
    for data, label in trainloader:
        i = i + 1
        out1, out2, out3 = model(data)
        l1 = criterion(out1, label[:,0].view(-1,1))
        l2 = criterion(out2, label[:,1].view(-1,1))
        l3 = criterion(out3, label[:,2].view(-1,1))
        loss = (l1 + l2 + l3)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
Problem: The model always produces the same value for all test data.
Example: Suppose, 3 Test data:
For output1: 3.5 3.5 3.5
For output2: 9.5 9.5 9.5
For output3: 0.2 0.2 0.2
Can you please help me to figure out what is the problem here?
Why does it generate the same value for all test data? |
st83503 | Could you check the bias values of the last linear layers?
Sometimes bad hyperparameters (e.g. a high learning rate) throw the parameters off so that only the bias will be predicted (often the mean value of your target). |
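A quick way to run that check might look like this (a sketch; data, label and the att3 indexing are taken from the code above and assumed to be in scope):

with torch.no_grad():
    out1, out2, out3 = model(data)
print(out3[:5].flatten())   # are the predictions (nearly) constant?
print(model.att3[6].bias)   # bias of the last linear layer of the third head
print(label[:, 2].mean())   # mean of the corresponding target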
st83504 | @ptrblck Thanks for your suggestion.
The bias value is different from the output values.
For 11 test data —
Generated Outputs for output3 are:
tensor([[0.8806],
[0.8806],
[0.8806],
[0.8806],
[0.8806],
[0.8806],
[0.8806],
[0.8806],
[0.8806],
[0.8806],
[0.8806]], grad_fn=<AddmmBackward>),
I access the weights and bias using the code below:
a) model.att3[6].weight
Parameter containing:
tensor([[-0.0960, 0.1035, -0.0493, 0.4335, -0.2495, 0.1452, 0.1481, -0.4376,
-0.0052, 0.0901, 0.0533, -0.0907, 0.0778, 0.1182, -0.4527, 0.2129,
-0.2819, -0.0838, 0.0184, -0.0613]], requires_grad=True)
b) model.att3[6].bias
Parameter containing:
tensor([0.5688], requires_grad=True)
If you have any other idea, please let me know |
st83505 | Hi Forum!
Would anybody know of a pre-built pytorch windows / CUDA 3.0
version? (It’s windows 10, if that matters.)
I’m aware that pytorch no longer formally supports older CUDA
versions, but I have seen older pre-built packages floating
around on the internet – just not this configuration.
(I will be installing pytorch on an older laptop with a CUDA 3.0
quadro k1100m graphics card. I’ll just be monkeying around,
so I don’t need the gpu, but I’d like to monkey around with
the gpu, as well. I’d prefer python 3, but I’d be willing to go
with python 2 to get the gpu.)
Thanks for any advice!
K. Frank |
st83506 | Solved by ptrblck in post #2
Hi Frank,
I assume you are referring to the compute capability 3.0, which should work with CUDA6.0 - CUDA10.1.
If I’m not mistaken, the minimal compute capability for the current binaries is >=3.5, so you could build from source to support this older GPU.
However, if you would like to play around… |
st83507 | Hi Frank,
I assume you are referring to the compute capability 3.0, which should work with CUDA6.0 - CUDA10.1.
If I’m not mistaken, the minimal compute capability for the current binaries is >=3.5, so you could build from source to support this older GPU.
However, if you would like to play around with some legacy PyTorch version, you might get lucky finding some supported binaries here (built by @peterjc123).
Since these binaries are quite old by now, I would recommend building from source. |
st83508 | Hello Peter!
Thanks for your reply.
ptrblck:
I assume you are referring to the compute capability 3.0, which should work with CUDA6.0 - CUDA10.1.
Indeed. I was referring to compute capability 3.0 (not that I knew
it at the time …). Thanks for clearing up that confusion of mine.
If I’m not mistaken, the minimal compute capability for the current binaries is >=3.5,
Do you know how I might determine which compute capability a
binary uses before installing it?
For example, on the main pytorch.org page, in the “QUICK
START LOCALLY” section, one is given the choice of CUDA
9.0 and 10.0 (and none), which I suppose I now understand
to be CUDA SDKs 9.0 and 10.0. Is there any way for me to
deduce whether such a download will support compute
capability 3.0, or whether it starts at 3.5 at the earliest?
However, if you would like to play around with some legacy PyTorch version, you might get lucky finding some supported binaries here (built by @peterjc123).
Thanks for that link. In a similar vein, do you know how I might
figure out which of these binaries support which compute capability?
For example, the first file on that google drive is
pytorch_legacy-0.1.12-py35_0.1.12cu80.tar.bz2.
Am I right that the “cu80” part of the file name suggests that the
binary is built with CUDA SDK 8.0 and that it could therefore
potentially support compute capability 2.0 – 6.2? Is there any
to determine its minimum required compute capability (before
installing)?
(Also am I right that the .tar.bz2 files are linux binaries, while the
.whl file is the sole windows binary?)
Thanks for helping clear these things up for me.
K. Frank |
st83509 | KFrank:
Do you know how I might determine which compute capability a
binary uses before installing it?
The compute capability is unfortunately not encoded in the file names, so the best approach would be to install the binary and just see, if your GPU works, or print:
print(torch.__config__.show())
> PyTorch built with:
- GCC 4.9
- Intel(R) Math Kernel Library Version 2019.0.4 Product Build 20190411 for Intel(R) 64 architecture applications
- Intel(R) MKL-DNN v0.18.1 (Git Hash 7de7e5d02bf687f971e7668963649728356e0c20)
- OpenMP 201307 (a.k.a. OpenMP 4.0)
- NNPACK is enabled
- CUDA Runtime 10.0
- NVCC architecture flags: -gencode;arch=compute_35,code=sm_35;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_61,code=sm_61;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_50,code=compute_50
- CuDNN 7.5.1
- Magma 2.5.0
- Build settings: BLAS=MKL, BUILD_TYPE=Release, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -fopenmp -O2 -fPIC -Wno-narrowing -Wall -Wextra -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-result -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math, DISABLE_NUMA=1, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, USE_CUDA=True, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=True, USE_NNPACK=True, USE_OPENMP=ON,
KFrank:
Am I right that the “cu80” part of the file name suggests that the
binary is built with CUDA SDK 8.0 and that it could therefore
potentially support compute capability 2.0 – 6.2?
Generally yes, but the used compute capability has to be specified using the TORCH_CUDA_ARCH_LIST flag as used here.
So while the binaries support e.g. CUDA10, the compute capability might be limited to specific architectures.
KFrank:
(Also am I right that the .tar.bz2 files are linux binaries, while the
.whl file is the sole windows binary?)
That shouldn’t be the case. These .whl packages are used for linux systems.
I’m not sure if conda uses .tar.bz2, while pip uses .whl.
Anyway, both are just zipped containers, so I’m not sure if the file ending is even important. |
st83510 | Hello Peter!
Thanks for your reply (and sorry for being a little slow in following up).
ptrblck:
KFrank:
Am I right that the “cu80” part of the file name suggests that the
binary is built with CUDA SDK 8.0 and that it could therefore
potentially support compute capability 2.0 – 6.2?
Generally yes, but the used compute capability has to be specified using the TORCH_CUDA_ARCH_LIST flag as used here.
So while the binaries support e.g. CUDA10, the compute capability might be limited so specific architectures.
Okay, this makes sense. (I tried installing a “CUDA 9.0” version from
the pytorch main page. It installed fined, and seemed to run – I didn’t
try anything substantive – but announced that its support for compute
capability started earliest at 3.5, and was not compatible with my 3.0
gpu. I imagine it runs fine with the cpu, but I didn’t test it.)
Now on to my follow-up question:
Coming back to your earlier post:
ptrblck:
However, if you would like to play around with some legacy PyTorch version, you might get lucky finding some supported binaries here (built by @peterjc123).
I downloaded a CUDA 8.0 version from your legacy-binaries link
(specifically pytorch-0.3.0-py36_0.3.0cu80.tar.bz). It has some
.dll’s in it so I am supposing it is a windows build. I chose a CUDA
8.0 version in the hope that it might support compute capability 3.0
(but I don’t know that yet).
I ran:
pip3 install file:///C:/<path_to_bz_file>/pytorch-0.3.0-py36_0.3.0cu80.tar.bz2
and got the following error:
Processing c:\<path_to_bz_file>\pytorch-0.3.0-py36_0.3.0cu80.tar.bz2
ERROR: Complete output from command python setup.py egg_info:
ERROR: Traceback (most recent call last):
File "<string>", line 1, in <module>
File "c:\<path_to_python>\python36\lib\tokenize.py", line 452, in open
buffer = _builtin_open(filename, 'rb')
FileNotFoundError: [Errno 2] No such file or directory: 'C:\\Users\\USERXY~1\\AppData\\Local\\Temp\\pip-req-build-d2relny2\\setup.py'
----------------------------------------
ERROR: Command "python setup.py egg_info" failed with error code 1 in C:\Users\USERXY~1\AppData\Local\Temp\pip-req-build-d2relny2\
Note the “8.3” format of the username; a ten-alphabetic-character
(no spaces, numbers, or special characters) username has been
translated to the 8.3 form. I don’t know why or where.
I can confirm that C:\Users\USERXY~1\AppData\Local\Temp\ is
accessible (through the 8.3 username), and that the installing user
can create (and read) subdirectories and files there. (Note that
nothing else in the path to the “Temp” directory is longer than
eight characters, although the “pip-req-build-d2relny2” subdirectory
that pip created (or tried to create) is.)
I do not see a “pip-req-build-d2relny2” subdirectory in the Temp
directory (nor a setup.py file in such a subdirectory), so either it
wasn’t created, or pip cleaned up after itself after the install failed.
Would you (or anyone else) have some ideas about what might be
going on and how to fix it? This is on windows 10, if that matters.
Since I wasn’t able to install this particular pytorch build, I wasn’t
able to query it for which compute capability levels it supports. But
I can unzip / untar the .tar.bz file, so would you know if there is a
way I can figure out the compute capability from the unzipped file,
even if I can’t install it?
Thanks again for your help.
K. Frank |
st83511 | I assume @peterjc123 used this repo to create the legacy Windows binaries. So you might find these paths there.
I’m unfortunately not really familiar with Windows installations etc.
KFrank:
so would you know if there is a
way I can figure out the compute capability from the unzipped file,
even if I can’t install it?
There might be a way using strings and grep to check all strings in the libraries for the compute capability.
Something like this might work, but I’m not even sure which library really contains all the interesting strings:
strings torch/lib/libnvrtc-builtins.so | grep -Eo 'compute_[0-9]+' | sort --unique
Consider it a really dirty hack, but you might get lucky with this command.
That being said, I’m currently not sure, if Windows has similar tools to inspect a .dll |
st83512 | On Windows, the similar tool is called dumpbin, and it is accessible when you activate the VC developer environment. The command to use would be dumpbin /rawdata torch\lib\nvrtc-builtins64_90.dll. And the result will be like:
0000000180007990: 20 63 6F 6D 70 75 74 65 72 20 73 6F 66 74 77 61 computer softwa
00000001800079B0: 69 61 6C 0D 0A 20 2A 20 63 6F 6D 70 75 74 65 72 ial.. * computer
0000000180010DF0: 0D 0A 69 6E 74 20 63 6F 6D 70 75 74 65 4D 6F 64 ..int computeMod
0000000180019390: 0D 0A 69 6E 74 20 63 6F 6D 70 75 74 65 50 72 65 ..int computePre
0000000180025770: 74 20 63 6F 6D 70 75 74 65 4D 6F 64 65 3B 0D 0A t computeMode;..
000000018002D880: 72 79 3B 0D 0A 69 6E 74 20 63 6F 6D 70 75 74 65 ry;..int compute
000000018002DD10: 74 20 63 6F 6D 70 75 74 65 50 72 65 65 6D 70 74 t computePreempt
000000018003F5B0: 3B 0D 0A 69 6E 74 20 63 6F 6D 70 75 74 65 4D 6F ;..int computeMo
000000018003FA40: 63 6F 6D 70 75 74 65 50 72 65 65 6D 70 74 69 6F computePreemptio
0000000180047B50: 3B 0D 0A 69 6E 74 20 63 6F 6D 70 75 74 65 50 72 ;..int computePr
0000000180059770: 69 6E 74 20 63 6F 6D 70 75 74 65 4D 6F 64 65 3B int computeMode;
0000000180061D10: 69 6E 74 20 63 6F 6D 70 75 74 65 50 72 65 65 6D int computePreem
000000018006EEA0: 3B 0D 0A 69 6E 74 20 63 6F 6D 70 75 74 65 4D 6F ;..int computeMo
000000018006F330: 63 6F 6D 70 75 74 65 50 72 65 65 6D 70 74 69 6F computePreemptio
0000000180077440: 3B 0D 0A 69 6E 74 20 63 6F 6D 70 75 74 65 50 72 ;..int computePr
000000018007B810: 75 72 65 20 63 6F 6D 70 75 74 65 5F 36 30 20 77 ure compute_60 w
000000018007B870: 2D 61 72 63 68 3D 63 6F 6D 70 75 74 65 5F 36 30 -arch=compute_60
You’ll have to save it to a file, remove the left part, join the lines together, and then search for compute_[0-9]+. |
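A small helper along those lines (a sketch; dump.txt is a hypothetical file produced by redirecting the dumpbin output, and the assumption that the ASCII column is the last 16 characters of each line may need adjusting):

import re

with open("dump.txt") as f:   # dumpbin /rawdata ... > dump.txt
    ascii_text = "".join(line.rstrip("\n")[-16:] for line in f if ":" in line)
print(sorted(set(re.findall(r"compute_[0-9]+", ascii_text))))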
st83513 | BTW, .tar.bz is the package format for Conda. You should do conda install <pkgname>.tar.bz and pip install <pkgname>.whl. FYI, the packages >= 0.4 have CUDA CC >= 3.5. |
st83514 | Hello Pu!
peterjc123:
BTW, .tar.bz is the package format for Conda. You should do conda install <pkgname>.tar.bz and pip install <pkgname>.whl. FYI, the packages >= 0.4 have CUDA CC >= 3.5.
Just to clarify, is the file I downloaded,
pytorch-0.3.0-py36_0.3.0cu80.tar.bz, a “0.3.0” package, and
therefore is “< 0.4”, and therefore would be expected to support
CUDA CC 3.0?
Thanks.
K. Frank |
st83515 | I checked my answers in the Zhihu post and yes, it seems that CC3.0 is supported. |
st83516 | Hi Peter and Pu!
peterjc123:
BTW, .tar.bz is the package format for Conda. You should do conda install <pkgname>.tar.bz and pip install <pkgname>.whl.
It looks like I was able to install an old pytorch with cuda for my gpu,
but I get an error when I try to do anything.
I installed* conda and (from the “Anaconda Prompt”) used conda to
install the legacy pytorch binary:
conda install file:///C:/<path_to_bz_file>/pytorch-0.3.0-py36_0.3.0cu80.tar.bz2
I then ran python (from the “Anaconda Prompt”) and ran:
>>> import torch
>>> print(torch.__version__)
0.3.0b0+591e73e
>>> print(torch.version.cuda)
8.0
>>> print(torch.cuda.is_available())
True
>>> print(torch.cuda.current_device())
THCudaCheck FAIL file=D:\pytorch\pytorch\torch\lib\THC\THCGeneral.c line=120 error=30 : unknown error
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\<path_to_miniconda>\Miniconda3\lib\site-packages\torch\cuda\__init__.py", line 302, in current_device
_lazy_init()
File "C:\<path_to_miniconda>\Miniconda3\lib\site-packages\torch\cuda\__init__.py", line 140, in _lazy_init
torch._C._cuda_init()
RuntimeError: cuda runtime error (30) : unknown error at D:\pytorch\pytorch\torch\lib\THC\THCGeneral.c:120
So it looks like pytorch recognizes my gpu and considers it acceptable,
but can’t actually lazy_init / initialize it.
Do you think there is something broken in my installation (that I can
fix), or should I regard this as a bug in the old pytorch 0.3.0 (and
potentially give up on trying to get this gpu working with pytorch)?
*) Further details on my installation process:
First I installed miniconda, specifically,
Miniconda3-4.5.4-Windows-x86_64.exe. I chose this older version
because it was the newest miniconda version that was python 3.6,
which I assume I need for this legacy python-3.6 version of pytorch.
(Of course, conda has to live in its own private sandbox, so now I
have two independent python 3.6 installations. But I expect that
anaconda considers this a feature, rather than a bug.)
I then used conda to install the legacy pytorch, as above.
When I ran python (from the “Anaconda Prompt”), and ran:
import torch
I got the error:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\<path_to_miniconda>\Miniconda3\lib\site-packages\torch\__init__.py", line 76, in <module>
from torch._C import *
ImportError: numpy.core.multiarray failed to import
Okay, no numpy. (So conda, which bills itself as a package manager,
installs pytorch, but doesn’t install its numpy dependency. I suppose
I consider this a bug, not a feature, but what do I know?)
conda install numpy
works, and now
import torch
works.
Regarding printing out the pytorch configuration in order to discover
the minimum cuda compute capability, I guess this older version
of pytorch doesn’t support this (or uses a different syntax):
>>> import torch
>>> print(torch.__config__.show())
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: module 'torch' has no attribute '__config__'
So I don’t know how to definitively probe this version of pytorch for
its minimum required compute capability (but I assume the fact that
torch.cuda.is_available() returns True means that the minimum
required compute capability is 3.0 or lower).
Anyway, thanks again for your help, and any further suggestions on
how to get this going would be appreciated.
Best regards.
K. Frank |
st83517 | RuntimeError: cuda runtime error (30)
might point to the driver.
Could you have a look at this post and check if any suggestion could help? |
st83518 | Hi Peter and Pu!
Thanks for your help getting pytorch working with my old cuda
compute capability 3.0 gpu. Thanks also to Andrei for his
post (in the thread linked to below) for the observation
that helped get me past my sticking point.
I will select Peter’s post linking to the legacy builds as the
solution although the whole discussion has been helpful.
The solution (for me) is to use a work-around for the
“cuda runtime error (30)” issue – namely don’t call
torch.cuda.is_available().
I was led to this by this post in the thread linked to by Peter:
[resolved] Cuda Runtime Error(30)
This always works for me (Win10, Cuda 10.1, Python 3.7.2, PyTorch 1.0.1, NVIDIA GTX 1050 Ti):
import torch
torch.cuda.current_device()
but this always fails for me:
import torch
torch.cuda.is_available()
torch.cuda.current_device() # fails here
@Mohamed_Ghadban, how can I access the NVIDIA Nsight Options? Thanks in advance
I’ve posted some observations about “cuda runtime error (30)” here:
Some observations on "cuda runtime error (30)"
Hello Forum!
I have some information about the behavior of “cuda runtime
error (30)” (probably somewhat specific to my particular
configuration).
This is a follow-on to a number of threads about “error 30,”
and, in particular, to this post:
Clued in by Andrei’s observation that torch.cuda.is_available()
“breaks” cuda, I find (for me) that if torch.cuda.is_available()
is the first cuda call, subsequent cuda calls will throw “error 30”
unless the first subsequent call is called promptly…
Here is a script showing some simple gpu-tensor manipulations:
import torch
print (torch.__version__)
torch.cuda.get_device_capability (0)
torch.cuda.get_device_name (0)
ct = torch.cuda.FloatTensor([[3.3, 4.4, 5.5], [6.6, 7.7, 8.8]])
ct
ct + 0.01 * ct
quit()
And here is the output:
Python 3.6.5 |Anaconda, Inc.| (default, Mar 29 2018, 13:32:41) [MSC v.1900 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license" for more information.
>>> import torch
>>> print (torch.__version__)
0.3.0b0+591e73e
>>> torch.cuda.get_device_capability (0)
(3, 0)
>>> torch.cuda.get_device_name (0)
'Quadro K1100M'
>>> ct = torch.cuda.FloatTensor([[3.3, 4.4, 5.5], [6.6, 7.7, 8.8]])
>>> ct
3.3000 4.4000 5.5000
6.6000 7.7000 8.8000
[torch.cuda.FloatTensor of size 2x3 (GPU 0)]
>>> ct + 0.01 * ct
3.3330 4.4440 5.5550
6.6660 7.7770 8.8880
[torch.cuda.FloatTensor of size 2x3 (GPU 0)]
>>> quit()
I’ll follow up if I have any issues actually running models on
this gpu.
Thanks again.
K. Frank |
st83519 | Hi everyone!
I have several questions for you:
I’m new with pytorch and I’m trying to perform a test on my NN model with JupyterLab and there is something strange happening. It seems to require the same GPU memory capacity as training (for a same input size and a batch size of 1 for the training). So I think it could be due to the gradient maps that are saved during the back propagation (I can see that the GPU memory explodes just after I run the output = model(input) line). Do you know a way to see if the gradient maps are actually computed and saved?
If I do
my_model.eval()
to avoid the back propagation, do I also need to do something like
with torch.no_grad():
    model_input = some_tensor
or
with torch.no_grad():
    model_output = my_model(model_input)
?
And if I do something like this:
my_model.eval()
with torch.no_grad():
    model_output = my_model(model_input)
Is model_output.requires_grad supposed to be False?
I’m working with pytorch 0.4.0 on Windows 10 and I have a 6 GB GPU.
Thank you! |
st83520 | Hi,
You can find some more information in 'model.eval()' vs 'with torch.no_grad()'.
Let me know if you have more questions. |
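To the concrete questions above, a short sketch of the documented behavior: model.eval() only switches layers such as dropout and batchnorm to evaluation mode, while torch.no_grad() is what prevents the graph (and the intermediate activations) from being stored, so the output computed inside it has requires_grad == False.

import torch
import torch.nn as nn

my_model = nn.Linear(10, 2)
model_input = torch.randn(4, 10)

my_model.eval()
with torch.no_grad():
    model_output = my_model(model_input)
print(model_output.requires_grad)  # False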
st83521 | Hello Forum!
I have some information about the behavior of “cuda runtime
error (30)” (probably somewhat specific to my particular
configuration).
This is a follow-on to a number of threads about “error 30,”
and, in particular, to this post:
[resolved] Cuda Runtime Error(30)
This always works for me (Win10, Cuda 10.1, Python 3.7.2, PyTorch 1.0.1, NVIDIA GTX 1050 Ti):
import torch
torch.cuda.current_device()
but this always fails for me:
import torch
torch.cuda.is_available()
torch.cuda.current_device() # fails here
@Mohamed_Ghadban, how can I access the NVIDIA Nsight Options? Thanks in advance
Clued in by Andrei’s observation that torch.cuda.is_available()
“breaks” cuda, I find (for me) that if torch.cuda.is_available()
is the first cuda call, subsequent cuda calls will throw “error 30”
unless the first subsequent call is called promptly (< ~1 sec.).
Specifically, start a fresh python session and import torch, then:
1) Don’t call torch.cuda.is_available():
   subsequent cuda calls work.
2) Call torch.cuda.is_available() first and make another cuda call
   in less than (about) a second:
   it and subsequent cuda calls work.
3) Call torch.cuda.is_available() first and wait about two seconds:
   subsequent cuda calls throw “error 30” with stack trace A
   (except torch.cuda.device_count(), which works in this case).
4) Call torch.cuda.is_available() first and wait more than (about) ten seconds:
   subsequent cuda calls throw “error 30” with stack trace B.
(See example scripts, below.)
This seems to be repeatable for me, although, because it involves
timing and the order in which calls are made, I don’t know whether
it is always consistently repeatable.
I am using pytorch 0.3.0 for cuda 8, specifically
pytorch-0.3.0-py36_0.3.0cu80.tar.bz2 from the legacy-builds link
in this post of Peter’s:
Pre-built pytorch for CUDA 3.0 on windows?
Hi Frank,
I assume you are referring to the compute capability 3.0, which should work with CUDA6.0 - CUDA10.1.
If I’m not mistaken, the minimal compute capability for the current binaries is >=3.5, so you could build from source to support this older GPU.
However, if you would like to play around with some legacy PyTorch version, you might get lucky finding some supported binaries here (built by @peterjc123).
Since these binaries are quite old by now, I would recommend building from source. …
I am using a mobile (laptop) Quadro K1100M gpu with the nvidia driver
version 426.00 running on windows 10. (As a side note, I initially
saw this “error 30” issue using the nvidia driver version 425.45.
I would guess these observations also apply to that driver, but I don’t
know for sure because I upgraded the driver before I came across this
business with torch.cuda.is_available() “breaking” cuda and
performing these tests.)
Anyway, I have no idea what is going on here. I’m posting these
results in the hope they might shed some light on the “error 30”
issue, and be helpful to others. I suspect that these results
are specific to configurations similar to mine, and are not likely
to be relevant to all instances of “error 30”.
Thanks.
K. Frank
Here are some illustrative scripts and their outputs. Each was
run in a fresh python session started from the command line. (The
sleep (10.0)in the first script is just to show that it’s waiting
after torch.cuda.is_available() that is relevant, rather than,
for example, waiting after import torch.)
No wait – cuda works:
from time import sleep
import torch
print (torch.__version__)
sleep (10.0)
torch.cuda.is_available()
torch.cuda.current_device()
torch.cuda.device_count()
torch.cuda.get_device_name (0)
torch.cuda.get_device_capability (0)
quit()
No-wait result:
Python 3.6.5 |Anaconda, Inc.| (default, Mar 29 2018, 13:32:41) [MSC v.1900 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license" for more information.
>>> from time import sleep
>>> import torch
>>> print (torch.__version__)
0.3.0b0+591e73e
>>> sleep (10.0)
>>> torch.cuda.is_available()
True
>>> torch.cuda.current_device()
0
>>> torch.cuda.device_count()
1
>>> torch.cuda.get_device_name (0)
'Quadro K1100M'
>>> torch.cuda.get_device_capability (0)
(3, 0)
>>> quit()
Short wait – cuda works:
from time import sleep
import torch
print (torch.__version__)
torch.cuda.is_available()
sleep (1.0)
torch.cuda.current_device()
torch.cuda.device_count()
torch.cuda.get_device_name (0)
torch.cuda.get_device_capability (0)
quit()
Short-wait result:
Python 3.6.5 |Anaconda, Inc.| (default, Mar 29 2018, 13:32:41) [MSC v.1900 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license" for more information.
>>> from time import sleep
>>> import torch
>>> print (torch.__version__)
0.3.0b0+591e73e
>>> torch.cuda.is_available()
True
>>> sleep (1.0)
>>> torch.cuda.current_device()
0
>>> torch.cuda.device_count()
1
>>> torch.cuda.get_device_name (0)
'Quadro K1100M'
>>> torch.cuda.get_device_capability (0)
(3, 0)
>>> quit()
Medium wait – cuda fails (A):
from time import sleep
import torch
print (torch.__version__)
torch.cuda.is_available()
sleep (2.0)
torch.cuda.current_device()
torch.cuda.device_count()
torch.cuda.get_device_name (0)
torch.cuda.get_device_capability (0)
quit()
Medium-wait result:
Python 3.6.5 |Anaconda, Inc.| (default, Mar 29 2018, 13:32:41) [MSC v.1900 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license" for more information.
>>> from time import sleep
>>> import torch
>>> print (torch.__version__)
0.3.0b0+591e73e
>>> torch.cuda.is_available()
True
>>> sleep (2.0)
>>> torch.cuda.current_device()
THCudaCheck FAIL file=torch/csrc/cuda/Module.cpp line=143 error=30 : unknown error
Traceback (most recent call last):
File "C:\<path_to_miniconda>\Miniconda3\lib\site-packages\torch\cuda\__init__.py", line 151, in _lazy_init
queued_call()
File "C:\<path_to_miniconda>\Miniconda3\lib\site-packages\torch\cuda\__init__.py", line 103, in _check_capability
major = get_device_capability(d)[0]
File "C:\<path_to_miniconda>\Miniconda3\lib\site-packages\torch\cuda\__init__.py", line 266, in get_device_capability
return torch._C._cuda_getDeviceCapability(device)
RuntimeError: cuda runtime error (30) : unknown error at torch/csrc/cuda/Module.cpp:143
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\<path_to_miniconda>\Miniconda3\lib\site-packages\torch\cuda\__init__.py", line 302, in current_device
_lazy_init()
File "C:\<path_to_miniconda>\Miniconda3\lib\site-packages\torch\cuda\__init__.py", line 155, in _lazy_init
raise_from(DeferredCudaCallError(msg), e)
File "<string>", line 3, in raise_from
torch.cuda.DeferredCudaCallError: CUDA call failed lazily at initialization with error: cuda runtime error (30) : unknown error at torch/csrc/cuda/Module.cpp:143
CUDA call was originally invoked at:
[' File "<stdin>", line 1, in <module>\n', ' File "<frozen importlib._bootstrap>", line 971, in _find_and_load\n', ' File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked\n', ' File "<frozen importlib._bootstrap>", line 665, in _load_unlocked\n', ' File "<frozen importlib._bootstrap_external>", line 678, in exec_module\n', ' File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed\n', ' File "C:\\<path_to_miniconda>\\Miniconda3\\lib\\site-packages\\torch\\__init__.py", line 328, in <module>\n import torch.cuda\n', ' File "<frozen importlib._bootstrap>", line 971, in _find_and_load\n', ' File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked\n', ' File "<frozen importlib._bootstrap>", line 665, in _load_unlocked\n', ' File "<frozen importlib._bootstrap_external>", line 678, in exec_module\n', ' File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed\n', ' File "C:\\<path_to_miniconda>\\Miniconda3\\lib\\site-packages\\torch\\cuda\\__init__.py", line 118, in <module>\n _lazy_call(_check_capability)\n', ' File "C:\\<path_to_miniconda>\\Miniconda3\\lib\\site-packages\\torch\\cuda\\__init__.py", line 116, in _lazy_call\n _queued_calls.append((callable, traceback.format_stack()))\n']
>>> torch.cuda.device_count()
1
>>> torch.cuda.get_device_name (0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\<path_to_miniconda>\Miniconda3\lib\site-packages\torch\cuda\__init__.py", line 253, in get_device_name
return torch._C._cuda_getDeviceName(device)
RuntimeError: cuda runtime error (30) : unknown error at torch/csrc/cuda/Module.cpp:131
>>> torch.cuda.get_device_capability (0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\<path_to_miniconda>\Miniconda3\lib\site-packages\torch\cuda\__init__.py", line 266, in get_device_capability
return torch._C._cuda_getDeviceCapability(device)
RuntimeError: cuda runtime error (30) : unknown error at torch/csrc/cuda/Module.cpp:143
>>> quit()
Long wait – cuda fails (B):
from time import sleep
import torch
print (torch.__version__)
torch.cuda.is_available()
sleep (10.0)
torch.cuda.current_device()
torch.cuda.device_count()
torch.cuda.get_device_name (0)
torch.cuda.get_device_capability (0)
quit()
Long-wait result:
Python 3.6.5 |Anaconda, Inc.| (default, Mar 29 2018, 13:32:41) [MSC v.1900 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license" for more information.
>>> from time import sleep
>>> import torch
>>> print (torch.__version__)
0.3.0b0+591e73e
>>> torch.cuda.is_available()
True
>>> sleep (10.0)
>>> torch.cuda.current_device()
THCudaCheck FAIL file=D:\pytorch\pytorch\torch\lib\THC\THCGeneral.c line=120 error=30 : unknown error
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\<path_to_miniconda>\Miniconda3\lib\site-packages\torch\cuda\__init__.py", line 302, in current_device
_lazy_init()
File "C:\<path_to_miniconda>\Miniconda3\lib\site-packages\torch\cuda\__init__.py", line 140, in _lazy_init
torch._C._cuda_init()
RuntimeError: cuda runtime error (30) : unknown error at D:\pytorch\pytorch\torch\lib\THC\THCGeneral.c:120
>>> torch.cuda.device_count()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\<path_to_miniconda>\Miniconda3\lib\site-packages\torch\cuda\__init__.py", line 294, in device_count
_lazy_init()
File "C:\<path_to_miniconda>\Miniconda3\lib\site-packages\torch\cuda\__init__.py", line 140, in _lazy_init
torch._C._cuda_init()
RuntimeError: cuda runtime error (30) : unknown error at D:\pytorch\pytorch\torch\lib\THC\THCGeneral.c:120
>>> torch.cuda.get_device_name (0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\<path_to_miniconda>\Miniconda3\lib\site-packages\torch\cuda\__init__.py", line 253, in get_device_name
return torch._C._cuda_getDeviceName(device)
RuntimeError: cuda runtime error (30) : unknown error at torch/csrc/cuda/Module.cpp:131
>>> torch.cuda.get_device_capability (0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\<path_to_miniconda>\Miniconda3\lib\site-packages\torch\cuda\__init__.py", line 266, in get_device_capability
return torch._C._cuda_getDeviceCapability(device)
RuntimeError: cuda runtime error (30) : unknown error at torch/csrc/cuda/Module.cpp:143
>>> quit() |
st83522 | I am trying to implement models from the paper Unsupervised Learning of Video Representations using LSTMs 2.
In particular, the authors describe an encoder-predictor LSTM model (see image below) where the sequence is first encoded and the next steps of the sequence are then predicted. When doing the predictions, at each time step they feed in either the ground truth of the previous step (at training time) or the predicted output of the previous step (at test time).
While it seems straightforward to implement the training stage, I am stuck with the testing stage. How do I dynamically feed the output of one step to the input of the next step? I could do a for loop and compute the steps by hand one after the other, but that does not seem very elegant.
What do you think? |
st83523 | What I wanted to do is, in fact, called “teacher forcing”. An official PyTorch tutorial 32 covers this topic. |
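For reference, the explicit per-step loop is the usual way to handle the test-time case; below is a minimal, hypothetical sketch (the class, names, and LSTMCell-based predictor are my own assumptions, not the paper's architecture):
import torch
import torch.nn as nn

class StepDecoder(nn.Module):
    def __init__(self, dim):
        super(StepDecoder, self).__init__()
        self.cell = nn.LSTMCell(dim, dim)
        self.out = nn.Linear(dim, dim)

    def forward(self, first_input, state, steps, targets=None):
        h, c = state
        x, outputs = first_input, []
        for t in range(steps):
            h, c = self.cell(x, (h, c))
            y = self.out(h)
            outputs.append(y)
            # teacher forcing at training time, feed the model's own prediction back at test time
            x = targets[:, t] if targets is not None else y
        return torch.stack(outputs, dim=1)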
st83524 | I am using code that consumes much more CPU resources on 1.1.0 than on 1.0.1, while keeping the GPU usage at ~40% and the same runtime on both.
Any idea why, where, and what to look at in order to make it consume fewer resources on 1.1.0?
In detail:
Originally the code was developed for 0.3.1 (link), and I upgraded it to 0.4.1 according to the migration guide.
Specifically:
(1) I changed all “volatile” usages to “with torch.no_grad()”
(2) replaced “.data[0]” to “.item()”
(3) replaced all “.cuda()” to “.to(device)”.
The exact same code uses much more CPU resources on 1.1.0 than on 0.4.1 or 1.0.1
pytorch was installed on anacoda with the following command.
for v1.1.0: conda install pytorch==1.1.0 torchvision cudatoolkit=10.0 -c pytorch
for v1.0.1: conda install pytorch==1.0.1 torchvision cudatoolkit=10.0 -c pytorch
for v0.4.1: conda install pytorch==0.4.1 torchvision cudatoolkit=10.0 -c pytorch
I am not the one who developed the code, and I am new to pytorch (but experienced with TF). So I am unsure on how to debug this problem, what to look, or how to create a minimal example to reproduce it. I’ll be thankful for any guidance here.
EDIT
Following the advice on https://github.com/pytorch/pytorch/issues/20311 5, setting OMP_NUM_THREADS=1 for 1.1.0 reduces CPU usage, but makes train time slower by 7.5% compared to 1.0.1 (and 16% compared to 0.4.1)
For the same code, the total runtime of pytorch 1.0.1 is slower than 0.4.1 by ~8% [notes: (a) in this bullet, I refer to 1.0.1, not 1.1.0, (b) I repeated this measure 3 times and it was consistent, with low variance] |
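For reference, this is roughly how I set it — I'm assuming that setting the variable before torch is imported is enough; torch.set_num_threads(1) should have a similar effect on the intra-op threads:
import os
os.environ["OMP_NUM_THREADS"] = "1"  # has to be set before torch is imported

import torch
# alternatively, at runtime: torch.set_num_threads(1)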
st83525 | Hi,
I want to assign filter = [[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]]
as weight of convolution layer. Is this command ok?
self.conv1 = conv2d(...)
self.conv1.weight = Parameter(filter)
Thanks |
st83526 | Solved by ptrblck in post #2
I would recommend to wrap the assignment in a with torch.no_grad() block just to make sure Autograd won’t complain later. |
st83527 | I would recommend to wrap the assignment in a with torch.no_grad() block just to make sure Autograd won’t complain later. |
st83528 | ptrblck:
with torch.no_grad()
Thanks a lot.
Is it ok?
class Net(Module):
def __init__(self):
super(Net, self).__init__()
self.convZ = nn.Conv2d(...)
with torch.no_grad():
self.convZ.weight = Parameter(filter)
self.pool = nn.AvgPool2d(...)
self.fc1 = nn.Linear(....)
def forward(self, x):
out = torch.atan(self.convZ(x))
out = self.pool(out)
out = torch.atan(self.fc1(out))
return out
Best Regards |
st83529 | Looks alright, although I wouldn’t assign output_after_convZ back to self.convZ, but rather a new attribute. |
st83530 | Yes, you are right. It is just a mistake. I edited it.
Many thanks, you are the best |
st83531 | Hi,
I want to be able to have a model/optimiser/scheduler object - which I can hot plug and play.
So for example, have a list of such objects, load to gpu in turn, do some training, switch objects.
Maybe then load some earlier ones and pick up training where we left off last time.
I’d like to be able to easily (deep) copy these objects, and save/load to disk.
Note - some models or optimisers or schedulers may be different in these different objects.
One idea - use the torch.save(model) - this will pickle the model class and reproduce the object and load the state_dict, but will it restore the scheduler/optimiser?
Can I use torch.save with these as well to reproduce them later?
How do I control which model they are attached to?
And how can I duplicate the whole model/optimizer/scheduler whilst in memory?
Any suggestions on the best way to do this please?
Many thanks in advance! |
st83532 | Solved by dpernes in post #8
If I store them in the same file - it only stores the state_dicts for each. It will not pickle the object.
This is not true. As long as you do not call state_dict(), it will save the whole variable. Please try the following.
For saving:
checkpoint = {
'epoch': epoch,
'model': model,
… |
st83533 | You can create a dictionary with everything you need and save it using torch.save(). Example:
checkpoint = {
'epoch': epoch,
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_sched': lr_sched}
torch.save(checkpoint, 'checkpoint.pth')
Then you can load the checkpoint doing checkpoint = torch.load('checkpoint.pth')
More info here: Loading a saved model for continue training 305 |
st83534 | Hi, many thanks for your quick reply!
So I’ve seen that. But the problem with that is it only copies the model’s state dict.
If different objects say have different types of models, how do I know which model to create before giving it the saved state dict?
Same for optimizers and schedulers. |
st83535 | Then you can save the model itself, i.e. without calling state_dict(). This will dump the whole content of the variable model into a pickle file, which leads to a larger file than in the previous case. The same applies to the optimizer. The scheduler does not have a state_dict(), so the whole variable is saved anyways. |
st83536 | So I save the model. And then the optimizer. They will be in two separate files? Will they be connected to each other correctly on reloading. And if I want to say create 10 duplicates of this “group” is there a way to do that?
Many thanks |
st83537 | They will be in two separate files?
You can save them in separate files or wrap them in a dictionary like the one I showed (where you remove the calls to state_dict()).
Will they be connected to each other correctly on reloading.
There is no loss of information when you save the whole variable instead of saving the state_dict() only, so I assume that the model and the optimizer will remain “connected” after you reload them (although I’ve never tried it).
And if I want to say create 10 duplicates of this “group” is there a way to do that?
I’m not sure if I understand what you mean with a “duplicate”… Do you mean, for instance, saving a checkpoint of the training after every epoch? You may save each checkpoint file with a different name and that’s it. |
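For instance (assuming the checkpoint dict from my earlier post and an epoch counter in your training loop):
torch.save(checkpoint, 'checkpoint_epoch_{}.pth'.format(epoch))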
st83538 | If I store them in the same file - it only stores the state_dicts for each. It will not pickle the object. Problems then if different “objects” have different models.
Separate files sound troublesome. In which order do I load them, and do they still work for training?
Imagine one of a group of 10 model/optimizer/scheduler does particularly well after round one - where each has had an hour on the gpu. This may be because I gave it a favourable set of hyper parameters.
I want to take this group, duplicate it x10, and then run each of those with slightly different hyperparameters.
In essence - I want to be able to duplicate the whole model/optimiser/scheduler state. |
st83539 | If I store them in the same file - it only stores the state_dicts for each. It will not pickle the object.
This is not true. As long as you do not call state_dict(), it will save the whole variable. Please try the following.
For saving:
checkpoint = {
'epoch': epoch,
'model': model,
'optimizer': optimizer,
'lr_sched': lr_sched}
torch.save(checkpoint, 'checkpoint.pth')
For loading:
checkpoint = torch.load('checkpoint.pth')
epoch = checkpoint['epoch']
model = checkpoint['model']
optimizer = checkpoint['optimizer']
lr_sched = checkpoint['lr_sched']
What’s the problem with this approach? |
st83540 | Oh, sorry - one last thing - wondered about copying the whole thing say x10?
Do I just reload the same thing from disk multiple times? |
st83541 | I’ve been working on a checkpoint helper for such use case. It is still WIP but check it out: https://pypi.org/project/pytorchcheckpoint/ 152 |
st83542 | Hey,
Until now I used binary cross-entropy loss, but since I need to use some other loss function, I need to change my output so that it conforms to the cross-entropy format. The result should be exactly the same, right?
When I tried a fake / handcrafted example I did not get the same results for both loss functions; probably I am just overlooking something …
Suppose in binary format my ground truth is [1., 0.] and my prediction [0.5, 0], then in normal CE format my ground truth should be the same (but with integers since those are now class indices, so [1 0]). My prediction however should be for the first element: [0.5, 0.5] and for the second element [1.0, 0.0] right?
Below you can find it in a jupyter notebook.
Cheers,
Lucas
image.png1063×1126 123 KB |
st83543 | Solved by InnovArul in post #2
Remember that cross entropy loss = ‘log softmax’ + ‘negative log likelihood’.
Hence, you may need to set
prediction[1,1]=float("-inf") |
st83544 | Remember that cross entropy loss = ‘log softmax’ + ‘negative log likelihood’.
Hence, you may need to set
prediction[1,1]=float("-inf") |
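To make it concrete, here is a small sketch with the toy values from your notebook (the tensor names are mine):
import torch
import torch.nn.functional as F

target = torch.tensor([1, 0])                    # class indices
probs = torch.tensor([[0.5, 0.5], [1.0, 0.0]])   # intended class probabilities

# F.cross_entropy applies log_softmax to its input, i.e. it expects logits rather
# than probabilities, so a "probability of 0" has to be encoded as a -inf logit:
logits = probs.clone()
logits[1, 1] = float("-inf")

print(F.binary_cross_entropy(torch.tensor([0.5, 0.]), torch.tensor([1., 0.])))  # ~0.3466
print(F.nll_loss(torch.log(probs), target))                                     # ~0.3466
print(F.cross_entropy(logits, target))                                          # same value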
st83545 | I’m trying to measure the memory usage of each layer by torch.cuda.max_memory_allocated() and torch.cuda.reset_max_memory_allocated():
for layer in sequential_module:
torch.cuda.reset_max_memory_allocated(0)
x = layer(x)
size = torch.cuda.max_memory_allocated(0)
sizes.append(size)
I understood that torch.cuda.reset_max_memory_allocated() can be used to reset the starting point for tracking the maximum memory allocation. So I expected torch.cuda.max_memory_allocated() to return 0 right after a reset. But it doesn’t.
In [1]: import torch
In [2]: torch.cuda.max_memory_allocated(0)
Out[2]: 0
In [3]: x = torch.rand(100, 100, 100, device=0)
In [4]: torch.cuda.max_memory_allocated(0)
Out[4]: 4865536
In [5]: torch.cuda.reset_max_memory_allocated(0)
In [6]: torch.cuda.max_memory_allocated(0)
Out[6]: 4865536
How do I understand this behavior? |
st83546 | Since you are not freeing any memory, torch.cuda.max_memory_allocated will still return the currently used memory, as it’s still the peak.
Have a look at this code snippet:
# Check for empty
torch.cuda.max_memory_allocated(0)
# Create one tensor
x = torch.rand(100, 100, 100, device='cuda:0')
# should yield the same value
torch.cuda.memory_allocated(0)
torch.cuda.max_memory_allocated(0)
# Create other tensor
y = torch.rand(100, 100, 100, device='cuda:0')
# Should be same, but higher
torch.cuda.memory_allocated(0)
torch.cuda.max_memory_allocated(0)
# Delete one tensor
del y
# max_memory_allocated should keep it's old value
torch.cuda.memory_allocated(0)
torch.cuda.max_memory_allocated(0)
# Reset to track new peak memory usage
torch.cuda.reset_max_memory_allocated(0)
torch.cuda.memory_allocated(0)
torch.cuda.max_memory_allocated(0) |
st83547 | I’m trying to train an LSTM connected to a couple of MLP layers. The model is coded as follows:
class RNNBlock(nn.Module):
def __init__(self, in_dim, hidden_dim, num_layer=1, dropout=0):
super(RNNBlock, self).__init__()
self.hidden_dim = hidden_dim
self.num_layer = num_layer
self.lstm = nn.LSTM(in_dim, hidden_dim, num_layer, dropout)
def forward(self, onehot, length):
batch_size = onehot.shape[0]
h_in = torch.randn(self.num_layer, batch_size, self.hidden_dim).cuda()
c_in = torch.randn(self.num_layer, batch_size, self.hidden_dim).cuda()
packed = nn.utils.rnn.pack_padded_sequence(onehot, length, batch_first=True).cuda()
output, (h_out, c_out) = self.lstm(packed, (h_in, c_in))
unpacked, unpacked_length = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)
vectors = list()
for i, vector in enumerate(unpacked):
vectors.append(unpacked[i, unpacked_length[i]-1, :].view(1, -1))
out = torch.cat(vectors, 0)
return out
class Predictor(nn.Module):
def __init__(self, in_dim, out_dim, act=None):
super(Predictor, self).__init__()
self.linear = nn.Linear(in_dim, out_dim)
nn.init.xavier_normal_(self.linear.weight)
self.activation = act
def forward(self, x):
out = self.linear(x)
if self.activation != None:
out = self.activation(out)
return out
class RNNNet(nn.Module):
def __init__(self, args):
super(RNNNet, self).__init__()
self.rnnBlock = RNNBlock(args.in_dim, args.hidden_dim, args.num_layer, args.dropout)
self.pred1 = Predictor(args.hidden_dim, args.pred_dim1, act=nn.ReLU())
self.pred2 = Predictor(args.pred_dim1, args.pred_dim2, act=nn.ReLU())
self.pred3 = Predictor(args.pred_dim2, args.out_dim)
def forward(self, onehot, length):
out = self.rnnBlock(onehot, length)
out = self.pred1(out)
out = self.pred2(out)
out = self.pred3(out)
return out
and this is my train function
def train(model, device, optimizer, criterion, data_train, bar, args):
epoch_train_loss = 0
epoch_train_mae = 0
for i, batch in enumerate(data_train):
list_onehot = torch.tensor(batch[0]).cuda().float()
list_length = torch.tensor(batch[1]).cuda()
list_logP = torch.tensor(batch[2]).cuda().float()
# Sort onehot tensor with respect to the sequence length.
list_length, list_index = torch.sort(list_length, descending=True)
list_length.cuda()
list_index.cuda()
list_onehot = torch.Tensor([list_onehot.tolist()[i] for i in list_index]).cuda().float()
model.train()
optimizer.zero_grad()
list_pred_logP = model(list_onehot, list_length).squeeze().cuda()
list_pred_logP.require_grad = False
train_loss = criterion(list_pred_logP, list_logP)
train_mae = mean_absolute_error(list_pred_logP.tolist(), list_logP.tolist())
epoch_train_loss += train_loss.item()
epoch_train_mae += train_mae
train_loss.backward()
optimizer.step()
bar.update(len(list_onehot))
epoch_train_loss /= len(data_train)
epoch_train_mae /= len(data_train)
return model, epoch_train_loss, epoch_train_mae
The list_onehot and list_length tensors are loaded from the DataLoader and uploaded to the GPU. Then, to use a packed sequence as input, I’ve sorted both list_onehot and list_length and uploaded them to the GPU. The model was uploaded to the GPU, and the h_in, c_in tensors and the packed sequence object were also uploaded to the GPU. However, when I try to run this code, it does not use the GPU but only the CPU. What should I do to train this model on the GPU? |
st83548 | @beneyal
Yes I did. First I did model.to(device) and now I’m doing model.cuda(), but neither works. |
st83549 | @beneyal
After calling model.cuda(). This is my experiment function
def experiment(dict_partition, device, bar, args):
time_start = time.time()
model = RNNNet(args)
model.cuda()
if args.optim == 'Adam':
optimizer = optim.Adam(model.parameters(),
lr=args.lr,
weight_decay=args.l2_coef)
elif args.optim == 'RMSprop':
optimizer = optim.RMSprop(model.parameters(),
lr=args.lr,
weight_decay=args.l2_coef)
elif args.optim == 'SGD':
optimizer = optim.SGD(model.parameters(),
lr=args.lr,
weight_decay=args.l2_coef)
else:
assert False, 'Undefined Optimizer Type'
criterion = nn.MSELoss()
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
list_train_loss = list()
list_val_loss = list()
list_train_mae = list()
list_val_mae = list()
data_train = DataLoader(dict_partition['train'], batch_size=args.batch_size, shuffle=args.shuffle)
data_val = DataLoader(dict_partition['val'], batch_size=args.batch_size, shuffle=args.shuffle)
for epoch in range(args.epoch):
scheduler.step()
model, train_loss, train_mae = train(model, device, optimizer, criterion, data_train, bar, args)
list_train_loss.append(train_loss)
list_train_mae.append(train_mae)
mode, val_loss, val_mae = validate(model, device, criterion, data_val, bar, args)
list_val_loss.append(val_loss)
list_val_mae.append(val_mae)
data_test = DataLoader(dict_partition['test'], batch_size=args.batch_size, shuffle=args.shuffle)
mae, std, logP_total, pred_logP_total = test(model, device, data_test, args)
time_end = time.time()
time_required = time_end - time_start
args.list_train_loss = list_train_loss
args.list_val_loss = list_val_loss
args.list_train_mae = list_train_mae
args.list_val_mae = list_val_mae
args.logP_total = logP_total
args.pred_logP_total = pred_logP_total
args.mae = mae
args.std = std
args.time_required = time_required
return args |
st83550 | Weird, I can’t think of any reason why it won’t work… You’re not getting any errors during training? |
st83551 | @beneyal
Yes the model is trained on CPU without error.
If I watch nvidia-smi, I can see that 477MB of data are uploaded to GPU memory, but the GPU is not used for training. |
st83552 | @beneyal
GitHub
SeungsuKim/CH485--AI-and-Chemistry 52
Contribute to SeungsuKim/CH485--AI-and-Chemistry development by creating an account on GitHub.
This is the link. You can see the Assignment6_logP_RNN.ipynb file.
Thanks a lot, I’ve been struggling with this problem for two days. |
st83553 | Not sure what’s going on, as the model seems to be on the GPU.
I would assume @Probe would get an error in forward, as it seems that
h_in = nn.Parameter(torch.randn(self.num_layer, batch_size, self.hidden_dim))
c_in = nn.Parameter(torch.randn(self.num_layer, batch_size, self.hidden_dim))
are still on the CPU, while self.lstm should be on the GPU.
Could you check that? |
st83554 | @ptrblck
No error occurs but the model runs on CPU. To upload h_in and c_in to GPU, what should I do among 3 possibilities?
h_in = nn.Parameter(torch.randn(self.num_layer, batch_size, self.hidden_dim)).cuda()
h_in = nn.Parameter(torch.randn(self.num_layer, batch_size, self.hidden_dim).cuda())
h_in = torch.randn(self.num_layer, batch_size, self.hidden_dim).cuda() |
st83555 | Use the second approach and try it again. I’m still not sure why the code doesn’t throw an error.
Let me know, if the model still runs on CPU and I’ll try to debug it a bit later. |
st83556 | @ptrblck
The second approach still causes no error but runs on the CPU.
Also, I’ve seen an article saying that pack_padded_sequence requires the length list to be a CPU tensor. In addition, the forward function of the RNN block returns only the last result of each sequence in the batch; I’ve used the torch.cat() function to do this. Might this be the reason? |
st83557 | @beneyal @ptrblck
I’m not sure why, but the problem has been solved
The answer was to use a custom collate function in the DataLoader, so that the DataLoader yields the PackedSequence object and the labels, instead of generating the PackedSequence inside the forward function of the custom LSTM module.
I will commit the working version soon. |
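Until then, the core of the collate function looks roughly like this — a sketch of the idea rather than the exact committed code, assuming each dataset item is an (onehot, length, logP) tuple with the onehot sequences already padded to a common length:
import torch
import torch.nn as nn

def my_collate(batch):
    onehots = torch.stack([item[0] for item in batch])
    lengths = torch.tensor([item[1] for item in batch])
    logP = torch.tensor([item[2] for item in batch], dtype=torch.float)
    # pack_padded_sequence expects the sequences sorted by decreasing length
    lengths, idx = torch.sort(lengths, descending=True)
    packed = nn.utils.rnn.pack_padded_sequence(onehots[idx], lengths, batch_first=True)
    return packed, logP[idx]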
st83558 | Good to hear you’ve solved this issue!
I’m still a bit confused why your code didn’t throw an error, as it seems some parameters were on the GPU while others stayed on the CPU. |
st83559 | Hi everybody,
I am replying to this topic since I am facing a similar problem to the one of @Probe, but his solution of using a custom collate function in the DataLoader is not working for me.
I have a recurrent autoencoder, of which I have to gauge the enconding capability, therefore my net is composed of two layers (code below):
an encoding layer composed by the LSTM;
a decoding layer, which is nothing but a dense layer that tries to reconstruct the input from the LSTM output.
class RnnLSTMAutoEncoder(nn.Module):
""" Rnn based on the LSTM model
Args:
input_length (int): input dimension
code_length (int): LSTM output dimension
num_layers (int): LSTM layers' number
"""
## Constructor
def __init__(self, input_length, code_length, num_layers=1):
super(RnnLSTMAutoEncoder, self).__init__()
# Attributes
self.input_length = input_length
self.code_length = code_length
self.num_layers = num_layers
# Nets
self.encodeLayer = nn.LSTM(self.input_length, self.code_length, num_layers=self.num_layers, batch_first=True)
self.decodeLayer = nn.Linear(self.code_length, self.input_length)
# Decode layer parameters' initialization
torch.nn.init.uniform_(self.decodeLayer.weight)
self.decodeLayer.bias = nn.Parameter(torch.zeros_like(self.decodeLayer.bias))
## Encode function
def encode(self, x):
# CODING
output, _ = self.encodeLayer(x)
return output
## Decode function
def decode(self, x):
# DECODING (linear dense layer followed by an activation function [identity in this case, so none])
x = self.decodeLayer(x)
return x
## Forward function
def forward(self, x):
encoded = self.encode(x)
if isinstance(encoded, torch.Tensor):
decoded = self.decode(encoded)
else:
unpacked, unpacked_length = nn.utils.rnn.pad_packed_sequence(encoded, batch_first=True)
vectors = list()
for i, vector in enumerate(unpacked):
vectors.append(unpacked[i, unpacked_length[i] - 1, :].view(1, -1))
decoded = self.decode(torch.cat(vectors, 0))
return decoded
Following Probe’s suggestion, I wrote my custom collate function as follows:
def my_collate(batch):
data = [item[0] for item in batch]
x = torch.stack(data)
# Lengths vector for the correct packing of the input
lengths = torch.zeros(x.size()[0])
for i in range(x.size()[0]):
for j in range(seq_length):
if sum(1 for k in x[i, j, :] if k != 0) == x.size()[2]:
lengths[i] += 1
# Both padded sequences and lengths should be ordered descendingly wrt to the sequence length
lengths, indices = torch.sort(lengths, descending=True)
lengths = lengths.type(torch.ByteTensor)
x = x[indices, :, :]
y = torch.zeros(train_batch_size, x.size()[2])
for i in range(train_batch_size):
seq_el_idx = lengths[i].item() - 1
y[i, :] = x[i, seq_el_idx, :]
# Packing the data
x = torch.nn.utils.rnn.pack_padded_sequence(x, lengths, batch_first=True)
return [x, y]
My dataset is made of vectors of features extracted from video frames, so what I give to the LSTM is a sequence of vectors that from step t goes back in time till step t-seq_length.
Obviously, for the first time steps (for example step 1, the first video frame), I have nothing that goes back in time. Thus, I wrote a custom Dataset class which in this case fills the sequence with zeros till it reaches seq_length, while my collate function converts it into a PackedSequence object (the x element returned in the batch).
For evaluating the net’s performance, instead, I just need to compute the loss between the last element of the sequence (rearranged into the y element returned in the batch) and the last element of the packed sequence I receive as output.
As Probe did in his code, with the custom collate function the DataLoader gives packedSequences as inputs to the autoencoder, while the padding of the output of the LSTM is handled in the forward function.
Everything works fine, but nonetheless my code is not running on the GPU.
I have debugged my code with PyCharm, and everything seems to be on the GPU: the input sequences, the LSTM output, the final autoencoder output, etc…, and in fact I can see the data uploaded to the GPU memory, but still, the whole training procedure takes place on the CPU.
I am currently managing the whole training procedure with Ignite, and my training code is the following:
## Data loader helper
def get_data_loaders(train_batch_size, val_batch_size, num_workers, train_dir, val_dir, seq_length):
# Custom data transformation
# example: data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
data_transform = transforms.Lambda(lambda x: normalize_feature_vector(x))
# Dataset instantiation
co_t_set = CoOccurrencesDatasetRnnTime(train_dir, seq_length, data_transform)
co_v_set = CoOccurrencesDatasetRnnTime(val_dir, seq_length, data_transform)
# Training set DataLoader
train_loader = Data.DataLoader(co_t_set, train_batch_size, collate_fn=my_collate, shuffle=False,
num_workers=num_workers)
# Validation set DataLoader
val_loader = Data.DataLoader(co_v_set, val_batch_size, collate_fn=my_collate, shuffle=False,
num_workers=num_workers)
return train_loader, val_loader
## Batch preparation
def autoencoder_batch(batch, device, non_blocking=False):
# Simply sends the data to GPU
x, y = batch
if device == 'cuda':
x = x.cuda().to(device)
y = y.cuda().to(device)
return x, y
## Training routine
def autoencoder_training(trainer, batch):
# Extract the input and "label"
bx, by = autoencoder_batch(batch, device)
# Send the model to GPU (if available)
if device == 'cuda':
model.to(device)
# Forwarding
model.train()
optimizer.zero_grad()
decoded = model(bx)
# Compute the loss
loss = loss_func(decoded, by)
# Optimize
loss.backward()
optimizer.step()
return loss.item()
### Model training ###
## Dataset loading parameters
train_path = 'training_set_path_on_my_machine'
val_path = 'validation_set_path_on_my_machine'
num_workers = 4
## Training parameters
epochs = 30
train_batch_size = 5
val_batch_size = 5
LR = 0.005 # learning rate
input_length = 625
code_length = 100
seq_length = 25
es_patience = 10
exp_decay = 0.95
log_dir = 'logging_directory_on_my_machine'
log_interval = 10000 # number of batches for each log on the console
## Logging configuration
logging.basicConfig(filename='logging_directory_on_my_machine',
filemode='w', format='%(name)s - %(levelname)s - %(message)s', level=logging.INFO)
if __name__ == '__main__':
# Dataloaders instantiation
print('Loading the datasets and extracting the features...')
logging.info('Loading the datasets and extracting the features...')
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size,
num_workers, train_path, val_path, seq_length)
print('Features extracted!')
logging.info('Features extracted!')
# Model instantiation
model = RnnLSTMAutoEncoder(input_length, code_length)
# Writer instantiation for TensorboardX
writer = create_summary_writer(model, train_loader, log_dir) # creates a summary write with tensorboardX
# GPU loading (if available)
device = 'cpu'
if torch.cuda.is_available():
device = 'cuda'
# Optimizer, trainer and evaluator instantiation
optimizer = optim.Adam(model.parameters(), lr=LR)
loss_func = nn.MSELoss()
trainer = Engine(autoencoder_training)
evaluator = create_supervised_evaluator(model,
metrics={'MSE': Loss(nn.MSELoss())},
device=device,
prepare_batch=autoencoder_batch)
## EVENTS HANDLER FOR IGNITE ##
# HANDLER FOR EACH COMPLETED ITERATION
@trainer.on(Events.ITERATION_COMPLETED)
def log_training_loss(engine):
iter = (engine.state.iteration - 1) % len(train_loader) + 1
if iter % log_interval == 0:
print("Epoch[{}] Iteration[{}/{}] Loss: {:.5f}"
"".format(engine.state.epoch, iter, len(train_loader), engine.state.output))
writer.add_scalar("training/loss", engine.state.output, engine.state.iteration)
logging.info("Epoch[{}] Iteration[{}/{}] Loss: {:.5f}"
"".format(engine.state.epoch, iter, len(train_loader), engine.state.output))
# HANDLERS FOR EACH COMPLETED EPOCH
# Early stopping implementation
def score_function(engine):
val_loss = engine.state.metrics['MSE']
return -val_loss
handler = EarlyStopping(patience=es_patience, score_function=score_function, trainer=trainer)
evaluator.add_event_handler(Events.EPOCH_COMPLETED, handler)
# training results logging
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
evaluator.run(train_loader)
metrics = evaluator.state.metrics
avg_MSE = metrics['MSE']
print("Training Results - Epoch: {}, Avg loss: {:.5f}"
.format(engine.state.epoch, avg_MSE))
writer.add_scalar("training/avg_loss", avg_MSE, engine.state.epoch)
logging.info('Training Results - Epoch: {}, Avg loss: {:.5f}'.format(engine.state.epoch, avg_MSE))
# validation results logging
@trainer.on(Events.EPOCH_COMPLETED)
def log_validation_results(engine):
evaluator.run(val_loader)
metrics = evaluator.state.metrics
avg_MSE = metrics['MSE']
print("Validation Results - Epoch: {}, Avg loss: {:.5f}"
.format(engine.state.epoch, avg_MSE))
writer.add_scalar("valdation/avg_loss", avg_MSE, engine.state.epoch)
logging.info('Validation Results - Epoch: {}, Avg loss: {:.5f}'.format(engine.state.epoch, avg_MSE))
## RUNNING
print('Training...')
trainer.run(train_loader, max_epochs=epochs)
writer.close()
Any suggestion on what it might be? Any help or hint is greatly appreciated.
Thanks for your time! |
st83560 | Hey everybody,
I reply here since I managed to solve my issue: the error was so stupid that I’m really embarrassed to write about it.
But still, I hope it may be useful for somebody, at least as a reminder that you need to take some breaks and rest from your code in order to clear your mind and see where the bugs are!
It turns out that the problem was how I wrote the my_collate function: I spent a lot of time computing the sequences’ lengths on the CPU, while the amount of computation executed on the GPU was so low that I could not see it on any performance profiler.
Therefore, I simply solved my issue by doing the most obvious thing: making my custom Dataset class return the sequence’s length along with the sequence itself, and using the my_collate function just to create the PackedSequence object for the input sequence. Here is the code for both of them in case it may be helpful for somebody.
My custom Dataset class:
import torch
import numpy as np
class CoOccurrencesDatasetRnnTime(torch.utils.data.Dataset):
"""
Support class for the loading and batching of the co-occurrences of video frames extracted offline.
The class returns directly the sequence along with its length
Args:
root_dir (string): file path of the .npy file containing the co-occurrences
sequence_length (int): length of the analyzed sequence by the RNN
transforms (object torchvision.transform): Pytorch's transforms used to process the co-occurrences
"""
## Constructor
def __init__(self, root_dir, sequence_length=1, transforms=None):
self.root_dir = root_dir
self.seq_length = sequence_length
self.transforms = transforms
self.co_occurrences = torch.from_numpy(np.load(root_dir)).type(torch.FloatTensor)
self.co_occurrences = self.co_occurrences.view(int(self.co_occurrences.size()[0]/10875), -1,
self.co_occurrences.size()[1])
## Override total dataset's length getter
def __len__(self):
return int(self.co_occurrences.size()[0]*10875)
#10875 is the number of features vector for each video frame
## Override single items' getter
def __getitem__(self, idx):
f_idx = int(np.floor(idx / 10875)) #frame index
p_idx = int(np.floor(idx % 10875)) #patch index inside the frame from which the features have been extracted
if self.transforms is not None:
if f_idx-self.seq_length < 0:
seq = torch.zeros(self.seq_length, self.co_occurrences.size()[2])
seq[0:f_idx+1, :] = self.transforms(self.co_occurrences[0:f_idx+1, p_idx, :])
seq_len = f_idx + 1
return [seq, seq_len], self.transforms(self.co_occurrences[f_idx, p_idx, :])
#only need the last element of the sequence as target value for the loss
else:
return [self.transforms(self.co_occurrences[f_idx-self.seq_length:f_idx, p_idx, :]), self.seq_length], \
self.transforms(self.co_occurrences[f_idx, p_idx, :])
else:
if f_idx-self.seq_length < 0:
seq = torch.zeros(self.seq_length, self.co_occurrences.size()[2])
seq[0:f_idx+1, :] = self.co_occurrences[0:f_idx+1, p_idx, :]
seq_len = f_idx + 1
return [seq, seq_len], self.co_occurrences[f_idx, p_idx, :]
else:
return [self.co_occurrences[f_idx-self.seq_length:f_idx, p_idx, :], self.seq_length], \
self.co_occurrences[f_idx, p_idx, :]
and my custom collate function for the Dataloader:
def my_collate(batch):
# Preparing input sequences
data = [item[0][0] for item in batch]
x = torch.stack(data)
seqs_length = torch.ByteTensor([item[0][1] for item in batch])
# Both padded sequences and lengths should be ordered descendingly wrt to the sequence length
lengths, indices = torch.sort(seqs_length, descending=True)
x = x[indices, :, :]
# Packing the data
x = torch.nn.utils.rnn.pack_padded_sequence(x, lengths, batch_first=True)
# Preparing target values
y = [item[1] for item in batch]
y = torch.stack(y)
return [x, y]
Now the whole procedure takes place on GPU! |
st83561 | The real bug here is that you use a profiler to determine whether the model is running on the GPU or not. That makes no sense to me; the GPU is underutilized all the time |
st83562 | Hi @lugiavn,
I’m not sure what you are asking, but probably I was not really clear in explaining what my problem was in the first place, so I’ll try to explain it again.
I have debugged my code with PyCharm, and everything seems to be on the GPU: the input sequences, the LSTM output, the final autoencoder output, etc…, and in fact I can see the data uploaded to the GPU memory, but still, the whole training procedure takes place on the CPU.
Initially, as I wrote in my first post, I used PyCharm’s debugger to check if my model was effectively uploaded on GPU, and in fact everything was so.
Nevertheless, I noticed that the GPU was not working, meaning that if I looked at the % of its resource used in a performance profiler, it always indicated 0% or 1%, even if I could see some data uploaded in its memory.
The reason for this behaviour, I think, was that in my previous code, for each batch load, I spent a lot of time computing the sequences’ lengths (a very stupid thing to do) on the CPU with the DataLoader’s collate function, while the amount of computation executed on the GPU was so little that I could not see its resources being used (it continuously switched between 0% and 1%).
So, maybe it’s better to say that I had a huge bottleneck in my code: when packing each batch, I did a lot of (useless) work on the CPU for computing the sequence’s length, while the work executed on the GPU itself was so rapidly done (since my network is rather small) that it gave me the impression of not being executed by the GPU at all.
With the fix of my last post, this bottleneck is removed, the training procedure goes as I expect it to go and I can see the GPU’s resources used (since probably there is no more a continous switch between the CPU and GPU during the training): a very stupid error, I’m really sorry if anyone has wasted some time on it!
Anyway, hope this makes more sense to you? Let me know!
Cheers |
st83563 | I am playing with a simple regression problem using a neural network. Here is the function to generate the data:
def generate_data():
while(1):
x,y,z = [random.uniform(0,100) for _ in range(3)]
gt = (x**3 + math.log(y)) * math.cos(z)
yield x,y,z,gt
I generated 36000 data points for this problem. To solve this problem, I proposed this network:
class Net(nn.Module):
def __init__(self, ):
super(Net, self).__init__()
self.l1 = nn.Sequential(nn.Linear(3,500), nn.ReLU())
self.l2 = nn.Sequential(nn.Linear(500,300), nn.ReLU())
self.l3 = nn.Sequential(nn.Linear(300,100), nn.ReLU())
self.l4 = nn.Linear(100,1)
def forward(self, x):
x = self.l1(x)
return self.l4(self.l3(self.l2(x)))
The settings for training as below:
net = Net()
loss_fn = nn.MSELoss()
optimizer = optim.SGD(net.parameters(), lr = 0.0001)
### some code for dataset and dataloader here
for _ in range(3000):
loss_avg = 0
for x, gt in dataloader:
pred = net(x)
loss = loss_fn(pred, gt)
loss_avg += loss/1000.0
c+=1
loss.backward()
optimizer.step()
print(f"Loss : {loss_avg}")
However, my model doesn’t learn anything. The loss doesn’t decrease but bounces up and down. What I have already tried:
Change the learning rate => doesn’t work
Add the norm layers after RELU => loss quickly grows up to inf.
Divided the input by its max value, doesn’t work either.
Normalize both input and output => loss up and down, didn’t reduce.
Could you help me figure out what I did wrong? Or any suggestions or hints on how to debug this? |
st83564 | I think the problem is that you are accumulating the gradients. You need to set them to zero after each gradient update. Try adding optimizer.zero_grad() just before loss.backward(). |
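In other words, something like this (a sketch reusing the names from your training loop):
for x, gt in dataloader:
    optimizer.zero_grad()  # clear the gradients accumulated in the previous iteration
    pred = net(x)
    loss = loss_fn(pred, gt)
    loss.backward()
    optimizer.step()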
st83565 | Thanks @Link88. Indeed, you pointed out what I missed in the pipeline. I put it back into my code, but the problem remains the same as before. I am still looking for a robust solution (and trying different approaches), so if you find anything else, please let me know. |
st83566 | When doing convolutional neural network visualization, how do I replace the VGG19 network in an existing program with my own self-defined, trained network?
The code is shown below:
# Define a visualization function
def deepcamvis(imagepath):
    import matplotlib.pyplot as plt
    import numpy as np
    import cv2
    from keras import backend as K
    from keras.applications.vgg19 import VGG19, preprocess_input, decode_predictions
    from keras.preprocessing import image
    # Load the model
    model = VGG19(weights="imagenet")
    # Read the image to be classified
    im = image.load_img(imagepath, target_size=(224, 224))
    # Convert it to an array
    imx = image.img_to_array(im)
    # Add a dimension to obtain a 1*224*224*3 array
    imx = np.expand_dims(imx, axis=0)
    # Preprocess (standardize) the image
    imx = preprocess_input(imx)
    # Use the VGG19 network to predict the image class
    impre = model.predict(imx)
    # Decode the prediction
    imprename = decode_predictions(impre, top=5)
    # The most likely classes
    print(imprename)
    # Index of the predicted class
    impreindex = np.argmax(impre)
    # Visualize the class activation map
    # Element of the prediction vector corresponding to the predicted class
    impre_output = model.output[:, impreindex]
    ## Output feature map of the convolutional layer block5_conv4 (Conv2D)
    conv_femap = model.get_layer("block5_conv4")
    ## Gradient of the class score w.r.t. the output feature map
    grads = K.gradients(impre_output, conv_femap.output)[0]
    ## Mean gradient magnitude per channel, a vector of shape (512,)
    conv_grads = K.mean(grads, axis=(0, 1, 2))
    ## Access the values of the variables defined above
    iterate = K.function([model.input], [conv_grads, conv_femap.output[0]])
    conv_grads_val, conv_femap_val = iterate([imx])
    ## Multiply each channel of the feature map by its importance for the class
    for ii in range(conv_femap_val.shape[2]):
        conv_femap_val[:, :, ii] = conv_femap_val[:, :, ii] * conv_grads_val[ii]
    ## Compute the heatmap
    heatmap = np.mean(conv_femap_val, axis=-1)
    ## Normalize the heatmap to the range 0~1
    heatmap = np.maximum(heatmap, 0)
    heatmap = heatmap / np.max(heatmap)
    ## Overlay the generated heatmap on the original image
    imag = cv2.imread(imagepath)
    heatmap = np.uint8(cv2.resize(heatmap, (imag.shape[1], imag.shape[0])) * 255)
    ## Generate a pseudo-color heatmap
    heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
    ## New image
    imagnew = np.uint8(heatmap * 0.45 + imag)
    ## OpenCV image to Matplotlib
    imagnewrgb = imagnew[..., ::-1]
    plt.figure()
    plt.imshow(imagnewrgb)
    plt.show()
    return imagnewrgb
st83567 | a = torch.randn(2,3,2,2, requires_grad=True) #i.e.(batch,chnl,h,w)
amax = torch.argmax(a, dim=1)
print(a) :
print(amax):
My aim is to convert the result to binary values, such that the value at the max index (along dim=1) is 1 and the others are 0.
For example, based on the above, i want to get this:
How do i achieve that?
I somewhat feel I am missing a simple way to do it, but I just couldn’t figure it out. |