instruction | input | output | source
---|---|---|---
TypeError: img should be PIL Image. Got | import torch
import torch.nn as nn
import torchvision.transforms.functional as TF
class DoubleConv(nn.Module):
def __init__(self, in_channels, out_channels):
super(DoubleConv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, 1, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
)
def forward(self, x):
return self.conv(x)
class UNET(nn.Module):
def __init__(
self, in_channels=3, out_channels=1, features=[64, 128, 256, 512],
):
super(UNET, self).__init__()
self.ups = nn.ModuleList()
self.downs = nn.ModuleList()
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
# Down part of UNET
for feature in features:
self.downs.append(DoubleConv(in_channels, feature))
in_channels = feature
# Up part of UNET
for feature in reversed(features):
self.ups.append(
nn.ConvTranspose2d(
feature*2, feature, kernel_size=2, stride=2,
)
)
self.ups.append(DoubleConv(feature*2, feature))
self.bottleneck = DoubleConv(features[-1], features[-1]*2)
self.final_conv = nn.Conv2d(features[0], out_channels, kernel_size=1)
def forward(self, x):
skip_connections = []
for down in self.downs:
x = down(x)
skip_connections.append(x)
x = self.pool(x)
x = self.bottleneck(x)
skip_connections = skip_connections[::-1]
for idx in range(0, len(self.ups), 2):
x = self.ups[idx](x)
skip_connection = skip_connections[idx//2]
if x.shape != skip_connection.shape:
x = TF.resize(x, size=skip_connection.shape[2:])
concat_skip = torch.cat((skip_connection, x), dim=1)
x = self.ups[idx+1](concat_skip)
return self.final_conv(x)
def test():
x = torch.randn((3, 1, 161, 161))
model = UNET(in_channels=1, out_channels=1)
preds = model(x)
#print(preds.shape)
print(x.shape)
assert preds.shape == x.shape
if __name__ == "__main__":
test()
This is my code for UNet. The error message I am getting is:
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
TypeError: img should be PIL Image. Got <class 'torch.Tensor'>
The error points to this line: x = TF.resize(x, size=skip_connection.shape[2:])
What am I doing wrong here?
| As stated in the error you received, TF.resize expects an input of type PIL.Image.
PIL is a Python library, found here, used for loading and processing images.
https://python-pillow.org/
Docs for the function in pytorch:
https://pytorch.org/vision/0.8/_modules/torchvision/transforms/functional.html#resize
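A minimal sketch (an assumption, not part of the original answer) of one way to keep the resize on tensors, using torch.nn.functional.interpolate instead of TF.resize:
import torch
import torch.nn.functional as F

x = torch.randn(1, 64, 80, 80)                 # decoder feature map (N, C, H, W)
skip_connection = torch.randn(1, 64, 81, 81)   # skip connection with a different spatial size
if x.shape != skip_connection.shape:
    # interpolate works directly on 4D tensors, so no PIL conversion is needed
    x = F.interpolate(x, size=skip_connection.shape[2:])
print(x.shape)  # torch.Size([1, 64, 81, 81])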
| https://stackoverflow.com/questions/66309024/ |
kmeans clustering python | These are the coordinates that I want to cluster:
[[0, 107], [0, 108], [0, 109], [0, 115], [0, 116],
[0, 117], [0, 118], [0, 125], [0, 126], [0, 127],
[0, 128], [0, 135], [0, 136], [0, 194], [0, 195],
[1, 107], [1, 108], [1, 109], [1, 110], [1, 114],
[1, 115], [1, 116], [1, 117], [1, 118], [1, 119]...]
The result of clustering using KMeans:
from sklearn.cluster import KMeans
num_clusters = 9
km = KMeans(n_clusters=num_clusters)
km_fit = km.fit(nonzero_pred_sub)
>>>array([7, 7, 7, 1, 1, 1, 1, 5, 5, 5, 5, 3, 3, 0, 0, 7, 7, 7, 7, 1, 1, 1,
1, 1, 1, 5, 5, 5...]
I want to know the coordinates of the i-th cluster.
For example, I need the elements of a single cluster, and I can assume [0, 107], [0, 108], [0, 109] were clustered into the 7th cluster.
How can I get the coordinates belonging to a cluster?
| I assume you want the coordinates assigned to the 7th cluster. You can do so by storing your results in a dictionary:
from sklearn.cluster import KMeans
km = KMeans(n_clusters=9)
km_fit = km.fit(nonzero_pred_sub)
d = dict() # dictionary linking cluster id to coordinates
labels = km_fit.labels_ # cluster id assigned to each coordinate
for i in range(len(labels)):
cluster_id = labels[i]
if cluster_id not in d:
d[cluster_id] = []
d[cluster_id].append(nonzero_pred_sub[i])
# that way you can access the 7th cluster coordinates like this
d[7]
>>> [[0, 107], [0, 108], [0, 109], [1, 107], [1, 108], [1, 109], [1, 110], ...]
To remove the "if" section in the loop, you can try looking into defaultdict objects.
You can also surely manage that with pandas dataframes to make manipulating more complex results easier.
If I misunderstood your question, and what you want is the coordinates of the center of the i-th cluster, you can get them by calling km_fit.cluster_centers_[i] (cf. doc).
| https://stackoverflow.com/questions/66312861/ |
pytorch KAIR example on Android | I am stuck trying to trace/script KAIR's FFDNet model for Android. The model's forward looks like:
def forward(self, x): #, paddingBottom, paddingRight): #, sigma):
noise_level_model = 15
sigma = torch.full((1, 1, 1, 1), noise_level_model / 255.).type_as(x)
h, w = x.size()[-2:]
paddingBottom = int(np.ceil(h/2)*2-h)
paddingRight = int(np.ceil(w/2)*2-w)
x = torch.nn.ReplicationPad2d((0, paddingRight, 0, paddingBottom))(x)
x = self.m_down(x)
# m = torch.ones(sigma.size()[0], sigma.size()[1], x.size()[-2], x.size()[-1]).type_as(x).mul(sigma)
m = sigma.repeat(1, 1, x.size()[-2], x.size()[-1])
x = torch.cat((x, m), 1)
x = self.model(x)
x = self.m_up(x)
x = x[..., :h, :w]
return x
If I trace it I get some warnings about padding arguments, but the model works on Android. The problem is that it doesn't work with inputs of different sizes, only with the same size as 'test1.jpeg':
model_name = 'ffdnet_color'
model_pool = 'model_zoo'
model_path = os.path.join(model_pool, model_name + '.pth')
n_channels = 3
nc = 96
nb = 12
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = net(in_nc=n_channels, out_nc=n_channels, nc=nc, nb=nb, act_mode='R')
model.load_state_dict(torch.load(model_path), strict=True)
model.eval()
for k, v in model.named_parameters():
v.requires_grad = False
model = model.to(device)
img = 'testsets/myset/test1.jpeg'
img_name, ext = os.path.splitext(os.path.basename(img))
img_L = util.imread_uint(img, n_channels=n_channels)
img_L = util.uint2single(img_L)
noise_level_model = 15
img_L = util.single2tensor4(img_L)
img_L = img_L.to(device)
sigma_ = torch.full((1, 1, 1, 1), noise_level_model / 255)
sigma = torch.full((1, 1, 1, 1), noise_level_model / 255.).type_as(img_L)
traced_model = torch.jit.trace(model, img_L)
traced_optimized = optimize_for_mobile(traced_model)
save_path = os.path.splitext(os.path.basename(model_path))[0] + '-mobile.pth'
traced_optimized.save(save_path)
I've tried to script the model with traced_model = torch.jit.script(model) but got this error:
TypeError: cannot create weak reference to 'numpy.ufunc' object
What should I do to make the model work with different input sizes on mobile?
| I encountered a similar issue. It was due to my model using numpy math operations (which are numpy.ufunc). I fixed the issue by replacing all of the numpy ufuncs (e.g. np.add, np.ceil, and +, - etc. on ndarrays) with the corresponding torch versions (e.g. torch.add, torch.sub, etc.).
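For instance, a hedged sketch (an assumption, not part of the original answer) of how the np.ceil-based padding computation in the forward pass above could be rewritten without numpy, so torch.jit.script no longer encounters a numpy.ufunc:
import torch

def compute_padding(x: torch.Tensor):
    # same values as int(np.ceil(h / 2) * 2 - h), written with plain Python ints
    h, w = x.size()[-2:]
    padding_bottom = (2 - h % 2) % 2
    padding_right = (2 - w % 2) % 2
    return padding_bottom, padding_right

print(compute_padding(torch.zeros(1, 3, 321, 480)))  # (1, 0)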
| https://stackoverflow.com/questions/66314426/ |
How to find r2score of my PyTorch model for regression | I have a UNet model. I'm building a regression model since, in my output, I have different floating-point values for each pixel. In order to check the r2score, I tried to put the code below in the model class's training_step, validation_step, and test_step.
from pytorch_lightning.metrics.functional import r2score
r2 = r2score(pred, y)
self.log('r2:',r2)
But it's giving the following error
ValueError: Expected both prediction and target to be 1D or 2D tensors, but recevied tensors with dimension torch.Size([50, 1, 32, 32])
How can I check my model fit?
| The issue is that the function accepts 1D or 2D tensors, but your tensor is 4D (B x C x H x W). So to use the function you should reshape it:
r2 = r2score(pred.view(pred.shape[1], -1), y.view(y.shape[1], -1))
| https://stackoverflow.com/questions/66317323/ |
How to create a tensor by accessing specific values at given indices of a 2 X 2 tensor in pytorch? | Suppose
mat = torch.rand((5,7)) and I want to get values from the 1st dimension (here, of size 7) by passing the indices, say idxs=[0,4,2,3,6]. The way I am able to do it now is by doing mat[[0,1,2,3,4],idxs]. I expected mat[:,idxs] to work, but it didn't. Is the first option the only way, or is there a better way?
| torch.gather is what you are looking for:
torch.gather(mat, 1, torch.tensor(idxs)[:, None])
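A small usage sketch of that call, reusing the shapes from the question:
import torch

mat = torch.rand((5, 7))
idxs = [0, 4, 2, 3, 6]
out = torch.gather(mat, 1, torch.tensor(idxs)[:, None])  # shape (5, 1)
# same values as mat[[0, 1, 2, 3, 4], idxs], just kept as a column
print(torch.equal(out.squeeze(1), mat[[0, 1, 2, 3, 4], idxs]))  # True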
| https://stackoverflow.com/questions/66329802/ |
Pytorch N - Beats model throwing error: 'str' object has no attribute '__name__' | I'm trying to replicate the N-BEATS model from pytorch-forecasting in Colab. I copied the same code from https://pytorch-forecasting.readthedocs.io/en/stable/tutorials/ar.html to a Colab notebook. An error shows up at the training cell.
import os
import warnings
warnings.filterwarnings("ignore")
os.chdir("../../..")
import pandas as pd
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping
import torch
from pytorch_forecasting import Baseline, NBeats, TimeSeriesDataSet
from pytorch_forecasting.data import NaNLabelEncoder
from pytorch_forecasting.data.examples import generate_ar_data
from pytorch_forecasting.metrics import SMAPE
data = generate_ar_data(seasonality=10.0, timesteps=400, n_series=100, seed=42)
data["static"] = 2
data["date"] = pd.Timestamp("2020-01-01") + pd.to_timedelta(data.time_idx, "D")
data.head()
# create dataset and dataloaders
max_encoder_length = 60
max_prediction_length = 20
training_cutoff = data["time_idx"].max() - max_prediction_length
context_length = max_encoder_length
prediction_length = max_prediction_length
training = TimeSeriesDataSet(
data[lambda x: x.time_idx <= training_cutoff],
time_idx="time_idx",
target="value",
categorical_encoders={"series": NaNLabelEncoder().fit(data.series)},
group_ids=["series"],
# only unknown variable is "value" - and N-Beats can also not take any additional variables
time_varying_unknown_reals=["value"],
max_encoder_length=context_length,
max_prediction_length=prediction_length,
)
validation = TimeSeriesDataSet.from_dataset(training, data, min_prediction_idx=training_cutoff + 1)
batch_size = 128
train_dataloader = training.to_dataloader(train=True, batch_size=batch_size, num_workers=0)
val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size, num_workers=0)
Error is:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-67-db4b0ef13391> in <module>()
25 net,
26 train_dataloader=train_dataloader,
---> 27 val_dataloaders=val_dataloader,
28 )
30 frames
/usr/local/lib/python3.7/dist-packages/yaml/representer.py in represent_object(self, data)
329 if dictitems is not None:
330 dictitems = dict(dictitems)
--> 331 if function.__name__ == '__newobj__':
332 function = args[0]
333 args = args[1:]
AttributeError: 'str' object has no attribute '__name__'
| Downgrading pytorch-lightning from 1.2.1 to 1.1.8 solved it for me.
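A minimal sketch of that downgrade (assuming the package was installed with pip):
pip install pytorch-lightning==1.1.8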
| https://stackoverflow.com/questions/66342637/ |
creating a progress bar for a validation test Python | The following code is used to find p-values for a significance test on the CIFAR-10 dataset.
Because we need a minimum of 1000 permutations, it is a very slow process, and I want to include a progress bar to show the time for each permutation.
I was thinking of using the tqdm library and the sleep function, but am stuck on where to go from here. Any help would be greatly appreciated!
'''
def validate_significance(val_loader, model, criterion, args):
model.eval()
vec_acc1 = []
vec_acc1_chance = []
vec_acc5 = []
vec_acc5_chance = []
for ss in range(0, args.num_permutations):
val_loader = get_rand_sample_loader(val_loader.dataset, args)
acc1_over_one_permutaion = 0
acc1_chance_over_one_permutation = 0
acc5_over_one_permutaion = 0
acc5_chance_over_one_permutation = 0
cnt = 0
with torch.no_grad():
for i, (images, target) in enumerate(val_loader):
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
# measure accuracy on true labels
acc1, acc5 = accuracy(output, target, topk=(1, 5))
acc1_over_one_permutaion += acc1.item()
acc5_over_one_permutaion += acc5.item()
# now, measure accuracy on permuted labels
target = target[torch.randperm(len(target))] # scrambling the labels
acc1_perm, acc5_perm = accuracy(output, target, topk=(1, 5))
acc1_chance_over_one_permutation += acc1_perm.item()
acc5_chance_over_one_permutation += acc5_perm.item()
cnt += 1
vec_acc1.append(acc1_over_one_permutaion / cnt)
vec_acc1_chance.append(acc1_chance_over_one_permutation / cnt)
vec_acc5.append(acc5_over_one_permutaion / cnt)
vec_acc5_chance.append(acc5_chance_over_one_permutation / cnt)
p_acc1 = stats.ttest_ind(vec_acc1, vec_acc1_chance, equal_var=False)
p_acc5 = stats.ttest_ind(vec_acc5, vec_acc5_chance, equal_var=False)
return p_acc1, p_acc5
'''
| from tqdm import tqdm
def validate_significance(val_loader, model, criterion, args):
model.eval()
vec_acc1 = []
vec_acc1_chance = []
vec_acc5 = []
vec_acc5_chance = []
for ss in tqdm(range(0, args.num_permutations)):
....
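Optionally (an assumption, not part of the original answer), the bar can also display extra information per iteration via set_postfix; a self-contained illustration with placeholder work:
from tqdm import tqdm
import time

num_permutations = 10
pbar = tqdm(range(num_permutations))
for ss in pbar:
    time.sleep(0.1)                   # stand-in for one permutation's work
    pbar.set_postfix(permutation=ss)  # extra info shown next to the bar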
| https://stackoverflow.com/questions/66351236/ |
Pytorch C++ API : CMake Issue | I want to include the PyTorch C++ API in the large C++ software project I am working on.
For legacy reasons, I must use find_package and the associated find_path and find_library functions, instead of the proposed target_link_libraries.
Here's my FindTORCH.cmake :
include( FindPackageHandleStandardArgs )
find_path( TORCH_INCLUDE_DIR torch/torch.h
PATHS
/path/to/libtorch/include/torch/csrc/api/include/
NO_DEFAULT_PATH )
find_library( TORCH_LIBRARIES libtorch.so
PATHS
/path/to/libtorch/lib/
NO_DEFAULT_PATH )
FIND_PACKAGE_HANDLE_STANDARD_ARGS( TORCH REQUIRED_VARS TORCH_INCLUDE_DIR TORCH_LIBRARIES )
if ( TORCH_FOUND )
message( STATUS "Torch found" )
endif( TORCH_FOUND )
mark_as_advanced( TORCH_LIBRARIES TORCH_INCLUDE_DIR )
At compile time, the torch files are found and I can include <torch/torch.h> in an arbitrary .cxx in the project.
However, if I add to the .cxx :
torch::Tensor tensor = torch::rand({2, 3});
cout << tensor << std::endl;
Then I can no longer compile, and I get the following error:
/path/to/libtorch/include/torch/csrc/api/include/torch/utils.h:4:10: fatal error: ATen/record_function.h: No such file or directory
#include <ATen/record_function.h>
^~~~~~~~~~~~~~~~~~~~~~~~
compilation terminated.
I'm working on Ubuntu 18, with C++14, and the CMake version is 3.10.2.
Thanks in advance
| Torch exposes its own targets. To use them effectively, simply remove FindTORCH.cmake from your project, and add /path/to/libtorch/ to your prefix path:
cmake_minimum_required(VERSION 3.19) # or whatever version you use
project(your-project CXX)
list(APPEND CMAKE_PREFIX_PATH "/path/to/libtorch/")
find_package(Torch REQUIRED CONFIG) # this ensures it finds the config file provided by pytorch
add_executable(your-executable main.cpp)
target_link_libraries(your-executable PUBLIC torch::Tensor)
If you really insist on using your own FindTorch.cmake instead of the correct one, you can modify it to create an imported target that you will then link against.
You can change your find module very slightly to gain a modern CMake interface from it:
include( FindPackageHandleStandardArgs )
find_path( TORCH_INCLUDE_DIR torch/torch.h
PATHS
/path/to/libtorch/include/torch/csrc/api/include/
NO_DEFAULT_PATH )
find_library( TORCH_LIBRARIES libtorch.so
PATHS
/path/to/libtorch/lib/
NO_DEFAULT_PATH )
FIND_PACKAGE_HANDLE_STANDARD_ARGS( TORCH REQUIRED_VARS TORCH_INCLUDE_DIR TORCH_LIBRARIES )
if ( TORCH_FOUND )
message( STATUS "Torch found" )
add_library(torch::Tensor SHARED IMPORTED) # mimic the names from pytorch maintainers
set_target_properties(torch::Tensor
PROPERTIES
IMPORTED_LOCATION "${TORCH_LIBRARIES}"
INTERFACE_INCLUDE_DIRECTORIES "${TORCH_INCLUDE_DIR}"
# on windows, set IMPORTED_IMPLIB to the .lib
)
endif( TORCH_FOUND )
mark_as_advanced( TORCH_LIBRARIES TORCH_INCLUDE_DIR )
Then, in your main CMake file, you can use the imported target like any other targets:
find_package(Torch REQUIRED)
add_executable(your-executable main.cpp)
target_link_libraries(your-executable PUBLIC torch::Tensor)
| https://stackoverflow.com/questions/66356955/ |
Why does Python's cProfile report a different elapsed time than using time.time() deltas when using PyTorch? | I am profiling some code using PyTorch. I am aware that CUDA normally has some asynchronous execution (see PyTorch docs), but I believe that transferring from GPU to CPU will generally force synchronization.
For this reason, I decided to naively profile using cProfile, but I noticed that the time reported by Profile.enable() ... Profile.disable() was different from the time recorded across time.time() (as a delta).
Here's what the code looks like at a high-level:
gpu = torch.device("cuda")
cpu = torch.device("cpu")
setup = Setup()
net = make_fcn_resnet50(num_classes=setup.D)
net.eval().to(gpu)
rgb_tensor = setup.sample(device=cpu)
pr = profile.Profile()
pr.enable()
t_start = time.time()
rgb_tensor = rgb_tensor.to(gpu)
y = net(rgb_tensor)
dd_tensor = y["out"]
dd_mean = torch.mean(dd_tensor[[0]]).to(cpu).numpy()
assert dd_mean is not None
dt = time.time() - t_start
pr.disable()
stats = pstats.Stats(pr)
stats.print_stats(5)
print(f"dt: {dt:.4f}s")
Here's the discrepancy I see:
2925 function calls (2734 primitive calls) in 0.009 seconds
...
dt: 0.0355s
I would've expected cProfile to report about 35ms (same as dt), but instead it reports about 10ms.
Why does this happen?
Full code + repro is here:
https://github.com/EricCousineau-TRI/repro/tree/bdef8a14/python/cprofile_with_torch
| Empirically, it seems like cProfile will not "hook" into the code if you don't "flush" all of the outputs, or if your code isn't fully wrapped in a function.
See more detail in the comments here:
https://github.com/EricCousineau-TRI/repro/blob/bdef8a14b5/python/cprofile_with_torch/repro.py#L75-L94
All timing results recorded with:
Ubuntu 18.04
CPython 3.6.9
nvidia-driver-450 (450.102.04-0ubuntu0.18.04.1)
NVidia Titan RTX
That all being said, it may be better just to use PyTorch's provided mechanisms (:facepalm:):
https://pytorch.org/tutorials/beginner/profiler.html
https://pytorch.org/tutorials/recipes/recipes/profiler_recipe.html
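A hedged sketch of what using the linked profiler looks like (with a stand-in torchvision model rather than the FCN from the question):
import torch
import torchvision

net = torchvision.models.resnet18().eval().cuda()
x = torch.randn(1, 3, 224, 224, device="cuda")

with torch.autograd.profiler.profile(use_cuda=True) as prof:
    y = net(x)
print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=5))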
| https://stackoverflow.com/questions/66361942/ |
CUDA initialization: Unexpected error from cudaGetDeviceCount() | I was running a deep learning program on my Linux server and I suddenly got this error.
UserWarning: CUDA initialization: Unexpected error from cudaGetDeviceCount(). Did you run some cuda functions before calling NumCudaDevices() that might have already set an error? Error 804: forward compatibility was attempted on non supported HW (Triggered internally at /opt/conda/conda-bld/pytorch_1603729096996/work/c10/cuda/CUDAFunctions.cpp:100.)
Earlier, when I had just created this conda environment, torch.cuda.is_available() returned true and I could use CUDA and the GPU. But all of a sudden I could not use CUDA and torch.cuda.is_available() returned false. What should I do?
P.S. I use a GeForce RTX 3080 and CUDA 11.0 + PyTorch 1.7.0. It worked before but now it doesn't.
| I just tried rebooting. Problem solved. Turned out that it was caused by NVIDIA NVML Driver/library version mismatch.
| https://stackoverflow.com/questions/66371130/ |
from ._nnls import nnls ImportError: DLL load failed: The specified module could not be found | While running UNet training code I got a 'DLL load failed' error. Here is the code:
'''
import torch
import scipy
import albumentations as A
from ._nnls import nnls
from albumentations.pytorch import ToTensorV2
from tqdm import tqdm
import torch.nn as nn
import torch.optim as optim
from unet_model import UNet
from utilscar import (
load_checkpoint,
save_checkpoint,
get_loaders,
check_accuracy,
save_predictions_as_imgs,
)
# Hyperparameters etc.
LEARNING_RATE = 1e-4
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
BATCH_SIZE = 16
NUM_EPOCHS = 3
NUM_WORKERS = 2
IMAGE_HEIGHT = 160 # 1280 originally
IMAGE_WIDTH = 240 # 1918 originally
PIN_MEMORY = True
LOAD_MODEL = False
TRAIN_IMG_DIR = "Dataset/train_images/"
TRAIN_MASK_DIR = "Dataset/train_masks/"
VAL_IMG_DIR = "Dataset/val_images/"
VAL_MASK_DIR = "Dataset/val_masks/"
def train_fn(loader, model, optimizer, loss_fn, scaler):
loop = tqdm(loader)
for batch_idx, (data, targets) in enumerate(loop):
data = data.to(device=DEVICE)
targets = targets.float().unsqueeze(1).to(device=DEVICE)
# forward
with torch.cuda.amp.autocast():
predictions = model(data)
loss = loss_fn(predictions, targets)
# backward
optimizer.zero_grad()
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
# update tqdm loop
loop.set_postfix(loss=loss.item())
def main():
train_transform = A.Compose(
[
A.Resize(height=IMAGE_HEIGHT, width=IMAGE_WIDTH),
A.Rotate(limit=35, p=1.0),
A.HorizontalFlip(p=0.5),
A.VerticalFlip(p=0.1),
A.Normalize(
mean=[0.0, 0.0, 0.0],
std=[1.0, 1.0, 1.0],
max_pixel_value=255.0,
),
ToTensorV2(),
],
)
val_transform = A.Compose(
[
A.Resize(height=IMAGE_HEIGHT, width=IMAGE_WIDTH),
A.Normalize(
mean=[0.0, 0.0, 0.0],
std=[1.0, 1.0, 1.0],
max_pixel_value=255.0,
),
ToTensorV2(),
],
)
model = UNet(in_channels=3, out_channels=1).to(DEVICE)
loss_fn = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
train_loader, val_loader = get_loaders(
TRAIN_IMG_DIR,
TRAIN_MASK_DIR,
VAL_IMG_DIR,
VAL_MASK_DIR,
BATCH_SIZE,
train_transform,
val_transform,
NUM_WORKERS,
PIN_MEMORY,
)
if LOAD_MODEL:
load_checkpoint(torch.load("my_checkpoint.pth.tar"), model)
check_accuracy(val_loader, model, device=DEVICE)
scaler = torch.cuda.amp.GradScaler()
for epoch in range(NUM_EPOCHS):
train_fn(train_loader, model, optimizer, loss_fn, scaler)
# save model
checkpoint = {
"state_dict": model.state_dict(),
"optimizer":optimizer.state_dict(),
}
save_checkpoint(checkpoint)
# check accuracy
check_accuracy(val_loader, model, device=DEVICE)
# print some examples to a folder
save_predictions_as_imgs(
val_loader, model, folder="saved_images/", device=DEVICE
)
if __name__ == "__main__":
main()
'''
I found this error:
from ._nnls import nnls
ImportError: DLL load failed: The specified module could not be found.
Here I want to mention that my Python is 64-bit and all the libraries are also 64-bit. I updated everything using conda update.
| The below solution worked for me.
conda remove --force numpy scipy
pip install -U numpy scipy
Successfully installed numpy-1.19.5 scipy-1.5.4
Reference: https://github.com/conda/conda/issues/6396#issuecomment-350254762
| https://stackoverflow.com/questions/66378763/ |
sqrt_vml_cpu not implemented for 'Long' | a = torch.full([2, 2], 9)
b = a.sqrt()
print(b)
b = a.rsqrt()
print(b)
RuntimeError: sqrt_vml_cpu not implemented for 'Long'
a is a torch.LongTensor, but sqrt and rsqrt do not support Long. What should I do?
| I couldn't reproduce your code because, when I run it, I get the following error (PyTorch 1.6):
RuntimeError: Providing a bool or integral fill value without setting the
optional `dtype` or `out` arguments is currently unsupported. In PyTorch 1.7,
when `dtype` and `out` are not set a bool fill value will return a tensor of
torch.bool dtype, and an integral fill value will return a tensor of
torch.long dtype.
However, I think your problem occurs because PyTorch does not support the square root operation for Long/torch.int64. You should use another data type. This should do it:
a = torch.full([2, 2], 9, dtype=torch.float32)
b = a.sqrt()
b = a.rsqrt()
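Alternatively (an assumption, not part of the original answer), an existing Long tensor can simply be cast before the operation:
import torch

a = torch.tensor([[9, 9], [9, 9]])  # int64 (Long) tensor
b = a.float().sqrt()                # tensor([[3., 3.], [3., 3.]])
c = a.float().rsqrt()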
| https://stackoverflow.com/questions/66382240/ |
Why is my parameter not changing and its gradient 0? | I am building a really simple model to learn the parameter of a Poisson model and I am not sure where I am going wrong. I am using PyTorch's nn module and doing the following.
I made some really simple fake data
# This is the value I am trying to estimate
x = torch.tensor(2.0)
# This is a value drawn from the Poisson(x) distribution
# In this example it is 4
y = torch.poisson(x).reshape(1)
Then I just set up a really simple model
# I initialised the parameter that is going to estimate x with a random value (0.2)
# and set that it requires a gradient
a = torch.tensor([0.2], requires_grad = True)
# I define the loss function with log_input set to false
loss_function = torch.nn.PoissonNLLLoss(log_input = False)
# Defined the model
def model(a):
return torch.poisson(a)
# And the parameter to be optimised
# I chose SGD arbitrarily, maybe this is the problem?
optimizer = torch.optim.SGD([a], lr = 0.1)
Then I do iterations to update a
for i in range(2000):
# Forward pass
y_pred = model(a)
# Compute the loss
loss = loss_function(y_pred, y)
# Backprop
optimizer.zero_grad()
loss.backward()
# Update parameters
optimizer.step()
The problem is that, after this, a is still 0.2, and if I call a.grad it is 0. Where am I going wrong?
Thanks in advance
UPDATE
I have tried instead to create a model class inheriting from nn.Module. However, the same problem persists:
class learning_model(nn.Module):
def __init__(self):
super().__init__()
self.a = nn.Parameter(torch.rand(1))
self.a.requires_grad = True
def forward(self):
return torch.poisson(self.a)
model = learning_model()
loss_function = nn.PoissonNLLLoss(log_input = False)
optimizer = torch.optim.SGD(model.parameters(), lr = 0.1)
print(model.a)
Outputs:
Parameter containing:
tensor([0.1402], requires_grad=True)
Then:
for i in range(20):
# Forward pass
y_pred = model()
# Compute the loss
loss = loss_function(y_pred, y)
# Backprop
optimizer.zero_grad()
loss.backward()
# Update parameters
optimizer.step()
print(model.a, '\n gradient:', model.a.grad)
Outputs:
Parameter containing:
tensor([0.1402], requires_grad=True)
gradient: tensor([0.])
| Your model doesn't have any trainable parameters. See this link
torch.nn.parameter.Parameter
A kind of Tensor that is to be considered a module parameter.
| https://stackoverflow.com/questions/66386878/ |
Floating point operations results are different in Android, TensorFlow and Pytorch | I am trying to compare floating-point computations on Android, TensorFlow and PyTorch. What I have observed is that I am getting the same result for TensorFlow and Android but a different one on PyTorch, as Android and TensorFlow are performing a round-down operation. Please see the following results:
TensorFlow
import tensorflow as tf
a=tf.convert_to_tensor(np.array([0.9764764, 0.79078835, 0.93181187]), dtype=tf.float32)
session = tf.Session()
result = session.run(a*a*a*a)
print(result)
PyTorch
import torch as th
th.set_printoptions(precision=8)
a=th.from_numpy(np.array([0.9764764, 0.79078835, 0.93181187])).type(th.FloatTensor)
result = a*a*a*a
print(result)
Android:
for (index in 0 until a.size) {
var res = a[index] * a[index] * a[index] * a[index]
result.add(res)
}
print("r=$result")
The result is as follows:
Android: [0.9091739, 0.3910579, 0.7538986]
TensorFlow: [0.9091739, 0.3910579, 0.7538986]
PyTorch: [0.90917391, 0.39105791, 0.75389862]
You can see that the PyTorch value is different. I know that this effect is minimal in this example, but when we are training and running for 1000 rounds with different batches and epochs, this difference can accumulate and show undesirable results. Can anyone point out how we can fix this to get the same numbers on all three platforms?
Thanks.
| You are not using the same level of precision when printing, hence the different results. Internally, those results are identical; it's just an artifact you see due to the default of Python printing only 7 digits after the decimal point.
If we set the same level of precision in numpy as the one you set in PyTorch, we get:
import numpy as np
import tensorflow as tf
# setting the print precision of numpy to 8 like in your pytorch example
np.set_printoptions(precision=8, floatmode="fixed")
a=tf.convert_to_tensor(np.array([0.9764764, 0.79078835, 0.93181187]), dtype=tf.float32)
session = tf.Session()
result = session.run(a*a*a*a)
print(result)
Results in:
[0.90917391 0.39105791 0.75389862]
Exactly the same as in PyTorch.
| https://stackoverflow.com/questions/66388745/ |
Why must embed dimension be divisible by the number of heads in MultiheadAttention? | I am learning the Transformer. Here is the PyTorch documentation for MultiheadAttention. In their implementation, I saw there is a constraint:
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
Why require the constraint: embed_dim must be divisible by num_heads? If we go back to the equation
Assume:
Q, K, V are n x embed_dim matrices; all the weight matrices W are embed_dim x head_dim,
Then, the concat [head_i, ..., head_h] will be a n x (num_heads*head_dim) matrix;
W^O with size (num_heads*head_dim) x embed_dim
[head_i, ..., head_h] * W^O will become a n x embed_dim output
I don't know why we require that embed_dim be divisible by num_heads.
Let's say we have num_heads=10000; the results are the same, since the matrix-matrix product will absorb this information.
| When you have a sequence of seq_len x emb_dim (i.e. 20 x 8) and you want to use num_heads=2, the sequence will be split along the emb_dim dimension. Therefore you get two 20 x 4 sequences. You want every head to have the same shape, and if emb_dim isn't divisible by num_heads this won't work. Take for example a sequence of 20 x 9 and again num_heads=2. Then you would get 20 x 4 and 20 x 5, which are not the same dimension.
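A small numeric sketch of that split (the shapes are the ones used in this answer):
import torch

seq_len, emb_dim, num_heads = 20, 8, 2
x = torch.randn(seq_len, emb_dim)
# split the embedding dimension across the heads: (20, 8) -> (2, 20, 4)
heads = x.view(seq_len, num_heads, emb_dim // num_heads).transpose(0, 1)
print(heads.shape)  # torch.Size([2, 20, 4])
# with emb_dim = 9 and num_heads = 2 the same view() call raises an error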
| https://stackoverflow.com/questions/66389707/ |
torch.nn.fucntional.interpolate(): Parameters Settings | I'm using torch.nn.functional.interpolate() to resize an image.
First I use transforms.ToTensor() to transform an image into a tensor, which has a size of (3, 252, 252); (252, 252) is the size of the imported image. What I want to do is create a tensor of size (3, 504, 504) with the interpolate() function.
I set the parameter scale_factor=2, but it returned a (3, 252, 504) tensor. Then I set scale_factor=(1,2,2) and received a dimension-conflict error like this:
size shape must match input shape. Input is 1D, size is 3
So how should I set the parameters in order to get a (3, 504, 504) tensor?
| If you're using scale_factor you need to give a batch of images, not a single image. So you need to add a batch dimension using unsqueeze(0) and then give it to the interpolate function as follows:
import torch
import torch.nn.functional as F
img = torch.randn(3, 252, 252) # torch.Size([3, 252, 252])
img = img.unsqueeze(0) # torch.Size([1, 3, 252, 252])
out = F.interpolate(img, scale_factor=(2, 2), mode='nearest')
print(out.size()) # torch.Size([1, 3, 504, 504])
| https://stackoverflow.com/questions/66407004/ |
pytorch int32 to int64 conversion | I'm trying to convert a simple image mask to int64
image = np.array([[1, 2], [3, 4]], dtype='int32')
transform = Compose([
torch.from_numpy,
ConvertImageDtype(torch.int64)
])
However, transform(image) yields
tensor([[ 4294967296, 8589934592],
[12884901888, 17179869184]])
Is there something wrong, or am I fundamentally misunderstanding something about how the conversion should work?
| If you skip the dtype conversion and create the array as int64 directly, the image is transformed correctly.
image = np.array([[1, 2], [3, 4]], dtype='int64')
transform = Compose([
torch.from_numpy
])
transform(image)
# tensor([[1, 2],
# [3, 4]])
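A hedged alternative (not part of the original answer): ConvertImageDtype rescales integer image values to the range of the target dtype, which is presumably where the factor of roughly 2^32 comes from; if the goal is only to change the dtype without rescaling, the tensor can be cast directly:
import numpy as np
import torch

image = np.array([[1, 2], [3, 4]], dtype='int32')
t = torch.from_numpy(image).to(torch.int64)  # or .long()
# tensor([[1, 2],
#         [3, 4]])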
| https://stackoverflow.com/questions/66420588/ |
Importing MNIST dataset from local directory in a closed system | I am trying to run a tutorial based on MNIST data on a cluster, and the node where the training script runs doesn't have internet access, so I am manually placing the MNIST dataset in the desired directory, but I am getting a 'Dataset not found' error.
I am trying to run this tutorial on the cluster.
I have tried this answer but the answer doesn't resolve my problem.
Below are my code modifications:
import horovod.torch as hvd
train_dataset = \
datasets.MNIST('/scratch/netra/MNIST/processed/training.pt-%d' % hvd.rank(), train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
test_dataset = \
datasets.MNIST('/scratch/netra/MNIST/processed/test.pt-%d' % hvd.rank(), train=False,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
How to resolve it?
| You have to specify a root folder, not a full path to the processed file:
root (string): Root directory of dataset where MNIST/processed/training.pt
and MNIST/processed/test.pt exist.
In your case:
root is /scratch/netra
Thus,
train_dataset = \
datasets.MNIST('/scratch/netra-%d' % hvd.rank(), train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
test_dataset = \
datasets.MNIST('/scratch/netra-%d' % hvd.rank(), train=False,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
| https://stackoverflow.com/questions/66430702/ |
how can I reshape these images as 2D image tensors? | I am currently working with RGB images loaded as tensors, and I would like to reshape them into 2D tensors to implement deep neural networks on them.
The shape I am currently working with is:
images.shape
torch.Size([32, 3, 244, 244])
I don't know how to deal with the last two dimensions, and also how to flatten the 3 color channels.
| Your requirement is too hazy and it's unclear what you want to achieve with these images. Do they come with labels? If not, do you want to use an unsupervised method such as an autoencoder? Looking at the shape of your images tensor:
torch.Size([32, 3, 244, 244])
This means that there are 32 color (RGB) images in this tensor. If your definition of 2D means converting them to grayscale images, then you can use the torchvision library.
images = [torchvision.transforms.ToPILImage()(img) for img in images]
images = [torchvision.transforms.Grayscale()(img) for img in images]
And to convert the PIL grayscale images back to torch tensor, use:
images = [torchvision.transforms.ToTensor()(img) for img in images]
images = torch.stack(images).to(device)
Now, the shape of images would be [32, 1, 244, 244] (ToTensor keeps a single channel dimension for grayscale images); you can squeeze that dimension to get [32, 244, 244] if needed.
Flattening such a high-resolution image at the very first layer is not recommended. That's why you see in the computer vision literature that folks apply a few convolution layers at the beginning of the model architecture to downsample the input into smaller (lower-resolution) feature descriptors.
| https://stackoverflow.com/questions/66448486/ |
Loss of Conv-neural-network not decreasing, instead oscillating | I have a convolutional neural network in a VGG-style architecture (below) to classify whether a picture contains a cat or a dog. My training set contains 25000 images cropped to 256 px on each side. I tried different learning rates, different loss functions and much more, but my loss keeps fluctuating between 0.692 and 0.694 and will not decrease...
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(256),
transforms.ToTensor(),
normalize
])
# Output = [isDog, isCat]
train_data_list = []
train_data = []
target_list = []
plotlist = []
train_files = listdir("data/catsdogs/train/")
def loadTrainData():
global train_data_list
global train_data
global target_list
print("Loading data now...")
amount = len(listdir("data/catsdogs/train/"))
current = 0
for i in range(amount):
r = random.randint(0, len(train_files) - 1)
file = train_files[r]
train_files.remove(file)
img = Image.open("data/catsdogs/train/" + file)
img_tensor = transform(img) # (3, 256, 256)
isCat = 1 if 'cat' in file else 0
isDog = 1 if 'dog' in file else 0
target = [isCat, isDog]
train_data_list.append(img_tensor)
target_list.append(target)
if len(train_data_list) >= 64:
train_data.append((torch.stack(train_data_list), target_list))
train_data_list = []
target_list = []
current = current + 1
print("Loaded: {:.1f}%".format(current * 100 / amount))
print("Loaded data successfully!")
class Network(nn.Module):
def __init__(self):
super(Network, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1)
self.conv3 = nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1)
self.conv4 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1)
self.conv5 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
self.conv6 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.dropout = nn.Dropout2d()
self.relu = nn.ReLU(inplace=True)
self.pool = nn.MaxPool2d(kernel_size=3, stride=3, padding=0, dilation=1, ceil_mode=False)
self.fc1 = nn.Linear(5184, 1296)
self.fc2 = nn.Linear(1296, 2)
def forward(self, x):
# Block 1
x = self.conv1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.relu(x)
x = self.pool(x)
# Block 2
x = self.conv3(x)
x = self.relu(x)
x = self.conv4(x)
x = self.relu(x)
x = self.pool(x)
# Block 3
x = self.conv5(x)
x = self.relu(x)
x = self.conv6(x)
x = self.relu(x)
x = self.pool(x)
x = x.view(-1, 5184)
x = F.relu(self.fc1(x))
x = self.dropout(x)
x = self.fc2(x)
return torch.sigmoid(x)
model = Network()
model = model.cuda()
optimizer = optim.SGD(model.parameters(), lr=0.0001, weight_decay=0.0016)
def train(epoch):
global optimizer
model.train()
batch_id = 0
for data, target in train_data:
data = data.cuda()
target = torch.Tensor(target).cuda()
data = Variable(data)
target = Variable(target)
optimizer.zero_grad()
out = model(data)
criterion = F.binary_cross_entropy
loss = criterion(out, target)
loss.backward()
optimizer.step()
plotlist.append(loss)
print('Train Epoch: {}, {:.0f}% ,\tLoss: {:.6f}'.format(
epoch, 100. * batch_id / len(train_data), loss.item()
))
batch_id = batch_id + 1
loadTrainData()
for epoch in range(25):
train(epoch)
plt.plot(plotlist)
plt.show()
plt.ylabel("Loss")
plt.savefig("lossPlot.png")
Here is a plot of my loss over 5 iterations:
Also, with a higher learning rate the fluctuation only gets stronger: with lr 0.1 it is between 0.5 and 0.7.
| Have you tried adding momentum to your SGD optimizer?
optimizer = optim.SGD(model.parameters(), lr=0.1, weight_decay=0.0016, momentum=0.9)
Or, a different optimizer such as Adam or AdaDelta, which will use adaptive learning rate?
Also, it does not look like your training data is shuffled - can it happen that some batches have all cats and some batches have all dogs, pulling the gradient descent in opposite directions every few steps? It may be better to shuffle your training data after every epoch and do the batching on top of that. The torch.utils.data.DataLoader class may be of some help in this.
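A minimal sketch (an assumption, not part of the original answer) of letting DataLoader handle the shuffling and batching instead of the manual 64-image stacking in loadTrainData():
import torch
from torch.utils.data import DataLoader, TensorDataset

# hypothetical tensors standing in for the preprocessed images and targets
train_images = torch.randn(1000, 3, 256, 256)
train_targets = torch.randint(0, 2, (1000, 2)).float()

dataset = TensorDataset(train_images, train_targets)
loader = DataLoader(dataset, batch_size=64, shuffle=True)

for data, target in loader:          # batches are reshuffled every epoch
    print(data.shape, target.shape)  # torch.Size([64, 3, 256, 256]) torch.Size([64, 2])
    break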
What's the naming scheme of your files? Do the variables 'isCat' and 'isDog' have correct values?
What happens when you try to train on only 100 examples - is your model able to learn the training data in this simple case? This should hopefully rule out some obvious bugs.
| https://stackoverflow.com/questions/66453419/ |
RuntimeError: CUDA error: device-side assert triggered on loss function | /pytorch/aten/src/ATen/native/cuda/Loss.cu:102: operator(): block: [18,0,0], thread: [54,0,0] Assertion input_val >= zero && input_val <= one failed.
/pytorch/aten/src/ATen/native/cuda/Loss.cu:102: operator(): block: [18,0,0], thread: [55,0,0] Assertion input_val >= zero && input_val <= one failed.
/pytorch/aten/src/ATen/native/cuda/Loss.cu:102: operator(): block: [18,0,0], thread: [56,0,0] Assertion input_val >= zero && input_val <= one failed.
/pytorch/aten/src/ATen/native/cuda/Loss.cu:102: operator(): block: [18,0,0], thread: [57,0,0] Assertion input_val >= zero && input_val <= one failed.
/pytorch/aten/src/ATen/native/cuda/Loss.cu:102: operator(): block: [18,0,0], thread: [58,0,0] Assertion input_val >= zero && input_val <= one failed.
/pytorch/aten/src/ATen/native/cuda/Loss.cu:102: operator(): block: [18,0,0], thread: [59,0,0] Assertion input_val >= zero && input_val <= one failed.
Traceback (most recent call last):
File "run_toys.py", line 215, in
loss = criterion(torch.reshape(out, [-1, dataset.out_dim]), torch.reshape(target, [-1, dataset.out_dim]))
File "/usr/local/python3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "/usr/local/python3/lib/python3.6/site-packages/torch/nn/modules/loss.py", line 530, in forward
return F.binary_cross_entropy(input, target, weight=self.weight, reduction=self.reduction)
File "/usr/local/python3/lib/python3.6/site-packages/torch/nn/functional.py", line 2526, in
binary_cross_entropy
input, target, weight, reduction_enum)
RuntimeError: CUDA error: device-side assert triggered
The Code
criterion = nn.CrossEntropyLoss()
loss = criterion(torch.reshape(out, [-1, dataset.out_dim]), torch.reshape(target, [-1, dataset.out_dim]))
loss = torch.mean(loss)
The shapes of the target and the output are the same: torch.Size([640, 32])
The model runs OK on my CPU, but running on the GPU is the issue.
| There might be two reasons for the error:
As the log says, input_val is not in the range [0, 1]. So you should ensure that the model outputs are in that range. You can use PyTorch's torch.clamp(). Before calculating the loss, add the following line:
out = out.clamp(0, 1)
Maybe you are sure that the model outputs are in the range [0, 1]. Then a very common problem is that the output contains some NaN values, which triggers the assert as well. To prevent this you can use the following trick, again before calculating the loss:
out[out!=out] = 0 # or 1 depending on your model's need
The trick here uses the nan != nan property; we replace the NaNs with some valid number.
| https://stackoverflow.com/questions/66456541/ |
In training mode, targets should be passed | I am new to deep learning and have a university project on detecting traffic lights, where we can use open-source code.
So, I tried to run the code on kaggle https://www.kaggle.com/endoruk1234/trafficlightdetection-fasterrcnn-pytorch/log
However, at the stage of testing the saved model on a video, I got this error: 'In training mode, targets should be passed'.
I am not sure why I need to pass targets at the testing stage. I don't understand whether it is a problem with the initial model, or whether the video capture part is written with mistakes.
The model
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
def _get_instance_segmentation_model(num_classes):
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
return model
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
N_CLASS = 4
INP_FEATURES = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(INP_FEATURES, N_CLASS)
model.to(device)
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(params)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
Training
lossHist = LossAverager()
valLossHist = LossAverager()
for epoch in range(EPOCHS):
start_time = time()
model.train()
lossHist.reset()
for images, targets, image_ids in tqdm(trainDataLoader):
#bbox = check_bbox(bbox)
images = torch.stack(images).to(device)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
bs = images.shape[0]
loss_dict = model(images, targets)
totalLoss = sum(loss for loss in loss_dict.values())
lossValue = totalLoss.item()
lossHist.update(lossValue,bs)
optimizer.zero_grad()
totalLoss.backward()
optimizer.step()
if lr_scheduler is not None:
lr_scheduler.step(totalLoss)
print(f"[{str(datetime.timedelta(seconds = time() - start_time))[2:7]}]")
print(f"Epoch {epoch}/{EPOCHS}")
print(f"Train loss: {lossHist.avg}")
if(epoch == 10):
torch.save(model.state_dict(), 'fasterrcnn_resnet{}_fpn.pth'.format(epoch))
torch.save(model.state_dict(), 'fasterrcnn_resnet{}_fpn.pth'.format(epoch))
Testing on a video with a mistake
while(True):
ret, input = cap.read()
image = input.copy()
input = preprocess(input).float()
input = input.unsqueeze_(0)
input = input.type(torch.cuda.FloatTensor)
print(input)
result = model(input)
boxes = result[0]['boxes'].type(torch.cuda.FloatTensor)
scores = result[0]['scores'].type(torch.cuda.FloatTensor)
labels = result[0]['labels'].type(torch.cuda.FloatTensor)
mask = nms(boxes,scores,0.3)
boxes = boxes[mask]
scores = scores[mask]
labels = labels[mask]
boxes = boxes.data.cpu().numpy().astype(np.int32)
scores = scores.data.cpu().numpy()
labels = labels.data.cpu().numpy()
mask = scores >= 0.5
boxes = boxes[mask]
scores = scores[mask]
labels = labels[mask]
colors = {1:(0,255,0), 2:(255,255,0), 3:(255,0,0)}
for box,label in zip(boxes,labels):
image = cv2.rectangle(image,
(box[0], box[1]),
(box[2], box[3]),
(0,0,255), 1)
cv2.imshow("image", image)
if cv2.waitKey(0):
break
ValueError Traceback (most recent call last)
<ipython-input-84-e32f9d25d942> in <module>()
8 print(input)
9
---> 10 result = model(input)
11
12 boxes = result[0]['boxes'].type(torch.cuda.FloatTensor)
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
/usr/local/lib/python3.7/dist-packages/torchvision/models/detection/generalized_rcnn.py in forward(self, images, targets)
58 """
59 if self.training and targets is None:
---> 60 raise ValueError("In training mode, targets should be passed")
61 if self.training:
62 assert targets is not None
ValueError: In training mode, targets should be passed
Thank you in advance! If you can tell me how to correct the model or the video capture code, I will be very grateful.
| In your example you don't explain how you load your model, but I think you have forgotten to call model.eval(). This function is a kind of switch for some specific layers/parts of the model that behave differently during training and inference (evaluation) time.
To run inference, you can load your model like this:
model.load_state_dict(torch.load("/content/gdrive/MyDrive/Models/model_Resnet.pth"))
model.eval()
| https://stackoverflow.com/questions/66475189/ |
Why does it take so long to print the value of a GPU tensor in Pytorch? | I wrote this pytorch program to compute a 5000*5000 matrix multiplication on the GPU, 100 iterations.
import torch
import numpy as np
import time
N = 5000
x1 = np.random.rand(N, N)
######## a 5000*5000 matrix multiplication on GPU, 100 iterations #######
x2 = torch.tensor(x1, dtype=torch.float32).to("cuda:0")
start_time = time.time()
for n in range(100):
G2 = x2.t() @ x2
print(G2.size())
print("It takes", time.time() - start_time, "seconds to compute")
print("G2.device:", G2.device)
start_time2 = time.time()
# G4 = torch.zeros((5,5),device="cuda:0")
G4 = G2[:5, :5]
print("G4.device:", G4.device)
print("G4======", G4)
# G5=G4.cpu()
# print("G5.device:",G5.device)
print("It takes", time.time() - start_time2, "seconds to transfer or display")
Here is the result on my laptop:
torch.Size([5000, 5000])
It takes 0.22243595123291016 seconds to compute
G2.device: cuda:0
G4.device: cuda:0
G4====== tensor([[1636.3195, 1227.1913, 1252.6871, 1242.4584, 1235.8160],
[1227.1913, 1653.0522, 1260.2621, 1246.9526, 1250.2871],
[1252.6871, 1260.2621, 1685.1147, 1257.2373, 1266.2213],
[1242.4584, 1246.9526, 1257.2373, 1660.5951, 1239.5414],
[1235.8160, 1250.2871, 1266.2213, 1239.5414, 1670.0034]],
device='cuda:0')
It takes 60.13639569282532 seconds to transfer or display
Process finished with exit code 0
I am confused about why it takes so much time to display the variable G4 on the GPU, since it is only 5*5 in size.
BTW, if I use "G5=G4.cpu()" to transfer the variable from GPU to CPU, it takes a lot of time too.
My development environment (rather old laptop):
pytorch 1.0.0
CUDA 8.0
Nvidia GeForce GT 730m
Windows 10 Professional
When I increase the number of iterations, the compute time does not increase noticeably, but the transfer/display time does. Why? Can somebody explain? Thanks so much.
| Pytorch CUDA operations are asynchronous. Most operations on GPU tensors are actually non blocking until a derived result is requested. This means that until you ask for a CPU version of a tensor, commands like matrix multiply are basically being processed in parallel to your code. When you stop the timer there's no guarantee that the operation has been completed. You can read more about this in the docs.
To time chunks of your code properly you should add calls to torch.cuda.synchronize. This function should be called twice, once right before you start your timer, and once right before you stop your timer. Outside of profiling your code you should avoid calls to this function as it may slow down overall performance.
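A minimal sketch of that timing pattern (an assumption, applied to the matrix-multiply loop from the question):
import time
import torch

x2 = torch.rand(5000, 5000, device="cuda:0")

torch.cuda.synchronize()  # make sure pending work is done before starting the timer
start = time.time()
for _ in range(100):
    G2 = x2.t() @ x2
torch.cuda.synchronize()  # wait until the multiplications have actually finished
print("compute:", time.time() - start, "seconds")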
| https://stackoverflow.com/questions/66477371/ |
'LSTM' object has no attribute '_flat_weights_names' | While using the iNLTK library, I am getting an error. I have the latest versions of pytorch and torchvision.
'LSTM' object has no attribute '_flat_weights_names'
After researching on some blogs, some people suggested downgrading the version to 1.2, so I tried the installation below from https://pytorch.org/get-started/previous-versions/
pip install torch==1.2.0+cu92 torchvision==0.4.0+cu92 -f https://download.pytorch.org/whl/torch_stable.html
However, I am getting errors:
ERROR: Could not find a version that satisfies the requirement torch==1.2.0+cpu
ERROR: No matching distribution found for torch==1.2.0+cpu
Also, version 1.3.1 is missing.
Does anybody have any idea how to downgrade to 1.3.1 or 1.2.0?
Thanks in advance
PD
| I tried using pip, but that did not work for me. conda fixed the issue.
Setup a conda environment first and activate it. Install iNLTK using pip in conda as follows:
pip install inltk
Remove the version of PyTorch installed as a dependency for iNLTK.
pip uninstall torch
Install the desired version of PyTorch.
conda install pytorch==1.3.0 -c pytorch
Refer: Previous PyTorch Versions
| https://stackoverflow.com/questions/66478043/ |
How to solve dist.init_process_group from hanging (or deadlocks)? | I was trying to set up DDP (distributed data parallel) on a DGX A100 but it doesn't work. Whenever I try to run it, it simply hangs. My code is super simple, just spawning 4 processes for 4 GPUs (for the sake of debugging I simply destroy the group immediately, but it doesn't even reach there):
def find_free_port():
""" https://stackoverflow.com/questions/1365265/on-localhost-how-do-i-pick-a-free-port-number """
import socket
from contextlib import closing
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return str(s.getsockname()[1])
def setup_process(rank, world_size, backend='gloo'):
"""
Initialize the distributed environment (for each process).
gloo: is a collective communications library (https://github.com/facebookincubator/gloo). My understanding is that
it's a library/API for process to communicate/coordinate with each other/master. It's a backend library.
export NCCL_SOCKET_IFNAME=eth0
export NCCL_IB_DISABLE=1
https://stackoverflow.com/questions/61075390/about-pytorch-nccl-error-unhandled-system-error-nccl-version-2-4-8
https://pytorch.org/docs/stable/distributed.html#common-environment-variables
"""
if rank != -1: # -1 rank indicates serial code
print(f'setting up rank={rank} (with world_size={world_size})')
# MASTER_ADDR = 'localhost'
MASTER_ADDR = '127.0.0.1'
MASTER_PORT = find_free_port()
# set up the master's ip address so this child process can coordinate
os.environ['MASTER_ADDR'] = MASTER_ADDR
print(f"{MASTER_ADDR=}")
os.environ['MASTER_PORT'] = MASTER_PORT
print(f"{MASTER_PORT}")
# - use NCCL if you are using gpus: https://pytorch.org/tutorials/intermediate/dist_tuto.html#communication-backends
if torch.cuda.is_available():
# unsure if this is really needed
# os.environ['NCCL_SOCKET_IFNAME'] = 'eth0'
# os.environ['NCCL_IB_DISABLE'] = '1'
backend = 'nccl'
print(f'{backend=}')
# Initializes the default distributed process group, and this will also initialize the distributed package.
dist.init_process_group(backend, rank=rank, world_size=world_size)
# dist.init_process_group(backend, rank=rank, world_size=world_size)
# dist.init_process_group(backend='nccl', init_method='env://', world_size=world_size, rank=rank)
print(f'--> done setting up rank={rank}')
dist.destroy_process_group()
mp.spawn(setup_process, args=(4,), world_size=4)
why is this hanging?
nvidia-smi output:
$ nvidia-smi
Fri Mar 5 12:47:17 2021
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 450.102.04 Driver Version: 450.102.04 CUDA Version: 11.0 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 A100-SXM4-40GB On | 00000000:07:00.0 Off | 0 |
| N/A 26C P0 51W / 400W | 0MiB / 40537MiB | 0% Default |
| | | Disabled |
+-------------------------------+----------------------+----------------------+
| 1 A100-SXM4-40GB On | 00000000:0F:00.0 Off | 0 |
| N/A 25C P0 52W / 400W | 3MiB / 40537MiB | 0% Default |
| | | Disabled |
+-------------------------------+----------------------+----------------------+
| 2 A100-SXM4-40GB On | 00000000:47:00.0 Off | 0 |
| N/A 25C P0 51W / 400W | 3MiB / 40537MiB | 0% Default |
| | | Disabled |
+-------------------------------+----------------------+----------------------+
| 3 A100-SXM4-40GB On | 00000000:4E:00.0 Off | 0 |
| N/A 25C P0 51W / 400W | 3MiB / 40537MiB | 0% Default |
| | | Disabled |
+-------------------------------+----------------------+----------------------+
| 4 A100-SXM4-40GB On | 00000000:87:00.0 Off | 0 |
| N/A 30C P0 52W / 400W | 3MiB / 40537MiB | 0% Default |
| | | Disabled |
+-------------------------------+----------------------+----------------------+
| 5 A100-SXM4-40GB On | 00000000:90:00.0 Off | 0 |
| N/A 29C P0 53W / 400W | 0MiB / 40537MiB | 0% Default |
| | | Disabled |
+-------------------------------+----------------------+----------------------+
| 6 A100-SXM4-40GB On | 00000000:B7:00.0 Off | 0 |
| N/A 29C P0 52W / 400W | 0MiB / 40537MiB | 0% Default |
| | | Disabled |
+-------------------------------+----------------------+----------------------+
| 7 A100-SXM4-40GB On | 00000000:BD:00.0 Off | 0 |
| N/A 48C P0 231W / 400W | 7500MiB / 40537MiB | 99% Default |
| | | Disabled |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
| 7 N/A N/A 147243 C python 7497MiB |
+-----------------------------------------------------------------------------+
How do I set up DDP on this new machine?
Update
Btw, I've successfully installed APEX because some other links say to do that, but it still fails. Here is what I did:
went to https://github.com/NVIDIA/apex and followed their instructions:
git clone https://github.com/NVIDIA/apex
cd apex
pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./
but before the above I had to update gcc:
conda install -c psi4 gcc-5
It did install (I could successfully import it), but it didn't help.
Now it actually prints an error msg:
Traceback (most recent call last):
File "/home/miranda9/miniconda3/envs/metalearning/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/home/miranda9/miniconda3/envs/metalearning/lib/python3.8/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "/home/miranda9/miniconda3/envs/metalearning/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 19, in _wrap
fn(i, *args)
KeyboardInterrupt
Process SpawnProcess-3:
Traceback (most recent call last):
File "/home/miranda9/miniconda3/envs/metalearning/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 19, in _wrap
fn(i, *args)
File "/home/miranda9/ML4Coq/ml4coq-proj/embeddings_zoo/tree_nns/main_brando.py", line 252, in train
setup_process(rank, world_size=opts.world_size)
File "/home/miranda9/ML4Coq/ml4coq-proj/embeddings_zoo/distributed.py", line 85, in setup_process
dist.init_process_group(backend, rank=rank, world_size=world_size)
File "/home/miranda9/miniconda3/envs/metalearning/lib/python3.8/site-packages/torch/distributed/distributed_c10d.py", line 436, in init_process_group
store, rank, world_size = next(rendezvous_iterator)
File "/home/miranda9/miniconda3/envs/metalearning/lib/python3.8/site-packages/torch/distributed/rendezvous.py", line 179, in _env_rendezvous_handler
store = TCPStore(master_addr, master_port, world_size, start_daemon, timeout)
RuntimeError: connect() timed out.
During handling of the above exception, another exception occurred:
related:
https://github.com/pytorch/pytorch/issues/9696
https://discuss.pytorch.org/t/dist-init-process-group-hangs-silently/55347/2
https://forums.developer.nvidia.com/t/imagenet-hang-on-dgx-1-when-using-multiple-gpus/61919
apex suggestion: https://discourse.mozilla.org/t/hangs-on-dist-init-process-group-in-distribute-py/44686
https://github.com/pytorch/pytorch/issues/15638
https://github.com/pytorch/pytorch/issues/53395
| The following fixes are based on Writing Distributed Applications with PyTorch, Initialization Methods.
Issue 1:
It will hang unless you pass in nprocs=world_size to mp.spawn(). In other words, it's waiting for the "whole world" to show up, process-wise.
Issue 2:
The MASTER_ADDR and MASTER_PORT need to be the same in each process' environment and need to be a free address:port combination on the machine where the process with rank 0 will be run.
Both of these are implied or directly read from the following quote from the link above (emphasis added):
Environment Variable
We have been using the environment variable initialization method
throughout this tutorial. By setting the following four environment
variables on all machines, all processes will be able to properly
connect to the master, obtain information about the other processes,
and finally handshake with them.
MASTER_PORT: A free port on the machine that will host the process with rank 0.
MASTER_ADDR: IP address of the machine that will host the process with rank 0.
WORLD_SIZE: The total number of processes, so that the master knows how many workers to wait for.
RANK: Rank of each process, so they will know whether it is the master or a worker.
Here's some code to demonstrate both of those in action:
import torch
import torch.multiprocessing as mp
import torch.distributed as dist
import os
def find_free_port():
""" https://stackoverflow.com/questions/1365265/on-localhost-how-do-i-pick-a-free-port-number """
import socket
from contextlib import closing
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return str(s.getsockname()[1])
def setup_process(rank, master_addr, master_port, world_size, backend='gloo'):
print(f'setting up {rank=} {world_size=} {backend=}')
# set up the master's ip address so this child process can coordinate
os.environ['MASTER_ADDR'] = master_addr
os.environ['MASTER_PORT'] = master_port
print(f"{master_addr=} {master_port=}")
# Initializes the default distributed process group, and this will also initialize the distributed package.
dist.init_process_group(backend, rank=rank, world_size=world_size)
print(f"{rank=} init complete")
dist.destroy_process_group()
print(f"{rank=} destroy complete")
if __name__ == '__main__':
world_size = 4
master_addr = '127.0.0.1'
master_port = find_free_port()
mp.spawn(setup_process, args=(master_addr,master_port,world_size,), nprocs=world_size)
| https://stackoverflow.com/questions/66498045/ |
unable to import pytorch-lightning | I installed pytorch-lightning using pip, and I'm running on Mac.
I tried:
! pip install pytorch-lightning --upgrade
! pip install pytorch-lightning-bolts
(finished successfully)
and then:
import pytorch_lightning as pl
and what I get is:
--
-------------------------------------------------------------------------
ImportError Traceback (most recent call last)
<ipython-input-3-f3b4217dcea1> in <module>
7 from torchvision.datasets import MNIST
8 from torchvision import transforms
----> 9 import pytorch_lightning as pl
10 from pytorch_lightning.metrics.functional import accuracy
11 tmpdir = os.getcwd()
/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/__init__.py in <module>
60 # We are not importing the rest of the lightning during the build process, as it may not be compiled yet
61 else:
---> 62 from pytorch_lightning import metrics
63 from pytorch_lightning.callbacks import Callback
64 from pytorch_lightning.core import LightningDataModule, LightningModule
/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/metrics/__init__.py in <module>
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
---> 14 from pytorch_lightning.metrics.classification import ( # noqa: F401
15 Accuracy,
16 AUC,
/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/metrics/classification/__init__.py in <module>
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
---> 14 from pytorch_lightning.metrics.classification.accuracy import Accuracy # noqa: F401
15 from pytorch_lightning.metrics.classification.auc import AUC # noqa: F401
16 from pytorch_lightning.metrics.classification.auroc import AUROC # noqa: F401
/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/metrics/classification/accuracy.py in <module>
16 import torch
17
---> 18 from pytorch_lightning.metrics.functional.accuracy import _accuracy_compute, _accuracy_update
19 from pytorch_lightning.metrics.metric import Metric
20
/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/metrics/functional/__init__.py in <module>
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
---> 14 from pytorch_lightning.metrics.functional.accuracy import accuracy # noqa: F401
15 from pytorch_lightning.metrics.functional.auc import auc # noqa: F401
16 from pytorch_lightning.metrics.functional.auroc import auroc # noqa: F401
/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/metrics/functional/accuracy.py in <module>
16 import torch
17
---> 18 from pytorch_lightning.metrics.classification.helpers import _input_format_classification, DataType
19
20
/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/metrics/classification/helpers.py in <module>
17 import torch
18
---> 19 from pytorch_lightning.metrics.utils import select_topk, to_onehot
20 from pytorch_lightning.utilities import LightningEnum
21
/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/metrics/utils.py in <module>
16 import torch
17
---> 18 from pytorch_lightning.utilities import rank_zero_warn
19
20 METRIC_EPS = 1e-6
/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/utilities/__init__.py in <module>
16 import numpy
17
---> 18 from pytorch_lightning.utilities.apply_func import move_data_to_device # noqa: F401
19 from pytorch_lightning.utilities.distributed import ( # noqa: F401
20 AllGatherGrad,
/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/utilities/apply_func.py in <module>
23
24 from pytorch_lightning.utilities.exceptions import MisconfigurationException
---> 25 from pytorch_lightning.utilities.imports import _TORCHTEXT_AVAILABLE
26
27 if _TORCHTEXT_AVAILABLE:
/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/utilities/imports.py in <module>
54 _TORCH_GREATER_EQUAL_1_7 = _compare_version("torch", operator.ge, "1.7.0")
55 _TORCH_QUANTIZE_AVAILABLE = bool([eg for eg in torch.backends.quantized.supported_engines if eg != 'none'])
---> 56 _APEX_AVAILABLE = _module_available("apex.amp")
57 _BOLTS_AVAILABLE = _module_available('pl_bolts')
58 _DEEPSPEED_AVAILABLE = not _IS_WINDOWS and _module_available('deepspeed')
/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pytorch_lightning/utilities/imports.py in _module_available(module_path)
32 """
33 try:
---> 34 return find_spec(module_path) is not None
35 except AttributeError:
36 # Python 3.6
/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/importlib/util.py in find_spec(name, package)
92 parent_name = fullname.rpartition('.')[0]
93 if parent_name:
---> 94 parent = __import__(parent_name, fromlist=['__path__'])
95 try:
96 parent_path = parent.__path__
/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/apex/__init__.py in <module>
11 ISessionFactory)
12 from pyramid.security import NO_PERMISSION_REQUIRED
---> 13 from pyramid.session import UnencryptedCookieSessionFactoryConfig
14 from pyramid.settings import asbool
15
ImportError: cannot import name 'UnencryptedCookieSessionFactoryConfig' from 'pyramid.session' (unknown location
| I guess this is an outdated issue, as we have since split TorchMetrics out into a standalone package. Please check out the latest PyTorch Lightning release.
| https://stackoverflow.com/questions/66505335/ |
pytorch deep learning loading data sequentially and efficiently | I have been doing neural network analysis on 20 thousand "images", each image represented in the form of the intensity of 100 * 100 * 100 neurons.
x = np.loadtxt('imgfile')
x = x.reshape(-1, img_channels, 100, 100, 100)
//similarly for target variable 'y'
Above, the first dimension of x will be the number of images. Am using DataLoader to get appropriate number of images for training during each iteration as shown below.
batch_size = 16
traindataset = TensorDataset(Tensor(x[:-testdatasize]), Tensor(y[:-testdatasize]) )
train_loader = DataLoader(dataset=traindataset, batch_size=batch_size, shuffle=True)
for epoch in range(num_epochs):
for i, (data,targets) in enumerate(train_loader):
...
I hope to increase the number of images to 50k but am restricted by the computer memory (imgfile is ~50 GB).
I was wondering if there is an efficient way to handle all the data? Like, rather than loading the whole imgfile, can we first divide them into sets, each with batch_size number of images, and load the sets periodically during training. I am not completely sure how to implement this.
I found some similar ideas using Keras here: https://machinelearningmastery.com/how-to-load-large-datasets-from-directories-for-deep-learning-with-keras/
Please point me towards any similar ideas implemented with pytorch or you have any ideas.
| Digging around for a while after posting the question, I found out there is, of course, a way using torch.utils.data.Dataset. Each image's data can be saved in a separate file and all the filenames are listed in 'filelistdata'. Only batch_size images are loaded into memory at a time when requested through the DataLoader (in the background, the __getitem__ method fetches the images). The following worked for me:
traindataset = CustDataset(filename='filelistdata', root_dir=root_dir)
train_loader = DataLoader(dataset=traindataset, batch_size=batch_size, num_workers = 16)
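CustDataset itself is not shown above; below is a minimal sketch of such a map-style Dataset (the file layout, the np.load call and the field names are my assumptions, not from the original post):
import os
import numpy as np
import torch
from torch.utils.data import Dataset

class CustDataset(Dataset):
    def __init__(self, filename, root_dir):
        # 'filename' lists one "<sample_file> <target>" pair per line
        with open(os.path.join(root_dir, filename)) as f:
            self.samples = [line.split() for line in f.read().splitlines() if line]
        self.root_dir = root_dir

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        img_file, target = self.samples[idx]
        # only this single sample is read from disk, so memory usage stays bounded
        x = np.load(os.path.join(self.root_dir, img_file)).reshape(-1, 100, 100, 100)
        return torch.from_numpy(x).float(), torch.tensor(float(target))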
num_workers is really important for performance and should be higher than the number of CPUs you are using (I am using 4 CPUs above). I found the following resources useful for answering this question:
How to split and load huge dataset that doesn't fit into memory into pytorch Dataloader?
https://stanford.edu/~shervine/blog/pytorch-how-to-generate-data-parallel
https://www.youtube.com/watch?v=ZoZHd0Zm3RY
| https://stackoverflow.com/questions/66522854/ |
How to do elementwise comparison sum without for loop | The for loop makes my program very slow. I would've used np.sum(target==output) but I need the argmax value for each row in the output. How can I speed this up?
The output is a tensor data type
for i, x in enumerate(target):
if target[i] == torch.argmax(output[i]):
correct_class += 1
| You could vectorize the above using np.argmax's axis argument, to obtain the indices of the maximum value across the rows:
(target==np.argmax(output, axis=1)).sum()
For instance:
output = np.random.choice([0,1],(4,2))
print(output)
array([[1, 1],
[0, 1],
[0, 1],
[0, 1]])
target = np.array([[0,1,0,1]])
(target==np.argmax(output, axis=1)).sum()
# 3
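Since the question mentions that output is a torch tensor, the equivalent one-liner in PyTorch (a sketch assuming output has shape (N, num_classes) and target has shape (N,)) would be:
correct_class = (target == output.argmax(dim=1)).sum().item()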
| https://stackoverflow.com/questions/66532614/ |
MMDetection loading from own training checkpoint for inference produces garbage detections | I've trained up a very simple model using the MMDetection colab tutorial and then verifying the result using:
img = mmcv.imread('/content/mmdetection/20210301_145246_123456.jpg')
img = cv2.resize(img, (0,0), fx=0.25, fy=0.25)
model.cfg = cfg
result = inference_detector(model, img)
show_result_pyplot(model, img, result)
confirms that it's working great.
I then follow the same steps as for training but instead I load my own training checkpoint, and I don't train. Then running the verification snippet above produces garbage results.
Here's that in code
from mmcv import Config
cfg = Config.fromfile('configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py')
from mmdet.apis import set_random_seed
# Modify dataset type and path
cfg.dataset_type = 'SamplesDataset'
cfg.data_root = 'samples_dataset/'
cfg.data.test.type = 'SamplesDataset'
cfg.data.test.data_root = 'samples_dataset/'
cfg.data.test.ann_file = 'train.txt'
cfg.data.test.img_prefix = 'o2h'
cfg.data.train.type = 'SamplesDataset'
cfg.data.train.data_root = 'samples_dataset/'
cfg.data.train.ann_file = 'train.txt'
cfg.data.train.img_prefix = 'o2h'
cfg.data.val.type = 'SamplesDataset'
cfg.data.val.data_root = 'samples_dataset/'
cfg.data.val.ann_file = 'val.txt'
cfg.data.val.img_prefix = 'o2h'
# modify num classes of the model in box head
cfg.model.roi_head.bbox_head.num_classes = 1
# We can still use the pre-trained Mask RCNN model though we do not need to
# use the mask branch
# cfg.load_from = 'checkpoints/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth'
cfg.load_from = './experiments/epoch_1.pth'
# Set up working dir to save files and logs.
cfg.work_dir = './experiments'
# The original learning rate (LR) is set for 8-GPU training.
# We divide it by 8 since we only use one GPU.
cfg.optimizer.lr = 0.02 / 8
cfg.lr_config.warmup = None
cfg.log_config.interval = 10
cfg.runner = dict(type='EpochBasedRunner', max_epochs=1)
cfg.total_epochs = 1
# Change the evaluation metric since we use customized dataset.
cfg.evaluation.metric = 'mAP'
# We can set the evaluation interval to reduce the evaluation times
# cfg.evaluation.interval = 12
# We can set the checkpoint saving interval to reduce the storage cost
cfg.checkpoint_config.interval = 1
# Set seed thus the results are more reproducible
cfg.seed = 0
set_random_seed(0, deterministic=False)
cfg.gpu_ids = range(1)
# We can initialize the logger for training and have a look
# at the final config used for training
# print(f'Config:\n{cfg.pretty_text}')
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.apis import train_detector
# Build dataset
# datasets = [build_dataset(cfg.data.train)]
# Build the detector
model = build_detector(cfg.model)
# Add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
# Create work_dir
# mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# train_detector(model, datasets, cfg, distributed=False, validate=True)
Obviously, I wouldn't normally do all that just for validating my model, but this is one of many debugging steps for me, as my goal is to download and run the model locally. This is what I'm trying to do locally:
import sys
import glob
import time
sys.path.insert(0, '../mmdetection')
from mmdet.apis import init_detector, inference_detector, show_result_pyplot
from mmdet.models import build_detector
import mmcv
import numpy as np
file_paths = glob.glob('samples/o2h/*.jpg')
cfg = mmcv.Config.fromfile('../mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py')
cfg.model.roi_head.bbox_head.num_classes = 1
cfg.load_from = 'models/mmdet_faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.pth' # my own checkpoint
model = build_detector(cfg.model)
model.CLASSES = ('hash',)
model.cfg = cfg
file_path = np.random.choice(file_paths)
print(file_path)
start = time.time()
result = inference_detector(model, file_path)
print(f"Time taken for inference: {time.time() - start:.2f}s")
show_result_pyplot(model, file_path, result)
| One of the mistakes in your code is that you have not updated num_classes for mask_head.
Our aim here should be to replicate the same config file that was used for training should also be used for testing/validation. If you have trained the model using 1 num_classes for bbox_head and mask_head in the config file but for validation/testing you are using 80 num_classes as default, then that will cause a mismatch in the testing process, leading to garbage detections and segmentations.
There are 2 solutions for achieving the best result:
Change the num_classes in config file before doing inference
Save the model and config file as pickle as soon as training is completed.
Note: The first solution is the best one.
Change the num_classes in config file before doing inference.
First, find the total number of classes in your dataset. Here num_classes is the total number of classes in the training dataset.
Locate to this path:
mmdetection/configs/model_name (model_name is name used for training)
Here, inside model_name folder, find the ..._config.py that you have used for training.
Inside this config file, if you have found model = dict(...) then change the num_classes for each of these keys: bbox_head, mask_head.
bbox_head might be a list, so change num_classes for each key in the list.
If model = dict(...) is not found, then the first line will contain something like
_base_ = '...'. Open that base config file and check whether model = dict(...) is present; if not, keep following the _base_ chain of config files until you find it.
After changing the num_classes, use this code for inference:
Code after changing the num_classes:
from mmdet.apis import init_detector, inference_detector
import mmcv
import numpy as np
import cv2
import os
import matplotlib.pyplot as plt
%matplotlib inline
config_file = './configs/scnet/scnet_x101_64x4d_fpn_20e_coco.py' #(I have used SCNet for training)
checkpoint_file = 'tutorial_exps/epoch_40.pth' #(checkpoint saved after training)
model = init_detector(config_file, checkpoint_file, device='cuda:0') #loading the model
img = 'test.png'
result = inference_detector(model, img)
#visualize the results in a new window
im1 = cv2.imread(img)[:,:,::-1]
#im_ones = np.ones(im1.shape, dtype='uint')*255
# model.show_result(im_ones, result, out_file='fine_result6.jpg')
plt.imshow(model.show_result(im1, result))
Save the model and config as pickle as soon as training is completed.
Another solution is to save both the model and the config as pickle as soon as training is completed, instead of relying on mmdetection to do it.
When you load the default config file (without any change), it won't produce the required result: your config should exactly match the config used for training. So it is a good idea to save the model and the config as pickle instead of re-loading the defaults.
Note: The pickle files should be saved right after training is completed.
Code for saving as pickle:
import pickle
with open('mdl.pkl','wb') as f:
pickle.dump(model, f)
with open('cfg.pkl','wb') as f:
pickle.dump(cfg, f)
You can use this model wherever and whenever you want. For inference with the saved model, use this:
import pickle, mmcv
from mmdet.apis import inference_detector, show_result_pyplot
model = pickle.load(open('mdl.pkl','rb'))
cfg = pickle.load(open('cfg.pkl','rb'))
img = mmcv.imread('images/test.png')
model.cfg = cfg
result = inference_detector(model, img)
show_result_pyplot(model, img, result)
| https://stackoverflow.com/questions/66537288/ |
Unable to import pytorch_lightning on google colab | I have done the following:
!pip install pytorch_lightning -qqq
import pytorch_lightning
But get the following error:
ImportError Traceback (most recent call last)
<ipython-input-7-d883b15aac58> in <module>()
----> 1 import pytorch_lightning
----------------------------------9 frames------------------------------------------------
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/utilities/apply_func.py in <module>()
26
27 if _TORCHTEXT_AVAILABLE:
---> 28 from torchtext.data import Batch
29 else:
30 Batch = type(None)
ImportError: cannot import name 'Batch' from 'torchtext.data' (/usr/local/lib/python3.7/dist-packages/torchtext/data/__init__.py)
What could the issue be?
| As said in Issue #6415 on Github, try installing from the GitHub.
It worked for me.
!pip install git+https://github.com/PyTorchLightning/pytorch-lightning
import pytorch_lightning as pl
print(pl.__version__)
Output:
1.3.0dev
It seems that the error is coming from Issue #6210 and they say it was fixed. I guess it wasn't uploaded to PyPI.
| https://stackoverflow.com/questions/66538407/ |
One hot encoding in pytorch | I am really new to coding. Right now I am trying to turn my labels into a one-hot encoding. I have already converted the np.array to a tensor, as shown below:
tensor([4., 4., 4., 4., 4., 4., 4., 4., 4., 4., 4., 4., 4., 4., 4., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 3., 3., 3., 3., 3., 3.,
3., 3., 3., 3., 3., 3., 3., 3., 3., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
2., 2., 2.], dtype=torch.float64)
and I am using code to do one hot encoding
aaa = F.one_hot(torch_qyh, num_classes=5)
However, there is an error showing "RuntimeError: one_hot is only applicable to index tensor." Any help would be appreciated.
| You will have to convert it to long (integer) type first. It can't be done on a float tensor, since F.one_hot only accepts a LongTensor of class indices.
F.one_hot(t.long())
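For example, with a shortened version of the label tensor from the question (a quick sketch):
import torch
import torch.nn.functional as F

t = torch.tensor([4., 1., 3., 0., 2.], dtype=torch.float64)
one_hot = F.one_hot(t.long(), num_classes=5)
# tensor([[0, 0, 0, 0, 1],
#         [0, 1, 0, 0, 0],
#         [0, 0, 0, 1, 0],
#         [1, 0, 0, 0, 0],
#         [0, 0, 1, 0, 0]])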
| https://stackoverflow.com/questions/66543659/ |
How to run a pytorch model server inside docker? | I am trying to create a docker image to make it run as a server for serving a model in pytorch.
I converted the .pt model file to .MAR file in my local machine
and i copied the .MAR file inside the docker image. I created a dockerfile:
FROM ubuntu:18.04
ENV TZ=Asia/Shanghai
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get update \
&& apt-get install --yes --no-install-recommends \
tzdata
RUN ln -snf /usr/share/zoneinfo/${TZ} /etc/localtime\
&& echo ${TZ} > /etc/timezone \
&& dpkg-reconfigure -f noninteractive tzdata
RUN apt-get install python3 python3-dev python3-pip openjdk-11-jre-headless git wget curl -y
RUN python3 -m pip install torch torchvision torch-model-archiver torchserve==0.2.0
COPY densenet161.mar /model_store/
CMD torchserve --start --model-store model_store --models densenet161=densenet161.mar
EXPOSE 8080
I was able to build the image, but I was not able to access the server in the container.
When I open a shell in the container and run the command manually, it works:
docker exec -it 4b126bd87f21 sh
# torchserve --start --ncs --model-store model_store --models densenet161.mar
The server runs fine that way. But when I just run the docker image, it does not work: the container is running, yet I cannot access the server.
I don't know what the problem is.
| I hope I understand the problem.
When you do docker run torchserve:local ...., by default it runs the CMD, which is torchserve --start --model-store model_store --models densenet161=densenet161.mar. But torchserve --start returns immediately and keeps serving in the background, so the container's main process finishes and the container exits right away. To prevent the container from exiting, you can append a foreground command, e.g. change the CMD to torchserve --start --model-store model_store --models densenet161=densenet161.mar && tail -f /dev/null.
Look at the official docker entry point of torchserve https://github.com/pytorch/serve/blob/master/docker/dockerd-entrypoint.sh#L12
They run tail at the end for exactly this reason: to keep the container alive and prevent it from exiting.
| https://stackoverflow.com/questions/66551874/ |
how to import a torch 1.7.1 when torch 1.4.0 is also installed | how to import a torch 1.7.1 when torch 1.4.0 is also installed
When I run the command: ! pip list
It lists all libraries with : torch 1.7.1
Now when I run:
>>>import torch
>>>torch.__version__
'1.4.0'
How Do I import torch==1.7.1 in the python program?
I am using python 3.8.3 and windows 10
| Slightly different way to answer your question, but if you want to have two versions of torch installed simultaneously for different purposes (e.g. running different programs), my recommendation would be to use torch 1.7.1 and torch 1.4.0 in separate virtual environments. Please see the links below for guides on getting started with these:
virtualenv:
# Create virtual environment
python3 -m venv torch-env
# Activate virtual environment
torch-env\Scripts\activate.bat
# Any packages you install now are ONLY for this virtual environment
pip install torch==1.7.1 # Or 1.4.0 in a separate environment
conda:
# Create virtual environment with Python 3.8.3
conda create --name torch-env python=3.8.3
# Activate virtual environment
conda activate torch-env
# Install torch
pip install torch==1.7.1 # Or 1.4.0
Creating separate virtual environments will allow you to have multiple versions of torch simultaneously without any issues. You should then be able to import as you do above and use them interchangeably simply by switching environments.
| https://stackoverflow.com/questions/66552454/ |
Understanding Jacobian tensor gradients in pytorch | I was going through official pytorch tut, where it explains tensor gradients and Jacobian products as follows:
Instead of computing the Jacobian matrix itself, PyTorch allows you to compute Jacobian Product for a given input vector v=(v1…vm). This is achieved by calling backward with v as an argument:
inp = torch.eye(5, requires_grad=True)
out = (inp+1).pow(2)
out.backward(torch.ones_like(inp), retain_graph=True)
print("First call\n", inp.grad)
out.backward(torch.ones_like(inp), retain_graph=True)
print("\nSecond call\n", inp.grad)
inp.grad.zero_()
out.backward(torch.ones_like(inp), retain_graph=True)
print("\nCall after zeroing gradients\n", inp.grad)
Ouptut:
First call
tensor([[4., 2., 2., 2., 2.],
[2., 4., 2., 2., 2.],
[2., 2., 4., 2., 2.],
[2., 2., 2., 4., 2.],
[2., 2., 2., 2., 4.]])
Second call
tensor([[8., 4., 4., 4., 4.],
[4., 8., 4., 4., 4.],
[4., 4., 8., 4., 4.],
[4., 4., 4., 8., 4.],
[4., 4., 4., 4., 8.]])
Call after zeroing gradients
tensor([[4., 2., 2., 2., 2.],
[2., 4., 2., 2., 2.],
[2., 2., 4., 2., 2.],
[2., 2., 2., 4., 2.],
[2., 2., 2., 2., 4.]])
Though I get what the Jacobian matrix is, I didn't get how this Jacobian product is calculated.
Here are the different tensors I tried to print out to get an understanding:
>>> out
tensor([[4., 1., 1., 1., 1.],
[1., 4., 1., 1., 1.],
[1., 1., 4., 1., 1.],
[1., 1., 1., 4., 1.],
[1., 1., 1., 1., 4.]], grad_fn=<PowBackward0>)
>>> torch.eye(5)
tensor([[1., 0., 0., 0., 0.],
[0., 1., 0., 0., 0.],
[0., 0., 1., 0., 0.],
[0., 0., 0., 1., 0.],
[0., 0., 0., 0., 1.]])
>>> torch.ones_like(inp)
tensor([[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.]])
>>> inp
tensor([[1., 0., 0., 0., 0.],
[0., 1., 0., 0., 0.],
[0., 0., 1., 0., 0.],
[0., 0., 0., 1., 0.],
[0., 0., 0., 0., 1.]], requires_grad=True)
But I didn't get how the tutorial's output is calculated. Can someone explain the Jacobian matrix a bit, with the calculations done in this example?
| We will go through the entire process: from computing the Jacobian to applying it to get the resulting gradient for this input. We're looking at the operation f(x) = (x + 1)², in the simple scalar setting, we get df/dx = 2(x + 1) as complete derivative.
In the multi-dimensional setting, we have an input x_ij, and an output y_mn, indexed by (i, j), and (m, n) respectively. The function mapping is defined as y_mn = (x_mn + 1)².
First, we should look at the Jacobian itself, this corresponds to the tensor J containing all partial derivatives J_ijmn = dy_mn/dx_ij. From the expression of y_mn we can say that for all i, j, m, and n: dy_mn/dx_ij = d(x_mn + 1)²/dx_ij which is 0 if m≠i or n≠j. Else, i.e. m=i or n=j, we have that d(x_mn + 1)²/dx_ij = d(x_ij + 1)²/dx_ij = 2(x_ij + 1).
As a result, J_ijmn can be simply defined as
↱ 2(x_ij + 1) if i=m, j=n
J_ijmn =
↳ 0 else
From the chain rule, the gradient of the output with respect to the input x is denoted as dL/dx = dL/dy*dy/dx. From a PyTorch perspective we have the following relationships:
x.grad = dL/dx, shaped like x,
dL/dy is the incoming gradient: the gradient argument in the backward function
dL/dx is the Jacobian tensor described above.
As explained in the documentation, applying backward doesn't actually provide the Jacobian. It computes the chain rule product directly and stores the gradient (i.e. dL/dx inside x.grad).
In terms of shapes, the Jacobian multiplication dL/dy*dy/dx = gradient*J reduces itself to a tensor of the same shape as x.
The operation performed is defined by: [dL/dx]_ij = ∑_mn([dL/dy]_mn * J_ijmn).
If we apply this to your example. We have x = 1(i=j) (where 1(k): (k == True) -> 1 is the indicator function), essentially just the identity matrix.
We compute the Jacobian:
↱ 2(1(i=j) + 1) = if i=m, j=n
J_ijmn =
↳ 0 else
which becomes
↱ 2(1 + 1) = 4 if i=j=m=n
J_ijmn = → 2(0 + 1) = 2 if i=m, j=n, i≠j
↳ 0 else
For visualization purposes, we will stick with inp = torch.eye(2, requires_grad=True):
>>> f = lambda x: (x+1)**2
>>> J = A.jacobian(f, inp)
tensor([[[[4., 0.],
[0., 0.]],
[[0., 2.],
[0., 0.]]],
[[[0., 0.],
[2., 0.]],
[[0., 0.],
[0., 4.]]]])
Then computing the matrix multiplication using torch.einsum (I won't go into details, look through this, then this for an in-depth overview of the EinSum summation operator):
>>> torch.einsum('ij,ijmn->mn', torch.ones_like(inp), J)
tensor([[4., 2.],
[2., 4.]])
This matches what you get when back propagating from out with torch.ones_like(inp) as incoming gradient:
>>> out = f(inp)
>>> out.backward(torch.ones_like(inp))
>>> inp.grad
tensor([[4., 2.],
[2., 4.]])
If you backpropagate twice (while retaining the graph of course) you end up computing the same operation which accumulating on the parameter's grad attribute. So, naturally, after two backward passes you have twice the gradient:
>>> out = f(inp)
>>> out.backward(torch.ones_like(inp), retain_graph=True)
>>> out.backward(torch.ones_like(inp))
>>> inp.grad
tensor([[8., 4.],
[4., 8.]])
Those gradients will accumulate; you can reset them by calling the in-place function zero_: inp.grad.zero_(). From there, if you backpropagate again, you will end up with a single accumulated gradient only.
In practice, you would register your parameters on an optimizer, from which you can call zero_grad enabling you to handle and reset all parameters in that collection in one go.
Note: in the snippets above, I have imported torch.autograd.functional as A.
| https://stackoverflow.com/questions/66569022/ |
HTTP Error when trying to download MNIST data | I am using Google Colab for training a LeNet-300-100 fully-connected neural network on MNIST using Python3 and PyTorch 1.8.
To apply the transformations and download the MNIST dataset, the following code is being used:
# MNIST dataset statistics:
# mean = tensor([0.1307]) & std dev = tensor([0.3081])
mean = np.array([0.1307])
std_dev = np.array([0.3081])
transforms_apply = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean = mean, std = std_dev)
])
which gives the error:
Downloading
http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz to
./data/MNIST/raw/train-images-idx3-ubyte.gz
--------------------------------------------------------------------------- HTTPError Traceback (most recent call
last) in ()
2 train_dataset = torchvision.datasets.MNIST(
3 root = './data', train = True,
----> 4 transform = transforms_apply, download = True
5 )
6
11 frames /usr/lib/python3.7/urllib/request.py in
http_error_default(self, req, fp, code, msg, hdrs)
647 class HTTPDefaultErrorHandler(BaseHandler):
648 def http_error_default(self, req, fp, code, msg, hdrs):
--> 649 raise HTTPError(req.full_url, code, msg, hdrs, fp)
650
651 class HTTPRedirectHandler(BaseHandler):
HTTPError: HTTP Error 503: Service Unavailable
What's wrong?
| I was having the same 503 error and this worked for me
!wget www.di.ens.fr/~lelarge/MNIST.tar.gz
!tar -zxvf MNIST.tar.gz
from torchvision.datasets import MNIST
from torchvision import transforms
train_set = MNIST('./', download=True,
transform=transforms.Compose([
transforms.ToTensor(),
]), train=True)
test_set = MNIST('./', download=True,
transform=transforms.Compose([
transforms.ToTensor(),
]), train=False)
| https://stackoverflow.com/questions/66577151/ |
Bound optimization using pytorch | How to include bounds when using optimization method in pytorch. I have a tensor of variables, each variable has different bound.
upper_bound = torch.tensor([1,5,10], requires_grad=False)
lower_bound = torch.tensor([-1,-5,-10], requires_grad=False)
X = torch.tensor([10., -60., 105.], requires_grad=True)
for _ in range(100):
optimizer.zero_grad()
loss = ..
loss.backward()
optimizer.step()
X[:] = X.clamp(lower_bound, upper_bound)
But, clamp only uses a single number. Since each variable is bounded differently, I need to include the upper and lower bounds tensors.
| Gradient descent is not the best method to achieve constrained optimization, but here you can enforce your constraints with :
x = ((X-lower_bound).clamp(min=0)+lower_bound-upper_bound).clamp(max=0)+upper_bound
This requires two clamps instead of one, but I could not find any native way to achieve it in a single call.
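As a quick sanity check with the tensors from the question (a standalone sketch; the optimizer and loss are omitted):
import torch

upper_bound = torch.tensor([1., 5., 10.])
lower_bound = torch.tensor([-1., -5., -10.])
X = torch.tensor([10., -60., 105.])

x = ((X - lower_bound).clamp(min=0) + lower_bound - upper_bound).clamp(max=0) + upper_bound
print(x)  # tensor([ 1., -5., 10.])  -- every element pushed inside its own bounds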
| https://stackoverflow.com/questions/66584157/ |
RuntimeError: mean is not implemented for type torch.ByteTensor | I am getting this error after running my code:
"RuntimeError: mean is not implemented for type torch.ByteTensor"?
Does anyone know what I am doing wrong here?
accuracy = torch.mean(output)
| Got it. Basically torch.mean() isn't implemented for torch.ByteTensor, so we can convert the tensor to a FloatTensor, which torch.mean() does support.
So the code will change to:
accuracy = torch.mean(output.type(torch.FloatTensor))
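Equivalently, a shorter spelling of the same conversion (not from the original answer) is:
accuracy = output.float().mean()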
| https://stackoverflow.com/questions/66586141/ |
RuntimeError: cuDNN error: CUDNN_STATUS_NOT_INITIALIZED using pytorch | I am trying to run a simple pytorch sample code. It's works fine using CPU. But when using GPU, i get this error message:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py", line 889, in _call_impl
result = self.forward(*input, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/conv.py", line 263, in forward
return self._conv_forward(input, self.weight, self.bias)
File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/conv.py", line 260, in _conv_forward
self.padding, self.dilation, self.groups)
RuntimeError: cuDNN error: CUDNN_STATUS_NOT_INITIALIZED
The code i am trying to run is the following:
import torch
from torch import nn
m = nn.Conv1d(16, 33, 3, stride=2)
m=m.to('cuda')
input = torch.randn(20, 16, 50)
input=input.to('cuda')
output = m(input)
I am running this code in a NVIDIA docker with CUDA version 10.2 and my GPU is a RTX 2070
| There is some discussion regarding this here. I had the same issue but using cuda 11.1 resolved it for me.
This is the exact pip command
pip install torch==1.8.0+cu111 torchvision==0.9.0+cu111 torchaudio==0.8.0 -f https://download.pytorch.org/whl/torch_stable.html
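After reinstalling, a quick sanity check (not part of the original answer) to confirm which CUDA/cuDNN build is actually loaded:
import torch
print(torch.version.cuda)               # CUDA version the wheel was built against, e.g. '11.1'
print(torch.backends.cudnn.version())   # bundled cuDNN version
print(torch.cuda.is_available())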
| https://stackoverflow.com/questions/66588715/ |
How to shift columns (or rows) in a tensor with different offsets in PyTorch? | In PyTorch, the built-in torch.roll function is only able to shift columns (or rows) with the same offset. But I want to shift columns with different offsets. Suppose the input tensor is
[[1,2,3],
[4,5,6],
[7,8,9]]
Let's say, I want to shift with offset i for the i-th column. Thus, the expected output is
[[1,8,6],
[4,2,9],
[7,5,3]]
An option to do so is to shift every column separately using torch.roll and concatenate the results. But for efficiency and code compactness, I don't want to introduce a loop. Is there a better way?
|
I was sceptical about the performance of torch.gather so I searched for similar questions with numpy and found this post.
Similar solution from NumPy to Pytorch
I took the solution from @Andy L and translated it into pytorch. However, take it with a grain of salt, because I don't know how the strides work:
from numpy.lib.stride_tricks import as_strided
# NumPy solution:
def custom_roll(arr, r_tup):
m = np.asarray(r_tup)
arr_roll = arr[:, [*range(arr.shape[1]),*range(arr.shape[1]-1)]].copy() #need `copy`
#print(arr_roll)
strd_0, strd_1 = arr_roll.strides
#print(strd_0, strd_1)
n = arr.shape[1]
result = as_strided(arr_roll, (*arr.shape, n), (strd_0 ,strd_1, strd_1))
return result[np.arange(arr.shape[0]), (n-m)%n]
# Translated to PyTorch
def pcustom_roll(arr, r_tup):
m = torch.tensor(r_tup)
arr_roll = arr[:, [*range(arr.shape[1]),*range(arr.shape[1]-1)]].clone() #need `copy`
#print(arr_roll)
strd_0, strd_1 = arr_roll.stride()
#print(strd_0, strd_1)
n = arr.shape[1]
result = torch.as_strided(arr_roll, (*arr.shape, n), (strd_0 ,strd_1, strd_1))
return result[torch.arange(arr.shape[0]), (n-m)%n]
Here is also the solution from @Daniel M as plug and play.
def roll_by_gather(mat,dim, shifts: torch.LongTensor):
# assumes 2D array
n_rows, n_cols = mat.shape
if dim==0:
#print(mat)
arange1 = torch.arange(n_rows).view((n_rows, 1)).repeat((1, n_cols))
#print(arange1)
arange2 = (arange1 - shifts) % n_rows
#print(arange2)
return torch.gather(mat, 0, arange2)
elif dim==1:
arange1 = torch.arange(n_cols).view(( 1,n_cols)).repeat((n_rows,1))
#print(arange1)
arange2 = (arange1 - shifts) % n_cols
#print(arange2)
return torch.gather(mat, 1, arange2)
Benchmarking
First, I ran the methods on CPU.
Surprisingly, the gather solution from above is the fastest:
n_cols = 10000
n_rows = 100
shifts = torch.randint(-100,100,size=[n_rows,1])
data = torch.arange(n_rows*n_cols).reshape(n_rows,n_cols)
npdata = np.arange(n_rows*n_cols).reshape(n_rows,n_cols)
npshifts = shifts.numpy()
%timeit roll_by_gather(data,1,shifts)
%timeit pcustom_roll(data,shifts)
%timeit custom_roll(npdata,npshifts)
>> 2.41 ms ± 68.2 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
>> 90.4 ms ± 882 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
>> 247 ms ± 6.08 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
Running the code on GPU shows similar results:
%timeit roll_by_gather(data,1,shifts)
%timeit pcustom_roll(data,shifts)
131 µs ± 6.79 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
3.29 ms ± 46.8 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
(Note: You need torch.arange(...,device='cuda:0') within the roll_by_gather method)
| https://stackoverflow.com/questions/66596699/ |
GPU in docker container for deep learning task | Recently, I have started working on using docker images. I want to deploy PyTorch based text classification model which requires GPU to run on. When the docker image is called upon, then it's not able to detect GPU in the VM. Hence, my code is failing by throwing no Cuda device found error.
This is my base image FROM gcr.io/deeplearning-platform-release/pytorch-gpu.1-0. I don't know what all steps to follow to install Nvidia drivers in the docker. Please help me out.
| Installing Nvidia drivers inside the container doesn't help, because the GPU device isn't exposed to the container in the first place. Running the container with nvidia-docker (or, on a recent Docker version, with docker run --gpus all ...) does expose it.
| https://stackoverflow.com/questions/66603536/ |
Pytorch showing the error: 'NoneType' object has no attribute 'zero_' | I am using Python 3.8 and VSCode.
I tried to create a basic Neural Network without activations and biases but because of the error, I'm not able to update the gradients of the weights.
Matrix Details:
Layer Shape: (1, No. of Neurons)
Weight Layer Shape: (No. of Neurons in the previous layer, No. of Neurons in the next layer)
Here's my code:
# sample neural network
# importing libraries
import torch
import numpy as np
torch.manual_seed(0)
# hyperparameters
epochs = 100
lr = 0.01 # learning rate
# data
X_train = torch.tensor([[1]], dtype = torch.float)
y_train = torch.tensor([[2]], dtype = torch.float)
'''
Network Architecture:
1 neuron in the first layer
4 neurons in the second layer
4 neurons in the third layer
1 neuron in the last layer
* I haven't added bias and activation.
'''
# initializing the weights
weights = []
weights.append(torch.rand((1, 4), requires_grad = True))
weights.append(torch.rand((4, 4), requires_grad = True))
weights.append(torch.rand((4, 1), requires_grad = True))
# calculating y_pred
y_pred = torch.matmul(torch.matmul(torch.matmul(X_train, weights[0]), weights[1]), weights[2])
# calculating loss
loss = (y_pred - y_train)**2
# calculating the partial derivatives
loss.backward()
# updating the weights and zeroing the gradients
with torch.no_grad():
for i in range(len(weights)):
weights[i] = weights[i] - weights[i].grad
weights[i].grad.zero_()
It it showing the error:
File "test3.py", line 43, in <module>
weights[i].grad.zero_()
AttributeError: 'NoneType' object has no attribute 'zero_'
I don't understand why it is showing this error. Can someone please explain?
| Your model doesn't have any trainable parameters for the grad to be calculated. Use torch's Parameter. See this link for creating a module with learnable parameters.
torch.nn.parameter.Parameter
A kind of Tensor that is to be considered a module parameter.
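A minimal sketch of registering a learnable parameter this way (an illustration of nn.Parameter, not a drop-in fix for the full network above):
import torch
import torch.nn as nn

class TinyNet(nn.Module):
    def __init__(self):
        super().__init__()
        # registered as a module parameter, requires_grad=True by default
        self.w = nn.Parameter(torch.rand(1, 4))

    def forward(self, x):
        return x @ self.w   # x: (N, 1) -> output: (N, 4)

net = TinyNet()
print(list(net.parameters()))  # the weight shows up as a trainable parameter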
| https://stackoverflow.com/questions/66610575/ |
Why does my pytorch NN return a tensor of nan? | I have a quite simple neural network which takes a flattened 6x6 grid as input and should output the values of four actions to take on that grid, so a 1x4 tensor of values.
Sometimes after a few runs though for some reason I am getting a 1x4 tensor of nan
tensor([[nan, nan, nan, nan]], grad_fn=<ReluBackward0>)
My model looks like this with input dim being 36 and output dim being 4:
class Model(nn.Module):
def __init__(self, input_dim, output_dim):
# super relates to nn.Module so this initializes nn.Module
super(Model, self).__init__()
# Gridsize as input,
# last layer needs 4 outputs because of 4 possible actions: left, right, up, down
# output values are Q Values need activation function for those like argmax
self.lin1 = nn.Linear(input_dim, 24)
self.lin2 = nn.Linear(24, 24)
self.lin3 = nn.Linear(24, output_dim)
# function to feed the input through the net
def forward(self, x):
# rectified linear as activation function for the first two layers
if isinstance(x, np.ndarray):
x = torch.tensor(x, dtype=torch.float)
activation1 = F.relu(self.lin1(x))
activation2 = F.relu(self.lin2(activation1))
output = F.relu(self.lin3(activation2))
return output
The input was:
tensor([[0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 1.0000, 0.0000, 0.0000, 0.0000,
0.0000, 0.0000, 0.3333, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.3333,
0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.3333, 0.0000, 0.0000, 0.0000,
0.0000, 0.0000, 0.3333, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.6667]])
What are possible causes for a nan output and how can I fix those?
| nan values as outputs just mean that the training is unstable, which can have almost any possible cause, including all kinds of bugs in the code. If you think your code is correct, you can try addressing the instability by lowering the learning rate or using gradient clipping.
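For example, gradient clipping can be added between the backward pass and the optimizer step (a sketch assuming a standard training loop like the one in the question):
# inside the training loop
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)  # cap the gradient norm
optimizer.step()
optimizer.zero_grad()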
| https://stackoverflow.com/questions/66625645/ |
tuple unpacking in X_test | I am following a simple tutorial in PyTorch. The tutorial uses the diabetes dataset and builds a simple two-layer network. In one part of the tutorial there is the following piece of code, where X_test is iterated over using enumerate.
predictions=[]
with torch.no_grad():
for i, data in enumerate(X_test):
y_pred=model(data)
predictions.append(y_pred.argmax().item())
print(y_pred.argmax().item())
While I thought that I understood enumerate, I am a little confused. What are i and data referring to in the above code?
In general, I understand that it is a tuple-unpacking procedure.
| enumerate simply enumerates iterable items.
e.g.
my_list = ['cat', 'dog', 'elephant']
print(list(enumerate(my_list)))
>> [(0, 'cat'), (1, 'dog'), (2, 'elephant')]
In your case, i is the index of the data variable at each step of the loop (it is not used, though) and data is the feature tensor of one row of your dataset.
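As a concrete sketch of what the loop unpacks (shapes here are hypothetical):
for i, data in enumerate(X_test):
    # i    -> 0, 1, 2, ...   (the running row index)
    # data -> X_test[i]      (the feature tensor of that row)
    print(i, data.shape)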
| https://stackoverflow.com/questions/66628243/ |
GPU underutilized in Actor Critic (A2C) Stable Baselines3 implementation | I am trying to use A2C of StablesBaselines3 for training an agent on my custom environment. My problem is that my GPU Utilization is very less (around 10 % only) while my CPU utilization has hit the ceiling. Because of this the training is very very slow. I have tried the following things as per this discussion thread addressed by @araffin
[https://github.com/hill-a/stable-baselines/issues/308]
Expanding the Actor and Critic architecture to a three layer neural
network having 256, 256 and 128 neurons respectively. The GPU
utilization did increase after that but it was only marginal
(increased from 10 % to 15 %) as in this suggestion
changed device argument of A2C method to 'cuda' from the default which is 'auto' -
No improvement
It is not entirely clear what worked for the user in the above mentioned discussion.
My A2C is intialized as follows:
import gym
from stable_baselines3 import A2C
policy_kwargs = dict(activation_fn=th.nn.ReLU,
net_arch=[dict(pi=[256, 256, 128], vf=[256, 256, 128])])
model = A2C("MlpPolicy", env, verbose=1, learning_rate=linear_schedule(0.001),\
n_steps=50, gamma=0.8, gae_lambda=1.0, ent_coef=0.2, vf_coef=0.2, \
tensorboard_log=logdir, policy_kwargs=policy_kwargs, seed=50, device='cuda' )
model.learn(total_timesteps=50000)
GPU configurations are:
Cuda version - 9.0
GPU - NVIDIA RTX 2080i
GPU mem - 11GB (out of that only about 1600MB is being used)
CPU configurations
Threads per Core - 2
Core per socket - 8
Intel i9 core 9900 K @ 3.6 Ghz
CPU Mem - 32GB
SW versions
Torch - 1.6.0
Tensorflow GPU - 2.4.1 (although I know SB3 implementation uses Torch)
Python 3.7 on anaconda environment
Any help will be appreciated!
| Stable Baselines is using your GPU. If you open Task Manager,
go to the second tab, click on your GPU and, instead of "3D",
select "Cuda"; you will then see the actual CUDA usage.
I had some trouble with my env because it uses pandas, and pandas runs on the CPU; on Windows it is not easily possible to use cuDF, so my CPU was at 100%.
I used pandas and NumPy in my custom env;
to improve it I should use cuNumeric and cuDF instead, but to do that I need Linux.
So for now, my CPU is at 100%, the GPU is at about 70% CUDA utilization, and GPU memory usage is almost 1.7 GB.
how to solve this (Pytorch RuntimeError: 1D target tensor expected, multi-target not supported) | I am a newbie in PyTorch and deep learning.
My data set is 53502 x 58,
and I have a problem with this code:
model = nn.Sequential(
nn.Linear(58,64),
nn.ReLU(),
nn.Linear(64,32),
nn.ReLU(),
nn.Linear(32,16),
nn.ReLU(),
nn.Linear(16,2),
nn.LogSoftmax(1)
)
criterion = nn.NLLLoss()
optimizer = optim.AdamW(model.parameters(), lr = 0.0001)
epoch = 500
train_cost, test_cost = [], []
for i in range(epoch):
model.train()
cost = 0
for feature, target in trainloader:
output = model(feature) #feedforward
loss = criterion(output, target) #loss
loss.backward() #backprop
optimizer.step() #update weight
optimizer.zero_grad() #zero grad
cost += loss.item() * feature.shape[0]
train_cost.append(cost / len(train_set))
with torch.no_grad():
model.eval()
cost = 0
for feature, target in testloader:
output = model(feature) #feedforward
loss = criterion(output, target) #loss
cost += loss.item() * feature.shape
test_cost.append(cost / len(test_set))
print(f'\repoch {i+1}/{epoch} | train_cost: {train_cost[-1]} | test_cost : {test_cost[-1]}', end = "")
and then I get a problem like this:
2262 .format(input.size(0), target.size(0)))
2263 if dim == 2:
-> 2264 ret = torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
2265 elif dim == 4:
2266 ret = torch._C._nn.nll_loss2d(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
RuntimeError: 1D target tensor expected, multi-target not supported
What's wrong?
How do I solve this problem?
Why does this happen?
Thank you very much in advance!
| When using NLLLoss the target tensor must contain the index representation of the labels and not one-hot. So for example:
I guess this is what your target looks like:
target = [0, 0, 1, 0]
Just convert it to just the number which is the index of the 1:
[0, 0, 1, 0] -> [2]
[1, 0, 0, 0] -> [0]
[0, 0, 0, 1] -> [3]
And then convert it to long tensor, ie:
target = [2]
target = torch.Tensor(target).type(torch.LongTensor)
It might be confusing that your output is a tensor whose length equals the number of classes while your target is a single number, but that is how it works.
You can check it out yourself here.
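As a side note (not part of the original answer): if your whole target batch is stored as a 2-D one-hot tensor, the conversion can be done for the entire batch in one call, assuming a shape of (N, num_classes):
import torch

one_hot = torch.tensor([[0, 0, 1, 0],
                        [1, 0, 0, 0],
                        [0, 0, 0, 1]])
targets = one_hot.argmax(dim=1)   # tensor([2, 0, 3]), already a LongTensor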
| https://stackoverflow.com/questions/66635987/ |
Difference between Xavier weights pytorch implementation | What is the difference between nn.init.xavier_uniform and nn.init.xavier_uniform_ when initialising weights?
| The _ convention in nn.init.xavier_uniform_ is PyTorch's way of doing an operation in place. This convention applies to many of its functions.
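For example, the in-place variant is typically used like this (a minimal usage sketch):
import torch.nn as nn

layer = nn.Linear(128, 64)
nn.init.xavier_uniform_(layer.weight)   # fills layer.weight in place and returns it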
| https://stackoverflow.com/questions/66640099/ |
Pass an arbitrary image size to cnn in pytorch | I'm trying to train a LeNet model in PyTorch. The idea is to feed images of any size into it, so I started using nn.AdaptiveAvgPool2d, but the error comes up as
mat1 dim 1 must match mat2 dim 0
Here is my code
class LeNet5(nn.Module):
def __init__(self, num_classes=10):
super(LeNet5, self).__init__()
self.conv_1 = nn.Conv2d(
in_channels=1, out_channels=32, kernel_size=5, bias=False
)
self.relu_1 = nn.ReLU(inplace=True)
self.maxpool_1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv_2 = nn.Conv2d(
in_channels=32, out_channels=256, kernel_size=5, bias=False
)
self.relu_2 = nn.ReLU(inplace=True)
self.maxpool_2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(output_size=1)
self.flatten = nn.Flatten()
self.fc_1 = nn.Linear(in_features=4096, out_features=120, bias=False)
self.fc_2 = nn.Linear(in_features=120, out_features=84)
self.fc_3 = nn.Linear(in_features=84, out_features=num_classes)
def forward(self, input):
conv_1_output = self.conv_1(input)
relu_1_output = self.relu_1(conv_1_output)
maxpool_1_output = self.maxpool_1(relu_1_output)
conv_2_output = self.conv_2(maxpool_1_output)
relu_2_output = self.relu_2(conv_2_output)
maxpool_2_output = self.maxpool_2(relu_2_output)
flatten_output = self.flatten((self.avgpool(maxpool_2_output).view(maxpool_2_output.shape[0], -1)))
fc_1_output = self.fc_1(flatten_output)
fc_2_output = self.fc_2(fc_1_output)
fc_3_output = self.fc_3(fc_2_output)
return fc_3_output
| If you read the theory on AdaptiveAvgPool2d, this is what it says: "we specify the output size, and the stride and kernel size are automatically selected to adapt to the needs".
More info available here
Hence, your spatial dimensions are reduced by AdaptiveAvgPool2d, not the depth (number of feature maps).
So the spatial size will be 1x1 while the depth will still be 256, making your layer
self.fc_1 = nn.Linear(in_features=256, out_features=120, bias=False) and not self.fc_1 = nn.Linear(in_features=4096, out_features=120, bias=False)
| https://stackoverflow.com/questions/66640116/ |
Performance in reading dicom files with SimpleITK and PyTorch | I want to directly load an image from memory into Python in PyTorch tensor format.
I modified GetArrayViewFromImage() function by replacing those lines:
image_memory_view = _GetMemoryViewFromImage(image)
array_view = numpy.asarray(image_memory_view).view(dtype = dtype)
by:
image_memory_view = _GetMemoryViewFromImage(image)
array_view = torch.as_tensor(image_memory_view, dtype = dtype)
In practice it is so slow that I replaced it with:
image_memory_view = _GetMemoryViewFromImage(image)
array_view = numpy.asarray(image_memory_view).view(dtype = dtype)
array_view = torch.as_tensor(array_view)
Now I have two questions:
It is much slower, and I don't really know why reading it with numpy and then converting is faster.
Even though I add the dtype argument and it returns a tensor with the correct dtype, it reads the values wrong (e.g. -1000 in numpy is read as 252, no matter which torch.dtype I choose), which is not a problem when reading with numpy and converting. Why is that happening?
| While this does not directly answer your question, I strongly recommend using the torchio package, instead of dealing with these IO issues yourself (torchio uses SimpleITK under the hood).
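A rough sketch of what that looks like (based on my understanding of the torchio API; please double-check the exact names against the torchio docs):
import torchio as tio

image = tio.ScalarImage('path/to/dicom_series/')  # read via SimpleITK under the hood
tensor = image.data                               # torch tensor, shape (channels, W, H, D)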
| https://stackoverflow.com/questions/66653808/ |
How to vectorize loss for a LSTM doing sequential Language modelling | So I have an assignment involving Language Modelling and I passed all the unit tests but my code is too slow to run. I think it's because of the way I compute my loss. The formula we're given is the following:
My naive implementation is the following:
losses_batch_list = []
batch_size = log_probas.size(0)
for b in range(batch_size):
seq_length = max([i for i, e in enumerate(mask[b,:]) if e != 0]) + 1
loss_batch = 0
for t in range(seq_length):
for n in range(self.vocabulary_size):
if targets[b, t] == n:
loss_batch += log_probas[b, t, n].detach()
loss_batch = - loss_batch / seq_length
losses_batch_list.append(loss_batch)
loss = torch.tensor(np.mean(losses_batch_list))
return loss
But that loop runs forever, since the vocabulary size is approximately the same as GPT-1 (~40,000) and the sequence length is up to 255 (sometimes it is shorter because of padding, hence the mask).
Does anyone have any tips on how to vectorize/speed this up?
I know it's correct but I can't report any results with it...
Thanks!
| # B = batch_size, T = sequence_length (padded), N = vocab_size
if mask.dtype == torch.bool:
    mask = mask.view(-1)                     # (B, T) -> (B*T,)
else:
    mask = mask.bool().view(-1)              # (B, T) -> (B*T,)
log_probas = log_probas.view(-1, N)          # (B, T, N) -> (B*T, N)
targets = targets.view(-1, 1)                # (B, T) -> (B*T, 1)
loss = torch.gather(log_probas[mask], -1, targets[mask])  # log-probs of the target tokens, padded positions dropped
loss = -loss.mean()                          # negative log-likelihood, averaged over the non-padded tokens
| https://stackoverflow.com/questions/66665589/ |
Pytorch MNIST autoencoder to learn 10-digit classification | I'm trying to build a simple autoencoder for MNIST, where the middle layer is just 10 neurons. My hope is that it will learn to classify the 10 digits, and I assume that would lead to the lowest error in the end (wrt reproducing the original image).
I have the following code, which I've already played around with a fair amount. If I run it for up to 100 epochs, the loss doesn't really go below 1.0, and if I evaluate it, it's obviously not working. What am I missing?
Training:
import torch
import torchvision as tv
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision.utils import save_image
num_epochs = 100
batch_size = 64
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
trainset = tv.datasets.MNIST(root='./data', train=True, download=True, transform=transform)
dataloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=4)
class Autoencoder(nn.Module):
def __init__(self):
super(Autoencoder,self).__init__()
self.encoder = nn.Sequential(
# 28 x 28
nn.Conv2d(1, 4, kernel_size=5),
nn.Dropout2d(p=0.2),
# 4 x 24 x 24
nn.ReLU(True),
nn.Conv2d(4, 8, kernel_size=5),
nn.Dropout2d(p=0.2),
# 8 x 20 x 20 = 3200
nn.ReLU(True),
nn.Flatten(),
nn.Linear(3200, 10),
nn.ReLU(True),
# 10
nn.Softmax(),
# 10
)
self.decoder = nn.Sequential(
# 10
nn.Linear(10, 400),
nn.ReLU(True),
# 400
nn.Unflatten(1, (1, 20, 20)),
# 20 x 20
nn.Dropout2d(p=0.2),
nn.ConvTranspose2d(1, 10, kernel_size=5),
# 24 x 24
nn.ReLU(True),
nn.Dropout2d(p=0.2),
nn.ConvTranspose2d(10, 1, kernel_size=5),
# 28 x 28
nn.ReLU(True),
nn.Sigmoid(),
)
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x
model = Autoencoder().cpu()
distance = nn.MSELoss()
#optimizer = torch.optim.Adam(model.parameters(), weight_decay=1e-5)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
for epoch in range(num_epochs):
for data in dataloader:
img, _ = data
img = Variable(img).cpu()
output = model(img)
loss = distance(output, img)
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('epoch [{}/{}], loss: {:.4f}'.format(epoch+1, num_epochs, loss.item()))
Already the training loss indicates that the thing is not working, but printing out the confusion matrix (which in this case should not necessarily be the identity matrix, since the neurons can be ordered arbitrarily, but should be row/column-reorderable into an approximation of the identity, if this worked):
import numpy as np
confusion_matrix = np.zeros((10, 10))
batch_size = 20*1000
testset = tv.datasets.MNIST(root='./data', train=False, download=True, transform=transform)
dataloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=True, num_workers=4)
for data in dataloader:
imgs, labels = data
imgs = Variable(imgs).cpu()
encs = model.encoder(imgs).detach().numpy()
for i in range(len(encs)):
predicted = np.argmax(encs[i])
actual = labels[i]
confusion_matrix[actual][predicted] += 1
print(confusion_matrix)
| Autoencoders are technically not used as classifiers in general. They learn how to encode a given image into a short vector and to reconstruct the same image from that encoded vector. It is a way of compressing an image into a short vector.
Since you want to train the autoencoder with classification capabilities, we need to make some changes to the model. First of all, there will be two different losses:
MSE loss: the current autoencoder reconstruction loss. This will force the network to output an image as close as possible to the given image from the compressed representation.
Classification loss: classic cross entropy should do the trick. This loss takes the compressed representation (C-dimensional) and the target labels to calculate the negative log-likelihood loss. It will force the encoder to output a compressed representation that aligns well with the target class.
I've done a couple of changes to your code to get the combined model working. Firstly, let's see the code:
import torch
import torchvision as tv
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision.utils import save_image
num_epochs = 10
batch_size = 64
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
trainset = tv.datasets.MNIST(root='./data', train=True, download=True, transform=transform)
testset = tv.datasets.MNIST(root='./data', train=False, download=True, transform=transform)
dataloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=4)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=True, num_workers=4)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class Autoencoderv3(nn.Module):
def __init__(self):
super(Autoencoderv3,self).__init__()
self.encoder = nn.Sequential(
nn.Conv2d(1, 4, kernel_size=5),
nn.Dropout2d(p=0.1),
nn.ReLU(True),
nn.Conv2d(4, 8, kernel_size=5),
nn.Dropout2d(p=0.1),
nn.ReLU(True),
nn.Flatten(),
nn.Linear(3200, 10)
)
self.softmax = nn.Softmax(dim=1)
self.decoder = nn.Sequential(
nn.Linear(10, 400),
nn.ReLU(True),
nn.Unflatten(1, (1, 20, 20)),
nn.Dropout2d(p=0.1),
nn.ConvTranspose2d(1, 10, kernel_size=5),
nn.ReLU(True),
nn.Dropout2d(p=0.1),
nn.ConvTranspose2d(10, 1, kernel_size=5)
)
def forward(self, x):
out_en = self.encoder(x)
out = self.softmax(out_en)
out = self.decoder(out)
return out, out_en
model = Autoencoderv3().to(device)
distance = nn.MSELoss()
class_loss = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
mse_multp = 0.5
cls_multp = 0.5
model.train()
for epoch in range(num_epochs):
total_mseloss = 0.0
total_clsloss = 0.0
for ind, data in enumerate(dataloader):
img, labels = data[0].to(device), data[1].to(device)
output, output_en = model(img)
loss_mse = distance(output, img)
loss_cls = class_loss(output_en, labels)
loss = (mse_multp * loss_mse) + (cls_multp * loss_cls) # Combine two losses together
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Track this epoch's loss
total_mseloss += loss_mse.item()
total_clsloss += loss_cls.item()
# Check accuracy on test set after each epoch:
model.eval() # Turn off dropout in evaluation mode
acc = 0.0
total_samples = 0
for data in testloader:
# We only care about the 10 dimensional encoder output for classification
img, labels = data[0].to(device), data[1].to(device)
_, output_en = model(img)
# output_en contains 10 values for each input, apply softmax to calculate class probabilities
prob = nn.functional.softmax(output_en, dim = 1)
pred = torch.max(prob, dim=1)[1].detach().cpu().numpy() # Max prob assigned to class
acc += (pred == labels.cpu().numpy()).sum()
total_samples += labels.shape[0]
model.train() # Enables dropout back again
print('epoch [{}/{}], loss_mse: {:.4f} loss_cls: {:.4f} Acc on test: {:.4f}'.format(epoch+1, num_epochs, total_mseloss / len(dataloader), total_clsloss / len(dataloader), acc / total_samples))
This code should now train the model both as a classifier and as a generative autoencoder. In general though, this type of approach can be a bit tricky to get training. In this case, the MNIST data is simple enough to get those two complementary losses to train together. In more complex cases like Generative Adversarial Networks (GANs), people alternate training, freeze one model, etc. to get the whole model trained. This autoencoder model trains easily on MNIST without those kinds of tricks:
epoch [1/10], loss_mse: 0.8928 loss_cls: 0.4627 Acc on test: 0.9463
epoch [2/10], loss_mse: 0.8287 loss_cls: 0.2105 Acc on test: 0.9639
epoch [3/10], loss_mse: 0.7803 loss_cls: 0.1574 Acc on test: 0.9737
epoch [4/10], loss_mse: 0.7513 loss_cls: 0.1290 Acc on test: 0.9764
epoch [5/10], loss_mse: 0.7298 loss_cls: 0.1117 Acc on test: 0.9762
epoch [6/10], loss_mse: 0.7110 loss_cls: 0.1017 Acc on test: 0.9801
epoch [7/10], loss_mse: 0.6962 loss_cls: 0.0920 Acc on test: 0.9794
epoch [8/10], loss_mse: 0.6824 loss_cls: 0.0859 Acc on test: 0.9806
epoch [9/10], loss_mse: 0.6733 loss_cls: 0.0797 Acc on test: 0.9814
epoch [10/10], loss_mse: 0.6671 loss_cls: 0.0764 Acc on test: 0.9813
As you can see, both the MSE loss and the classification loss are decreasing, and the accuracy on the test set is increasing. In the code, the MSE loss and the classification loss are added together. This means the gradients calculated from each loss pull the network in their own direction. I've added loss multipliers to control the contribution from each loss. If MSE has a higher multiplier, the network gets more gradient from the MSE loss, meaning it will learn reconstruction better; if the CLS loss has a higher multiplier, the network will get better classification accuracy. You can play with those multipliers to see how the end result changes, though MNIST is a very easy dataset, so the differences might be hard to see. Currently, it doesn't do too badly at reconstructing inputs:
import numpy as np
import matplotlib.pyplot as plt
model.eval()
img, labels = list(dataloader)[0]
img = img.to(device)
output, output_en = model(img)
inp = img[0:10, 0, :, :].squeeze().detach().cpu()
out = output[0:10, 0, :, :].squeeze().detach().cpu()
# Just some trick to concatenate first ten images next to each other
inp = inp.permute(1,0,2).reshape(28, -1).numpy()
out = out.permute(1,0,2).reshape(28, -1).numpy()
combined = np.vstack([inp, out])
plt.imshow(combined)
plt.show()
I am sure with more training and fine tuning loss multipliers, you can get better results.
Lastly, the decoder receives the softmax of the encoder output. This means the decoder tries to create the output image from the 0-1 class probabilities of the input. So if the softmax probability vector is 0.98 at position 0 and close to zero elsewhere, the decoder should output an image that looks like a 0. Here I feed the network inputs to create reconstructions of the digits 0 to 9:
test_arr = np.zeros([10, 10], dtype = np.float32)
ind = np.arange(0, 10)
test_arr[ind, ind] = 1.0
model.eval()
img = torch.from_numpy(test_arr).to(device)
out = model.decoder(img)
out = out[0:10, 0, :, :].squeeze().detach().cpu()
out = out.permute(1,0,2).reshape(28, -1).numpy()
plt.imshow(out)
plt.show()
I've also made a few small changes to the code (printing the epoch-average loss, etc.) which don't really change the training logic; you can see those changes in the code above, and let me know if anything looks weird.
| https://stackoverflow.com/questions/66667949/ |
pytorch collections.OrderedDict' object has no attribute 'to' | this is my main code,but I don't know how to fix the problem?
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = torch.load('./checkpoints/fcn_model_5.pth') # 加载模型
model = model.to(device)
| You are loading the checkpoint as a state dict; it is not an nn.Module object.
checkpoint = './checkpoints/fcn_model_5.pth'
model = your_model() # a torch.nn.Module object
model.load_state_dict(torch.load(checkpoint))
model = model.to(device)
| https://stackoverflow.com/questions/66670326/ |
How to calculate the mean and the std of cifar10 data | Pytorch is using the following values as the mean and std for the cifar10 data:
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
I need to understand the concept behind calculating it, because this data is a 3-channel image and I do not understand what is summed and divided over what, and so on.
Also if someone can share a code for calculating the mean and the std, would be so thankful.
| The 0.5 values are just approximations of the CIFAR-10 mean and std values over the three channels (r, g, b). The precise values for the CIFAR-10 train set are
mean: 0.49139968, 0.48215827, 0.44653124
std: 0.24703233, 0.24348505, 0.26158768
You may calculate these using the following script:
import torch
import numpy
import torchvision.datasets as datasets
from torchvision import transforms
cifar_trainset = datasets.CIFAR10(root='./data', train=True, download=True, transform=transforms.ToTensor())
imgs = [item[0] for item in cifar_trainset] # item[0] and item[1] are image and its label
imgs = torch.stack(imgs, dim=0).numpy()
# calculate mean over each channel (r,g,b)
mean_r = imgs[:,0,:,:].mean()
mean_g = imgs[:,1,:,:].mean()
mean_b = imgs[:,2,:,:].mean()
print(mean_r,mean_g,mean_b)
# calculate std over each channel (r,g,b)
std_r = imgs[:,0,:,:].std()
std_g = imgs[:,1,:,:].std()
std_b = imgs[:,2,:,:].std()
print(std_r,std_g,std_b)
Also, you may find the same mean and std values here and here
| https://stackoverflow.com/questions/66678052/ |
How to incorporate mask into negative likelihood loss (torch.nn.functional.nll_loss) | Hello, I am implementing an LSTM for language modelling
for homework, and I am at the loss-implementation phase. Our instructor told us to use F.nll_loss, but the sequences are padded and we have to take into account a given mask that tells us where the sequences stop.
input:
log_probas (batch_size, sequence_length(padded), vocabulary size)
targets (batch_size, sequence_length(padded))
mask (batch_size, sequence_length(padded)
naive implementation which works without taking into account the mask:
import torch.nn.functional as F
loss = F.nll_loss(log_probas.transpose(1, 2), targets)
I've been crawling the internet and banging my head but can't seem to find an answer on how to incorporate a mask into the averaging scheme of the loss.
| You could reshape the tensors, use the mask to select the non-padded tokens, and then compute the loss:
vocab_size = log_probas.size(-1)
log_probas = log_probas.view(-1, vocab_size)
targets = targets.view(-1)
mask = mask.view(-1).bool()
loss = F.nll_loss(log_probas[mask], targets[mask])
| https://stackoverflow.com/questions/66678314/ |
PyTorch can't find the name?? (NameError: name 'device' is not defined) | sorry - I'm a complete beginner!
I am trying to build a 'mini-system' using the Torchreid libraries from https://kaiyangzhou.github.io/deep-person-reid/index.html#
In their version they use CUDA but my Mac is not compatible with CUDA and it doesn't have a
CUDA enabled GPU so I installed the CPU-only version of PyTorch instead - therefore I changed model = model.cuda() to model = model.to(device) and added in device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') as you can see below. I thought this would work but I keep getting the NameError: name 'device' is not defined and I don't know what to do.
Please help!
(I also tried putting device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') at the top instead of the bottom to see if it made any difference but I just got another error - NameError: name 'torch' is not defined)
model = torchreid.models.build_model(
name='resnet50',
num_classes=datamanager.num_train_pids,
loss='softmax',
pretrained=True
)
model = model.to(device)
optimizer = torchreid.optim.build_optimizer(
model,
optim='adam',
lr=0.0003
)
scheduler = torchreid.optim.build_lr_scheduler(
optimizer,
lr_scheduler='single_step',
stepsize=20
)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
| Define the device variable before using it:
import torch
...
model = torchreid.models.build_model(
name='resnet50',
num_classes=datamanager.num_train_pids,
loss='softmax',
pretrained=True
)
# Just right before the actual usage
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
...
| https://stackoverflow.com/questions/66679163/ |
Why is pytorch softmax function not working? | so this is my code
import torch.nn.functional as F
import torch
inputs = [1,2,3]
input = torch.tensor(inputs)
output = F.softmax(input, dim=1)
print(output)
Is the reason why the code is not working the dim argument?
the error here:
File "c:\Users\user\Desktop\AI\pytorch_jovian\linear_reg.py", line 19, in <module>
output = F.softmax(input, dim=1)
File "C:\Users\user\AppData\Local\Programs\Python\Python39\lib\site-packages\torch\nn\functional.py", line 1583, in softmax
ret = input.softmax(dim)
IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)
| Apart from needing dim=0 (the input is 1-D, so dim=1 is out of range), there is another issue in your code: softmax doesn't work on a long (integer) tensor, so it should be converted to a float or double tensor first
>>> input = torch.tensor([1, 2, 3])
>>> input
tensor([1, 2, 3])
>>> F.softmax(input.float(), dim=0)
tensor([0.0900, 0.2447, 0.6652])
| https://stackoverflow.com/questions/66690233/ |
Mapping tensor in pytorch | I have the following two tensors:
img is a RGB image of shape (224,224,3)
uvs is a tensor with same spacial size e.g. (224, 224, 2) that maps to coordinates (x,y). In other words it provides (x,y) coordinates for every pixel of the input image.
I want to create now a new output image tensor that contains on index (x,y) the value of the input image. So the output should be an image as well with the pixels rearranged according to the mapping tensor.
Small toy example:
img = [[c1,c2], [c3, c4]] where c is a RGB color [r, g, b]
uvs = [[[0,0], [1,1]],[[0,1], [1,0]]]
out = [[c1, c3], [c4, c2]]
How would one achieve such a thing in pytorch in a fast vectorized manner?
| Try with:
out = img[uvs[..., 0], uvs[..., 1]]
| https://stackoverflow.com/questions/66693083/ |
Define manually sorted MNIST dataset with batch size = 1 in PyTorch | [] : this indicates a batch. For example, if the batch size is 5, then the batch will look something like this [1,4,7,4,2]. The length of [] indicates the batch size.
What I want to make a training set something looks like this:
[1] -> [1] -> [1] -> [1] -> [1] -> [7] -> [7] -> [7] -> [7] -> [7] -> [3] -> [3] -> [3] -> [3] -> [3] -> ... and so on
Which means that firstly five 1s (batch size = 1), secondly five 7s (batch size = 1), thirdly five 3s (batch size = 1) and so on...
Can someone please provide me an idea?
It will be very helpful if someone can explain how to implement this with codes.
Thank you! :)
| If you want a DataLoader where you just define the class label for each sample, then you can make use of the torch.utils.data.Subset class. Despite its name, it doesn't necessarily need to define a strict subset of the dataset. For example
import torch
import torchvision
import torchvision.transforms as T
from itertools import cycle
mnist = torchvision.datasets.MNIST(root='./', train=True, transform=T.ToTensor())
# not sure what "...and so on" implies, but define this list however you like
target_classes = [1, 1, 1, 1, 1, 7, 7, 7, 7, 7, 3, 3, 3, 3, 3]
# create cyclic iterators of indices for each class in MNIST
indices = dict()
for label in torch.unique(mnist.targets).tolist():
indices[label] = cycle(torch.nonzero(mnist.targets == label).flatten().tolist())
# define the order of indices in the new mnist subset based on target_classes
new_indices = []
for t in target_classes:
new_indices.append(next(indices[t]))
# create a Subset of MNIST based on new_indices
mnist_modified = torch.utils.data.Subset(mnist, new_indices)
dataloader = torch.utils.data.DataLoader(mnist_modified, batch_size=1, shuffle=False)
for idx, (x, y) in enumerate(dataloader):
# training loop
print(f'Batch {idx+1} labels: {y.tolist()}')
| https://stackoverflow.com/questions/66695251/ |
How to find partial derivative in pytorch | I have a model u(x,t) with layers 2X50, then 50X50, and 50X1.
I train the model with input x,t of size [100,2]. In the final layer I get u and now I want to differentiate it w.r.t to x and t and double differentiate w.r.t to x. How do I do this in PyTorch?
| You can use PyTorch's autograd engine like so:
import torch
x = torch.randn(100, requires_grad=True)
t = torch.randn(2, requires_grad=True)
out = u(x, t)   # `u` is your trained model; don't overwrite its name with the output

# 1st derivatives (grad_outputs is needed because `out` is not a scalar)
dt = torch.autograd.grad(out, t, grad_outputs=torch.ones_like(out), retain_graph=True)[0]
dx = torch.autograd.grad(out, x, grad_outputs=torch.ones_like(out), create_graph=True)[0]

# 2nd derivative w.r.t. x (higher orders require `create_graph=True` on the lower-order grad)
ddx = torch.autograd.grad(dx, x, grad_outputs=torch.ones_like(dx))[0]
| https://stackoverflow.com/questions/66708568/ |
How can i use pytorch model in C#? | I have a pytorch model in NLP and a script for use it in python. Now i want to use this script in C#. I tried run python script from C# and it worked. I get user sentence in C#, pass it to python and its outputs use in C#. The problem is that i want to do this work in a loop until user select exit but every time it goes to python code, it must load pytorch model and its time consuming.
Is there a way to load model one time and then in a loop get input from user and inference with loaded model?
| You can export the model to ONNX format and then use the OpenCV DNN module or TensorRT for inference. It will give you a significant boost in speed, and your whole inference code will be in C#.
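For reference, the export is a one-time step on the Python side. A minimal sketch (the model, dummy input shape and file name below are placeholders for your own NLP model, not code from the question):
import torch

model.eval()
example_input = torch.randint(0, 30000, (1, 128))    # dummy token IDs matching your model's expected input

torch.onnx.export(
    model,
    example_input,
    "model.onnx",                                     # file you later load from C#
    input_names=["input_ids"],
    output_names=["output"],
    dynamic_axes={"input_ids": {0: "batch", 1: "seq_len"}},
    opset_version=11,
)
The exported model.onnx can then be loaded once on the C# side (for example via OpenCV's DNN module or ONNX Runtime) and reused inside your loop without reloading the weights each time.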
| https://stackoverflow.com/questions/66710421/ |
PyTorch How to code Multi Head Self Attention in parallel? | I want to encode the word (embedding) sequence with 16-Head Self-Attention.
Currently I use a nn.ModuleList together with a for loop to generate the output of each head then concatenate all of them. This approach is extremely slow and I wonder if there's way to code MHA in parallel?
To generalize, I would like to know if I can 'stack' multiple nn.Linear, as I feed the input vector, the multiple outputs will be computed in parallel.
| I figured it out. Since nn.Linear is actually an affine transformation with a weight matrix and a bias matrix, one can easily wrap such matrices in nn.Parameter and take advantage of broadcast semantics to achieve the goal.
Edit: I also found that a single nn.Linear(d_model, n_heads*d_key) works identically.
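A minimal sketch of the nn.Parameter + broadcasting idea (the class name, shapes and initialization here are illustrative assumptions, not the exact code):
import torch
import torch.nn as nn

class ParallelHeadProjection(nn.Module):
    # Projects (batch, seq, d_model) to (batch, n_heads, seq, d_key) for all heads in one batched op
    def __init__(self, d_model, n_heads, d_key):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(n_heads, d_model, d_key) * d_model ** -0.5)
        self.bias = nn.Parameter(torch.zeros(n_heads, 1, d_key))

    def forward(self, x):                                      # x: (batch, seq, d_model)
        out = torch.einsum('bsd,hdk->bhsk', x, self.weight)    # one einsum instead of a per-head loop
        return out + self.bias                                 # bias broadcasts over batch and seq

# Equivalent alternative mentioned in the edit: one wide linear layer, then split per head
# proj = nn.Linear(d_model, n_heads * d_key)
# out = proj(x).view(x.size(0), x.size(1), n_heads, d_key).transpose(1, 2)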
| https://stackoverflow.com/questions/66711170/ |
PyTorch using LR-Scheduler with param groups of different LR's | I have defined the following optimizer with different learning rates for each parameter group:
optimizer = optim.SGD([
{'params': param_groups[0], 'lr': CFG.lr, 'weight_decay': CFG.weight_decay},
{'params': param_groups[1], 'lr': 2*CFG.lr, 'weight_decay': 0},
{'params': param_groups[2], 'lr': 10*CFG.lr, 'weight_decay': CFG.weight_decay},
{'params': param_groups[3], 'lr': 20*CFG.lr, 'weight_decay': 0},
], lr=CFG.lr, momentum=0.9, weight_decay=CFG.weight_decay, nesterov=CFG.nesterov)
Now I want to use an LR scheduler to update all the learning rates and not only the first one, because by default, a scheduler would only update param_groups[0]?
scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=5, T_mult=2, eta_min=CFG.min_lr, last_epoch=-1, verbose=True)
Giving me:
Parameter Group 0
dampening: 0
initial_lr: 0.001
lr: 0.0009999603905218616
momentum: 0.9
nesterov: True
weight_decay: 0.0001
Parameter Group 1
dampening: 0
initial_lr: 0.002
lr: 0.002
momentum: 0.9
nesterov: True
weight_decay: 0
Parameter Group 2
dampening: 0
initial_lr: 0.01
lr: 0.01
momentum: 0.9
nesterov: True
weight_decay: 0.0001
Parameter Group 3
dampening: 0
initial_lr: 0.02
lr: 0.02
momentum: 0.9
nesterov: True
weight_decay: 0
)
after one update.
Any idea how to update all the learning rates with a scheduler?
| You are right, learning rate scheduler should update each group's learning rate one by one. After a bit of testing, it looks like, this problem only occurs with CosineAnnealingWarmRestarts scheduler. I've tested CosineAnnealingLR and couple of other schedulers, they updated each group's learning rate:
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, 100, verbose=True)
Then, to find the cause of the problem I had a look at the source code of learning rate scheduler: https://github.com/pytorch/pytorch/blob/master/torch/optim/lr_scheduler.py
From a quick look through, it looks like there are some differences between the implementation of CosineAnnealingLR and CosineAnnealingWarmRestarts get_lr() functions:
# CosineAnnealingLR:
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
if self.last_epoch == 0:
return self.base_lrs
elif (self.last_epoch - 1 - self.T_max) % (2 * self.T_max) == 0:
return [group['lr'] + (base_lr - self.eta_min) *
(1 - math.cos(math.pi / self.T_max)) / 2
for base_lr, group in
zip(self.base_lrs, self.optimizer.param_groups)]
return [(1 + math.cos(math.pi * self.last_epoch / self.T_max)) /
(1 + math.cos(math.pi * (self.last_epoch - 1) / self.T_max)) *
(group['lr'] - self.eta_min) + self.eta_min
for group in self.optimizer.param_groups]
# CosineAnnealingWarmRestarts:
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
return [self.eta_min + (base_lr - self.eta_min) * (1 + math.cos(math.pi * self.T_cur / self.T_i)) / 2
for base_lr in self.base_lrs]
So after looking through the code, I feel like this issue is a bug. Even the documentation of CosineAnnealingWarmRestart suggests that "Set the learning rate of each parameter group using a cosine annealing schedule".
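Until that is fixed upstream, one possible workaround (a hedged sketch, not part of the original answer) is to re-derive the other groups' learning rates from group 0 after each scheduler step, using the same multipliers the optimizer was built with:
lr_mults = [1, 2, 10, 20]          # the multipliers of CFG.lr used when building the param groups
scheduler.step()
base_lr = optimizer.param_groups[0]['lr']
for group, mult in zip(optimizer.param_groups, lr_mults):
    group['lr'] = base_lr * mult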
| https://stackoverflow.com/questions/66711210/ |
How to join two tensors | I have two tensors of dimension [3,1]. I need to join them as a [3,2] tensor.
t = torch.tensor([[3.],[1],[2]], requires_grad=True)
x = torch.tensor([[1.],[4],[5]], requires_grad=True)
I tried torch.cat and torch.stack but neither work for me.
| With cat you need to specify the dimension the tensors are concatenated along. By default this is 0, but you wish to use 1:
import torch
res = torch.cat([t, x], dim=1)
| https://stackoverflow.com/questions/66713761/ |
RuntimeError: 1D target tensor expected, multi-target not supported Python: NumPy | I am dealing with a CNN and I get the following error on the line loss = criterion(outputs, data_y):
Here is the relevant code snippet:
def run(model, X_train, Y_train, X_test, Y_test, learning_rate=0.01,
num_epochs=100, minibatch_size=8, print_cost=True):
seed = 0 # to keep results consistent (numpy seed)
(m, n_H0, n_W0, n_C0) = X_train.shape
costs = [] # To keep track of the cost
criterion = nn.NLLLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# Training loop
model.train() # Turn on the training mode
for epoch in range(num_epochs):
minibatch_cost = 0.
num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set
seed = seed + 1
minibatches = utils.generate_minibatch(X_train, Y_train, minibatch_size, seed)
for minibatch in minibatches:
(batch_x, batch_y) = minibatch
data_x = torch.Tensor(batch_x)
data_y = torch.LongTensor(batch_x)
### START YOUR CODE ###
# Zero the gradients
optimizer.zero_grad() # Hint: call zero_grad()
# Forward pass and compute loss
outputs = model(data_x) # Hint: use model as a callable
loss = criterion(outputs, data_y) # Hint: use criterion as a callable
# Backward and optimize
loss.backward() # Hint: call backward()
optimizer.step() # Hint: call step()
### END YOUR CODE ###
minibatch_cost += loss.item()
# Print the cost every epoch
minibatch_cost /= num_minibatches
if print_cost and epoch % 5 == 0:
print ("Cost after epoch %i: %f" % (epoch, minibatch_cost))
costs.append(minibatch_cost)
# Calculate accuracy on the train and test datasets
data_x = torch.Tensor(X_test)
data_y = torch.LongTensor(Y_test)
model.eval() # Turn on the evaluation mode
with torch.no_grad():
test_pred = model(data_x)
num_correct = (torch.argmax(test_pred, dim=1).view(data_y.size()).data == data_y.data).float().sum()
test_acc = num_correct / test_pred.size()[0]
print("Test Accuracy:", test_acc.item())
model = CNN_Model()
torch.manual_seed(0)
run(model, X_train, Y_train, X_test, Y_test)
And here is the error I'm getting:
RuntimeError Traceback (most recent call last)
<ipython-input-6-9839fc42e5c2> in <module>
3
4 torch.manual_seed(0)
----> 5 run(model, X_train, Y_train, X_test, Y_test)
6
7 # NOTE: It could be slow to run 100 epochs. Make sure that your costs for after each epoch
<ipython-input-5-05ddcdc9ddf5> in run(model, X_train, Y_train, X_test, Y_test, learning_rate, num_epochs, minibatch_size, print_cost)
40 # Forward pass and compute loss
41 outputs = model(data_x) # Hint: use model as a callable
---> 42 loss = criterion(outputs, data_y) # Hint: use criterion as a callable
43
44 # Backward and optimize
C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
887 result = self._slow_forward(*input, **kwargs)
888 else:
--> 889 result = self.forward(*input, **kwargs)
890 for hook in itertools.chain(
891 _global_forward_hooks.values(),
C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\loss.py in forward(self, input, target)
214 def forward(self, input: Tensor, target: Tensor) -> Tensor:
215 assert self.weight is None or isinstance(self.weight, Tensor)
--> 216 return F.nll_loss(input, target, weight=self.weight, ignore_index=self.ignore_index, reduction=self.reduction)
217
218
C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\functional.py in nll_loss(input, target, weight, size_average, ignore_index, reduce, reduction)
2383 )
2384 if dim == 2:
-> 2385 ret = torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
2386 elif dim == 4:
2387 ret = torch._C._nn.nll_loss2d(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
RuntimeError: 1D target tensor expected, multi-target not supported
| This error usually appears when you pass a one-hot-encoded target to CrossEntropy or NLLLoss (instead of a single class index), but your problem is simpler - you just have a typo here:
data_y = torch.LongTensor(batch_x) # <- should be `batch_y`
| https://stackoverflow.com/questions/66720209/ |
Average Pooling layer in Deep Learning and gradient artifacts | I know that in convolution layers the kernel size needs to be a multiple of the stride, or else it will produce artefacts in the gradient calculations, like the checkerboard problem.
Now does it also work like that in Pooling layers? I read somewhere that max pooling can also cause problems like that. Take this line in the discriminator for example:
self.downsample = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False)
I have a model (MUNIT) with it, and this is the image it produced:
It looks like the checkerboard problem, or at least a gradient problem, but I checked my convolution layers and didn't find the issue described above. They are all of size 4 with stride 2, or an odd size with stride 1.
| This doesn't look like a checkerboard artifact honestly. Also I don't think discriminator would be the problem, it's usually about image restoration (generator or decoder).
Took a quick look at the MUNIT and what they use in Decoder is torch.nn.Upsample with nearest neighbor upsampling (exact code line here).
You may try to use torch.nn.Conv2d followed by torch.nn.PixelShuffle, something like this:
import torch
in_channels = 32
upscale_factor = 2
out_channels = 16
upsampling = torch.nn.Sequential(
torch.nn.Conv2d(
in_channels,
out_channels * upscale_factor * upscale_factor,
kernel_size=3,
padding=1,
),
torch.nn.PixelShuffle(upscale_factor),
)
image = torch.randn(1, 32, 16, 16)
upsampling(image).shape # [1, 16, 32, 32]
This allows neural network to learn how to upsample the image instead of merely using torch.nn.Upsample which the network has no control over (and using below trick it should also be free of checkerboard artifacts).
Additionally, ICNR initialization for Conv2d should also help (possible implementation here or here). This init scheme initializes weights to act similar to nearest neighbor upsampling at the beginning (research paper here).
| https://stackoverflow.com/questions/66720639/ |
Pytorch RNN with no nonlinearity | Is it possible to implement an RNN layer with no nonlinearity in Pytorch like in Keras where one can set the activation to linear? By removing the nonlinearlity, I want to implement a first-order infinite-impulse-response (IIR) filter with a differentiable parameter and integrate it into my model for end-to-end learning. I can obviously implement the filter in Pytorch but I thought using an inbuilt function may be more efficient.
| Removing non-linearity from RNN turns it into a linear dense layer without any activation.
If that is what you want, then simply use nn.linear and set activation to None
Explanation
Here is why this happens. Fundamentally, an RNN for timesteps works as below -
h(t) = tanh(U.x(t) + W.h(t−1) + b)
h(0) = tanh(U0.x(0) + b0)
h(1) = tanh(U1.x(1) + W1.h(0) + b1)
h(2) = tanh(U2.x(2) + W2.h(1) + b2)
#... and so on.
If you remove linearity by removing the tanh, here is what happens -
h(0) = U0.x(0) + b0
h(1) = U1.x(1) + W1.h(0) + b1
     = U1.x(1) + W1.(U0.x(0) + b0) + b1   # expanding h(0)
     = U1.x(1) + W1.U0.x(0) + W1.b0 + b1
= V1.x(1) + V0.x(0) + C #Can be rewritten with new weights
= V . x + C #General form
So the final form of the state of an RNN after 2 timesteps is simply Wx+b like the linear layer without activation.
In other words, removing the non-linearity from an RNN turns it into a linear dense layer without any activation, completely removing the notion of time-steps.
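A quick numeric sanity check of the expansion above (a toy sketch with made-up shapes, using separate per-step weights U0, U1, W1 as in the derivation):
import torch

d_in, d_h = 3, 4
U0, U1 = torch.randn(d_h, d_in), torch.randn(d_h, d_in)
W1 = torch.randn(d_h, d_h)
b0, b1 = torch.randn(d_h), torch.randn(d_h)
x0, x1 = torch.randn(d_in), torch.randn(d_in)

# step-by-step linear recurrence (no tanh)
h0 = U0 @ x0 + b0
h1 = U1 @ x1 + W1 @ h0 + b1

# single affine map on the concatenated inputs: V.x + C
V = torch.cat([W1 @ U0, U1], dim=1)
C = W1 @ b0 + b1
h1_direct = V @ torch.cat([x0, x1]) + C

print(torch.allclose(h1, h1_direct))   # True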
| https://stackoverflow.com/questions/66726974/ |
Cannot perform reduction function min on tensor with no elements because the operation does not have an identity at THCTensorMathReduce.cu:64 | I am configuring a GitHub repo in which the author stated that you have to install pytorch=0.4 and python=3.7. Now, I have CUDA 11.0 and the Pytorch version is conflicting with CUDA. After installing the Pytorch it gives the below error. Any hint?
My Conda List
# Name Version Build Channel
_libgcc_mutex 0.1 main
_pytorch_select 0.1 cpu_0
blas 1.0 mkl
ca-certificates 2021.1.19 h06a4308_1
certifi 2020.12.5 py37h06a4308_0
cffi 1.14.0 py37h2e261b9_0
cuda100 1.0 0 pytorch
cudatoolkit 9.2 0
freetype 2.10.4 h5ab3b9f_0
intel-openmp 2019.4 243
jpeg 9b h024ee3a_2
lcms2 2.11 h396b838_0
libedit 3.1.20210216 h27cfd23_1
libffi 3.2.1 hf484d3e_1007
libgcc-ng 9.1.0 hdf63c60_0
libmklml 2019.0.5 0
libpng 1.6.37 hbc83047_0
libstdcxx-ng 9.1.0 hdf63c60_0
libtiff 4.2.0 h3942068_0
libwebp-base 1.2.0 h27cfd23_0
lz4-c 1.9.3 h2531618_0
mkl 2020.2 256
mkl-service 2.3.0 py37he8ac12f_0
mkl_fft 1.3.0 py37h54f3939_0
mkl_random 1.1.1 py37h0573a6f_0
ncurses 6.2 he6710b0_1
ninja 1.10.2 py37hff7bd54_0
numpy 1.20.1 pypi_0 pypi
numpy-base 1.19.2 py37hfa32c7d_0
olefile 0.46 py37_0
openssl 1.1.1j h27cfd23_0
pillow 8.1.2 py37he98fc37_0
pip 21.0.1 py37h06a4308_0
pycparser 2.20 py_2
python 3.7.1 h0371630_7
pytorch 1.2.0 py3.7_cuda9.2.148_cudnn7.6.2_0 pytorch
readline 7.0 h7b6447c_5
scipy 1.6.1 pypi_0 pypi
setuptools 52.0.0 py37h06a4308_0
six 1.15.0 py37h06a4308_0
sqlite 3.33.0 h62c20be_0
tk 8.6.10 hbc83047_0
torchaudio 0.8.0 pypi_0 pypi
torchvision 0.4.0 py37_cu92 pytorch
typing 3.7.4.3 py37h06a4308_0
typing-extensions 3.7.4.3 hd3eb1b0_0
typing_extensions 3.7.4.3 pyh06a4308_0
wheel 0.36.2 pyhd3eb1b0_0
xz 5.2.5 h7b6447c_0
zlib 1.2.11 h7b6447c_3
zstd 1.4.5 h9ceee32_0
Error
Model size: 44.17957M
==> Epoch 1/800 lr:0.0003
Traceback (most recent call last):
File "/media/khawar/HDD_Khawar1/hypergraph_reid/main_video_person_reid_hypergraphsage_part.py", line 369, in <module>
main()
File "/media/khawar/HDD_Khawar1/hypergraph_reid/main_video_person_reid_hypergraphsage_part.py", line 230, in main
train(model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu)
File "/media/khawar/HDD_Khawar1/hypergraph_reid/main_video_person_reid_hypergraphsage_part.py", line 279, in train
htri_loss = criterion_htri(features, pids)
File "/home/khawar/anaconda3/envs/hypergraph_reid/lib/python3.7/site-packages/torch/nn/modules/module.py", line 547, in __call__
result = self.forward(*input, **kwargs)
File "/media/khawar/HDD_Khawar1/hypergraph_reid/losses.py", line 86, in forward
dist_an.append(dist[i][mask[i] == 0].min())
RuntimeError: invalid argument 1: cannot perform reduction function min on tensor with no elements because the operation does not have an identity at /opt/conda/conda-bld/pytorch_1565287025495/work/aten/src/THC/generic/THCTensorMathReduce.cu:64
| As the error message suggests, the argument to the min function is empty.
The behavior of torch.min on an empty tensor is undefined.
Check that dist[i][mask[i] == 0] is not empty before taking the min of it.
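A sketch of such a guard around the failing line in losses.py (dist, mask and dist_an are the names from the traceback; skipping the empty case is just one possible way to handle it):
negatives = dist[i][mask[i] == 0]
if negatives.numel() > 0:          # only take the min when at least one negative exists for this anchor
    dist_an.append(negatives.min())
else:
    continue                       # no negative sample for this anchor in the batch; skip it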
| https://stackoverflow.com/questions/66729277/ |
Is torch.float32 different from numpy's float32? | Setting precision as 30 in PyTorch shows:
>>> torch.set_printoptions(precision=30)
>>> y
tensor([[-0.388252139091491699218750000000, -0.610148549079895019531250000000,
-1.333969473838806152343750000000, -1.027917861938476562500000000000,
-0.498563587665557861328125000000, -0.096793495118618011474609375000,
-0.895992159843444824218750000000, -0.752071321010589599609375000000,
-0.879653215408325195312500000000],
[ 1.960780262947082519531250000000, 0.290681242942810058593750000000,
0.111534759402275085449218750000, -1.412155270576477050781250000000,
1.015806078910827636718750000000, 0.201809123158454895019531250000,
0.131465137004852294921875000000, -1.262379050254821777343750000000,
-0.480409622192382812500000000000],
[ 0.111068181693553924560546875000, -2.129202365875244140625000000000,
0.538800299167633056640625000000, 1.165832757949829101562500000000,
0.194993987679481506347656250000, -1.110693812370300292968750000000,
-1.451576709747314453125000000000, -3.398952484130859375000000000000,
2.022404193878173828125000000000],
[-2.374018669128417968750000000000, -1.442466974258422851562500000000,
-0.406166225671768188476562500000, 0.045908458530902862548828125000,
-0.835370421409606933593750000000, -0.302138328552246093750000000000,
-0.421340197324752807617187500000, 0.931307554244995117187500000000,
1.061386585235595703125000000000],
[ 1.166660070419311523437500000000, 0.710260510444641113281250000000,
1.008558034896850585937500000000, 1.957847237586975097656250000000,
-1.070753335952758789062500000000, 0.319442749023437500000000000000,
-1.140496969223022460937500000000, -1.723430752754211425781250000000,
0.109533369541168212890625000000],
[ 0.501820147037506103515625000000, 0.349833250045776367187500000000,
-0.073374643921852111816406250000, -2.459295272827148437500000000000,
-1.853959321975708007812500000000, 0.153838425874710083007812500000,
-1.860147237777709960937500000000, -0.880943417549133300781250000000,
-1.352821707725524902343750000000]])
but in numpy setting precision as 30 shows:
>>> np.set_printoptions(precision=30)
>>> y.numpy()
array([[-0.38825214 , -0.61014855 , -1.3339695 , -1.0279179 ,
-0.4985636 , -0.096793495, -0.89599216 , -0.7520713 ,
-0.8796532 ],
[ 1.9607803 , 0.29068124 , 0.11153476 , -1.4121553 ,
1.0158061 , 0.20180912 , 0.13146514 , -1.262379 ,
-0.48040962 ],
[ 0.11106818 , -2.1292024 , 0.5388003 , 1.1658328 ,
0.19499399 , -1.1106938 , -1.4515767 , -3.3989525 ,
2.0224042 ],
[-2.3740187 , -1.442467 , -0.40616623 , 0.04590846 ,
-0.8353704 , -0.30213833 , -0.4213402 , 0.93130755 ,
1.0613866 ],
[ 1.1666601 , 0.7102605 , 1.008558 , 1.9578472 ,
-1.0707533 , 0.31944275 , -1.140497 , -1.7234308 ,
0.10953337 ],
[ 0.50182015 , 0.34983325 , -0.073374644, -2.4592953 ,
-1.8539593 , 0.15383843 , -1.8601472 , -0.8809434 ,
-1.3528217 ]], dtype=float32)e
Why are the results different?
| By default, if it takes fewer digits than the configured value of precision to distinguish a floating-point value from other values of the same dtype, NumPy will only print as many digits as necessary for that. You have to set the floatmode option to 'fixed' to get the behavior you were expecting:
numpy.set_printoptions(precision=30, floatmode='fixed')
Note that even if you print 30 decimal digits, 32-bit floats don't have anywhere near that level of precision.
| https://stackoverflow.com/questions/66730777/ |
Split a torch tensor using a same-sized tensor of indices | Let's say that I have tensor
t = torch.tensor([1,2,3,4,5])
I want to split it using a same-sized tensor of indices that tells me for each element, in which split it should go.
indices = torch.tensor([0,1,1,0,2])
So that the final result is
splits
[tensor([1,4]), tensor([2,3]), tensor([5])]
Is there a neat way to do this in Pytorch?
EDIT : In general there will be more than 2 or 3 splits.
| One could do it using argsort for general case:
def mask_split(tensor, indices):
sorter = torch.argsort(indices)
_, counts = torch.unique(indices, return_counts=True)
    return torch.split(tensor[sorter], counts.tolist())
mask_split(t, indices)
Though it might be better to use @flawr's answer if this is your real use case (a list comprehension might also be faster as it does not require sorting), something like this:
def mask_split(tensor, indices):
unique = torch.unique(indices)
return [tensor[indices == i] for i in unique]
| https://stackoverflow.com/questions/66736492/ |
Installing PyTorch with CUDA in setup.py | I'm trying to specify PyTorch with CUDA in install_requires. The command to install with pip is
pip install torch==1.8.0+cu111 torchvision==0.9.0+cu111 torchaudio===0.8.0 -f https://download.pytorch.org/whl/torch_stable.html
How do I do that in the setup.py install_requires?
| I also faced the same problem; later I fixed it by using this in the setup.py file and it worked. Just add these lines to your setup.py file:
"torch@https://download.pytorch.org/whl/cu111/torch-1.8.0%2Bcu111-cp37-cp37m-linux_x86_64.whl",
"torchvision@https://download.pytorch.org/whl/cu111/torchvision-0.9.0%2Bcu111-cp37-cp37m-linux_x86_64.wh",
"torchaudio@https://download.pytorch.org/whl/torchaudio-0.8.0-cp36-cp36m-linux_x86_64.whl"
All of these are the Linux versions; if you want macOS or Windows, just change the link after the @ in each line. You can get the link for your desired version from https://download.pytorch.org/whl/torch_stable.html
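For context, a minimal sketch of a setup.py using those entries (the package name and version here are placeholders):
from setuptools import setup, find_packages

setup(
    name="my_package",          # placeholder
    version="0.1.0",            # placeholder
    packages=find_packages(),
    install_requires=[
        "torch@https://download.pytorch.org/whl/cu111/torch-1.8.0%2Bcu111-cp37-cp37m-linux_x86_64.whl",
        "torchvision@https://download.pytorch.org/whl/cu111/torchvision-0.9.0%2Bcu111-cp37-cp37m-linux_x86_64.whl",
        "torchaudio@https://download.pytorch.org/whl/torchaudio-0.8.0-cp36-cp36m-linux_x86_64.whl",
    ],
)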
| https://stackoverflow.com/questions/66738473/ |
with torch.no_grad: AttributeError: __enter__ | with torch.no_grad:AttributeError: __enter__
I got this error while running pytorch code.
I have torch==0.4.1 torchvision==0.3.0, I run the code in google colab.
| torch.no_grad is a context manager; it does have __enter__ and __exit__.
You should use it with with statement, like this
with context_manager():
pass
Thus, simply replace with torch.no_grad: (accessing the attribute) with with torch.no_grad(): (calling a method) to use contextmanager properly.
| https://stackoverflow.com/questions/66744675/ |
Same weights, implementation but different results in Keras and Pytorch | I have an encoder and a decoder model (monodepth2). I try to convert them from Pytorch to Keras using Onnx2Keras, but:
Encoder(ResNet-18) succeeds
I build the decoder myself in Keras (with TF2.3), and copy the weights (numpy array, including weight and bias) for each layer from Pytorch to Keras, without any modification.
But it turns out that both the Onnx2Keras-converted Encoder and the self-built Decoder fail to reproduce the same results. The cross-comparison pictures are below, but I'd first introduce the code of the Decoder.
First the core Layer, all the conv2d layer (Conv3x3, ConvBlock) is based on this, but different dims or add an activation:
# Conv3x3 (normal conv2d without BN nor activation)
# There's also a ConvBlock, which is just "Conv3x3 + ELU activation", so I don't list it here.
def TF_Conv3x3(input_channel, filter_num, pad_mode='reflect', activate_type=None):
# Actually it's 'reflect, but I implement it with tf.pad() outside this
padding = 'valid'
# if TF_ConvBlock, then activate_type=='elu
conv = tf.keras.layers.Conv2D(filters=filter_num, kernel_size=3, activation=activate_type,
strides=1, padding=padding)
return conv
Then the structure. Note that the definition is EXACTLY the same as the original code. I think it must be some details about the implementation.
def DepthDecoder_keras(num_ch_enc=np.array([64, 64, 128, 256, 512]), channel_first=False,
scales=range(4), num_output_channels=1):
num_ch_dec = np.array([16, 32, 64, 128, 256])
convs = OrderedDict()
for i in range(4, -1, -1):
# upconv_0
num_ch_in = num_ch_enc[-1] if i == 4 else num_ch_dec[i + 1]
num_ch_out = num_ch_dec[i]
# convs[("upconv", i, 0)] = ConvBlock(num_ch_in, num_ch_out)
convs[("upconv", i, 0)] = TF_ConvBlock(num_ch_in, num_ch_out, pad_mode='reflect')
# upconv_1
num_ch_in = num_ch_dec[i]
if i > 0:
num_ch_in += num_ch_enc[i - 1]
num_ch_out = num_ch_dec[i]
convs[("upconv", i, 1)] = TF_ConvBlock(num_ch_in, num_ch_out, pad_mode='reflect') # Just Conv3x3 with ELU-activation
for s in scales:
convs[("dispconv", s)] = TF_Conv3x3(num_ch_dec[s], num_output_channels, pad_mode='reflect')
"""
Input_layer dims: (64, 96, 320), (64, 48, 160), (128, 24, 80), (256, 12, 40), (512, 6, 20)
"""
x0 = tf.keras.layers.Input(shape=(96, 320, 64))
# then define the the rest input layers
input_features = [x0, x1, x2, x3, x4]
"""
# connect layers
"""
outputs = []
ch = 1 if channel_first else 3
x = input_features[-1]
for i in range(4, -1, -1):
x = tf.pad(x, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]], mode='REFLECT')
x = convs[("upconv", i, 0)](x)
x = [tf.keras.layers.UpSampling2D()(x)]
if i > 0:
x += [input_features[i - 1]]
x = tf.concat(x, ch)
x = tf.pad(x, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]], mode='REFLECT')
x = convs[("upconv", i, 1)](x)
x = TF_ReflectPad2D_1()(x)
x = convs[("dispconv", 0)](x)
disp0 = tf.math.sigmoid(x)
"""
build keras Model ([input0, ...], [output0, ...])
"""
# decoder = tf.keras.Model(input_features, outputs)
decoder = tf.keras.Model(input_features, disp0)
return decoder
The cross-comparison is as follows... I would really appreciate it if anyone could offer some insights. Thanks!!!
Original results:
Original Encoder + Self-build Decoder:
ONNX-converted Enc + Original Dec (Texture is good, but the contrast is not enough, the car should be very close, i.e. very bright color):
ONNX-converted Enc + Self-built Dec:
| Solved!
It turns out there's indeed no problem with the implementation (at least no significant one). The problem was in the weight copying.
The original PyTorch Conv2d weights have shape (out_channels, in_channels, 3, 3), but the Keras/TF model requires (3, 3, in_channels, out_channels). I had permuted them with [3, 2, 1, 0], overlooking that the two (3, 3) kernel dimensions also have their own order (that permutation transposes each kernel).
So it should be weights.permute([2,3,1,0]), and all is well!
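For reference, a small sketch of copying one convolution's weights with that permutation (pt_conv and keras_conv are placeholder names for the corresponding PyTorch and Keras layers):
import numpy as np

pt_weight = pt_conv.weight.detach().cpu().numpy()      # (out_channels, in_channels, kH, kW)
pt_bias = pt_conv.bias.detach().cpu().numpy()          # (out_channels,)

keras_weight = np.transpose(pt_weight, (2, 3, 1, 0))   # -> (kH, kW, in_channels, out_channels)
keras_conv.set_weights([keras_weight, pt_bias])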
| https://stackoverflow.com/questions/66744761/ |
Torch JIT Trace = TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect | I am following this tutorial: https://huggingface.co/transformers/torchscript.html
to create a trace of my custom BERT model, however when running the exact same dummy_input I receive an error:
TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect.
We cant record the data flow of Python values, so this value will be treated as a constant in the future.
Having loaded in my model and tokenizer, the code to create the trace is the following:
text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
tokenized_text = tokenizer.tokenize(text)
# Masking one of the input tokens
masked_index = 8
tokenized_text[masked_index] = '[MASK]'
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
dummy_input = [tokens_tensor, segments_tensors]
traced_model = torch.jit.trace(model, dummy_input)
The dummy_input is a list of tensors so I'm not sure where the Boolean type is coming into play here. Does anyone understand why this error is occurring and whether the Boolean conversion is happening?
Many Thanks
| What this error means
This warning occurs, when one tries to torch.jit.trace models which have data dependent control flow.
This simple example should make it clearer:
import torch
class Foo(torch.nn.Module):
def forward(self, tensor):
# It is data dependent
# Trace will only work with one path
if tensor.max() > 0.5:
return tensor ** 2
return tensor
model = Foo()
traced = torch.jit.script(model) # No warnings
traced = torch.jit.trace(model, torch.randn(10)) # Warning
In essence, BERT model has some control flow (like if, for loop) dependent on the data, hence you get the warning.
Warning itself
You can see BERT forward code here.
You are fine if:
arguments do not change (like None values passed to forward) and it will stay that way after script (e.g. during inference calls)
if there is control flow based on data gathered inside __init__ (like configs), because this will not change
For example:
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
Will only run as one branch with torch.jit.trace, as it just traces operations on tensor and is unaware of control flow like this.
The HuggingFace team is probably aware of that, and this warning is not an issue (though you might double-check with your use case or try to go with torch.jit.script)
Going with torch.jit.script
This one would be hard as the whole model has to be torchscript compatible (torchscript has a subset of Python available and more than likely will not work out of the box with BERT).
Do it only when necessary (probably not).
| https://stackoverflow.com/questions/66746307/ |
RuntimeError: view size is not compatible with input tensor's size and stride (at least one dimension spans across two contiguous subspaces) | I am using Pytorch. I got this RuntimeError while evaluating a model. Any idea how to solve this?
| SOLUTION: Just replace the view() function with the reshape() function, as suggested in the error, and it works.
This has to do with how the tensor is stored in memory: view() only works on contiguously stored tensors, while reshape() falls back to copying the data when a true view isn't possible.
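A minimal illustration (the toy shapes below are made up, not from the original model):
import torch

x = torch.randn(4, 5).t()        # transposing makes the tensor non-contiguous
# x.view(-1)                     # would raise: view size is not compatible with input tensor's size and stride
y = x.reshape(-1)                # works: copies the data when a true view is impossible
z = x.contiguous().view(-1)      # equivalent alternative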
| https://stackoverflow.com/questions/66750391/ |
Pytorch linear regression loss increase | I tried to implement a simple demo that fits a polynomial regression, but the linear model's loss fails to decrease.
I am confused about where I went wrong.
If I train the model on one sample (batch size = 1) at a time, it works fine, but when I feed the model many samples at a time, the loss increases and becomes inf.
import numpy as np
import torch
import math
from matplotlib import pyplot as plt
def rand_series(size):
x = np.linspace(-100, 100, size)
np.random.shuffle(x)
base_y = 20 * np.sin(2 * math.pi / 200 * x)
y = base_y + 10 * np.random.rand(size)
return x, y
def rescale_vec(vector):
vec_as_tensor = torch.tensor(vector, dtype=torch.float32)
max_in_vec = torch.max(vec_as_tensor)
min_in_vec = torch.min(vec_as_tensor)
if max_in_vec - min_in_vec == 0:
return torch.ones(vec_as_tensor.size(), dtype=torch.float32)
else:
return (vec_as_tensor - min_in_vec) / (max_in_vec - min_in_vec)
def rescale(vectors):
if len(vectors.shape) == 1:
return rescale_vec(vectors)
nor_vecs = torch.empty(vectors.shape)
for i in range(vectors.shape[0]):
nor_vecs[i] = rescale_vec(vectors[i])
return nor_vecs
class LinearRegression (torch.nn.Module):
def __init__ (self, power=4):
super().__init__()
self.layer = torch.nn.Linear(power, 1)
def forward(self, x):
return self.layer(x)
def regression(x_, y_, learning_rate):
x = torch.t(torch.tensor(x_, dtype=torch.float32))
y = torch.tensor(y_, dtype=torch.float32)
dim_size = x.size()[1]
print(dim_size, x.size())
model = LinearRegression(dim_size)
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
loss_func = torch.nn.MSELoss(reduction='sum')
batch_size = 400
for round in range(50):
sample_indices = torch.randint(0, len(x), (batch_size, ))
x_samples = torch.index_select(x, 0, sample_indices)
y_samples = torch.index_select(y, 0, sample_indices)
optimizer.zero_grad()
y_hat = model(x_samples.view(-1, dim_size))
loss = loss_func(y_hat, y_samples)
print(loss.item())
loss.backward()
optimizer.step()
return model
x_one, y = rand_series(1000)
b = np.ones(len(x_one))
x = np.array([b, x_one, x_one ** 2, x_one ** 3, x_one ** 4, x_one ** 5])
model = regression(rescale(x), torch.tensor(y, dtype=torch.float32), 0.002)
nor_x = rescale(x)
y_hat = model(torch.t(torch.tensor(x, dtype=torch.float32)))
plt.scatter(x_one, y)
plt.scatter(x_one, y_hat.data, c='red')
plt.show()
the loss:
4.7375866968775066e+19
1.6979300048622735e+26
6.0214270068868396e+32
inf
inf
inf
| You need to use loss_func = torch.nn.MSELoss(reduction='mean') to solve the NaN problem. With reduction='sum', the loss (and its gradients) scale with the batch size, so with batch_size = 400 each SGD step is roughly 400 times larger than intended and training diverges. A batch of one or two seems to work because the summed loss stays small enough; by adding more epochs, you should see the loss tend exponentially to infinity.
| https://stackoverflow.com/questions/66778368/ |
CNN Classifier only guesses one thing - PyTorch | I'm trying to make a model predict the ethnicity of a person from a 75x75 image, but whenever I train the model, the accuracy always stays completely still at 53.2%. I didn't realize why until I actually ran it on some of the photos. It turned out that no matter what the photo was, it would always predict 'other'. I'm not entirely sure why though.
I copied the code over from the official PyTorch Quickstart tutorial, and in that dataset or the standard MNIST data, it worked fine. I changed the dataset to the UTKFace, and then it started only predicting one label, all the time.
Here's my code:
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
from torchvision.transforms import ToTensor
import torch.nn.functional as F
training_data = ImageFolder(
root = "data_training/",
transform = ToTensor(),
)
testing_data = ImageFolder(
root = "data_testing/",
transform = ToTensor()
)
training_dataloader = DataLoader(training_data, batch_size=64, shuffle=True)
test_dataloader = DataLoader(testing_data, batch_size=64, shuffle=True)
class NeuralNetwork(nn.Module):
def __init__(self):
super(NeuralNetwork, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(1296, 1024)
self.fc2 = nn.Linear(1024, 1024)
self.fc3 = nn.Linear(1024, 512)
self.fc4 = nn.Linear(512, 84)
self.fc5 = nn.Linear(84, 5)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = F.relu(self.fc4(x))
x = self.fc5(x)
return x
model = NeuralNetwork().to("cpu")
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
def train(dataloader, model, loss_fn, optimizer):
size = len(dataloader.dataset)
for batch, (X, y) in enumerate(dataloader):
X, y = X.to("cpu"), y.to("cpu")
# Compute prediction error
pred = model(X)
loss = loss_fn(pred, y)
# Backpropagation
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch % 100 == 0:
loss, current = loss.item(), batch * len(X)
print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
def tests(dataloader, model):
size = len(dataloader.dataset)
model.eval()
test_loss, correct = 0, 0
with torch.no_grad():
for X, y in dataloader:
X, y = X.to("cpu"), y.to("cpu")
pred = model(X)
test_loss += loss_fn(pred, y).item()
correct += (pred.argmax(1) == y).type(torch.float).sum().item()
test_loss /= size
correct /= size
print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
epochs = 10
for t in range(epochs):
print(f"Epoch {t+1}\n-------------------------------")
train(training_dataloader, model, loss_fn, optimizer)
tests(test_dataloader, model)
torch.save(model.state_dict(), "model.pth")
The training logs:
Epoch 1
-------------------------------
loss: 1.628994 [ 0/23705]
loss: 1.620698 [ 6400/23705]
loss: 1.615423 [12800/23705]
loss: 1.596390 [19200/23705]
Test Error:
Accuracy: 53.2%, Avg loss: 0.024725
Epoch 2
-------------------------------
loss: 1.593613 [ 0/23705]
loss: 1.581375 [ 6400/23705]
loss: 1.583656 [12800/23705]
loss: 1.591942 [19200/23705]
Test Error:
Accuracy: 53.2%, Avg loss: 0.024165
Epoch 3
-------------------------------
loss: 1.541260 [ 0/23705]
loss: 1.592345 [ 6400/23705]
loss: 1.540908 [12800/23705]
loss: 1.540741 [19200/23705]
Test Error:
Accuracy: 53.2%, Avg loss: 0.023705
Epoch 4
-------------------------------
loss: 1.566888 [ 0/23705]
loss: 1.524875 [ 6400/23705]
loss: 1.540764 [12800/23705]
loss: 1.510044 [19200/23705]
Test Error:
Accuracy: 53.2%, Avg loss: 0.023323
Epoch 5
-------------------------------
loss: 1.530084 [ 0/23705]
loss: 1.498773 [ 6400/23705]
loss: 1.537755 [12800/23705]
loss: 1.508989 [19200/23705]
Test Error:
Accuracy: 53.2%, Avg loss: 0.022993
....
No matter how many epochs I set it to, or how many layers I add in to try to get it to overfit, it always just seems to guess the same thing over and over again, with no signs of improvement.
I separated the UTKFace dataset into folders based on the ethnicity category of the name. There are 23705 images in the training data and 10134 in the testing.
I'm not sure why this is happening. Is my dataset not large enough? Are there not enough layers?
| The number of layers and the dataset size don't explain this behavior for this example. Your CNN is behaving as a constant function, so far I don't know why, but these might be some clues:
Since you have separated your data by label into folders, if you are training your model using only one of those folders you will obtain a constant function.
The last layer of your neural network has no activation function! That is, in the forward method you are doing x = self.fc5(x) instead of x = F.<function>(self.fc5(x)).
Where do you indicate, when loading the training data, which is the label for each image? Are you sure that training_dataloader is loading the images with their correct label?
| https://stackoverflow.com/questions/66783997/ |
How to create a graph neural network dataset? (pytorch geometric) | How can I convert my own dataset to be usable by pytorch geometric for a graph neural network?
All the tutorials use existing dataset already converted to be usable by pytorch. For example if I have my own pointcloud dataset how can i use it to train for classification with graph neural network? What about my own image dataset for classification?
| How you need to transform your data depends on what format your model expects.
Graph neural networks typically expect (a subset of):
node features
edges
edge attributes
node targets
depending on the problem. You can create an object with tensors of these values (and extend the attributes as you need) in PyTorch Geometric wth a Data object like so:
data = Data(x=x, edge_index=edge_index, y=y)
data.train_idx = torch.tensor([...], dtype=torch.long)
data.test_mask = torch.tensor([...], dtype=torch.bool)
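For a fully self-contained sketch, here is a toy 3-node graph (all the values below are made up for illustration):
import torch
from torch_geometric.data import Data

x = torch.tensor([[2.0, 1.0], [5.0, 6.0], [3.0, 7.0]])       # node features: [num_nodes, num_node_features]
edge_index = torch.tensor([[0, 1, 1, 2],
                           [1, 0, 2, 1]], dtype=torch.long)   # edges as index pairs: [2, num_edges]
y = torch.tensor([0, 1, 0], dtype=torch.long)                 # one label per node

data = Data(x=x, edge_index=edge_index, y=y)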
| https://stackoverflow.com/questions/66788555/ |
Why putting value on GPU slow the calculation? | I'm a new one in GPU parallezation. I found putting values on GPU in advance slows the calculations and indexing. My code is as follows:
import torch
A = torch.rand(600, 600, device='cuda:0')
row0 = torch.tensor(100, device='cuda:0')
col0 = torch.tensor(100, device='cuda:0')
row1 = torch.tensor(356, device='cuda:0')
col1 = torch.tensor(356, device='cuda:0')
B = torch.rand(256, 256, device='cuda:0')
a = 10
%timeit B[:] = A[row0:row1, col0:col1]
# 395 µs ± 4.01 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
%timeit a*A + a**2
# 17 µs ± 256 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
A = torch.rand(600, 600, device='cuda:0')
row0 = 100
col0 = 100
row1 = 356
col1 = 356
B = torch.rand(256, 256, device='cuda:0')
a1 = torch.as_tensor(a).cuda()
%timeit B[:] = A[row0:row1, col0:col1]
# 10.6 µs ± 141 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
%timeit a1*A + a1**2
# 30.2 µs ± 584 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
Could anyone explain the mechanism behind it?
| Your code is slow because there is nothing to parallelize, and you're just paying unnecessary GPU overhead.
GPU parallelism works by launching a large number of threads, and simultaneously computing chunks of some operation. Things like matrix multiplication and convolution are very GPU-friendly, because you can break them down into a lot of similar smaller operations.
However, there is also an overhead when doing things on the GPU.
Only when a sufficient number of threads has been launched to beat the CUDA overhead do we observe a speedup. Let's see an example:
import torch
device = torch.device('cuda:0')
A = torch.randn(5, 10, device=device)
B = torch.randn(10, 5, device=device)
A_ = torch.randn(5, 10)
B_ = torch.randn(10, 5)
%timeit A @ B
# 10.5 µs ± 745 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
%timeit A_ @ B_
# 5.21 µs ± 120 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
You may think this is going against common sense -- how can CPU matrix multiplication be faster than its GPU counterpart? It's simply because we haven't had a big enough operation to parallelize. Let's retry the same thing, but on bigger inputs:
A = torch.randn(100, 200, device=device)
B = torch.randn(200, 100, device=device)
A_ = torch.randn(100, 200)
B_ = torch.randn(200, 100)
%timeit A @ B
# 10.4 µs ± 333 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
%timeit A_ @ B_
# 45.3 µs ± 647 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
We increased the input size by a factor of 20. The GPU is still basically showing us the same time (the overhead), while the CPU time has blown up. Because the input was bigger, GPU parallelism can show its magic.
In your case, you're not doing any parallelization at all. You're simply trying to slice your tensor with a GPU scalar, thus getting some sort of overhead but no benefit. It's a similar case with the other operation: there is nothing to parallelize.
%timeit a**2
# 200 ns ± 11.9 ns per loop (mean ± std. dev. of 7 runs, 10000000 loops each)
%timeit a1**2
# 16.8 µs ± 1.43 µs per loop (mean ± std. dev. of 7 runs, 100000 loops each)
There is no way to break the operation a1 ** 2 into smaller repeatable chunks. Knowing when and when not to use the GPU is quite important. This can also be a useful starting point to get a glimpse of how CUDA works under the hood.
| https://stackoverflow.com/questions/66801874/ |
How to install the module pytorch_lightning.metrics in Raspberry pi3 | I am trying to execute a python file which has pytorch with lightning and torchvision modules. But after I downloaded and successfully installed whl file of pytorch in pi3 I am getting same error again and again.
The error is
ModuleNotFoundError: No module named 'pytorch_lightning.metrics'
Help would be highly appreciated as I have been stuck for more than 3 days.
I have installed the modules using pip.
| pytorch_lightning.metrics was moved out into the separate torchmetrics package. Use instead:
from torchmetrics.functional import accuracy
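A minimal sketch of getting it working (note that on newer torchmetrics releases, accuracy also expects task/num_classes arguments):
# in your shell on the Pi first:
# pip install torchmetrics
import torch
from torchmetrics.functional import accuracy
preds = torch.tensor([0, 1, 1, 0])
target = torch.tensor([0, 1, 0, 0])
print(accuracy(preds, target))  # tensor(0.7500) on older torchmetrics releases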
| https://stackoverflow.com/questions/66807032/ |
How to solve the famous `unhandled cuda error, NCCL version 2.7.8` error? | I've seen multiple issues about the:
RuntimeError: NCCL error in: /opt/conda/conda-bld/pytorch_1614378083779/work/torch/lib/c10d/ProcessGroupNCCL.cpp:825, unhandled cuda error, NCCL version 2.7.8
ncclUnhandledCudaError: Call to CUDA function failed.
but none seem to fix it for me:
https://github.com/pytorch/pytorch/issues/54550
https://github.com/pytorch/pytorch/issues/47885
https://github.com/pytorch/pytorch/issues/50921
https://github.com/pytorch/pytorch/issues/54823
I've tried to do torch.cuda.set_device(device) manually at the beginning of every script. That didn't seem to work for me. I've tried different GPUS. I've tried downgrading pytorch version and cuda version. Different combinations of 1.6.0, 1.7.1, 1.8.0 and cuda 10.2, 11.0, 11.1. I am unsure what else to do. What did people do to solve this issue?
very related perhaps?
Pytorch "NCCL error": unhandled system error, NCCL version 2.4.8"
More complete error message:
('jobid', 4852)
('slurm_jobid', -1)
('slurm_array_task_id', -1)
('condor_jobid', 4852)
('current_time', 'Mar25_16-27-35')
('tb_dir', PosixPath('/home/miranda9/data/logs/logs_Mar25_16-27-35_jobid_4852/tb'))
('gpu_name', 'GeForce GTX TITAN X')
('PID', '30688')
torch.cuda.device_count()=2
opts.world_size=2
ABOUT TO SPAWN WORKERS
done setting sharing strategy...next mp.spawn
INFO:root:Added key: store_based_barrier_key:1 to store for rank: 1
INFO:root:Added key: store_based_barrier_key:1 to store for rank: 0
rank=0
mp.current_process()=<SpawnProcess name='SpawnProcess-1' parent=30688 started>
os.getpid()=30704
setting up rank=0 (with world_size=2)
MASTER_ADDR='127.0.0.1'
59264
backend='nccl'
--> done setting up rank=0
setup process done for rank=0
Traceback (most recent call last):
File "/home/miranda9/ML4Coq/ml4coq-proj/embeddings_zoo/tree_nns/main_brando.py", line 279, in <module>
main_distributed()
File "/home/miranda9/ML4Coq/ml4coq-proj/embeddings_zoo/tree_nns/main_brando.py", line 188, in main_distributed
spawn_return = mp.spawn(fn=train, args=(opts,), nprocs=opts.world_size)
File "/home/miranda9/miniconda3/envs/metalearning11.1/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 230, in spawn
return start_processes(fn, args, nprocs, join, daemon, start_method='spawn')
File "/home/miranda9/miniconda3/envs/metalearning11.1/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 188, in start_processes
while not context.join():
File "/home/miranda9/miniconda3/envs/metalearning11.1/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 150, in join
raise ProcessRaisedException(msg, error_index, failed_process.pid)
torch.multiprocessing.spawn.ProcessRaisedException:
-- Process 0 terminated with the following error:
Traceback (most recent call last):
File "/home/miranda9/miniconda3/envs/metalearning11.1/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 59, in _wrap
fn(i, *args)
File "/home/miranda9/ML4Coq/ml4coq-proj/embeddings_zoo/tree_nns/main_brando.py", line 212, in train
tactic_predictor = move_to_ddp(rank, opts, tactic_predictor)
File "/home/miranda9/ultimate-utils/ultimate-utils-project/uutils/torch/distributed.py", line 162, in move_to_ddp
model = DistributedDataParallel(model, find_unused_parameters=True, device_ids=[opts.gpu])
File "/home/miranda9/miniconda3/envs/metalearning11.1/lib/python3.8/site-packages/torch/nn/parallel/distributed.py", line 446, in __init__
self._sync_params_and_buffers(authoritative_rank=0)
File "/home/miranda9/miniconda3/envs/metalearning11.1/lib/python3.8/site-packages/torch/nn/parallel/distributed.py", line 457, in _sync_params_and_buffers
self._distributed_broadcast_coalesced(
File "/home/miranda9/miniconda3/envs/metalearning11.1/lib/python3.8/site-packages/torch/nn/parallel/distributed.py", line 1155, in _distributed_broadcast_coalesced
dist._broadcast_coalesced(
RuntimeError: NCCL error in: /opt/conda/conda-bld/pytorch_1616554793803/work/torch/lib/c10d/ProcessGroupNCCL.cpp:825, unhandled cuda error, NCCL version 2.7.8
ncclUnhandledCudaError: Call to CUDA function failed.
Bonus 1:
I still have errors:
ncclSystemError: System call (socket, malloc, munmap, etc) failed.
Traceback (most recent call last):
File "/home/miranda9/diversity-for-predictive-success-of-meta-learning/div_src/diversity_src/experiment_mains/main_dist_maml_l2l.py", line 1423, in <module>
main()
File "/home/miranda9/diversity-for-predictive-success-of-meta-learning/div_src/diversity_src/experiment_mains/main_dist_maml_l2l.py", line 1365, in main
train(args=args)
File "/home/miranda9/diversity-for-predictive-success-of-meta-learning/div_src/diversity_src/experiment_mains/main_dist_maml_l2l.py", line 1385, in train
args.opt = move_opt_to_cherry_opt_and_sync_params(args) if is_running_parallel(args.rank) else args.opt
File "/home/miranda9/ultimate-utils/ultimate-utils-proj-src/uutils/torch_uu/distributed.py", line 456, in move_opt_to_cherry_opt_and_sync_params
args.opt = cherry.optim.Distributed(args.model.parameters(), opt=args.opt, sync=syn)
File "/home/miranda9/miniconda3/envs/meta_learning_a100/lib/python3.9/site-packages/cherry/optim.py", line 62, in __init__
self.sync_parameters()
File "/home/miranda9/miniconda3/envs/meta_learning_a100/lib/python3.9/site-packages/cherry/optim.py", line 78, in sync_parameters
dist.broadcast(p.data, src=root)
File "/home/miranda9/miniconda3/envs/meta_learning_a100/lib/python3.9/site-packages/torch/distributed/distributed_c10d.py", line 1090, in broadcast
work = default_pg.broadcast([tensor], opts)
RuntimeError: NCCL error in: ../torch/lib/c10d/ProcessGroupNCCL.cpp:911, unhandled system error, NCCL version 2.7.8
one of the answers suggested having nvcc & torch.version.cuda match, but they do not:
(meta_learning_a100) [miranda9@hal-dgx ~]$ python -c "import torch;print(torch.version.cuda)"
11.1
(meta_learning_a100) [miranda9@hal-dgx ~]$ nvcc -V
nvcc: NVIDIA (R) Cuda compiler driver
Copyright (c) 2005-2020 NVIDIA Corporation
Built on Wed_Jul_22_19:09:09_PDT_2020
Cuda compilation tools, release 11.0, V11.0.221
Build cuda_11.0_bu.TC445_37.28845127_0
How do I match them?
| This is not a very satisfactory answer, but this seems to be what ended up working for me. I simply used pytorch 1.7.1 and its cuda version 10.2. As long as cuda 11.0 is loaded it seems to be working. To install that version do:
conda install -y pytorch==1.7.1 torchvision torchaudio cudatoolkit=10.2 -c pytorch -c conda-forge
if you are on an HPC, do module avail to make sure the right cuda version is loaded. Perhaps you need to source bash and other things for the submission job to work. My setup looks as follows:
#!/bin/bash
echo JOB STARTED
# a submission job is usually empty and has the root of the submission so you probably need your HOME env var
export HOME=/home/miranda9
# to have modules work and the conda command work
source /etc/bashrc
source /etc/profile
source /etc/profile.d/modules.sh
source ~/.bashrc
source ~/.bash_profile
conda activate metalearningpy1.7.1c10.2
#conda activate metalearning1.7.1c11.1
#conda activate metalearning11.1
#module load cuda-toolkit/10.2
module load cuda-toolkit/11.1
#nvidia-smi
nvcc --version
#conda list
hostname
echo $PATH
which python
# - run script
python -u ~/ML4Coq/ml4coq-proj/embeddings_zoo/tree_nns/main_brando.py
I also echo other useful things like the nvcc version to make sure load worked (note the top of nvidia-smi doesn't show the right cuda version).
Note I think this is probably just a bug since cuda 11.1 + pytorch 1.8.1 are new as of this writing. I did try
torch.cuda.set_device(opts.gpu) # https://github.com/pytorch/pytorch/issues/54550
but I can't say that it always works or why it doesn't. I do have it in my current code, but I think I still get errors with pytorch 1.8.x + cuda 11.x.
see my conda list in case it helps:
$ conda list
# packages in environment at /home/miranda9/miniconda3/envs/metalearningpy1.7.1c10.2:
#
# Name Version Build Channel
_libgcc_mutex 0.1 main
absl-py 0.12.0 py38h06a4308_0
aioconsole 0.3.1 pypi_0 pypi
aiohttp 3.7.4 py38h27cfd23_1
anatome 0.0.1 pypi_0 pypi
argcomplete 1.12.2 pypi_0 pypi
astunparse 1.6.3 pypi_0 pypi
async-timeout 3.0.1 py38h06a4308_0
attrs 20.3.0 pyhd3eb1b0_0
beautifulsoup4 4.9.3 pyha847dfd_0
blas 1.0 mkl
blinker 1.4 py38h06a4308_0
boto 2.49.0 pypi_0 pypi
brotlipy 0.7.0 py38h27cfd23_1003
bzip2 1.0.8 h7b6447c_0
c-ares 1.17.1 h27cfd23_0
ca-certificates 2021.1.19 h06a4308_1
cachetools 4.2.1 pyhd3eb1b0_0
cairo 1.14.12 h8948797_3
certifi 2020.12.5 py38h06a4308_0
cffi 1.14.0 py38h2e261b9_0
chardet 3.0.4 py38h06a4308_1003
click 7.1.2 pyhd3eb1b0_0
cloudpickle 1.6.0 pypi_0 pypi
conda 4.9.2 py38h06a4308_0
conda-build 3.21.4 py38h06a4308_0
conda-package-handling 1.7.2 py38h03888b9_0
coverage 5.5 py38h27cfd23_2
crcmod 1.7 pypi_0 pypi
cryptography 3.4.7 py38hd23ed53_0
cudatoolkit 10.2.89 hfd86e86_1
cycler 0.10.0 py38_0
cython 0.29.22 py38h2531618_0
dbus 1.13.18 hb2f20db_0
decorator 5.0.3 pyhd3eb1b0_0
dgl-cuda10.2 0.6.0post1 py38_0 dglteam
dill 0.3.3 pyhd3eb1b0_0
expat 2.3.0 h2531618_2
fasteners 0.16 pypi_0 pypi
filelock 3.0.12 pyhd3eb1b0_1
flatbuffers 1.12 pypi_0 pypi
fontconfig 2.13.1 h6c09931_0
freetype 2.10.4 h7ca028e_0 conda-forge
fribidi 1.0.10 h7b6447c_0
future 0.18.2 pypi_0 pypi
gast 0.3.3 pypi_0 pypi
gcs-oauth2-boto-plugin 2.7 pypi_0 pypi
glib 2.63.1 h5a9c865_0
glob2 0.7 pyhd3eb1b0_0
google-apitools 0.5.31 pypi_0 pypi
google-auth 1.28.0 pyhd3eb1b0_0
google-auth-oauthlib 0.4.3 pyhd3eb1b0_0
google-pasta 0.2.0 pypi_0 pypi
google-reauth 0.1.1 pypi_0 pypi
graphite2 1.3.14 h23475e2_0
graphviz 2.40.1 h21bd128_2
grpcio 1.32.0 pypi_0 pypi
gst-plugins-base 1.14.0 hbbd80ab_1
gstreamer 1.14.0 hb453b48_1
gsutil 4.60 pypi_0 pypi
gym 0.18.0 pypi_0 pypi
h5py 2.10.0 pypi_0 pypi
harfbuzz 1.8.8 hffaf4a1_0
higher 0.2.1 pypi_0 pypi
httplib2 0.19.0 pypi_0 pypi
icu 58.2 he6710b0_3
idna 2.10 pyhd3eb1b0_0
importlib-metadata 3.7.3 py38h06a4308_1
intel-openmp 2020.2 254
jinja2 2.11.3 pyhd3eb1b0_0
joblib 1.0.1 pyhd3eb1b0_0
jpeg 9b h024ee3a_2
keras-preprocessing 1.1.2 pypi_0 pypi
kiwisolver 1.3.1 py38h2531618_0
lark-parser 0.6.5 pypi_0 pypi
lcms2 2.11 h396b838_0
ld_impl_linux-64 2.33.1 h53a641e_7
learn2learn 0.1.5 pypi_0 pypi
libarchive 3.4.2 h62408e4_0
libffi 3.2.1 hf484d3e_1007
libgcc-ng 9.1.0 hdf63c60_0
libgfortran-ng 7.3.0 hdf63c60_0
liblief 0.10.1 he6710b0_0
libpng 1.6.37 h21135ba_2 conda-forge
libprotobuf 3.14.0 h8c45485_0
libstdcxx-ng 9.1.0 hdf63c60_0
libtiff 4.1.0 h2733197_1
libuuid 1.0.3 h1bed415_2
libuv 1.40.0 h7b6447c_0
libxcb 1.14 h7b6447c_0
libxml2 2.9.10 hb55368b_3
lmdb 0.94 pypi_0 pypi
lz4-c 1.9.2 he1b5a44_3 conda-forge
markdown 3.3.4 py38h06a4308_0
markupsafe 1.1.1 py38h7b6447c_0
matplotlib 3.3.4 py38h06a4308_0
matplotlib-base 3.3.4 py38h62a2d02_0
memory-profiler 0.58.0 pypi_0 pypi
mkl 2020.2 256
mkl-service 2.3.0 py38h1e0a361_2 conda-forge
mkl_fft 1.3.0 py38h54f3939_0
mkl_random 1.2.0 py38hc5bc63f_1 conda-forge
mock 2.0.0 pypi_0 pypi
monotonic 1.5 pypi_0 pypi
multidict 5.1.0 py38h27cfd23_2
ncurses 6.2 he6710b0_1
networkx 2.5 py_0
ninja 1.10.2 py38hff7bd54_0
numpy 1.19.2 py38h54aff64_0
numpy-base 1.19.2 py38hfa32c7d_0
oauth2client 4.1.3 pypi_0 pypi
oauthlib 3.1.0 py_0
olefile 0.46 pyh9f0ad1d_1 conda-forge
openssl 1.1.1k h27cfd23_0
opt-einsum 3.3.0 pypi_0 pypi
ordered-set 4.0.2 pypi_0 pypi
pandas 1.2.3 py38ha9443f7_0
pango 1.42.4 h049681c_0
patchelf 0.12 h2531618_1
pbr 5.5.1 pypi_0 pypi
pcre 8.44 he6710b0_0
pexpect 4.6.0 pypi_0 pypi
pillow 7.2.0 pypi_0 pypi
pip 21.0.1 py38h06a4308_0
pixman 0.40.0 h7b6447c_0
pkginfo 1.7.0 py38h06a4308_0
progressbar2 3.39.3 pypi_0 pypi
protobuf 3.14.0 py38h2531618_1
psutil 5.8.0 py38h27cfd23_1
ptyprocess 0.7.0 pypi_0 pypi
py-lief 0.10.1 py38h403a769_0
pyasn1 0.4.8 py_0
pyasn1-modules 0.2.8 py_0
pycapnp 1.0.0 pypi_0 pypi
pycosat 0.6.3 py38h7b6447c_1
pycparser 2.20 py_2
pyglet 1.5.0 pypi_0 pypi
pyjwt 1.7.1 py38_0
pyopenssl 20.0.1 pyhd3eb1b0_1
pyparsing 2.4.7 pyhd3eb1b0_0
pyqt 5.9.2 py38h05f1152_4
pysocks 1.7.1 py38h06a4308_0
python 3.8.2 hcf32534_0
python-dateutil 2.8.1 pyhd3eb1b0_0
python-libarchive-c 2.9 pyhd3eb1b0_0
python-utils 2.5.6 pypi_0 pypi
python_abi 3.8 1_cp38 conda-forge
pytorch 1.7.1 py3.8_cuda10.2.89_cudnn7.6.5_0 pytorch
pytz 2021.1 pyhd3eb1b0_0
pyu2f 0.1.5 pypi_0 pypi
pyyaml 5.4.1 py38h27cfd23_1
qt 5.9.7 h5867ecd_1
readline 8.1 h27cfd23_0
requests 2.25.1 pyhd3eb1b0_0
requests-oauthlib 1.3.0 py_0
retry-decorator 1.1.1 pypi_0 pypi
ripgrep 12.1.1 0
rsa 4.7.2 pyhd3eb1b0_1
ruamel_yaml 0.15.100 py38h27cfd23_0
scikit-learn 0.24.1 py38ha9443f7_0
scipy 1.6.2 py38h91f5cce_0
setuptools 52.0.0 py38h06a4308_0
sexpdata 0.0.3 pypi_0 pypi
sip 4.19.13 py38he6710b0_0
six 1.15.0 pyh9f0ad1d_0 conda-forge
soupsieve 2.2.1 pyhd3eb1b0_0
sqlite 3.35.2 hdfb4753_0
tensorboard 2.4.0 pyhc547734_0
tensorboard-plugin-wit 1.6.0 py_0
tensorflow 2.4.1 pypi_0 pypi
tensorflow-estimator 2.4.0 pypi_0 pypi
termcolor 1.1.0 pypi_0 pypi
threadpoolctl 2.1.0 pyh5ca1d4c_0
tk 8.6.10 hbc83047_0
torchaudio 0.7.2 py38 pytorch
torchmeta 1.7.0 pypi_0 pypi
torchtext 0.8.1 py38 pytorch
torchvision 0.8.2 py38_cu102 pytorch
tornado 6.1 py38h27cfd23_0
tqdm 4.56.0 pypi_0 pypi
typing-extensions 3.7.4.3 0
typing_extensions 3.7.4.3 py_0 conda-forge
urllib3 1.26.4 pyhd3eb1b0_0
werkzeug 1.0.1 pyhd3eb1b0_0
wheel 0.36.2 pyhd3eb1b0_0
wrapt 1.12.1 pypi_0 pypi
xz 5.2.5 h7b6447c_0
yaml 0.2.5 h7b6447c_0
yarl 1.6.3 py38h27cfd23_0
zipp 3.4.1 pyhd3eb1b0_0
zlib 1.2.11 h7b6447c_3
zstd 1.4.5 h9ceee32_0
For a100s this seemed to work at some point:
pip3 install torch==1.9.1+cu111 torchvision==0.10.1+cu111 torchaudio==0.9.1 -f https://download.pytorch.org/whl/torch_stable.html
| https://stackoverflow.com/questions/66807131/ |
Slow execution time for CUDA initialization in Azure Batch VM | I have an issue with slow initialization time when running a CUDA program in one of the VMs for Azure Batch.
After some troubleshooting, I made a simple test running this call as shown in the below code.
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <time.h>
clock_t start, end;
double cpu_time_used;
int main()
{
CUresult result;
printf("CUDA version %d \n", CUDA_VERSION);
start = clock();
result = cuInit(0);
if (result != CUDA_SUCCESS) {
printf("cuInit failed with error code %d: %s\n", result, cudaGetErrorString(result));
return 1;
}
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("cuInit took %f seconds to execute \n", cpu_time_used);
return 0;
}
It takes about 1.9 seconds on average.
Some specs:
NVidia driver: 460.32.03
CUDAToolkit: 10.2
Azure Batch: nc6, Tesla K80
As a comparison, the same code was run on my desktop (Windows) as well as on another custom Azure VM (nc6, not Azure Batch), giving a similar result of 0.03 seconds (cudatoolkit 10.2).
-- update 1 --
Calling CUDA initialization via Torch also shows a significant lag (for the first call) as shown from this test:
run: 0, import torch: 0.430, cuda_available: 4.921
run: 1, import torch: 0.000, cuda_available: 0.000
run: 2, import torch: 0.000, cuda_available: 0.000
run: 3, import torch: 0.000, cuda_available: 0.000
max time for import torch: 0.43 s, max time for cuda_available: 4.92 s
torch.version 1.7.1+cu101 torch.version.cuda: 10.1
The import torch code is:
import torch
and the cuda_available code is:
torch.cuda.is_available()
My question is: is the time taken by Azure Batch for CUDA initialization normal behavior?
| Azure Batch in VirtualMachineConfiguration mode allocates Virtual Machine Scale Sets internally. There is no difference in the underlying hardware Azure Batch allocates from. For further investigation:
How big is your sample set? Is your start time reproducible between different VMs within different Batch pools? Perhaps you got a bad or degraded VM or GPU.
What does nvidia-smi tell you on the compute node in question? Are there any potential hardware faults?
Are you using an Azure Batch task or are you remoting into the VM and executing your task interactively?
| https://stackoverflow.com/questions/66809823/ |
Why do we need to pass the gradient parameter to the backward function in PyTorch? | According to the docs, when we call the backward function on a tensor, if the tensor is non-scalar (i.e. its data has more than one element) and requires gradient, the function additionally requires specifying a gradient.
import torch
a = torch.tensor([10.,10.],requires_grad=True)
b = torch.tensor([20.,20.],requires_grad=True)
F = a * b
F.backward(gradient=torch.tensor([1.,1.]))
print(a.grad)
Output: tensor([20., 20.])
Now scaling the external gradient:
a = torch.tensor([10.,10.],requires_grad=True)
b = torch.tensor([20.,20.],requires_grad=True)
F = a * b
F.backward(gradient=torch.tensor([2.,2.])) #modified
print(a.grad)
Output: tensor([40., 40.])
So, passing the gradient argument to backward seems to scale the gradients. Also, by default F.backward() is F.backward(gradient=torch.Tensor([1.]))
Apart from scaling the grad value, how does the gradient parameter passed to the backward function help to compute the derivatives when we have a non-scalar tensor? Why can't PyTorch calculate the derivative implicitly, without asking for an explicit gradient parameter, as it does for a scalar tensor?
| It's because PyTorch is calculating a vector-Jacobian product. In the case of a scalar value, .backward() without parameters is equivalent to .backward(torch.tensor(1.0)).
That's why you need to provide the tensor with which you want to calculate the product. Read more about automatic differentiation.
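A small sketch making this concrete, reusing the tensors from the question: the gradient argument is the vector v in the vector-Jacobian product v^T J, so each entry weights one row of the Jacobian rather than just scaling everything uniformly.
import torch
a = torch.tensor([10., 10.], requires_grad=True)
b = torch.tensor([20., 20.], requires_grad=True)
F = a * b                    # non-scalar output; the Jacobian dF/da is diag(b)
v = torch.tensor([1., 3.])   # per-output weights
F.backward(gradient=v)       # computes v^T @ J
print(a.grad)                # tensor([20., 60.]) == v * b, not a uniform scale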
| https://stackoverflow.com/questions/66811113/ |
Logging training metrics to a csv file | I want to log all training metrics to a csv file while training YOLOv5, which is written in PyTorch, but the problem is that I don't want to use TensorBoard.
To achieve this goal I tried some techniques like below:
-First log it into tensorboard and then try to convert it to a csv file (failed)
-Extract log files from Weights & Biases (failed)
-Write to a csv file directly during training (failed)
Here I tried opening a csv file and transforming tensor data into string but couldn't succeed.
So I really wonder how I could achieve this goal, because YOLOv5 does not use utilities like model.fit that we can attach callbacks to.
Thanks in advance.
| You might try using the Weights and Biases YOLOv5 integration.
Here is the link: https://docs.wandb.ai/guides/integrations/yolov5
The link has more details, but here are some quotes that convey the basic idea:
Simply by installing wandb, you'll activate the built-in W&B logging features: system metrics, model metrics, and media logged to interactive Dashboards.
pip install wandb
git clone https://github.com/ultralytics/yolov5.git
python yolov5/train.py # train a small network on a small dataset
Just follow the links printed to standard out by wandb.
You can also do model versioning & dataset visualization, which is explained more in the w&b yolov5 integration docs page (linked above). You can also watch this YouTube video for a guide: https://youtu.be/yyecuhBmLxE
| https://stackoverflow.com/questions/66816695/ |
PyTorch CPU memory leak but only when running on a specific machine | I'm running a model and I've noticed that the RAM usage slowly increases during the training of the model. It's around 200mb-400mb per epoch, but over time it fills up all the RAM on my machine which eventually leads the OS to kill the job. However, the strange thing about this is it's only when running on a specific machine. I've run the same code on a different machine and there's no memory leak whatsoever.
The difference between the two machines is one is running PyTorch 1.7.1 with cuda 10.2 (the machine without the memory leak) and the other machine (the one with the memory leak) is running PyTorch 1.8.1. Both are run with conda and only on the CPU. I have tried using older versions of PyTorch on the machine with the memory leak, but the memory leak still exists so I doubt it's due to a PyTorch version.
I've been using psutil to monitor the RAM usage on the CPU and I've been using the tracemalloc package to print out snapshots during the training loop to see how the memory usage changes from one epoch to the next. Yet when printing out these differences there isn't any within my code that matches anywhere near the 200mb-400mb RAM increase...
An example of this would something like...
Epoch 25/ 1000 Loss 1428.8508 RAM: 9.910GB
~/Model.py:46: size=1136 B (-96 B), count=2 (+0), average=568 B
~/utils.py:14: size=1040 B (+24 B), count=8 (+1), average=130 B
~/calc_loss.py:79: size=2128 B (+0 B), count=24 (+0), average=89 B
~/calc_loss.py:78: size=2056 B (+0 B), count=23 (+0), average=89 B
~/utils.py:6: size=1920 B (+0 B), count=21 (+0), average=91 B
Epoch 26/ 1000 Loss 1426.0033 RAM: 10.254GB
~/Model.py:46: size=1232 B (+96 B), count=2 (+0), average=616 B
~/utils.py:14: size=1016 B (-24 B), count=7 (-1), average=145 B
~/calc_loss.py:79: size=2128 B (+0 B), count=24 (+0), average=89 B
~/calc_loss.py:78: size=2056 B (+0 B), count=23 (+0), average=89 B
~/utils.py:6: size=1920 B (+0 B), count=21 (+0), average=91 B
~/Layers.py:71: size=992 B (+0 B), count=11 (+0), average=90 B
Epoch 27/ 1000 Loss 1436.8241 RAM: 10.606GB
~/utils.py:14: size=1040 B (+24 B), count=8 (+1), average=130 B
~/calc_loss.py:79: size=2128 B (+0 B), count=24 (+0), average=89 B
~/calc_loss.py:78: size=2056 B (+0 B), count=23 (+0), average=89 B
~/utils.py:6: size=1920 B (+0 B), count=21 (+0), average=91 B
~/Model.py:46: size=1232 B (+0 B), count=2 (+0), average=616 B
~/Layers.py:71: size=992 B (+0 B), count=11 (+0), average=90 B
Epoch 28/ 1000 Loss 1428.6560 RAM: 10.968GB
~/calc_loss.py:79: size=2128 B (+0 B), count=24 (+0), average=89 B
~/calc_loss.py:78: size=2056 B (+0 B), count=23 (+0), average=89 B
~/utils.py:6: size=1920 B (+0 B), count=21 (+0), average=91 B
~/Model.py:46: size=1232 B (+0 B), count=2 (+0), average=616 B
~/utils.py:14: size=1040 B (+0 B), count=8 (+0), average=130 B
Epoch 29/ 1000 Loss 1435.2988 RAM: 11.321GB
~/calc_loss.py:79: size=2128 B (+0 B), count=24 (+0), average=89 B
~/calc_loss.py:78: size=2056 B (+0 B), count=23 (+0), average=89 B
~/utils.py:6: size=1920 B (+0 B), count=21 (+0), average=91 B
~/Model.py:46: size=1232 B (+0 B), count=2 (+0), average=616 B
~/utils.py:14: size=1040 B (+0 B), count=8 (+0), average=130 B
~/Layers.py:71: size=992 B (+0 B), count=11 (+0), average=90 B
The information printed out between the lines showing the current epoch is created via this function:
def compare_snaps(snap1, snap2, limit=50):
top_stats=snap1.compare_to(snap2, "lineno")
for stat in top_stats[:limit]:
line=str(stat)
if("~/" in line): #filter only lines from my own code
print(line)
This function takes two snapshots from tracemalloc.take_snapshot(), one from the current epoch and one from the previous epoch, and compares how the memory usage changes. It takes the top 50 memory-intensive operations, filters only the ones that I've written (i.e. it excludes any changes from within anaconda3) and prints these changes to the screen. As can be seen, the changes in memory are negligible. In fact, when comparing the snapshot output from both machines, they're nearly identical.
It seems really weird that PyTorch code would have a memory leak on one machine and not on another... Could this perhaps be a conda environment issue? I have tried running this from a pip install and the leak still persists. I understand that this is a bit vague (as there's no source code), but are there any ways to check where the leak is coming from? Things like Valgrind come to mind for compiled code, but not so much for interpreted code. This memory leak issue seems to be outside of my skill set, so any help would be appreciated.
Thank you! :)
| Try incorporating this in your process:
import gc
# add this after computing one complete operation
gc.collect()
| https://stackoverflow.com/questions/66817006/ |
how to save a Pytorch model? | I am new to deep learning and I want to know: how can I save the final model in PyTorch? I tried some things that were mentioned but I got confused about how to save the model and how to load it back.
| to save:
# save the weights of the model to a .pt file
torch.save(model.state_dict(), "your_model_path.pt")
to load:
# load your model architecture/module
model = YourModel()
# fill your architecture with the trained weights
model.load_state_dict(torch.load("your_model_path.pt"))
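As an aside, you can also pickle the whole module object instead of just its state_dict; a small sketch (the nn.Linear here is only a stand-in for your own model class, and saving the state_dict as above is the generally recommended approach):
import torch
import torch.nn as nn
model = nn.Linear(4, 2)                # stand-in for YourModel()
torch.save(model, "whole_model.pt")    # saves architecture + weights via pickle
loaded = torch.load("whole_model.pt")  # the class definition must still be importable
loaded.eval()                          # remember to switch to eval mode before inference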
| https://stackoverflow.com/questions/66821329/ |
Memory leak with en_core_web_trf model, Spacy |
There is a memory leak when using the pipe of the en_core_web_trf model. I run the model using a GPU with 16GB RAM; here is a sample of the code.
!python -m spacy download en_core_web_trf
import en_core_web_trf
nlp = en_core_web_trf.load()
#it's just an array of 100K sentences.
data = dataload()
for index, review in enumerate( nlp.pipe(data, batch_size=100) ):
#doing some processing here
if index % 1000: print(index)
This code crashes when reaching 31K and raises an OOM error.
CUDA out of memory. Tried to allocate 46.00 MiB (GPU 0; 11.17 GiB total capacity; 10.44 GiB already allocated; 832.00 KiB free; 10.72 GiB reserved in total by PyTorch)
I just use the pipeline to predict, not to train on any data or other stuff, and I tried different batch sizes, but nothing changed;
still, it crashes.
Your Environment
spaCy version: 3.0.5
Platform: Linux-4.19.112+-x86_64-with-Ubuntu-18.04-bionic
Python version: 3.7.10
Pipelines: en_core_web_trf (3.0.0)
Lucky you with a GPU - I am still trying to get through the (torch GPU) DLL hell on Windows :-). But it looks like Spacy 3 uses more GPU memory than Spacy 2 did - my 6GB GPU may have become useless.
That said, have you tried running your case without the GPU (and watching memory usage)?
The Spacy 2 'leak' on large datasets is (mainly) due to the growing vocabulary - each data row may add a couple more words, and the suggested 'solution' is reloading the model and/or just the vocabulary every nnn rows. The GPU usage may have the same issue...
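As an illustration of that workaround (the chunk size of 10,000 is an arbitrary choice, data is the sentence list from the question, and whether this actually releases the transformer's GPU memory is not guaranteed):
import en_core_web_trf
data = [...]  # the ~100K sentences from the question
CHUNK = 10_000
for start in range(0, len(data), CHUNK):
    nlp = en_core_web_trf.load()  # fresh pipeline, so the vocab doesn't keep growing
    for doc in nlp.pipe(data[start:start + CHUNK], batch_size=100):
        pass  # your processing here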
| https://stackoverflow.com/questions/66832669/ |
Huggingface error during training: AttributeError: 'str' object has no attribute 'size' | While trying to finetune a Huggingface GPT2LMHeadModel model for causal language modeling (given a sequence of words, predict the next word) using Pytorch Lightning, I am getting an error during training:
AttributeError: 'str' object has no attribute 'size'
What went wrong with our training code? Is this due to the incorrect use of DataCollatorForLanguageModeling in a Pytorch DataLoader?
Reproducible Example:
import os
from pathlib import Path
import torch
import pytorch_lightning as pl
from transformers import (
GPT2Config,
GPT2LMHeadModel,
GPT2Tokenizer,
DataCollatorForLanguageModeling,
)
from transformers.optimization import AdamW
from tokenizers import ByteLevelBPETokenizer
from torch.utils.data import (
DataLoader,
Dataset,
)
TOKENIZER_DIRPATH = os.path.join("..", "data")
def tokenize_data():
tokenizer = ByteLevelBPETokenizer()
tokenizer.train(
files=os.path.join(TOKENIZER_DIRPATH, "words.txt"),
vocab_size=50000,
min_frequency=2,
special_tokens=["<s>", "</s>", "<unk>", "<mask>", "<pad>",],
)
tokenizer.save_model("../data")
class MyDataset(Dataset):
def __init__(self):
tokenizer = GPT2Tokenizer(
os.path.join(TOKENIZER_DIRPATH, "vocab.json"),
os.path.join(TOKENIZER_DIRPATH, "merges.txt"),
)
src_file = Path(os.path.join(TOKENIZER_DIRPATH, "words.txt"))
lines = src_file.read_text(encoding="utf-8").splitlines()
self.examples = [tokenizer.encode(line) for line in lines]
def __len__(self):
return len(self.examples)
def __getitem__(self, i):
return torch.tensor(self.examples[i])
class MyDataModule(pl.LightningDataModule):
def __init__(self):
super().__init__()
self.tokenizer = GPT2Tokenizer(
os.path.join(TOKENIZER_DIRPATH, "vocab.json"),
os.path.join(TOKENIZER_DIRPATH, "merges.txt"),
)
def setup(self, stage):
self.train_dataset = MyDataset()
def train_dataloader(self):
data_collator = DataCollatorForLanguageModeling(
tokenizer=self.tokenizer, mlm=False
)
train_dataloader = DataLoader(self.train_dataset, collate_fn=data_collator)
return train_dataloader
class MyModel(pl.LightningModule):
def __init__(self, learning_rate, adam_beta1, adam_beta2, adam_epsilon):
super().__init__()
self.save_hyperparameters()
config = GPT2Config()
self.model = GPT2LMHeadModel(config)
def forward(self, x):
return self.model(x).logits
def training_step(self, batch, batch_idx):
input_ids, labels = batch
loss = self.model(input_ids, labels=labels).loss
self.log("train_loss", loss, on_epoch=True)
return loss
def configure_optimizers(self):
optimizer = AdamW(
self.parameters(),
self.hparams.learning_rate,
betas=(self.hparams.adam_beta1, self.hparams.adam_beta2),
eps=self.hparams.adam_epsilon,
)
return optimizer
tokenize_data()
dm = MyDataModule()
model = MyModel(
learning_rate=5e-5, adam_beta1=0.9, adam_beta2=0.999, adam_epsilon=1e-8,
)
trainer = pl.Trainer()
trainer.fit(model, dm)
Error Traceback:
Epoch 0: 0%| | 0/9 [00:00<?, ?it/s]
Traceback (most recent call last):
File "test_gpt.py", line 102, in <module>
trainer.fit(model, dm)
File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 499, in fit
self.dispatch()
File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 546, in dispatch
self.accelerator.start_training(self)
File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/accelerators/accelerator.py", line 73, in start_training
self.training_type_plugin.start_training(trainer)
File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py", line 114, in start_training
self._results = trainer.run_train()
File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 637, in run_train
self.train_loop.run_training_epoch()
File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py", line 493, in run_training_epoch
batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py", line 655, in run_training_batch
self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py", line 426, in optimizer_step
model_ref.optimizer_step(
File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/core/lightning.py", line 1387, in optimizer_step
optimizer.step(closure=optimizer_closure)
File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/core/optimizer.py", line 214, in step
self.__optimizer_step(*args, closure=closure, profiler_name=profiler_name, **kwargs)
File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/core/optimizer.py", line 134, in __optimizer_step
trainer.accelerator.optimizer_step(optimizer, self._optimizer_idx, lambda_closure=closure, **kwargs)
File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/accelerators/accelerator.py", line 277, in optimizer_step
self.run_optimizer_step(optimizer, opt_idx, lambda_closure, **kwargs)
File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/accelerators/accelerator.py", line 282, in run_optimizer_step
self.training_type_plugin.optimizer_step(optimizer, lambda_closure=lambda_closure, **kwargs)
File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py", line 163, in optimizer_step
optimizer.step(closure=lambda_closure, **kwargs)
File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/transformers/optimization.py", line 318, in step
loss = closure()
File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py", line 649, in train_step_and_backward_closure
result = self.training_step_and_backward(
File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py", line 743, in training_step_and_backward
result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)
File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py", line 293, in training_step
training_step_output = self.trainer.accelerator.training_step(args)
File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/accelerators/accelerator.py", line 156, in training_step
return self.training_type_plugin.training_step(*args)
File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py", line 125, in training_step
return self.lightning_module.training_step(*args, **kwargs)
File "test_gpt.py", line 81, in training_step
loss = self.model(input_ids, labels=labels).loss
File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 904, in forward
transformer_outputs = self.transformer(
File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "/opt/anaconda3/envs/test_huggingface/lib/python3.8/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 633, in forward
input_shape = input_ids.size()
AttributeError: 'str' object has no attribute 'size'
Conda Packages:
pytorch 1.7.0 py3.8_cuda10.2.89_cudnn7.6.5_0 pytorch
pytorch-lightning 1.2.5 pyhd8ed1ab_0 conda-forge
tokenizers 0.10.1 pypi_0 pypi
transformers 4.4.2 pypi_0 pypi
| Recent versions of transformers can cause this; downgrading worked for me:
pip install transformers==2.11.0
In my case this worked. Then restart your kernel.
| https://stackoverflow.com/questions/66834205/ |
When using torch.backward() for a GANs generator, why doesn't discriminator losses change in Pytorch? | My understanding of GANs is:
When training your generator, you need to back-propagate through the discriminator first so you can follow the chain rule. As a result, we can't use .detach() when working on our generator's loss calculation.
When updating the discriminator, since the generator's weight update doesn't affect the discriminator's weight updates, we can .detach() the generator output from the computation; my understanding is that since the generator is no longer part of the computation graph, we no longer update it during backprop.
So when we update your discriminator losses with:
disc_loss.backward(retain_graph=True) at each mini-batch, we don't have to worry about your generator being part of the pipeline thanks to the .detach() function call.
But what about when we work on our generator? What is keeping our model from constantly changing the discriminator weights in accordance with the generator? That isn't what we want, after all, right? The discriminator shouldn't learn when it is told that fake samples are real.
Why does such a model work in the first place?
| backward doesn't update the weights, it updates the gradients of the weights. Updating weights is the responsibility of the optimizer(s). There are different ways to implement GANs, but often you would have two optimizers, one that is responsible for updating the weights (and resetting the gradients) of the generator and one that is responsible for updating the weights (and resetting the gradients) of the discriminator. Upon initialization, each optimizer is provided only the weights of the model it will update. Therefore, when you call an optimizer's step method it only updates those weights. Using separate optimizers is what prevents the discriminator weights from being updated while minimizing the generator's loss function.
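A minimal runnable sketch of that two-optimizer pattern (the tiny nn.Linear models and BCELoss here are placeholders chosen only to illustrate the update flow, not a real GAN architecture):
import torch
import torch.nn as nn
generator = nn.Linear(8, 4)
discriminator = nn.Sequential(nn.Linear(4, 1), nn.Sigmoid())
criterion = nn.BCELoss()
opt_d = torch.optim.Adam(discriminator.parameters(), lr=2e-4)
opt_g = torch.optim.Adam(generator.parameters(), lr=2e-4)
real = torch.randn(16, 4)
z = torch.randn(16, 8)
# discriminator step: generator output is detached
opt_d.zero_grad()
d_loss = criterion(discriminator(real), torch.ones(16, 1)) + \
         criterion(discriminator(generator(z).detach()), torch.zeros(16, 1))
d_loss.backward()   # no gradients reach the generator because of .detach()
opt_d.step()        # only discriminator weights change
# generator step: gradients flow *through* the discriminator
opt_g.zero_grad()
g_loss = criterion(discriminator(generator(z)), torch.ones(16, 1))
g_loss.backward()   # discriminator grads do accumulate here...
opt_g.step()        # ...but opt_g only updates generator.parameters();
                    # opt_d.zero_grad() clears those stale grads on the next iteration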
| https://stackoverflow.com/questions/66841054/ |
I can't figure out why the size of the tensors doesn't match in Pytorch | Some context:
I have been studying AI and ML for the last couple of months, and now I am finally studying neural nets. Great! The problem is that when I follow a tutorial everything seems to be OK, but when I try to implement a NN by myself I always face issues related to the size of the tensors.
I have seen the answers to other questions (like this one), but they only address the exact problem of that post. I am not looking for code to just copy and paste. I want to understand why I am facing this problem, how to handle it and how to avoid it.
The error message:
/home/devops/aic/venv/lib/python3.8/site-packages/torch/nn/modules/loss.py:528: UserWarning: Using a target size (torch.Size([16, 2])) that is different to the input size (torch.Size([9, 2])). This will likely lead to incorrect results due to broadcasting. Please ensure they have the same size.
return F.mse_loss(input, target, reduction=self.reduction)
Traceback (most recent call last):
File "nn_conv.py", line 195, in
loss = loss_function(outputs, targets)
File "/home/devops/aic/venv/lib/python3.8/site-packages/torch/nn/modules/module.py", line 889, in _call_impl
result = self.forward(*input, **kwargs)
File "/home/devops/aic/venv/lib/python3.8/site-packages/torch/nn/modules/loss.py", line 528, in forward
return F.mse_loss(input, target, reduction=self.reduction)
File "/home/devops/aic/venv/lib/python3.8/site-packages/torch/nn/functional.py", line 2928, in mse_loss
expanded_input, expanded_target = torch.broadcast_tensors(input, target)
File "/home/devops/aic/venv/lib/python3.8/site-packages/torch/functional.py", line 74, in broadcast_tensors
return _VF.broadcast_tensors(tensors) # type: ignore
RuntimeError: The size of tensor a (9) must match the size of tensor b (16) at non-singleton dimension 0
This is my code:
import os
import cv2
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class DogsVSCats():
IMG_SIZE = 50
CATS = 'PetImages/Cat'
DOGS = 'PetImages/Dog'
LABELS = {CATS: 0, DOGS: 1}
training_data = []
cats_count = 0
dogs_count = 0
def make_training_data(self):
for label in self.LABELS.keys():
for f in tqdm(os.listdir(label)):
try:
path = os.path.join(label, f)
# convert image to grayscale
img = cv2.imread(path)
if img is not None:
height, width = img.shape[:2]
if width > height:
height = round((height * self.IMG_SIZE) / width)
width = self.IMG_SIZE
right = 0
bottom = self.IMG_SIZE - height
else:
width = round((width * self.IMG_SIZE) / height)
height = self.IMG_SIZE
right = self.IMG_SIZE - width
bottom = 0
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
img = cv2.resize(img, (width, height))
img = cv2.copyMakeBorder(img,
top=0,
bottom=bottom,
left=0,
right=right,
borderType=cv2.BORDER_CONSTANT)
# Add a One-hot-vector of label of the image to self.training_data
self.training_data.append([np.array(img), np.eye(len(self.LABELS))[self.LABELS[label]]])
if label == self.CATS:
self.cats_count += 1
elif label == self.DOGS:
self.dogs_count += 1
except cv2.error as e:
pass
np.random.shuffle(self.training_data)
np.save("PetImages/training_data.npy", self.training_data)
print("Cats:", self.cats_count)
print("Dogs:", self.dogs_count)
training_data = np.load('PetImages/training_data.npy', allow_pickle=True)
plt.imsave('PetImages/trained_example.png', training_data[1][0])
class RunningMetrics():
def __init__(self):
self._sum = 0
self._count = 0
def __call__(self):
return self._sum/float(self._count)
def update(self, val, size):
self._sum += val
self._count += size
class Net(nn.Module):
def __init__(self, num_channels, conv_kernel_size=3, stride=1, padding=1, max_pool_kernel_size=2):
super(Net, self).__init__()
self._num_channels = num_channels
self._max_pool_kernel_size = max_pool_kernel_size
self.conv1 = nn.Conv2d(1, self._num_channels, conv_kernel_size, stride, padding)
self.conv2 = nn.Conv2d(self._num_channels, self._num_channels*2, conv_kernel_size, stride, padding)
self.conv3 = nn.Conv2d(self._num_channels*2, self._num_channels*4, conv_kernel_size, stride, padding)
# Calc input of first
self.fc1 = nn.Linear(self._num_channels*4*8*8, self._num_channels*8)
self.fc2 = nn.Linear(self._num_channels*8, 2)
def forward(self, x):
# Conv
x = self.conv1(x)
x = F.relu(F.max_pool2d(x, self._max_pool_kernel_size))
x = self.conv2(x)
x = F.relu(F.max_pool2d(x, self._max_pool_kernel_size))
x = self.conv3(x)
x = F.relu(F.max_pool2d(x, self._max_pool_kernel_size))
# Flatten
x = x.view(-1, self._num_channels*4*8*8)
# Fully Connected
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
# return F.log_softmax(x, dim=1)
return F.softmax(x, dim=1)
def save_model(path):
torch.save(save, path)
def load_model(path):
self = torch.load(PATH)
self.eval()
if __name__ == '__main__':
print('Loading dataset')
if not os.path.exists("PetImages/training_data.npy"):
dogsvcats = DogsVSCats()
dogsvcats.make_training_data()
training_data = np.load('PetImages/training_data.npy', allow_pickle=True)
print('Loading Net')
net = Net(num_channels=32)
# net = net.to(device)
# optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9 )
optimizer = optim.Adam(net.parameters(), lr=0.001)
# loss_function = nn.NLLLoss()
loss_function = nn.MSELoss()
print('Converting X tensor')
X = torch.Tensor([i[0] for i in training_data]).view(-1, 50, 50)
X = X/255.0
print('Converting Y tensor')
y = torch.Tensor([i[1] for i in training_data])
# Validation data
VAL_PERCENT = 0.1
val_size = int(len(X)*VAL_PERCENT)
X_train = X[:-val_size]
y_train = y[:-val_size]
X_test = X[-val_size:]
y_test = y[-val_size:]
print('Training Set:', len(X_train))
print('Testing Set:', len(X_test))
BATCH_SIZE = 16
EPOCHS = 2
IMG_SIZE=50
for epoch in range(EPOCHS):
print(f'Epoch {epoch+1}/{EPOCHS}')
running_loss = RunningMetrics()
running_acc = RunningMetrics()
for i in tqdm(range(0, len(X_train), BATCH_SIZE)):
inputs = X_train[i:i+BATCH_SIZE].view(-1,1, IMG_SIZE, IMG_SIZE)
targets = y_train[i:i+BATCH_SIZE]
# inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = net(inputs)
_, preds = torch.max(outputs, 1)
loss = loss_function(outputs, targets)
loss.backward()
optimizer.step()
running_loss.update(loss.item()*BATCH_SIZE,
BATCH_SIZE)
running_acc.update(torch.sum(preds == targets).float(),
BATCH_SIZE)
print(f'Loss: {running_loss:.4f}, Acc: {running_acc:.4f}')
print('-'*10)
Dataset:
I am using the Microsoft's dataset of cats and dogs images
EDIT:
The previous error message has been solved following Anonymous's advice, but now I am getting another error:
Traceback (most recent call last):
File "nn_conv.py", line 203, in
running_acc.update(torch.sum(preds == targets).float(),
RuntimeError: The size of tensor a (16) must match the size of tensor b (2) at non-singleton dimension 1
| Input : 16 x 1 x 50 x 50
After conv1/maxpool1 : 16 x 32 x 25 x 25
After conv2/maxpool2 : 16 x 64 x 12 x 12 (no padding so taking floor)
After conv3/maxpool3 : 16 x 128 x 6 x 6 (=73 728 neurons here is your error)
Flattening : you specified a view like -1 x 32 * 4 * 8 * 8 = 9 x 8192
The correct flattening is -1 x 32 * 4 * 6 * 6 (and self.fc1 must then be built with the matching input size, self._num_channels*4*6*6).
A few tips:
as you're getting started with PyTorch, you should look into how to use a DataLoader/Dataset
the binary cross entropy is more commonly used for classification (though MSE is still possible)
| https://stackoverflow.com/questions/66842842/ |
The input tensor should have dimensions 1 x height x width x 3. Got 1 x 3 x 224 x 224 | I want to convert a PyTorch-trained model to a TensorFlow model and use the model on mobile devices. For this, I follow these steps: first I convert the PyTorch-trained model to ONNX format, then I convert the ONNX model to a TensorFlow model.
First, the PyTorch-trained model to ONNX:
import torch
import torch.onnx
from detectron2.modeling import build_model
from detectron2.modeling import build_backbone
from torch.autograd import Variable
model= build_backbone(cfg)
model.eval()
dummy_input = torch.randn(1,3,224, 224,requires_grad=True)
torch.onnx.export(model,dummy_input,"drive/Detectron2/model_final.onnx")
Then ONNX to the TFLite model:
import onnx
import warnings
from onnx_tf.backend import prepare
model = onnx.load("drive/Detectron2/model_final.onnx")
tf_rep = prepare(model)
tf_rep.export_graph("drive/Detectron2/tf_rep.pb")
import tensorflow as tf
## TFLite Conversion
# Before conversion, fix the model input size
model = tf.saved_model.load("drive/Detectron2/tf_rep.pb")
model.signatures[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY].inputs[0].set_shape([1, 3,224, 224])
tf.saved_model.save(model, "saved_model_updated", signatures=model.signatures[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY])
# Convert
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir='saved_model_updated', signature_keys=['serving_default'])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
tflite_model = converter.convert()
# Save the model.
with open('drive/Detectron2/model.tflite', 'wb') as f:
f.write(tflite_model)
## TFLite Interpreter to check input shape
interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Test the model on random input data.
input_shape = input_details[0]['shape']
print(input_shape)
But when I use the model on mobile devices, I get the following error;
java.lang.AssertionError: Error occurred when initializing ImageSegmenter: The input tensor should have dimensions 1 x height x width x 3. Got 1 x 3 x 224 x 224.
Where am I going wrong?
| Maybe you could try einops for tensor transformations. It's elegant and powerful.
In your case, the code should be
import einops
input_tensor = einops.rearrange(input_tensor, 'b c h w -> b h w c')
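If you'd rather not add a dependency, the same NCHW-to-NHWC permutation can be done with plain PyTorch (a small sketch; the random tensor stands in for whatever NCHW input you feed to the model/export):
import torch
input_tensor = torch.randn(1, 3, 224, 224)             # NCHW
nhwc = input_tensor.permute(0, 2, 3, 1).contiguous()   # -> 1 x 224 x 224 x 3 (NHWC)
print(nhwc.shape)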
| https://stackoverflow.com/questions/66843633/ |
TypeError: linear(): argument 'input' (position 1) must be Tensor, not str | So I've been trying to work on an example of BERT that I found on GitHub, as it's the first time I'm trying to use BERT and see how it works. The repository I'm working with is the following: https://github.com/prateekjoshi565/Fine-Tuning-BERT/blob/master/Fine_Tuning_BERT_for_Spam_Classification.ipynb
I'm using a different dataset; however, I'm getting the issue "TypeError: linear(): argument 'input' (position 1) must be Tensor, not str" and honestly I don't know what I'm doing wrong. Is there anyone who could help me?
The code I've been using is the following:
# convert class weights to tensor
weights= torch.tensor(class_wts,dtype=torch.float)
weights = weights.to(device)
# loss function
cross_entropy = nn.NLLLoss(weight=weights)
# number of training epochs
epochs = 10
def train():
model.train()
total_loss, total_accuracy = 0, 0
# empty list to save model predictions
total_preds=[]
# iterate over batches
for step,batch in enumerate(train_dataloader):
# progress update after every 50 batches.
if step % 50 == 0 and not step == 0:
print(' Batch {:>5,} of {:>5,}.'.format(step, len(train_dataloader)))
# push the batch to gpu
batch = [r.to(device) for r in batch]
sent_id, mask, labels = batch
# clear previously calculated gradients
model.zero_grad()
# get model predictions for the current batch
preds = model(sent_id, mask)
# compute the loss between actual and predicted values
loss = cross_entropy(preds, labels)
# add on to the total loss
total_loss = total_loss + loss.item()
# backward pass to calculate the gradients
loss.backward()
# clip the the gradients to 1.0. It helps in preventing the exploding gradient problem
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
# update parameters
optimizer.step()
# model predictions are stored on GPU. So, push it to CPU
preds=preds.detach().cpu().numpy()
# append the model predictions
total_preds.append(preds)
# compute the training loss of the epoch
avg_loss = total_loss / len(train_dataloader)
# predictions are in the form of (no. of batches, size of batch, no. of classes).
# reshape the predictions in form of (number of samples, no. of classes)
total_preds = np.concatenate(total_preds, axis=0)
#returns the loss and predictions
return avg_loss, total_preds
def evaluate():
print("\nEvaluating...")
# deactivate dropout layers
model.eval()
total_loss, total_accuracy = 0, 0
# empty list to save the model predictions
total_preds = []
# iterate over batches
for step,batch in enumerate(val_dataloader):
# Progress update every 50 batches.
if step % 50 == 0 and not step == 0:
# Calculate elapsed time in minutes.
elapsed = format_time(time.time() - t0)
# Report progress.
print(' Batch {:>5,} of {:>5,}.'.format(step, len(val_dataloader)))
# push the batch to gpu
batch = [t.to(device) for t in batch]
sent_id, mask, labels = batch
# deactivate autograd
with torch.no_grad():
# model predictions
preds = model(sent_id, mask)
# compute the validation loss between actual and predicted values
loss = cross_entropy(preds,labels)
total_loss = total_loss + loss.item()
preds = preds.detach().cpu().numpy()
total_preds.append(preds)
# compute the validation loss of the epoch
avg_loss = total_loss / len(val_dataloader)
# reshape the predictions in form of (number of samples, no. of classes)
total_preds = np.concatenate(total_preds, axis=0)
return avg_loss, total_preds
# set initial loss to infinite
best_valid_loss = float('inf')
# empty lists to store training and validation loss of each epoch
train_losses=[]
valid_losses=[]
#for each epoch
for epoch in range(epochs):
print('\n Epoch {:} / {:}'.format(epoch + 1, epochs))
#train model
train_loss, _ = train()
#evaluate model
valid_loss, _ = evaluate()
#save the best model
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict(), 'saved_weights.pt')
# append training and validation loss
train_losses.append(train_loss)
valid_losses.append(valid_loss)
print(f'\nTraining Loss: {train_loss:.3f}')
print(f'Validation Loss: {valid_loss:.3f}')
The traceback I receive is:
Epoch 1 / 10
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-105-c5138ddf6b25> in <module>()
12
13 #train model
---> 14 train_loss, _ = train()
15
16 #evaluate model
5 frames
<ipython-input-103-3236a6e339dd> in train()
24
25 # get model predictions for the current batch
---> 26 preds = model(sent_id, mask)
27
28 # compute the loss between actual and predicted values
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
887 result = self._slow_forward(*input, **kwargs)
888 else:
--> 889 result = self.forward(*input, **kwargs)
890 for hook in itertools.chain(
891 _global_forward_hooks.values(),
<ipython-input-99-9ebdcf410f97> in forward(self, sent_id, mask)
28 _, cls_hs = self.bert(sent_id, attention_mask=mask)
29
---> 30 x = self.fc1(cls_hs)
31
32 x = self.relu(x)
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
887 result = self._slow_forward(*input, **kwargs)
888 else:
--> 889 result = self.forward(*input, **kwargs)
890 for hook in itertools.chain(
891 _global_forward_hooks.values(),
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/linear.py in forward(self, input)
92
93 def forward(self, input: Tensor) -> Tensor:
---> 94 return F.linear(input, self.weight, self.bias)
95
96 def extra_repr(self) -> str:
/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py in linear(input, weight, bias)
1751 if has_torch_function_variadic(input, weight):
1752 return handle_torch_function(linear, (input, weight), input, weight, bias=bias)
-> 1753 return torch._C._nn.linear(input, weight, bias)
1754
1755
TypeError: linear(): argument 'input' (position 1) must be Tensor, not str
| I've been working on this repo too.
Motivated by the answer provided at this link: there is a class, probably named Bert_Arch, that inherits from nn.Module, and this class has an overridden method named forward. Inside the forward method, just add the parameter return_dict=False to the self.bert() method call, like so:
_, cls_hs = self.bert(sent_id, attention_mask=mask, return_dict=False)
This worked for me.
| https://stackoverflow.com/questions/66846030/ |
model.eval for class with field of network - pytorch | I have a model class with a field holding a pre-trained ResNet,
something like:
class A(nn.Module):
def __init__(self, **kwargs):
super(A, self).__init__()
self.resnet = get_resnet()
def forward(self, x):
return self.resnet(x)
...
Now I'm doing:
model = A()
...
model.eval()
Is it OK, or should I override the eval and train functions?
| Short answer
It's OK.
Long answer
nn.Module.train() runs recursively, like this:
self.training = mode
for module in self.children():
module.train(mode)
return self
And nn.Module.eval() just calls self.train(False).
So as long as self.resnet is an nn.Module subclass, you don't need to bother about it; practically every method in nn.Module except forward will affect all the submodules.
You can test this by
model = A()
...
model.eval()
print(model.resnet.training) # should be False
If you get False then everything is fine. If you get something else then there's something wrong with the get_resnet().
| https://stackoverflow.com/questions/66853955/ |
Proper way to log things when using Pytorch Lightning DDP | I was wondering what is the proper way of logging metrics when using DDP. I noticed that if I want to print something inside validation_epoch_end it will be printed twice when using 2 GPUs. I was expecting validation_epoch_end to be called only on rank 0 and to receive the outputs from all GPUs, but I am not sure this is correct anymore. Therefore I have several questions:
validation_epoch_end(self, outputs) - When using DDP does every subprocess receive the data processed from the current GPU or data processed from all GPUs, i.e. does the input parameter outputs contains the outputs of the entire validation set, from all GPUs?
If outputs is GPU/process specific what is the proper way to calculate any metric on the entire validation set in validation_epoch_end when using DDP?
I understand that I can solve the printing by checking self.global_rank == 0 and printing/logging only in that case, however I am trying to get a deeper understanding of what I am printing/logging in this case.
Here is a code snippet from my use case. I would like to be able to report f1, precision and recall on the entire validation dataset and I am wondering what is the correct way of doing it when using DDP.
def _process_epoch_outputs(self,
outputs: List[Dict[str, Any]]
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Creates and returns tensors containing all labels and predictions
Goes over the outputs accumulated from every batch, detaches the
necessary tensors and stacks them together.
Args:
outputs (List[Dict])
"""
all_labels = []
all_predictions = []
for output in outputs:
for labels in output['labels'].detach():
all_labels.append(labels)
for predictions in output['predictions'].detach():
all_predictions.append(predictions)
all_labels = torch.stack(all_labels).long().cpu()
all_predictions = torch.stack(all_predictions).cpu()
return all_predictions, all_labels
def validation_epoch_end(self, outputs: List[Dict[str, Any]]) -> None:
"""Logs f1, precision and recall on the validation set."""
if self.global_rank == 0:
print(f'Validation Epoch: {self.current_epoch}')
predictions, labels = self._process_epoch_outputs(outputs)
for i, name in enumerate(self.label_columns):
f1, prec, recall, t = metrics.get_f1_prec_recall(predictions[:, i],
labels[:, i],
threshold=None)
self.logger.experiment.add_scalar(f'{name}_f1/Val',
f1,
self.current_epoch)
self.logger.experiment.add_scalar(f'{name}_Precision/Val',
prec,
self.current_epoch)
self.logger.experiment.add_scalar(f'{name}_Recall/Val',
recall,
self.current_epoch)
if self.global_rank == 0:
print((f'F1: {f1}, Precision: {prec}, '
f'Recall: {recall}, Threshold {t}'))
| Questions
validation_epoch_end(self, outputs) - When using DDP does every
subprocess receive the data processed from the current GPU or data
processed from all GPUs, i.e. does the input parameter outputs
contains the outputs of the entire validation set, from all GPUs?
Data processed from the current GPU only, outputs are not synchronized, there is only backward synchronization (gradients are synchronized during training and distributed to replicas of models residing on each GPU).
Imagine all of the outputs being passed from 1000 GPUs to this poor master; it could very easily run out of memory (OOM).
If outputs is GPU/process specific what is the proper way to calculate
any metric on the entire validation set in validation_epoch_end when
using DDP?
According to documentation (emphasis mine):
When validating using a accelerator that splits data from each batch
across GPUs, sometimes you might need to aggregate them on the master
GPU for processing (dp, or ddp2).
And here is accompanying code (validation_epoch_end would receive accumulated data across multiple GPUs from single step in this case, also see the comments):
# Done per-process (GPU)
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat = self.model(x)
loss = F.cross_entropy(y_hat, y)
pred = ...
return {'loss': loss, 'pred': pred}
# Gathered data from all processes (per single step)
# Allows for accumulation so the whole data at the end of epoch
# takes less memory
def validation_step_end(self, batch_parts):
gpu_0_prediction = batch_parts.pred[0]['pred']
gpu_1_prediction = batch_parts.pred[1]['pred']
# do something with both outputs
return (batch_parts[0]['loss'] + batch_parts[1]['loss']) / 2
def validation_epoch_end(self, validation_step_outputs):
for out in validation_step_outputs:
# do something with preds
Tips
Focus on per-device calculations and keep the number of between-GPU transfers as small as possible (see the sketch after this list)
Inside validation_step (or training_step if that's what you want, this is general) calculate f1, precision, recall and whatever else on a per-batch basis
Return those values (say, as a dict). Now you will return 3 numbers from each device instead of (batch, outputs) (which could be significantly larger)
Inside validation_step_end get those 3 values (actually (2, 3) if you have 2 GPUs) and sum/take mean of them and return 3 values
Now validation_epoch_end will get (steps, 3) values that you can use to accumulate
It would be even better if, instead of operating on a list of values during validation_epoch_end, you accumulated them into another 3 running values (if you have a lot of validation steps, the list could grow too large), but this should be enough.
AFAIK PyTorch-Lightning doesn't do this (e.g. instead of adding to list, apply some accumulator directly), but I might be mistaken, so any correction would be great.
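A minimal sketch of this pattern (a plain accuracy is used just for illustration; F is torch.nn.functional as in the snippet above, and the reduction in validation_step_end assumes a dp/ddp2-style accelerator that hands you one value per GPU):
def validation_step(self, batch, batch_idx):
    x, y = batch
    y_hat = self.model(x)
    loss = F.cross_entropy(y_hat, y)
    acc = (y_hat.argmax(dim=1) == y).float().mean()
    # only small scalars leave this device
    return {'loss': loss, 'acc': acc}

def validation_step_end(self, batch_parts):
    # one entry per GPU for each key; reduce them to a single scalar
    return {k: v.mean() for k, v in batch_parts.items()}

def validation_epoch_end(self, step_outputs):
    avg_acc = torch.stack([o['acc'] for o in step_outputs]).mean()
    self.log('val_acc', avg_acc)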
| https://stackoverflow.com/questions/66854148/ |
CUDA initialization: CUDA unknown error - this may be due to an incorrectly set up environment | I am trying to install torch with CUDA support.
Here is the result of my collect_env.py script:
PyTorch version: 1.7.1+cu101
Is debug build: False
CUDA used to build PyTorch: 10.1
ROCM used to build PyTorch: N/A
OS: Ubuntu 20.04.1 LTS (x86_64)
GCC version: (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0
Clang version: Could not collect
CMake version: Could not collect
Python version: 3.9 (64-bit runtime)
Is CUDA available: False
CUDA runtime version: 10.1.243
GPU models and configuration: GPU 0: GeForce GTX 1080
Nvidia driver version: 460.39
cuDNN version: Could not collect
HIP runtime version: N/A
MIOpen runtime version: N/A
Versions of relevant libraries:
[pip3] numpy==1.19.2
[pip3] torch==1.7.1+cu101
[pip3] torchaudio==0.7.2
[pip3] torchvision==0.8.2+cu101
[conda] blas 1.0 mkl
[conda] cudatoolkit 10.1.243 h6bb024c_0
[conda] mkl 2020.2 256
[conda] mkl-service 2.3.0 py39he8ac12f_0
[conda] mkl_fft 1.3.0 py39h54f3939_0
[conda] mkl_random 1.0.2 py39h63df603_0
[conda] numpy 1.19.2 py39h89c1606_0
[conda] numpy-base 1.19.2 py39h2ae0177_0
[conda] torch 1.7.1+cu101 pypi_0 pypi
[conda] torchaudio 0.7.2 pypi_0 pypi
[conda] torchvision 0.8.2+cu101 pypi_0 pypi
Process finished with exit code 0
Here is the output of nvcc -V:
nvcc: NVIDIA (R) Cuda compiler driver
Copyright (c) 2005-2019 NVIDIA Corporation
Built on Sun_Jul_28_19:07:16_PDT_2019
Cuda compilation tools, release 10.1, V10.1.243
Finally, here is the output of nvidia-smi
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 460.39 Driver Version: 460.39 CUDA Version: 11.2 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 GeForce GTX 1080 Off | 00000000:01:00.0 On | N/A |
| 0% 52C P0 46W / 180W | 624MiB / 8116MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
| 0 N/A N/A 873 G /usr/lib/xorg/Xorg 101MiB |
| 0 N/A N/A 1407 G /usr/lib/xorg/Xorg 419MiB |
| 0 N/A N/A 2029 G ...AAAAAAAAA= --shared-files 90MiB |
+-----------------------------------------------------------------------------+
However, when I try to run
print(torch.cuda.is_available())
I get the following error:
UserWarning: CUDA initialization: CUDA unknown error - this may be due to an incorrectly set up environment, e.g. changing env variable CUDA_VISIBLE_DEVICES after program start. Setting the available devices to be zero. (Triggered internally at /pytorch/c10/cuda/CUDAFunctions.cpp:100.)
return torch._C._cuda_getDeviceCount() > 0
I have performed a reboot, and I have followed the post-installation steps as detailed here.
| I had the same issue, and in my case the solution was very easy; however, it wasn't easy to find. I had to remove and re-insert the nvidia_uvm module. So:
> sudo rmmod nvidia_uvm
> sudo modprobe nvidia_uvm
That's all. Just before these commands, collect_env.py reported "Is CUDA available: False". Afterwards: "Is CUDA available: True".
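As a quick sanity check afterwards, you can re-run the availability check from the shell (just an illustration of how to verify the fix):
> python -c "import torch; print(torch.cuda.is_available())"
True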
| https://stackoverflow.com/questions/66857471/ |
Could not find a version that satisfies the requirement torch==1.7.0+cpu | I want to install torch==1.7.0+cpu from requirements.txt
I have an error
Could not find a version that satisfies the requirement torch==1.7.0+cpu (from -r requirements.txt (line 42)) (from versions: 0.1.2, 0.1.2.post1, 0.1.2.post2, 0.3.1, 0.4.0, 0.4.1, 1.0.0, 1.0.1, 1.0.1.post2, 1.1.0, 1.2.0, 1.3.0, 1.3.1, 1.4.0, 1.5.0, 1.5.1, 1.6.0, 1.7.0, 1.7.1, 1.8.0, 1.8.1)
No matching distribution found for torch==1.7.0+cpu
I am using
Python 3.6.12 (default, Aug 18 2020, 02:08:22)
[GCC 5.4.0 20160609] on linux
| If you have this issue while installing your modules from requirements.txt then you could simply add the following line to the top of your requirements.txt file
-f https://download.pytorch.org/whl/torch_stable.html
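For example, the relevant part of requirements.txt could then look like this (the torchvision pin is only illustrative; any +cpu build listed on that index works the same way):
-f https://download.pytorch.org/whl/torch_stable.html
torch==1.7.0+cpu
torchvision==0.8.1+cpu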
| https://stackoverflow.com/questions/66858277/ |
Get PyTorch tensor values as an integer in Python | I have the output of my torch tensor, which looks like below
(coordinate of a bounding box in object detection)
[tensor(299., device='cuda:0'), tensor(272., device='cuda:0'), tensor(327., device='cuda:0'), tensor(350., device='cuda:0')]
I want to extract each of the tensor values as an int, in the form minx, miny, maxx, maxy,
so that I can pass them to a shapely function in the form below:
from shapely.geometry import box
minx,miny,maxx,maxy=1,2,3,4
b = box(minx,miny,maxx,maxy)
What's the best way to do it, handling both CUDA and CPU tensors without raising exceptions?
| minx, miny, maxx, maxy = [int(t.item()) for t in tensors]
where tensors is the list of tensors.
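Put together with the shapely call from the question, this could look as follows (just a sketch; .item() copies the scalar to host memory, so it works whether the tensors live on the GPU or the CPU):
from shapely.geometry import box

# tensors is the list from the question, e.g. [tensor(299., device='cuda:0'), ...]
minx, miny, maxx, maxy = [int(t.item()) for t in tensors]
b = box(minx, miny, maxx, maxy)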
| https://stackoverflow.com/questions/66874669/ |
Dead Kernel after running torchvision.utils.make_grid(images) | To make it simple, I am following this tutorial provided by PyTorch to create a CNN.
However, it appears that when I'm running this particular code block with this respective line:
# show images
imshow(torchvision.utils.make_grid(images))
It somehow kills the kernel, which confuses me because it is only a simple function.
I have also gone through numerous SO posts related to PyTorch, but none of them are related to torchvision. The most similar issue I found was posted 5 months ago without an answer.
Please let me know if I need to add any information to make this question clearer.
| The same thing happened to me. Running these commands in the Python interpreter, I got an error that led me to this:
Error #15: Initializing libiomp5md.dll, but found libiomp5md.dll already initialized
when I added the following, it worked:
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
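A likely-safer placement (an assumption on my part, since the OpenMP runtime is initialized during the heavy imports) is to set the variable before importing torch, torchvision and matplotlib:
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'  # set before the imports below

import torch
import torchvision
import matplotlib.pyplot as plt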
| https://stackoverflow.com/questions/66875621/ |
Federated reinforcement learning | I am implementing federated deep Q-learning by PyTorch, using multiple agents, each running DQN.
My problem is that when I use multiple replay buffers, one per agent, each appending the experiences of the corresponding agent, two elements of the experiences in each agent's replay buffer, i.e. "current_state" and "next_state", become the same after the first time slot. That is, in each buffer we see the same values for all current states and the same values for all next states.
I have included simplified parts of the code and results below. Why does appending change the current states and next states already existing in the buffer? Is there something wrong with defining the buffers as a global variable, or do you have another idea?
<<< time 0 and agent 0:
current_state[0] = [1,2]
next_state[0] = [11,12]
*** experience: (array([ 1., 2.]), 2.0, array([200]), array([ 11., 12.]), 0)
*** buffer: deque([(array([ 1., 2.]), 2.0, array([200]), array([ 11., 12.]), 0)], maxlen=10000)
<<< time 0 and agent 1:
current_state[1] = [3, 4]
next_state[1] = [13, 14]
*** experience: (array([ 3., 4.]), 4.0, array([400]), array([ 13., 14.]), 0)
*** buffer: deque([(array([ 1., 2.]), 4.0, array([400]), array([ 11., 12.]), 0)], maxlen=10000)
<<< time 1 and agent 0:
current_state = [11,12]
next_state[0] = [110, 120]
*** experience: (array([ 11., 12.]), 6.0, array([600]), array([ 110., 120.]), 0)
*** buffer: deque([(array([ 11., 12.]), 2.0, array([200]), array([ 110., 120.]), 0),(array([ 11., 12.]), 6.0, array([600]), array([ 110., 120.]), 0)], maxlen=10000)
<<< time 1 and agent 1:
current_state = [13, 14]
next_state[1] = [130, 140]
*** experience: (array([ 13., 14.]), 8.0, array([800]), array([ 130., 140.]), 0)
*** buffer: deque([(array([ 13., 14.]), 4.0, array([400]), array([ 130., 140.]), 0),(array([ 13., 14.]), 8.0, array([800]), array([ 130., 140.]), 0)], maxlen=10000)
class BasicBuffer:
def __init__(self, max_size):
self.max_size = max_size
self.buffer = deque(maxlen=10000)
def add(self, current_state, action, reward, next_state, done):
## """"Add a new experience to buffer.""""
experience = (current_state, action, np.array([reward]), next_state, done)
self.buffer.append(experience)
def DQNtrain(env, state_size, agent):
for time in range(time_max):
for e in range(agents_numbers):
current_state[e,:]
next_state_edge[e, :]
## """"Add a new experience to buffer.""""
replay_buffer_t[e].add(current_state, action, reward, next_state, done)
current_state[e, :] = next_state[e, :]
if __name__ == '__main__':
DQNtrain(env, state_size, agent)
replay_buffer_t = [[] for _ in range(edge_max)]
for e in range(edge_max):
replay_buffer_t[e] = BasicBuffer(max_size=agent_buffer_size)
| I just found what is causing the problem. I should have used copy.deepcopy() for experiences:
import copy  # needed at the top of the file

experience = copy.deepcopy((current_state, action, np.array([reward]), next_state, done))
self.buffer.append(experience)
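An alternative sketch (assuming current_state and next_state are NumPy arrays that are mutated in place later, as in the question) is to copy only the arrays instead of deep-copying the whole tuple:
def add(self, current_state, action, reward, next_state, done):
    # store detached copies so later in-place updates don't alter old experiences
    experience = (np.array(current_state, copy=True), action, np.array([reward]),
                  np.array(next_state, copy=True), done)
    self.buffer.append(experience)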
| https://stackoverflow.com/questions/66875831/ |
Finding the Hessian matrix of this function | Hi I have the following function:
f(x) = sum_{i=1}^{5000} [ -log(1 - x_i^2) - log(1 - a_i^T x) ], where each a_i is a random vector, and we are trying to minimize this function's value via Newton's method.
I need a way to calculate the Hessian matrix with respect to (x1, x2, x3, ...). I tried auto-gradient but it took too much time. Here is my current code:
from autograd import elementwise_grad as egrad
from autograd import jacobian
import autograd.numpy as np
x=np.zeros(5000);
a = np.random.rand(5000,5000)
def f (x):
sum = 0;
for i in range(5000):
sum += -np.log(1 - x[i]*x[i]) - np.log(1-np.dot(x,a[i]))
return sum;
df = egrad(f)
d2f = jacobian(egrad(df));
print(d2f(x));
I have tried looking into sympy but I am confused on how to proceed.
| PyTorch has a GPU optimised hessian operation:
import torch
torch.autograd.functional.hessian(func, inputs)
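A rough sketch of how it could be applied to the function from the question (untested and only illustrative; move a and x to the GPU with .cuda() to benefit from the GPU path, and note that the full 5000x5000 Hessian is materialized in memory):
import torch

a = torch.rand(5000, 5000)

def f(x):
    # f(x) = sum_i [ -log(1 - x_i^2) - log(1 - a_i^T x) ]
    return (-torch.log(1 - x ** 2)).sum() - torch.log(1 - a @ x).sum()

x = torch.zeros(5000)
H = torch.autograd.functional.hessian(f, x)
print(H.shape)  # torch.Size([5000, 5000])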
| https://stackoverflow.com/questions/66881349/ |
What does the 1 in torch.Size([64, 1, 28, 28]) mean when I check a tensor shape? | I'm following this tutorial on towardsdatascience.com because I wanted to try the MNIST dataset using Pytorch since I've already done it using keras.
So in Step 2, knowing the dataset better, they print the trainloader's shape and it returns torch.Size([64, 1, 28, 28]). I understand that 64 is the number of images in that loader and that each one is a 28x28 image but what does the 1 mean exactly?
| It simply means that each image of size 28x28 has 1 channel, i.e. it's a grayscale image. If it were a color image, there would be a 3 instead of the 1, since a color image has 3 channels (RGB).
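For example (random values, shapes only):
import torch

gray_batch = torch.randn(64, 1, 28, 28)  # 64 grayscale images, 1 channel each
rgb_batch = torch.randn(64, 3, 28, 28)   # 64 RGB images, 3 channels each
print(gray_batch.shape)                  # torch.Size([64, 1, 28, 28])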
| https://stackoverflow.com/questions/66885978/ |
Output evaluation loss after every n-batches instead of epochs with pytorch | Instead of printing the evaluation loss every epoch I would like to output it after every n-batches.
I have around 150'000 batches per epoch. I would like to output the evaluation loss every 50'000 batches.
Is this even possible? I am using pytorch and a pretrained bert model from huggingface.
My train loop:
best_valid_loss = float('inf')
train_losses=[]
valid_losses=[]
for epoch in range(params['epochs']):
print('\n Epoch {:} / {:}'.format(epoch + 1, params['epochs']))
#train model
train_loss = train(scheduler, optimizer)
#evaluate model
valid_loss = evaluate()
#save the best model
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict(), model_file)
# append training and validation loss
train_losses.append(train_loss)
valid_losses.append(valid_loss)
print(f'\nTraining Loss: {train_loss:.3f}')
print(f'Validation Loss: {valid_loss:.3f}')
| If you want to act only on every n-th element of, for example, some list, you can do this using enumerate:
n = 50000
for i,epoch in enumerate(some_list):
if i%n == 0:
print('\n Epoch {:} / {:}'.format(epoch + 1, params['epochs']))
...
But in your case, you only need an additional condition:
n = 50000
for epoch in range(params['epochs']):
#train model
train_loss = train(scheduler, optimizer)
#evaluate model
valid_loss = evaluate()
#save the best model
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict(), model_file)
# append training and validation loss
train_losses.append(train_loss)
valid_losses.append(valid_loss)
##
# print every n cycles
if epoch%n == 0:
print('\n Epoch {:} / {:}'.format(epoch + 1, params['epochs']))
print(f'\nTraining Loss: {train_loss:.3f}')
print(f'Validation Loss: {valid_loss:.3f}')
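Since you want the evaluation loss every n batches rather than every n epochs, the same condition can also be moved inside the batch loop of train(). A sketch under the assumption that model, train_dataloader and evaluate() exist as in your code, with a hypothetical training_step() standing in for your existing per-batch logic:
n = 50000

def train(scheduler, optimizer):
    model.train()
    total_loss = 0
    for step, batch in enumerate(train_dataloader):
        total_loss += training_step(batch, scheduler, optimizer)
        if step % n == 0 and step > 0:
            valid_loss = evaluate()  # evaluate() switches to eval mode internally
            print(f'Step {step} - Validation Loss: {valid_loss:.3f}')
            model.train()            # switch back to training mode
    return total_loss / len(train_dataloader)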
| https://stackoverflow.com/questions/66889503/ |