"for tokens_tensor, segments_tensors, att_mask, pos_id, trg in data_loader: NameError: name 'data_loader' is not defined"
I am trying to implement question answering model with a BERT transformer implemented by jugapuff. Link to the code: https://github.com/jugapuff/BERT-for-bAbi-task After executing the main.py file which is written below as well, I m getting this error: "for tokens_tensor, segments_tensors, att_mask, pos_id, trg in data_loader: NameError: name 'data_loader' is not defined" from dataloader import bAbi_Dataset import torch import torch.nn as nn from model import model from pytorch_transformers import AdamW device = torch.device("cuda" if torch.cuda.is_available() else "cpu") if torch.cuda.is_available(): print("GPU:" + str(torch.cuda.get_device_name(0))) my_model = model() my_model.to(device) optimizer = AdamW(my_model.parameters()) criterion = nn.NLLLoss() EPOCHS = 10 for epoch in range(1, EPOCHS+1): my_model.train() train_loss = 0 length = 0 for tokens_tensor, segments_tensors, att_mask, pos_id, trg in data_loader: output = my_model(tokens_tensor.to(device), segments_tensors.to(device), att_mask.to(device), pos_id.to(device)) loss = criterion(output, trg.to(device)) optimizer.zero_grad() loss.backward() optimizer.step() length+=1 train_loss += loss.item() if length % 10 == 0: print("\t\t{:3}/25000 : {}".format(length, train_loss / length)) epoch_loss = train_loss / length print("##################") print("{} epoch Loss : {:.4f}".format(epoch, epoch_loss)) and data_loader.py is as import os import torch import torch.utils.data as data from pytorch_transformers import BertTokenizer def _parse( file, only_supporting=False): data, story = [], [] for line in file: tid, text = line.rstrip('\n').split(' ', 1) if tid == '1': story = [] if text.endswith('.'): story.append(text[:]) else: query, answer, supporting = (x.strip() for x in text.split('\t')) if only_supporting: substory = [story[int(i) - 1] for i in supporting.split()] else: substory = [x for x in story if x] data.append((substory, query[:-1], answer)) story.append("") return data def build_trg_dics(tenK=True, path="tasks_1-20_v1-2", train=True): if tenK: dirname = os.path.join(path, 'en-10k') else: dirname = os.path.join(path, 'en') for (dirpath, dirnames, filenames) in os.walk(dirname): filenames = filenames if train: filenames = [filename for filename in filenames if "train.txt" in filename] else: filenames = [filename for filename in filenames if "test.txt" in filename] temp = [] for filename in filenames: f = open(os.path.join(dirname, filename), 'r') parsed =_parse(f) temp.extend([d[2] for d in parsed]) temp = set(temp) trg_word2id = {word:i for i, word in enumerate(temp)} trg_id2word = {i:word for i, word in enumerate(temp)} return trg_word2id, trg_id2word class bAbi_Dataset(data.Dataset): def __init__(self, trg_word2id, tenK=True, path = "tasks_1-20_v1-2", train=True): # joint is Default self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') if tenK: dirname = os.path.join(path, 'en-10k') else: dirname = os.path.join(path, 'en') for (dirpath, dirnames, filenames) in os.walk(dirname): filenames = filenames if train: filenames = [filename for filename in filenames if "train.txt" in filename] else: filenames = [filename for filename in filenames if "test.txt" in filename] self.src = [] self.trg = [] for filename in filenames: f = open(os.path.join(dirname, filename), 'r') parsed = _parse(f) self.src.extend([d[:2] for d in parsed]) self.trg.extend([trg_word2id[d[2]] for d in parsed]) self.trg = torch.tensor(self.trg) def __getitem__(self, index): src_seq = self.src[index] trg = self.trg[index] src_seq, seg_seq, att_mask, 
pos_id = self.preprocess_sequence(src_seq) return src_seq, seg_seq, att_mask, pos_id, trg def __len__(self): return len(self.trg) def preprocess_sequence(self, seq): text = ["[CLS]"] + list(seq[0]) + ["[SEP]"] + [seq[1]] + ["[SEP]"] tokenized_text = self.tokenizer.tokenize(" ".join(text)) indexed_text = self.tokenizer.convert_tokens_to_ids(tokenized_text) where_is_sep = indexed_text.index(102) + 1 segment_ids = [0 ]* (where_is_sep) + [1] * (len(indexed_text)- where_is_sep) attention_mask = [1] *len(indexed_text) pos_id = [i for i in range(len(indexed_text))] return torch.tensor(indexed_text), torch.tensor(segment_ids), torch.tensor(attention_mask), torch.tensor(pos_id) def collate_fn(data): def merge(sequences): lengths = [len(seq) for seq in sequences] padded_seqs = torch.zeros(len(sequences), 512).long() for i, seq in enumerate(sequences): end = lengths[i] if end <= 512: padded_seqs[i, :end] = seq[:end] else: padded_seqs[i] = seq[-512:] return padded_seqs def pos_merge(sequences): lengths = [len(seq) for seq in sequences] padded_seqs = torch.zeros(len(sequences), 512).long() for i, seq in enumerate(sequences): padded_seqs[i] = torch.tensor([i for i in range(512)]) return padded_seqs src_seqs, seg_seqs, att_mask, pos_id, trgs = zip(*data) src_seqs = merge(src_seqs) seg_seqs = merge(seg_seqs) att_mask = merge(att_mask) pos_id = pos_merge(pos_id) trgs = torch.tensor(trgs) return src_seqs, seg_seqs, att_mask, pos_id, trgs data_loader variable declaration in main.py is missing. So I tried to load data_loader as for tokens_tensor, segments_tensors, att_mask, pos_id, trg in dataloader.collate_fn(bAbi_Dataset): use collate_fn() function in data_loader.py, but it did not work. When I change it as above, it gives the following error: Traceback (most recent call last): File "main.py", line 27, in <module> File "/content/BERT-for-bAbi-task/dataloader.py", line 133, in collate_fn src_seqs, seg_seqs, att_mask, pos_id, trgs = zip(*data) File "/usr/lib/python3.6/typing.py", line 682, in inner return func(*args, **kwds) File "/usr/lib/python3.6/typing.py", line 1107, in __getitem__ params = tuple(_type_check(p, msg) for p in params) File "/usr/lib/python3.6/typing.py", line 1107, in <genexpr> params = tuple(_type_check(p, msg) for p in params) File "/usr/lib/python3.6/typing.py", line 374, in _type_check raise TypeError(msg + " Got %.100r." % (arg,)) TypeError: Parameters to generic types must be types. Got 0. Could anyone please help me how to correct the error?
I will just give you some pointers: collate_fn is not meant to be called with a dataset as its argument. It is a special callback function passed to a dataloader and used to collate batch elements into a batch. Since bAbi_Dataset in /dataloader.py is defined as a torch.utils.data.Dataset, I would guess you are meant to initialize it instead. It is defined here as: def __init__(self, trg_word2id, tenK=True, path = "tasks_1-20_v1-2", train=True) There is another function, build_trg_dics, in /dataloader.py which is used to parse the content from the files. You should take a look at both before setting the right arguments for bAbi_Dataset. Lastly, once you have your dataset initialized, you can attach a dataloader to it using torch.utils.data.DataLoader. This would look like: data_loader = DataLoader(dataset, batch_size=16) At this point, you might even need to plug in the collate function provided in /dataloader.py. If you don't really know what you are doing, I would suggest you start with a working repository and work your way from there. Good luck!
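Putting those pointers together, a minimal sketch (assuming the bAbI data sits under "tasks_1-20_v1-2" as in the repository, and that collate_fn is the function defined in dataloader.py):

```python
from torch.utils.data import DataLoader
from dataloader import bAbi_Dataset, build_trg_dics, collate_fn

# build the target vocabulary first, then hand it to the dataset
trg_word2id, trg_id2word = build_trg_dics(tenK=True, path="tasks_1-20_v1-2", train=True)
dataset = bAbi_Dataset(trg_word2id, tenK=True, path="tasks_1-20_v1-2", train=True)

# the collate function pads the variable-length sequences into fixed-size batches
data_loader = DataLoader(dataset, batch_size=16, shuffle=True, collate_fn=collate_fn)

for tokens_tensor, segments_tensors, att_mask, pos_id, trg in data_loader:
    ...  # training step as in main.py
```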
https://stackoverflow.com/questions/65675445/
Torch, how to use Multiple GPU for different dataset
Assume that I have 4 different datasets and 4 GPUs, like below. 4 datasets: dat0 = [np.array(...)], dat1 = [np.array(...)], dat2 = [np.array(...)], dat3 = [np.array(...)] 4 GPUs: device = [torch.device(f'cuda:{i}') for i in range(torch.cuda.device_count())] Assume all four datasets have already been converted into tensors and transferred to the 4 different GPUs. Now, I have a function f from another module which can be used on a GPU. How can I do the following at the same time: compute the 4 results ans0 = f(dat0) on device[0], ans1 = f(dat1) on device[1], ans2 = f(dat2) on device[2], ans3 = f(dat3) on device[3], then move all 4 answers back to the CPU and calculate the sum ans = ans0 + ans1 + ans2 + ans3?
Assuming you only need ans for inference: you can easily perform those operations, but you will need a copy of f on all four GPUs at the same time. Here is what I would try: duplicate f four times, send one copy to each GPU, compute the intermediate results, then send each result back to the CPU for the final reduction. If f is an nn.Module, copy.deepcopy is the usual way to duplicate it (modules have no .clone() method):

import copy

fns = [copy.deepcopy(f).to(device) for device in devices]
results = []
for fn, data in zip(fns, datasets):
    result = fn(data).detach().cpu()
    results.append(result)
ans = torch.stack(results).sum(dim=0)

Note that this loop issues the work from a single host thread, so the four forward passes will not fully overlap; see the threaded sketch below.
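To actually drive the four devices concurrently, a minimal sketch using one host thread per GPU (assuming f is an nn.Module and dat0..dat3 are already tensors on their respective devices, as stated in the question):

```python
import copy
import threading
import torch

devices = [torch.device(f'cuda:{i}') for i in range(torch.cuda.device_count())]
datasets = [dat0, dat1, dat2, dat3]                  # from the question, one per device
replicas = [copy.deepcopy(f).to(d) for d in devices]

results = [None] * len(devices)

def worker(i):
    # each thread drives its own GPU, so kernels can overlap across devices
    with torch.no_grad():
        results[i] = replicas[i](datasets[i]).cpu()

threads = [threading.Thread(target=worker, args=(i,)) for i in range(len(devices))]
for t in threads:
    t.start()
for t in threads:
    t.join()

ans = torch.stack(results).sum(dim=0)
```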
https://stackoverflow.com/questions/65675732/
How does torchvision.transforms.Normalize operate?
I don't understand how the normalization in Pytorch works. I want to set the mean to 0 and the standard deviation to 1 across all columns in a tensor x of shape (2, 2, 3). A simple example: >>> x = torch.tensor([[[ 1., 2., 3.], [ 4., 5., 6.]], [[ 7., 8., 9.], [10., 11., 12.]]]) >>> norm = transforms.Normalize((0, 0), (1, 1)) >>> norm(x) tensor([[[ 1., 2., 3.], [ 4., 5., 6.]], [[ 7., 8., 9.], [10., 11., 12.]]]) So nothing has changed when applying the normalization transform. Why is that?
To give an answer to your question: you've now realized that torchvision.transforms.Normalize doesn't work as you had anticipated. That's because it isn't meant to normalize (i.e. make your data range in [0, 1]) nor to standardize (i.e. make your data's mean=0 and std=1, which is what you're looking for). The operation performed by T.Normalize is merely a shift-scale transform: output[channel] = (input[channel] - mean[channel]) / std[channel] The parameters are named mean and std, which seems rather misleading given that they are not meant to refer to the desired output statistics but can instead be arbitrary values. That's right: if you input mean=0 and std=1, it will give you output = (input - 0) / 1 = input. Hence the result you received, where the function norm had no effect on your tensor values when you were expecting a tensor with mean 0 and std 1. However, when you provide the statistics of the data itself as parameters, i.e. mean=mean(data) and std=std(data), you end up calculating the z-score of your data channel by channel, which is what is usually called 'standardization'. So in order to actually get mean=0 and std=1, you first need to compute the mean and standard deviation of your data. If you do: >>> mean, std = x.mean(), x.std() (tensor(6.5000), tensor(3.6056)) it will give you the global average and global standard deviation, respectively. Instead, what you want is to measure the 1st and 2nd order statistics per channel. Therefore, we need to apply torch.mean and torch.std on all dimensions except dim=1. Both of those functions can receive a tuple of dimensions: >>> mean, std = x.mean((0,2)), x.std((0,2)) (tensor([5., 8.]), tensor([3.4059, 3.4059])) The above is the correct mean and standard deviation of x measured along each channel. From there you can go ahead and use T.Normalize(mean, std) to transform your data x with the correct shift-scale parameters. >>> norm(x) tensor([[[-1.5254, -1.2481, -0.9707], [-0.6934, -0.4160, -0.1387]], [[ 0.1387, 0.4160, 0.6934], [ 0.9707, 1.2481, 1.5254]]])
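To make the shift-scale behaviour concrete, a small sketch verifying that T.Normalize(mean, std) is exactly (input - mean) / std applied per channel (the example values here are arbitrary):

```python
import torch
import torchvision.transforms as T

img = torch.rand(3, 4, 4)                       # a fake (C, H, W) image
mean, std = [0.5, 0.5, 0.5], [0.2, 0.2, 0.2]

out = T.Normalize(mean, std)(img)
ref = (img - torch.tensor(mean)[:, None, None]) / torch.tensor(std)[:, None, None]
print(torch.allclose(out, ref))                 # True
```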
https://stackoverflow.com/questions/65676151/
Choose rows of a 3D tensor based on some repeated indices. Tricky slicing
So this is a tricky bit of Tensor slicing I'm trying. I have a tensor A which is 3d >> A.shape torch.Size([60, 10, 16]) So this tensor is composed of 5 different data samples, where in dim=0 we have cuts at split_ids = [10, 14, 10, 12, 14] i.e first 10 elements belong to sample1; next 14 belong to sample2 & so on.. I can split the tensor in such a way: >> torch.split(A, split_ids, dim=0) (tensor([[[-0.3888, -...Backward>), tensor([[[ 2.6473e-0...Backward>), tensor([[[ 1.1621, ...Backward>), tensor([[[ 0.1953, -...Backward>), tensor([[[ 8.1993e-0...Backward>)) This comprises of tuple of 5 elements (or 5 tensors), of shapes Size(10,10,16);Size(14,10,16); and so on for the splits we had. Now, comes the tricky part - I have another indices mapping that I have derived some previous processing for each of these individual splits. Its a list of 1d tensors like this: >> reverse_map [tensor([1, 2, 2, 1, ...='cuda:0'), tensor([ 7, 7, 9, ...='cuda:0'), tensor([7, 7, 4, 3, ...='cuda:0'), tensor([ 9, 4, 9, ...='cuda:0'), tensor([ 0, 0, 0, ...='cuda:0')] >> reverse_map[0] tensor([1, 2, 2, 1, 1, 2, 0, 1, 2, 0, 0, 1, 6, 1, 7, 1, 2, 7, 4, 5, 3, 4, 9, 7, 7, 3, 8, 7, 7, 7], device='cuda:0') So I need to basically use these indices and pull the count of these indices from the above split tensors i.e For tensor.Size(10,10,16) I need to pull [0, 0:3, :] which is for index 0 in dim=0, I need to pull 0:3 in dim=1 because there are 3 0's in the indexing. Then at index 1 I need to pull first 4 vectors since there are 4 1's.. and so on. Whats the best way to do this ? Does scatter_() help here ?
I think a combination of torch.bincount and this answer can give you what you want. For simplicity, let's focus on the first tensor split from A and the first reverse_map. You can then apply this code to the other splits and reverse_maps. Let source be the first split, of shape (10, 10, 16). Here's how it goes:

# inputs
source = torch.arange(10*10*16).view(10, 10, 16)
reverse_map = torch.tensor([1, 2, 2, 1, 1, 2, 0, 1, 2, 0, 0, 1, 6, 1, 7, 1, 2, 7, 4, 5, 3, 4, 9, 7, 7, 3, 8, 7, 7, 7])

# how many columns to pull for each row - use bincount to find out!
lengths = torch.bincount(reverse_map, minlength=source.shape[0])

# use a mask to pull the elements
mask = torch.zeros(source.shape[0], source.shape[1] + 1, dtype=source.dtype, device=source.device)
mask[(torch.arange(source.shape[0]), lengths)] = 1
mask = mask.cumsum(dim=1)[:, :-1] == 0

# expand the mask to dim=2 as well and pull the elements
out = source[mask[..., None].expand(-1, -1, source.shape[2])]

# since you pull a different number of columns per row, you lose the shape of source.
# You need a final split to recover it
target = torch.split(out, (lengths * source.shape[2]).cpu().numpy().tolist())
target = [t_.view(-1, source.shape[2]) for t_ in target]

The output target is a list of 2-D tensors with a varying number of rows in each (according to the counts in reverse_map) and source.shape[2] columns.
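For intuition on the counting step, torch.bincount simply tallies how often each index value occurs, which is exactly "how many columns to pull per row" (shortened example values):

```python
import torch

reverse_map = torch.tensor([1, 2, 2, 1, 1, 2, 0, 1, 2, 0])
print(torch.bincount(reverse_map, minlength=10))
# tensor([2, 4, 4, 0, 0, 0, 0, 0, 0, 0]) -> pull 2 columns for row 0, 4 for row 1, 4 for row 2, ...
```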
https://stackoverflow.com/questions/65677853/
TypeError: expected CPU (got CUDA)
import torch torch.cuda.is_available() torch.cuda.current_device() torch.cuda.get_device_name(0) torch.cuda.memory_reserved() torch.cuda.memory_allocated() torch.cuda.memory_allocated() var1=torch.FloatTensor([1.0,2.0,3.0]).cuda() var1 var1.device import pandas as pd df=pd.read_csv('diabetes.csv') df.head() df.isnull().sum() import seaborn as sns import numpy as np df['Outcome']=np.where(df['Outcome']==1,"Diabetic","No Diabetic") df.head() sns.pairplot(df,hue="Outcome") X=df.drop('Outcome',axis=1).values### independent features y=df['Outcome'].values###dependent features from sklearn.model_selection import train_test_split X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=0) y_train import torch import torch.nn as nn import torch.nn.functional as F X_train=torch.FloatTensor(X_train).cuda() X_test=torch.FloatTensor(X_test).cuda() y_train=torch.LongTensor(y_train).cuda() y_test=torch.LongTensor(y_test).cuda() when I Run this code I got this error: Traceback (most recent call last): File "<stdin>", line 24, in <module> TypeError: expected CPU (got CUDA) How to can I solve this error?
To transfer the variables to GPU, try the following: device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') X_train=torch.FloatTensor(X_train).to(device) X_test=torch.FloatTensor(X_test).to(device) y_train=torch.LongTensor(y_train).to(device) y_test=torch.LongTensor(y_test).to(device)
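As a side note, a slightly more idiomatic sketch when starting from NumPy arrays (this assumes the arrays are numeric; note that in the code above, np.where turned the Outcome column into the strings "Diabetic"/"No Diabetic", which would have to be mapped back to integers before building a LongTensor):

```python
import torch

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# build tensors from the arrays, then move them to the device in one step
X_train = torch.from_numpy(X_train).float().to(device)
y_train = torch.from_numpy(y_train).long().to(device)
```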
https://stackoverflow.com/questions/65679823/
FFT loss in PyTorch
I want to compute the loss between the GT and the output of my network (called TDN) in the frequency domain by computing the 2D FFT. The tensors are of dim batch x channel x height x width. amp_ip, phase_ip = 2DFFT(TDN(ip)) amp_gt, phase_gt = 2DFFT(TDN(gt)) loss = ||amp_ip - amp_gt|| For computing the FFT I can use torch.fft(ip, signal_ndim = 2). But the output is in a + j b format, i.e. rectangular coordinates, and NOT decomposed into phase and amplitude. How can I convert a + j b into amp exp(j phase) format in PyTorch? A side concern is also whether signal_ndim should be kept at 2 to compute the 2D FFT, or something else. The following description, which describes the loss that I plan to implement, may be useful.
The question is answered by the GitHub code file shared by @akshayk07 in the comments. Extracting the relevant information from that code, the concise answer to the question is:

fft_im = torch.rfft(img.clone(), signal_ndim=2, onesided=False)
# fft_im: size should be bx3xhxwx2
fft_amp = fft_im[:,:,:,:,0]**2 + fft_im[:,:,:,:,1]**2
fft_amp = torch.sqrt(fft_amp)  # this is the amplitude
fft_pha = torch.atan2(fft_im[:,:,:,:,1], fft_im[:,:,:,:,0])  # this is the phase

As of PyTorch 1.7.1, choose torch.rfft over torch.fft, as the latter does not work off the shelf with real-valued tensors propagating in CNNs. It is also a good idea to use the normalization flag of torch.rfft.
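For reference, on newer PyTorch versions (1.8+) torch.rfft is deprecated in favor of the torch.fft module; a sketch of the same decomposition using it (TDN, ip and gt as defined in the question):

```python
import torch

def amp_phase(img):
    f = torch.fft.fft2(img)      # complex tensor, shape (b, 3, h, w)
    return f.abs(), f.angle()    # amplitude and phase

amp_ip, phase_ip = amp_phase(TDN(ip))
amp_gt, phase_gt = amp_phase(TDN(gt))
loss = torch.norm(amp_ip - amp_gt)
```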
https://stackoverflow.com/questions/65680001/
PULSE on github (link provided) RuntimeError: CUDA out of memory.... preventing the program "run.py" from executing
(As a student I am kind of new to this but did quite a bit of research and I got pretty far, I'm super into learning something new through this!) This issue is for the project pulse -> https://github.com/adamian98/pulse the readme if you scroll down a bit on the page, gives a much better explanation than I could. It will also give a direct "correct" path to judge my actions against and make solving the problem a lot easier. Objective: run program using the run.py file Issue: I got a "RuntimeError: CUDA out of memory" despite having a compatible gpu and enough vram Knowledge: when it comes to coding i just started a few days ago and have a dozen hours with anaconda now, comfterable creating environments. What I did was... (the list below is a summary and the specific details are after it) install anaconda use this .yml file -> https://github.com/leihuayi/pulse/blob/feature/docker/pulse.yml (it changes dependencies to work for windows which is why I needed to grab a different one than the one supplied on the master github page) to create a new environment and install the required packages. It worked fantastically! I only got an error trying to install dlib, it didn't seem compatible with A LOT of the packages and my python version. I installed the cuda toolkit 10.2 , cmake 3.17.2, and tried to install dlib into the environment directly. the errors spat out in a blaze of glory. The dlib package seems to be only needed for a different .py file and not run.py though so I think it may be unrelated to this error logs are below and I explain my process in more detail START DETAILS AND LOGS: from here until the "DETAILS 2" section should be enough information to solve, the rest past there is in case error log for runing out of memory--> (after executing the "run.py" file) Loading Synthesis Network Loading Mapping Network Running Mapping Network Traceback (most recent call last): File "C:\Users\micha\anaconda3\envs\Pulse1\pulse-master\run.py", line 58, in model = PULSE(cache_dir=kwargs["cache_dir"]) File "C:\Users\micha\anaconda3\envs\Pulse1\pulse-master\PULSE.py", line 44, in init latent_out = torch.nn.LeakyReLU(5)(mapping(latent)) File "C:\Users\micha\anaconda3\envs\pulse3\lib\site-packages\torch\nn\modules\module.py", line 550, in call result = self.forward(*input, **kwargs) File "C:\Users\micha\anaconda3\envs\Pulse1\pulse-master\stylegan.py", line 233, in forward x = super().forward(x) File "C:\Users\micha\anaconda3\envs\pulse3\lib\site-packages\torch\nn\modules\container.py", line 100, in forward input = module(input) File "C:\Users\micha\anaconda3\envs\pulse3\lib\site-packages\torch\nn\modules\module.py", line 550, in call result = self.forward(*input, **kwargs) File "C:\Users\micha\anaconda3\envs\Pulse1\pulse-master\stylegan.py", line 38, in forward return F.linear(x, self.weight * self.w_mul, bias) File "C:\Users\micha\anaconda3\envs\pulse3\lib\site-packages\torch\nn\functional.py", line 1610, in linear ret = torch.addmm(bias, input, weight.t()) RuntimeError: CUDA out of memory. Tried to allocate 1.91 GiB (GPU 0; 6.00 GiB total capacity; 3.92 GiB already allocated; 744.91 MiB free; 3.93 GiB reserved in total by PyTorch) End of error log. 
NVIDIA-SMI LOG WHILE RUNNING (checking free memory) C:\Users\micha>nvidia-smi --query-gpu=memory.free --format=csv --loop=1 memory.free [MiB] 5991 MiB 5991 MiB 5991 MiB 5991 MiB 5897 MiB 5781 MiB 5685 MiB 1643 MiB 5991 MiB 5991 MiB the program stopped at the 1643MiB DETAILS PART 1: I have an nvidia gpu with 6gb of memory which, (according to other logs posted by the creator of the project-> should have enough memory for this to work "We ran our tests with 8GB of memory but I believe that you should be able to run the code with 4GB as well" -adamian98 ). I'm trying to fix that error and get the run.py to work as intended. Here is the system info using the numba -s command in anaconda (includes hardware info: gpu, my windows version, memory, python version etc.) START CUDA INFO Hardware Information Machine : AMD64 CPU Name : znver1 CPU Count : 16 Number of accessible CPUs : 16 List of accessible CPUs cores : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 CFS Restrictions (CPUs worth of runtime) : None CPU Features : 64bit adx aes avx avx2 bmi bmi2 clflushopt clwb clzero cmov cx16 cx8 f16c fma fsgsbase fxsr lzcnt mmx movbe mwaitx pclmul popcnt prfchw rdpid rdrnd rdseed sahf sha sse sse2 sse3 sse4.1 sse4.2 sse4a ssse3 wbnoinvd xsave xsavec xsaveopt xsaves Memory Total (MB) : 15789 Memory Available (MB) : 8421 OS Information Platform Name : Windows-10-10.0.18362-SP0 Platform Release : 10 OS Name : Windows OS Version : 10.0.18362 OS Specific Version : 10 10.0.18362 SP0 Multiprocessor Free Libc Version : ? Python Information Python Compiler : MSC v.1916 64 bit (AMD64) Python Implementation : CPython Python Version : 3.8.5 Python Locale : en_CA.cp1252 LLVM Information LLVM Version : 10.0.1 CUDA Information CUDA Device Initialized : True CUDA Driver Version : 10020 CUDA Detect Output: Found 1 CUDA devices id 0 b'GeForce GTX 1660 Ti with Max-Q Design' [SUPPORTED] compute capability: 7.5 pci device id: 0 pci bus id: 1 Summary: 1/1 devices are supported CUDA Librairies Test Output: Finding cublas from named cublas.dll trying to open library... ERROR: failed to open cublas: Could not find module 'cublas.dll' (or one of its dependencies). Try using the full path with constructor syntax. Finding cusparse from named cusparse.dll trying to open library... ERROR: failed to open cusparse: Could not find module 'cusparse.dll' (or one of its dependencies). Try using the full path with constructor syntax. Finding cufft from named cufft.dll trying to open library... ERROR: failed to open cufft: Could not find module 'cufft.dll' (or one of its dependencies). Try using the full path with constructor syntax. Finding curand from named curand.dll trying to open library... ERROR: failed to open curand: Could not find module 'curand.dll' (or one of its dependencies). Try using the full path with constructor syntax. Finding nvvm from named nvvm.dll trying to open library... ERROR: failed to open nvvm: Could not find module 'nvvm.dll' (or one of its dependencies). Try using the full path with constructor syntax. Finding cudart from named cudart.dll trying to open library... ERROR: failed to open cudart: Could not find module 'cudart.dll' (or one of its dependencies). Try using the full path with constructor syntax. Finding libdevice from searching for compute_20... ERROR: can't open libdevice for compute_20 searching for compute_30... ERROR: can't open libdevice for compute_30 searching for compute_35... ERROR: can't open libdevice for compute_35 searching for compute_50... 
ERROR: can't open libdevice for compute_50 ROC information ROC Available : False ROC Toolchains : None HSA Agents Count : 0 HSA Agents: None HSA Discrete GPUs Count : 0 HSA Discrete GPUs : None SVML Information SVML State, config.USING_SVML : True SVML Library Loaded : True llvmlite Using SVML Patched LLVM : True SVML Operational : True Threading Layer Information TBB Threading Layer Available : False +--> Disabled due to Unknown import problem. OpenMP Threading Layer Available : True +-->Vendor: MS Workqueue Threading Layer Available : True +-->Workqueue imported successfully. Numba Environment Variable Information None found. Conda Information Conda Build : 3.20.5 Conda Env : 4.9.2 Conda Platform : win-64 Conda Python Version : 3.8.5.final.0 Conda Root Writable : True Installed Packages blas 1.0 mkl ca-certificates 2020.12.8 haa95532_0 certifi 2020.12.5 py38haa95532_0 cffi 1.14.0 py38h7a1dbc1_0 chardet 3.0.4 py38haa95532_1003 cryptography 2.9.2 py38h7a1dbc1_0 cudatoolkit 10.2.89 h74a9793_1 anaconda cycler 0.10.0 py38_0 freetype 2.9.1 ha9979f8_1 icc_rt 2019.0.0 h0cc432a_1 icu 58.2 ha925a31_3 idna 2.9 py_1 intel-openmp 2019.4 245 jpeg 9b hb83a4c4_2 kiwisolver 1.2.0 py38h74a9793_0 libcxx 7.0.0 h1ad3211_1002 conda-forge libpng 1.6.37 h2a8f88b_0 libtiff 4.1.0 h56a325e_0 llvm-meta 7.0.0 0 conda-forge m2-bash 4.3.042 5 m2-gcc-libs 5.3.0 4 m2-libedit 3.1 20150326 m2-libffi 3.2.1 2 m2-libreadline 6.3.008 8 m2-msys2-runtime 2.5.0.17080.65c939c 3 m2-ncurses 6.0.20160220 2 m2w64-gcc-libgfortran 5.3.0 6 m2w64-gcc-libs-core 5.3.0 7 m2w64-gmp 6.1.0 2 m2w64-libwinpthread-git 5.0.0.4634.697f757 2 matplotlib 3.1.3 py38_0 matplotlib-base 3.1.3 py38h64f37c6_0 mkl 2019.4 245 mkl-service 2.3.0 py38h196d8e1_0 mkl_fft 1.0.15 py38h14836fe_0 mkl_random 1.1.0 py38hf9181ef_0 msys2-conda-epoch 20160418 1 ninja 1.9.0 py38h74a9793_0 numpy 1.18.1 py38h93ca92e_0 numpy-base 1.18.1 py38hc3f5095_1 olefile 0.46 py_0 openssl 1.1.1i h2bbff1b_0 pandas 1.0.3 py38h47e9c7a_0 pillow 7.1.2 py38hcc1f983_0 pip 20.0.2 py38_3 powershell_shortcut 0.0.1 3 pycparser 2.20 py_2 pyopenssl 19.1.0 pyhd3eb1b0_1 pyparsing 2.4.7 py_0 pyqt 5.9.2 py38ha925a31_4 pysocks 1.7.1 py38haa95532_0 python 3.8.2 he1778fa_13 python-dateutil 2.8.1 py_0 pytorch 1.5.0 py3.8_cuda102_cudnn7_0 pytorch pytz 2020.1 py_0 qt 5.9.7 vc14h73c81de_0 requests 2.23.0 py38_0 scipy 1.4.1 py38h9439919_0 setuptools 46.2.0 py38_0 sip 4.19.13 py38ha925a31_0 six 1.14.0 py38haa95532_0 sqlite 3.31.1 h2a8f88b_1 tk 8.6.8 hfa6e2cd_0 torchvision 0.6.0 py38_cu102 pytorch tornado 6.0.4 py38he774522_1 urllib3 1.25.8 py38_0 vc 14.2 h21ff451_1 vs2015_runtime 14.27.29016 h5e58377_2 wheel 0.34.2 py38_0 win_inet_pton 1.1.0 py38haa95532_0 wincertstore 0.2 py38_0 xz 5.2.5 h62dcd97_0 zlib 1.2.11 h62dcd97_4 zstd 1.3.7 h508b16e_0 No errors reported. Warning log Warning (roc): Error initialising ROC: No ROC toolchains found. Warning (roc): No HSA Agents found, encountered exception when searching: Error at driver init: HSA is not currently supported on this platform (win32).` END CUDA INFO The section below is most likely not needed for this question, here in case: DETAILS PART 2 (It sounded like dlib (and cmake) were only required to align the faces before therefore theoretically it isn't needed in run.py, but I'm not 100 percent sure. I omitted the section including the dlib specific errors): I got the .yml installed successfully into the anaconda environment I named "pulse3" with no errors as well as the CUDA toolkit 10.2 and cmake 3.17.2. 
Dlib is the only thing that gave me some trouble spiting out a bunch of incompatibility errors with the other packages on windows with my python version 3.8.2.
based on new log evidence using this script simultaneously alongside the run.py file "C:\Users\micha>nvidia-smi --query-gpu=memory.free --format=csv --loop=1" alongside using gpuz to monitor, as well as the error produced from executing run.py in an anaconda environment with all required packages except dlib installed correctly: RuntimeError: CUDA out of memory. Tried to allocate 1.91 GiB (GPU 0; 6.00 GiB total capacity; 3.92 GiB already allocated; 744.91 MiB free; 3.93 GiB reserved in total by PyTorch) resulted in a logged usage of up to but not exceeding 4.502GB of VRAM on a gtx 1660ti Therefore, I can strongly infer that the project PULSE: Self-Supervised Photo Upsampling via Latent Space Exploration of Generative Models (https://github.com/adamian98/pulse) does need somewhere between 6.01GB and 8.00GB of VRAM to execute the "run.py" file, therefore a gpu with 6gb is insufficient so the error: RuntimeError: CUDA out of memory. Tried to allocate 1.91 GiB (GPU 0; 6.00 GiB total capacity; 3.92 GiB already allocated; 744.91 MiB free; 3.93 GiB reserved in total by PyTorch) is most likely due to hardware limitations. Notes To Help Improve the Pulse Project: This result, though inconclusive, contradicts this theory from the creator of the project: "Unfortunately 2GB is not enough memory to store all of the gradients necessary during optimization. We ran our tests with 8GB of memory but I believe that you should be able to run the code with 4GB as well." -adamian98 Possible factors of error (see logs in question for more info if needed): 1.needing to execute the .py file from outside the anaconda environment (despite having the file inside the environment as well) 2.lack of successful dlib package installation 3.the input image not being the correct size (it was 75x77 pixels, and was yoinked from the project samples) 4.the input image not being the correct format (it was.png) the image not being in the correct place \anaconda3\envs\pulse3\Library\qml\Qt3D (the anaconda environment was named pulse3 that is why the path uses pulse3)
https://stackoverflow.com/questions/65680194/
How to load model for inference?
I am using a simple perceptron based classifier to generate sentiment analysis in Pytorch, complete code here Classifying Yelp Reviews. The example does sentiment analysis, it outputs if the given input string is positive or negative. example: this is a pretty small old great book -> positive The application stores the final model along with the vectorizer.json. So my question is: What should be the pre-requisites to build a separate application just to test the model, so that it can be used in web-application later-on? Below is my current understanding and queries for this: I assume to test, we need to load the model, load model parameters and evaluate for inference, please confirm model = TheModelClass(*args, **kwargs) # Model class must be defined somewhere model.load_state_dict(torch.load(PATH)) model.eval() # run if you only want to use it for inference Once step 1 is done, I hope we can deploy the model using Flask and expose a REST API for model inference.
I found a nice tutorial explaining how to load a model for inference; here is the link: https://pytorch.org/tutorials/recipes/recipes/saving_and_loading_models_for_inference.html Thanks!
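In short, the linked recipe covers two standard patterns; a minimal sketch (TheModelClass, args and PATH as in the question, and model assumed to be your trained instance):

```python
import torch

# 1) Recommended: save/load only the learned parameters (the state_dict)
torch.save(model.state_dict(), PATH)

model = TheModelClass(*args, **kwargs)    # the class definition must be importable
model.load_state_dict(torch.load(PATH))
model.eval()                              # disable dropout / use batch-norm running stats

# 2) Alternative: pickle the entire module (ties the file to your exact class layout)
torch.save(model, PATH)
model = torch.load(PATH)
model.eval()
```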
https://stackoverflow.com/questions/65681034/
nn.LSTM() received an invalid combination of arguments
I use lstm from pytorch in my code to predict time series. while i write this code class LSTM_model(nn.Module): def __init__(self, input_size, output_size, hidden_size,num_layers,dropout): super(LSTM_model,self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.seq_len = seq_len self.num_layers = num_layers self.dropout = dropout self.output_size = output_size # self.lstm=nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers) self.lstm = nn.LSTM(self.input_size, self.hidden_size,self.num_layers , self.dropout, batch_first=True) self.fc= nn.Linear(self.hidden_size , self.output_size) def forward(self, x , hidden): x, hidden= self.lstm(x,hidden) x = self.fc(x) return x,hidden but when I use the class, I get an error with the x,hidden=self.lstm(x,hidden) line about the internal nn.LSTM() function from PyTorch. <ipython-input-63-211c1442b5a7> in forward(self, x, hidden) 15 16 def forward(self, x , hidden): ---> 17 x, hidden= self.lstm(x,hidden) 18 x = self.fc(x) 19 return x,hidden D:\Anaconda\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs) 725 result = self._slow_forward(*input, **kwargs) 726 else: --> 727 result = self.forward(*input, **kwargs) 728 for hook in itertools.chain( 729 _global_forward_hooks.values(), D:\Anaconda\lib\site-packages\torch\nn\modules\rnn.py in forward(self, input, hx) 232 _impl = _rnn_impls[self.mode] 233 if batch_sizes is None: --> 234 result = _impl(input, hx, self._flat_weights, self.bias, self.num_layers, 235 self.dropout, self.training, self.bidirectional, self.batch_first) 236 else: TypeError: rnn_tanh() received an invalid combination of arguments - got (Tensor, Tensor, list, int, int, float, bool, bool, bool), but expected one of: * (Tensor data, Tensor batch_sizes, Tensor hx, tuple of Tensors params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) didn't match because some of the arguments have invalid types: (Tensor, Tensor, !list!, !int!, !int!, !float!, !bool!, bool, bool) * (Tensor input, Tensor hx, tuple of Tensors params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) didn't match because some of the arguments have invalid types: (Tensor, Tensor, !list!, !int!, int, float, bool, bool, bool) i called the function with this line model = LSTM_model(input_size=1, output_size=1, hidden_size=128, num_layers=2, dropout=0).to(device) and its called here from tqdm.auto import tqdm def loop_fn(mode, dataset, dataloader, model, criterion, optimizer,device): if mode =="train": model.train() elif mode =="test": model.eval() cost = 0 for feature, target in tqdm(dataloader, desc=mode.title()): feature, target = feature.to(device), target.to(device) output , hidden = model(feature,None) loss = criterion(output,target) if mode =="train": loss.backward() optimizer.step() optimizer.zero_grad() cost += loss.item() * feature.shape[0] cost = cost / len(dataset) return cost Thank You in advance
It took me a while to find out, but you are initializing your nn.LSTM incorrectly because of positional arguments:

self.lstm = nn.LSTM(self.input_size, self.hidden_size, self.num_layers, self.dropout, batch_first=True)

The above will assign self.dropout to the argument named bias:

>>> model.lstm
LSTM(1, 128, num_layers=2, bias=0, batch_first=True)

You may want to use keyword arguments instead:

self.lstm = nn.LSTM(
    input_size=self.input_size,
    hidden_size=self.hidden_size,
    num_layers=self.num_layers,
    dropout=self.dropout,
    batch_first=True)

which will provide the desired result:

>>> model.lstm
LSTM(1, 128, num_layers=2, batch_first=True)
https://stackoverflow.com/questions/65684231/
Output of CNN should be image
I am pretty new to deep learning, so I got one question: Assume an input Grayscale image of shape (128,128,1). Target (Output) is as well an (128,128,1) sized image, e.g. for segmentation, depth prediction etc.. Usually with valid padding the size of the image shrinks after several convolution layers. What are decent (maybe not the toughest one) variants to keep the size or predict a same sized image? Is it via same-padding? Is it via tranpose convolution or upsampling? Should I use a FCN at the end and reshape them to the image size? I am using pytorch. I would be glad for any hints, because I didn't find much in the internet. Best
TL;DR: You want to look at deconv networks (convolution transpose) that help regenerate an image using convolution operations. You want to build an encoder-decoder convolutional architecture that compresses an image to a latent representation using convolutions and then decodes an image from this compressed representation. For image segmentation, a popular architecture is U-Net. NOTE: I can't answer for PyTorch, so I will be sharing the TensorFlow equivalent. Please feel free to ignore the code, but since you are looking for the concept, I can help you with what you need to solve this. You are trying to generate an image as the output of the network. A series of convolution operations helps to downsample an image. Since you need a 2D matrix (grayscale image) as output, you want to upsample as well. Such a network is called a deconv network. The first series of layers convolve over the input, 'flattening' it into a vector of channels. The next set of layers use 2D conv transpose (deconv) operations to change the channels back into a 2D matrix (grayscale image). (The original answer referenced a diagram of a deconv network here.) Here is sample code that shows how you can pass a (128,128,1) image through a convolution/deconvolution stack and get a (128,128,1) image back out. You can find the conv transpose layer implementation in PyTorch here.

from tensorflow.keras import layers, Model, utils

inp = layers.Input((128,128,1))            ##
x = layers.Conv2D(2, (3,3))(inp)           ## Convolution part
x = layers.Conv2D(4, (3,3))(x)             ##
x = layers.Conv2D(6, (3,3))(x)             ##
##########
x = layers.Conv2DTranspose(6, (3,3))(x)    ##
x = layers.Conv2DTranspose(4, (3,3))(x)    ## Deconvolution part
out = layers.Conv2DTranspose(1, (3,3))(x)  ##

model = Model(inp, out)
utils.plot_model(model, show_shapes=True, show_layer_names=False)

Also, if you are looking for tried and tested architectures in this domain, check out U-Net (U-Net: Convolutional Networks for Biomedical Image Segmentation). This is an encoder-decoder (conv2d, conv2d-transpose) architecture that uses a concept called skip connections to avoid information loss and generate better image segmentation masks.
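Since the question is about PyTorch, here is a hedged equivalent of the Keras sketch above using nn.ConvTranspose2d (same layer sizes, purely illustrative):

```python
import torch
import torch.nn as nn

model = nn.Sequential(
    nn.Conv2d(1, 2, 3),            # (128,128) -> (126,126)  downsampling path
    nn.Conv2d(2, 4, 3),            # -> (124,124)
    nn.Conv2d(4, 6, 3),            # -> (122,122)
    nn.ConvTranspose2d(6, 6, 3),   # -> (124,124)  upsampling path
    nn.ConvTranspose2d(6, 4, 3),   # -> (126,126)
    nn.ConvTranspose2d(4, 1, 3),   # -> (128,128)
)

x = torch.randn(1, 1, 128, 128)
print(model(x).shape)              # torch.Size([1, 1, 128, 128])
```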
https://stackoverflow.com/questions/65684804/
Problem with adding smiles on photos with convolutional autoencoder
I have a dataset with images and another dataset as it's description: There are a lot of pictures: people with and without sunglasses, smiles and other attributes. What I want to do is be able to add smiles to photos where people are not smiling. I've started like this: smile_ids = attrs['Smiling'].sort_values(ascending=False).iloc[100:125].index.values smile_data = data[smile_ids] no_smile_ids = attrs['Smiling'].sort_values(ascending=True).head(5).index.values no_smile_data = data[no_smile_ids] eyeglasses_ids = attrs['Eyeglasses'].sort_values(ascending=False).head(25).index.values eyeglasses_data = data[eyeglasses_ids] sunglasses_ids = attrs['Sunglasses'].sort_values(ascending=False).head(5).index.values sunglasses_data = data[sunglasses_ids] When I print them their are fine: plot_gallery(smile_data, IMAGE_H, IMAGE_W, n_row=5, n_col=5, with_title=True, titles=smile_ids) Plot gallery looks like this: def plot_gallery(images, h, w, n_row=3, n_col=6, with_title=False, titles=[]): plt.figure(figsize=(1.5 * n_col, 1.7 * n_row)) plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35) for i in range(n_row * n_col): plt.subplot(n_row, n_col, i + 1) try: plt.imshow(images[i].reshape((h, w, 3)), cmap=plt.cm.gray, vmin=-1, vmax=1, interpolation='nearest') if with_title: plt.title(titles[i]) plt.xticks(()) plt.yticks(()) except: pass Then I do: def to_latent(pic): with torch.no_grad(): inputs = torch.FloatTensor(pic.reshape(-1, 45*45*3)) inputs = inputs.to('cpu') autoencoder.eval() output = autoencoder.encode(inputs) return output def from_latent(vec): with torch.no_grad(): inputs = vec.to('cpu') autoencoder.eval() output = autoencoder.decode(inputs) return output After that: smile_latent = to_latent(smile_data).mean(axis=0) no_smile_latent = to_latent(no_smile_data).mean(axis=0) sunglasses_latent = to_latent(sunglasses_data).mean(axis=0) smile_vec = smile_latent-no_smile_latent sunglasses_vec = sunglasses_latent - smile_latent And finally: def add_smile(ids): for id in ids: pic = data[id:id+1] latent_vec = to_latent(pic) latent_vec[0] += smile_vec pic_output = from_latent(latent_vec) pic_output = pic_output.view(-1,45,45,3).cpu() plot_gallery([pic,pic_output], IMAGE_H, IMAGE_W, n_row=1, n_col=2) def add_sunglasses(ids): for id in ids: pic = data[id:id+1] latent_vec = to_latent(pic) latent_vec[0] += sunglasses_vec pic_output = from_latent(latent_vec) pic_output = pic_output.view(-1,45,45,3).cpu() plot_gallery([pic,pic_output], IMAGE_H, IMAGE_W, n_row=1, n_col=2) But when I execute this line I don't get any faces: add_smile(no_smile_ids) The output: Could someone please explain where is my mistake or why it can happen? Thanks for any help. Added: checking the shape of pic_output:
Wild guess, but it seems you are reshaping your images with view instead of permuting the axes. The former will have the undesired effect of mixing information across channels and pixels. pic_output = pic_output.view(-1, 45, 45, 3).cpu() should be replaced with pic_output = pic_output.permute(0, 2, 3, 1).cpu() assuming tensor pic_output is already shaped like (-1, 3, 45, 45).
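To see why view and permute are not interchangeable here, a small sketch:

```python
import torch

x = torch.arange(2 * 3 * 4).view(2, 3, 4)   # pretend layout (batch, channel, pixel)
a = x.view(2, 4, 3)        # reinterprets the flat memory: values land in the wrong slots
b = x.permute(0, 2, 1)     # actually moves the axis, keeping each value's meaning
print(torch.equal(a, b))   # False
```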
https://stackoverflow.com/questions/65689779/
How can I make this PyTorch heatmap function faster and more efficient?
I have this function that creates a sort if heatmap for 2d tensors, but it's painfully slow when using larger tensor inputs. How can I speed it up and make it more efficient? import torch import numpy as np import matplotlib.pyplot as plt def heatmap( tensor: torch.Tensor, ) -> torch.Tensor: assert tensor.dim() == 2 def color_tensor(x: torch.Tensor) -> torch.Tensor: if x < 0: x = -x if x < 0.5: x = x * 2 return (1 - x) * torch.tensor( [0.9686, 0.9686, 0.9686] ) + x * torch.tensor([0.5725, 0.7725, 0.8706]) else: x = (x - 0.5) * 2 return (1 - x) * torch.tensor( [0.5725, 0.7725, 0.8706] ) + x * torch.tensor([0.0196, 0.4431, 0.6902]) else: if x < 0.5: x = x * 2 return (1 - x) * torch.tensor( [0.9686, 0.9686, 0.9686] ) + x * torch.tensor([0.9569, 0.6471, 0.5098]) else: x = (x - 0.5) * 2 return (1 - x) * torch.tensor( [0.9569, 0.6471, 0.5098] ) + x * torch.tensor([0.7922, 0.0000, 0.1255]) return torch.stack( [torch.stack([color_tensor(x) for x in t]) for t in tensor] ).permute(2, 0, 1) x = torch.randn(3,3) x = x / x.max() x_out = heatmap(x) x_out = (x_out.permute(1, 2, 0) * 255).numpy() plt.imshow(x_out.astype(np.uint8)) plt.axis("off") plt.show() An example of the output:
You need to get rid of the ifs and the for loops and write a vectorized function. To do that, you can use masks and calculate everything in one go. Here it is:

def heatmap(tensor: torch.Tensor) -> torch.Tensor:
    assert tensor.dim() == 2
    # We're expanding to create one more dimension, for the multiplications to work.
    # (note: this uses the function's own argument, not the global x)
    xt = tensor.expand((3, tensor.shape[0], tensor.shape[1])).permute(1, 2, 0)
    # each term below is gated by a mask such as (xt >= 0) * (xt < 0.5), one per branch
    # of the original function; the rest is the original function translated
    color_tensor = (
        (xt >= 0) * (xt < 0.5) * ((1 - xt * 2) * torch.tensor([0.9686, 0.9686, 0.9686]) + xt * 2 * torch.tensor([0.9569, 0.6471, 0.5098]))
        + (xt >= 0) * (xt >= 0.5) * ((1 - (xt - 0.5) * 2) * torch.tensor([0.9569, 0.6471, 0.5098]) + (xt - 0.5) * 2 * torch.tensor([0.7922, 0.0000, 0.1255]))
        + (xt < 0) * (xt > -0.5) * ((1 - (-xt * 2)) * torch.tensor([0.9686, 0.9686, 0.9686]) + (-xt * 2) * torch.tensor([0.5725, 0.7725, 0.8706]))
        + (xt < 0) * (xt <= -0.5) * ((1 - (-xt - 0.5) * 2) * torch.tensor([0.5725, 0.7725, 0.8706]) + (-xt - 0.5) * 2 * torch.tensor([0.0196, 0.4431, 0.6902]))
    ).permute(2, 0, 1)
    return color_tensor
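A quick sanity check of the rewrite (hypothetical names: heatmap_loop is the original loop-based version, heatmap the vectorized one above):

```python
import torch

x = torch.randn(64, 64)
x = x / x.max()
assert torch.allclose(heatmap(x), heatmap_loop(x), atol=1e-6)  # same colors, no Python loops
```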
https://stackoverflow.com/questions/65690160/
How to use VGG19 transfer learning pretraining
I'm working on a VQA model, and I need some help as I'm new to this. I want to use transfer learning from the VGG19 network before running the train, so when I start the train, I will have the image features ahead (trying to solve performance issue). Does it possible to do so? If so, can someone please share an example with pytorch? below is the relevant code: class img_CNN(nn.Module): def __init__(self, img_size): super(img_CNN, self).__init__() self.model = models.vgg19(pretrained=True) self.in_features = self.model.classifier[-1].in_features self.model.classifier = nn.Sequential(*list(self.model.classifier.children())[:-1]) # remove vgg19 last layer self.fc = nn.Linear(in_features, img_size) def forward(self, image): #with torch.no_grad(): img_feature = self.model(image) # (batch, channel, height, width) img_feature = self.fc(img_feature) return img_feature class vqamodel(nn.Module): def __init__(self, output_dim,input_dim, emb_dim, hid_dim, n_layers, dropout, answer_len, que_size, img_size,model_vgg,in_features): super(vqamodel,self).__init__() self.image=img_CNN(img_size) self.question=question_lstm(input_dim, emb_dim, hid_dim, n_layers, dropout,output_dim,que_size) self.tanh=nn.Tanh() self.relu=nn.ReLU() self.dropout=nn.Dropout(dropout) self.fc1=nn.Linear(que_size,answer_len) #the input to the linear network is equal to the combain vector self.softmax=nn.Softmax(dim=1) def forward(self, image, question): image_emb=self.image(image) question_emb=self.question(question) combine =question_emb*image_emb out_feature=self.fc1(combine) out_feature=self.relu(out_feature) return (out_feature) How can I take out the models.vgg19(pretrained=True),run it before the train on the image dataloader and save the image representation in NumPy array? thank you!
Yes, you can use a pretrained VGG model to extract embedding vectors from images. Here is a possible implementation, using torchvision.models.vgg*. First retrieve the pretrained model:

model = torchvision.models.vgg19(pretrained=True)

Its classifier is:

>>> model.classifier
(classifier): Sequential(
    (0): Linear(in_features=25088, out_features=4096, bias=True)
    (1): ReLU(inplace=True)
    (2): Dropout(p=0.5, inplace=False)
    (3): Linear(in_features=4096, out_features=4096, bias=True)
    (4): ReLU(inplace=True)
    (5): Dropout(p=0.5, inplace=False)
    (6): Linear(in_features=4096, out_features=1000, bias=True)
)

Depending on your finetuning strategy, you can either truncate it to keep some of the trained dense layers:

model.classifier = nn.Sequential(*[model.classifier[i] for i in range(4)])

or replace it altogether with a different set of dense layers wrapped in an nn.Sequential:

model.classifier = nn.Sequential(
    nn.Linear(25088, 4096),
    nn.ReLU(True),
    nn.Dropout(0.5),
    nn.Linear(4096, 2048))

Additionally, you can freeze the convolutional backbone of the model (the feature extractor):

for param in model.features.parameters():
    param.requires_grad = False

Then you will be able to use that model to extract image embeddings and perform back-propagation to finetune your classifier:

>>> model(img)  # shape (batch_size, 2048)
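To precompute the image features before training, as the question asks, a sketch of running the truncated model over a dataloader once and saving the embeddings as a NumPy array (image_loader is an assumed dataloader yielding batches of preprocessed images and labels):

```python
import numpy as np
import torch

model.eval()
embeddings = []
with torch.no_grad():                      # no gradients needed for feature extraction
    for imgs, _ in image_loader:
        embeddings.append(model(imgs).cpu().numpy())

np.save("image_embeddings.npy", np.concatenate(embeddings, axis=0))
```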
https://stackoverflow.com/questions/65690251/
Why is the code not able to approximate the square function?
WHy does the following code not work as a square approximator? I am getting weird dimensions. When I tried plotting loss, the graph somehow does not show anything. I am a beginner with pytorch, so I would be grateful for any help. import torch from torch import nn import matplotlib.pyplot as plt import numpy as np data = [[i] for i in range(-10000, 10000)] y = [[i[0] * i[0]] for i in data] data=torch.FloatTensor(data) y=torch.FloatTensor(y) class MyModel(nn.Module): def __init__(self, numfeatures, outfeatures): super().__init__() self.modele = nn.Sequential( nn.Linear( numfeatures, 2*numfeatures), nn.ReLU(), nn.Linear(2 * numfeatures, 4 * numfeatures), nn.ReLU(), nn.Linear(4* numfeatures, 2 * numfeatures), nn.ReLU(), nn.Linear(2*numfeatures, numfeatures), ) def forward(self, x): return self.modele(x) model = MyModel(1, 1) criterion = nn.MSELoss() optimizer = torch.optim.SGD(model.parameters(), lr=0.01) n_epochs = 10000 epoch_loss= [] for i in range(n_epochs): y_pred = model(data) loss = criterion(y_pred, y) optimizer.zero_grad() loss.backward() optimizer.step() epoch_loss.append(loss.item()) plt.plot(epoch_loss)
Your data ranges from -10000 to 10000! You need to rescale your data, otherwise you won't be able to make your model learn. For instance, min-max normalization to [0, 1]:

data = (data - data.min()) / (data.max() - data.min())
y = (y - y.min()) / (y.max() - y.min())

Alternatively, you could standardize your input (zero mean, unit variance):

mean, std = data.mean(), data.std()
data = (data - mean) / std

After 100 epochs: (the original answer showed a plot of the resulting fit here.)
https://stackoverflow.com/questions/65692185/
When I run this code in PyCharm I don't get any output; it simply shows: Process finished with exit code 0. Please help me with this issue
import torch torch.cuda.is_available() torch.cuda.current_device() torch.cuda.get_device_name(0) torch.cuda.memory_reserved() torch.cuda.memory_allocated() torch.cuda.memory_allocated() var1=torch.FloatTensor([1.0,2.0,3.0]).cuda() var1 var1.device import pandas as pd df=pd.read_csv('diabetes.csv') df.head() df.isnull().sum() import seaborn as sns import numpy as np df['Outcome']=np.where(df['Outcome']==1,"Diabetic","No Diabetic") df.head() sns.pairplot(df,hue="Outcome") The output shows: C:\Users\vinot.conda\envs\python21\python.exe D:/python/python_work/pythonProject/diabetes.py Process finished with exit code 0
You have to print your outputs. For example: torch.cuda.is_available() will return either True or False, but it isn't assigned to any variable and hence it does nothing; a plain script doesn't echo expressions the way Jupyter notebooks do. You can assign your outputs to a variable, just like you did with df=pd.read_csv('diabetes.csv'), or you can print the dataframe (or any other expression, for that matter; not sure what you're after): print(df.head()) Similarly, the sns.pairplot figure will only appear in a script if you call matplotlib's plt.show() afterwards.
https://stackoverflow.com/questions/65693758/
Which parameters of Mask-RCNN control mask recall?
I'm interested in fine-tuning a Mask-RCNN model that I'm using for instance segmentation. Currently I have trained the model for 6 epochs and the various Mask-RCNN losses are as follows: The reason I'm stopping is that the COCO evaluation metrics seem to have dipped in the last epoch: I know this is a far reaching question, but I'm looking to gain some intuition of how to understand which parameters are going to be the most impactful in improving the evaluation metrics. I understand there are three places to consider: Should I be looking at batch size, learning rate and momentum, this uses an SGD optimizer with a learning rate of 1e-4 and batch size 2? Should I be looking at using more training data or adding augmentation (I don't currently use any) and my dataset is current pretty large 40K images? Should I be looking at the specific MaskRCNN parameters? I thing I'll likely be asked to me more specific on what I want to improve so let me say that I would like to improve the recall of the individual masks. The model is performing well but doesn't quite capture the full extend of what I would like it to. I'm also leaving out details of the specific learning problem as I'd like to gain intuition of how to approach this in general.
A couple of notes: 6 epochs is way too few for the network to converge, even if you use a pre-trained network, especially such a big one as ResNet-50. I think you need at least 50 epochs. On a pre-trained ResNet-18 I started to get good results after 30 epochs, ResNet-34 needed another 10-20 epochs, and your ResNet-50 plus a 40k-image train set definitely needs more than 6 epochs. Definitely use a pre-trained network. In my experience, I failed to get results I liked with SGD. I started using AdamW + a ReduceLROnPlateau scheduler. The network converges quite fast, reaching something like 50-60% AP at epoch 7-8, but then it climbs to 80-85 after 50-60 epochs through very small improvements from epoch to epoch, and only if the LR is small enough. You must be familiar with the gradient descent notion. I used to think of it like this: if you have more augmentation, your "hill" is covered with "boulders" that you have to be able to bypass, and this is only possible if you control the LR. Additionally, AdamW helps with the overfitting. This is how I do it (for networks with higher input resolution, where your input images are scaled on input by the net itself, I use a higher lr):

init_lr = 0.00005
weight_decay = init_lr * 100
optimizer = torch.optim.AdamW(params, lr=init_lr, weight_decay=weight_decay)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, verbose=True, patience=3, factor=0.75)

for epoch in range(epochs):
    # train for one epoch, printing every 10 iterations
    metric_logger = train_one_epoch(model, optimizer, train_loader, scaler, device, epoch, print_freq=10)
    scheduler.step(metric_logger.loss.global_avg)
    optimizer.param_groups[0]["weight_decay"] = optimizer.param_groups[0]["lr"] * 100
    # scheduler.step()
    # evaluate on the test dataset
    evaluate(model, test_loader, device=device)
    print("[INFO] serializing model to '{}' ...".format(args["model"]))
    save_and_print_size_of_model(model, args["model"], script=False)

Find an lr and weight decay such that the training exhausts the lr down to a very small value, like 1/10 of your initial lr, by the end of the training. If you hit a plateau too often, the scheduler quickly brings the lr to very small values and the network will learn nothing for the rest of the epochs. Your plots indicate that your LR is too high at some point of the training: the network stops training and then the AP goes down. You need constant improvements, even small ones. The more the network trains, the more subtle the details it learns about your domain, and the smaller the learning rate should be. Imho, a constant LR will not allow doing that correctly. Anchor generator settings: here is how I initialize the network.
def get_maskrcnn_resnet_model(name, num_classes, pretrained, res='normal'):
    print('Using maskrcnn with {} backbone...'.format(name))
    backbone = resnet_fpn_backbone(name, pretrained=pretrained, trainable_layers=5)

    sizes = ((4,), (8,), (16,), (32,), (64,))
    aspect_ratios = ((0.25, 0.5, 1.0, 2.0, 4.0),) * len(sizes)
    anchor_generator = AnchorGenerator(sizes=sizes, aspect_ratios=aspect_ratios)

    roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0', '1', '2', '3'], output_size=7, sampling_ratio=2)

    default_min_size = 800
    default_max_size = 1333
    if res == 'low':
        min_size = int(default_min_size / 1.25)
        max_size = int(default_max_size / 1.25)
    elif res == 'normal':
        min_size = default_min_size
        max_size = default_max_size
    elif res == 'high':
        min_size = int(default_min_size * 1.25)
        max_size = int(default_max_size * 1.25)
    else:
        raise ValueError('Invalid res={} param'.format(res))

    model = MaskRCNN(backbone, min_size=min_size, max_size=max_size, num_classes=num_classes,
                     rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler)
    model.roi_heads.detections_per_img = 512
    return model

I need to find small objects here, which is why I use such anchor params. Class imbalance issue: if you have only your object and bg, no problem. If you have more classes, then make sure that your training split (e.g. 80% for train and 20% for test) is applied more or less evenly to all the classes used in your particular training. Good luck!
https://stackoverflow.com/questions/65694335/
Pytorch in V.S. Code: torch.tensor is not callable
I am running my PyTorch program in VS Code, but I am getting the following error: torch.tensor is not callable I have followed this answer https://stackoverflow.com/a/53572939/13218692 and added the following lines to my settings.json file in VS Code, but I am still getting the error. Any help would be highly appreciated! Thanks!
Add this entry after it, in the same pylint arguments list: ,"--disable-msg=not-callable" (The original answer included a screenshot of the settings.json file here.)
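For context, a sketch of what the relevant settings.json section would look like after both changes (the first argument comes from the linked answer; the exact flag name can vary with the pylint version):

```json
{
    "python.linting.pylintArgs": [
        "--generated-members=numpy.*,torch.*",
        "--disable-msg=not-callable"
    ]
}
```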
https://stackoverflow.com/questions/65696441/
How to run Python script on a Discrete Graphics AMD GPU?
WHAT I WANT TO DO: I have a script that I use for factorizing prime numbers given a certain range: # Python program to display all the prime numbers within an interval lower = 900 upper = 1000 print("Prime numbers between", lower, "and", upper, "are:") for num in range(lower, upper + 1): # all prime numbers are greater than 1 if num > 1: for i in range(2, num): if (num % i) == 0: break else: print(num) I would like to use the GPU instead of the CPU to run such script so it would be faster THE PROBLEM: I don't have a NVIDIA GPU on my Intel NUC NUC8i7HVK but a "Discrete GPU" If I run this code to check what are my GPUs: import pyopencl as cl import numpy as np a = np.arange(32).astype(np.float32) res = np.empty_like(a) ctx = cl.create_some_context() queue = cl.CommandQueue(ctx) mf = cl.mem_flags a_buf = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a) dest_buf = cl.Buffer(ctx, mf.WRITE_ONLY, res.nbytes) prg = cl.Program(ctx, """ __kernel void sq(__global const float *a, __global float *c) { int gid = get_global_id(0); c[gid] = a[gid] * a[gid]; } """).build() prg.sq(queue, a.shape, None, a_buf, dest_buf) cl.enqueue_copy(queue, res, dest_buf) print (a, res) I receive: [0] <pyopencl.Platform 'AMD Accelerated Parallel Processing' at 0x7ffb3d492fd0> [1] <pyopencl.Platform 'Intel(R) OpenCL HD Graphics' at 0x187b648ed80> THE POSSIBLE APPROACH TO THE PROBLEM: I found a guide that takes you by the hand and explains step by step how to run it on your GPU. But all Pyhton libraries that pipes Python through the GPU like PyOpenGL, PyOpenCL, Tensorflow (Force python script on GPU), PyTorch, etc... are tailored for NVIDIA. In case you have an AMD all libraries ask for ROCm but such software still doesn't support integrated GPU or Discrete GPU as far as I know (see my own reply below). I only found a guide that talks about such approach but I cannot make it work. Is there hope or I'm just tying to do something impossible? EDIT: Reply to @chapelo If I choose 0 the reply is: Set the environment variable PYOPENCL_CTX='0' to avoid being asked again. [ 0. 1. 2. 3. 4. 5. 6. 7. 8. 9. 10. 11. 12. 13. 14. 15. 16. 17. 18. 19. 20. 21. 22. 23. 24. 25. 26. 27. 28. 29. 30. 31.] [ 0. 1. 4. 9. 16. 25. 36. 49. 64. 81. 100. 121. 144. 169. 196. 225. 256. 289. 324. 361. 400. 441. 484. 529. 576. 625. 676. 729. 784. 841. 900. 961.] If I choose 1 the reply is: Set the environment variable PYOPENCL_CTX='1' to avoid being asked again. [ 0. 1. 2. 3. 4. 5. 6. 7. 8. 9. 10. 11. 12. 13. 14. 15. 16. 17. 18. 19. 20. 21. 22. 23. 24. 25. 26. 27. 28. 29. 30. 31.] [ 0. 1. 4. 9. 16. 25. 36. 49. 64. 81. 100. 121. 144. 169. 196. 225. 256. 289. 324. 361. 400. 441. 484. 529. 576. 625. 676. 729. 784. 841. 900. 961.]
The following code is a sample of a complete Python program that usually includes: the import statements, the function definitions, the main() function, and the if __name__ == "__main__": section. I hope this helps you solve your problem.

import pyprimes
from math import sqrt
import numpy as np
import pyopencl as cl
import pyopencl.algorithm
import pyopencl.array


def primes_below(number):
    """Generate a list of prime numbers below a specified `number`"""
    n = lambda a: 2 if a == 0 else 2*a + 1
    limit = int(sqrt(number)) + 1
    size = number // 2
    primes = [True] * size
    for i in range(1, size):
        if primes[i]:
            num = n(i)
            if num > limit:
                break
            for j in range(i + num, size, num):
                primes[j] = False
    for i, flag in enumerate(primes):
        if flag:
            yield n(i)


def primes_between(lo, hi):
    """Generate a list of prime numbers between `lo` and `hi` numbers"""
    primes = list(primes_below(int(sqrt(hi)) + 1))
    size = (hi - lo - (0 if hi % 2 else 1)) // 2 + 1
    n = lambda a: 2*a + lo + (0 if lo % 2 else 1)
    numbers = [True] * size
    for i, prime in enumerate(primes):
        if i == 0:  # avoid dividing by 2
            continue
        nlo = n(0)
        # slower:
        # start = prime * (nlo//prime + 1) if nlo%prime else 0
        start = 0
        while (n(start) % prime) != 0:
            start += 1
        for j in range(start, size, prime):
            numbers[j] = False
    for i, flag in enumerate(numbers):
        if flag:
            yield n(i)


def primes_between_using_cl(lo, hi):
    """Generate a list of prime numbers between lo and hi numbers.
    This is a parallel algorithm using pyopencl."""
    primes = list(primes_below(int(sqrt(hi)) + 1))
    size_primes_h = np.array((len(primes) - 1, ), dtype=np.int32)
    numbers_h = np.arange(lo + (0 if lo & 1 else 1),
                          hi + (0 if hi & 1 else 1),
                          2,
                          dtype=np.int32)
    size = (hi - lo - (0 if hi % 2 else 1)) // 2 + 1
    code = """\
    __kernel
    void is_prime( __global const int *primes,
                   __global       int *numbers) {
      int gid = get_global_id(0);
      int num = numbers[gid];
      int max = (int) (sqrt((float)num) + 1.0);
      for (; *primes; ++primes) {
        if (*primes > max) break;
        if (num % *primes == 0) {
          numbers[gid] = 0;
          return;
        }
      }
    }
    """
    platforms = cl.get_platforms()
    ctx = cl.Context(dev_type=cl.device_type.ALL,
                     properties=[(cl.context_properties.PLATFORM, platforms[0])])
    queue = cl.CommandQueue(ctx)
    prg = cl.Program(ctx, code).build()
    numbers_d = cl.array.to_device(queue, numbers_h)
    primes_d = cl.array.to_device(queue, np.array(primes[1:], dtype=np.int32))
    prg.is_prime(queue, (size, ), None, primes_d.data, numbers_d.data)
    array, length = cl.algorithm.copy_if(numbers_d, "ary[i]>0")[:2]
    yield from array.get()[:length.get()]


def test(f, lo, hi):
    """Test that all prime numbers are generated by comparing with the
    output of the library `pyprimes`"""
    a = filter(lambda p: p > lo, pyprimes.primes_below(hi))
    b = f(lo, hi)
    result = True
    for p, q in zip(a, b):
        if p != q:
            print(p, q)
            result = False
    return result


def main():
    lower = 1000
    upper = 5000
    print("The prime numbers between {} and {}, are:".format(lower, upper))
    print()
    for p in primes_between_using_cl(lower, upper):
        print(p, end=' ')
    print()


if __name__ == '__main__':
    main()
https://stackoverflow.com/questions/65703605/
PyTorch DataLoader using Mongo DB
I would like to know if using a DataLoader connected to a MongoDB is a sensible thing to do, and how this could be implemented.

Background: I have about 20 million documents sitting in a (local) MongoDB, way more documents than fit in memory. I would like to train a deep neural net on the data. So far, I have been exporting the data to the file system first, with subfolders named as the classes of the documents. But I find this approach nonsensical: why export first (and later delete) if the data is already well-maintained, sitting in a DB?

Question 1: Am I right? Would it make sense to directly connect to the MongoDB? Or are there reasons not to do it (e.g. DBs generally being too slow etc.)? If DBs are too slow (why?), can one prefetch the data somehow?

Question 2: How would one implement a PyTorch DataLoader? I have found only very few code snippets online ([1] and [2]), which makes me doubt my approach.

Code snippet: the general way I access MongoDB is as follows. Nothing special about this, I think.

import pymongo
from pymongo import MongoClient

myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["xyz"]
mycol = mydb["xyz_documents"]

query = {
    # some filters
}

results = mycol.find(query)

# results is now a cursor that can run through all docs
# Assume, for the sake of this example, that each doc contains a class name and some image that I want to train a classifier on
Introduction

This one is a little open-ended, but let's try; also, please correct me if I'm wrong somewhere.

"So far, I have been exporting the data to the file system first, with subfolders named as the classes of the documents."

IMO this isn't sensible because:

you are essentially duplicating data
any time you would like to train anew, given only code and database, this operation would have to be repeated
you can access multiple datapoints at once and cache them in RAM for later reuse without reading from the hard drive multiple times (which is quite heavy)

"Am I right? Would it make sense to directly connect to the MongoDB?"

Given the above, probably yes (especially when it comes to a clear and portable implementation).

"Or are there reasons not to do it (e.g. DBs generally being too slow etc.)?"

AFAIK the DB shouldn't be slower in this case, as it will cache access to it, but I'm no DB expert unfortunately. Many tricks for faster access are implemented out-of-the-box for databases.

"can one prefetch the data somehow?"

Yes, if you just want to get data, you could load a larger part of the data (say 1024 records) in one go and return batches of data from that (say batch_size=128).

Implementation

"How would one implement a PyTorch DataLoader? I have found only very few code snippets online ([1] and [2]) which makes me doubt my approach."

I'm not sure why you would want to do that. What you should go for is torch.utils.data.Dataset, as shown in the examples you've listed. I would start with a simple non-optimized approach similar to the one here, so:

open the connection to the db in __init__ and keep it as long as it's used (I would create a context manager from torch.utils.data.Dataset so the connection is closed after the epochs are finished)
I would not transform the results to a list (especially since you cannot fit it in RAM for obvious reasons), as that misses the point of generators
I would perform batching inside this Dataset (there is an argument batch_size here)
I am not sure about the __getitem__ function, but it seems it can return multiple datapoints at once, hence I'd use that, and it should allow us to use num_workers>0 (given that mycol.find(query) returns data in the same order every time)

Given that, something along those lines is what I'd do:

class DatabaseDataset(torch.utils.data.Dataset):
    def __init__(self, query, batch_size, path: str, database: str):
        self.batch_size = batch_size

        client = pymongo.MongoClient(path)
        self.db = client[database]
        self.query = query
        # Or the non-approximate method; if the approximate method
        # returns a smaller number of items you should be fine
        self.length = self.db.estimated_document_count()

        self.cursor = None

    def __enter__(self):
        # Ensure that this find returns the same order of query every time
        # If not, you might get duplicated data
        # It is rather unlikely (depending on batch size), shouldn't be a problem
        # for 20 million samples anyway
        self.cursor = self.db.find(self.query)
        return self

    def shuffle(self):
        # Find a way to shuffle data so it is returned in different order
        # If that happens out of the box you might be fine without it actually
        pass

    def __exit__(self, *_, **__):
        # Or anything else how to close the connection
        self.cursor.close()

    def __len__(self):
        # Number of batches served per epoch
        return self.length // self.batch_size

    def __getitem__(self, index):
        # Reads take long, hence loading a batch of documents at once should speed things up
        examples = self.cursor[index * self.batch_size : (index + 1) * self.batch_size]
        # Do something with this data
        ...
        # Return the whole batch
        return data, labels

Now batching is taken care of by DatabaseDataset, hence torch.utils.data.DataLoader can have batch_size=1. You might need to squeeze an additional dimension. As MongoDB uses locks (which is no surprise, but see here), num_workers>0 shouldn't be a problem.

Possible usage (schematically):

with DatabaseDataset(...) as e:
    dataloader = torch.utils.data.DataLoader(e, batch_size=1)
    for epoch in epochs:
        for batch in dataloader:
            # And all the stuff
            ...
        dataset.shuffle()  # after each epoch

Remember about the shuffling implementation in such a case! (Shuffling can also be done inside the context manager, and you might want to close the connection manually, or something along those lines.)
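An alternative sketch, if you prefer streaming over random access: torch.utils.data.IterableDataset lets you iterate a pymongo cursor directly. The field names "image" and "label" below are placeholders for whatever your documents actually contain:

import pymongo
import torch
from torch.utils.data import DataLoader, IterableDataset

class MongoIterableDataset(IterableDataset):
    def __init__(self, uri, db, collection, query):
        self.uri, self.db = uri, db
        self.collection, self.query = collection, query

    def __iter__(self):
        # open one connection per worker process
        client = pymongo.MongoClient(self.uri)
        cursor = client[self.db][self.collection].find(self.query)
        for doc in cursor:
            yield torch.tensor(doc["image"]), doc["label"]

ds = MongoIterableDataset("mongodb://localhost:27017/", "xyz", "xyz_documents", {})
loader = DataLoader(ds, batch_size=128)

Note that with num_workers > 0 each worker would replay the full cursor, so you would need to shard the query per worker.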
https://stackoverflow.com/questions/65704680/
Fast.ai Course 2020: HTTPError: 401 Client Error: PermissionDenied for url
I am going through the fast.ai course (version 2020) and am currently in lesson 2. When trying to use the Azure key, I always get this error and I can't figure out what to do:

#To download images with Bing Image Search, sign up at Microsoft Azure for a free account. You will be given a key, which you can copy and enter in a cell as follows (replacing 'XXX' with your key and executing it):
key = os.environ.get('AZURE_SEARCH_KEY', 'XXX')

#Once you've set key, you can use search_images_bing. This function is provided by the small utils class included with the notebooks online. If you're not sure where a function is defined, you can just type it in your notebook to find out:
search_images_bing

results = search_images_bing(key, 'grizzly bear')
ims = results.attrgot('content_url')
len(ims)

HTTPError                                 Traceback (most recent call last)
<ipython-input-18-cddb73f3292e> in <module>()
----> 1 results = search_images_bing(key, 'grizzly bear')
      2 ims = results.attrgot('content_url')
      3 len(ims)

1 frames
/usr/local/lib/python3.6/dist-packages/requests/models.py in raise_for_status(self)
    939
    940         if http_error_msg:
--> 941             raise HTTPError(http_error_msg, response=self)
    942
    943     def close(self):

HTTPError: 401 Client Error: PermissionDenied for url: https://api.bing.microsoft.com/v7.0/images/search?q=grizzly+bear&count=150&min_height=128&min_width=128

I am using Google Colab. Maybe I should add that I don't know what the key name is that I should replace the 'XXX' with. Jeremy Howard said that you would get one when signing up for Azure, but I didn't.
At first this didn't work for me either; then I realized I needed to register for a different Bing Search service on Azure. Look for "Bing Search v7" in the marketplace.
https://stackoverflow.com/questions/65706220/
TensorFlow vs PyTorch convolution confusion
I am confused about how to replicate Keras (TensorFlow) convolutions in PyTorch. In Keras, I can do something like this (the input size is (256, 237, 1, 21) and the output size is (256, 237, 1, 1024)):

import tensorflow as tf
x = tf.random.normal((256,237,1,21))
y = tf.keras.layers.Conv1D(filters=1024, kernel_size=5, padding="same")(x)
print(y.shape)
(256, 237, 1, 1024)

However, in PyTorch, when I try to do the same thing I get a different output size:

import torch.nn as nn
x = torch.randn(256,237,1,21)
m = nn.Conv1d(in_channels=237, out_channels=1024, kernel_size=(1,5))
y = m(x)
print(y.shape)
torch.Size([256, 1024, 1, 17])

I want PyTorch to give me the same output size that Keras does. This previous question seems to imply that Keras filters are PyTorch's out_channels, but that's what I have. I tried to add padding in PyTorch with padding=(0,503), but that gives me torch.Size([256, 1024, 1, 1023]), which is still not correct. This also takes much longer than Keras does, so I feel that I have incorrectly assigned a parameter. How can I replicate what Keras did with convolution in PyTorch?
In TensorFlow, tf.keras.layers.Conv1D takes in a tensor of shape (batch_shape + (steps, input_dim)). This means that what is commonly known as channels appears on the last axis. For instance, in 2D convolution you would have (batch, height, width, channels). This is different from PyTorch, where the channel dimension is right after the batch axis: torch.nn.Conv1d takes in shapes of (batch, channel, length). So you will need to permute two axes.

For torch.nn.Conv1d:

in_channels is the number of channels in the input tensor
out_channels is the number of filters, i.e. the number of channels the output will have
stride is the step size of the convolution
padding is the zero-padding added to both sides

In PyTorch there is no option for padding='same', so you will need to choose padding correctly. Here stride=1, so padding must equal kernel_size//2 (i.e. padding=2) in order to maintain the length of the tensor.

In your example, since x has a shape of (256, 237, 1, 21), in TensorFlow's terminology it will be considered as an input with:

a batch shape of (256, 237),
steps=1, so the length of your 1D input is 1,
21 input channels.

Whereas in PyTorch, x of shape (256, 237, 1, 21) would be:

a batch shape of (256, 237),
1 input channel,
a length of 21.

I have kept the input in both examples below (TensorFlow vs. PyTorch) as x.shape=(256, 237, 21), assuming 256 is the batch size, 237 is the length of the input sequence, and 21 is the number of channels (i.e. the input dimension, what I see as the dimension on each timestep).

In TensorFlow:

>>> x = tf.random.normal((256, 237, 21))
>>> m = tf.keras.layers.Conv1D(filters=1024, kernel_size=5, padding="same")
>>> y = m(x)
>>> y.shape
TensorShape([256, 237, 1024])

In PyTorch:

>>> x = torch.randn(256, 237, 21)
>>> m = nn.Conv1d(in_channels=21, out_channels=1024, kernel_size=5, padding=2)
>>> y = m(x.permute(0, 2, 1))
>>> y.permute(0, 2, 1).shape
torch.Size([256, 237, 1024])

So in the latter, you would simply work with x = torch.randn(256, 21, 237)...
https://stackoverflow.com/questions/65708548/
What is a tensor argument to Normal supposed to mean in Distributions Package of Pytorch?
I understand torch.Normal(loc, scale) is a class corresponding to a univariate normal distribution in PyTorch. I understand how it works when loc and scale are numbers. The problem is when the inputs to torch.Normal are tensors, as opposed to numbers. In that case I do not understand it well. What is the exact interpretation/usage of such tensor arguments? See for example y_dist in the code below: loc and scale are tensors for y_dist. What does this exactly mean? I do not think this converts the univariate distribution to a multivariate one, does it? Does it instead form a group of univariate distributions?

import torch as pt

ptd = pt.distributions
x_dist = ptd.Normal(loc = 2, scale = 3)
x_samples = x_dist.sample()

batch_size = 256
y_dist = ptd.Normal(loc = 0.25 * pt.ones(batch_size, dtype=pt.float32),
                    scale = pt.ones(batch_size, dtype=pt.float32))
As you said, if loc (a.k.a. mu) and scale (a.k.a. sigma) are floats, then it will sample from a normal distribution with loc as the mean and scale as the standard deviation. Providing tensors instead of floats will just make it sample from more than one normal distribution independently (unlike torch.distributions.MultivariateNormal, of course). If you look at the source code you will see loc and scale are broadcast to the same shape on __init__.

Here's an example to show this behavior:

>>> mu = torch.tensor([-10, 10], dtype=torch.float)
>>> sigma = torch.ones(2, 2)
>>> y_dist = Normal(loc=mu, scale=sigma)

Above, mu is 1D while sigma is 2D, yet:

>>> y_dist.loc
tensor([[-10.,  10.],
        [-10.,  10.]])

So it will get two samples from N(-10, 1) and two samples from N(10, 1):

>>> y_dist.sample()
tensor([[ -9.1686,  10.6062],
        [-10.0974,   8.5439]])

Similarly:

>>> mu = torch.zeros(2, 2)
>>> sigma = torch.tensor([0.001, 1000], dtype=torch.float)
>>> y_dist = Normal(loc=mu, scale=sigma)

will broadcast scale to be:

>>> y_dist.scale
tensor([[1.0000e-03, 1.0000e+03],
        [1.0000e-03, 1.0000e+03]])

>>> y_dist.sample()
tensor([[-8.0329e-04,  1.4213e+01],
        [-1.4907e-03,  3.1190e+02]])
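To make the "group of univariate distributions" reading from the question concrete, here is a small sketch you can run directly:

import torch
from torch.distributions import Normal

d = Normal(loc=torch.zeros(256), scale=torch.ones(256))
print(d.batch_shape)                       # torch.Size([256]): 256 independent univariate normals
print(d.sample().shape)                    # torch.Size([256]): one draw from each
print(d.log_prob(torch.zeros(256)).shape)  # torch.Size([256]): one log density per distribution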
https://stackoverflow.com/questions/65710149/
ImportError: libc10.so: cannot open shared object file: No such file or directory
While running smdataparallel, I see the following error:

# python
Python 3.6.10 |Anaconda, Inc.| (default, May  8 2020, 02:54:21)
[GCC 7.3.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import smdistributed.dataparallel.torch.distributed as dist
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/opt/conda/lib/python3.6/site-packages/smdistributed/dataparallel/__init__.py", line 16, in <module>
    import smddpcommon as hc
ImportError: libc10.so: cannot open shared object file: No such file or directory
libc10.so is made available by pytorch. Hence first import torch and then import packages that depend on pytorch.
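A minimal sketch of the suggested import order, assuming you are in a SageMaker environment where the smdistributed package is installed:

import torch  # importing torch first loads libc10.so and the other shared libraries
import smdistributed.dataparallel.torch.distributed as dist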
https://stackoverflow.com/questions/65710713/
How to make tensordot operations after permutation
I have 2 tensors, A and B:

A = torch.randn([32,128,64,12], dtype=torch.float64)
B = torch.randn([64,12,64,12], dtype=torch.float64)
C = torch.tensordot(A, B, ([2,3],[0,1]))
D = C.permute(0,2,1,3)  # shape: [32,64,128,12]

Tensor D comes from the operations "tensordot -> permute". How can I implement a new operation f() so that the tensordot can be applied after f(), like:

A_2 = f(A)
B_2 = f(B)
D = torch.tensordot(A_2, B_2)
Have you considered using torch.einsum which is very flexible? D = torch.einsum('ijab,abkl->ikjl', A, B) The problem with tensordot is that it outputs all dimensions of A before those of B and what you are looking for (when permuting) is to "interleave" dimensions from A and B.
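A quick sanity check of that claim, using the shapes from the question (the True at the end is what you should see):

import torch

A = torch.randn([32, 128, 64, 12], dtype=torch.float64)
B = torch.randn([64, 12, 64, 12], dtype=torch.float64)

ref = torch.tensordot(A, B, ([2, 3], [0, 1])).permute(0, 2, 1, 3)
alt = torch.einsum('ijab,abkl->ikjl', A, B)
print(torch.allclose(ref, alt))  # True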
https://stackoverflow.com/questions/65716540/
Multi label classification with unbalanced labels
I am building a multi-label classification network. My GTs are vectors of length 512:

[0,0,0,1,0,1,0,...,0,0,0,1]

Most of the entries are zeros; each vector has about 5 ones, and the rest are zeros. I am thinking to:

Use sigmoid as the activation for the output layer.
Use binary_crossentropy as the loss function.

But how can I solve the imbalance issue? The network can learn to always predict zeros and still have a really low training loss. How can I make it actually learn to predict the ones?
You cannot easily upsample, as this is a multilabel case (which I missed in the post originally). What you can do is give the ones way higher weights, something like this:

import torch

class BCEWithLogitsLossWeighted(torch.nn.Module):
    def __init__(self, weight, *args, **kwargs):
        super().__init__()
        # Notice none reduction
        self.bce = torch.nn.BCEWithLogitsLoss(*args, **kwargs, reduction="none")
        self.weight = weight

    def forward(self, logits, labels):
        loss = self.bce(logits, labels)
        binary_labels = labels.bool()
        loss[binary_labels] *= labels[binary_labels] * self.weight
        # Or any other reduction
        return torch.mean(loss)

loss = BCEWithLogitsLossWeighted(50)
logits = torch.randn(64, 512)
labels = torch.randint(0, 2, size=(64, 512)).float()
print(loss(logits, labels))

Also, you can use FocalLoss to focus on positive examples (there should be some implementations available in some libraries).

EDIT: Focal Loss can be coded along those lines as well (a functional form, because that's what I have in my repo, but you should be able to work from it):

def binary_focal_loss(
    outputs: torch.Tensor,
    targets: torch.Tensor,
    gamma: float,
    weight=None,
    pos_weight=None,
    reduction: typing.Callable[[torch.Tensor], torch.Tensor] = None,
) -> torch.Tensor:
    probabilities = (1 - torch.sigmoid(outputs)) ** gamma
    loss = probabilities * torch.nn.functional.binary_cross_entropy_with_logits(
        outputs,
        targets.float(),
        weight,
        reduction="none",
        pos_weight=pos_weight,
    )
    return reduction(loss)
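For reference, a built-in route that achieves a similar effect is the pos_weight argument of BCEWithLogitsLoss; a minimal sketch, where the weight of 50 is carried over from the example above and is an assumption you should tune:

import torch

pos_weight = torch.full((512,), 50.0)  # per-class weight applied to positive targets
criterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)

logits = torch.randn(64, 512)
labels = torch.randint(0, 2, size=(64, 512)).float()
print(criterion(logits, labels))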
https://stackoverflow.com/questions/65718296/
How can I swap values in PyTorch Tensor?
I have the following tensor:

vector = torch.tensor([[1,5,3], [2,3,4]])

How can I swap values in the second axis? E.g.

tensor([[1, 5, 3],
        [2, 3, 4]])

becomes:

tensor([[1, 3, 3],
        [2, 5, 4]])
You can use numpy's style of indexing:

>>> vector = torch.tensor([[1,5,3], [2,3,4]])
>>> vector[[0,1],1] = vector[[1,0],1]
>>> vector
tensor([[1, 3, 3],
        [2, 5, 4]])

In this case, we switch the values between the 0th and 1st index in the first dimension, only at index 1 on the second dimension.
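Swapping entire rows works the same way; a small sketch (advanced indexing copies the right-hand side first, so the aliasing is safe):

import torch

v = torch.tensor([[1, 5, 3], [2, 3, 4]])
v[[0, 1]] = v[[1, 0]]
print(v)  # tensor([[2, 3, 4], [1, 5, 3]])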
https://stackoverflow.com/questions/65720276/
Using a subset of classes in ImageNet
I'm aware that subsets of ImageNet exist; however, they don't fulfill my requirement: I want 50 classes at their native ImageNet resolutions. To this end, I used torch.utils.data.dataset.Subset to select specific classes from ImageNet. However, it turns out that class labels/indices must be greater than 0 and less than num_classes. Since ImageNet contains 1000 classes, the indices of my selected classes quickly go over 50. How can I reassign the class indices, and do so in a way that allows for evaluation later down the road as well? Is there a more elegant way to select a subset?
I am not sure I understood your conclusions about labels being greater than zero and less than num_classes. The torch.utils.data.Subset helper takes in a torch.utils.data.Dataset and a sequence of indices; they correspond to indices of data points from the Dataset you would like to keep in the subset. These indices have nothing to do with the classes they belong to.

Here's how I would approach this. Load your dataset through torchvision.datasets (custom datasets would work the same way). Here I will demonstrate it with FashionMNIST, since ImageNet's data is not made available directly through torchvision's API:

>>> ds = torchvision.datasets.FashionMNIST('.')
>>> len(ds)
60000

Define the classes you want to select for the subset dataset, and retrieve all indices from the main dataset which correspond to these classes:

>>> targets = [1, 3, 5, 9]
>>> indices = [i for i, label in enumerate(ds.targets) if label in targets]

You have your subset:

>>> ds_subset = Subset(ds, indices)
>>> len(ds_subset)
24000

At this point, you can use a dictionary to remap the original labels onto contiguous new indices using targets:

>>> remap = {x: i for i, x in enumerate(targets)}
{1: 0, 3: 1, 5: 2, 9: 3}

For example:

>>> x, y = ds_subset[10]
>>> y, remap[y]  # old_label, new_label
(1, 0)
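If you want the relabeling to happen transparently inside the data pipeline, a small wrapper does it; a sketch under the same targets assumption as above:

import torch
from torch.utils.data import Dataset

class RemappedSubset(Dataset):
    """Wraps a subset so labels come out as 0..len(targets)-1."""
    def __init__(self, subset, targets):
        self.subset = subset
        self.remap = {orig: new for new, orig in enumerate(targets)}

    def __len__(self):
        return len(self.subset)

    def __getitem__(self, i):
        x, y = self.subset[i]
        return x, self.remap[int(y)]

# usage: ds_remapped = RemappedSubset(ds_subset, targets)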
https://stackoverflow.com/questions/65725306/
Why is data augmentation degrading performance for Mask-RCNN?
I trained a Mask-RCNN for instance segmentation with and without data augmentation. The augmentation was simply a rotation, which makes sense for the data concerned. I was very surprised that the augmentation run (dark blue) was worse than the non-augmentation run (light blue). Since the augmentation plots seem to just be shifted down but have the same shape, I was wondering if there is something else at play. I am using a batch size of 2 and the dataset has 40K images; could that affect things?
Not quite an answer. I had similar effects, and I think all the parameters and how you train it are important. For example, with more layers (resnet34 vs. resnet18 for the backbone) you need more information to train the bigger network; in this case, augmentations are useful. Another example is network resolution. I trained with the default min_size=800 and max_size=1333 at some learning rate, and with a higher resolution you have a higher potential for aggressive growth of the network AP at a higher LR. Yet another related example is how many "levels" you have in your FPN and what the grid settings for AnchorGenerator are. If your augmentations generate samples smaller than the anchors on a particular level of the FPN, then they will probably cause more issues than do any good. And if your augmentations generate samples so small that the details of your object are not visible, again, not very useful, especially on small networks. There are tons of similar small issues that matter. I had a situation where rotations made the result worse because, at some rotation angles, the rotated sample started to look like part of the background and the Mask-RCNN-based detector failed to work with it. Cubic interpolation fixed it a little bit, but eventually I came up with the idea of limiting the rotation angle. Just experiment and find hyperparameters that play well for your particular task.
https://stackoverflow.com/questions/65725824/
How to Increase the Scope of Images a Neural Network Can Recognize?
I am working on an image recognition neural network with PyTorch. My goal is to take pictures of handwritten math equations, process them, and use the neural network to recognize each element. I've reached the point where I am able to separate every variable, number, or symbol from the equation, and everything is ready to be sent through the neural network. I've trained my network to recognize numbers quite well (this part was quite easy), but now I want to expand its scope to recognize letters as well as numbers. I loaded handwritten letters along with the numbers into tensors, shuffled the elements, and put them into batches. No matter how I vary my learning rate, my architecture (hidden layers and the number of neurons per layer), or my batch size, I cannot get the neural network to recognize letters. Here is my network architecture and the feed-forward function (you can see I experimented with the number of hidden layers):

class NeuralNetwork(nn.Module):
    def __init__(self):
        super().__init__()
        inputNeurons, hiddenNeurons, outputNeurons = 784, 700, 36

        # Create tensors for the weights
        self.layerOne = nn.Linear(inputNeurons, hiddenNeurons)
        self.layerTwo = nn.Linear(hiddenNeurons, hiddenNeurons)
        self.layerThree = nn.Linear(hiddenNeurons, outputNeurons)
        #self.layerFour = nn.Linear(hiddenNeurons, outputNeurons)
        #self.layerFive = nn.Linear(hiddenNeurons, outputNeurons)

    # Create function for Forward propagation
    def Forward(self, input):
        # Begin Forward propagation
        input = torch.sigmoid(self.layerOne(torch.sigmoid(input)))
        input = torch.sigmoid(self.layerTwo(input))
        input = torch.sigmoid(self.layerThree(input))
        #input = torch.sigmoid(self.layerFour(input))
        #input = torch.sigmoid(self.layerFive(input))
        return input

And this is the training code block (the data is shuffled in a dataloader, the ground truths are shuffled in the same order, batch size is 10, total number of letter and number data points is 244,800):

neuralNet = NeuralNetwork()
params = list(neuralNet.parameters())
criterion = nn.MSELoss()
print(neuralNet)

dataSet = next(iter(imageDataLoader))
groundTruth = next(iter(groundTruthsDataLoader))

for i in range(15):
    for k in range(24480):
        neuralNet.zero_grad()
        prediction = neuralNet.Forward(dataSet)
        loss = criterion(prediction, groundTruth)
        loss.backward()

        for layer in range(len(params)):
            # Updating the weights of the neural network
            params[layer].data.sub_(params[layer].grad.data * learningRate)

Thanks for the help in advance!
The first thing I would recommend is writing clean PyTorch code. For example, your NeuralNetwork class should have a forward method (f in lower case), so that you won't call it using prediction = neuralNet.Forward(dataSet). The reason is that the hooks of your neural network do not get dispatched if you use prediction = neuralNet.Forward(dataSet); for more details refer to this link (see the sketch below). The second thing is: since your dataset is not balanced, try to use undersampling / oversampling methods, which will be very helpful in your case.
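A minimal sketch of the first point, reusing the layer sizes from the question; the rest of the training loop stays the same:

import torch
import torch.nn as nn

class NeuralNetwork(nn.Module):
    def __init__(self):
        super().__init__()
        self.layerOne = nn.Linear(784, 700)
        self.layerTwo = nn.Linear(700, 700)
        self.layerThree = nn.Linear(700, 36)

    def forward(self, input):  # lowercase "forward" is what nn.Module dispatches to
        input = torch.sigmoid(self.layerOne(torch.sigmoid(input)))
        input = torch.sigmoid(self.layerTwo(input))
        return torch.sigmoid(self.layerThree(input))

neuralNet = NeuralNetwork()
prediction = neuralNet(torch.randn(10, 784))  # call the module itself, not .Forward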
https://stackoverflow.com/questions/65727871/
Python Error: the following arguments are required: experimentname
I am a beginner in Python and the PyTorch environment. I am executing the program on GitHub. I am getting the following error:

usage: main.py [-h] [--self_host SELF_HOST] [--cpu] [--port PORT]
               [--server_port SERVER_PORT] [--config CONFIG]
               [--devices_per_trial DEVICES_PER_TRIAL] [--dataroot DATAROOT]
               [--s3 S3] [--logroot LOGROOT] [--seed SEED]
               [--num_eigenthings NUM_EIGENTHINGS] [--batch_size BATCH_SIZE]
               [--eval_batch_size EVAL_BATCH_SIZE] [--momentum MOMENTUM]
               [--num_steps NUM_STEPS] [--max_samples MAX_SAMPLES] [--cuda]
               [--full_dataset] [--fname FNAME] [--mode {power_iter,lanczos}]
               experimentname
main.py: error: the following arguments are required: experimentname
An exception has occurred, use %tb to see the full traceback.
SystemExit: 2

Upon %tb, I get the following details:

Traceback (most recent call last):
  File "/home/ajinkya/Hessian_Expts/main.py", line 77, in <module>
    skeletor.execute(main)
  File "/home/ajinkya/miniconda3/lib/python3.8/site-packages/skeletor/launcher.py", line 233, in execute
    args = _parser.val.parse_args()
  File "/home/ajinkya/miniconda3/lib/python3.8/argparse.py", line 1768, in parse_args
    args, argv = self.parse_known_args(args, namespace)
  File "/home/ajinkya/miniconda3/lib/python3.8/argparse.py", line 1800, in parse_known_args
    namespace, args = self._parse_known_args(args, namespace)
  File "/home/ajinkya/miniconda3/lib/python3.8/argparse.py", line 2034, in _parse_known_args
    self.error(_('the following arguments are required: %s') %
  File "/home/ajinkya/miniconda3/lib/python3.8/argparse.py", line 2521, in error
    self.exit(2, _('%(prog)s: error: %(message)s\n') % args)
  File "/home/ajinkya/miniconda3/lib/python3.8/argparse.py", line 2508, in exit
    _sys.exit(status)
SystemExit: 2

Kindly help to get rid of this error. Thanks in advance.
Your CLI seems different from the one exposed by pytorch-hessian-eigenthings/main.py. Anyhow, everything prefixed with -- is optional (like --self_host, --cpu, ...). The only (required) positional argument is experimentname, so you need to provide it when calling main.py! An extremely minimal call would be:

python main.py my_experiment
https://stackoverflow.com/questions/65729423/
Running into IndexError when following PyTorch CrossEntropyLoss example for classification
I am trying to follow exemplary usage of PyTorch's CrossEntropyLoss function for a classification problem using the FashionMNIST dataset. I think the issue has to do with the dimension of the data output from my model, but I'm not sure, which makes figuring this out more difficult. I'm only running on Google Colab right now, so I don't have easy access to a debugger (as far as I know). Here's my code; please let me know what I am doing wrong!

import torch.nn as nn
import torch.optim as optim

model = nn.Sequential(
    nn.Linear(784, 1024),
    nn.ReLU(),
    nn.Linear(1024, 10),
    nn.ReLU(),
)

optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.0)
loss_fn = nn.CrossEntropyLoss()

# Model Training
for epoch in range(1):
    for data, label in trainset:
        optimizer.zero_grad()
        output = model(torch.flatten(data))
        loss = loss_fn(output, label)
        loss.backward()
        optimizer.step()

And then the error:

IndexError                                Traceback (most recent call last)
<ipython-input-3-6d9a9ea2e921> in <module>()
     16     optimizer.zero_grad()
     17     output = model(torch.flatten(data))
---> 18     loss = loss_fn(output, label)
     19     loss.backward()
     20     optimizer.step()

3 frames
/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in log_softmax(input, dim, _stacklevel, dtype)
   1603         dim = _get_softmax_dim('log_softmax', input.dim(), _stacklevel)
   1604     if dtype is None:
-> 1605         ret = input.log_softmax(dim)
   1606     else:
   1607         ret = input.log_softmax(dim, dtype=dtype)

IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)
It looks like you haven't converted your dataset into a DataLoader object; most likely that's the reason you're facing this issue. However, coming back to your problem: the loss function expects the shape of the output variable from the model to be a 2-D tensor, something like [1, 10], where 1 is the number of batches (which should be 1, as you've used only one sample at a time) and 10 is the number of logits/classes, so 10 predictions. In short, to solve your issue, change the loss-calculation line to the following:

loss = loss_fn(output.unsqueeze(0), torch.Tensor([label]).type(torch.LongTensor))

Here, .unsqueeze() increases the dimension of a tensor, and since the label is an int, we've turned it into a 1-D tensor before sending it into the loss function.

Note: As you haven't mentioned much in your question about what you're trying to do exactly or what type of data you're working with, it's not obvious, but I don't know why you're using ReLU() as your output layer's activation (it's something we almost never do unless you have some special reason). Seeing that you're using the CrossEntropyLoss() loss, which indicates you may be trying to do multi-class classification, I would suggest you remove the ReLU() after the last layer. Then your model will be able to learn the different classes using the softmax() activation (which is built into CrossEntropyLoss()), and you'll get what you want from the model.
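Alternatively, here is a batched sketch that avoids the unsqueeze altogether, assuming trainset, model, optimizer and loss_fn are as defined in the question and trainset yields (image, int_label) pairs:

from torch.utils.data import DataLoader

train_loader = DataLoader(trainset, batch_size=64, shuffle=True)

for data, label in train_loader:
    optimizer.zero_grad()
    output = model(data.flatten(start_dim=1))  # keep the batch dimension
    loss = loss_fn(output, label)              # output: [64, 10], label: [64]
    loss.backward()
    optimizer.step()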
https://stackoverflow.com/questions/65729822/
Don't include an operation for gradient computation in PyTorch
I have a custom layer. Let the layer be called 'Gaussian':

class Gaussian(nn.Module):
    def __init__(self):
        super(Gaussian, self).__init__()

    #@torch.no_grad
    def forward(self, x):
        _r = np.random.randint(0, x.shape[0], x.shape[0])
        _sample = x[_r]
        _d = (_sample - x)
        _number = int(self.k * x.shape[0])
        x[1: _number] = x[1: _number] + (self.n * _d[1: _number]).detach()
        return x

The above class will be used as below:

cnn_model = nn.Sequential(nn.Conv2d(1, 32, 5), Gaussian(), nn.ReLU(), nn.Conv2d(32, 32, 5))

If x is the input, I want the gradient of x to exclude operations that are present in the Gaussian module, but include the calculations in the other layers of the neural network (nn.Conv2d etc.). In the end, my aim is to use the Gaussian module to perform calculations, but those calculations should not be included in gradient computation. I tried the following:

Using @torch.no_grad above the forward method of Gaussian
Using detach after every operation in the Gaussian module: x[1: _number] = x[1: _number] + (self.n * _d[1: _number]).detach(), and similarly for the other operations
Using y = x.detach() in the forward method, performing the operations on y, and then setting x.data = y

Are the above methods correct? P.S: Question edited
Gradient calculation makes sense when there are parameters to optimise. If your module does not have any parameters, then no gradient will be stored, because there are no parameters to associate it with.
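For completeness, a minimal sketch of how to keep the perturbation itself out of the autograd graph while still letting gradients flow through x; k and n below are stand-ins for the undefined hyperparameters in the question:

import torch
import torch.nn as nn

class Gaussian(nn.Module):
    def __init__(self, k=0.5, n=0.1):  # hypothetical defaults
        super().__init__()
        self.k, self.n = k, n

    def forward(self, x):
        with torch.no_grad():  # nothing inside this block is recorded by autograd
            r = torch.randint(0, x.shape[0], (x.shape[0],))
            d = (x[r] - x) * self.n  # treated as a constant downstream
        number = int(self.k * x.shape[0])
        out = x.clone()  # avoid in-place modification of an input needed for backward
        out[1:number] = x[1:number] + d[1:number]
        return out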
https://stackoverflow.com/questions/65741865/
Pytorch - Caught StopIteration in replica 1 on device 1 error while Training on GPU
I am trying to train a BertPunc model on the train2012 data used in the git link: https://github.com/nkrnrnk/BertPunc. While running on the server, with 4 GPUs enabled, below is the error I get:

StopIteration: Caught StopIteration in replica 1 on device 1.
Original Traceback (most recent call last):
  File "/home/stenoaimladmin/.local/lib/python3.8/site-packages/torch/nn/parallel/parallel_apply.py", line 61, in _worker
    output = module(*input, **kwargs)
  File "/home/stenoaimladmin/.local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/home/stenoaimladmin/notebooks/model_BertPunc.py", line 16, in forward
    x = self.bert(x)
  File "/home/stenoaimladmin/.local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/home/stenoaimladmin/anaconda3/lib/python3.8/site-packages/pytorch_pretrained_bert/modeling.py", line 861, in forward
    sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask,
  File "/home/stenoaimladmin/.local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/home/stenoaimladmin/anaconda3/lib/python3.8/site-packages/pytorch_pretrained_bert/modeling.py", line 727, in forward
    extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
StopIteration

From the link https://github.com/huggingface/transformers/issues/8145, this appears to happen when the data gets moved back and forth between multiple GPUs. As per the git link https://github.com/interpretml/interpret-text/issues/117, we need to downgrade the PyTorch version from 1.7, which I use currently, to 1.4. For me, downgrading isn't an option, as I have other scripts that use torch 1.7. What should I do to overcome this error? I can't put the whole code here as there are too many lines, but here is the snippet that gives me the error:

bert_punc, optimizer, best_val_loss = train(bert_punc, optimizer, criterion, epochs_top,
    data_loader_train, data_loader_valid, save_path, punctuation_enc, iterations_top, best_val_loss=1e9)

Here is my DataParallel code:

bert_punc = nn.DataParallel(BertPunc(segment_size, output_size, dropout)).cuda()

I tried changing the DataParallel line to divert the training to only 1 GPU out of the 4 present, but that gave me a space issue, so I had to revert the code back to default. Here is the link to all the scripts that I am using: https://github.com/nkrnrnk/BertPunc. Please advise.
Change

extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility

to

extended_attention_mask = extended_attention_mask.to(dtype=torch.float32) # fp16 compatibility

For more details, see https://github.com/vid-koci/bert-commonsense/issues/6
https://stackoverflow.com/questions/65750762/
Updating weights manually in Pytorch
import torch
import math

# Create Tensors to hold input and outputs.
x = torch.linspace(-math.pi, math.pi, 2000)
y = torch.sin(x)

# For this example, the output y is a linear function of (x, x^2, x^3), so
# we can consider it as a linear layer neural network. Let's prepare the
# tensor (x, x^2, x^3).
p = torch.tensor([1, 2, 3])
xx = x.unsqueeze(-1).pow(p)

model = torch.nn.Sequential(
    torch.nn.Linear(3, 1),
    torch.nn.Flatten(0, 1)
)
loss_fn = torch.nn.MSELoss(reduction='sum')
learning_rate = 1e-6

Then I print the weights:

parameters = list(model.parameters())
print(parameters)

Results:

[Parameter containing:
tensor([[ 0.0407,  0.2680, -0.1148]], requires_grad=True), Parameter containing:
tensor([-0.0132], requires_grad=True)]

y_pred = model(xx)
loss = loss_fn(y_pred, y)
model.zero_grad()
loss.backward()

Updating the weights:

with torch.no_grad():
    for param in model.parameters():
        param -= 1e-6 * param.grad

and then:

list(model.parameters())

[Parameter containing:
tensor([[ 0.0532,  0.2472, -0.0393]], requires_grad=True), Parameter containing:
tensor([-0.0167], requires_grad=True)]

The weights were updated. I'm confused. How is that possible? I thought only the param variable in the for loop changed, not model.parameters(). But when I change the code a bit:

with torch.no_grad():
    for param in model.parameters():
        param -= 1e-6

the weights don't change. So I guess it is related to param.grad. Can you explain that to me?
The param variable inside the loop references each element of model.parameters(), so updating param in-place is the same as updating the elements of model.parameters(). As for your second example, I think decrementing by 1e-6 is just not enough for you to see the effect. Try param -= 1. and see if this has any effect on model.parameters().
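A quick demonstration of the aliasing on a throwaway module:

import torch

lin = torch.nn.Linear(2, 2)
before = lin.weight.clone()

with torch.no_grad():
    for param in lin.parameters():
        param -= 1.0  # in-place: mutates the very tensors the module holds

print(torch.equal(before, lin.weight))  # False: model.parameters() changed too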
https://stackoverflow.com/questions/65752599/
LSTM/RNN in pytorch The relation between forward method and training model
I'm still fairly new to neural networks, so sorry in advance for any ambiguities in the following. In a "standard" LSTM implementation for a language task, we have the following (sorry for the very rough sketches):

class LSTM(nn.Module):
    def __init__(*args):
        ...

    def forward(self, input, states):
        lstm_in = self.model['embed'](input)
        lstm_out, hidden = self.model['lstm'](lstm_in, states)
        return lstm_out, hidden

Later on, we call upon this model in the training step:

def train(*args):
    for epoch in range(epochs):
        ....
        *init_zero_states
        ...
        out, states = model(input, states)
        ...
    return model

Let's just say that I have 3 sentences as input:

sents = [[The, sun, is, shiny],
         [The, beach, was, very, windy],
         [Computer, broke, down, today]]

model = train(LSTM, sents)

All words in all sentences get converted to embeddings and loaded into the model. Now the questions:

Does self.model['lstm'] iterate through all words from all articles and make one output after every word, or after every sentence?

How does the model make the distinction between the 3 sentences? For example, after getting "The", "sun", "is", "shiny", does something (such as the states) in the 'lstm' reset and begin anew?

Is the "out" in the train step after out, states = model(input, states) the output after running all 3 sentences, and hence the combined "information" from all 3 sentences?

Thanks!
When using LSTMs in PyTorch you usually use the nn.LSTM function. Here is a quick example, followed by an explanation of what happens inside:

class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.embedder = nn.Embedding(vocab_size, embed_size)
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        x = self.embedder(x)
        # every time you pass a new sentence into the model you need to create
        # a new hidden-state (the LSTM requires, unlike RNNs, two hidden-states in a tuple)
        hidden = (torch.zeros(num_layers, batch_size, hidden_size),
                  torch.zeros(num_layers, batch_size, hidden_size))
        x, hidden = self.lstm(x, hidden)
        # x contains the output states of every timestep;
        # for classification we mostly just want the last one
        x = x[:, -1]
        x = self.fc(x)
        x = self.softmax(x)
        return x

So, when taking a look at the nn.LSTM function, you see all N embedded words are passed into it at once, and you get all N outputs (one from every timestep). That means inside the lstm function it iterates over all words in the sentence embeddings; we just don't see that in the code. It also returns the hidden state of every timestep, but you don't have to use that further; in most cases you can just ignore it. As pseudo code:

def lstm(x):
    hiddenstate = init_with_zeros()
    outputs, hiddenstates = [], []
    for e in x:
        output, hiddenstate = neuralnet(e, hiddenstate)
        outputs.append(output)
        hiddenstates.append(hiddenstate)
    return outputs, hiddenstates

sentence = ["the", "sun", "is", "shiny"]
sentence = embedding(sentence)
outputs, hiddenstates = lstm(sentence)
https://stackoverflow.com/questions/65753368/
Estimating mixture of Gaussian models in Pytorch
I actually want to estimate a normalizing flow with a mixture of Gaussians as the base distribution, so I'm sort of stuck with torch. However, you can reproduce my error by just estimating a mixture of Gaussians model in torch. My code is below:

import numpy as np
import matplotlib.pyplot as plt
import sklearn.datasets as datasets

import torch
from torch import nn
from torch import optim
import torch.distributions as D

num_layers = 8
weights = torch.ones(8,requires_grad=True).to(device)
means = torch.tensor(np.random.randn(8,2),requires_grad=True).to(device)
stdevs = torch.tensor(np.abs(np.random.randn(8,2)),requires_grad=True).to(device)

mix = D.Categorical(weights)
comp = D.Independent(D.Normal(means,stdevs), 1)
gmm = D.MixtureSameFamily(mix, comp)

num_iter = 10001  # 30001
num_iter2 = 200001
loss_max1 = 100

for i in range(num_iter):
    x = torch.randn(5000,2)  # this can be an arbitrary x samples
    loss2 = -gmm.log_prob(x).mean()  # -densityflow.log_prob(inputs=x).mean()
    optimizer1.zero_grad()
    loss2.backward()
    optimizer1.step()

The error I get is:

0 8.089411823514835
Traceback (most recent call last):
  File "/home/cameron/AnacondaProjects/gmm.py", line 183, in <module>
    loss2.backward()
  File "/home/cameron/anaconda3/envs/torch/lib/python3.7/site-packages/torch/tensor.py", line 221, in backward
    torch.autograd.backward(self, gradient, retain_graph, create_graph)
  File "/home/cameron/anaconda3/envs/torch/lib/python3.7/site-packages/torch/autograd/__init__.py", line 132, in backward
    allow_unreachable=True)  # allow_unreachable flag
RuntimeError: Trying to backward through the graph a second time, but the saved intermediate results have already been freed. Specify retain_graph=True when calling backward the first time.

As you can see, the model runs for only 1 iteration.
There is an ordering problem in your code. Since you create the Gaussian mixture model outside of the training loop, when calculating the loss the Gaussian mixture model will try to use the initial values of the parameters that you set when you defined the model, but optimizer1.step() has already modified those values, so even if you set loss2.backward(retain_graph=True) there will still be the error: RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation.

The solution to this problem is simply to create a new Gaussian mixture model whenever you update the parameters. Example code running as expected:

import numpy as np
import matplotlib.pyplot as plt
import sklearn.datasets as datasets

import torch
from torch import nn
from torch import optim
import torch.distributions as D

num_layers = 8
weights = torch.ones(8,requires_grad=True)
means = torch.tensor(np.random.randn(8,2),requires_grad=True)
stdevs = torch.tensor(np.abs(np.random.randn(8,2)),requires_grad=True)

parameters = [weights, means, stdevs]
optimizer1 = optim.SGD(parameters, lr=0.001, momentum=0.9)

num_iter = 10001
for i in range(num_iter):
    mix = D.Categorical(weights)
    comp = D.Independent(D.Normal(means,stdevs), 1)
    gmm = D.MixtureSameFamily(mix, comp)

    optimizer1.zero_grad()
    x = torch.randn(5000,2)  # this can be an arbitrary x samples
    loss2 = -gmm.log_prob(x).mean()  # -densityflow.log_prob(inputs=x).mean()
    loss2.backward()
    optimizer1.step()

    print(i, loss2)
https://stackoverflow.com/questions/65755730/
Managing several versions of Python
OK, I am relatively new to Python (more of a MATLAB / R / Stata user). I previously installed Python on my computer from the Python website. Everything was running smoothly until I had to install PyTorch too. I tried installing it via pip to no avail, so I had to make a new installation of Python, but this time with Anaconda. However, now I have a mess: I cannot load SciPy on Anaconda, and I cannot load PyTorch in the regular Python I have. Having to run them separately is driving me insane. Is there a way to merge the two versions together, or should I uninstall and stick to only one?
Put this in an environment.yml file:

name: myenv
channels:
  - pytorch
  - anaconda
dependencies:
  - pytorch
  - torchvision
  - torchaudio
  - cpuonly
  - scipy

You can change the name so that it suits your taste, and you should modify the dependencies according to the install command provided by https://pytorch.org. Then create the environment and activate it:

conda env create -f environment.yml
conda activate myenv

But as described in the conda documentation, if you want to use pip, you should install pip for your conda environment and then use it, after using conda for as many packages as possible. This way, you don't need to uninstall your original Python. As for merging the two environments, I haven't heard of it, and it seems like bad practice even if it's possible.
https://stackoverflow.com/questions/65755978/
Why do sometimes CNN models predict just one class out of all others?
I am relatively new to the deep learning landscape, so please don't be as mean as Reddit! It seems like a general question, so I won't be giving my code here, as it doesn't seem necessary (if it is, here's the link to colab). A bit about the data: you can find the original data here; it is a downsized version of the original 82 GB dataset. Once I trained my CNN on this, it predicts 'No Diabetic Retinopathy' (No DR) every single time, leading to an accuracy of 73%. Is the reason for this just the vast number of No DR images, or is it something else? I have no idea! The 5 classes I have for prediction are ["Mild", "Moderate", "No DR", "Proliferative DR", "Severe"]. It's probably just bad code; I was hoping you guys could help.
I was about to comment: a more rigorous approach would be to start measuring your dataset balance: how many images of each class do you have? This will likely give an answer to your question. But I couldn't help myself and looked at the link you gave. Kaggle already gives you an overview of the dataset: the No DR class alone accounts for 25,810 of the 35,126 training images. Quick calculation: 25,810 / 35,126 * 100 = 73%. That's interesting; you said you had an accuracy of 73%. Your model is learning on an imbalanced dataset, with the first class heavily over-represented; 25k out of 35k is enormous. My hypothesis is that your model keeps predicting the first class, which means that on average you'll end up with an accuracy of 73%. What you should do is balance your dataset, for example by only allowing 35,126 - 25,810 = 9,316 examples from the first class to appear during an epoch. Even better, balance your dataset over all classes, such that each class only appears n times per epoch; a sampler-based sketch follows below.
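A minimal sketch of one way to do that in PyTorch, using a WeightedRandomSampler; the per-class counts used here are the ones reported for the original dataset and should be treated as an assumption to verify against your downsized copy:

import torch
from torch.utils.data import WeightedRandomSampler

# class ids for every sample in the dataset (assumed counts: 25810, 2443, 5292, 873, 708)
labels = torch.tensor([0]*25810 + [1]*2443 + [2]*5292 + [3]*873 + [4]*708)
class_counts = torch.bincount(labels).float()
sample_weights = 1.0 / class_counts[labels]  # rare classes get drawn more often

sampler = WeightedRandomSampler(sample_weights, num_samples=len(labels), replacement=True)
# loader = DataLoader(dataset, batch_size=32, sampler=sampler)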
https://stackoverflow.com/questions/65762961/
apex: TypeError: Class advice impossible in Python3
I installed apex with:

pip install apex

My configuration is as follows:

torch version 1.7.1
python version 3.7
cuda 11.0

I came across this error after importing apex:

TypeError: Class advice impossible in Python3. Use the @implementer class decorator instead.
I uninstalled the old version of apex and reinstalled a new version. It worked, thanks.

git clone https://www.github.com/nvidia/apex
cd apex
python setup.py install
https://stackoverflow.com/questions/65774794/
How do I link in all of PyTorch with a pybind11 .so?
I have a pybind11 C++ project which uses the PyTorch C++ API:

#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>
#include <math.h>
#include <torch/torch.h>
...
void f()
{
    ...
    torch::Tensor dynamic_parameters = torch::full({1}, /*value=*/0.5, torch::dtype(torch::kFloat64).requires_grad(true));
    torch::optim::SGD optimizer({dynamic_parameters}, /*lr=*/0.01);
    ...
}

PYBIND11_MODULE(reson8, m)
{
    m.def("my_function", &my_function, "");
}

I use distutils to compile this into a .so that can be imported in Python:

from distutils.core import setup, Extension

def configuration(parent_package='', top_path=None):
    import numpy
    from numpy.distutils.misc_util import Configuration
    from numpy.distutils.misc_util import get_info

    # Necessary for the half-float d-type.
    info = get_info('npymath')

    config = Configuration('', parent_package, top_path)
    config.add_extension('reson8',
                         ['reson8.cpp'],
                         extra_info=info,
                         include_dirs=["/home/ian/anaconda3/lib/python3.7/site-packages/pybind11/include",
                                       "/home/ian/anaconda3/lib/python3.8/site-packages/pybind11/include",
                                       "/home/ian/dev/hedgey/Engine/lib/libtorch/include",
                                       "/home/ian/dev/hedgey/Engine/lib/libtorch/include/torch/csrc/api/include"])
    return config

if __name__ == "__main__":
    from numpy.distutils.core import setup
    setup(configuration=configuration)

It compiles with no error, but on running "import reson8" in Python I get this error:

ImportError: undefined symbol: _ZTVN5torch5optim9OptimizerE

I'm unsure whether the problem is that PyTorch hasn't been linked into my .so (the .so is 10 MB, which is rather large if it doesn't include PyTorch, but maybe all pybind11 .so files are large). How can I solve this issue?
I eventually found I needed to use the Anaconda version of libtorch rather than my own, as well as using Torch's CppExtension. Here is my working setup.py:

from distutils.core import setup, Extension
from torch.utils.cpp_extension import BuildExtension, CppExtension

def configuration(parent_package='', top_path=None):
    import numpy
    from numpy.distutils.misc_util import Configuration
    from numpy.distutils.misc_util import get_info

    # Necessary for the half-float d-type.
    info = get_info('npymath')

    config = Configuration('', parent_package, top_path)
    config.ext_modules.append(CppExtension(
        name='reson8',
        sources=['reson8.cpp'],
        extra_info=info,
        extra_compile_args=['-g', '-D_GLIBCXX_USE_CXX11_ABI=0'],
        extra_ldflags=['-ltorch_python'],
        include_dirs=["/home/ian/anaconda3/lib/python3.7/site-packages/pybind11/include",
                      "/home/ian/anaconda3/lib/python3.8/site-packages/pybind11/include",
                      "/home/ian/anaconda3/lib"]
    ))
    return config

if __name__ == "__main__":
    from numpy.distutils.core import setup
    setup(configuration=configuration)
https://stackoverflow.com/questions/65774829/
Does NVLink accelerate training with DistributedDataParallel?
Nvidia's NVLink accelerates data transfer between several GPUs on the same machine. I train large models on such a machine using PyTorch. I see why NVLink would make model-parallel training faster, since one pass through a model will involve several GPUs. But would it accelerate a data-parallel training process using DistributedDataParallel?
How does data-parallel training on k GPUs work? You split your mini-batch into k parts, each part is forwarded on a different GPU, and gradients are estimated on each GPU. However (and this is super crucial), updating the weights must be synchronized between all GPUs. This is where NVLink becomes important for data-parallel training as well.
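Schematically, this is the standard DistributedDataParallel setup in which that gradient synchronization happens; MyModel is a placeholder for your own nn.Module, and the script would be launched with torchrun or a similar launcher:

import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP

dist.init_process_group(backend="nccl")  # NCCL uses NVLink when it is available
local_rank = dist.get_rank() % torch.cuda.device_count()
model = MyModel().to(local_rank)
model = DDP(model, device_ids=[local_rank])
# during loss.backward(), DDP all-reduces the gradients across GPUs;
# that inter-GPU traffic is what NVLink accelerates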
https://stackoverflow.com/questions/65777660/
LSTM Autoencoder Output Layer
I am trying to get an LSTM autoencoder to recreate its inputs. So far I have:

class getSequence(nn.Module):
    def forward(self, x):
        out, _ = x
        return out

class getLast(nn.Module):
    def forward(self, x):
        out, states = x
        states = states[len(states) - 1]
        return states

class AEncoder(nn.Module):
    def __init__(self, input_size, first_layer, second_layer, n_layers):
        super(AEncoder, self).__init__()
        self.n_layers = n_layers
        self.encode = nn.Sequential(nn.LSTM(input_size, first_layer, batch_first=True),
                                    getSequence(),
                                    nn.ReLU(True),
                                    nn.LSTM(first_layer, second_layer),
                                    getLast())
        self.decode = nn.Sequential(nn.LSTM(second_layer, first_layer),
                                    getSequence(),
                                    nn.ReLU(True),
                                    nn.LSTM(first_layer, input_size),
                                    getSequence())

    def forward(self, x):
        x = x.float()
        x = self.encode(x)
        x = x.repeat(32, 1, 1)  # repeating last hidden state of self.encode
        x = self.decode(x)
        return x

While researching, I have seen some people adding a time-distributed dense layer at the end of self.decode. I am confused whether that final layer is specific to other tasks autoencoders are used for; if so, can I ignore that layer if I am only trying to recreate inputs?
The time-distributed dense layer, as the name suggests, is just an ordinary dense layer applied to every temporal slice of an input; you can think of it as a special form of RNN cell, i.e. one without a recurrent hidden state. So you can use any time-distributed layer as the output layer of an autoencoder that deals with time-distributed inputs, e.g. an RNN layer with an LSTM cell, GRU cell or simple RNN cell, or a time-distributed dense layer. In the research paper that proposed the LSTM autoencoder, the basic model reconstructs sequences of vectors (image patches or features) using only one LSTM layer in both the encoder and the decoder. Following is an example of using a time-distributed dense layer in the decoder:

class Decoder(nn.Module):
    def __init__(self, seq_len, input_dim=64, n_features=1):
        super(Decoder, self).__init__()
        self.seq_len, self.input_dim = seq_len, input_dim
        self.hidden_dim, self.n_features = 2 * input_dim, n_features
        self.rnn = nn.LSTM(
            input_size=input_dim,
            hidden_size=self.hidden_dim,
            num_layers=1,
            batch_first=True)
        self.output_layer = nn.Linear(self.hidden_dim, n_features)

    def forward(self, x):
        x = x.repeat(self.seq_len, self.n_features)
        x = x.reshape((self.n_features, self.seq_len, self.input_dim))
        x, (hidden_n, cell_n) = self.rnn(x)
        x = x.reshape((self.seq_len, self.hidden_dim))
        return self.output_layer(x)
https://stackoverflow.com/questions/65783297/
The reason behind rgb image normalization parameters in pytorch
I've seen transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)) in lots of tutorials and in the PyTorch docs. I know the first parameter is the mean and the second one is the std. I can't understand why the values for the different channels differ.
That can just be the distribution of the colours in the original dataset of the code authors (such as COCO or PascalVOC). It would only be through chance that all colours were equally represented. However, if you used the same mean in your case, I doubt it would make much of a difference, due to the similarity of the means and stds. For example, in my custom dataset taken from a GoPro camera, the means and standard deviations are:

mean: [0.2841186, 0.32399923, 0.27048702]
std: [0.21937862, 0.26193094, 0.23754872]

where the means are hardly equal. This does not mean, however, that the channels are treated differently in the ML model. All this transform does is make sure that each feature is standardised (through z-score standardisation). Think about it this way: if one colour is represented with a generally higher intensity in your dataset (e.g. if you have lots of vibrant "blue" pictures of the beach, the sky, lagoons, etc.), then you will have to subtract a larger number from that channel to ensure that your data is standardised.
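If you want the statistics of your own dataset rather than the published ones, here is a rough sketch for computing per-channel mean and std, assuming the dataset yields (C, H, W) float tensors in [0, 1] with C=3:

import torch
from torch.utils.data import DataLoader

def channel_stats(dataset):
    loader = DataLoader(dataset, batch_size=64)
    n, total, total_sq = 0, torch.zeros(3), torch.zeros(3)
    for x, _ in loader:
        n += x.numel() // 3                   # pixels per channel
        total += x.sum(dim=(0, 2, 3))
        total_sq += (x ** 2).sum(dim=(0, 2, 3))
    mean = total / n
    std = (total_sq / n - mean ** 2).sqrt()   # std from E[x^2] - E[x]^2
    return mean, std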
https://stackoverflow.com/questions/65785995/
Why does nn.Conv1d work on 2d feature [b, c, h, w]?
I am wondering why conv1d works on a 2d feature map (batch, channel, height, width). An nn.Conv1d(channel, channel, kernel_size=(1,1)) works when I feed it a 2d feature map, but gives a different result from nn.Conv2d(channel, channel, kernel_size=1). I want to know why conv1d works here and what a 2d kernel size means in a 1d convolution.
"I want to know why conv1d works and what it mean by 2d kernel size in 1d convolution" It doesn't have any reason not to work. Under the hood all this "convolution" means is "Dot Product", now it could be between matrix and vector, matrix and matrix, vector and vector, etc. Simply put, the real distinction between 1D and 2D convolution is the freedom one has to move along the spatial dimension of input. This means If you look at 1D convolution, It can move along one direction only, that is, the temporal dimension of the input (Note the kernel could be a vector, matrix whatever that doesn't matter). On the other hand, 2D convolution has the freedom to move along 2 dimensions (height and width) of the input that is the spatial dimension. If it still seems confusing, have a look at the gifs below. 1D Convolution in action: Note: It's a 1D convolution with kernel size 3x3, look how it only moves down the input which is the temporal dimension. 2D Connvolution in action: Note: It's a 2D convolution with kernel size 3x3, look how it moves along both width and height of the input which is the spatial dimension. I think It's clear now what is the actual difference between 1D and 2D conv and why they both would produce different results for the same input.
https://stackoverflow.com/questions/65790139/
Functionality of `detach()` in PyTorch for this specific case
I know that detach() is used for detaching a variable from the computational graph. In that context, are the following expressions x = x - torch.mean(x, dim=0).detach() and x = x - torch.mean(x, dim=0) equivalent? I just want to subtract the mean out, don't want to pass gradients through the average calculation.
No, they are not equivalent. If you do not detach the mean, gradients also flow through the average computation, so you get lateral dependencies between all elements in the batch (dim=0) when estimating the gradient; detaching treats the mean as a constant, which is what you want if you only intend to subtract it out.
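A small sketch (with made-up values) that makes the difference visible:

import torch

x = torch.tensor([1., 2., 3.], requires_grad=True)
(x - torch.mean(x, dim=0)).sum().backward()
print(x.grad)  # tensor([0., 0., 0.]) -- the mean subtraction cancels the gradient

x = torch.tensor([1., 2., 3.], requires_grad=True)
(x - torch.mean(x, dim=0).detach()).sum().backward()
print(x.grad)  # tensor([1., 1., 1.]) -- the mean is treated as a constant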
https://stackoverflow.com/questions/65791620/
Computing loss along batch dimension in PyTorch
I have tensors of BxCxHxW where B is the batch. I am interested in how the loss is implemented in PyTorch when the batch size is more than 1. The below is what we usually do in PyTorch, l1_loss = torch.nn.L1Loss() loss = l1_loss(pred,gt) Is it that the loss is the average loss over the batch? If this is indeed the case, then are the following codes equivalent to what we usually do in PyTorch (the code above)? l1_loss = torch.nn.L1Loss() for idx in range(B): if idx==0: loss = l1_loss(pred[idx,:,:,:],gt[idx,:,:,:]) else: loss = l1_loss(pred[idx,:,:,:],gt[idx,:,:,:]) + loss loss = loss/B
The documentation describes the behavior of L1Loss: it is indeed (by default) the mean over all elements of the batch. You can change it easily to the sum instead: l1_loss = torch.nn.L1Loss(reduction='sum') Yes, your code is equivalent to what PyTorch does. A version without the call to L1Loss would be: # assuming pred and gt have shape (B, F) loss = torch.abs(pred - gt).mean(dim=1).mean(dim=0) # per-sample mean, then batch mean; equivalently torch.abs(pred - gt).mean()
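A quick numerical check of the equivalence (hypothetical shapes):

import torch
from torch import nn

B = 4
pred, gt = torch.rand(B, 3, 8, 8), torch.rand(B, 3, 8, 8)
l1_loss = nn.L1Loss()

# per-sample loss averaged over the batch, as in the question's loop
manual = sum(l1_loss(pred[i], gt[i]) for i in range(B)) / B
print(torch.allclose(l1_loss(pred, gt), manual))  # True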
https://stackoverflow.com/questions/65791976/
How to create tensor directly on GPU, or on device of another tensor?
I found this discussion about this, in which the code if std.is_cuda: eps = torch.FloatTensor(std.size()).cuda().normal_() else: eps = torch.FloatTensor(std.size()).normal_() becomes the nice eps = std.new().normal_() but it is said there that the new() method is deprecated. How do I create a new tensor directly on a specific device? How do I create a new tensor on the same device as another tensor, without the ugly if?
The documentation is quite clear about this now, I think. It describes the main ways to create a new tensor, and you just have to specify the device to create it on the GPU: t1 = torch.zeros((3,3), device=torch.device('cuda')) t2 = torch.ones_like(t1, device=torch.device('cuda')) t3 = torch.randn((3,5), device=torch.device('cuda')) And this link adds further information about the torch.tensor() constructor. Again, the device is an argument to be specified. If you want to use the device of another tensor, you can access it with tensor.device: t4 = torch.empty((2,2), device=t3.device)
https://stackoverflow.com/questions/65794673/
pytorch gathering from a 1d tensor
I have a long list of values stored in a 1d tensor from which I'd like to gather values. Given a 2d tensor where the second dimension denotes a set of indices, I would like to compute a new 2d tensor where the second dimension holds all the values given by those indices. Basically, I have a batch of indices and I use those indices to obtain a batch of values from the 1d tensor. I am new to pytorch but I managed to compute exactly that using pytorch's gather function list_values = torch.tensor([1, 2, 3, 4, 5, 6]) list_values = list_values.unsqueeze(0) list_values = list_values.expand((2, 6)) indices = torch.tensor([[1, 2], [2, 3]]) result = torch.gather(list_values, 1, indices) This works perfectly fine and gives the correct result. However, if I am not mistaken, the expand operation is quite expensive in terms of memory as the number of elements in "list_values" grows. Is there a better solution?
Nope — take a look at the documentation: "Expanding a tensor does not allocate new memory, but only creates a new view on the existing tensor where a dimension of size one is expanded to a larger size by setting the stride to 0. Any dimension of size 1 can be expanded to an arbitrary value without allocating new memory." Your solution is completely fine.
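You can verify that no copy is made by inspecting the strides and the underlying storage:

import torch

x = torch.tensor([1, 2, 3, 4, 5, 6]).unsqueeze(0)  # shape (1, 6)
y = x.expand(2, 6)
print(x.stride(), y.stride())   # (6, 1) (0, 1) -- stride 0 means the row is reused, not copied
print(y.data_ptr() == x.data_ptr())  # True: same underlying memory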
https://stackoverflow.com/questions/65794917/
GCP AI Platform: Error when creating a custom predictor model version ( trained model Pytorch model + torchvision.transform)
I am currently trying to deploy a custom model to AI Platform by following https://cloud.google.com/ai-platform/prediction/docs/deploying-models#gcloud_1; it is based on a combination of a pre-trained model from PyTorch and torchvision.transform. Currently, I keep getting the error below, which happens to be related to the 500MB constraint on custom prediction. ERROR: (gcloud.beta.ai-platform.versions.create) Create Version failed. Bad model detected with error: Model requires more memory than allowed. Please try to decrease the model size and re-deploy. If you continue to experience errors, please contact support. Setup.py from setuptools import setup from pathlib import Path base = Path(__file__).parent REQUIRED_PACKAGES = [line.strip() for line in open(base/"requirements.txt")] print(f"\nPackages: {REQUIRED_PACKAGES}\n\n") # [torch==1.3.0,torchvision==0.4.1, ImageHash==4.2.0 # Pillow==6.2.1,pyvis==0.1.8.2] installs 800mb worth of files setup(description="Extract features of a image", author=, name='test', version='0.1', install_requires=REQUIRED_PACKAGES, project_urls={ 'Documentation':'https://cloud.google.com/ai-platform/prediction/docs/custom-prediction-routines#tensorflow', 'Deploy':'https://cloud.google.com/ai-platform/prediction/docs/deploying-models#gcloud_1', 'Ai_platform troubleshooting':'https://cloud.google.com/ai-platform/training/docs/troubleshooting', 'Say Thanks!': 'https://medium.com/searce/deploy-your-own-custom-model-on-gcps-ai-platform- 7e42a5721b43', 'google Torch wheels':"http://storage.googleapis.com/cloud-ai-pytorch/readme.txt", 'Torch & torchvision wheels':"https://download.pytorch.org/whl/torch_stable.html " }, python_requires='~=3.7', scripts=['predictor.py', 'preproc.py']) Steps taken: I tried adding 'torch' and torchvision directly to the 'REQUIRED_PACKAGES' list in the setup.py file in order to provide PyTorch + torchvision as a dependency to be installed during deployment. I am guessing that internally AI Platform downloads the PyPI package for PyTorch, which is 500+ MB; this results in the failure of our model deployment. If I deploy the model with 'torch' only, it seems to work (and of course throws an error about not being able to find the library 'torchvision'). File sizes: pytorch (torch-1.3.1+cpu-cp37-cp37m-linux_x86_64.whl, about 111MB) and torchvision (torchvision-0.4.1+cpu-cp37-cp37m-linux_x86_64.whl, about 46MB) from https://download.pytorch.org/whl/torch_stable.html, stored on GCS. The zipped predictor model file (.tar.gz format), which is the output of setup.py (5kb). A trained PyTorch model (size 44MB). In total, the model dependencies should be less than 250MB, but I still keep getting this error. I have also tried to use the torch and torchvision provided from the Google mirrored packages http://storage.googleapis.com/cloud-ai-pytorch/readme.txt, but the same memory issue persists. AI Platform is quite new for us and we would like some input from professionals.
MORE INFO: GCP CLI Input: My environment variables: BUCKET_NAME= "something" MODEL_DIR=<> VERSION_NAME='v6' MODEL_NAME="something_model" STAGING_BUCKET=$MODEL_DIR<> # TORCH_PACKAGE=$MODEL_DIR"package/torch-1.3.1+cpu-cp37-cp37m-linux_x86_64.whl" # TORCHVISION_PACKAGE=$MODEL_DIR"package/torchvision-0.4.1+cpu-cp37-cp37m-linux_x86_64.whl" TORCH_PACKAGE=<> TORCHVISION_PACKAGE=<> CUSTOM_CODE_PATH=$STAGING_BUCKET"imt_ai_predict-0.1.tar.gz" PREDICTOR_CLASS="predictor.MyPredictor" REGION=<> MACHINE_TYPE='mls1-c4-m2' gcloud beta ai-platform versions create $VERSION_NAME \ --model=$MODEL_NAME \ --origin=$MODEL_DIR \ --runtime-version=2.3 \ --python-version=3.7 \ --machine-type=$MACHINE_TYPE \ --package-uris=$CUSTOM_CODE_PATH,$TORCH_PACKAGE,$TORCHVISION_PACKAGE \ --prediction-class=$PREDICTOR_CLASS \ GCP CLI Output: [1] global [2] asia-east1 [3] asia-northeast1 [4] asia-southeast1 [5] australia-southeast1 [6] europe-west1 [7] europe-west2 [8] europe-west3 [9] europe-west4 [10] northamerica-northeast1 [11] us-central1 [12] us-east1 [13] us-east4 [14] us-west1 [15] cancel Please enter your numeric choice: 1 To make this the default region, run `gcloud config set ai_platform/region global`. Using endpoint [https://ml.googleapis.com/] Creating version (this might take a few minutes)......failed. ERROR: (gcloud.beta.ai-platform.versions.create) Create Version failed. Bad model detected with error: Model requires more memory than allowed. Please try to decrease the model size and re-deploy. If you continue to experience errors, please contact support. My findings: I have found articles about people struggling in the same way with the PyTorch package, who made it work by installing the torch wheels from GCS (https://medium.com/searce/deploy-your-own-custom-model-on-gcps-ai-platform- 7e42a5721b43). I have tried the same approach with torch and torchvision, but no luck so far, and I am waiting for a response from "[email protected] [email protected]". Any help on getting a custom torch/torchvision-based predictor working on AI Platform would be great.
I got this fixed by a combination of a few things. I stuck to the 4GB CPU MLS1 machine and a custom predictor routine (<500MB). Install the libraries using the setup.py parameters, but instead of passing just the package name and its version, add the correct torch wheel (ideally <100 MB): REQUIRED_PACKAGES = [line.strip() for line in open(base/"requirements.txt")] +\ ['torchvision==0.5.0', 'torch @ https://download.pytorch.org/whl/cpu/torch-1.4.0%2Bcpu-cp37-cp37m-linux_x86_64.whl'] I reduced the preprocessing steps taken; I couldn't fit all of them in, so jsonify your SEND response and GET one from both preproc.py and predictor.py: import json json.dump(your data to send to predictor class) Import only the functions you need from a library, e.g.: from torch import zeros,load your code [Important] I haven't tested different types of serialized objects for the trained model; there could be a difference there as well regarding which one (torch.save, pickle, joblib, etc.) is memory saving. I found this link for those whose organization is a partner with GCP, who might be able to request more quota (I am guessing from 500MB to 2GB or so). I didn't have to go in this direction as my issue was resolved before that (and other issues popped up): https://cloud.google.com/ai-platform/training/docs/quotas
https://stackoverflow.com/questions/65795374/
Numpy/PyTorch funny tensor product
I've got a 4-dimensional torch tensor parameter defined like this: nn.parameter.Parameter(data=torch.empty(13, 13, 13, 13), requires_grad=True) (note: torch.Tensor((13,13,13,13)) would create a 1-D tensor of values, not a 4-D tensor) and four tensors with dims (batch_size, 13) (or one tensor with dims (batch_size, 4, 13)). I'd like to get a tensor with dims (batch_size,) equal to the formula from a picture that is omitted here; it reads B_b = sum_{i,j,k,l} A_{ijkl} a_{bi} b_{bj} c_{bk} d_{bl}. I've seen the function tensordot in the torch documentation, but I can't manage to make it work by myself.
Whenever you have a funny tensor product, torch.einsum (or numpy.einsum) is your friend: batch_size = 5 A = torch.rand(13, 13, 13, 13) a = torch.rand(batch_size, 13) b = torch.rand(batch_size, 13) c = torch.rand(batch_size, 13) d = torch.rand(batch_size, 13) B = torch.einsum('ijkl,bi,bj,bk,bl->b', A, a, b, c, d)
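As a sanity check (a sketch reusing the variables above), the einsum agrees with an explicit per-sample computation:

naive = torch.stack([
    torch.einsum('ijkl,i,j,k,l->', A, a[n], b[n], c[n], d[n])
    for n in range(batch_size)
])
print(torch.allclose(B, naive, atol=1e-4))  # True, up to float summation order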
https://stackoverflow.com/questions/65797216/
Subsetting a two dimensional tensor with a one dimensional tensor
I want to extract from each row of a two-dimensional tensor the column that is stored in another one-dimensional tensor. import torch test_tensor = torch.tensor([[1, -2, 3], [-2, 7, 4]]).float() select_tensor = torch.tensor([1, 2]) So in this particular example I would like to get the element in position 1 for the first row (so -2) and the element in position 2 for the second row (so 4). I tried: test_tensor[:, select_tensor] But this selects the elements at positions 1 and 2 for each row. I suspect it might be something very simple that I am missing.
You can use torch.gather: import torch test_tensor = torch.tensor([[1,-2,3], [-2,7,4]]).float() select_tensor = torch.tensor([1,2], dtype=torch.int64).view(-1,1) # the number of dimensions should match the test tensor final_tensor = torch.gather(test_tensor, 1, select_tensor) final_tensor outputs tensor([[-2.], [ 4.]]), or use .view to flatten the output tensor: final_tensor.view(-1) will give you tensor([-2., 4.])
https://stackoverflow.com/questions/65798656/
PyTorch: "ValueError: can't optimize a non-leaf Tensor" after changing pretrained model from 3 RGB Channels to 4 Channels
I have been trying to change the pretrained PyTorch Densenet's first conv layer from 3 channels to 4 channels while maintaining its original RGB channels' pretrained weights. I have written the following code, but the optimizer part throws this error: "ValueError: can't optimize a non-leaf Tensor". import torchvision.models as models import torch.nn as nn backbone = models.__dict__['densenet169'](pretrained=True) weight1 = backbone.features.conv0.weight.data.clone() new_first_layer = nn.Conv2d(4, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False) with torch.no_grad(): new_first_layer.weight[:,:3] = weight1 backbone.features.conv0 = new_first_layer optimizer = torch.optim.SGD(backbone.parameters(), 0.001, weight_decay=0.1) # Changing this optimizer from SGD to ADAM I have also tried removing the with torch.no_grad(): context manager, but the issue still remains: ValueError Traceback (most recent call last) <ipython-input-343-5fc87352da04> in <module>() 11 backbone.features.conv0 = new_first_layer 12 optimizer = torch.optim.SGD(res.parameters(), 0.001, ---> 13 weight_decay=0.1) # Changing this optimizer from SGD to ADAM ~/anaconda3/envs/detectron2/lib/python3.6/site-packages/torch/optim/sgd.py in __init__(self, params, lr, momentum, dampening, weight_decay, nesterov) 66 if nesterov and (momentum <= 0 or dampening != 0): 67 raise ValueError("Nesterov momentum requires a momentum and zero dampening") ---> 68 super(SGD, self).__init__(params, defaults) 69 70 def __setstate__(self, state): ~/anaconda3/envs/detectron2/lib/python3.6/site-packages/torch/optim/optimizer.py in __init__(self, params, defaults) 50 51 for param_group in param_groups: ---> 52 self.add_param_group(param_group) 53 54 def __getstate__(self): ~/anaconda3/envs/detectron2/lib/python3.6/site-packages/torch/optim/optimizer.py in add_param_group(self, param_group) 231 "but one of the params is " + torch.typename(param)) 232 if not param.is_leaf: --> 233 raise ValueError("can't optimize a non-leaf Tensor") 234 235 for name, default in self.defaults.items(): ValueError: can't optimize a non-leaf Tensor My PyTorch version is 1.7.0. Could you guys please help? Thanks a lot! Regards.
I think I have resolved this problem!: import torch import torchvision.models as models import torch.nn as nn from torch.autograd import Variable backbone = models.__dict__['densenet169'](pretrained=True) weight1 = backbone.features.conv0.weight.clone() new_first_layer = nn.Conv2d(4, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False).requires_grad_() new_first_layer.weight[:,:3,:,:].data[...] = Variable(weight1, requires_grad=True) backbone.features.conv0 = new_first_layer optimizer = torch.optim.SGD(backbone.parameters(), 0.001, weight_decay=0.1)
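As a quick sanity check (a sketch continuing the snippet above), you can verify that the new weight is a leaf tensor — which is what the optimizer requires — and that the RGB channels kept their pretrained values:

assert backbone.features.conv0.weight.is_leaf
assert torch.equal(backbone.features.conv0.weight[:, :3], weight1)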
https://stackoverflow.com/questions/65801034/
output prediction of pytorch lightning model
This is potentially a very easy question. I just started with PyTorch lightning and can't figure out how to receive the output of my model after training. I am interested in both predictions of y_train and y_test as an array of some sort (PyTorch tensor or NumPy array in a later step) to plot next to the labels using different scripts. dataset = Dataset(train_tensor) val_dataset = Dataset(val_tensor) training_generator = torch.utils.data.DataLoader(dataset, **train_params) val_generator = torch.utils.data.DataLoader(val_dataset, **val_params) mynet = Net(feature_len) trainer = pl.Trainer(gpus=0,max_epochs=max_epochs, logger=logger, progress_bar_refresh_rate=20, callbacks=[early_stop_callback], num_sanity_val_steps=0) trainer.fit(mynet) In my lightning module I have the functions: def __init__(self, random_inputs): def forward(self, x): def train_dataloader(self): def val_dataloader(self): def training_step(self, batch, batch_nb): def training_epoch_end(self, outputs): def validation_step(self, batch, batch_nb): def validation_epoch_end(self, outputs): def configure_optimizers(self): Do I need a specific predict function or is there any already implemented way I don't see?
You can use the predict method as well. Here is an example from the documentation: https://pytorch-lightning.readthedocs.io/en/latest/starter/introduction_guide.html class LitMNISTDreamer(LightningModule): def forward(self, z): imgs = self.decoder(z) return imgs def predict_step(self, batch, batch_idx: int , dataloader_idx: int = None): return self(batch) model = LitMNISTDreamer() trainer.predict(model, datamodule)
https://stackoverflow.com/questions/65807601/
How to use k channels in CNN for k FC Layers
I have an encoder, which outputs a tensor with shape (bn, c * k, 32, 32). I now want to produce k means with shape (bn, k, 1, 2). So the means are 2-dim coordinates. To do so, I want to use k FC layers, while for each mean k_i I only want to use c channels. So my idea is that I reshape the encoder output out to a 5d tensor with shape (bn, k, c, 32, 32). Then I can use the flattened out[:, 0] ... out[:, k] as input for the k linear layers. The trivial solution would be to define the linear layers manually: self.fc0 = nn.Linear(c * 32 * 32, 2) ... self.fck = nn.Linear(c * 32 * 32, 2) Then I could define the forward pass for each mean as follows: mean_0 = self.fc0(out[:, 0].reshape(bn, -1)) ... mean_k = self.fck(out[:, k].reshape(bn, -1)) Is there a more efficient way to do that?
I believe you are looking for a grouped convolution. You can let axis=1 have k*c tensors, so the input shape is (bn, k*c, 32, 32). Then use an nn.Conv2d convolution layer with 2*k filters and set it to use k groups, so it is not fully connected channel-wise (only k groups of c maps: it convolves c channels at a time). >>> bn = 1; k = 5; c = 3 >>> x = torch.rand(bn, k*c, 32, 32) >>> m = nn.Conv2d(in_channels=c*k, out_channels=2*k, kernel_size=32, groups=k) >>> m(x).shape torch.Size([1, 10, 1, 1]) Which you can then reshape to your liking. In terms of number of parameters, a typical nn.Conv2d usage would be: >>> m = nn.Conv2d(in_channels=c*k, out_channels=2*k, kernel_size=32) >>> sum(layer.numel() for layer in m.parameters()) 153610 Which is exactly c*k*2*k*32*32 weights, plus 2*k biases. In your case, you would have >>> m = nn.Conv2d(in_channels=c*k, out_channels=2*k, kernel_size=32, groups=k) >>> sum(layer.numel() for layer in m.parameters()) 30730 Which is exactly c*2*k*32*32 weights, plus 2*k biases, i.e. k times less than the previous layer. A given filter has only c input channels (not k*c), which means it operates on an input with c channels (i.e. one of the k groups containing c maps).
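To get the k 2-dim means in the (bn, k, 1, 2) shape asked for, you can then reshape the grouped-conv output (a sketch continuing the snippet above):

means = m(x).reshape(bn, k, 1, 2)  # (1, 5, 1, 2): one (1, 2) coordinate per head
print(means.shape)  # torch.Size([1, 5, 1, 2])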
https://stackoverflow.com/questions/65808295/
What is the best way to create augmentation on image dataset while training instance segmentation?
Background: I am using the YOLACT instance segmentation model to train on a set of images. The dataset size is very small (~20 images). The model doesn't converge properly (of course, given the dataset size). I wanted to increase the dataset size by adding some augmented images. I know we have various image augmentation techniques and packages like imgaug, albumentations, OpenCV, etc., but I need the image & annotation file (COCO JSON) format to train the model. So my question is: is there a package that helps me automatically generate the annotations of augmented images, or is there a better way to address my issue? Thank you in advance for your help!
Simple Copy-Paste is a strong data augmentation method for instance segmentation related tasks; see the research paper "Simple Copy-Paste is a Strong Data Augmentation Method for Instance Segmentation" (unofficial GitHub implementations are available). Albumentations and TorMentor are also useful libraries for data augmentation.
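As an illustration of how such libraries keep annotations in sync with the augmented image, here is a minimal albumentations sketch (the image, masks, bboxes, and category_ids variables are hypothetical); masks and boxes passed alongside the image receive the same geometric transforms, so the augmented annotations can be re-serialized to COCO JSON afterwards:

import albumentations as A

transform = A.Compose(
    [A.HorizontalFlip(p=0.5), A.RandomBrightnessContrast(p=0.2)],
    bbox_params=A.BboxParams(format='coco', label_fields=['category_ids']),
)
out = transform(image=image, masks=masks, bboxes=bboxes, category_ids=category_ids)
# the transformed annotations come back aligned with the transformed image
aug_image, aug_masks, aug_bboxes = out['image'], out['masks'], out['bboxes']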
https://stackoverflow.com/questions/65812100/
understanding how PyTorch Linear works
I am considering the sample code from the documentation: import torch from torch import nn m = nn.Linear(20, 30) input = torch.randn(128, 20) output = m(input) print(output.size()) The output is: torch.Size([128, 30]) The constructor of Linear is: def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None: This is consistent with the way the instance is created, i.e.: m = nn.Linear(20, 30) However, when m is used, it receives a tensor output = m(input) as input. I do not understand why. Where is this tensor defined in the source code?
When you do m(input), the __call__ (what is __call__?) method is called, which internally calls the forward method and does other stuff. This logic is written in the base class: nn.Module. For simplicity, assume for now that doing m(input) is equivalent to m.forward(input). And what's the input to forward? A tensor: def forward(self, input: Tensor) -> Tensor
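A small sketch making this concrete (in practice always call m(input), since __call__ also runs hooks):

import torch
from torch import nn

m = nn.Linear(20, 30)
x = torch.randn(128, 20)
print(torch.equal(m(x), m.forward(x)))  # True: __call__ dispatches to forward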
https://stackoverflow.com/questions/65812199/
Changing Dropout value during training
How can I change Dropout during training? For example Dropout = [0.1, 0.2, 0.3]. I tried passing it as a list but I couldn't make it work.
To change the dropout probability during training, you should use the functional version, i.e. torch.nn.functional.dropout. The input arguments to the functional version of dropout are: the input tensor, the dropout probability (which you can alter), a boolean to indicate if it is in training mode (you can use self.training), and a flag to indicate if you want the operation to be performed in place. Thus, you can alter the probability of the dropout in your forward method, according to your needs. For example, you can do in your forward method: def forward(self, x): ... # apply some layers to the input h = self.my_layers(x) # set the value of p p = self.get_value_for_p() # apply dropout with new p h = torch.nn.functional.dropout(h, p, self.training) ... More on the functional version of dropout here: https://pytorch.org/docs/stable/nn.functional.html#dropout-functions
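Alternatively (a sketch): nn.Dropout reads its p attribute at every forward call, so you can also mutate it between epochs without switching to the functional API:

from torch import nn

drop = nn.Dropout(p=0.1)
for epoch, p in enumerate([0.1, 0.2, 0.3]):  # hypothetical schedule
    drop.p = p  # nn.Dropout's forward uses self.p, so this takes effect immediately
    # ... run the training epoch ...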
https://stackoverflow.com/questions/65813108/
Calling .backward() function for two different neural networks but getting retain_graph=True error
I have an Actor Critic neural network where the Actor is its own class and the Critic is its own class with its own neural network and .forward() function. I then create an object of each of these classes in a larger Model class. My setup is as follows: self.actor = Actor().to(device) self.actor_opt = optim.Adam(self.actor.parameters(), lr=lr) self.critic = Critic().to(device) self.critic_opt = optim.Adam(self.critic.parameters(), lr=lr) I then calculate two different loss functions and want to update each neural network separately. For the critic: loss_critic = F.smooth_l1_loss(value, expected) self.critic_opt.zero_grad() loss_critic.backward() self.critic_opt.step() and for the actor: loss_actor = -self.critic(state, action) self.actor_opt.zero_grad() loss_actor.backward() self.actor_opt.step() However, when doing this, I get the following error: RuntimeError: Trying to backward through the graph a second time, but the saved intermediate results have already been freed. Specify retain_graph=True when calling backward the first time. When reading up on this, I understood that I only need to set retain_graph=True when calling backward twice on the same network, and in most cases this is not good to set to True as I will run out of GPU memory. Moreover, when I comment out one of the .backward() functions, the error goes away, leading me to believe that for some reason the code is thinking that both backward() functions are being called on the same neural network, even though I think I am doing it separately. What could be the reason for this? Is there a way to specify which neural network I am calling the backward function on? Edit: For reference, the optimize() function in this code here https://github.com/wudongming97/PyTorch-DDPG/blob/master/train.py uses backward() twice with no issue (I've cloned the repo and tested it). I'd like my code to operate similarly where I backprop through critic and actor separately.
Yes, you shouldn't do it like that. What you should do instead is propagate through parts of the graph. What the graph contains: right now, the graph contains both the actor and the critic. If the computations pass through the same part of the graph (say, twice through the actor), it will raise this error. And they will, as you clearly use the actor and the critic joined in the loss value (this line: loss_actor = -self.critic(state, action)). Different optimizers do not change anything here, as it's a backward problem (optimizers simply apply calculated gradients onto models). Trying to fix it: this is how to fix it in GANs, but not in this case — see the Actual solution paragraph below; read on if you are curious about the topic. If part of a neural network (the critic in this case) does not take part in the current optimization step, it should be treated as a constant (and vice versa). To do that, you could disable gradients using the torch.no_grad context manager and set the critic to eval mode, something along those lines: self.critic.eval() with torch.no_grad(): loss_actor = -self.critic(state, action) ... But here is the problem: we are turning off gradients (tape recording) for action and breaking the graph, hence this is not a viable solution. Actual solution: it is much simpler than you think; one can see it in PyTorch's repository as well. Do not backpropagate after each critic/actor loss: calculate all losses (for both the critic and the actor), sum them together, zero_grad both optimizers, backpropagate with this summed value, and call critic_optimizer.step() and actor_optimizer.step() at that point. Something along those lines: self.critic_opt.zero_grad() self.actor_opt.zero_grad() loss_critic = F.smooth_l1_loss(value, expected) loss_actor = -self.critic(state, action) total_loss = loss_actor + loss_critic total_loss.backward() self.critic_opt.step() self.actor_opt.step()
https://stackoverflow.com/questions/65815598/
Having trouble installing PyTorch
I'm having trouble installing PyTorch. C:\Users\myself>pip install torch Collecting torch Using cached torch-1.7.1-cp38-cp38-win_amd64.whl (184.0 MB) Requirement already satisfied: numpy in c:\users\myself\appdata\local\packages\pythonsoftwarefoundation.python.3.8_qbz5n2kfra8p0\localcache\local-packages\python38\site-packages (from torch) (1.19.1) Requirement already satisfied: typing-extensions in c:\users\myself\appdata\local\packages\pythonsoftwarefoundation.python.3.8_qbz5n2kfra8p0\localcache\local-packages\python38\site-packages (from torch) (3.7.4.3) Installing collected packages: torch ERROR: Could not install packages due to an EnvironmentError: [Errno 2] No such file or directory: 'C:\\Users\\myself\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.8_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python38\\site-packages\\caffe2\\python\\serialized_test\\data\\operator_test\\learning_rate_adaption_op_test.test_learning_rate_adaption_op_normalization.zip' WARNING: You are using pip version 20.2.4; however, version 20.3.3 is available. You should consider upgrading via the 'C:\Users\myself\AppData\Local\Microsoft\WindowsApps\PythonSoftwareFoundation.Python.3.8_qbz5n2kfra8p0\python.exe -m pip install --upgrade pip' command. It says that I'm missing a file in caffe2, but I'm not sure how I can get that file back.
The torch wheel contains the caffe2 directory. 1. Try the --no-cache-dir option: pip install --no-cache-dir torch 2. Remove the Windows MAX_PATH limitation, as described in the "Using Python on Windows" section of the Python 3.7.9 documentation. You can find some more detailed discussion in the linked threads.
https://stackoverflow.com/questions/65815632/
Python calculate phash from an image located at a url
I want to calculate the phash of about 10,000,000 pictures, for which I only have the URL where they are located. I know how to download a picture and then calculate the phash after that, but I always have to save the picture first. Is it possible to download the picture and calculate the phash without saving it, or is it even possible to not download the picture at all and calculate the phash just using the URL? This is my code to download the first ten pictures and calculate the phash: folder, pic_savefolder = 'data', 'data/pictures' file = 'external-asset-url-clean.csv' path = os.path.join(folder,file) df = pd.read_csv(path, header=None, names=["URL"]) counter = 0 hashes = set() headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"} for image_url in df['URL']: filename = image_url.split('/')[-1] try: r = requests.get(image_url, allow_redirects=False,verify=False, headers=headers) pathlong = os.path.join(pic_savefolder, filename) with open(pathlong,"wb") as f: f.write(r.content) hash = imagehash.phash(Image.open(pathlong)) hashes.add((hash)) counter += 1 if counter > 10: break except Exception as e: print(e) print("\n")
Instead of writing to a file, you can pass the contents directly if you use the .raw property instead of the .content one. Here is how that looks in code: image_data = Image.open(requests.get(image_url, stream=True).raw) hash = imagehash.phash(image_data)
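If you run into trouble with the raw stream (it bypasses requests' content decoding), an equivalent sketch that buffers the downloaded bytes in memory instead (image_url and headers as in the question's loop):

from io import BytesIO

from PIL import Image
import imagehash
import requests

r = requests.get(image_url, headers=headers)
image_data = Image.open(BytesIO(r.content))  # no file on disk
hash = imagehash.phash(image_data)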
https://stackoverflow.com/questions/65818260/
Is there a function in PyTorch for matrix left division?
MATLAB has the backslash "\" operator. SciPy has "lsqr." Does PyTorch have an equivalent operator that solves systems of linear equations? Specifically, I need to solve the matrix equation A*X = B for A, and I need autograd to be able to backpropagate the error through the operation.
There is no \ operator in Python. The closest you will get is SciPy's implementation: scipy.sparse.linalg.lsqr. You can either use torch.solve to solve linear equations of shape AX=B, or torch.lstsq to solve the least-squares problem min ||AX-B||_2 (if A.size(0) >= A.size(1)) or the least-norm problem min ||X||_2 such that AX=B (if A.size(0) < A.size(1)). For solving XA=B you would use the transposed matrices: def lsqrt(A, B): XT, _ = torch.solve(B.T, A.T) return XT.T
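A quick usage sketch of the lsqrt helper above (shapes hypothetical, assuming import torch):

A = torch.rand(4, 4)
B = torch.rand(3, 4)
X = lsqrt(A, B)
print(torch.allclose(X @ A, B, atol=1e-5))  # True: X solves XA = B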
https://stackoverflow.com/questions/65819399/
Process output data from YOLOv5 TFlite
Hi, I have successfully trained a custom model based on YOLOv5s and converted the model to TFlite. I feel silly asking, but how do you use the output data? I get as output: StatefulPartitionedCall: 0 = [1,25200,7] from the converted YOLOv5 model (Netron screenshot of the YOLOv5s.tflite model omitted). But I expect an output like: StatefulPartitionedCall:3 = [1, 10, 4] # boxes StatefulPartitionedCall:2 = [1, 10] # classes StatefulPartitionedCall:1 = [1, 10] #scores StatefulPartitionedCall:0 = [1] #count (this one is from a tensorflow lite mobilenet model, trained to give 10 output data, the default for tflite; Netron screenshot of the mobilenet.tflite model omitted). It may also be some other form of output, but I honestly have no idea how to get the boxes, classes, and scores from a [1,25200,7] array. (On 15-January-2021 I updated pytorch, tensorflow and yolov5 to the latest version.) The data contained in the [1, 25200, 7] array can be found in this file: outputdata.txt 0.011428807862102985, 0.006756599526852369, 0.04274776205420494, 0.034441519528627396, 0.00012877583503723145, 0.33658933639526367, 0.4722323715686798 0.023071227595210075, 0.006947836373001337, 0.046426184475421906, 0.023744791746139526, 0.0002465546131134033, 0.29862138628959656, 0.4498370885848999 0.03636947274208069, 0.006819264497607946, 0.04913407564163208, 0.025004519149661064, 0.00013208389282226562, 0.3155967593193054, 0.4081345796585083 0.04930267855525017, 0.007249316666275263, 0.04969717934727669, 0.023645592853426933, 0.0001222355494974181, 0.3123127520084381, 0.40113094449043274 ... Should I add a Non Max Suppression or something else? Can someone help me please? (github YOLOv5 #1981)
Thanks to @Glenn Jocher I found the solution. The output is [xywh, conf, class0, class1, ...] My current code is now: def classFilter(classdata): classes = [] # create a list for i in range(classdata.shape[0]): # loop through all predictions classes.append(classdata[i].argmax()) # get the best classification location return classes # return classes (int) def YOLOdetect(output_data): # input = interpreter, output is boxes(xyxy), classes, scores output_data = output_data[0] # x(1, 25200, 7) to x(25200, 7) boxes = np.squeeze(output_data[..., :4]) # boxes [25200, 4] scores = np.squeeze( output_data[..., 4:5]) # confidences [25200, 1] classes = classFilter(output_data[..., 5:]) # get classes # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right x, y, w, h = boxes[..., 0], boxes[..., 1], boxes[..., 2], boxes[..., 3] #xywh xyxy = [x - w / 2, y - h / 2, x + w / 2, y + h / 2] # xywh to xyxy [4, 25200] return xyxy, classes, scores # output is boxes(x,y,x,y), classes(int), scores(float) [predictions length] To get the output data: """Output data""" output_data = interpreter.get_tensor(output_details[0]['index']) # get tensor x(1, 25200, 7) xyxy, classes, scores = YOLOdetect(output_data) #boxes(x,y,x,y), classes(int), scores(float) [25200] And for the boxes: for i in range(len(scores)): if ((scores[i] > 0.1) and (scores[i] <= 1.0)): H = frame.shape[0] W = frame.shape[1] xmin = int(max(1,(xyxy[0][i] * W))) ymin = int(max(1,(xyxy[1][i] * H))) xmax = int(min(W,(xyxy[2][i] * W))) # clamp x coordinates to the width ymax = int(min(H,(xyxy[3][i] * H))) # clamp y coordinates to the height cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2) ...
https://stackoverflow.com/questions/65824714/
torch row wise cosinus similarity
Assuming I have 2 tensors with dimensions: tensor A: [512, 100] and tensor B: [512, 200, 100], I would like to use torch.cosine_similarity to produce a tensor C of dimensions [512, 1, 200] or [512, 200]. Using torch.cosine_similarity(A, B) I get an error: '{RuntimeError}The size of tensor a (512) must match the size of tensor b (200) at non-singleton dimension 1' I guess it can be done by the following: desired_result = torch.stack([torch.cosine_similarity(A_row, B_row, axis=-1) for B_row, A_row in zip(B, A)]) but there should be a more optimized way. Any help/hint?
No need for stacking, broadcasting will do the job for you: A = torch.randn(512, 100) B = torch.randn(512, 200, 100) # A.unsqueeze(1) has shape (512, 1, 100), which broadcasts against B's (512, 200, 100) # result: (512, 200) torch.cosine_similarity(B, A.unsqueeze(1), dim=-1)
https://stackoverflow.com/questions/65825564/
Is there a `Split` equivalent to torch.nn.Sequential?
A sample code for a Sequential block is self._encoder = nn.Sequential( # 1, 28, 28 nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3, stride=3, padding=1), # 32, 10, 10 = 16, (1//3)(28 + 2 * 1 - 3) + 1, (1//3)(28 + 2*1 - 3) + 1 nn.ReLU(True), nn.MaxPool2d(kernel_size=2, stride=2), # 32, 5, 5 nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=2, padding=1), # 64, 3, 3 nn.ReLU(True), nn.MaxPool2d(kernel_size=2, stride=1), # 64, 2, 2 ) Is there some construct like nn.Sequential that puts modules in it in parallel? I would like to now define something like self._mean_logvar_layers = nn.Parallel( nn.Conv2d(in_channels=64, out_channels=64, kernel_size=2, stride=1, padding=0), nn.Conv2d(in_channels=64, out_channels=64, kernel_size=2, stride=1, padding=0), ) Whose output should be two pipes of data - one for each element in self._mean_logvar_layers which are then feedable to the rest of the network. Kind of like a multi-headed network. My current implementation: self._mean_layer = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=2, stride=1, padding=0) self._logvar_layer = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=2, stride=1, padding=0) and def _encode(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: for i, layer in enumerate(self._encoder): x = layer(x) mean_output = self._mean_layer(x) logvar_output = self._logvar_layer(x) return mean_output, logvar_output I would like to treat the parallel construct as a layer. Is that doable in PyTorch?
Sequential split What you can do is create a Parallel module (though I would name it differently as it implies this code actually runs in parallel, probably Split would be a good name) like this: class Parallel(torch.nn.Module): def __init__(self, *modules: torch.nn.Module): super().__init__() self.modules = modules def forward(self, inputs): return [module(inputs) for module in self.modules] Now you can define it as you wanted: self._mean_logvar_layers = Parallel( nn.Conv2d(in_channels=64, out_channels=64, kernel_size=2, stride=1, padding=0), nn.Conv2d(in_channels=64, out_channels=64, kernel_size=2, stride=1, padding=0), ) And use it like this: mean, logvar = self._mean_logvar_layers(x) One layer and split As suggested by @xdurch0 we could use a single layers and split across channels instead, using this module: class Split(torch.nn.Module): def __init__(self, module, parts: int, dim=1): super().__init__() self.parts self.dim = dim self.module = module def forward(self, inputs): output = self.module(inputs) chunk_size = output.shape[self.dim] // self.parts return torch.split(output, chunk_size, dim=self.dim) This inside your neural network (notice 128 channels, those will be split into 2 parts, each of size 64): self._mean_logvar_layers = Split( nn.Conv2d(in_channels=64, out_channels=128, kernel_size=2, stride=1, padding=0), parts=2, ) And use it like previously: mean, logvar = self._mean_logvar_layers(x) Why this approach? Everything will be done in one swoop instead of sequentially, hence faster, but might be too wide if you don't have enough GPU memory. Can it work with Sequential? Yes, it is still a layer. But next layer has to work with tuple(torch.Tensor, torch.Tensor) as inputs. Sequential is also a layer, quite simple one, let's see forward: def forward(self, inp): for module in self: inp = module(inp) return inp It just passes output from previous model to the next and that's it.
https://stackoverflow.com/questions/65831101/
How to load a trained model to inference the predicted data
I trained and saved CNN model for 1000 epochs and now want to retrieve validation data (predicted images). In code below test_pred and test_real outputs predicted and real images in validation set. Should I load and run the saved model for another 1 epoch to retrieve predicted images (this will end in CUDA out of memory since data are huge)? Or there are other ways? You can see part of my code below: for epoch in range(epochs): mse_train_losses= [] mae_train_losses = [] N_train = [] mse_val_losses = [] mae_val_losses = [] N_test = [] if save_model: if epoch % 50 ==0: checkpoint = {'state_dict' : model.state_dict(),'optimizer' : optimizer.state_dict()} save_checkpoint(checkpoint) model.train() for data in train_loader: x_train_batch, y_train_batch = data[0].to(device, dtype=torch.float), data[1].to(device, dtype=torch.float) y_train_pred = model(x_train_batch) # 1) Forward pass mse_train_loss = criterion(y_train_batch, y_train_pred, x_train_batch, mse) mae_train_loss = criterion(y_train_batch, y_train_pred, x_train_batch, l1loss) optimizer.zero_grad() mse_train_loss.backward() optimizer.step() mse_train_losses.append(mse_train_loss.item()) mae_train_losses.append(mae_train_loss.item()) N_train.append(len(x_train_batch)) test_pred=[] test_real=[] model.eval() with torch.no_grad(): for data in test_loader: x_test_batch, y_test_batch = data[0].to(device, dtype=torch.float), data[1].to(device, dtype=torch.float) y_test_pred = model(x_test_batch) mse_val_loss = criterion(y_test_batch, y_test_pred, x_test_batch, mse) mae_val_loss = criterion(y_test_batch, y_test_pred, x_test_batch, l1loss) mse_val_losses.append(mse_val_loss.item()) mae_val_losses.append(mae_val_loss.item()) N_test.append(len(x_test_batch)) test_pred.append(y_test_pred) test_real.append(y_test_batch)
When you append it to the list, try using .cpu() at the end, like this: test_pred.append(y_test_pred.cpu())
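To turn the per-batch lists into single tensors (or NumPy arrays) after the loop, a sketch:

# after the validation loop
test_pred = torch.cat(test_pred, dim=0)  # (N, ...) tensor of all predictions
test_real = torch.cat(test_real, dim=0)
test_pred_np = test_pred.numpy()         # the tensors are already on CPU here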
https://stackoverflow.com/questions/65838180/
How to make R2 score in nn.LSTM pytorch
I tried to make a loss function with R2 in nn.LSTM but I couldn't find any documentation about it. I already use RMSE and MAE loss from PyTorch. My data is a time series and I'm doing time series forecasting. This is the code where I use the RMSE loss function in training: model = LSTM_model(input_size=1, output_size=1, hidden_size=512, num_layers=2, dropout=0).to(device) criterion = nn.MSELoss(reduction="sum") optimizer = optim.Adam(model.parameters(), lr=0.001) callback = Callback(model, early_stop_patience=10 ,outdir="model/lstm", plot_every=20,) from tqdm.auto import tqdm def loop_fn(mode, dataset, dataloader, model, criterion, optimizer,device): if mode =="train": model.train() elif mode =="test": model.eval() cost = 0 for feature, target in tqdm(dataloader, desc=mode.title()): feature, target = feature.to(device), target.to(device) output , hidden = model(feature,None) loss = torch.sqrt(criterion(output,target)) if mode =="train": loss.backward() optimizer.step() optimizer.zero_grad() cost += loss.item() * feature.shape[0] cost = cost / len(dataset) return cost And this is the code to start training: while True : train_cost = loop_fn("train", train_set, trainloader, model, criterion, optimizer,device) with torch.no_grad(): test_cost = loop_fn("test", test_set, testloader, model, criterion, optimizer,device) callback.log(train_cost, test_cost) callback.save_checkpoint() callback.cost_runtime_plotting() if callback.early_stopping(model, monitor="test_cost"): callback.plot_cost() break Can anyone help me with the R2 loss function? Thank you in advance
Here is an implementation, """ From https://en.wikipedia.org/wiki/Coefficient_of_determination """ def r2_loss(output, target): target_mean = torch.mean(target) ss_tot = torch.sum((target - target_mean) ** 2) ss_res = torch.sum((target - output) ** 2) r2 = 1 - ss_res / ss_tot return r2 You can use it as below, loss = r2_loss(output, target) loss.backward() Note that R² is a score (higher is better, with 1 being perfect); if you want to use it as a training loss to minimize, minimize 1 - r2 (i.e. ss_res / ss_tot) rather than r2 itself.
https://stackoverflow.com/questions/65840698/
Final step of PyTorch Gradient Accumulation for small datasets
I am training a BERT model on a relatively small dataset and cannot afford to lose any labelled sample as they must all be used for training. Due to GPU memory constraints, I am using gradient accumulation to train on larger batches (e.g. 32). According to PyTorch documentation, gradient accumulation is implemented as follows: scaler = GradScaler() for epoch in epochs: for i, (input, target) in enumerate(data): with autocast(): output = model(input) loss = loss_fn(output, target) loss = loss / iters_to_accumulate # Accumulates scaled gradients. scaler.scale(loss).backward() if (i + 1) % iters_to_accumulate == 0: # may unscale_ here if desired (e.g., to allow clipping unscaled gradients) scaler.step(optimizer) scaler.update() optimizer.zero_grad() However, if you are using e.g. 110 training samples, with batch size 8 and accumulation step 4 (i.e. effective batch size 32), this method would only train the first 96 samples (i.e. 32 x 3), i.e. wasting 14 samples. In order to avoid this, I'd like to modify the code as follows (notice change to the final if statement): scaler = GradScaler() for epoch in epochs: for i, (input, target) in enumerate(data): with autocast(): output = model(input) loss = loss_fn(output, target) loss = loss / iters_to_accumulate # Accumulates scaled gradients. scaler.scale(loss).backward() if (i + 1) % iters_to_accumulate == 0 or (i + 1) == len(data): # may unscale_ here if desired (e.g., to allow clipping unscaled gradients) scaler.step(optimizer) scaler.update() optimizer.zero_grad() Is this correct and really that simple, or will this have any side effects? It seems very simple to me, but I've never seen it done before. Any help appreciated!
As Lucas Ramos already mentioned, when using DataLoader where the underlying dataset's size is not divisible by the batch size, the default behavior is to have a smaller last batch: drop_last (bool, optional) – set to True to drop the last incomplete batch, if the dataset size is not divisible by the batch size. If False and the size of dataset is not divisible by the batch size, then the last batch will be smaller. (default: False) Your plan is basically implementing gradient accumulation combined with drop_last=False - that is, having the last batch smaller than all the others. Therefore, in principle there's nothing wrong with training with varying batch sizes. However, there is something you need to fix in your code: the loss is averaged over the mini-batch. So, if you process mini batches in the usual way you do not need to worry about it. However, when accumulating gradients you do it explicitly by dividing the loss by iters_to_accumulate: loss = loss / iters_to_accumulate In the last mini batch (with smaller size) you need to change the value of iters_to_accumulate to reflect this smaller minibatch size! I propose this revised code, breaking the training loop into two: an outer loop over mini-batch groups and an inner one that accumulates gradients per mini batch. Note how using an iter over the DataLoader helps break the training loop into two: scaler = GradScaler() for epoch in epochs: bi = 0 # index batches # outer loop over minibatches data_iter = iter(data) while bi < len(data): # determine the range for this batch nbi = min(len(data), bi + iters_to_accumulate) # inner loop over the items of the mini batch - accumulating gradients for i in range(bi, nbi): input, target = next(data_iter) with autocast(): output = model(input) loss = loss_fn(output, target) loss = loss / (nbi - bi) # divide by the true batch size # Accumulates scaled gradients. scaler.scale(loss).backward() # done mini batch loop - gradients were accumulated, we can make an optimization step. # may unscale_ here if desired (e.g., to allow clipping unscaled gradients) scaler.step(optimizer) scaler.update() optimizer.zero_grad() bi = nbi
https://stackoverflow.com/questions/65842691/
PyTorch .to(torch.device("cuda")) speed differs vastly depending on execution state - How to achieve a speed up?
My question is concerned with the speed of the to method of PyTorch tensors and how it depends on the "execution state" (not sure if that's the correct name, feel free to edit). My setup is as follows (RTX 2060 Super): python version: 3.8.5 (default, Jul 28 2020, 12:59:40) [GCC 9.3.0] pytorch version: 1.7.0+cu110 Firstly, a file that contains the code for what I'm talking about: import torch import time import sys gpu = torch.device("cuda") def ver(): print("python version:", sys.version) print("pytorch version:", torch.__version__) print("\n") def test(): start = time.time() torch.cuda.init() print("cuda init:", time.time()-start) x = torch.randn(15000,3).float() print("randn initialized:", time.time()-start) x.to(gpu) print("to(gpu):", time.time()-start) torch.cuda.synchronize() print("time after sync:", time.time()-start) print("\n") if __name__ == "__main__": ver() test() test() Running this yields the following console output: python version: 3.8.5 (default, Jul 28 2020, 12:59:40) [GCC 9.3.0] pytorch version: 1.7.0+cu110 cuda init: 0.002934694290161133 randn initialized: 0.0033266544342041016 to(gpu): 1.5724568367004395 time after sync: 1.5725233554840088 cuda init: 9.5367431640625e-07 randn initialized: 0.00030875205993652344 to(gpu): 0.00037860870361328125 time after sync: 0.00039458274841308594 The second to call is so much faster than the first one. Why does this occur? And more importantly, how can I achieve consistently fast speeds? In a bigger project I'm working on I should have enough time to initialize the GPU in parallel before to gets called for the first time. Is that possible? If so, how? torch.cuda.init() doesn't seem to change the speed of the first to. I cannot use torch.randn(x,y, device=gpu) because in the original setup the data comes from torch.from_numpy(). Thanks.
The first run is always the slowest because it has to set up the CUDA context and load everything onto the GPU. After the first run the times become much more consistent; if you run your test a few more times you should see the times are much closer to each other.
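So a common workaround (a sketch, using the gpu device from the question's script) is to pay that one-time cost up front, e.g. during application startup, before the first real to call:

# warm-up at startup so later .to(gpu) calls are consistently fast
torch.cuda.init()
_ = torch.zeros(1).to(gpu)   # the first transfer triggers the one-time CUDA setup
torch.cuda.synchronize()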
https://stackoverflow.com/questions/65845501/
How to update classification layers without changing weights for convolutional layers
I have a CNN with numerous convolutional layers. To each of these convolutional layers I have attached a classifier, to check outputs of intermediary layers. After losses have been produced for each of these classifiers, I want to update the weights for the classifier without touching the weights for the convolutional layers. This code: for i in range(len(loss_per_layer)): loss_per_layer[i].backward(retain_graph=True) self.classifiers[i].weight.data -= self.learning_rate * self.alpha[i] * self.classifiers[i].weight.grad.data self.classifiers[i].bias.data -= self.learning_rate * self.alpha[i] * self.classifiers[i].bias.grad.data allows me to do so if the classifier consists of a singular nn.Linear layer. However, my classifiers are of the shape: self.classifiers.append(nn.Sequential( nn.Linear(int(feature_map * input_shape[1] * input_shape[2]), 100), nn.ReLU(True), nn.Dropout(), nn.Linear(100, self.num_classes), nn.Sigmoid(), )) How can I update the weights of the Sequential block without touching the rest of the network? I have recently changed from keras to pytorch and so am unsure on how exactly to utilize the optimizer.step() function for this situation, but I have a suspicion it can be done using that. Please note, I need a generic solution for a Sequential block of any shape, as it will change in future iterations of the model. Any help is much appreciated.
You can implement your model as below: class Model(nn.Module): def __init__(self, conv_layers, classifier): super().__init__() self.conv_layers = conv_layers self.classifier = classifier def forward(self,x): x = self.conv_layers(x) return self.classifier(x) When declaring the optimizer, only pass the parameters that you want to be updated. model = Model(conv_layers, classifier) optimizer = torch.optim.Adam(model.classifier.parameters(), lr=lr) Now when you do loss.backward() optimizer.step() model.zero_grad() only the classifier params will be updated. EDIT: After OP's comment, I am adding the below for more generic use cases. A more generic scenario class Model(nn.Module): def __init__(self, modules): super().__init__() # supposing you have multiple modules declared like below. # You can also keep them as an array or dict too. # For this see nn.ModuleList or nn.ModuleDict in pytorch self.module0 = modules[0] self.module1 = modules[1] #..... and so on def forward(self,x): # implement forward # model and optimizer declarations model = Model(modules) # assuming we want to update module0 and module1; # parameters must be passed as tensors or as dict param groups, not as raw generators in a list optimizer = torch.optim.Adam([ {'params': model.module0.parameters()}, {'params': model.module1.parameters()} ], lr=lr) # you can also provide a different learning rate for different modules. # See the documentation: https://pytorch.org/docs/stable/optim.html # when training loss.backward() optimizer.step() model.zero_grad() # use model.zero_grad to remove gradients computed for all the modules. # optimizer.zero_grad only removes gradients for parameters that were passed to it.
https://stackoverflow.com/questions/65845857/
Error while implenting the Faster R-CNN Object detection algorithm
I am trying to implement the Faster R-CNN object detection algorithm and I have an unusual error. While trying to call the train_one_epoch function in this colab tutorial, I had an error in the line loss_dict = model(images, targets). The exact error that I had is: 101 cell_anchors = self.cell_anchors 102 assert cell_anchors is not None --> 103 assert len(grid_sizes) == len(strides) == len(cell_anchors) 104 105 for size, stride, base_anchors in zip( AssertionError: Anyone have an idea? Thanks in advance!
Finally, I was able to solve this problem, and it's just a matter of adjusting the sizes of the AnchorGenerator and their corresponding aspect ratios in the Faster R-CNN function: ft_anchor_generator = AnchorGenerator( sizes=((32, 64, 128),), aspect_ratios=((0.5, 1.0, 2.0),) ) ft_model = FasterRCNN( backbone=ft_backbone, num_classes=num_classes, rpn_anchor_generator=ft_anchor_generator)
https://stackoverflow.com/questions/65847198/
Translating 3D CNN from Keras to Pytorch
I'm trying to translate the below 3D CNN architecture from keras to pytorch. The 3D images all have the following dimensions: 193 x 229 x 193. Network architecture in Keras: def test_model(size): model = Sequential() model.add(Conv3D(filters=8, kernel_size=(3, 3, 3), activation='relu', input_shape=size, name="conv_1_1")) model.add(Conv3D(filters=8, kernel_size=(3, 3, 3), activation='relu', name="conv_1_2")) model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2))) model.add(Conv3D(filters=16, kernel_size=(3, 3, 3), activation='relu', input_shape=size, name="conv_2_1")) model.add(Conv3D(filters=16, kernel_size=(3, 3, 3), activation='relu', name="conv_2_2")) model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2))) model.add(Flatten()) model.add(Dense(units=1, name="d_2")) return model My attempt at translating this to Pytorch: class Model(torch.nn.Module): def __init__(self): super(Model, self).__init__() self.conv1 = nn.Conv3d(input_channel=1, output_channel=8, kernel_size=3) self.conv2 = nn.Conv3d(input_channel=8, output_channel=8, kernel_size=3) self.conv3 = nn.Conv3d(input_channel=8, output_channel=16, kernel_size=3) self.conv4 = nn.Conv3d(input_channel=16, output_channel=16, kernel_size=3) self.fc1 = nn.Linear( ???? , 1) def forward (self, x): x = F.relu(self.conv1(x)) x = F.relu(F.max_pool3d(self.conv2(x), kernel_size=2, stride=2)) x = F.relu(self.conv3(x)) x = F.relu(F.max_pool3d(self.conv4(x), kernel_size=2, stride=2)) x = self.fc1 return x net = Model() Please could you let me know where I've made mistakes and also clarify how to determine the input for the nn.Linear( ???? , 1) layer? Thank you for your help!
Inspecting your last nn.Conv3d's output, you have a tensor shape of (-1, 16, 45, 54, 45). Therefore, you need a total of 16*45*54*45=1749600 connections on your dense layer (this is tremendously large!). Some other things to point out: input_channel and output_channels should be in_channels and out_channels, respectively. You can either use torch.flatten(x, start_dim=1) or a nn.Flatten() layer (which will flatten from axis=1 to axis=-1 by default). you have misplaced an F.relu activation as your overall structure is [conv3d, relu, conv3d, relu, maxpool3d] + [conv3d, relu, conv3d, relu, maxpool3d] + [flatten, dense]. Resulting code: class Model(torch.nn.Module): def __init__(self): super(Model, self).__init__() self.conv1 = nn.Conv3d(in_channels=1, out_channels=8, kernel_size=3) self.conv2 = nn.Conv3d(in_channels=8, out_channels=8, kernel_size=3) self.conv3 = nn.Conv3d(in_channels=8, out_channels=16, kernel_size=3) self.conv4 = nn.Conv3d(in_channels=16, out_channels=16, kernel_size=3) self.fc1 = nn.Linear(1749600, 1) def forward(self, x): x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x = F.max_pool3d(x, kernel_size=2, stride=2) x = F.relu(self.conv3(x)) x = F.relu(self.conv4(x)) x = F.max_pool3d(x, kernel_size=2, stride=2) x = torch.flatten(x, start_dim=1) x = self.fc1(x) return x
https://stackoverflow.com/questions/65850610/
How to introduce a loss to get two matrices similar
I am training a neural network and I want two matrices to be similar (the covariance matrices). My naive approach was to use a loss based on the difference, such as the L1 loss. But this also forced the matrices to become small, which is not what I want. Does anyone have an idea here? Thanks a lot!
There are many metrics you can use (Euclidean distance, cosine similarity, the Bhattacharyya similarity for non-negative features, the Jensen-Shannon divergence). The cosine similarity seems like a good place to start. You can achieve this by considering both n x m matrices as vectors in an n*m-dimensional space and comparing those two vectors with the cosine similarity. In practice, this can be done using torch.flatten and torch.nn.functional.cosine_similarity, or equivalently with an nn.Flatten layer and nn.CosineSimilarity. Here I've taken the functional route: >>> x = torch.rand(1, 10, 10) >>> y = torch.rand(1, 10, 10) >>> F.cosine_similarity(torch.flatten(x, 1), torch.flatten(y, 1)) tensor([0.6220]) Notice you will need an extra dimension for the batch: axis=0. Edit - if you're not working with batches, you could simply reshape both tensors into row vectors: >>> F.cosine_similarity(x.reshape(1, -1), y.reshape(1, -1))
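As a follow-up, here is a sketch of how this could be packaged as a training loss (the function name and the assumption that cov_pred and cov_target are batched matrices are mine; minimizing 1 - similarity pushes the matrices to align in direction without forcing them to shrink):

import torch
import torch.nn.functional as F

def cov_similarity_loss(cov_pred, cov_target):
    # flatten each (n, m) matrix into a vector and compare directions only
    sim = F.cosine_similarity(torch.flatten(cov_pred, 1),
                              torch.flatten(cov_target, 1))
    return 1.0 - sim.mean()  # 0 when perfectly aligned, scale-invariant

cov_pred = torch.rand(4, 10, 10, requires_grad=True)
cov_target = torch.rand(4, 10, 10)
loss = cov_similarity_loss(cov_pred, cov_target)
loss.backward()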
https://stackoverflow.com/questions/65851071/
Add Samples after Partial Training in PyTorch
I have trained a model in PyTorch - an RCNN for text classification. The model has very high precision and recall, but I may eventually receive new documents with text unlike what I used to train, validate, or test the model. I would like to add new text samples to the model without retraining the model from the beginning. This is desirable because I may lose access to some of the text used for initial training. If it is not possible to add samples (documents), is it possible to train a new model on only the new samples and then somehow combine the original model and the new model? How? Here is what my model looks like. RCNN( (embeddings): Embedding(10661, 300) (lstm): LSTM(300, 64, bidirectional=True) (dropout): Dropout(p=0.0, inplace=False) (W): Linear(in_features=428, out_features=64, bias=True) (tanh): Tanh() (fc): Linear(in_features=64, out_features=3, bias=True) (softmax): Softmax(dim=1) (loss_op): NLLLoss() ) I am aware of techniques for saving the model and the corresponding load techniques. State dictionary: torch.save(model.state_dict(), PATH) Model: torch.save(model, PATH) Checkpoint: torch.save({'epoch': EPOCH, 'model_state_dict': net.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'loss': LOSS,}, PATH) I can continue training based on the original samples, but I do not know how to add samples. If this is something TensorFlow can do but PyTorch cannot, I might switch to TensorFlow.
Assuming you have your model's state saved in some file PATH, you can load it back in memory with torch.load, either on a CPU or CUDA device (by default it will be loaded on the device it was on when torch.save was called). state_dict = torch.load(PATH) model.load_state_dict(state_dict) Assuming model is an instance of the same nn.Module class that was used to save the state on PATH. Now model will have an identical state (same parameter weights/biases) as when it was saved on PATH with torch.save. From there you can call model and fine-tune it on new data. Note: You can load it directly on the desired device by passing a torch.device to torch.load's map_location argument.
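A minimal sketch of the fine-tuning step that follows (new_loader and the hyperparameters are placeholders, not from the original post; a small learning rate helps limit forgetting of the original samples):

import torch

state_dict = torch.load(PATH, map_location="cpu")
model.load_state_dict(state_dict)

optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)  # small lr for fine-tuning
criterion = torch.nn.NLLLoss()  # mirrors the loss_op in the question's model

model.train()
for epoch in range(3):  # a few epochs over the new documents only
    for text, labels in new_loader:  # hypothetical DataLoader over the new samples
        optimizer.zero_grad()
        loss = criterion(model(text), labels)
        loss.backward()
        optimizer.step()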
https://stackoverflow.com/questions/65852250/
Perceptron on multi-dimensional tensor
I'm trying to use a perceptron to reduce a tensor of size [1, 24, 768] to another tensor of size [1, 1, 768]. The only way I could find was to first reshape the input tensor to [1, 1, 24*768] and then pass it through linear layers. I'm wondering if there's a more elegant way to do this transformation -- other than using RNNs, because I do not want to use those. Are there other methods generally for the transformation that I want to make? Here is my code for doing the above operation: lin = nn.Linear(24*768, 768) # x is in shape of [1, 24, 768] # out is in shape of [1, 1, 768] x = x.view(1,1,-1) out = lin(x)
If the manual reshaping is what's bothering you, you could use an nn.Flatten to do it: >>> m = nn.Sequential( ... nn.Flatten(), ... nn.Linear(24*768, 768)) >>> x = torch.rand(1, 24, 768) >>> m(x).shape torch.Size([1, 768]) If you really want the extra dimension, you can unsqueeze the tensor on axis=1: >>> m(x).unsqueeze(1).shape torch.Size([1, 1, 768])
https://stackoverflow.com/questions/65854475/
Conditionally apply tensor operations in PyTorch
I know PyTorch doesn't have a map-like function to apply a function to each element of a tensor. So, could I do something like the following without a map-like function in PyTorch? if tensor_a * tensor_b.matmul(tensor_c) < 1: return -tensor_a*tensor_b else: return 0 This would work if the tensors were 1D. However, I need this to work when tensor_b is 2D (tensor_a needs to be unsqueezed in the return statement). This means a 2D tensor should be returned where some of the rows will be 0 vectors. Happy to use the latest features of the most recent Python version.
If I understand correctly, you are looking to return a tensor either way (hence the mapping) but by checking the condition element-wise. Assuming the shapes of tensor_a, tensor_b, and tensor_c are all two-dimensional, as in "simple matrices", here is a possible solution. What you're looking for is probably torch.where: it's fairly close to a mapping where, based on a condition, it will return one value or another element-wise. It works like torch.where(condition, value_if, value_else), where all three tensors have the same shape (value_if and value_else can actually be floats which will be cast to tensors, filled with the same value). Also, condition is a bool tensor which defines which value to assign to the outputted tensor: it's a boolean mask. For the purpose of this example, I have used random tensors: >>> a = torch.rand(2, 2, dtype=float)*100 >>> b = torch.rand(2, 2, dtype=float)*0.01 >>> c = torch.rand(2, 2, dtype=float)*10 >>> torch.where(a*(b@c) < 1, -a*b, 0.) tensor([[ 0.0000, 0.0000], [ 0.0000, -0.0183]], dtype=torch.float64) More generally though, this will work if tensor_a and tensor_b have a shape of (m, n), and tensor_c has a shape of (n, m) because of the operation constraints. In your experiment, I'm guessing you only had column vectors.
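Following the original pseudo-code more closely, a sketch for the 2D case the question describes (my assumption: tensor_a holds one coefficient per row of the 2D tensor_b, and tensor_c is a vector, so the condition is computed row-wise and then broadcast over columns via unsqueeze):

import torch

a = torch.rand(4)      # one coefficient per row
b = torch.rand(4, 3)   # 2D tensor
c = torch.rand(3)

cond = (a * (b @ c)) < 1  # shape (4,): one boolean per row
out = torch.where(cond.unsqueeze(1), -a.unsqueeze(1) * b, torch.zeros_like(b))
# rows failing the condition come out as zero vectors, as requested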
https://stackoverflow.com/questions/65854970/
PyTorch - indices of corresponding values in another tensor
I have a tensor, where I want to copy only some of the values (columnwise). The same values are in another tensor but in a random order. What I want, are the column indices from tensor2 of the values of tensor1. Here is an example: copy_ind = torch.tensor([0, 1, 3], dtype=torch.long) tensor1 = torch.tensor([[4, 6, 5, 1, 8],[10, 0, 8, 2, 1]]) temp = torch.index_select(tensor1, 1, copy_ind) # values to copy tensor2 = torch.tensor([[1, 4, 5, 6, 8],[2, 10, 8, 0, 1]], dtype=torch.long) _, t_ind = torch.sort(temp[0], dim=0) t2_ind = copy_ind[t_ind] # indices of tensor2 The output should be: t2_ind = [1, 3, 0] Here is another example where I want to get the values of the tensor according to c1_new: c1 = torch.tensor([[6, 7, 7, 8, 6, 8, 9, 4, 7, 6, 1, 3],[5, 11, 5, 7, 2, 9, 5, 5, 7, 11, 10, 7]], dtype=torch.long) copy_ind = torch.tensor([1, 2, 3, 5, 7, 8], dtype=torch.long) c1_new = torch.index_select(c1, 1, copy_ind) indices = torch.as_tensor([[1, 3, 4, 6, 6, 6, 7, 7, 7, 8, 8, 9], [10, 7, 5, 2, 5, 11, 5, 7, 11, 7, 9, 5]]) values = torch.randn(12) tensor = torch.sparse.FloatTensor(indices, values, (12, 12)) _, t_ind = torch.sort(c1[0], dim=0) ind = t_ind[copy_ind] # should be [8, 6, 9, 10, 2, 7] Unfortunately, the indices ind are not correct. Can someone please help me?
If you're OK with using a for loop, you can use something like this, checking each column of your temp tensor against the columns of tensor2 (using torch.prod across dimension 1 to make sure both rows match): [torch.prod((temp.T[i] == tensor2.T), dim=1).nonzero()[0] for i in range(temp.size(1))] My output for your first example is [tensor(1), tensor(3), tensor(0)]
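A loop-free variant of the same idea (a sketch; it compares every column of temp against every column of tensor2 at once via broadcasting, and assumes each column of temp occurs exactly once in tensor2):

import torch

tensor1 = torch.tensor([[4, 6, 5, 1, 8], [10, 0, 8, 2, 1]])
tensor2 = torch.tensor([[1, 4, 5, 6, 8], [2, 10, 8, 0, 1]])
copy_ind = torch.tensor([0, 1, 3])
temp = tensor1[:, copy_ind]  # columns to locate, shape (2, 3)

# (3, 1, 2) vs (1, 5, 2): matches[i, j] is True when temp column i == tensor2 column j
matches = (temp.T.unsqueeze(1) == tensor2.T.unsqueeze(0)).all(dim=2)
t2_ind = matches.float().argmax(dim=1)  # index of the first match per column
print(t2_ind)  # tensor([1, 3, 0])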
https://stackoverflow.com/questions/65860296/
PyTorch torch_sparse installation without CUDA
I am new to PyTorch and I have faced one issue, namely that I cannot get my torch_sparse module properly installed. In general, I wanted to use the torch_geometric module, which I have installed. However, during the execution of the program I keep receiving the error ModuleNotFoundError: No module named 'torch_sparse'. I try to install it, but when I use the command pip install torch-sparse in Anaconda, I get an error: UserWarning: CUDA initialization: Found no NVIDIA driver on your system. My system does not have CUDA. So how could I install the torch_sparse module without it? Thank you in advance! Kind regards, Rostyslav
As outlined in the pytorch_geometric installation instructions, you have to install the dependencies first and torch_geometric after that. For PyTorch 1.7.0 and CPU: pip install --no-index torch-scatter -f https://pytorch-geometric.com/whl/torch-1.7.0+cpu.html pip install --no-index torch-sparse -f https://pytorch-geometric.com/whl/torch-1.7.0+cpu.html pip install --no-index torch-cluster -f https://pytorch-geometric.com/whl/torch-1.7.0+cpu.html pip install --no-index torch-spline-conv -f https://pytorch-geometric.com/whl/torch-1.7.0+cpu.html pip install torch-geometric Please notice torch-1.7.0+cpu at the very end of each URL.
https://stackoverflow.com/questions/65860764/
VGG16 Model Outputs Incorrect dimension - Transfer Learning
I am trying to use a pre-trained VGG16 model to classify CIFAR10 in PyTorch. The model was originally trained on ImageNet. Here is how I imported and modified the model: from torchvision import models model = models.vgg16(pretrained=True).cuda() model.classifier[6].out_features = 10 and this is the summary of the model print(model) VGG( (features): Sequential( (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (1): ReLU(inplace=True) (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (3): ReLU(inplace=True) (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (6): ReLU(inplace=True) (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (8): ReLU(inplace=True) (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (11): ReLU(inplace=True) (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (13): ReLU(inplace=True) (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (15): ReLU(inplace=True) (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (18): ReLU(inplace=True) (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (20): ReLU(inplace=True) (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (22): ReLU(inplace=True) (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (25): ReLU(inplace=True) (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (27): ReLU(inplace=True) (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (29): ReLU(inplace=True) (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) ) (avgpool): AdaptiveAvgPool2d(output_size=(7, 7)) (classifier): Sequential( (0): Linear(in_features=25088, out_features=4096, bias=True) (1): ReLU(inplace=True) (2): Dropout(p=0.5, inplace=False) (3): Linear(in_features=4096, out_features=4096, bias=True) (4): ReLU(inplace=True) (5): Dropout(p=0.5, inplace=False) (6): Linear(in_features=4096, out_features=10, bias=True) ) ) Now I want to train the model on CIFAR10; I created the train loader with batch size = 128. Below is the training function, and I added some printing statements to check if everything is working fine or not. The problem is that the model outputs 1000 predictions for each data point (as the original, unmodified version would).
def train(model, optimizer, train_loader, epoch=5): """ This function updates/trains client model on client data """ model.train() for e in range(epoch): for batch_idx, (data, target) in enumerate(train_loader): data, target = data.to(device), target.to(device) optimizer.zero_grad() output = model(data) print("shape output: ", output.shape) probs = F.softmax(output, dim=1) # get the entropy print("entropy: ", probs) _, predicted = torch.max(output.data, 1) loss = criterion(output, target) loss.backward() if batch_idx % 100 == 0: # print every 100 mini-batches print('[%d, %5d] loss: %.3f' % (e + 1, batch_idx + 1, loss.item())) optimizer.step() return loss.item() Here is part of the output which shows the output shape: shape output: torch.Size([128, 1000]) I don't understand why the model outputs the results in this way. Is there something that I am missing?
Error It is pretty simple, all you do here: model = models.vgg16(pretrained=True).cuda() model.classifier[6].out_features = 10 is changing the out_features attribute of the torch.nn.Linear layer, not changing the weights which actually carry out the computation! In the simplest case (see comments for the outputted shape): import torch layer = torch.nn.Linear(20, 10) layer.out_features # 10 layer.weight.shape # (10, 20) layer(torch.randn(64, 20)).shape # (64, 10) layer.out_features = 100 layer.out_features # 100 layer.weight.shape # (10, 20) layer(torch.randn(64, 20)).shape # (64, 10) The weights are of the same shape, as those are created during __init__ (and altering out_features doesn't change anything). Fix You have to recreate the last layer anew (it will be randomly initialized), like this: model = torchvision.models.vgg16(pretrained=True) # New layer model.classifier[6] = torch.nn.Linear(4096, 10)
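A quick sanity check of the fix (a sketch; the random batch merely verifies the new output dimension, where real inputs would be CIFAR10 images resized to VGG's expected resolution):

import torch
import torchvision

model = torchvision.models.vgg16(pretrained=True)
model.classifier[6] = torch.nn.Linear(4096, 10)  # replace, don't mutate

x = torch.randn(2, 3, 224, 224)  # dummy batch at VGG's usual input size
print(model(x).shape)            # torch.Size([2, 10])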
https://stackoverflow.com/questions/65864259/
How to convert TensorFlow tensor to PyTorch tensor without converting to Numpy array?
I want to use a pre-trained PyTorch model in TensorFlow and I need to convert the TensorFlow tensors to PyTorch tensors. But I don't want to convert the PyTorch tensor to a NumPy array and convert that to a TensorFlow tensor, since I'm getting the error "You must feed a value for placeholder tensor". I need this conversion while I'm building the graph, so the TensorFlow tensor doesn't have a value yet and cannot be converted to NumPy! Any solution for that?
Operations you do to TensorFlow tensors are "remembered" in order to calculate and back-propagate gradients. The same is true for PyTorch tensors. All this is ultimately required to train the model in both frameworks. This is also the reason why you can't convert tensors between the two frameworks: they have different ops and gradient calculation systems. They are incapable of capturing any operation that happens beyond their framework. For example, you can't (as of Jan 2021) have Python for loops in custom loss functions. It has to be implemented into the framework in order to work. Similarly, there is no implementation of converting PyTorch operations to TensorFlow operations. This answer shows how it's done when your tensor is well-defined (not a placeholder). But there is currently no way to propagate gradients from TensorFlow to PyTorch or vice-versa. Maybe in the future there will be some kind of massive update to both frameworks that lets them inter-operate, but I doubt it. It's best to use them both separately. So, in short, you can't convert placeholder tensors between the two frameworks. You have to stick to one of the libraries, or use concrete tensors plus a NumPy mediator to communicate between frameworks.
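For completeness, here is the concrete-tensor route the answer alludes to (a sketch assuming eager TensorFlow 2 tensors, not graph placeholders; note that gradients do not flow across the copy):

import tensorflow as tf
import torch

tf_tensor = tf.constant([[1.0, 2.0], [3.0, 4.0]])
torch_tensor = torch.from_numpy(tf_tensor.numpy())                # TF -> PyTorch
back_to_tf = tf.convert_to_tensor(torch_tensor.detach().numpy())  # PyTorch -> TF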
https://stackoverflow.com/questions/65867615/
How to implement an epoch-dependent parameter inside a neural network in Pytorch?
For example, let's say I want to use Softmax with temperature after a layer, and I also want to decrease the temperature after 5 epochs. for epoch in range(EPOCHS): running_loss = 0.0 for i, data in enumerate(trainloader, 0): inputs, labels = data[0].to(torch_device), data[1].to(torch_device) optimizer.zero_grad() outputs = my_model(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() my_model.adjust_softmax_temperature(epoch) # ... How should I implement the adjust_softmax_temperature method in order to make my code work?
as far as I know, the model is loaded BEFORE the training, and the forward step passes data through the already-defined model. Am I missing something here? You can mutate the state of your model even after it has been defined. In this case, we're interested in changing a property of my_model during training. Here is a broad overview of how you could implement this. First, expose a function to update the model's temperature based on the provided epoch number: class MyModel(nn.Module): def __init__(self, t0): super().__init__() self.t = t0 def forward(self, x): # compute forward pass using self.t pass def adjust_softmax_temperature(self, epoch): if epoch > 0 and epoch % 5 == 0: self.t /= 10 # update policy of the temperature Then, after having defined your model, you can call the update function at the end of each epoch: it will decide, based on epoch, whether to update self.t or not. my_model = MyModel(t0=100) for epoch in range(EPOCHS): for i, data in enumerate(trainloader, 0): ... # train iteration my_model.adjust_softmax_temperature(epoch) # called once per epoch, after the inner loop
https://stackoverflow.com/questions/65872473/
Understanding the usage of nn.Linear for Forward Propagation in PyTorch
The purpose of this study is to build a simplified forward propagation model that reproduces the code structure in PyTorch, yet does not use any of the PyTorch libraries. The idea is to do a matrix multiplication while emulating the code structure, including class definitions as in pyTorch. PyTorch code for Forward Propagation from torch import nn #DEFINE THE REQUIRED CLASS class Network(nn.Module): def __init__(self): super().__init__() # Inputs to hidden layer linear transformation self.hidden = nn.Linear(784, 256) # Output layer, 10 units - one for each digit self.output = nn.Linear(256, 10) # Define sigmoid activation and softmax output self.sigmoid = nn.Sigmoid() self.softmax = nn.Softmax(dim=1) def forward(self, x): # Pass the input tensor through each of our operations x = self.hidden(x) x = self.sigmoid(x) x = self.output(x) x = self.softmax(x) return x #use model = Network() ... ps = model.forward(some_tensor) here is my reproducer code: class my_mul: def __init__(self, h, w): self.dim1 = h self.dim2 = w self.layer = LAYER(self.dim1, self.dim2) def forward(self, X): X = self.layer.doit( X) return X class LAYER: def __init__(self, h, w): self.dim1 = h self.dim2 = w def __call__(self, Z): self.matrix2 = Z def doit(self, X): self.matrix1 = np.random.rand(self.dim1, self.dim2) print('matrix 1 in class LAYER ',self.matrix1) X = np.matmul(self.matrix1, self.matrix2) import numpy as np # np.random.seed(0) # initialize matrix2 which emulates the tensor being passed down to the forward # propagation step within a deep network training matrix2 = np.random.rand(2,2) print('matrix2 ',matrix2) # # use the __call__ method of LAYER to pass matrix2 to LL LL = LAYER(2,2) LL(matrix2) MM = my_mul(2,2) P = MM.forward(matrix2) print('the product of the 2 matrices is ', P) The above 'reproducer code' fails as follows. The problem is that the data is not passed correctly and I don't know how to make it work. matrix2 [[0.5488135 0.71518937] [0.60276338 0.54488318]] matrix 1 in class LAYER [[0.4236548 0.64589411] [0.43758721 0.891773 ]] Traceback (most recent call last): File "all_s.py", line 30, in <module> P = MM.forward(matrix2) File "all_s.py", line 7, in forward X = self.layer.doit( X) File "all_s.py", line 19, in doit X = np.matmul(self.matrix1, self.matrix2) AttributeError: 'LAYER' object has no attribute 'matrix2'
The issue is that when you are calling the forward function on MM, your initialized model, self.matrix2 hasn't been defined, as the error suggests: AttributeError: 'LAYER' object has no attribute 'matrix2' Just to be clear: MM() is equivalent to calling MM.__call__(). You haven't called it, hence the error. I'm not sure why you would have both an implementation in forward and __call__. In PyTorch, the high-level API call is made through __call__, which is what you might expect. And __call__ will call forward as well as trigger registered hooks on the module. A quick fix would be to define your matrix (the underlying component of your linear layer) in the initialization (i.e. inside __init__). Then, when called, the matrix multiplication between the input and that matrix is performed. class LAYER: def __init__(self, h, w): self.dim1 = h self.dim2 = w self.matrix1 = np.random.rand(self.dim1, self.dim2) def __call__(self, Z): return self.doit(Z) def doit(self, X): return np.matmul(X, self.matrix1) Although, something like this would be clearer: class Model: def __init__(self, h, w): self.layer = Linear(h, w) def __call__(self, x): return self.forward(x) def forward(self, x): x = self.layer(x) return x class Linear: def __init__(self, h, w): self.weights = np.random.rand(h, w) def __call__(self, x): return self.forward(x) def forward(self, x): return np.matmul(x, self.weights) x = np.random.rand(2, 2) model = Model(2, 2) model(x) Now you can use additional layers in your Model class, and even add a backward pass on Linear!
https://stackoverflow.com/questions/65875089/
Feeding Classifier data from LSTM Autoencoder
Goal: I have built an LSTM autoencoder for the purpose of feature reduction. My plan is to encode some input and feed it to a classifier in the future. The encoder takes data of the shape [batch_size, timesteps, features_of_timesteps]. However, in the output layer of the encoder portion I am returning just the last hidden state in the form [1, timesteps, features_of_timesteps]. class Encoder(nn.Module): def __init__(self, input_size, first_layer, second_layer, n_layers): super(Encoder, self).__init__() self.n_layers = n_layers self.encode = nn.Sequential(nn.LSTM(input_size, first_layer, batch_first=True), getSequence(), nn.ReLU(True), nn.LSTM(first_layer, second_layer), getLast()) self.decode = nn.Sequential(nn.LSTM(second_layer, first_layer, batch_first=True), getSequence(), nn.ReLU(True), nn.LSTM(first_layer, input_size), getSequence()) def forward(self, x): x = x.float() x = self.encode(x) x = x.repeat(batch_size, 1, 1) x = self.decode(x) return x Worry: I am afraid that the last hidden state of my second LSTM layer in the encoding portion of the model is summarizing the entire batch along with decreasing the feature dimensionality. This feels wrong because I am trying to reduce a single timeseries into a smaller vector, not an entire batch of timeseries into one vector. Am I correct in my worries?
There are multiple problems in your code. For simplicity, I'll just give you one well-defined model instead; the following code builds an LSTM autoencoder that reconstructs inputs with shape (batch_size, timesteps, number_of_features_at_each_timestep): import torch from torch import nn device = torch.device("cuda" if torch.cuda.is_available() else "cpu") class Encoder(nn.Module): def __init__(self, seq_len, n_features, embedding_dim=64): super(Encoder, self).__init__() self.seq_len, self.n_features = seq_len, n_features self.embedding_dim, self.hidden_dim = embedding_dim, 2 * embedding_dim self.rnn1 = nn.LSTM( input_size=n_features, hidden_size=self.hidden_dim, num_layers=1, batch_first=True ) self.rnn2 = nn.LSTM( input_size=self.hidden_dim, hidden_size=self.embedding_dim, num_layers=1, batch_first=True ) def forward(self, x): x, (_, _) = self.rnn1(x) x, (hidden_n, _) = self.rnn2(x) return hidden_n class Decoder(nn.Module): def __init__(self, seq_len, input_dim=64, n_features=1): super(Decoder, self).__init__() self.seq_len, self.input_dim = seq_len, input_dim self.hidden_dim, self.n_features = 2 * input_dim, n_features self.rnn1 = nn.LSTM( input_size=input_dim, hidden_size=input_dim, num_layers=1, batch_first=True ) self.rnn2 = nn.LSTM( input_size=input_dim, hidden_size=self.hidden_dim, num_layers=1, batch_first=True ) self.output_layer = nn.Linear(self.hidden_dim, n_features) def forward(self, x): x = x.repeat(self.seq_len, 1, 1) x = x.permute(1, 0, 2) x, (hidden_n, cell_n) = self.rnn1(x) x, (hidden_n, cell_n) = self.rnn2(x) return self.output_layer(x) class RecurrentAutoencoder(nn.Module): def __init__(self, seq_len, n_features, embedding_dim=64): super(RecurrentAutoencoder, self).__init__() self.encoder = Encoder(seq_len, n_features, embedding_dim).to(device) self.decoder = Decoder(seq_len, embedding_dim, n_features).to(device) def forward(self, x): print("Inputs size:", x.size()) x = self.encoder(x) print("Representation size: ", x.size()) x = self.decoder(x) print("Outputs size: ", x.size()) return x batch_n = 5 seq_len = 10 n_features = 3 inputs = torch.randn(batch_n, seq_len, n_features).to(device) model = RecurrentAutoencoder(seq_len, n_features).to(device) y = model(inputs) Outputs: Inputs size: torch.Size([5, 10, 3]) Representation size: torch.Size([1, 5, 64]) Outputs size: torch.Size([5, 10, 3]) Beware: the representation (i.e. the output of the encoder) has shape (1, batch_size, embedding_dim)
https://stackoverflow.com/questions/65876808/
MNIST plot first test figure after transform
Here is some of my code. I want to know how to add code to plot the first figure from the test dataset after the transforms are applied. transform=transforms.Compose([ transforms.ToTensor(), AddGaussianNoise(0, 1), transforms.Normalize((0.1307,), (0.3081,)) ]) dataset1 = datasets.MNIST('../data', train=True, download=True, transform=transform) dataset2 = datasets.MNIST('../data', train=False, transform=transform) train_loader = torch.utils.data.DataLoader(dataset1,**train_kwargs) test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs) model = Net().to(device) optimizer = optim.Adadelta(model.parameters(), lr=args.lr) scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma) for epoch in range(1, args.epochs + 1): train(args, model, device, train_loader, optimizer, epoch) test(model, device, test_loader) scheduler.step() if args.save_model: torch.save(model.state_dict(), "mnist_cnn.pt")
Try this: import torchvision.utils as vutils import matplotlib.pyplot as plt # get batch from dataloader -> (bs, ch, h, w) imgs, lbls = next(iter(test_loader)) # make grid using images of the batch img = vutils.make_grid(imgs) fig = plt.figure() plt.axis("off") # channels first to channels last conversion using permute plt.imshow(img.permute(1, 2, 0)) plt.show()
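If you literally want just the first test figure rather than a grid, here is a small sketch using the dataset defined in the question (squeeze drops the channel dimension of the 1-channel MNIST tensor):

import matplotlib.pyplot as plt

img, lbl = dataset2[0]                  # first test sample, transforms applied
plt.imshow(img.squeeze(), cmap='gray')  # (1, 28, 28) -> (28, 28)
plt.title(f"label: {lbl}")
plt.axis("off")
plt.show()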
https://stackoverflow.com/questions/65879506/
Expected more than 1 value per channel when training, got input size torch.Size([1, **])
I got an error when I use BatchNorm1d. Code: ##% first I set a model class net(nn.Module): def __init__(self, max_len, feature_linear, rnn, input_size, hidden_size, output_dim, num__rnn_layers, bidirectional, batch_first=True, p=0.2): super(net, self).__init__() self.max_len = max_len self.feature_linear = feature_linear self.input_size = input_size self.hidden_size = hidden_size self.bidirectional = bidirectional self.num_directions = 2 if bidirectional == True else 1 self.p = p self.batch_first = batch_first self.linear1 = nn.Linear(max_len, feature_linear) init.kaiming_normal_(self.linear1.weight, mode='fan_in') self.BN1 = BN(feature_linear) def forward(self, xb, seq_len_crt): rnn_input = torch.zeros(xb.shape[0], self.feature_linear, self.input_size) for i in range(self.input_size): out = self.linear1(xb[:, :, i]) # xb[:,:,i].shape:(1,34), out.shape(1,100) out = F.relu(out) # input: out.shape(1,100), output: out.shape(1,100) out = self.BN1(out) # input: out.shape(1,100), output: out.shape(1,100) return y_hat.squeeze(-1) ##% make the model as a function and optimize it input_size = 5 hidden_size = 32 output_dim = 1 num_rnn_layers = 2 bidirectional = True rnn = nn.LSTM batch_size = batch_size feature_linear = 60 BN = nn.BatchNorm1d model = net(max_len, feature_linear, rnn, input_size, hidden_size, output_dim, num_rnn_layers, bidirectional, p=0.1) loss_func = nn.MSELoss(reduction='none') # optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9) # optimizer = optim.Adam(model.parameters(), lr=0.01) optimizer = optim.AdamW(model.parameters(), lr=0.001, weight_decay=0.05) ##% use this model to predict data def predict(xb, model, seq_len): # xb's shape should be (batch_size, seq_len, n_features) if xb.ndim == 2: # suitable for both ndarray and Tensor # add a {batch_size} dim xb = xb[None, ] if not isinstance(xb, torch.Tensor): xb = torch.Tensor(xb) return model(xb, seq_len) # xb.shape(1,34,5) ##% create training/valid/test data seq_len_train_iter = [] for i in range(0, len(seq_len_train), batch_size): if i + batch_size <= len(seq_len_train): seq_len_train_iter.append(seq_len_train[i:i+batch_size]) else: seq_len_train_iter.append(seq_len_train[i:]) seq_len_valid_iter = [] for i in range(0, len(seq_len_valid), batch_size): if i + batch_size <= len(seq_len_valid): seq_len_valid_iter.append(seq_len_valid[i:i+batch_size]) else: seq_len_valid_iter.append(seq_len_valid[i:]) seq_len_test_iter = [] for i in range(0, len(seq_len_test), batch_size): if i + batch_size <= len(seq_len_test): seq_len_test_iter.append(seq_len_test[i:i+batch_size]) else: seq_len_test_iter.append(seq_len_test[i:]) ##% fit model def fit(epochs, model, loss_func, optimizer, train_dl, valid_dl, valid_ds, seq_len_train_iter, seq_len_valid_iter): train_loss_record = [] valid_loss_record = [] mean_pct_final = [] mean_abs_final = [] is_better = False last_epoch_abs_error = 0 last_epoch_pct_error = 0 mean_pct_final_train = [] mean_abs_final_train = [] for epoch in range(epochs): # seq_len_crt: current batch seq len for batches, ((xb, yb), seq_len_crt) in enumerate(zip(train_dl, seq_len_train_iter)): if isinstance(seq_len_crt, np.int64): seq_len_crt = [seq_len_crt] y_hat = model(xb, seq_len_crt) packed_yb = nn.utils.rnn.pack_padded_sequence(yb, seq_len_crt, batch_first=True, enforce_sorted=False) final_yb, input_sizes = nn.utils.rnn.pad_packed_sequence(packed_yb) final_yb = final_yb.permute(1, 0) # assert torch.all(torch.tensor(seq_len_crt).eq(input_sizes)) loss = loss_func(y_hat, final_yb) batch_size_crt = final_yb.shape[0] loss = (loss.sum(-1)
/ input_sizes).sum() / batch_size_crt loss.backward() optimizer.step() # scheduler.step() optimizer.zero_grad() # print(i) with torch.no_grad(): train_loss_record.append(loss.item()) if batches % 50 == 0 and epoch % 1 == 0: # print(f'Epoch {epoch}, batch {i} training loss: {loss.item()}') y_hat = predict(xb[0], model, torch.tensor([seq_len_crt[0]])).detach().numpy().squeeze() # xb[0].shape(34,5) label = yb[0][:len(y_hat)] # plt.ion() plt.plot(y_hat, label='predicted') plt.plot(label, label='label') plt.legend(loc='upper right') plt.title('training mode') plt.text(len(y_hat)+1, max(y_hat.max(), label.max()), f'Epoch {epoch}, batch {batches} training loss: {loss.item()}') plt.show() return train_loss_record but I get: Expected more than 1 value per channel when training, got input size torch.Size([1, 60]) The error message is: ValueError Traceback (most recent call last) <ipython-input-119-fb062ad3f20e> in <module> ----> 1 fit(500, model, loss_func, optimizer, train_dl, valid_dl, valid_ds, seq_len_train_iter, seq_len_valid_iter) <ipython-input-118-2eb946c379bf> in fit(epochs, model, loss_func, optimizer, train_dl, valid_dl, valid_ds, seq_len_train_iter, seq_len_valid_iter) 38 # print(f'Epoch {epoch}, batch {i} training loss: {loss.item()}') 39 ---> 40 y_hat = predict(xb[0], model, torch.tensor([seq_len_crt[0]])).detach().numpy().squeeze() # xb[0].shape(34,5) 41 label = yb[0][:len(y_hat)] 42 # plt.ion() <ipython-input-116-28afce77e325> in predict(xb, model, seq_len) 7 if not isinstance(xb, torch.Tensor): 8 xb = torch.Tensor(xb) ----> 9 return model(xb, seq_len) # xb.shape(None,34,5) D:\Anaconda3\envs\LSTM\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs) 725 result = self._slow_forward(*input, **kwargs) 726 else: --> 727 result = self.forward(*input, **kwargs) 728 for hook in itertools.chain( 729 _global_forward_hooks.values(), <ipython-input-114-3e9c30d20ed6> in forward(self, xb, seq_len_crt) 50 out = self.linear1(xb[:, :, i]) # xb[:,:,i].shape:(None,34), out.shape(None,100) 51 out = F.relu(out) # input: out.shape(None,100), output: out.shape(None,100) ---> 52 out = self.BN1(out) # input: out.shape(None,100), output: out.shape(None,100) 53 54 out = self.linear2(out) D:\Anaconda3\envs\LSTM\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs) 725 result = self._slow_forward(*input, **kwargs) 726 else: --> 727 result = self.forward(*input, **kwargs) 728 for hook in itertools.chain( 729 _global_forward_hooks.values(), D:\Anaconda3\envs\LSTM\lib\site-packages\torch\nn\modules\batchnorm.py in forward(self, input) 129 used for normalization (i.e. in eval mode when buffers are not None).
130 """ --> 131 return F.batch_norm( 132 input, 133 # If buffers are not to be tracked, ensure that they won't be updated D:\Anaconda3\envs\LSTM\lib\site-packages\torch\nn\functional.py in batch_norm(input, running_mean, running_var, weight, bias, training, momentum, eps) 2052 bias=bias, training=training, momentum=momentum, eps=eps) 2053 if training: -> 2054 _verify_batch_size(input.size()) 2055 2056 return torch.batch_norm( D:\Anaconda3\envs\LSTM\lib\site-packages\torch\nn\functional.py in _verify_batch_size(size) 2035 size_prods *= size[i + 2] 2036 if size_prods == 1: -> 2037 raise ValueError('Expected more than 1 value per channel when training, got input size {}'.format(size)) 2038 2039 ValueError: Expected more than 1 value per channel when training, got input size torch.Size([1, 60]) I have checked and I found that in out = self.BN1(out),out.shape = (1,60),it seems that batchsize=1 is not permitted in BatchNorm1d .But I don't know how to modify it.
What does BatchNorm1d do mathematically? Try to write down the equation for the case of batch_size=1 and you'll understand why PyTorch is angry with you. How to solve it? It is simple: BatchNorm has two "modes of operation". One is for training, where it estimates the current batch's mean and variance (this is why you must have batch_size > 1 for training). The other "mode" is for evaluation: it uses accumulated mean and variance to normalize new inputs without re-estimating them. In this mode there is no problem processing samples one by one. When evaluating your model, use model.eval() before and model.train() after.
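Applied to the predict helper from the question, that advice could look like this (a sketch; toggling modes around the single-sample call makes BatchNorm use its running statistics instead of batch statistics):

def predict(xb, model, seq_len):
    if xb.ndim == 2:
        xb = xb[None, ]  # add a batch dimension
    if not isinstance(xb, torch.Tensor):
        xb = torch.Tensor(xb)
    model.eval()   # BatchNorm now uses accumulated mean/variance
    with torch.no_grad():
        out = model(xb, seq_len)
    model.train()  # restore training mode for the next batch
    return out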
https://stackoverflow.com/questions/65882526/
Underfitting a single batch: Can't cause autoencoder to overfit multi-sample batches of 1d data. How to debug?
TL;DR I am unable to overfit batches with multiple samples using autoencoder. Fully connected decoder seems to handle more samples per batch than conv decoder, but then also fails when number of samples increases. Why is this happening, and how to debug this? In depth I am trying to use an auto encoder on 1d data points of size (n, 1, 1024), where n is the number of samples in the batch. I am trying to overfit to that single batch. Using a convolutional decoder, I am only able to fit a single sample (n=1), and when n>1 I am unable to drop the loss (MSE) below 0.2. In blue: expected output (=input), in orange: reconstruction. Single sample, single batch: Multiple samples, single batch, loss won't go down: Using more than one sample, we can see the net learns the general shape of the input (=output) signal, but greatly misses the bias. Using a fully connected decoder does manage to reconstruct batches of multiple samples: Relevant code: class Conv1DBlock(nn.Module): def __init__(self, in_channels, out_channels, kernel_size): super().__init__() self._in_channels = in_channels self._out_channels = out_channels self._kernel_size = kernel_size self._block = nn.Sequential( nn.Conv1d( in_channels=self._in_channels, out_channels=self._out_channels, kernel_size=self._kernel_size, stride=1, padding=(self._kernel_size - 1) // 2, ), # nn.BatchNorm1d(num_features=out_channels), nn.ReLU(True), nn.MaxPool1d(kernel_size=2, stride=2), ) def forward(self, x): for layer in self._block: x = layer(x) return x class Upsample1DBlock(nn.Module): def __init__(self, in_channels, out_channels, factor): super().__init__() self._in_channels = in_channels self._out_channels = out_channels self._factor = factor self._block = nn.Sequential( nn.Conv1d( in_channels=self._in_channels, out_channels=self._out_channels, kernel_size=3, stride=1, padding=1 ), # 'same' nn.ReLU(True), nn.Upsample(scale_factor=self._factor, mode='linear', align_corners=True), ) def forward(self, x): x_tag = x for layer in self._block: x_tag = layer(x_tag) # interpolated = F.interpolate(x, scale_factor=0.5, mode='linear') # resnet idea return x_tag encoder: self._encoder = nn.Sequential( # n, 1024 nn.Unflatten(dim=1, unflattened_size=(1, 1024)), # n, 1, 1024 Conv1DBlock(in_channels=1, out_channels=8, kernel_size=15), # n, 8, 512 Conv1DBlock(in_channels=8, out_channels=16, kernel_size=11), # n, 16, 256 Conv1DBlock(in_channels=16, out_channels=32, kernel_size=7), # n, 32, 128 Conv1DBlock(in_channels=32, out_channels=64, kernel_size=5), # n, 64, 64 Conv1DBlock(in_channels=64, out_channels=128, kernel_size=3), # n, 128, 32 nn.Conv1d(in_channels=128, out_channels=128, kernel_size=32, stride=1, padding=0), # FC # n, 128, 1 nn.Flatten(start_dim=1, end_dim=-1), # n, 128 ) conv decoder: self._decoder = nn.Sequential( nn.Unflatten(dim=1, unflattened_size=(128, 1)), # 1 Upsample1DBlock(in_channels=128, out_channels=64, factor=4), # 4 Upsample1DBlock(in_channels=64, out_channels=32, factor=4), # 16 Upsample1DBlock(in_channels=32, out_channels=16, factor=4), # 64 Upsample1DBlock(in_channels=16, out_channels=8, factor=4), # 256 Upsample1DBlock(in_channels=8, out_channels=1, factor=4), # 1024 nn.ReLU(True), nn.Conv1d(in_channels=1, out_channels=1, kernel_size=3, stride=1, padding=1), nn.ReLU(True), nn.Flatten(start_dim=1, end_dim=-1), nn.Linear(1024, 1024) ) FC decoder: self._decoder = nn.Sequential( nn.Linear(128, 256), nn.ReLU(True), nn.Linear(256, 512), nn.ReLU(True), nn.Linear(512, 1024), nn.ReLU(True), nn.Flatten(start_dim=1, end_dim=-1), nn.Linear(1024, 1024) 
) Another observation is that when the batch size increases further, to say 16, the FC decoder also starts to fail. In the image: 4 samples of a 16-sample batch I am trying to overfit. What could be wrong with the conv decoder? How can I debug this or make the conv decoder work?
Dying ReLU I think the main reason for underfitting in your case is the Dying ReLU problem. Your network is a simple autoencoder with no skip/residual connections, so the code in the bottleneck should encode enough information about the bias in the data for the decoder to learn. If the ReLU activation function is used, information about negatively biased data can be lost due to the dying ReLU problem. The solution is to use better activation functions like LeakyReLU, ELU, Mish, etc. Linear vs Conv In your case, you are overfitting on a single batch. As linear layers have more parameters than convolution layers, maybe they memorise the given small data more easily. Batch size As you are overfitting on a single batch, a small batch of data makes it very easy to memorise; on the other hand, a large batch with a single update of the network per batch (during overfitting) makes the network learn generalized abstract features. (This works better if there are more batches with a lot of variety in the data.) I tried to reproduce your problem using simple Gaussian data. Just using LeakyReLU in place of ReLU with a proper learning rate solved the problem. The same architecture you gave is used. Hyperparameters: batch_size = 16 epochs = 100 lr = 1e-3 optimizer = Adam loss(after training with ReLU) = 0.27265918254852295 loss(after training with LeakyReLU) = 0.0004763789474964142 (plots: with ReLU / with LeakyReLU)
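A minimal sketch of that swap inside the question's Conv1DBlock (only the activation changes; the 0.01 negative slope is LeakyReLU's default, stated here for clarity):

self._block = nn.Sequential(
    nn.Conv1d(
        in_channels=self._in_channels,
        out_channels=self._out_channels,
        kernel_size=self._kernel_size,
        stride=1,
        padding=(self._kernel_size - 1) // 2,
    ),
    # was nn.ReLU(True); LeakyReLU keeps a small gradient for negative inputs
    nn.LeakyReLU(0.01, inplace=True),
    nn.MaxPool1d(kernel_size=2, stride=2),
)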
https://stackoverflow.com/questions/65882896/
Implementing BandRNN with pytorch and tensorflow
So I am trying to figure out how to train my matrix in a way that I will get a BandRNN. BandRNN is a diagonal-RNN model with a different number of connections per neuron. For example: C is the number of connections per neuron. I found out that there is a way to turn off some of the gradients in a for loop, in a way that prevents them from being trained, as follows: for p in model.input.parameters(): p.requires_grad = False But I can't find a proper way to do this such that my matrix becomes a BandRNN. Hopefully, someone will be able to help me with this issue.
As far as I know you can only activate/deactivate requires_grad on a tensor, and not on distinct components of that tensor. Instead, what you could do is zero out the values outside the band. First create a mask for the band; you could use torch.ones with torch.diagflat: >>> torch.diagflat(torch.ones(5), offset=1) By setting the right dimension for torch.ones as well as the right offset, you can generate offset diagonal matrices with consistent shapes. >>> N = 5; i = -1 >>> torch.diagflat(torch.ones(N-abs(i)), offset=i) tensor([[0., 0., 0., 0., 0.], [1., 0., 0., 0., 0.], [0., 1., 0., 0., 0.], [0., 0., 1., 0., 0.], [0., 0., 0., 1., 0.]]) >>> N = 5; i = 0 >>> torch.diagflat(torch.ones(N-abs(i)), offset=i) tensor([[1., 0., 0., 0., 0.], [0., 1., 0., 0., 0.], [0., 0., 1., 0., 0.], [0., 0., 0., 1., 0.], [0., 0., 0., 0., 1.]]) >>> N = 5; i = 1 >>> torch.diagflat(torch.ones(N-abs(i)), offset=i) tensor([[0., 1., 0., 0., 0.], [0., 0., 1., 0., 0.], [0., 0., 0., 1., 0.], [0., 0., 0., 0., 1.], [0., 0., 0., 0., 0.]]) You get the point: summing these matrices element-wise allows us to get a mask: >>> N = 5; b = 3 >>> mask = sum(torch.diagflat(torch.ones(N-abs(i)), i) for i in range(-b//2,b//2+1)) >>> mask tensor([[1., 1., 0., 0., 0.], [1., 1., 1., 0., 0.], [1., 1., 1., 1., 0.], [0., 1., 1., 1., 1.], [0., 0., 1., 1., 1.]]) Then you can zero out the values outside the band on your nn.Linear: >>> m = nn.Linear(N, N) >>> m.weight.data = m.weight * mask >>> m.weight Parameter containing: tensor([[-0.3321, -0.3377, -0.0000, -0.0000, -0.0000], [-0.4197, 0.1729, 0.2101, 0.0000, 0.0000], [ 0.3467, 0.2857, -0.3919, -0.0659, 0.0000], [ 0.0000, -0.4060, 0.0908, 0.0729, -0.1318], [ 0.0000, -0.0000, -0.4449, -0.0029, -0.1498]], requires_grad=True) Note, you might need to perform this on each forward pass, as the parameters outside the band might get updated to non-zero values during training. Of course, you can initialize mask once and keep it in memory. It would be more convenient to wrap everything into a custom nn.Module, as sketched below.
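Following that last suggestion, a sketch of such a module (the BandLinear name and band parameter are made up for illustration; re-masking the weight on every forward guarantees values outside the band never influence the output, so their gradients stay zero):

import torch
import torch.nn as nn

class BandLinear(nn.Module):
    def __init__(self, n, band=3):
        super().__init__()
        self.linear = nn.Linear(n, n)
        mask = sum(torch.diagflat(torch.ones(n - abs(i)), i)
                   for i in range(-band // 2, band // 2 + 1))
        self.register_buffer("mask", mask)  # moves with the module, not trained

    def forward(self, x):
        # apply the banded weight; off-band entries contribute nothing
        return nn.functional.linear(x, self.linear.weight * self.mask,
                                    self.linear.bias)

m = BandLinear(5)
print(m(torch.rand(2, 5)).shape)  # torch.Size([2, 5])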
https://stackoverflow.com/questions/65883374/
Pytorch: copy.deepcopy vs torch.tensor.contiguous()?
In PyTorch, it seems the copy.deepcopy method is generally used to create deep copies of torch tensors instead of creating views of existing tensors. Meanwhile, as far as I understand, the torch.Tensor.contiguous() method turns a non-contiguous tensor into a contiguous tensor, or a view into a deeply copied tensor. Then, do the two code lines below work equivalently if I want to deep-copy src_tensor into dst_tensor? org_tensor = torch.rand(4) src_tensor = org_tensor dst_tensor = copy.deepcopy(src_tensor) # 1 dst_tensor = src_tensor.contiguous() # 2 If the two work equivalently, which method is better for deep-copying tensors?
The torch.Tensor.contiguous() and copy.deepcopy() methods are different. Here's an illustration: >>> x = torch.arange(6).view(2, 3) >>> x tensor([[0, 1, 2], [3, 4, 5]]) >>> x.stride() (3, 1) >>> x.is_contiguous() True >>> x = x.t() >>> x.stride() (1, 3) >>> x.is_contiguous() False >>> y = x.contiguous() >>> y.stride() (2, 1) >>> y.is_contiguous() True >>> z = copy.deepcopy(x) >>> z.stride() (1, 3) >>> z.is_contiguous() False >>> Here we can easily see that the .contiguous() method created a contiguous tensor from a non-contiguous one, while the deepcopy method just copied the data without converting it to a contiguous tensor. One more thing: contiguous creates a new tensor only if the old tensor is non-contiguous, while deepcopy always creates a new tensor. >>> x = torch.arange(10).view(2, 5) >>> x.is_contiguous() True >>> y = x.contiguous() >>> z = copy.deepcopy(x) >>> id(x) 2891710987432 >>> id(y) 2891710987432 >>> id(z) 2891710987720 contiguous() Use this method to convert non-contiguous tensors to contiguous tensors. deepcopy() Use this to copy nn.Module objects, i.e. mostly neural network objects, not tensors. clone() Use this method to copy tensors.
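Since the answer recommends clone() for tensors, a tiny sketch in the same REPL style contrasting it with contiguous() (clone always allocates a new tensor, and the copy is recorded in the autograd graph when the source requires gradients):

>>> x = torch.arange(10).view(2, 5)  # already contiguous
>>> w = x.clone()
>>> id(w) == id(x)                   # a new tensor, unlike x.contiguous() here
False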
https://stackoverflow.com/questions/65894586/
Is there a Numpy or pyTorch function for this code?
Basically, is there a NumPy or PyTorch function that does this: vp_sa_s=mdp_data['sa_s'].detach().clone() dims = vp_sa_s.size() for i in range(dims[0]): for j in range(dims[1]): for k in range(dims[2]): # to mimic matlab functionality: vp(mdp_data.sa_s) try: vp_sa_s[i,j,k] = vp[mdp_data['sa_s'][i,j,k]] except: pass Given that vp_sa_s is size (10,5,5) and each value is a valid index into vp, i.e. in range 0-9. vp is size (10,1) with a bunch of random values. Matlab does it elegantly and quickly with vp(mdp_data.sa_s), which will form a new (10,5,5) matrix. If all values in mdp_data.sa_s are 1, the result would be a (10,5,5) tensor with each value being the 1st value in vp. Does a function or method exist that can achieve this in less than O(N^3) time, as the above code is terribly inefficient? Thanks!
What is wrong with result = vp[vp_sa_s, 0]? Note that since your vp is of shape (10, 1) (it has a trailing singleton dimension), you need to add the , 0] index to get rid of this extra dimension.
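A quick runnable check of that one-liner (a sketch using random data with the shapes from the question):

import torch

vp = torch.randn(10, 1)
vp_sa_s = torch.randint(0, 10, (10, 5, 5))  # every value is a valid index into vp

result = vp[vp_sa_s, 0]
print(result.shape)  # torch.Size([10, 5, 5])
# if vp_sa_s[i, j, k] == 3, then result[i, j, k] == vp[3, 0]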
https://stackoverflow.com/questions/65898774/
Torch is installed but I'm unable to import it in a computer vision python project in Jupyter notebook
I'm working on a computer vision project using detectron2. I'm having trouble installing torch or importing it into my Jupyter notebook. I'm working on a Mac running macOS Catalina, my Python 3 version is 3.8.2, and I'm using Anaconda for my development environment. In the screenshot, it says torch is installed, but it throws an error while importing it.
You have installed torch for Python at /usr/local/lib/python3.9/site-packages, but as you said yourself, you are using "Anaconda for my development environment". So if your Jupyter notebook is really configured to use the Python installation that came alongside Anaconda, then you need to do conda install pytorch torchvision torchaudio cudatoolkit=10.1 -c pytorch Note: Make sure which Python interpreter your Jupyter is actually using by running import sys; print(sys.executable) Make sure that you install torch for the correct conda environment (the above command will also indicate which one that is) Set the cudatoolkit version according to your needs. There is also a simple interface on the official website to get the correct command.
https://stackoverflow.com/questions/65899616/
Does pytorch broadcast consume less memory than expand?
Do PyTorch operations that use broadcasting consume less memory than ones that use expand? For example, are the following two programs different in memory usage? import torch x = torch.randn(20,1) y = torch.randn(1,20) z = x*y import torch x = torch.randn(20,1).expand(-1,20) y = torch.randn(1,20).expand(20,-1) z = x*y
According to the documentation page of torch.expand: Expanding a tensor does not allocate new memory, but only creates a new view on the existing tensor You can experiment it yourself by profiling the calls (here in Colab): >>> x = torch.randn(200,1) >>> y = torch.randn(1,200) >>> %memit z = x*y peak memory: 286.85 MiB, increment: 0.31 MiB >>> x = torch.randn(200,1).expand(-1,200) >>> y = torch.randn(1,200).expand(200,-1) >>> %memit z = x*y peak memory: 286.86 MiB, increment: 0.00 MiB %memit is a magic function provided by memory_profiler: pip install memory_profiler %load_ext memory_profiler
https://stackoverflow.com/questions/65900110/
Error: optimizer got an empty parameter list. How can i modify my code?
Here is the code: class MLP(nn.Module): def __ini__(self): super (MLP, self). __init__() self.model=nn.nn.Sequential( nn.Linear(784, 200), nn.LeakyReLU(inplace=True), nn.Linear(200, 200), nn.LeakyReLU(inplace=True), nn.Linear(200, 10), nn.LeakyReLU(inplace=True), ) def forward(self,x): x=self.model(x) return device= torch.device('cuda:0') net = MLP().to(device) When running this code optimizer = optim.SGD(net.parameters(), lr=learning_rate) I get ValueError: optimizer got an empty parameter list. I am trying to imitate this notebook.
There are several issues with your code. Currently your code is equivalent to: class MLP(nn.Module): def forward(self, x): pass Here are the issues: Your initializer must be named __init__, not __ini__. forward should return a value, here x. Replace nn.nn.Sequential with nn.Sequential. class MLP(nn.Module): def __init__(self): super(MLP, self).__init__() self.model = nn.Sequential( nn.Linear(784, 200), nn.LeakyReLU(inplace=True), nn.Linear(200, 200), nn.LeakyReLU(inplace=True), nn.Linear(200, 10), nn.LeakyReLU(inplace=True)) def forward(self, x): x = self.model(x) return x device = torch.device('cuda:0') net = MLP().to(device) optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate)
https://stackoverflow.com/questions/65903600/
Torch: Why is this collate function so much faster than this other one?
I have developed two collate functions to read in data from h5py files (I tried to create some synthetic data for an MWE here, but it is not going to plan). The difference between the two in processing my data is about 10x -- a very large difference, and I am unsure why; I am curious for insights for my future collate functions. def slow(batch): ''' This function retrieves the data emitted from the H5 torch data set. It alters the emitted dimensions from the dataloader from: [batch_sz, layers, tokens, features], to: [layers, batch_sz, tokens, features] ''' embeddings = [] start_ids = [] end_ids = [] idxs = [] for i in range(len(batch)): embeddings.append(batch[i]['embeddings']) start_ids.append(batch[i]['start_ids']) end_ids.append(batch[i]['end_ids']) idxs.append(batch[i]['idx']) # package data; # swap to expected [layers, batch_sz, tokens, features] sample = {'embeddings': torch.as_tensor(embeddings).permute(1, 0, 2, 3), 'start_ids': torch.as_tensor(start_ids), 'end_ids': torch.as_tensor(end_ids), 'idx': torch.as_tensor(idxs)} return sample I thought the one below, with more loops, would be slower, but that is far from the case. def fast(batch): ''' This function alters the emitted dimensions from the dataloader from: [batch_sz, layers, tokens, features] to: [layers, batch_sz, tokens, features] for the embeddings ''' # turn data to tensors embeddings = torch.stack([torch.as_tensor(item['embeddings']) for item in batch]) # swap to expected [layers, batch_sz, tokens, features] embeddings = embeddings.permute(1, 0, 2, 3) # get start ids start_ids = torch.stack([torch.as_tensor(item['start_ids']) for item in batch]) # get end ids end_ids = torch.stack([torch.as_tensor(item['end_ids']) for item in batch]) # get idxs idxs = torch.stack([torch.as_tensor(item['idx']) for item in batch]) # repackage sample = {'embeddings': embeddings, 'start_ids': start_ids, 'end_ids': end_ids} return sample Edit: I tried swapping to this, but it is still about 10x slower than 'fast'. def slow(batch): ''' This function retrieves the data emitted from the H5 torch data set. It alters the emitted dimensions from the dataloader from: [batch_sz, layers, tokens, features], to: [layers, batch_sz, tokens, features] ''' embeddings = [] start_ids = [] end_ids = [] idxs = [] for item in batch: embeddings.append(item['embeddings']) start_ids.append(item['start_ids']) end_ids.append(item['end_ids']) idxs.append(item['idx']) # package data; # swap to expected [layers, batch_sz, tokens, features] sample = {'embeddings': torch.as_tensor(embeddings).permute(1, 0, 2, 3), 'start_ids': torch.as_tensor(start_ids), 'end_ids': torch.as_tensor(end_ids), 'idx': torch.as_tensor(idxs)} return sample
See this answer (and give it an upvote): https://stackoverflow.com/a/30245465/10475762 Particularly the line: "In other words and in general, list comprehensions perform faster because suspending and resuming a function's frame, or multiple functions in other cases, is slower than creating a list on demand." So in your case, you're calling append multiple times per collate, and collate is called quite a few times in your training/testing/evaluation steps, which all adds up. IMO, always avoid for loops whenever you can, as they seem to somehow invariably lead to slowdowns.
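A tiny way to observe the effect the linked answer describes, independent of the tensor work (a sketch; absolute timings vary by machine, but the comprehension typically wins):

import timeit

def with_loop(n=10_000):
    out = []
    for i in range(n):
        out.append(i * 2)  # repeated attribute lookup + call per element
    return out

def with_comprehension(n=10_000):
    return [i * 2 for i in range(n)]  # built on demand

print(timeit.timeit(with_loop, number=1_000))
print(timeit.timeit(with_comprehension, number=1_000))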
https://stackoverflow.com/questions/65904637/
Is there a way to retrieve the specific parameters used in a random torchvision transform?
I can augment my data during training by applying a random transform (rotation/translation/rescaling), but I don't know the value that was selected. I need to know what values were applied. I can manually set these values, but then I lose a lot of the benefits that torchvision transforms provide. Is there an easy way to get these values and implement them in a sensible way to apply during training? Here is an example. I would love to be able to print out the rotation angle and the translation/rescaling being applied to each image: import numpy as np import matplotlib.pyplot as plt from torchvision import transforms RandAffine = transforms.RandomAffine(degrees=0, translate=(0.1, 0.1), scale=(0.8, 1.2)) rotate = transforms.RandomRotation(degrees=45) shift = RandAffine composed = transforms.Compose([rotate, shift]) # Apply each of the above transforms on sample. fig = plt.figure() sample = np.zeros((28,28)) sample[5:15,7:20] = 255 sample = transforms.ToPILImage()(sample.astype(np.uint8)) title = ['None', 'Rot','Aff','Comp'] for i, tsfrm in enumerate([None,rotate, shift, composed]): if tsfrm: t_sample = tsfrm(sample) else: t_sample = sample ax = plt.subplot(1, 5, i + 2) plt.tight_layout() ax.set_title(title[i]) ax.imshow(np.reshape(np.array(list(t_sample.getdata())), (-1,28)), cmap='gray') plt.show()
I'm afraid there is no easy way around it: Torchvision's random transform utilities are built in such a way that the transform parameters will be sampled when called. They are unique random transforms, in the sense that (1) the parameters used are not accessible by the user and (2) the same random transformation is not repeatable. As of Torchvision 0.8.0, random transforms are generally built with two main functions: get_params: which will sample based on the transform's hyperparameters (what you have provided when you initialized the transform operator, namely the parameters' range of values) forward: the function that gets executed when applying the transform. The important part is that it gets its parameters from get_params, then applies them to the input using the associated deterministic function. For RandomRotation, F.rotate will get called. Similarly, RandomAffine will use F.affine. One solution to your problem is sampling the parameters from get_params yourself and calling the functional - deterministic - API instead. So you wouldn't be using RandomRotation, RandomAffine, nor any other Random* transformation for that matter. For instance, let's look at T.RandomRotation (I have removed the comments for conciseness). class RandomRotation(torch.nn.Module): def __init__( self, degrees, interpolation=InterpolationMode.NEAREST, expand=False, center=None, fill=None, resample=None): # ... @staticmethod def get_params(degrees: List[float]) -> float: angle = float(torch.empty(1).uniform_(float(degrees[0]), \ float(degrees[1])).item()) return angle def forward(self, img): fill = self.fill if isinstance(img, Tensor): if isinstance(fill, (int, float)): fill = [float(fill)] * F._get_image_num_channels(img) else: fill = [float(f) for f in fill] angle = self.get_params(self.degrees) return F.rotate(img, angle, self.resample, self.expand, self.center, fill) def __repr__(self): # ... With that in mind, here is a possible override to modify T.RandomRotation: class RandomRotation(T.RandomRotation): def __init__(self, *args, **kwargs): super(RandomRotation, self).__init__(*args, **kwargs) # let super do all the work self.angle = self.get_params(self.degrees) # initialize your random parameters def forward(self, img): # override T.RandomRotation's forward fill = self.fill if isinstance(img, Tensor): if isinstance(fill, (int, float)): fill = [float(fill)] * F._get_image_num_channels(img) else: fill = [float(f) for f in fill] return F.rotate(img, self.angle, self.resample, self.expand, self.center, fill) I've essentially copied T.RandomRotation's forward function, the only difference being that the parameters are sampled in __init__ (i.e. once) instead of inside the forward (i.e. on every call). Torchvision's implementation covers all cases; you generally won't need to copy the full forward. In some cases, you can just call the functional version pretty much straight away. For example, if you don't need to set the fill parameters, you can just discard that part and only use: class RandomRotation(T.RandomRotation): def __init__(self, *args, **kwargs): super(RandomRotation, self).__init__(*args, **kwargs) # let super do all the work self.angle = self.get_params(self.degrees) # initialize your random parameters def forward(self, img): # override T.RandomRotation's forward return F.rotate(img, self.angle, self.resample, self.expand, self.center) If you want to override other random transforms you can look at the source code. The API is fairly self-explanatory and you shouldn't have too many issues implementing an override for each transform.
https://stackoverflow.com/questions/65906171/
LSTM error: AttributeError: 'tuple' object has no attribute 'dim'
I have the following code:

import torch
import torch.nn as nn

model = nn.Sequential(
    nn.LSTM(300, 300),
    nn.Linear(300, 100),
    nn.ReLU(),
    nn.Linear(300, 7),
)

s = torch.ones(1, 50, 300)
a = model(s)

And I get:

My-MBP:Desktop myname$ python3 testmodel.py
Traceback (most recent call last):
  File "testmodel.py", line 12, in <module>
    a = model(s)
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/torch/nn/modules/container.py", line 117, in forward
    input = module(input)
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/torch/nn/modules/linear.py", line 93, in forward
    return F.linear(input, self.weight, self.bias)
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/torch/nn/functional.py", line 1688, in linear
    if input.dim() == 2 and bias is not None:
AttributeError: 'tuple' object has no attribute 'dim'

Why? The dimensions should be fine. I saw related fixes to this issue when *input is defined in model.forward, but I don't even have anything implemented yet.

/edit: WAIT, there IS a *input!? How can I override this?
You won't be able to use an nn.LSTM inside an nn.Sequential, since recurrent layers like nn.LSTM output a tuple containing (1) the output features and (2) the hidden and cell states. The output must first be unpacked in order to use the output features in your subsequent layer, nn.Linear. Something like this, if you're interested in the hidden and cell states:

rnn = nn.LSTM(300, 300)
output, (h_n, c_n) = rnn(x)

You could define a custom nn.Module and implement a simple forward function:

class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.rnn = nn.LSTM(300, 300)
        self.body = nn.Sequential(
            nn.Linear(300, 100),
            nn.ReLU(),
            nn.Linear(100, 7))  # <- you had it set to in_features=300

    def forward(self, x):
        x, _ = self.rnn(x)  # <- ignore the second output
        x = self.body(x)
        return x

Such that:

>>> model = Model()
>>> s = torch.ones(1, 50, 300)
>>> model(s).shape
torch.Size([1, 50, 7])
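If you would rather keep everything inside an nn.Sequential, one common workaround (a sketch of my own, not required by the above) is a tiny wrapper module that drops the state tuple; nn.Sequential passes the LSTM's tuple output straight to it:

import torch
import torch.nn as nn

class ExtractOutput(nn.Module):
    # receives the (output, (h_n, c_n)) tuple from nn.LSTM, keeps only the features
    def forward(self, x):
        output, _ = x
        return output

model = nn.Sequential(
    nn.LSTM(300, 300),
    ExtractOutput(),
    nn.Linear(300, 100),
    nn.ReLU(),
    nn.Linear(100, 7),
)

s = torch.ones(1, 50, 300)
print(model(s).shape)  # torch.Size([1, 50, 7])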
https://stackoverflow.com/questions/65906889/
slice tensor of tensors using boolean tensor
Having two tensors: inputs_tokens is a batch of 20x300 token ids, and seq_A is my model output with size [20, 300, 512] (a 512-dim vector for each of the tokens in the batch).

seq_A.size()
Out[1]: torch.Size([20, 300, 512])
inputs_tokens.size()
torch.Size([20, 300])

I would like to get only the vectors of the token 101 (CLS), as follows:

cls_tokens = (inputs_tokens == 101)
cls_tokens
Out[4]:
tensor([[ True, False, False,  ..., False, False, False],
        [ True, False, False,  ..., False, False, False],
        [ True, False, False,  ..., False, False, False],
        ...

How do I slice seq_A to get only the vectors which are true in cls_tokens for each batch? When I do

seq_A[cls_tokens].size()
Out[7]: torch.Size([278, 512])

but I still need it to be of size [20 x N x 512] (otherwise I don't know which sample each vector belongs to).
TLDR; You can't: all sequences must have the same size along a given axis.

Take this simplified example:

>>> inputs_tokens = torch.tensor([[  1, 101,  18, 101,   9],
                                  [  1,   2, 101, 101, 101]])
>>> inputs_tokens.shape
torch.Size([2, 5])

>>> cls_tokens = inputs_tokens == 101
tensor([[False,  True, False,  True, False],
        [False, False,  True,  True,  True]])

Indexing seq_A with the cls_tokens mask comes down to reducing it to cls_tokens's true values. In the general case where there is a different number of true values per batch element, keeping the shape is impossible. Following the above example, here is seq_A:

>>> seq_A = torch.rand(2, 5, 1)
tensor([[[0.4644],
         [0.7656],
         [0.3951],
         [0.6384],
         [0.1090]],

        [[0.6754],
         [0.0144],
         [0.7154],
         [0.5805],
         [0.5274]]])

According to your example, you would expect to have an output shape of (2, N, 1). What would N be? 3? What about the first batch element, which only has 2 true values? The resulting tensor can't have different sizes (2 and 3 on axis=1). Hence: "all sequences on axis=1 must have the same size".

If, however, you are expecting each batch element to have the same number of 101 tokens, then you could get away with a reshape of your indexed tensor:

>>> inputs_tokens = torch.tensor([[  1, 101, 101, 101,   9],
                                  [  1,   2, 101, 101, 101]])
>>> cls_tokens = inputs_tokens == 101
>>> N = cls_tokens[0].sum()
3

Here, remember, I'm assuming you have:

>>> assert all(cls_tokens.sum(axis=1) == N)

Therefore the desired output (with shape (2, 3, 1)) is:

>>> seq_A[cls_tokens].reshape(seq_A.size(0), N, -1)
tensor([[[0.7656],
         [0.3951],
         [0.6384]],

        [[0.7154],
         [0.5805],
         [0.5274]]])

Edit: if you really want ragged per-element results (back to the first example, where the counts differ), you would require the use of a list comprehension:

>>> [seq_A[i, cls_tokens[i]] for i in range(cls_tokens.size(0))]
[tensor([[0.7656],
         [0.6384]]),
 tensor([[0.7154],
         [0.5805],
         [0.5274]])]
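If the counts really do differ per batch element and you still want a single tensor, one further option (a sketch of mine, built on the list comprehension above) is to zero-pad the ragged list:

import torch
from torch.nn.utils.rnn import pad_sequence

inputs_tokens = torch.tensor([[  1, 101,  18, 101,   9],
                              [  1,   2, 101, 101, 101]])
seq_A = torch.rand(2, 5, 512)
cls_tokens = inputs_tokens == 101

ragged = [seq_A[i, cls_tokens[i]] for i in range(cls_tokens.size(0))]
padded = pad_sequence(ragged, batch_first=True)  # zero-pads shorter rows
print(padded.shape)  # torch.Size([2, 3, 512])

You would then need to track the true lengths (cls_tokens.sum(axis=1)) to know which rows are padding.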
https://stackoverflow.com/questions/65908585/
Found 0 Defected examples, But In the directory there are 10 images | Found 0 Perfect examples |
I am trying to read images from a directory. There are some images, but the code is saying 0 images. I checked with os.listdir(r'C:\Users\hafizurr\Documents\classifier\dataset\Perfect'), which is showing all of the images. But my code is not showing them. I am putting my code below. Please take a look and help me figure out the issue. Hoping for your help:

class_names = ['Perfect', 'Defected']
root_dir = r'C:\Users\hafizurr\Documents\classifier\dataset'
source_dirs = ['Perfect', 'Defected']

if os.path.isdir(os.path.join(root_dir, source_dirs[1])):
    os.mkdir(os.path.join(root_dir, 'test'))

for i, d in enumerate(source_dirs):
    os.rename(os.path.join(root_dir, d), os.path.join(root_dir, class_names[i]))

for c in class_names:
    os.mkdir(os.path.join(root_dir, 'test', c))

for c in class_names:
    images = [x for x in os.listdir(os.path.join(root_dir, c)) if x.lower().endswith('.JPG')]
    selected_images = random.sample(images, 0)
    for image in selected_images:
        source_path = os.path.join(root_dir, c, image)
        target_path = os.path.join(root_dir, 'test', c, image)
        shutil.move(source_path, target_path)

class ImgDataset(torch.utils.data.Dataset):
    def __init__(self, image_dirs, transform):
        def get_images(class_name):
            images = [x for x in os.listdir(image_dirs[class_name]) if x.lower().endswith('.JPG')]
            print(f'Found {len(images)} {class_name} examples')
            return images

        self.images = {}
        self.class_names = ['Perfect', 'Defected']
        for class_name in self.class_names:
            self.images[class_name] = get_images(class_name)
        self.image_dirs = image_dirs
        self.transform = transform

    def __len__(self):
        return sum([len(self.images[class_name]) for class_name in self.class_names])

    def __getitem__(self, index):
        class_name = random.choice(self.class_names)
        index = index % len(self.images[class_name])
        image_name = self.images[class_name][index]
        image_path = os.path.join(self.image_dirs[class_name], image_name)
        image = Image.open(image_path).convert('RGB')
        return self.transform(image), self.class_names.index(class_name)

train_transform = torchvision.transforms.Compose([
    torchvision.transforms.Resize(size=(224, 224)),
    torchvision.transforms.RandomHorizontalFlip(),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

test_transform = torchvision.transforms.Compose([
    torchvision.transforms.Resize(size=(224, 224)),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

train_dirs = {
    'Perfect': f'{root_dir}/Perfect',
    'Defected': f'{root_dir}/Defected'
}
train_dataset = ImgDataset(train_dirs, train_transform)
That's because x.lower().endswith('.JPG') will always be false! You could do x.lower().endswith('.jpg') instead. Alternatively, use imghdr.what to check the image type.
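To illustrate (a small snippet of my own, with made-up file names): once the name is lowered, the suffix check must be lowercase too, while imghdr sidesteps the naming question entirely by inspecting file contents.

import imghdr
import os

names = ['photo1.JPG', 'photo2.jpg', 'notes.txt']
print([n for n in names if n.lower().endswith('.JPG')])  # [] - a lowered name never ends in '.JPG'
print([n for n in names if n.lower().endswith('.jpg')])  # ['photo1.JPG', 'photo2.jpg']

# imghdr checks the actual file header, so the extension doesn't matter
# (the directory d is hypothetical; the files must exist on disk):
# images = [n for n in os.listdir(d) if imghdr.what(os.path.join(d, n)) == 'jpeg']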
https://stackoverflow.com/questions/65909034/
Image classification Using Pytorch
This is the code where I was working on image classification using PyTorch, and I'm not able to get the accuracy right. The accuracy exceeds 100; can anyone help me find the error?

def trained_model(criterion, optimizer, epochs=5):
    epoch_loss = 0.0
    epoch_accuracy = 0
    running_loss = 0
    running_accuracy = 0
    total = 0
    for epoch in range(epochs):
        print('epoch : {}/{}'.format(epoch+1, epochs))
        for images, labels in train_loader:
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            _, predictions = torch.max(outputs, dim=1)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            running_accuracy += torch.sum(predictions == labels.data)
        epoch_loss = running_loss / len(train_dataset)
        epoch_accuracy = running_accuracy / len(train_dataset)
        print('Loss:{:.4f} , Accuracy : {:.4f} '.format(epoch_loss, epoch_accuracy))
    return model
You can use torch.argmax over the class dimension (dim=1) to get the class predictions from your model output, assuming you are working with indices as labels. Something like the following will get you the average accuracy of the current batch:

>>> outputs = torch.rand(4, 5)             # batch of 4 samples, 5 classes
>>> preds = torch.argmax(outputs, dim=1)   # one predicted class index per sample
tensor([2, 0, 3, 3])
>>> labels = torch.tensor([2, 1, 3, 0])
>>> accuracy = (preds == labels).float().mean()
tensor(0.5000)

Also note that in your loop, running_loss and running_accuracy are never reset between epochs, so dividing the still-growing totals by len(train_dataset) will exceed 1 after the first epoch. Reset both accumulators at the start of every epoch.
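A minimal corrected loop might look like this (a sketch that reuses the question's names model, criterion, optimizer, train_loader, train_dataset and device, all assumed to exist):

def trained_model(criterion, optimizer, epochs=5):
    for epoch in range(1, epochs + 1):
        running_loss = 0.0    # reset the accumulators every epoch
        running_correct = 0
        for images, labels in train_loader:
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            preds = torch.argmax(outputs, dim=1)
            running_correct += (preds == labels).sum().item()
            running_loss += loss.item()
        # loss.item() is already a per-batch mean, so average over batches
        print('epoch {}/{} - Loss: {:.4f}, Accuracy: {:.4f}'.format(
            epoch, epochs,
            running_loss / len(train_loader),
            running_correct / len(train_dataset)))
    return model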
https://stackoverflow.com/questions/65916356/
What is the LibTorch equivalent to PyTorch's torch.no_grad?
When testing a network in PyTorch one can use with torch.no_grad():. What is the Libtorch (C++) equivalent? Thanks!
The equivalent in LibTorch is torch::NoGradGuard no_grad, see documentation.
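Since this is a C++ question, a small C++ sketch may help (my illustration, not from the documentation): torch::NoGradGuard is an RAII guard, so gradient tracking is disabled only for the scope in which the guard lives.

#include <torch/torch.h>
#include <iostream>

int main() {
  auto w = torch::ones({2, 2}, torch::requires_grad());
  {
    torch::NoGradGuard no_grad;  // autograd disabled from here...
    auto y = w * 2;
    std::cout << y.requires_grad() << std::endl;  // 0 (false)
  }                              // ...until the guard goes out of scope
  auto z = w * 2;
  std::cout << z.requires_grad() << std::endl;  // 1 (true)
}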
https://stackoverflow.com/questions/65920683/
Custom dataset and dataloader
I am new to PyTorch. I have a big dataset consisting of two txt files, one for the data and the other for the target data. In the training file each line is a list of length 340; in the target file each line is a list of length 136. I would like to ask how I can define my dataset so I can use a DataLoader to load my data to train a PyTorch model? I appreciate your answers.
Dataset from torch.utils.data is an abstract class representing a dataset. Your custom dataset should inherit Dataset and override the following methods:

__len__() so that len(dataset) returns the size of the dataset.
__getitem__() to support indexing, such that dataset[i] can be used to get the ith sample.

As an example, I have written a general custom dataset for your problem statement, where data.txt has the data and label.txt has the labels:

import torch
from torch.utils.data import Dataset

class CustomDataset(Dataset):
    def __init__(self):
        with open('data.txt', 'r') as f:
            self.data_info = f.readlines()
        with open('label.txt', 'r') as f:
            self.label_info = f.readlines()

    def __getitem__(self, index):
        single_data = self.data_info[index].rstrip('\n')
        single_label = self.label_info[index].rstrip('\n')
        return (single_data, single_label)

    def __len__(self):
        return len(self.data_info)

# Testing
d = CustomDataset()
print(d[1])  # should output a data line along with its label

This is a basic skeleton for your case; you will have to make the required changes to match your dataset.
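Since each line in your files is a list of numbers, you will likely want to parse it into a tensor inside __getitem__. A sketch of a drop-in replacement for the method above, assuming whitespace-separated floats (adjust the split and dtype to your actual file format):

    def __getitem__(self, index):
        # parse a line like "0.1 0.5 ..." into a float tensor
        data = torch.tensor([float(v) for v in self.data_info[index].split()])    # length 340
        label = torch.tensor([float(v) for v in self.label_info[index].split()])  # length 136
        return data, label

With tensors returned, the default collate function can batch them directly:

from torch.utils.data import DataLoader

loader = DataLoader(CustomDataset(), batch_size=32, shuffle=True)
for data, label in loader:
    print(data.shape, label.shape)  # torch.Size([32, 340]) torch.Size([32, 136])
    break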
https://stackoverflow.com/questions/65920729/
PyTorch - Convert CIFAR dataset to `TensorDataset`
I train ResNet34 on the CIFAR dataset. For a certain reason, I need to convert the dataset into TensorDataset. My solution is based on this: https://stackoverflow.com/a/44475689/15072863 with some differences (maybe they are critical, but I don't see why). It looks like I'm not doing this correctly.

Train loader:

transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])

train_ds = torchvision.datasets.CIFAR10('/files/', train=True, transform=transform_train, download=True)

xs, ys = [], []
for x, y in train_ds:
    xs.append(x)
    ys.append(y)

# 1) Standard version
# cifar_train_loader = DataLoader(train_ds, batch_size=batch_size_train, shuffle=True, num_workers=num_workers)

# 2) TensorDataset version, seems to be incorrect
cifar_tensor_ds = TensorDataset(torch.stack(xs), torch.tensor(ys, dtype=torch.long))
cifar_train_loader = DataLoader(cifar_tensor_ds, batch_size=batch_size_train, shuffle=True, num_workers=num_workers)

I don't think it matters, but the test loader is defined as usual:

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

cifar_test_loader = DataLoader(
    torchvision.datasets.CIFAR10('/files/', train=False, transform=transform_test, download=True),
    batch_size=batch_size_test, shuffle=False, num_workers=num_workers)

I know that something is wrong with how I use TensorDataset, since:

With TensorDataset I achieve 100% train accuracy, 80% test accuracy
With the standard Dataset I achieve 99% train accuracy (never 100%), 90% test accuracy

So, what am I doing wrong?

P.S.: My final goal is to split the dataset into 10 datasets based on their class. Is there a better way to do this? Of course, I can define my own subclass of Dataset, but manually splitting it and creating TensorDatasets seemed simpler.
When using the "standard" dataset, each time you load an image, a random transform (flip + crop) is applied to it. As a consequence, virtually every image of every epoch is unique, seen only once. So you effectively have nb_epochs * len(dataset) different inputs.

With your custom dataset, you first read all the images of the CIFAR dataset (each of them with a random transform), store them all, and then use the stored tensor as your training inputs. Thus at each epoch, the network sees exactly the same inputs.

Since the network was already able to achieve great accuracy with the random transformations, removing them makes the task even easier, which is why the train accuracy reaches 100% (and why the network generalizes worse, hence the lower test accuracy).

Oh, and you should definitely redefine your own subclass of Dataset. It's not even complicated, and it will be much easier to work with. You just need to extract the 10 different datasets, either by manually moving the images into their folders or by using some reindexing arrays or something like that. Either way, you will only have to do it once, so it's not a big deal.
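For the P.S., a per-class split doesn't require TensorDataset at all: torch.utils.data.Subset with per-class index lists keeps the on-the-fly random transforms. A sketch, where train_ds is the CIFAR10 dataset from your question:

import numpy as np
from torch.utils.data import Subset

targets = np.array(train_ds.targets)  # torchvision's CIFAR10 stores labels in .targets
per_class = [Subset(train_ds, np.where(targets == c)[0].tolist()) for c in range(10)]
print(len(per_class[0]))  # 5000 training images per class in CIFAR-10

Since Subset defers indexing to the underlying dataset, each access still applies the random crop/flip, so you keep the augmentation benefits described above.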
https://stackoverflow.com/questions/65925371/