version | code | apis | full_version | repo_name | hexsha
---|---|---|---|---|---
1.4 | import numpy as np, sys, os, random, pdb, json, uuid, time, argparse
from pprint import pprint
import logging, logging.config
from collections import defaultdict as ddict
from ordered_set import OrderedSet
# PyTorch related imports
import torch
from torch.nn import functional as F
from torch.nn.init import xavier_normal_
from torch.utils.data import DataLoader
from torch.nn import Parameter
from torch_scatter import scatter_add
np.set_printoptions(precision=4)
def set_gpu(gpus):
"""
Sets the GPU to be used for the run
Parameters
----------
gpus: Comma-separated string of GPU ids to be used for the run
Returns
-------
"""
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = gpus
def get_logger(name, log_dir, config_dir):
"""
Creates a logger object
Parameters
----------
name: Name of the logger file
log_dir: Directory where logger file needs to be stored
config_dir: Directory from where log_config.json needs to be read
Returns
-------
A logger object which writes to both file and stdout
"""
config_dict = json.load(open(config_dir + 'log_config.json'))
config_dict['handlers']['file_handler']['filename'] = log_dir + name.replace('/', '-')
logging.config.dictConfig(config_dict)
logger = logging.getLogger(name)
std_out_format = '%(asctime)s - [%(levelname)s] - %(message)s'
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logging.Formatter(std_out_format))
logger.addHandler(consoleHandler)
return logger
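# Example (hypothetical paths): logger = get_logger('run_1', './log/', './config/')
# config_dir must contain a log_config.json that defines a 'file_handler' handler.
# The next helper, get_combined_results, averages head (left) and tail (right)
# link-prediction metrics (MR, MRR, Hits@1-10) and also reports their mean over both directions.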
def get_combined_results(left_results, right_results):
results = {}
count = float(left_results['count'])
results['left_mr'] = round(left_results ['mr'] /count, 5)
results['left_mrr'] = round(left_results ['mrr']/count, 5)
results['right_mr'] = round(right_results['mr'] /count, 5)
results['right_mrr'] = round(right_results['mrr']/count, 5)
results['mr'] = round((left_results['mr'] + right_results['mr']) /(2*count), 5)
results['mrr'] = round((left_results['mrr'] + right_results['mrr'])/(2*count), 5)
for k in range(10):
results['left_hits@{}'.format(k+1)] = round(left_results ['hits@{}'.format(k+1)]/count, 5)
results['right_hits@{}'.format(k+1)] = round(right_results['hits@{}'.format(k+1)]/count, 5)
results['hits@{}'.format(k+1)] = round((left_results['hits@{}'.format(k+1)] + right_results['hits@{}'.format(k+1)])/(2*count), 5)
return results
def get_param(shape):
param = Parameter(torch.Tensor(*shape))
xavier_normal_(param.data)
return param
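# The helpers below operate on the (real, imag) pairs returned by torch.rfft (the FFT API
# that predates torch.fft and was removed after PyTorch 1.7):
#   com_mult - element-wise complex multiplication
#   conj     - complex conjugate (negates the imaginary part in place)
#   cconv    - circular convolution, computed in the frequency domain
#   ccorr    - circular correlation, used e.g. as a composition operator in CompGCN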
def com_mult(a, b):
r1, i1 = a[..., 0], a[..., 1]
r2, i2 = b[..., 0], b[..., 1]
return torch.stack([r1 * r2 - i1 * i2, r1 * i2 + i1 * r2], dim = -1)
def conj(a):
a[..., 1] = -a[..., 1]
return a
def cconv(a, b):
return torch.irfft(com_mult(torch.rfft(a, 1), torch.rfft(b, 1)), 1, signal_sizes=(a.shape[-1],))
def ccorr(a, b):
return torch.irfft(com_mult(conj(torch.rfft(a, 1)), torch.rfft(b, 1)), 1, signal_sizes=(a.shape[-1],)) | [
"torch.rfft",
"torch.stack",
"torch.nn.init.xavier_normal_",
"torch.Tensor"
] | 1.4.0 | syedhamzazaidi/CompGCN | 76de7466b18ee39416fd9fc0d45996f0caa60186 |
1.0 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
import torch.nn as nn
import torch.nn.functional as F
import torch
import os
class MnistModel(Enum):
MODEL_A = "modelA"
MODEL_B = "modelB"
MODEL_C = "modelC"
MODEL_D = "modelD"
MADRY_MODEL = "madry"
def __str__(self):
return self.value
class modelA(nn.Module):
def __init__(self):
super().__init__()
self.num_classes = 10
self.conv1 = nn.Conv2d(1, 64, 5)
self.conv2 = nn.Conv2d(64, 64, 5)
self.dropout1 = nn.Dropout(0.25)
self.fc1 = nn.Linear(64 * 20 * 20, 128)
self.dropout2 = nn.Dropout(0.5)
self.fc2 = nn.Linear(128, 10)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = self.dropout1(x)
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = self.dropout2(x)
x = self.fc2(x)
return x
class modelB(nn.Module):
def __init__(self):
super().__init__()
self.num_classes = 10
self.dropout1 = nn.Dropout(0.2)
self.conv1 = nn.Conv2d(1, 64, 8)
self.conv2 = nn.Conv2d(64, 128, 6)
self.conv3 = nn.Conv2d(128, 128, 5)
self.dropout2 = nn.Dropout(0.5)
self.fc = nn.Linear(128 * 12 * 12, 10)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.dropout1(x)
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = self.dropout2(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class modelC(nn.Module):
def __init__(self):
super().__init__()
self.num_classes = 10
self.conv1 = nn.Conv2d(1, 128, 3)
self.conv2 = nn.Conv2d(128, 64, 3)
self.fc1 = nn.Linear(64 * 5 * 5, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = torch.tanh(self.conv1(x))
x = F.max_pool2d(x, 2)
x = torch.tanh(self.conv2(x))
x = F.max_pool2d(x, 2)
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
class modelD(nn.Module):
def __init__(self):
super().__init__()
self.num_classes = 10
self.fc1 = nn.Linear(1 * 28 * 28, 300)
self.dropout1 = nn.Dropout(0.5)
self.fc2 = nn.Linear(300, 300)
self.dropout2 = nn.Dropout(0.5)
self.fc3 = nn.Linear(300, 300)
self.dropout3 = nn.Dropout(0.5)
self.fc4 = nn.Linear(300, 300)
self.dropout4 = nn.Dropout(0.5)
self.fc5 = nn.Linear(300, 10)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = self.dropout1(x)
x = F.relu(self.fc2(x))
x = self.dropout2(x)
x = F.relu(self.fc3(x))
x = self.dropout3(x)
x = F.relu(self.fc4(x))
x = self.dropout4(x)
x = self.fc5(x)
return x
__mnist_model_dict__ = {
MnistModel.MODEL_A: modelA,
MnistModel.MODEL_B: modelB,
MnistModel.MODEL_C: modelC,
MnistModel.MODEL_D: modelD,
}
def make_mnist_model(model: MnistModel) -> nn.Module:
return __mnist_model_dict__[model]()
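# Example (hypothetical usage): build model A and run a dummy MNIST batch through it.
# model = make_mnist_model(MnistModel.MODEL_A)
# logits = model(torch.zeros(1, 1, 28, 28))  # -> shape (1, 10)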
def load_mnist_classifier(
model_type: MnistModel,
name: str = None,
model_dir: str = None,
device=None,
eval=False,
) -> nn.Module:
if model_type == MnistModel.MADRY_MODEL:
from online_attacks.classifiers.madry import load_madry_model
filename = os.path.join(model_dir, "mnist", model_type.value, "%s" % name)
if os.path.exists(filename):
model = load_madry_model("mnist", filename)
else:
raise OSError("File %s not found !" % filename)
# Hack to be able to use some attacker class
model.num_classes = 10
elif model_type in __mnist_model_dict__:
model = make_mnist_model(model_type)
if name is not None:
filename = os.path.join(
model_dir, "mnist", model_type.value, "%s.pth" % name
)
if os.path.exists(filename):
state_dict = torch.load(filename, map_location=torch.device("cpu"))
model.load_state_dict(state_dict)
else:
raise OSError("File %s not found !" % filename)
else:
raise ValueError()
if eval:
model.eval()
return model.to(device)
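# Example (hypothetical name/path): load a trained classifier in eval mode on the CPU.
# model = load_mnist_classifier(MnistModel.MODEL_A, name="seed0",
#                               model_dir="./pretrained_models", device="cpu", eval=True)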
| [
"torch.nn.Linear",
"torch.device",
"torch.nn.Dropout",
"torch.nn.Conv2d",
"torch.nn.functional.max_pool2d"
] | 1.0.0 | hugobb/OnlineAttacks | 5cc971eba014e625ec43f67f6c5eadf713c4141c |
1.7 | import pandas as pd
import numpy as np
import os
import albumentations as A
import torch
from torch.utils.data import Dataset
import random
# import cv2
from skimage.io import imread
SEED = 42
TRAIN_RATIO = 0.9
class PadChest(Dataset):
"""
PadChest dataset
Hospital San Juan de Alicante - University of Alicante
PadChest: A large chest x-ray image dataset with multi-label annotated reports.
Aurelia Bustos, Antonio Pertusa, Jose-Maria Salinas, and Maria de la Iglesia-Vayá.
arXiv preprint, 2019. https://arxiv.org/abs/1901.07441
Dataset website:
http://bimcv.cipf.es/bimcv-projects/padchest/
Download full size images here:
https://academictorrents.com/details/dec12db21d57e158f78621f06dcbe78248d14850
Download resized (224x224) images here (recropped):
https://academictorrents.com/details/96ebb4f92b85929eadfb16761f310a6d04105797
"""
def __init__(self, path, train=True, aug=None, transform=None, views=["AP", "PA"], unique_patients=False):
# def __init__(self, imgpath,
# csvpath=os.path.join(thispath, "PADCHEST_chest_x_ray_images_labels_160K_01.02.19.csv.gz"),
# views=["PA"],
# transform=None,
# data_aug=None,
# flat_dir=True,
# seed=0,
# unique_patients=True):
super().__init__()
# super(PC_Dataset, self).__init__()
# np.random.seed(seed) # Reset the seed so all runs are the same.
csvpath = os.path.join(path, "PADCHEST_chest_x_ray_images_labels_160K_01.02.19.csv.gz")
data_aug = aug
self.pathologies = ["Atelectasis", "Consolidation", "Infiltration",
"Pneumothorax", "Edema", "Emphysema", "Fibrosis",
"Effusion", "Pneumonia", "Pleural_Thickening",
"Cardiomegaly", "Nodule", "Mass", "Hernia","Fracture",
"Granuloma", "Flattened Diaphragm", "Bronchiectasis",
"Aortic Elongation", "Scoliosis",
"Hilar Enlargement", "Support Devices" , "Tuberculosis",
"Air Trapping", "Costophrenic Angle Blunting", "Aortic Atheromatosis",
"Hemidiaphragm Elevation"]
self.pathologies = sorted(self.pathologies)
mapping = dict()
mapping["Infiltration"] = ["infiltrates",
"interstitial pattern",
"ground glass pattern",
"reticular interstitial pattern",
"reticulonodular interstitial pattern",
"alveolar pattern",
"consolidation",
"air bronchogram"]
mapping["Pleural_Thickening"] = ["pleural thickening"]
mapping["Consolidation"] = ["air bronchogram"]
mapping["Hilar Enlargement"] = ["adenopathy",
"pulmonary artery enlargement"]
mapping["Support Devices"] = ["device",
"pacemaker"]
self.imgpath = path
self.transform = transform
self.data_aug = data_aug
# self.flat_dir = flat_dir
self.csvpath = csvpath
# self.check_paths_exist()
self.csv = pd.read_csv(self.csvpath, low_memory=False)
# self.MAXVAL = 65535
# standardize view names
self.csv.loc[self.csv["Projection"].isin(["AP_horizontal"]),"Projection"] = "AP Supine"
# Keep only the specified views
if type(views) is not list:
views = [views]
self.views = views
self.csv["view"] = self.csv['Projection']
# print(self.csv.view.unique())
self.csv = self.csv[self.csv["view"].isin(self.views)]
# remove null stuff
self.csv = self.csv[~self.csv["Labels"].isnull()]
# remove missing files
missing = ["216840111366964012819207061112010307142602253_04-014-084.png",
"216840111366964012989926673512011074122523403_00-163-058.png",
"216840111366964012959786098432011033083840143_00-176-115.png",
"216840111366964012558082906712009327122220177_00-102-064.png",
"216840111366964012339356563862009072111404053_00-043-192.png",
"216840111366964013076187734852011291090445391_00-196-188.png",
"216840111366964012373310883942009117084022290_00-064-025.png",
"216840111366964012283393834152009033102258826_00-059-087.png",
"216840111366964012373310883942009170084120009_00-097-074.png",
"216840111366964012819207061112010315104455352_04-024-184.png"]
missing.extend([
# "216840111366964012283393834152009033102258826_00-059-087.png",
# "216840111366964012339356563862009068084200743_00-045-105.png",
# "216840111366964012339356563862009072111404053_00-043-192.png",
# "216840111366964012373310883942009117084022290_00-064-025.png",
# "216840111366964012373310883942009170084120009_00-097-074.png",
# "216840111366964012558082906712009300162151055_00-078-079.png",
# "216840111366964012558082906712009327122220177_00-102-064.png",
# "216840111366964012819207061112010306085429121_04-020-102.png",
# "216840111366964012819207061112010307142602253_04-014-084.png",
# "216840111366964012819207061112010315104455352_04-024-184.png",
# "216840111366964012959786098432011033083840143_00-176-115.png",
# "216840111366964012989926673512011074122523403_00-163-058.png",
# "216840111366964012989926673512011101154138555_00-191-086.png",
# "216840111366964012989926673512011132200139442_00-157-099.png",
# "216840111366964013076187734852011178154626671_00-145-086.png",
# "216840111366964013076187734852011291090445391_00-196-188.png",
#wrong
"216840111366964013829543166512013353113303615_02-092-190.png",
"216840111366964012904401302362010337093236130_03-198-079.png",
"216840111366964012904401302362010336141343749_03-198-010.png",
"216840111366964012989926673512011151082430686_00-157-045.png",
"216840111366964012989926673512011083134050913_00-168-009.png",
"216840111366964012373310883942009077082646386_00-047-124.png",
"216840111366964013686042548532013208193054515_02-026-007.png",
"216840111366964013962490064942014134093945580_01-178-104.png",
"216840111366964012819207061112010281134410801_00-129-131.png",
"216840111366964013590140476722013043111952381_02-065-198.png",
"216840111366964012283393834152009027091819347_00-007-136.png",
"216840111366964012373310883942009152114636712_00-102-045.png",
"216840111366964012283393834152009033140208626_00-059-118.png",
"216840111366964013590140476722013058110301622_02-056-111.png",
"216840111366964012487858717522009280135853083_00-075-001.png",
"216840111366964013590140476722013049100117076_02-063-097.png",
"216840111366964013649110343042013092101343018_02-075-146.png",
"216840111366964012487858717522009280135853083_00-075-001.png",
"216840111366964012819207061112010306085429121_04-020-102.png",
"269300710246070740096540277379121868595_e7zsan.png",
"216840111366964012373310883942009180082307973_00-097-011.png",
])
self.csv = self.csv[~self.csv["ImageID"].isin(missing)]
if unique_patients:
self.csv = self.csv.groupby("PatientID").first().reset_index()
# Get our classes.
self.labels = []
for pathology in self.pathologies:
mask = self.csv["Labels"].str.contains(pathology.lower())
if pathology in mapping:
for syn in mapping[pathology]:
#print("mapping", syn)
mask |= self.csv["Labels"].str.contains(syn.lower())
self.labels.append(mask.values)
self.labels = np.asarray(self.labels).T
self.labels = self.labels.astype(np.float32)
########## add consistent csv values
# offset_day_int
dt = pd.to_datetime(self.csv["StudyDate_DICOM"], format="%Y%m%d")
self.csv["offset_day_int"] = dt.astype(np.int)// 10**9 // 86400
# patientid
self.csv["patientid"] = self.csv["PatientID"].astype(str)
inds = np.arange(len(self.csv))
rng = np.random.RandomState(SEED)
rng.shuffle(inds)
# print("Padchest size full" , len(self.csv))
nb_train = int(len(inds) * TRAIN_RATIO)
if train:
inds = inds[0:nb_train]
else:
inds = inds[nb_train:]
self.csv = self.csv.iloc[inds]
self.labels = self.labels[inds]
# print("Padchest size" , len(self.csv))
def string(self):
return self.__class__.__name__ + " num_samples={} views={} data_aug={}".format(len(self), self.views, self.data_aug)
def __len__(self):
return len(self.labels)
def __getitem__(self, idx):
imgid = self.csv['ImageID'].iloc[idx]
img_path = os.path.join(self.imgpath,imgid)
# try:
img = imread(img_path)
# except Exception:
# print('<<',img_path,'>>')
# return torch.zeros((3,224,224)).float(),torch.zeros(27).float()
img = img / 65535
# print(img.min(), img.max())
# Check that images are 2D arrays
if len(img.shape) > 2:
img = img[:, :, 0]
if len(img.shape) < 2:
print("error, dimension lower than 2 for image")
# Add color channel
img = img[None, :, :]
if self.transform is not None:
img = self.transform(img)
if self.data_aug is not None:
img = self.data_aug(img)
img = img * np.ones((3,1,1), dtype="float32") # use 3 channels
img = torch.from_numpy(img).float()
target = torch.from_numpy(self.labels[idx]).float()
return img, target
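# Example (hypothetical path): by default each item is a (3, H, W) float image with a
# 27-dimensional multi-label target, so the dataset can be wrapped directly in a DataLoader:
# train_ds = PadChest("/data/padchest", train=True, views=["PA"])
# loader = torch.utils.data.DataLoader(train_ds, batch_size=16, shuffle=True)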
| [
"torch.from_numpy"
] | 1.7.0 | SLAMPAI/large-scale-pretraining-transfer | 730c1f25e56bbe5c70e5933f845824f98c015876 |
1.8 | import torch
from all.core import State, StateArray
from ._body import Body
class FrameStack(Body):
def __init__(self, agent, size=4, lazy=False):
super().__init__(agent)
self._frames = []
self._size = size
self._lazy = lazy
self._to_cache = TensorDeviceCache()
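# process_state keeps a sliding window of the last `size` observations and concatenates them
# along the channel axis (dim=1 for batched StateArrays, dim=0 for single States); e.g. with
# the default size=4 and (1, 84, 84) Atari frames this yields a (4, 84, 84) observation.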
def process_state(self, state):
if not self._frames:
self._frames = [state.observation] * self._size
else:
self._frames = self._frames[1:] + [state.observation]
if self._lazy:
return LazyState.from_state(state, self._frames, self._to_cache)
if isinstance(state, StateArray):
return state.update('observation', torch.cat(self._frames, dim=1))
return state.update('observation', torch.cat(self._frames, dim=0))
class TensorDeviceCache:
'''
To efficiently implement device transfer of lazy states, this class
caches the transferred tensor so that it is not copied multiple times.
'''
def __init__(self, max_size=16):
self.max_size = max_size
self.cache_data = []
def convert(self, value, device):
cached = None
for el in self.cache_data:
if el[0] is value:
cached = el[1]
break
if cached is not None and cached.device == torch.device(device):
new_v = cached
else:
new_v = value.to(device)
self.cache_data.append((value, new_v))
if len(self.cache_data) > self.max_size:
self.cache_data.pop(0)
return new_v
class LazyState(State):
@classmethod
def from_state(cls, state, frames, to_cache):
state = LazyState(state, device=frames[0].device)
state.to_cache = to_cache
state['observation'] = frames
return state
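# The 'observation' entry is kept as a list of frames and only concatenated (torch.cat along
# dim=0) when it is actually read, so the copy is deferred until the stacked frame is needed.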
def __getitem__(self, key):
if key == 'observation':
v = dict.__getitem__(self, key)
if torch.is_tensor(v):
return v
return torch.cat(dict.__getitem__(self, key), dim=0)
return super().__getitem__(key)
def update(self, key, value):
x = {}
for k in self.keys():
if not k == key:
x[k] = super().__getitem__(k)
x[key] = value
state = LazyState(x, device=self.device)
state.to_cache = self.to_cache
return state
def to(self, device):
if device == self.device:
return self
x = {}
for key, value in self.items():
if key == 'observation':
x[key] = [self.to_cache.convert(v, device) for v in value]
# x[key] = [v.to(device) for v in value]#torch.cat(value,axis=0).to(device)
elif torch.is_tensor(value):
x[key] = value.to(device)
else:
x[key] = value
state = LazyState.from_state(x, x['observation'], self.to_cache)
return state
| [
"torch.is_tensor",
"torch.device",
"torch.cat"
] | 1.8.0 | drozzy/autonomous-learning-library | 67b27aa71e6689e3447f1b342296b4360419ac38 |
1.0 | # coding=utf-8
# Copyright 2018 the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import gc
import math
import os
import random
import re
import subprocess
import tempfile
import unittest
from pathlib import Path
from unittest.mock import Mock, patch
import numpy as np
from huggingface_hub import Repository, delete_repo, login
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import (
AutoTokenizer,
IntervalStrategy,
PretrainedConfig,
TrainingArguments,
is_torch_available,
logging,
)
from transformers.file_utils import WEIGHTS_NAME, is_apex_available
from transformers.testing_utils import (
ENDPOINT_STAGING,
PASS,
USER,
CaptureLogger,
TestCasePlus,
get_gpu_count,
get_tests_dir,
is_staging_test,
require_optuna,
require_ray,
require_sentencepiece,
require_sigopt,
require_tokenizers,
require_torch,
require_torch_bf16,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
require_torch_tf32,
require_torch_up_to_2_gpus,
slow,
)
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
from transformers.training_args import OptimizerNames
from transformers.utils.hp_naming import TrialShortNamer
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import IterableDataset
import transformers.optimization
from transformers import (
AutoModelForSequenceClassification,
EarlyStoppingCallback,
GlueDataset,
GlueDataTrainingArguments,
GPT2Config,
GPT2LMHeadModel,
LineByLineTextDataset,
PreTrainedModel,
Trainer,
TrainerState,
)
from transformers.modeling_utils import unwrap_model
PATH_SAMPLE_TEXT = f"{get_tests_dir()}/fixtures/sample_text.txt"
class RegressionDataset:
def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):
np.random.seed(seed)
self.label_names = ["labels"] if label_names is None else label_names
self.length = length
self.x = np.random.normal(size=(length,)).astype(np.float32)
self.ys = [a * self.x + b + np.random.normal(scale=0.1, size=(length,)) for _ in self.label_names]
self.ys = [y.astype(np.float32) for y in self.ys]
def __len__(self):
return self.length
def __getitem__(self, i):
result = {name: y[i] for name, y in zip(self.label_names, self.ys)}
result["input_x"] = self.x[i]
return result
@dataclasses.dataclass
class RegressionTrainingArguments(TrainingArguments):
a: float = 0.0
b: float = 0.0
def __post_init__(self):
super().__post_init__()
# save resources not dealing with reporting (also avoids the warning when it's not set)
self.report_to = []
class RepeatDataset:
def __init__(self, x, length=64):
self.x = x
self.length = length
def __len__(self):
return self.length
def __getitem__(self, i):
return {"input_ids": self.x, "labels": self.x}
class DynamicShapesDataset:
def __init__(self, length=64, seed=42, batch_size=8):
self.length = length
np.random.seed(seed)
sizes = np.random.randint(1, 20, (length // batch_size,))
# For easy batching, we make every batch_size consecutive samples the same size.
self.xs = [np.random.normal(size=(s,)) for s in sizes.repeat(batch_size)]
self.ys = [np.random.normal(size=(s,)) for s in sizes.repeat(batch_size)]
def __len__(self):
return self.length
def __getitem__(self, i):
return {"input_x": self.xs[i], "labels": self.ys[i]}
class AlmostAccuracy:
def __init__(self, thresh=0.25):
self.thresh = thresh
def __call__(self, eval_pred):
predictions, labels = eval_pred
true = np.abs(predictions - labels) <= self.thresh
return {"accuracy": true.astype(np.float32).mean().item()}
class RegressionModelConfig(PretrainedConfig):
def __init__(self, a=0, b=0, double_output=False, **kwargs):
super().__init__(**kwargs)
self.a = a
self.b = b
self.double_output = double_output
self.hidden_size = 1
if is_torch_available():
class SampleIterableDataset(IterableDataset):
def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):
self.dataset = RegressionDataset(a=a, b=b, length=length, seed=seed, label_names=label_names)
def __iter__(self):
for i in range(len(self.dataset)):
yield self.dataset[i]
class FiniteIterableDataset(SampleIterableDataset):
def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):
super().__init__(a, b, length, seed, label_names)
self.current_sample = 0
def __iter__(self):
while self.current_sample < len(self.dataset):
yield self.dataset[self.current_sample]
self.current_sample += 1
class RegressionModel(nn.Module):
def __init__(self, a=0, b=0, double_output=False):
super().__init__()
self.a = nn.Parameter(torch.tensor(a).float())
self.b = nn.Parameter(torch.tensor(b).float())
self.double_output = double_output
self.config = None
def forward(self, input_x, labels=None, **kwargs):
y = input_x * self.a + self.b
if labels is None:
return (y, y) if self.double_output else (y,)
loss = nn.functional.mse_loss(y, labels)
return (loss, y, y) if self.double_output else (loss, y)
class RegressionDictModel(nn.Module):
def __init__(self, a=0, b=0):
super().__init__()
self.a = nn.Parameter(torch.tensor(a).float())
self.b = nn.Parameter(torch.tensor(b).float())
self.config = None
def forward(self, input_x, labels=None, **kwargs):
y = input_x * self.a + self.b
result = {"output": y}
if labels is not None:
result["loss"] = nn.functional.mse_loss(y, labels)
return result
class RegressionPreTrainedModel(PreTrainedModel):
config_class = RegressionModelConfig
base_model_prefix = "regression"
def __init__(self, config):
super().__init__(config)
self.a = nn.Parameter(torch.tensor(config.a).float())
self.b = nn.Parameter(torch.tensor(config.b).float())
self.double_output = config.double_output
def forward(self, input_x, labels=None, **kwargs):
y = input_x * self.a + self.b
if labels is None:
return (y, y) if self.double_output else (y,)
loss = nn.functional.mse_loss(y, labels)
return (loss, y, y) if self.double_output else (loss, y)
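# Variant that injects small noise drawn from the torch, numpy and python RNGs, so tests that
# resume from a checkpoint also exercise restoration of the random-number-generator states.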
class RegressionRandomPreTrainedModel(PreTrainedModel):
config_class = RegressionModelConfig
base_model_prefix = "regression"
def __init__(self, config):
super().__init__(config)
self.a = nn.Parameter(torch.tensor(config.a).float())
self.b = nn.Parameter(torch.tensor(config.b).float())
def forward(self, input_x, labels=None, **kwargs):
y = input_x * self.a + self.b
torch_rand = torch.randn(1).squeeze()
np_rand = np.random.rand()
rand_rand = random.random()
y += 0.05 * torch_rand + 0.05 * torch.tensor(np_rand + rand_rand)
if labels is None:
return (y,)
loss = nn.functional.mse_loss(y, labels)
return (loss, y)
class TstLayer(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.linear1 = nn.Linear(hidden_size, hidden_size)
self.ln1 = nn.LayerNorm(hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.ln2 = nn.LayerNorm(hidden_size)
self.bias = nn.Parameter(torch.zeros(hidden_size))
def forward(self, x):
h = self.ln1(nn.functional.relu(self.linear1(x)))
h = nn.functional.relu(self.linear2(x))
return self.ln2(x + h + self.bias)
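# Convenience factory used throughout the tests below, e.g.:
# trainer = get_regression_trainer(learning_rate=0.1, max_steps=10)
# trainer.train()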
def get_regression_trainer(a=0, b=0, double_output=False, train_len=64, eval_len=64, pretrained=True, **kwargs):
label_names = kwargs.get("label_names", None)
train_dataset = RegressionDataset(length=train_len, label_names=label_names)
eval_dataset = RegressionDataset(length=eval_len, label_names=label_names)
model_init = kwargs.pop("model_init", None)
if model_init is not None:
model = None
else:
if pretrained:
config = RegressionModelConfig(a=a, b=b, double_output=double_output)
model = RegressionPreTrainedModel(config)
else:
model = RegressionModel(a=a, b=b, double_output=double_output)
compute_metrics = kwargs.pop("compute_metrics", None)
data_collator = kwargs.pop("data_collator", None)
optimizers = kwargs.pop("optimizers", (None, None))
output_dir = kwargs.pop("output_dir", "./regression")
args = RegressionTrainingArguments(output_dir, a=a, b=b, **kwargs)
return Trainer(
model,
args,
data_collator=data_collator,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
compute_metrics=compute_metrics,
optimizers=optimizers,
model_init=model_init,
)
class TrainerIntegrationCommon:
def check_saved_checkpoints(self, output_dir, freq, total, is_pretrained=True):
file_list = [WEIGHTS_NAME, "training_args.bin", "optimizer.pt", "scheduler.pt", "trainer_state.json"]
if is_pretrained:
file_list.append("config.json")
for step in range(freq, total, freq):
checkpoint = os.path.join(output_dir, f"checkpoint-{step}")
self.assertTrue(os.path.isdir(checkpoint))
for filename in file_list:
self.assertTrue(os.path.isfile(os.path.join(checkpoint, filename)))
def check_best_model_has_been_loaded(
self, output_dir, freq, total, trainer, metric, greater_is_better=False, is_pretrained=True
):
checkpoint = os.path.join(output_dir, f"checkpoint-{(total // freq) * freq}")
log_history = TrainerState.load_from_json(os.path.join(checkpoint, "trainer_state.json")).log_history
values = [d[metric] for d in log_history]
best_value = max(values) if greater_is_better else min(values)
best_checkpoint = (values.index(best_value) + 1) * freq
checkpoint = os.path.join(output_dir, f"checkpoint-{best_checkpoint}")
if is_pretrained:
best_model = RegressionPreTrainedModel.from_pretrained(checkpoint)
best_model.to(trainer.args.device)
else:
best_model = RegressionModel()
state_dict = torch.load(os.path.join(checkpoint, WEIGHTS_NAME))
best_model.load_state_dict(state_dict)
best_model.to(trainer.args.device)
self.assertTrue(torch.allclose(best_model.a, trainer.model.a))
self.assertTrue(torch.allclose(best_model.b, trainer.model.b))
metrics = trainer.evaluate()
self.assertEqual(metrics[metric], best_value)
def check_trainer_state_are_the_same(self, trainer_state, trainer_state1):
# We'll pop things so operate on copies.
state = trainer_state.copy()
state1 = trainer_state1.copy()
# Log history may contain different logs for the time metrics (after resuming a training).
log_history = state.pop("log_history", None)
log_history1 = state1.pop("log_history", None)
self.assertEqual(state, state1)
skip_log_keys = ["train_runtime", "train_samples_per_second", "train_steps_per_second", "train_loss"]
for log, log1 in zip(log_history, log_history1):
for key in skip_log_keys:
_ = log.pop(key, None)
_ = log1.pop(key, None)
self.assertEqual(log, log1)
@require_torch
@require_sentencepiece
@require_tokenizers
class TrainerIntegrationPrerunTest(TestCasePlus, TrainerIntegrationCommon):
"""
Only tests that want to tap into the 2 trainings that are automatically pre-run in setUp:
- self.default_trained_model
- self.alternate_trained_model
directly, or via check_trained_model
"""
def setUp(self):
super().setUp()
args = TrainingArguments(".")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
trainer = get_regression_trainer(learning_rate=0.1)
trainer.train()
self.default_trained_model = (trainer.model.a, trainer.model.b)
trainer = get_regression_trainer(learning_rate=0.1, seed=314)
trainer.train()
self.alternate_trained_model = (trainer.model.a, trainer.model.b)
def check_trained_model(self, model, alternate_seed=False):
# Checks a training seeded with learning_rate = 0.1
(a, b) = self.alternate_trained_model if alternate_seed else self.default_trained_model
self.assertTrue(torch.allclose(model.a, a))
self.assertTrue(torch.allclose(model.b, b))
def test_reproducible_training(self):
# Checks that training worked, model trained and seed made a reproducible training.
trainer = get_regression_trainer(learning_rate=0.1)
trainer.train()
self.check_trained_model(trainer.model)
# Checks that a different seed gets different (reproducible) results.
trainer = get_regression_trainer(learning_rate=0.1, seed=314)
trainer.train()
self.check_trained_model(trainer.model, alternate_seed=True)
def test_trainer_with_datasets(self):
import datasets
np.random.seed(42)
x = np.random.normal(size=(64,)).astype(np.float32)
y = 2.0 * x + 3.0 + np.random.normal(scale=0.1, size=(64,))
train_dataset = datasets.Dataset.from_dict({"input_x": x, "label": y})
# Base training. Should have the same results as test_reproducible_training
model = RegressionModel()
args = TrainingArguments("./regression", learning_rate=0.1)
trainer = Trainer(model, args, train_dataset=train_dataset)
trainer.train()
self.check_trained_model(trainer.model)
# Can return tensors.
train_dataset.set_format(type="torch", dtype=torch.float32)
model = RegressionModel()
trainer = Trainer(model, args, train_dataset=train_dataset)
trainer.train()
self.check_trained_model(trainer.model)
# Adding one column not used by the model should have no impact
z = np.random.normal(size=(64,)).astype(np.float32)
train_dataset = datasets.Dataset.from_dict({"input_x": x, "label": y, "extra": z})
model = RegressionModel()
trainer = Trainer(model, args, train_dataset=train_dataset)
trainer.train()
self.check_trained_model(trainer.model)
def test_model_init(self):
train_dataset = RegressionDataset()
args = TrainingArguments("./regression", learning_rate=0.1)
trainer = Trainer(args=args, train_dataset=train_dataset, model_init=lambda: RegressionModel())
trainer.train()
self.check_trained_model(trainer.model)
# Re-training should restart from scratch and thus lead to the same results.
trainer.train()
self.check_trained_model(trainer.model)
# Re-training should restart from scratch and thus lead to the same results, and the new seed should be used.
trainer.args.seed = 314
trainer.train()
self.check_trained_model(trainer.model, alternate_seed=True)
def test_gradient_accumulation(self):
# Training with half the batch size but accumulation steps as 2 should give the same results.
trainer = get_regression_trainer(
gradient_accumulation_steps=2, per_device_train_batch_size=4, learning_rate=0.1
)
trainer.train()
self.check_trained_model(trainer.model)
def test_training_loss(self):
n_gpus = max(1, get_gpu_count())
# With even logs
trainer = get_regression_trainer(logging_steps=64 / (8 * n_gpus))
trainer.train()
log_history = trainer.state.log_history
losses = [log["loss"] for log in log_history if "loss" in log]
train_loss = log_history[-1]["train_loss"]
self.assertAlmostEqual(sum(losses) / len(losses), train_loss, places=4)
# With uneven logs
trainer = get_regression_trainer(logging_steps=5)
trainer.train()
log_history = trainer.state.log_history
# Training loss should be the same as before
new_train_loss = log_history[-1]["train_loss"]
self.assertAlmostEqual(train_loss, new_train_loss, places=4)
def test_custom_optimizer(self):
train_dataset = RegressionDataset()
args = TrainingArguments("./regression")
model = RegressionModel()
optimizer = torch.optim.SGD(model.parameters(), lr=1.0)
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda x: 1.0)
trainer = Trainer(model, args, train_dataset=train_dataset, optimizers=(optimizer, lr_scheduler))
trainer.train()
(a, b) = self.default_trained_model
self.assertFalse(torch.allclose(trainer.model.a, a))
self.assertFalse(torch.allclose(trainer.model.b, b))
self.assertEqual(trainer.optimizer.state_dict()["param_groups"][0]["lr"], 1.0)
def test_adafactor_lr_none(self):
# test the special case where lr=None, since the Trainer cannot run without an lr_scheduler
from transformers.optimization import Adafactor, AdafactorSchedule
train_dataset = RegressionDataset()
args = TrainingArguments("./regression")
model = RegressionModel()
optimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)
lr_scheduler = AdafactorSchedule(optimizer)
trainer = Trainer(model, args, train_dataset=train_dataset, optimizers=(optimizer, lr_scheduler))
trainer.train()
(a, b) = self.default_trained_model
self.assertFalse(torch.allclose(trainer.model.a, a))
self.assertFalse(torch.allclose(trainer.model.b, b))
self.assertGreater(trainer.optimizer.state_dict()["param_groups"][0]["lr"], 0)
@require_torch_gpu
@require_torch_bf16
def test_mixed_bf16(self):
# very basic test
trainer = get_regression_trainer(learning_rate=0.1, bf16=True)
trainer.train()
self.check_trained_model(trainer.model)
# --bf16 --half_precision_backend apex can't be used together
with self.assertRaises(ValueError):
trainer = get_regression_trainer(learning_rate=0.1, bf16=True, half_precision_backend="apex")
# will add more specific tests once there are some bugs to fix
@require_torch_gpu
@require_torch_tf32
def test_tf32(self):
# very basic test
trainer = get_regression_trainer(learning_rate=0.1, tf32=True)
trainer.train()
self.check_trained_model(trainer.model)
@require_torch
@require_sentencepiece
@require_tokenizers
class TrainerIntegrationTest(TestCasePlus, TrainerIntegrationCommon):
def setUp(self):
super().setUp()
args = TrainingArguments(".")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
def test_trainer_works_with_dict(self):
# Edge case because Apex with mode O2 will change our models to return dicts. This test checks it doesn't break
# anything.
train_dataset = RegressionDataset()
eval_dataset = RegressionDataset()
model = RegressionDictModel()
args = TrainingArguments("./regression")
trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)
trainer.train()
_ = trainer.evaluate()
_ = trainer.predict(eval_dataset)
def test_evaluation_with_keys_to_drop(self):
config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4)
tiny_gpt2 = GPT2LMHeadModel(config)
x = torch.randint(0, 100, (128,))
eval_dataset = RepeatDataset(x)
args = TrainingArguments("./test")
trainer = Trainer(tiny_gpt2, args, eval_dataset=eval_dataset)
# By default the past_key_values are removed
result = trainer.predict(eval_dataset)
self.assertTrue(isinstance(result.predictions, np.ndarray))
# We can still get them by setting ignore_keys to []
result = trainer.predict(eval_dataset, ignore_keys=[])
self.assertTrue(isinstance(result.predictions, tuple))
self.assertEqual(len(result.predictions), 2)
def test_training_arguments_are_left_untouched(self):
trainer = get_regression_trainer()
trainer.train()
args = TrainingArguments("./regression", report_to=[])
dict1, dict2 = args.to_dict(), trainer.args.to_dict()
for key in dict1.keys():
# Logging dir can be slightly different as they default to something with the time.
if key != "logging_dir":
self.assertEqual(dict1[key], dict2[key])
def test_number_of_steps_in_training(self):
# Regular training has n_epochs * len(train_dl) steps
trainer = get_regression_trainer(learning_rate=0.1)
train_output = trainer.train()
self.assertEqual(train_output.global_step, self.n_epochs * 64 / self.batch_size)
# Check passing num_train_epochs works (and a float version too):
trainer = get_regression_trainer(learning_rate=0.1, num_train_epochs=1.5)
train_output = trainer.train()
self.assertEqual(train_output.global_step, int(1.5 * 64 / self.batch_size))
# If we pass a max_steps, num_train_epochs is ignored
trainer = get_regression_trainer(learning_rate=0.1, max_steps=10)
train_output = trainer.train()
self.assertEqual(train_output.global_step, 10)
def test_logging_inf_nan_filter(self):
config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4)
tiny_gpt2 = GPT2LMHeadModel(config)
x = torch.randint(0, 100, (128,))
train_dataset = RepeatDataset(x)
# Trainer without inf/nan filter
args = TrainingArguments("./test", learning_rate=1e9, logging_steps=5, logging_nan_inf_filter=False)
trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset)
trainer.train()
log_history_no_filter = trainer.state.log_history
# Trainer with inf/nan filter
args = TrainingArguments("./test", learning_rate=1e9, logging_steps=5, logging_nan_inf_filter=True)
trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset)
trainer.train()
log_history_filter = trainer.state.log_history
def is_any_loss_nan_or_inf(log_history):
losses = [l["loss"] for l in log_history[:-1]]
return any(math.isnan(x) for x in losses) or any(math.isinf(x) for x in losses)
self.assertTrue(is_any_loss_nan_or_inf(log_history_no_filter))
self.assertFalse(is_any_loss_nan_or_inf(log_history_filter))
def test_train_and_eval_dataloaders(self):
n_gpu = max(1, torch.cuda.device_count())
trainer = get_regression_trainer(learning_rate=0.1, per_device_train_batch_size=16)
self.assertEqual(trainer.get_train_dataloader().batch_size, 16 * n_gpu)
trainer = get_regression_trainer(learning_rate=0.1, per_device_eval_batch_size=16)
self.assertEqual(trainer.get_eval_dataloader().batch_size, 16 * n_gpu)
# Check drop_last works
trainer = get_regression_trainer(
train_len=66, eval_len=74, learning_rate=0.1, per_device_train_batch_size=16, per_device_eval_batch_size=32
)
self.assertEqual(len(trainer.get_train_dataloader()), 66 // (16 * n_gpu) + 1)
self.assertEqual(len(trainer.get_eval_dataloader()), 74 // (32 * n_gpu) + 1)
trainer = get_regression_trainer(
train_len=66,
eval_len=74,
learning_rate=0.1,
per_device_train_batch_size=16,
per_device_eval_batch_size=32,
dataloader_drop_last=True,
)
self.assertEqual(len(trainer.get_train_dataloader()), 66 // (16 * n_gpu))
self.assertEqual(len(trainer.get_eval_dataloader()), 74 // (32 * n_gpu))
# Check passing a new dataset for evaluation works
new_eval_dataset = RegressionDataset(length=128)
self.assertEqual(len(trainer.get_eval_dataloader(new_eval_dataset)), 128 // (32 * n_gpu))
@require_torch_multi_gpu
def test_data_is_not_parallelized_when_model_is_parallel(self):
model = RegressionModel()
# Make the Trainer believe it's a parallelized model
model.is_parallelizable = True
model.model_parallel = True
args = TrainingArguments("./regression", per_device_train_batch_size=16, per_device_eval_batch_size=16)
trainer = Trainer(model, args, train_dataset=RegressionDataset(), eval_dataset=RegressionDataset())
# Check the Trainer was fooled
self.assertTrue(trainer.is_model_parallel)
self.assertEqual(trainer.args.n_gpu, 1)
# The batch size of the training and evaluation dataloaders should be 16, not 16 * n_gpu
self.assertEqual(trainer.get_train_dataloader().batch_size, 16)
self.assertEqual(len(trainer.get_train_dataloader()), 64 // 16)
self.assertEqual(trainer.get_eval_dataloader().batch_size, 16)
self.assertEqual(len(trainer.get_eval_dataloader()), 64 // 16)
def test_evaluate(self):
trainer = get_regression_trainer(a=1.5, b=2.5, compute_metrics=AlmostAccuracy())
results = trainer.evaluate()
x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
pred = 1.5 * x + 2.5
expected_loss = ((pred - y) ** 2).mean()
self.assertAlmostEqual(results["eval_loss"], expected_loss)
expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
self.assertAlmostEqual(results["eval_accuracy"], expected_acc)
# With a number of elements not a round multiple of the batch size
trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, compute_metrics=AlmostAccuracy())
results = trainer.evaluate()
x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
pred = 1.5 * x + 2.5
expected_loss = ((pred - y) ** 2).mean()
self.assertAlmostEqual(results["eval_loss"], expected_loss)
expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
self.assertAlmostEqual(results["eval_accuracy"], expected_acc)
def test_predict(self):
trainer = get_regression_trainer(a=1.5, b=2.5)
preds = trainer.predict(trainer.eval_dataset).predictions
x = trainer.eval_dataset.x
self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))
# With a number of elements not a round multiple of the batch size
trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66)
preds = trainer.predict(trainer.eval_dataset).predictions
x = trainer.eval_dataset.x
self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))
# With more than one output of the model
trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True)
preds = trainer.predict(trainer.eval_dataset).predictions
x = trainer.eval_dataset.x
self.assertEqual(len(preds), 2)
self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5))
self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5))
# With more than one output/label of the model
trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True, label_names=["labels", "labels_2"])
outputs = trainer.predict(trainer.eval_dataset)
preds = outputs.predictions
labels = outputs.label_ids
x = trainer.eval_dataset.x
self.assertEqual(len(preds), 2)
self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5))
self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5))
self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0]))
self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1]))
def test_dynamic_shapes(self):
eval_dataset = DynamicShapesDataset(batch_size=self.batch_size)
model = RegressionModel(a=2, b=1)
args = TrainingArguments("./regression")
trainer = Trainer(model, args, eval_dataset=eval_dataset)
# Check evaluation can run to completion
_ = trainer.evaluate()
# Check predictions
preds = trainer.predict(eval_dataset)
for expected, seen in zip(eval_dataset.ys, preds.label_ids):
self.assertTrue(np.array_equal(expected, seen[: expected.shape[0]]))
self.assertTrue(np.all(seen[expected.shape[0] :] == -100))
for expected, seen in zip(eval_dataset.xs, preds.predictions):
self.assertTrue(np.array_equal(2 * expected + 1, seen[: expected.shape[0]]))
self.assertTrue(np.all(seen[expected.shape[0] :] == -100))
# Same tests with eval accumulation
args = TrainingArguments("./regression", eval_accumulation_steps=2)
trainer = Trainer(model, args, eval_dataset=eval_dataset)
# Check evaluation can run to completion
_ = trainer.evaluate()
# Check predictions
preds = trainer.predict(eval_dataset)
for expected, seen in zip(eval_dataset.ys, preds.label_ids):
self.assertTrue(np.array_equal(expected, seen[: expected.shape[0]]))
self.assertTrue(np.all(seen[expected.shape[0] :] == -100))
for expected, seen in zip(eval_dataset.xs, preds.predictions):
self.assertTrue(np.array_equal(2 * expected + 1, seen[: expected.shape[0]]))
self.assertTrue(np.all(seen[expected.shape[0] :] == -100))
def test_log_level(self):
# testing only --log_level (--log_level_replica requires multiple gpus and DDP and is tested elsewhere)
logger = logging.get_logger()
log_info_string = "Running training"
# test with the default log_level - should be info and thus log on the main process
with CaptureLogger(logger) as cl:
trainer = get_regression_trainer()
trainer.train()
self.assertIn(log_info_string, cl.out)
# test with low log_level - lower than info
with CaptureLogger(logger) as cl:
trainer = get_regression_trainer(log_level="debug")
trainer.train()
self.assertIn(log_info_string, cl.out)
# test with high log_level - should be quiet
with CaptureLogger(logger) as cl:
trainer = get_regression_trainer(log_level="error")
trainer.train()
self.assertNotIn(log_info_string, cl.out)
def test_save_checkpoints(self):
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(output_dir=tmpdir, save_steps=5)
trainer.train()
self.check_saved_checkpoints(tmpdir, 5, int(self.n_epochs * 64 / self.batch_size))
# With a regular model that is not a PreTrainedModel
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(output_dir=tmpdir, save_steps=5, pretrained=False)
trainer.train()
self.check_saved_checkpoints(tmpdir, 5, int(self.n_epochs * 64 / self.batch_size), False)
@require_torch_multi_gpu
def test_run_seq2seq_double_train_wrap_once(self):
# test that we don't wrap the model more than once
# since wrapping primarily happens on multi-gpu setup we want multiple gpus to test for
# example DataParallel(DataParallel(model))
trainer = get_regression_trainer()
trainer.train()
model_wrapped_before = trainer.model_wrapped
trainer.train()
model_wrapped_after = trainer.model_wrapped
self.assertIs(model_wrapped_before, model_wrapped_after, "should be not wrapped twice")
@require_torch_up_to_2_gpus
def test_can_resume_training(self):
# This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of
# save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model
# won't be the same since the training dataloader is shuffled).
with tempfile.TemporaryDirectory() as tmpdir:
kwargs = dict(output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1)
trainer = get_regression_trainer(**kwargs)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
state = dataclasses.asdict(trainer.state)
checkpoint = os.path.join(tmpdir, "checkpoint-5")
# Reinitialize trainer
trainer = get_regression_trainer(**kwargs)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
# Now check with a later checkpoint that it also works when we span over one epoch
checkpoint = os.path.join(tmpdir, "checkpoint-15")
# Reinitialize trainer and load model
trainer = get_regression_trainer(**kwargs)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
# With a regular model that is not a PreTrainedModel
with tempfile.TemporaryDirectory() as tmpdir:
kwargs = dict(output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1, pretrained=False)
trainer = get_regression_trainer(**kwargs)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
state = dataclasses.asdict(trainer.state)
checkpoint = os.path.join(tmpdir, "checkpoint-5")
# Reinitialize trainer and load model
trainer = get_regression_trainer(**kwargs)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
# Now check with a later checkpoint that it also works when we span over one epoch
checkpoint = os.path.join(tmpdir, "checkpoint-15")
# Reinitialize trainer and load model
trainer = get_regression_trainer(**kwargs)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
# Now check failures
# 1. fail to find a bogus checkpoint
trainer = get_regression_trainer()
with self.assertRaises(Exception) as context:
trainer.train(resume_from_checkpoint=f"{checkpoint}-bogus")
self.assertTrue("Can't find a valid checkpoint at" in str(context.exception))
# 2. fail to find any checkpoint - due a fresh output_dir
output_dir2 = self.get_auto_remove_tmp_dir()
trainer = get_regression_trainer(output_dir=output_dir2)
with self.assertRaises(Exception) as context:
trainer.train(resume_from_checkpoint=True)
self.assertTrue("No valid checkpoint found in output directory" in str(context.exception))
@require_torch_non_multi_gpu
def test_resume_training_with_randomness(self):
# This test will fail flakily for more than 1 GPUs since the result will be slightly more different
# TODO: investigate why it fails for 2 GPUs?
if torch.cuda.is_available():
torch.backends.cudnn.deterministic = True
train_dataset = RegressionDataset(length=128)
eval_dataset = RegressionDataset()
config = RegressionModelConfig(a=0, b=2)
model = RegressionRandomPreTrainedModel(config)
tmp_dir = self.get_auto_remove_tmp_dir()
args = RegressionTrainingArguments(tmp_dir, save_steps=5, learning_rate=0.1)
trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
model = RegressionRandomPreTrainedModel(config)
trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)
trainer.train(resume_from_checkpoint=os.path.join(tmp_dir, "checkpoint-15"))
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
self.assertAlmostEqual(a, a1, delta=1e-8)
self.assertAlmostEqual(b, b1, delta=1e-8)
# regression for this issue: https://github.com/huggingface/transformers/issues/12970
def test_training_with_resume_from_checkpoint_false(self):
train_dataset = RegressionDataset(length=128)
eval_dataset = RegressionDataset()
config = RegressionModelConfig(a=0, b=2)
model = RegressionRandomPreTrainedModel(config)
tmp_dir = self.get_auto_remove_tmp_dir()
args = RegressionTrainingArguments(tmp_dir, save_steps=5, learning_rate=0.1)
trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)
trainer.train(resume_from_checkpoint=False)
@require_torch_up_to_2_gpus
def test_resume_training_with_gradient_accumulation(self):
# This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of
# save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model
# won't be the same since the training dataloader is shuffled).
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
output_dir=tmpdir,
train_len=128,
gradient_accumulation_steps=2,
per_device_train_batch_size=4,
save_steps=5,
learning_rate=0.1,
)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
state = dataclasses.asdict(trainer.state)
checkpoint = os.path.join(tmpdir, "checkpoint-5")
# Reinitialize trainer
trainer = get_regression_trainer(
output_dir=tmpdir,
train_len=128,
gradient_accumulation_steps=2,
per_device_train_batch_size=4,
save_steps=5,
learning_rate=0.1,
)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
@require_torch_up_to_2_gpus
def test_resume_training_with_frozen_params(self):
# This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of
# save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model
# won't be the same since the training dataloader is shuffled).
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
output_dir=tmpdir,
train_len=128,
per_device_train_batch_size=4,
save_steps=5,
learning_rate=0.1,
)
trainer.model.a.requires_grad_(False)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
state = dataclasses.asdict(trainer.state)
checkpoint = os.path.join(tmpdir, "checkpoint-5")
# Reinitialize trainer
trainer = get_regression_trainer(
output_dir=tmpdir,
train_len=128,
per_device_train_batch_size=4,
save_steps=5,
learning_rate=0.1,
)
trainer.model.a.requires_grad_(False)
trainer.train(resume_from_checkpoint=checkpoint)
self.assertFalse(trainer.model.a.requires_grad)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
def test_load_best_model_at_end(self):
total = int(self.n_epochs * 64 / self.batch_size)
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
a=1.5,
b=2.5,
output_dir=tmpdir,
learning_rate=0.1,
eval_steps=5,
evaluation_strategy="steps",
save_steps=5,
load_best_model_at_end=True,
)
self.assertFalse(trainer.args.greater_is_better)
trainer.train()
self.check_saved_checkpoints(tmpdir, 5, total)
self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_loss")
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
a=1.5,
b=2.5,
output_dir=tmpdir,
learning_rate=0.1,
eval_steps=5,
evaluation_strategy="steps",
save_steps=5,
load_best_model_at_end=True,
metric_for_best_model="accuracy",
compute_metrics=AlmostAccuracy(),
)
self.assertTrue(trainer.args.greater_is_better)
trainer.train()
self.check_saved_checkpoints(tmpdir, 5, total)
self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_accuracy", greater_is_better=True)
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
a=1.5,
b=2.5,
output_dir=tmpdir,
learning_rate=0.1,
evaluation_strategy="epoch",
save_strategy="epoch",
load_best_model_at_end=True,
metric_for_best_model="accuracy",
compute_metrics=AlmostAccuracy(),
)
self.assertTrue(trainer.args.greater_is_better)
trainer.train()
self.check_saved_checkpoints(tmpdir, 64 // self.batch_size, total)
self.check_best_model_has_been_loaded(
tmpdir, 64 // self.batch_size, total, trainer, "eval_accuracy", greater_is_better=True
)
# Test this works with a non PreTrainedModel
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
output_dir=tmpdir,
learning_rate=0.1,
eval_steps=5,
evaluation_strategy="steps",
save_steps=5,
load_best_model_at_end=True,
pretrained=False,
)
self.assertFalse(trainer.args.greater_is_better)
trainer.train()
self.check_saved_checkpoints(tmpdir, 5, total, is_pretrained=False)
self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_loss", is_pretrained=False)
@slow
def test_trainer_eval_mrpc(self):
MODEL_ID = "bert-base-cased-finetuned-mrpc"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID)
data_args = GlueDataTrainingArguments(
task_name="mrpc", data_dir=f"{get_tests_dir()}/fixtures/tests_samples/MRPC", overwrite_cache=True
)
eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev")
training_args = TrainingArguments(output_dir="./examples", no_cuda=True)
trainer = Trainer(model=model, args=training_args, eval_dataset=eval_dataset)
result = trainer.evaluate()
self.assertLess(result["eval_loss"], 0.2)
@slow
def test_trainer_eval_lm(self):
MODEL_ID = "distilroberta-base"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
dataset = LineByLineTextDataset(
tokenizer=tokenizer,
file_path=PATH_SAMPLE_TEXT,
block_size=tokenizer.max_len_single_sentence,
)
self.assertEqual(len(dataset), 31)
def test_training_iterable_dataset(self):
config = RegressionModelConfig()
model = RegressionPreTrainedModel(config)
train_dataset = SampleIterableDataset()
args = RegressionTrainingArguments(output_dir="./examples", max_steps=4)
trainer = Trainer(model=model, args=args, train_dataset=train_dataset)
trainer.train()
self.assertEqual(trainer.state.global_step, 4)
loader = trainer.get_train_dataloader()
self.assertIsInstance(loader, torch.utils.data.DataLoader)
self.assertIsInstance(loader.sampler, torch.utils.data.dataloader._InfiniteConstantSampler)
def test_training_finite_iterable_dataset(self):
config = RegressionModelConfig()
model = RegressionPreTrainedModel(config)
batch_size = 1
num_samples = 10
available_steps = num_samples // batch_size
data = FiniteIterableDataset(length=num_samples)
train_args = TrainingArguments(
".",
max_steps=available_steps + 1, # set a higher number than actually available
per_device_train_batch_size=batch_size,
)
trainer = Trainer(model, train_dataset=data, args=train_args)
with self.assertLogs("transformers.trainer", level="WARNING") as logs:
trainer.train()
self.assertIn(f"stopping training at step {available_steps}!", logs.output[0])
def test_evaluation_iterable_dataset(self):
config = RegressionModelConfig(a=1.5, b=2.5)
model = RegressionPreTrainedModel(config)
eval_dataset = SampleIterableDataset()
args = RegressionTrainingArguments(output_dir="./examples")
trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset, compute_metrics=AlmostAccuracy())
results = trainer.evaluate()
x, y = trainer.eval_dataset.dataset.x, trainer.eval_dataset.dataset.ys[0]
pred = 1.5 * x + 2.5
expected_loss = ((pred - y) ** 2).mean()
self.assertAlmostEqual(results["eval_loss"], expected_loss)
expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
self.assertAlmostEqual(results["eval_accuracy"], expected_acc)
# With a number of elements not a round multiple of the batch size
eval_dataset = SampleIterableDataset(length=66)
results = trainer.evaluate(eval_dataset)
x, y = eval_dataset.dataset.x, eval_dataset.dataset.ys[0]
pred = 1.5 * x + 2.5
expected_loss = ((pred - y) ** 2).mean()
self.assertAlmostEqual(results["eval_loss"], expected_loss)
expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
self.assertAlmostEqual(results["eval_accuracy"], expected_acc)
def test_predict_iterable_dataset(self):
config = RegressionModelConfig(a=1.5, b=2.5)
model = RegressionPreTrainedModel(config)
eval_dataset = SampleIterableDataset()
args = RegressionTrainingArguments(output_dir="./examples")
trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset, compute_metrics=AlmostAccuracy())
preds = trainer.predict(trainer.eval_dataset).predictions
x = eval_dataset.dataset.x
self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))
# With a number of elements not a round multiple of the batch size
test_dataset = SampleIterableDataset(length=66)
preds = trainer.predict(test_dataset).predictions
x = test_dataset.dataset.x
self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))
def test_num_train_epochs_in_training(self):
# len(train_dl) < gradient_accumulation_steps shouldn't give ``ZeroDivisionError`` when ``max_steps`` is given.
# It should give 1 update step for each epoch.
trainer = get_regression_trainer(
max_steps=3, train_len=64, per_device_train_batch_size=16, gradient_accumulation_steps=5
)
train_output = trainer.train()
self.assertEqual(train_output.global_step, 3)
# Even if ``max_steps`` is not specified, we still expect 1 update step for each epoch if
# len(train_dl) < gradient_accumulation_steps.
trainer = get_regression_trainer(train_len=64, per_device_train_batch_size=16, gradient_accumulation_steps=5)
train_output = trainer.train()
self.assertEqual(train_output.global_step, int(self.n_epochs))
def test_early_stopping_callback(self):
# early stopping stops training before num_training_epochs
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=tmp_dir,
num_train_epochs=20,
gradient_accumulation_steps=1,
per_device_train_batch_size=16,
load_best_model_at_end=True,
evaluation_strategy=IntervalStrategy.EPOCH,
save_strategy=IntervalStrategy.EPOCH,
compute_metrics=AlmostAccuracy(),
metric_for_best_model="accuracy",
)
trainer.add_callback(EarlyStoppingCallback(1, 0.0001))
train_output = trainer.train()
self.assertLess(train_output.global_step, 20 * 64 / 16)
# Invalid inputs to trainer with early stopping callback result in assertion error
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=tmp_dir,
num_train_epochs=20,
gradient_accumulation_steps=1,
per_device_train_batch_size=16,
evaluation_strategy=IntervalStrategy.EPOCH,
compute_metrics=AlmostAccuracy(),
metric_for_best_model="accuracy",
)
trainer.add_callback(EarlyStoppingCallback(1))
self.assertEqual(trainer.state.global_step, 0)
try:
trainer.train()
except AssertionError:
self.assertEqual(trainer.state.global_step, 0)
def test_flos_extraction(self):
trainer = get_regression_trainer(learning_rate=0.1)
def assert_flos_extraction(trainer, wrapped_model_to_check):
self.assertEqual(trainer.model, unwrap_model(wrapped_model_to_check))
self.assertGreaterEqual(getattr(unwrap_model(wrapped_model_to_check).config, "total_flos", 0), 0)
# with plain model
assert_flos_extraction(trainer, trainer.model)
# with enforced DataParallel
assert_flos_extraction(trainer, nn.DataParallel(trainer.model))
trainer.train()
self.assertTrue(isinstance(trainer.state.total_flos, float))
def check_checkpoint_deletion(self, trainer, output_dir, expected):
# Make fake checkpoints
for n in [5, 10, 15, 20, 25]:
os.makedirs(os.path.join(output_dir, f"{PREFIX_CHECKPOINT_DIR}-{n}"), exist_ok=True)
trainer._rotate_checkpoints(output_dir=output_dir)
glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{PREFIX_CHECKPOINT_DIR}-*")]
values = [int(re.match(f".*{PREFIX_CHECKPOINT_DIR}-([0-9]+)", d).groups()[0]) for d in glob_checkpoints]
self.assertSetEqual(set(values), set(expected))
def test_checkpoint_rotation(self):
with tempfile.TemporaryDirectory() as tmp_dir:
# Without best model at end
trainer = get_regression_trainer(output_dir=tmp_dir, save_total_limit=2)
self.check_checkpoint_deletion(trainer, tmp_dir, [20, 25])
# With best model at end
trainer = get_regression_trainer(
output_dir=tmp_dir, evaluation_strategy="steps", load_best_model_at_end=True, save_total_limit=2
)
trainer.state.best_model_checkpoint = os.path.join(tmp_dir, "checkpoint-5")
self.check_checkpoint_deletion(trainer, tmp_dir, [5, 25])
# Edge case: we don't always honor save_total_limit=1 if load_best_model_at_end=True to be able to resume
# from checkpoint
trainer = get_regression_trainer(
output_dir=tmp_dir, evaluation_strategy="steps", load_best_model_at_end=True, save_total_limit=1
)
trainer.state.best_model_checkpoint = os.path.join(tmp_dir, "checkpoint-25")
self.check_checkpoint_deletion(trainer, tmp_dir, [25])
trainer.state.best_model_checkpoint = os.path.join(tmp_dir, "checkpoint-5")
self.check_checkpoint_deletion(trainer, tmp_dir, [5, 25])
def check_mem_metrics(self, trainer, check_func):
metrics = trainer.train().metrics
check_func("init_mem_cpu_alloc_delta", metrics)
check_func("train_mem_cpu_alloc_delta", metrics)
if torch.cuda.device_count() > 0:
check_func("init_mem_gpu_alloc_delta", metrics)
check_func("train_mem_gpu_alloc_delta", metrics)
metrics = trainer.evaluate()
check_func("eval_mem_cpu_alloc_delta", metrics)
if torch.cuda.device_count() > 0:
check_func("eval_mem_gpu_alloc_delta", metrics)
metrics = trainer.predict(RegressionDataset()).metrics
check_func("test_mem_cpu_alloc_delta", metrics)
if torch.cuda.device_count() > 0:
check_func("test_mem_gpu_alloc_delta", metrics)
def test_mem_metrics(self):
# with mem metrics enabled
trainer = get_regression_trainer(skip_memory_metrics=False)
self.check_mem_metrics(trainer, self.assertIn)
# with mem metrics disabled
trainer = get_regression_trainer(skip_memory_metrics=True)
self.check_mem_metrics(trainer, self.assertNotIn)
@require_torch_gpu
def test_fp16_full_eval(self):
# this is a sensitive test so let's keep debugging printouts in place for quick diagnosis.
# it's using pretty large safety margins, but small enough to detect broken functionality.
debug = 0
n_gpus = get_gpu_count()
bs = 8
eval_len = 16 * n_gpus
# make the params somewhat big so that there will be enough RAM consumed to be able to
# measure things. We should get about 64KB for a+b in fp32
a = torch.ones(1000, bs) + 0.001
b = torch.ones(1000, bs) - 0.001
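# rough arithmetic behind the 64KB figure (a sketch, assuming bs == 8 and no other allocations):
# each tensor holds 1000 * 8 = 8_000 fp32 values * 4 bytes = ~32KB, so a and b together are ~64KB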
# 1. with mem metrics enabled
trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, skip_memory_metrics=False)
metrics = trainer.evaluate()
del trainer
gc.collect()
fp32_init = metrics["init_mem_gpu_alloc_delta"]
fp32_eval = metrics["eval_mem_gpu_alloc_delta"]
if debug:
print(f"fp32_init {fp32_init}")
print(f"fp32_eval {fp32_eval}")
# here we expect the model to be preloaded in trainer.__init__ and consume around 64K gpu ram.
# perfect world: fp32_init == 64<<10
self.assertGreater(fp32_init, 59_000)
# after eval there should be no extra memory allocated - with a small margin (other than the peak
# memory consumption for the forward calculation that gets recovered)
# perfect world: fp32_eval == close to zero
self.assertLess(fp32_eval, 5_000)
# 2. with fp16_full_eval enabled (memory metrics still collected)
trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, fp16_full_eval=True, skip_memory_metrics=False)
metrics = trainer.evaluate()
fp16_init = metrics["init_mem_gpu_alloc_delta"]
fp16_eval = metrics["eval_mem_gpu_alloc_delta"]
if debug:
print(f"fp16_init {fp16_init}")
print(f"fp16_eval {fp16_eval}")
# here we expect the model to not be preloaded in trainer.__init__, so with a small margin it should be close to 0
# perfect world: fp16_init == close to zero
self.assertLess(fp16_init, 5_000)
# here we put the model on device in eval and only `half()` of it, i.e. about 32K (again we ignore the peak margin which gets returned back)
# perfect world: fp16_eval == 32<<10
self.assertGreater(fp16_eval, 27_000)
# 3. relative comparison fp32 vs full fp16
# fp16_eval should be about half of fp32_init
# perfect world: fp32_init/2 == fp16_eval
self.assertAlmostEqual(fp16_eval, fp32_init / 2, delta=5_000)
@require_torch_gpu
@require_torch_bf16
def test_bf16_full_eval(self):
# note: most of the logic is the same as test_fp16_full_eval
# this is a sensitive test so let's keep debugging printouts in place for quick diagnosis.
# it's using pretty large safety margins, but small enough to detect broken functionality.
debug = 0
n_gpus = get_gpu_count()
bs = 8
eval_len = 16 * n_gpus
# make the params somewhat big so that there will be enough RAM consumed to be able to
# measure things. We should get about 64KB for a+b in fp32
a = torch.ones(1000, bs) + 0.001
b = torch.ones(1000, bs) - 0.001
# 1. with mem metrics enabled
trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, skip_memory_metrics=False)
metrics = trainer.evaluate()
del trainer
gc.collect()
fp32_init = metrics["init_mem_gpu_alloc_delta"]
fp32_eval = metrics["eval_mem_gpu_alloc_delta"]
if debug:
print(f"fp32_init {fp32_init}")
print(f"fp32_eval {fp32_eval}")
# here we expect the model to be preloaded in trainer.__init__ and consume around 64K gpu ram.
# perfect world: fp32_init == 64<<10
self.assertGreater(fp32_init, 59_000)
# after eval there should be no extra memory allocated - with a small margin (other than the peak
# memory consumption for the forward calculation that gets recovered)
# perfect world: fp32_eval == close to zero
self.assertLess(fp32_eval, 5_000)
# 2. with bf16_full_eval enabled (memory metrics still collected)
trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, bf16_full_eval=True, skip_memory_metrics=False)
metrics = trainer.evaluate()
bf16_init = metrics["init_mem_gpu_alloc_delta"]
bf16_eval = metrics["eval_mem_gpu_alloc_delta"]
if debug:
print(f"bf16_init {bf16_init}")
print(f"bf16_eval {bf16_eval}")
# here we expect the model to not be preloaded in trainer.__init__, so with a small margin it should be close to 0
# perfect world: bf16_init == close to zero
self.assertLess(bf16_init, 5_000)
# here we put the model on device in eval and only cast it to bf16, i.e. about 32K (again we ignore the peak margin which gets returned back)
# perfect world: bf16_eval == 32<<10
self.assertGreater(bf16_eval, 27_000)
# 3. relative comparison fp32 vs full bf16
# bf16_eval should be about half of fp32_init
# perfect world: fp32_init/2 == bf16_eval
self.assertAlmostEqual(bf16_eval, fp32_init / 2, delta=5_000)
def test_no_wd_param_group(self):
model = nn.Sequential(TstLayer(128), nn.ModuleList([TstLayer(128), TstLayer(128)]))
trainer = Trainer(model=model)
trainer.create_optimizer_and_scheduler(10)
# fmt: off
wd_names = ['0.linear1.weight', '0.linear2.weight', '1.0.linear1.weight', '1.0.linear2.weight', '1.1.linear1.weight', '1.1.linear2.weight']
# fmt: on
wd_params = [p for n, p in model.named_parameters() if n in wd_names]
no_wd_params = [p for n, p in model.named_parameters() if n not in wd_names]
self.assertListEqual(trainer.optimizer.param_groups[0]["params"], wd_params)
self.assertListEqual(trainer.optimizer.param_groups[1]["params"], no_wd_params)
@require_torch
@is_staging_test
class TrainerIntegrationWithHubTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._token = login(username=USER, password=PASS)
@classmethod
def tearDownClass(cls):
for model in ["test-trainer", "test-trainer-epoch", "test-trainer-step"]:
try:
delete_repo(token=cls._token, name=model)
except HTTPError:
pass
try:
delete_repo(token=cls._token, name="test-trainer-org", organization="valid_org")
except HTTPError:
pass
def test_push_to_hub(self):
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=os.path.join(tmp_dir, "test-trainer"),
push_to_hub=True,
hub_token=self._token,
)
url = trainer.push_to_hub()
# Extract repo_name from the url
re_search = re.search(ENDPOINT_STAGING + r"/([^/]+/[^/]+)/", url)
self.assertTrue(re_search is not None)
repo_name = re_search.groups()[0]
self.assertEqual(repo_name, f"{USER}/test-trainer")
model = RegressionPreTrainedModel.from_pretrained(repo_name)
self.assertEqual(model.a.item(), trainer.model.a.item())
self.assertEqual(model.b.item(), trainer.model.b.item())
def test_push_to_hub_in_organization(self):
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(output_dir=tmp_dir)
trainer.save_model()
trainer = get_regression_trainer(
output_dir=os.path.join(tmp_dir, "test-trainer-org"),
push_to_hub=True,
hub_model_id="valid_org/test-trainer-org",
hub_token=self._token,
)
url = trainer.push_to_hub()
# Extract repo_name from the url
re_search = re.search(ENDPOINT_STAGING + r"/([^/]+/[^/]+)/", url)
self.assertTrue(re_search is not None)
repo_name = re_search.groups()[0]
self.assertEqual(repo_name, "valid_org/test-trainer-org")
model = RegressionPreTrainedModel.from_pretrained("valid_org/test-trainer-org")
self.assertEqual(model.a.item(), trainer.model.a.item())
self.assertEqual(model.b.item(), trainer.model.b.item())
def get_commit_history(self, repo):
commit_logs = subprocess.run(
"git log".split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
check=True,
encoding="utf-8",
cwd=repo,
).stdout
commits = commit_logs.split("\n\n")[1::2]
return [commit.strip() for commit in commits]
def test_push_to_hub_with_saves_each_epoch(self):
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=os.path.join(tmp_dir, "test-trainer-epoch"),
push_to_hub=True,
hub_token=self._token,
save_strategy="epoch",
)
trainer.train()
with tempfile.TemporaryDirectory() as tmp_dir:
_ = Repository(tmp_dir, clone_from=f"{USER}/test-trainer-epoch", use_auth_token=self._token)
commits = self.get_commit_history(tmp_dir)
expected_commits = [f"Training in progress, epoch {i}" for i in range(3, 0, -1)]
expected_commits.append("initial commit")
self.assertListEqual(commits, expected_commits)
def test_push_to_hub_with_saves_each_n_steps(self):
num_gpus = max(1, get_gpu_count())
if num_gpus > 2:
return
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=os.path.join(tmp_dir, "test-trainer-step"),
push_to_hub=True,
hub_token=self._token,
save_strategy="steps",
save_steps=5,
)
trainer.train()
with tempfile.TemporaryDirectory() as tmp_dir:
_ = Repository(tmp_dir, clone_from=f"{USER}/test-trainer-step", use_auth_token=self._token)
commits = self.get_commit_history(tmp_dir)
total_steps = 20 // num_gpus
expected_commits = [f"Training in progress, step {i}" for i in range(total_steps, 0, -5)]
expected_commits.append("initial commit")
self.assertListEqual(commits, expected_commits)
@require_torch
@require_optuna
class TrainerHyperParameterOptunaIntegrationTest(unittest.TestCase):
def setUp(self):
args = TrainingArguments(".")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
def test_hyperparameter_search(self):
class MyTrialShortNamer(TrialShortNamer):
DEFAULTS = {"a": 0, "b": 0}
def hp_space(trial):
return {}
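# (hp_space deliberately returns an empty dict here: the a/b search space is driven implicitly
#  by the trial.suggest_int calls inside model_init below, so each optuna trial still varies a and b)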
def model_init(trial):
if trial is not None:
a = trial.suggest_int("a", -4, 4)
b = trial.suggest_int("b", -4, 4)
else:
a = 0
b = 0
config = RegressionModelConfig(a=a, b=b, double_output=False)
return RegressionPreTrainedModel(config)
def hp_name(trial):
return MyTrialShortNamer.shortname(trial.params)
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=tmp_dir,
learning_rate=0.1,
logging_steps=1,
evaluation_strategy=IntervalStrategy.EPOCH,
save_strategy=IntervalStrategy.EPOCH,
num_train_epochs=4,
disable_tqdm=True,
load_best_model_at_end=True,
logging_dir="runs",
run_name="test",
model_init=model_init,
)
trainer.hyperparameter_search(direction="minimize", hp_space=hp_space, hp_name=hp_name, n_trials=4)
@require_torch
@require_ray
class TrainerHyperParameterRayIntegrationTest(unittest.TestCase):
def setUp(self):
args = TrainingArguments(".")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
def ray_hyperparameter_search(self):
class MyTrialShortNamer(TrialShortNamer):
DEFAULTS = {"a": 0, "b": 0}
def hp_space(trial):
from ray import tune
return {
"a": tune.randint(-4, 4),
"b": tune.randint(-4, 4),
}
def model_init(config):
if config is None:
a = 0
b = 0
else:
a = config["a"]
b = config["b"]
model_config = RegressionModelConfig(a=a, b=b, double_output=False)
return RegressionPreTrainedModel(model_config)
def hp_name(params):
return MyTrialShortNamer.shortname(params)
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=tmp_dir,
learning_rate=0.1,
logging_steps=1,
evaluation_strategy=IntervalStrategy.EPOCH,
save_strategy=IntervalStrategy.EPOCH,
num_train_epochs=4,
disable_tqdm=True,
load_best_model_at_end=True,
logging_dir="runs",
run_name="test",
model_init=model_init,
)
trainer.hyperparameter_search(
direction="minimize", hp_space=hp_space, hp_name=hp_name, backend="ray", n_trials=4
)
def test_hyperparameter_search(self):
self.ray_hyperparameter_search()
def test_hyperparameter_search_ray_client(self):
import ray
from ray.util.client.ray_client_helpers import ray_start_client_server
with ray_start_client_server():
assert ray.util.client.ray.is_connected()
self.ray_hyperparameter_search()
@require_torch
@require_sigopt
class TrainerHyperParameterSigOptIntegrationTest(unittest.TestCase):
def setUp(self):
args = TrainingArguments(".")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
def test_hyperparameter_search(self):
class MyTrialShortNamer(TrialShortNamer):
DEFAULTS = {"a": 0, "b": 0}
def hp_space(trial):
return [
{"bounds": {"min": -4, "max": 4}, "name": "a", "type": "int"},
{"bounds": {"min": -4, "max": 4}, "name": "b", "type": "int"},
]
def model_init(trial):
if trial is not None:
a = trial.assignments["a"]
b = trial.assignments["b"]
else:
a = 0
b = 0
config = RegressionModelConfig(a=a, b=b, double_output=False)
return RegressionPreTrainedModel(config)
def hp_name(trial):
return MyTrialShortNamer.shortname(trial.assignments)
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=tmp_dir,
learning_rate=0.1,
logging_steps=1,
evaluation_strategy=IntervalStrategy.EPOCH,
save_strategy=IntervalStrategy.EPOCH,
num_train_epochs=4,
disable_tqdm=True,
load_best_model_at_end=True,
logging_dir="runs",
run_name="test",
model_init=model_init,
)
trainer.hyperparameter_search(
direction="minimize", hp_space=hp_space, hp_name=hp_name, backend="sigopt", n_trials=4
)
optim_test_params = []
if is_torch_available():
default_adam_kwargs = {
"betas": (TrainingArguments.adam_beta1, TrainingArguments.adam_beta2),
"eps": TrainingArguments.adam_epsilon,
"lr": TrainingArguments.learning_rate,
}
optim_test_params = [
(
OptimizerNames.ADAMW_HF,
transformers.optimization.AdamW,
default_adam_kwargs,
),
(
OptimizerNames.ADAMW_HF.value,
transformers.optimization.AdamW,
default_adam_kwargs,
),
(
OptimizerNames.ADAMW_TORCH,
torch.optim.AdamW,
default_adam_kwargs,
),
(
OptimizerNames.ADAFACTOR,
transformers.optimization.Adafactor,
{
"scale_parameter": False,
"relative_step": False,
"lr": TrainingArguments.learning_rate,
},
),
]
if is_apex_available():
import apex
optim_test_params.append(
(
OptimizerNames.ADAMW_APEX_FUSED,
apex.optimizers.FusedAdam,
default_adam_kwargs,
)
)
@require_torch
class TrainerOptimizerChoiceTest(unittest.TestCase):
def check_optim_and_kwargs(self, optim: OptimizerNames, mandatory_kwargs, expected_cls):
args = TrainingArguments(optim=optim, output_dir="None")
actual_cls, optim_kwargs = Trainer.get_optimizer_cls_and_kwargs(args)
self.assertEqual(expected_cls, actual_cls)
self.assertIsNotNone(optim_kwargs)
for p, v in mandatory_kwargs.items():
self.assertTrue(p in optim_kwargs)
actual_v = optim_kwargs[p]
self.assertTrue(actual_v == v, f"Failed check for {p}. Expected {v}, but got {actual_v}.")
@parameterized.expand(optim_test_params, skip_on_empty=True)
def test_optim_supported(self, name: str, expected_cls, mandatory_kwargs):
# exercises all the valid --optim options
self.check_optim_and_kwargs(name, mandatory_kwargs, expected_cls)
trainer = get_regression_trainer(optim=name)
trainer.train()
def test_fused_adam(self):
# Pretend that apex is installed and mock apex.optimizers.FusedAdam exists.
# Trainer.get_optimizer_cls_and_kwargs does not use FusedAdam, but only has to return the
# class, so mocking apex.optimizers.FusedAdam should be fine for testing and allow
# the test to run without requiring an apex installation.
mock = Mock()
modules = {
"apex": mock,
"apex.optimizers": mock.optimizers,
"apex.optimizers.FusedAdam": mock.optimizers.FusedAdam,
}
with patch.dict("sys.modules", modules):
self.check_optim_and_kwargs(
OptimizerNames.ADAMW_APEX_FUSED,
default_adam_kwargs,
mock.optimizers.FusedAdam,
)
def test_fused_adam_no_apex(self):
args = TrainingArguments(optim=OptimizerNames.ADAMW_APEX_FUSED, output_dir="None")
# Pretend that apex does not exist, even if installed. By setting apex to None, importing
# apex will fail even if apex is installed.
with patch.dict("sys.modules", {"apex.optimizers": None}):
with self.assertRaises(ValueError):
Trainer.get_optimizer_cls_and_kwargs(args)
| [
"torch.nn.Linear",
"torch.ones",
"torch.cuda.is_available",
"torch.allclose",
"torch.nn.DataParallel",
"torch.nn.LayerNorm",
"torch.randint",
"torch.tensor",
"torch.zeros",
"torch.cuda.device_count",
"torch.nn.functional.mse_loss",
"torch.optim.lr_scheduler.LambdaLR",
"torch.randn"
] | 1.0 | Yokohide0317/transformers | 1089c30a4a3c56dcf017e500ba4b44e5c39f68dd |
1.0 | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import Dataset
from .deepspeed import is_deepspeed_zero3_enabled
from .trainer import Trainer
from .trainer_utils import PredictionOutput
from .utils import logging
logger = logging.get_logger(__name__)
class Seq2SeqTrainer(Trainer):
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
max_length: Optional[int] = None,
num_beams: Optional[int] = None,
) -> Dict[str, float]:
"""
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
(pass it to the init `compute_metrics` argument).
You can also subclass and override this method to inject custom behavior.
Args:
eval_dataset (`Dataset`, *optional*):
Pass a dataset if you wish to override `self.eval_dataset`. If it is an `datasets.Dataset`, columns not
accepted by the `model.forward()` method are automatically removed. It must implement the `__len__`
method.
ignore_keys (`List[str]`, *optional*):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"eval_bleu" if the prefix is `"eval"` (default)
max_length (`int`, *optional*):
The maximum target length to use when predicting with the generate method.
num_beams (`int`, *optional*):
Number of beams for beam search that will be used when predicting with the generate method. 1 means no
beam search.
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
dictionary also contains the epoch number which comes from the training state.
"""
self._max_length = max_length if max_length is not None else self.args.generation_max_length
self._num_beams = num_beams if num_beams is not None else self.args.generation_num_beams
return super().evaluate(eval_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
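# A minimal usage sketch (illustration only; `my_model`, `my_args`, `my_eval_set` and
# `my_metrics_fn` are hypothetical placeholders, not names defined in this module):
#   trainer = Seq2SeqTrainer(model=my_model, args=my_args, eval_dataset=my_eval_set, compute_metrics=my_metrics_fn)
#   metrics = trainer.evaluate(max_length=128, num_beams=4)  # overrides the generation defaults for this call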
def predict(
self,
test_dataset: Dataset,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "test",
max_length: Optional[int] = None,
num_beams: Optional[int] = None,
) -> PredictionOutput:
"""
Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
will also return metrics, like in `evaluate()`.
Args:
test_dataset (`Dataset`):
Dataset to run the predictions on. If it is an `datasets.Dataset`, columns not accepted by the
`model.forward()` method are automatically removed. Has to implement the method `__len__`
ignore_keys (`List[str]`, *optional*):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"eval_bleu" if the prefix is `"eval"` (default)
max_length (`int`, *optional*):
The maximum target length to use when predicting with the generate method.
num_beams (`int`, *optional*):
Number of beams for beam search that will be used when predicting with the generate method. 1 means no
beam search.
<Tip>
If your predictions or labels have different sequence lengths (for instance because you're doing dynamic
padding in a token classification task) the predictions will be padded (on the right) to allow for
concatenation into one array. The padding index is -100.
</Tip>
Returns: *NamedTuple* A namedtuple with the following keys:
- predictions (`np.ndarray`): The predictions on `test_dataset`.
- label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some).
- metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained
labels).
"""
self._max_length = max_length if max_length is not None else self.args.generation_max_length
self._num_beams = num_beams if num_beams is not None else self.args.generation_num_beams
return super().predict(test_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
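# Usage sketch for predict (same hypothetical trainer as in the sketch above):
#   output = trainer.predict(my_test_set, max_length=128, num_beams=4)
#   predictions, label_ids, metrics = output.predictions, output.label_ids, output.metrics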
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
Perform an evaluation step on `model` using `inputs`.
Subclass and override to inject custom behavior.
Args:
model (`nn.Module`):
The model to evaluate.
inputs (`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument `labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (`bool`):
Whether or not to return the loss only.
Return:
Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and
labels (each being optional).
"""
if not self.args.predict_with_generate or prediction_loss_only:
return super().prediction_step(
model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys
)
has_labels = "labels" in inputs
inputs = self._prepare_inputs(inputs)
# XXX: adapt synced_gpus for fairscale as well
gen_kwargs = {
"max_length": self._max_length if self._max_length is not None else self.model.config.max_length,
"num_beams": self._num_beams if self._num_beams is not None else self.model.config.num_beams,
"synced_gpus": True if is_deepspeed_zero3_enabled() else False,
}
# prepare generation inputs
# some encoder-decoder models can have varying encoders and thus
# varying model input names
if hasattr(self.model, "encoder") and self.model.encoder.main_input_name != self.model.main_input_name:
generation_inputs = inputs[self.model.encoder.main_input_name]
else:
generation_inputs = inputs[self.model.main_input_name]
generated_tokens = self.model.generate(
generation_inputs,
attention_mask=inputs.get("attention_mask", None),
**gen_kwargs,
)
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])
with torch.no_grad():
with self.autocast_smart_context_manager():
outputs = model(**inputs)
if has_labels:
if self.label_smoother is not None:
loss = self.label_smoother(outputs, inputs["labels"]).mean().detach()
else:
loss = (outputs["loss"] if isinstance(outputs, dict) else outputs[0]).mean().detach()
else:
loss = None
if self.args.prediction_loss_only:
return (loss, None, None)
if has_labels:
labels = inputs["labels"]
if labels.shape[-1] < gen_kwargs["max_length"]:
labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])
else:
labels = None
return (loss, generated_tokens, labels)
def _pad_tensors_to_max_len(self, tensor, max_length):
if self.tokenizer is not None and hasattr(self.tokenizer, "pad_token_id"):
# If PAD token is not defined at least EOS token has to be defined
pad_token_id = (
self.tokenizer.pad_token_id if self.tokenizer.pad_token_id is not None else self.tokenizer.eos_token_id
)
else:
if self.model.config.pad_token_id is not None:
pad_token_id = self.model.config.pad_token_id
else:
raise ValueError("Pad_token_id must be set in the configuration of the model, in order to pad tensors")
padded_tensor = pad_token_id * torch.ones(
(tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
)
padded_tensor[:, : tensor.shape[-1]] = tensor
return padded_tensor
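# Illustrative sketch of the padding above, standalone and assuming a hypothetical pad id of 0:
#   pad_token_id = 0
#   t = torch.tensor([[5, 6, 7], [8, 9, 10]])
#   padded = pad_token_id * torch.ones((t.shape[0], 5), dtype=t.dtype, device=t.device)
#   padded[:, : t.shape[-1]] = t
#   # padded is now [[5, 6, 7, 0, 0], [8, 9, 10, 0, 0]]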
| [
"torch.no_grad",
"torch.ones"
] | 1.0 | Yokohide0317/transformers | 28e091430eea9e0d40839e56fd0d57aec262f5f9 |
1.0 | import torch
from transformers import AutoConfig, AutoModelForSeq2SeqLM, BartTokenizer, BartForConditionalGeneration, BartExtendedForConditionalGeneration, BartConfig, BartExtendedModel
# Loading trained model
PATH = "/home/ec2-user/moymarce/transformers/checkpoints/5-source_oracle-double/"
tokenizer = BartTokenizer.from_pretrained(PATH)
model = BartExtendedForConditionalGeneration.from_pretrained(PATH)
# Generate example
ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs. I hope one day they start eating healthier. Maybe a plant-based diet would be enough. <knw> My friends are cool"
inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt')
summary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=20, early_stopping=True, use_cache=False)
print('Predicted text by model:', [tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids], sep='\n')
# Add special token
tokenizer.add_tokens(['<knw>'], special_tokens=True)
# Initialize special tokens
knw_token_id = tokenizer.convert_tokens_to_ids(['<knw>'])[0] #50265
pad_id = tokenizer.pad_token
# Tokenize inputs into batch
ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs. I hope one day they start eating healthier. Maybe a plant-based diet would be enough. <knw> My friends are cool"
KNOWLEDGE = "My friends are cool"
inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt')
knowledge_inputs = tokenizer([KNOWLEDGE], max_length=1024, return_tensors='pt')
tokenizer([ARTICLE_TO_SUMMARIZE, KNOWLEDGE], max_length=1024, return_tensors='pt')
# Masking
X = torch.Tensor([[1,2,3,4], [5,6,7,8]])
indexes = ((X == 3) + (X == 6)).nonzero(as_tuple=True)
knw_token_id = tokenizer.convert_tokens_to_ids(['<knw>'])[0] #50265
pad_id = tokenizer.pad_token
for row, ind in zip(X, indexes[1]):
# ind (taken from indexes[1]) already gives the column of the marker value in this row
print('row', row, ind)
print(row[ind:])
row[ind:] = torch.zeros(row[ind:].size()) | [
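# A minimal sketch of the same masking idea applied to the real input ids (assumes the <knw>
# special token was added above, so it maps to knw_token_id; pads instead of zeroing):
ids = inputs['input_ids'].clone()
rows, cols = (ids == knw_token_id).nonzero(as_tuple=True)
for r, c in zip(rows.tolist(), cols.tolist()):
    ids[r, c:] = tokenizer.pad_token_id  # blank out everything from <knw> onwards
print(ids)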
"torch.Tensor"
] | 1.0 | MarcelGM/transformers | aad1d9b6d5c58fd974618ac0aead1c5bd1119467 |
1.0 | # coding=utf-8
# Copyright 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and Microsoft Corporation.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Training and inference using the library models for sequence classification on GLUE (Bert, Albert) with PABEE."""
import argparse
import glob
import json
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
import transformers
from pabee.modeling_pabee_albert import AlbertForSequenceClassificationWithPabee
from pabee.modeling_pabee_bert import BertForSequenceClassificationWithPabee
from transformers import (
WEIGHTS_NAME,
AdamW,
AlbertConfig,
AlbertTokenizer,
BertConfig,
BertTokenizer,
get_linear_schedule_with_warmup,
)
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes as output_modes
from transformers import glue_processors as processors
from transformers.trainer_utils import is_main_process
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
"bert": (BertConfig, BertForSequenceClassificationWithPabee, BertTokenizer),
"albert": (AlbertConfig, AlbertForSequenceClassificationWithPabee, AlbertTokenizer),
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer):
"""Train the model"""
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True,
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if os.path.exists(args.model_name_or_path):
# set global_step to global_step of last saved checkpoint from model path
global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(
" Will skip the first %d steps in the first epoch",
steps_trained_in_current_epoch,
)
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(
epochs_trained,
int(args.num_train_epochs),
desc="Epoch",
disable=args.local_rank not in [-1, 0],
)
set_seed(args)  # Added here for reproducibility
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3],
}
inputs["token_type_ids"] = batch[2]
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
logs = {}
if (
args.local_rank == -1 and args.evaluate_during_training
): # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer)
for key, value in results.items():
eval_key = "eval_{}".format(key)
logs[eval_key] = value
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs["learning_rate"] = learning_rate_scalar
logs["loss"] = loss_scalar
logging_loss = tr_loss
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
print(json.dumps({**logs, **{"step": global_step}}))
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, prefix="", patience=0):
if args.model_type == "albert":
model.albert.set_regression_threshold(args.regression_threshold)
model.albert.set_patience(patience)
model.albert.reset_stats()
elif args.model_type == "bert":
model.bert.set_regression_threshold(args.regression_threshold)
model.bert.set_patience(patience)
model.bert.reset_stats()
else:
raise NotImplementedError()
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,)
results = {}
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu eval
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3],
}
inputs["token_type_ids"] = batch[2]
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif args.output_mode == "regression":
preds = np.squeeze(preds)
result = compute_metrics(eval_task, preds, out_label_ids)
results.update(result)
output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
print(" %s = %s" % (key, str(result[key])))
writer.write("%s = %s\n" % (key, str(result[key])))
if args.eval_all_checkpoints and patience != 0:
if args.model_type == "albert":
model.albert.log_stats()
elif args.model_type == "bert":
model.bert.log_stats()
else:
raise NotImplementedError()
return results
def load_and_cache_examples(args, task, tokenizer, evaluate=False):
if args.local_rank not in [-1, 0] and not evaluate:
torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset, and the others will use the cache
processor = processors[task]()
output_mode = output_modes[task]
# Load data features from cache or dataset file
cached_features_file = os.path.join(
args.data_dir,
"cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train",
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length),
str(task),
),
)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
label_list = processor.get_labels()
if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta", "xlmroberta"]:
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
examples = (
processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir)
)
features = convert_examples_to_features(
examples,
tokenizer,
label_list=label_list,
max_length=args.max_seq_length,
output_mode=output_mode,
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0 and not evaluate:
torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset, and the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
if output_mode == "classification":
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif output_mode == "regression":
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
return dataset
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name.",
)
parser.add_argument(
"--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--patience",
default="0",
type=str,
required=False,
)
parser.add_argument(
"--regression_threshold",
default=0,
type=float,
required=False,
)
# Other parameters
parser.add_argument(
"--config_name",
default="",
type=str,
help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from huggingface.co",
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
parser.add_argument(
"--evaluate_during_training",
action="store_true",
help="Run evaluation during training at each logging step.",
)
parser.add_argument(
"--do_lower_case",
action="store_true",
help="Set this flag if you are using an uncased model.",
)
parser.add_argument(
"--per_gpu_train_batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--per_gpu_eval_batch_size",
default=1,
type=int,
help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.",
)
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument(
"--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.",
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
parser.add_argument(
"--save_steps",
type=int,
default=500,
help="Save checkpoint every X updates steps.",
)
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument(
"--overwrite_output_dir",
action="store_true",
help="Overwrite the content of the output directory",
)
parser.add_argument(
"--overwrite_cache",
action="store_true",
help="Overwrite the cached training and evaluation sets",
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument(
"--local_rank",
type=int,
default=-1,
help="For distributed training: local_rank",
)
parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
args = parser.parse_args()
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set seed
set_seed(args)
# Prepare GLUE task
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
if args.patience != "0" and args.per_gpu_eval_batch_size != 1:
raise ValueError("The eval batch size must be 1 with PABEE inference on.")
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
print("Total Model Parameters:", sum(param.numel() for param in model.parameters()))
output_layers_param_num = sum(param.numel() for param in model.classifiers.parameters())
print("Output Layers Parameters:", output_layers_param_num)
single_output_layer_param_num = sum(param.numel() for param in model.classifiers[0].parameters())
print(
"Added Output Layers Parameters:",
output_layers_param_num - single_output_layer_param_num,
)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Load a trained model and vocabulary that you have fine-tuned
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir)
model.to(args.device)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
patience_list = [int(x) for x in args.patience.split(",")]
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
)
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
print(f"Evaluation for checkpoint {prefix}")
for patience in patience_list:
result = evaluate(args, model, tokenizer, prefix=prefix, patience=patience)
result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
results.update(result)
return results
if __name__ == "__main__":
main()
| [
"torch.distributed.get_world_size",
"torch.utils.data.RandomSampler",
"torch.cuda.is_available",
"torch.load",
"torch.nn.DataParallel",
"torch.distributed.init_process_group",
"torch.manual_seed",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.distributed.get_rank",
"torch.device",
"torch.cuda.manual_seed_all",
"torch.save",
"torch.utils.data.SequentialSampler",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.device_count",
"torch.cuda.set_device",
"torch.distributed.barrier",
"torch.utils.data.TensorDataset",
"torch.no_grad",
"torch.utils.data.distributed.DistributedSampler"
] | 1.0 | MarcelGM/transformers | aad1d9b6d5c58fd974618ac0aead1c5bd1119467 |
1.4 | # Copyright 2018 Uber Technologies, Inc. All Rights Reserved.
# Modifications copyright (C) 2019 Intel Corporation
# Modifications copyright (C) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from distutils.version import LooseVersion
import inspect
import itertools
import os
import platform
import sys
import unittest
import warnings
import time
import json
from collections.abc import Iterable
import numpy as np
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
import horovod.torch as hvd
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'utils'))
from common import mpi_env_rank_and_size, skip_or_fail_gpu_test, temppath
_1_5_api = LooseVersion(torch.__version__) >= LooseVersion('1.5.0')
ccl_supported_types = set([torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
torch.IntTensor, torch.LongTensor, torch.FloatTensor,
torch.DoubleTensor])
# Set environment variable for dynamic timeline API test
os.environ["HOROVOD_TIMELINE"] = "DYNAMIC"
# Set environment variable to enable adding/removing process sets after initializing Horovod.
os.environ["HOROVOD_DYNAMIC_PROCESS_SETS"] = "1"
class TorchTests(unittest.TestCase):
"""
Tests for ops in horovod.torch.
"""
def __init__(self, *args, **kwargs):
super(TorchTests, self).__init__(*args, **kwargs)
warnings.simplefilter('module')
def convert_cpu_fp16_to_fp32(self, *values):
        # PyTorch doesn't support CPU ops on FP16 tensors, so if we need to run
        # any ops on such tensors we convert them to FP32 here first.
result = []
for value in values:
if value.dtype in [torch.float16, torch.HalfTensor] and not value.is_cuda:
result.append(value.float())
else:
result.append(value)
return result
def cast_and_place(self, tensor, dtype):
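        # When a CUDA tensor type is requested, move the tensor onto this worker's GPU
        # (indexed by hvd.local_rank()) before casting; otherwise just cast on the CPU.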
if dtype.is_cuda:
return tensor.cuda(hvd.local_rank()).type(dtype)
return tensor.type(dtype)
def filter_supported_types(self, types):
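        # When oneCCL is used as the CPU backend (CCL_ROOT is set), restrict the dtypes
        # under test to the types that backend supports.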
if 'CCL_ROOT' in os.environ:
types = [t for t in types if t in ccl_supported_types]
return types
def test_gpu_required(self):
if not torch.cuda.is_available():
skip_or_fail_gpu_test(self, "No GPUs available")
@pytest.mark.skipif(platform.system() == 'Darwin', reason='Reinit not supported on macOS')
def test_horovod_reinit(self):
"""Test that Horovod can init -> shutdown -> init successfully."""
mpi_rank, _ = mpi_env_rank_and_size()
gloo_rank = int(os.getenv('HOROVOD_RANK', -1))
is_mpi = gloo_rank == -1
if is_mpi:
# Horovod cannot be re-initialized after shutdown when using MPI, so
# this test can only be done using the Gloo controller
self.skipTest("Gloo is not available")
hvd.init()
rank, size = hvd.rank(), hvd.size()
hvd.shutdown()
hvd.init()
rank2, size2 = hvd.rank(), hvd.size()
assert rank == rank2
assert size == size2
def test_horovod_is_initialized(self):
"""Test that is_initialized returned by hvd.is_initialized() is correct."""
hvd.init()
assert hvd.is_initialized()
gloo_rank = int(os.getenv('HOROVOD_RANK', -1))
is_mpi = gloo_rank == -1
if is_mpi:
# Only applies for Gloo
self.skipTest("Gloo is not available")
hvd.shutdown()
assert not hvd.is_initialized()
hvd.init()
def test_horovod_rank(self):
"""Test that the rank returned by hvd.rank() is correct."""
mpi_rank, _ = mpi_env_rank_and_size()
gloo_rank = int(os.getenv('HOROVOD_RANK', -1))
        # The MPI rank may not match the Gloo rank, so we need to figure out which
        # one we are using to run the test.
is_mpi = gloo_rank == -1
hvd.init()
rank = hvd.rank()
if is_mpi:
assert mpi_rank == rank
else:
assert gloo_rank == rank
def test_horovod_size(self):
"""Test that the size returned by hvd.size() is correct."""
_, mpi_size = mpi_env_rank_and_size()
gloo_size = int(os.getenv('HOROVOD_SIZE', -1))
        # The MPI size may not match the Gloo size, so we need to figure out which
        # one we are using to run the test.
is_mpi = gloo_size == -1
hvd.init()
size = hvd.size()
if is_mpi:
assert mpi_size == size
else:
assert gloo_size == size
def test_horovod_allreduce(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
tensor = self.cast_and_place(tensor, dtype)
summed = hvd.allreduce(tensor, average=False)
tensor, summed = self.convert_cpu_fp16_to_fp32(tensor, summed)
multiplied = tensor * size
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
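                # With 15 or more workers the accumulated floating-point error presumably makes
                # a strict comparison unreliable, so the remaining combinations are skipped.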
break
assert torch.allclose(summed, multiplied, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_average(self):
"""Test that the allreduce correctly averages 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
tensor = self.cast_and_place(tensor, dtype)
averaged = hvd.allreduce(tensor, average=True)
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert torch.allclose(averaged, tensor, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_inplace(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
multiplied = self.cast_and_place(tensor * size, dtype)
tensor = self.cast_and_place(tensor, dtype)
hvd.allreduce_(tensor, average=False)
tensor, multiplied = self.convert_cpu_fp16_to_fp32(tensor, multiplied)
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert torch.allclose(tensor, multiplied, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_async_fused(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors
with Tensor Fusion."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
tests = []
is_hvd_poll_false_once = False
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
tensor = self.cast_and_place(tensor, dtype)
handle = hvd.allreduce_async(tensor, average=False)
if not hvd.poll(handle):
is_hvd_poll_false_once = True
tensor, = self.convert_cpu_fp16_to_fp32(tensor)
multiplied = tensor * size
tests.append((dtype, multiplied, handle))
# Make sure it's an asynchronous operation.
assert is_hvd_poll_false_once, 'hvd.poll() always returns True, not an async op?'
for dtype, multiplied, handle in tests:
summed = hvd.synchronize(handle)
summed, = self.convert_cpu_fp16_to_fp32(summed)
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert torch.allclose(summed, multiplied, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_multi_gpu(self):
"""Test that the allreduce works on multiple GPUs."""
# Only do this test if there are GPUs available.
if not torch.cuda.is_available():
self.skipTest("No GPUs available")
hvd.init()
local_rank = hvd.local_rank()
size = hvd.size()
# Skip the test if there are not enough GPUs.
if torch.cuda.device_count() < hvd.local_size() * 2:
self.skipTest("Not enough GPUs available")
iter = 0
dtypes = [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
iter += 1
torch.manual_seed(1234)
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
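            # Each worker is assigned two GPUs (local_rank * 2 and local_rank * 2 + 1);
            # alternate between them across iterations so both devices get exercised.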
device = local_rank * 2 + (iter + local_rank) % 2
tensor = tensor.cuda(device).type(dtype)
multiplied = tensor * size
hvd.allreduce_(tensor, average=False)
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [torch.cuda.IntTensor, torch.cuda.LongTensor]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert torch.allclose(tensor, multiplied, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_prescale(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors with prescaling."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
int_types = [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]
half_types = [torch.HalfTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
np.random.seed(1234)
factor = np.random.uniform()
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
tensor = self.cast_and_place(tensor, dtype)
summed = hvd.allreduce(tensor, average=False,
prescale_factor=factor)
factor = torch.tensor(factor, dtype=torch.float64)
factor = factor.cuda(hvd.local_rank()) if dtype.is_cuda else factor
if dtype.is_cuda and not int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# For integer types, scaling done in FP64
factor = factor.type(torch.float64 if dtype in int_types else dtype)
tensor = tensor.type(torch.float64 if dtype in int_types else dtype)
else:
# For integer types, scaling done in FP64, FP32 math for FP16 on CPU
factor = factor.type(torch.float32 if dtype in half_types else
torch.float64 if dtype in int_types else dtype)
tensor = tensor.type(torch.float32 if dtype in half_types else
torch.float64 if dtype in int_types else dtype)
multiplied = factor * tensor
multiplied = multiplied.type(dtype)
summed, multiplied = self.convert_cpu_fp16_to_fp32(summed, multiplied)
multiplied *= size
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in int_types:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert torch.allclose(summed, multiplied, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_postscale(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors with postscaling."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
int_types = [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]
half_types = [torch.HalfTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
np.random.seed(1234)
factor = np.random.uniform()
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
tensor = self.cast_and_place(tensor, dtype)
summed = hvd.allreduce(tensor, average=False,
postscale_factor=factor)
factor = torch.tensor(factor, dtype=torch.float64)
factor = factor.cuda(hvd.local_rank()) if dtype.is_cuda else factor
if dtype.is_cuda and not int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# For integer types, scaling done in FP64
factor = factor.type(torch.float64 if dtype in int_types else dtype)
tensor = tensor.type(torch.float64 if dtype in int_types else dtype)
else:
# For integer types, scaling done in FP64, FP32 math for FP16 on CPU
factor = factor.type(torch.float32 if dtype in half_types else
torch.float64 if dtype in int_types else dtype)
tensor = tensor.type(torch.float32 if dtype in half_types else
torch.float64 if dtype in int_types else dtype)
multiplied = size * tensor
multiplied = multiplied * factor
multiplied = multiplied.type(dtype)
summed, multiplied = self.convert_cpu_fp16_to_fp32(summed, multiplied)
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in int_types:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert torch.allclose(summed, multiplied, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_process_sets(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors if restricted to non-global process sets."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
if hvd.ccl_built():
self.skipTest("Multiple process sets currently do not support CCL.")
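        # Split the world into two disjoint process sets (even ranks and odd ranks)
        # and run a separate allreduce within each set.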
even_ranks = [rk for rk in range(0, size) if rk % 2 == 0]
odd_ranks = [rk for rk in range(0, size) if rk % 2 == 1]
even_set = hvd.add_process_set(even_ranks)
odd_set = hvd.add_process_set(odd_ranks)
dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
even_rank_tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
odd_rank_tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
if rank in even_ranks:
tensor = self.cast_and_place(even_rank_tensor, dtype)
summed = hvd.allreduce(tensor, average=False, process_set=even_set)
elif rank in odd_ranks:
tensor = self.cast_and_place(odd_rank_tensor, dtype)
summed = hvd.allreduce(tensor, average=False, process_set=odd_set)
tensor, summed = self.convert_cpu_fp16_to_fp32(tensor, summed)
if rank in even_ranks:
multiplied = tensor * len(even_ranks)
elif rank in odd_ranks:
multiplied = tensor * len(odd_ranks)
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
max_process_set_size = max(len(even_ranks), len(odd_ranks))
if max_process_set_size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]:
threshold = 0
elif max_process_set_size < 10:
threshold = 1e-4
elif max_process_set_size < 15:
threshold = 5e-4
else:
break
assert torch.allclose(summed, multiplied, threshold), 'hvd.allreduce produces incorrect results'
hvd.remove_process_set(odd_set)
hvd.remove_process_set(even_set)
def test_horovod_allreduce_error(self):
"""Test that the allreduce raises an error if different ranks try to
send tensors of different rank or dimension."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
        # Same number of dimensions, but different sizes along each dimension per rank
torch.manual_seed(1234)
dims = [17 + rank] * 3
tensor = torch.FloatTensor(*dims).random_(-100, 100)
try:
hvd.allreduce(tensor)
assert False, 'hvd.allreduce did not throw error'
except (torch.FatalError, RuntimeError):
pass
# Same number of elements, different rank
torch.manual_seed(1234)
if rank == 0:
dims = [17, 23 * 57]
else:
dims = [17, 23, 57]
tensor = torch.FloatTensor(*dims).random_(-100, 100)
try:
hvd.allreduce(tensor)
assert False, 'hvd.allreduce did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_allreduce_type_error(self):
"""Test that the allreduce raises an error if different ranks try to
send tensors of different type."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
        # Same dimensions on every worker, but a different tensor type on even vs. odd ranks
dims = [17] * 3
if rank % 2 == 0:
tensor = torch.IntTensor(*dims)
else:
tensor = torch.FloatTensor(*dims)
try:
hvd.allreduce(tensor)
assert False, 'hvd.allreduce did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_allreduce_cpu_gpu_error(self):
"""Test that the allreduce raises an error if different ranks try to
perform reduction on CPU and GPU."""
# Only do this test if there are GPUs available.
if not torch.cuda.is_available():
self.skipTest("No GPUs available")
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
        # Same dimensions on every worker, but even ranks allocate on GPU while odd ranks stay on CPU
dims = [17] * 3
if rank % 2 == 0:
tensor = torch.cuda.FloatTensor(*dims)
else:
tensor = torch.FloatTensor(*dims)
try:
hvd.allreduce(tensor)
assert False, 'hvd.allreduce did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_allreduce_duplicate_name_error(self):
"""Test that the allreduce raises an error if there are
two concurrent operations with the same name."""
hvd.init()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
dims = [17] * 3
tensor = torch.FloatTensor(*dims)
hvd.allreduce_async(tensor, name='duplicate_name')
try:
for i in range(10):
hvd.allreduce_async(tensor, name='duplicate_name')
assert False, 'hvd.allreduce_async did not throw error'
except (torch.FatalError, ValueError):
pass
if LooseVersion(torch.__version__) >= LooseVersion('1.10.0'):
# To fix https://github.com/horovod/horovod/issues/3149
hvd.join()
def test_horovod_allreduce_grad(self):
"""Test the correctness of the allreduce gradient."""
hvd.init()
size = hvd.size()
# Only Tensors of floating point dtype can require gradients
dtypes = [torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
tensor = self.cast_and_place(tensor, dtype)
tensor.requires_grad_()
summed = hvd.allreduce(tensor, average=False)
summed.backward(self.cast_and_place(torch.ones([17] * dim), dtype))
grad_out = tensor.grad.data.cpu().numpy()
expected = np.ones([17] * dim) * size
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_horovod_allreduce_grad_average(self):
"""Test the correctness of the allreduce averaged gradient."""
hvd.init()
# Only Tensors of floating point dtype can require gradients
dtypes = [torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
tensor = self.cast_and_place(tensor, dtype)
tensor.requires_grad_()
summed = hvd.allreduce(tensor, average=True)
summed.backward(self.cast_and_place(torch.ones([17] * dim), dtype))
grad_out = tensor.grad.data.cpu().numpy()
expected = np.ones([17] * dim)
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_horovod_allreduce_grad_process_sets(self):
"""Test the correctness of the allreduce gradient if restricted to non-global process sets."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
if hvd.ccl_built():
self.skipTest("Multiple process sets currently do not support CCL.")
even_ranks = [rk for rk in range(0, size) if rk % 2 == 0]
odd_ranks = [rk for rk in range(0, size) if rk % 2 == 1]
even_set = hvd.add_process_set(even_ranks)
odd_set = hvd.add_process_set(odd_ranks)
# Only Tensors of floating point dtype can require gradients
dtypes = [torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
even_rank_tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
odd_rank_tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
if rank in even_ranks:
tensor = self.cast_and_place(even_rank_tensor, dtype)
this_set = even_set
set_size = len(even_ranks)
elif rank in odd_ranks:
tensor = self.cast_and_place(odd_rank_tensor, dtype)
this_set = odd_set
set_size = len(odd_ranks)
tensor.requires_grad_()
summed = hvd.allreduce(tensor, average=False, process_set=this_set)
summed.backward(self.cast_and_place(torch.ones([17] * dim), dtype))
grad_out = tensor.grad.data.cpu().numpy()
expected = np.ones([17] * dim) * set_size
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
hvd.remove_process_set(odd_set)
hvd.remove_process_set(even_set)
def test_horovod_grouped_allreduce(self):
"""Test that the grouped allreduce correctly sums 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]
tensors = [self.cast_and_place(tensor, dtype) for tensor in tensors]
summed = hvd.grouped_allreduce(tensors, average=False)
tensors, summed = zip(*[self.convert_cpu_fp16_to_fp32(t, s) for t, s in zip(tensors, summed)])
multiplied = [tensor * size for tensor in tensors]
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert all([torch.allclose(t1, t2, threshold) for t1, t2 in zip(summed, multiplied)]), \
'hvd.grouped_allreduce produces incorrect results'
def test_horovod_grouped_allreduce_average(self):
"""Test that the grouped allreduce correctly averages 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]
tensors = [self.cast_and_place(tensor, dtype) for tensor in tensors]
averaged = hvd.grouped_allreduce(tensors, average=True)
tensors, averaged = zip(*[self.convert_cpu_fp16_to_fp32(t, m) for t, m in zip(tensors, averaged)])
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert all([torch.allclose(t1, t2, threshold) for t1, t2 in zip(averaged, tensors)]), \
'hvd.grouped_allreduce produces incorrect results for average'
def test_horovod_grouped_allreduce_inplace(self):
"""Test that the grouped allreduce correctly sums 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]
multiplied = [self.cast_and_place(tensor * size, dtype) for tensor in tensors]
tensors = [self.cast_and_place(tensor, dtype) for tensor in tensors]
hvd.grouped_allreduce_(tensors, average=False)
tensors, multiplied = zip(*[self.convert_cpu_fp16_to_fp32(t, m) for t, m in zip(tensors, multiplied)])
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert all([torch.allclose(t1, t2, threshold) for t1, t2 in zip(tensors, multiplied)]), \
'hvd.grouped_allreduce_ produces incorrect results'
def test_horovod_grouped_allreduce_process_sets(self):
"""Test that the grouped allreduce correctly sums 1D, 2D, 3D tensors if restricted to process sets."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
if hvd.ccl_built():
self.skipTest("Multiple process sets currently do not support CCL.")
even_ranks = [rk for rk in range(0, size) if rk % 2 == 0]
odd_ranks = [rk for rk in range(0, size) if rk % 2 == 1]
even_set = hvd.add_process_set(even_ranks)
odd_set = hvd.add_process_set(odd_ranks)
dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
even_rank_tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]
odd_rank_tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]
if rank in even_ranks:
tensors = [self.cast_and_place(tensor, dtype) for tensor in even_rank_tensors]
summed = hvd.grouped_allreduce(tensors, average=False, process_set=even_set)
elif rank in odd_ranks:
tensors = [self.cast_and_place(tensor, dtype) for tensor in odd_rank_tensors]
summed = hvd.grouped_allreduce(tensors, average=False, process_set=odd_set)
tensors, summed = zip(*[self.convert_cpu_fp16_to_fp32(t, s) for t, s in zip(tensors, summed)])
if rank in even_ranks:
multiplied = [tensor * len(even_ranks) for tensor in tensors]
elif rank in odd_ranks:
multiplied = [tensor * len(odd_ranks) for tensor in tensors]
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
max_process_set_size = max(len(even_ranks), len(odd_ranks))
if max_process_set_size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor]:
threshold = 0
elif max_process_set_size < 10:
threshold = 1e-4
elif max_process_set_size < 15:
threshold = 5e-4
else:
break
assert all([torch.allclose(t1, t2, threshold) for t1, t2 in zip(summed, multiplied)]), \
'hvd.grouped_allreduce produces incorrect results'
hvd.remove_process_set(odd_set)
hvd.remove_process_set(even_set)
def test_horovod_grouped_allreduce_cpu_gpu_error(self):
"""Test that the grouped allreduce raises an error if the input tensor
list contains a mix of tensors on CPU and GPU."""
# Only do this test if there are GPUs available.
if not torch.cuda.is_available():
self.skipTest("No GPUs available")
hvd.init()
tensors = [torch.FloatTensor(10) if i % 2 else torch.cuda.FloatTensor(10) for i in range(5)]
try:
hvd.grouped_allreduce(tensors, average=False)
assert False, 'hvd.allreduce did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_grouped_allreduce_grad(self):
"""Test the correctness of the grouped allreduce gradient."""
hvd.init()
size = hvd.size()
# Only Tensors of floating point dtype can require gradients
dtypes = [torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]
tensors = [self.cast_and_place(tensor, dtype) for tensor in tensors]
for tensor in tensors:
tensor.requires_grad_()
summed = hvd.grouped_allreduce(tensors, average=False)
for s in summed:
s.backward(self.cast_and_place(torch.ones([17] * dim), dtype))
grads_out = [tensor.grad.data.cpu().numpy() for tensor in tensors]
expected = np.ones([17] * dim) * size
for grad_out in grads_out:
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_horovod_grouped_allreduce_grad_average(self):
"""Test the correctness of the grouped allreduce averaged gradient."""
hvd.init()
# Only Tensors of floating point dtype can require gradients
dtypes = [torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]
tensors = [self.cast_and_place(tensor, dtype) for tensor in tensors]
for tensor in tensors:
tensor.requires_grad_()
summed = hvd.grouped_allreduce(tensors, average=True)
for s in summed:
s.backward(self.cast_and_place(torch.ones([17] * dim), dtype))
grads_out = [tensor.grad.data.cpu().numpy() for tensor in tensors]
expected = np.ones([17] * dim)
for grad_out in grads_out:
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_horovod_grouped_allreduce_grad_process_sets(self):
"""Test the correctness of the grouped allreduce gradient if restricted to process sets."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
if hvd.ccl_built():
self.skipTest("Multiple process sets currently do not support CCL.")
even_ranks = [rk for rk in range(0, size) if rk % 2 == 0]
odd_ranks = [rk for rk in range(0, size) if rk % 2 == 1]
even_set = hvd.add_process_set(even_ranks)
odd_set = hvd.add_process_set(odd_ranks)
# Only Tensors of floating point dtype can require gradients
dtypes = [torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
torch.manual_seed(1234)
even_rank_tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]
odd_rank_tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]
if rank in even_ranks:
tensors = [self.cast_and_place(tensor, dtype) for tensor in even_rank_tensors]
this_set = even_set
set_size = len(even_ranks)
elif rank in odd_ranks:
tensors = [self.cast_and_place(tensor, dtype) for tensor in odd_rank_tensors]
this_set = odd_set
set_size = len(odd_ranks)
for tensor in tensors:
tensor.requires_grad_()
summed = hvd.grouped_allreduce(tensors, average=False, process_set=this_set)
for s in summed:
s.backward(self.cast_and_place(torch.ones([17] * dim), dtype))
grads_out = [tensor.grad.data.cpu().numpy() for tensor in tensors]
expected = np.ones([17] * dim) * set_size
for grad_out in grads_out:
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
hvd.remove_process_set(odd_set)
hvd.remove_process_set(even_set)
def test_horovod_allgather(self):
"""Test that the allgather correctly gathers 1D, 2D, 3D tensors."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,
torch.HalfTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)
tensor = self.cast_and_place(tensor, dtype)
gathered = hvd.allgather(tensor)
tensor, gathered = self.convert_cpu_fp16_to_fp32(tensor, gathered)
assert list(gathered.shape) == [17 * size] + [17] * (dim - 1)
for i in range(size):
rank_tensor = gathered[i * 17:(i + 1) * 17]
assert list(rank_tensor.shape) == [17] * dim, \
'hvd.allgather produces incorrect gathered shape'
assert rank_tensor.data.min() == i, 'hvd.allgather produces incorrect gathered tensor'
assert rank_tensor.data.max() == i, 'hvd.allgather produces incorrect gathered tensor'
def test_horovod_allgather_variable_size(self):
"""Test that the allgather correctly gathers 1D, 2D, 3D tensors,
even if those tensors have different sizes along the first dim."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,
torch.HalfTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
# Support tests up to MPI Size of 35
if size > 35:
break
tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5
tensor_sizes = tensor_sizes[:size]
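            # Each rank contributes a different size along the first dimension; after the
            # allgather, the first dimension of the result is the sum of all per-rank sizes.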
tensor = torch.FloatTensor(
*([tensor_sizes[rank]] + [17] * (dim - 1))).fill_(1).mul_(rank)
tensor = self.cast_and_place(tensor, dtype)
gathered = hvd.allgather(tensor)
tensor, gathered = self.convert_cpu_fp16_to_fp32(tensor, gathered)
expected_size = sum(tensor_sizes)
assert list(gathered.shape) == [expected_size] + [17] * (dim - 1)
for i in range(size):
rank_size = [tensor_sizes[i]] + [17] * (dim - 1)
rank_tensor = gathered[sum(
tensor_sizes[:i]):sum(tensor_sizes[:i + 1])]
assert list(rank_tensor.shape) == rank_size
assert rank_tensor.data.min() == i
assert rank_tensor.data.max() == i
def test_horovod_allgather_async_fused(self):
"""Test that the allgather correctly gathers 1D, 2D, 3D tensors
with Tensor Fusion."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,
torch.HalfTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
tests = []
is_hvd_poll_false_once = False
for dtype, dim in itertools.product(dtypes, dims):
rank_shape = [17] * dim
tensor = torch.FloatTensor(*(rank_shape)).fill_(1).mul_(rank)
tensor = self.cast_and_place(tensor, dtype)
handle = hvd.allgather_async(tensor)
if not hvd.poll(handle):
is_hvd_poll_false_once = True
tests.append((handle, rank_shape))
# Make sure it's an asynchronous operation.
assert is_hvd_poll_false_once, 'hvd.poll() always returns True, not an async op?'
for handle, rank_shape in tests:
gathered = hvd.synchronize(handle)
gathered, = self.convert_cpu_fp16_to_fp32(gathered)
for i in range(size):
rank_tensor = gathered[i * 17:(i + 1) * 17]
assert list(rank_tensor.shape) == rank_shape, \
'hvd.allgather produces incorrect gathered shape'
assert rank_tensor.data.min() == i, 'hvd.allgather produces incorrect gathered tensor'
assert rank_tensor.data.max() == i, 'hvd.allgather produces incorrect gathered tensor'
def test_horovod_allgather_process_sets(self):
"""Test that the allgather correctly gathers 1D, 2D, 3D tensors if restricted to process sets."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
if hvd.ccl_built():
self.skipTest("Multiple process sets currently do not support CCL.")
even_ranks = [rk for rk in range(0, size) if rk % 2 == 0]
odd_ranks = [rk for rk in range(0, size) if rk % 2 == 1]
even_set = hvd.add_process_set(even_ranks)
odd_set = hvd.add_process_set(odd_ranks)
if rank in even_ranks:
set_size = len(even_ranks)
set_ranks = even_ranks
this_set = even_set
elif rank in odd_ranks:
set_size = len(odd_ranks)
set_ranks = odd_ranks
this_set = odd_set
dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,
torch.HalfTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)
tensor = self.cast_and_place(tensor, dtype)
gathered = hvd.allgather(tensor, process_set=this_set)
tensor, gathered = self.convert_cpu_fp16_to_fp32(tensor, gathered)
assert list(gathered.shape) == [17 * set_size] + [17] * (dim - 1)
for i in range(set_size):
rank_tensor = gathered[i * 17:(i + 1) * 17]
assert list(rank_tensor.shape) == [17] * dim, \
'hvd.allgather produces incorrect gathered shape'
value = set_ranks[i]
assert rank_tensor.data.min() == value, 'hvd.allgather produces incorrect gathered tensor'
assert rank_tensor.data.max() == value, 'hvd.allgather produces incorrect gathered tensor'
hvd.remove_process_set(odd_set)
hvd.remove_process_set(even_set)
def test_horovod_allgather_error(self):
"""Test that the allgather returns an error if any dimension besides
the first is different among the tensors being gathered."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
tensor_size = [17] * 3
tensor_size[1] = 10 * (rank + 1)
tensor = torch.FloatTensor(*tensor_size).fill_(1).mul_(rank)
try:
hvd.allgather(tensor)
assert False, 'hvd.allgather did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_allgather_type_error(self):
"""Test that the allgather returns an error if the types being gathered
differ among the processes"""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
tensor_size = [17] * 3
if rank % 2 == 0:
tensor = torch.IntTensor(*tensor_size)
else:
tensor = torch.FloatTensor(*tensor_size)
try:
hvd.allgather(tensor)
assert False, 'hvd.allgather did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_allgather_duplicate_name_error(self):
"""Test that the allgather raises an error if there are
two concurrent operations with the same name."""
hvd.init()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
dims = [17] * 3
tensor = torch.FloatTensor(*dims)
hvd.allgather_async(tensor, name='duplicate_name')
try:
for i in range(10):
hvd.allgather_async(tensor, name='duplicate_name')
assert False, 'hvd.allgather_async did not throw error'
except (torch.FatalError, ValueError):
pass
if LooseVersion(torch.__version__) >= LooseVersion('1.10.0'):
# To fix https://github.com/horovod/horovod/issues/3149
hvd.join()
def test_horovod_allgather_grad(self):
"""Test the correctness of the allgather gradient."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# Only Tensors of floating point dtype can require gradients
dtypes = [torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
# Support tests up to MPI Size of 35
if size > 35:
break
tensor_sizes = [3, 2, 7, 4, 6, 8, 10] * 5
tensor_sizes = tensor_sizes[:size]
tensor = torch.FloatTensor(
*([tensor_sizes[rank]] + [17] * (dim - 1))).fill_(1).mul_(rank)
tensor = self.cast_and_place(tensor, dtype)
tensor.requires_grad_()
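            # Build the upstream gradient for the gathered tensor: the slice that came from
            # rank r is filled with the value r, so this rank's backward gradient should be
            # filled with its own rank value.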
grad_list = []
for r, tensor_size in enumerate(tensor_sizes):
grad_list.append(self.cast_and_place(
torch.ones([tensor_size] + [17] * (dim - 1)), dtype) * r)
grad_ys = torch.cat(grad_list, dim=0)
gathered = hvd.allgather(tensor)
gathered.backward(grad_ys)
grad_out = tensor.grad.data.cpu().numpy()
expected = np.ones(
[tensor_sizes[rank]] + [17] * (dim - 1)
) * rank
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_horovod_allgather_grad_process_sets(self):
"""Test the correctness of the allgather gradient if restricted to process sets."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
if hvd.ccl_built():
self.skipTest("Multiple process sets currently do not support CCL.")
even_ranks = [rk for rk in range(0, size) if rk % 2 == 0]
odd_ranks = [rk for rk in range(0, size) if rk % 2 == 1]
even_set = hvd.add_process_set(even_ranks)
odd_set = hvd.add_process_set(odd_ranks)
if rank in even_ranks:
set_ranks = even_ranks
this_set = even_set
elif rank in odd_ranks:
set_ranks = odd_ranks
this_set = odd_set
# Only Tensors of floating point dtype can require gradients
dtypes = [torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
# Support tests up to MPI Size of 35
if size > 35:
break
tensor_sizes = [3, 2, 7, 4, 6, 8, 10] * 5
tensor_sizes = tensor_sizes[:size]
set_tensor_sizes = [tensor_sizes[rk] for rk in set_ranks]
tensor = torch.FloatTensor(
*([tensor_sizes[rank]] + [17] * (dim - 1))).fill_(1).mul_(rank)
tensor = self.cast_and_place(tensor, dtype)
tensor.requires_grad_()
grad_list = []
for r, tensor_size in zip(set_ranks, set_tensor_sizes):
grad_list.append(self.cast_and_place(
torch.ones([tensor_size] + [17] * (dim - 1)), dtype) * r)
grad_ys = torch.cat(grad_list, dim=0)
gathered = hvd.allgather(tensor, process_set=this_set)
gathered.backward(grad_ys)
grad_out = tensor.grad.data.cpu().numpy()
expected = np.ones(
[tensor_sizes[rank]] + [17] * (dim - 1)
) * rank
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
hvd.remove_process_set(odd_set)
hvd.remove_process_set(even_set)
def test_horovod_broadcast(self):
"""Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,
torch.HalfTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
root_ranks = list(range(size))
for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):
tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)
root_tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(root_rank)
tensor = self.cast_and_place(tensor, dtype)
root_tensor = self.cast_and_place(root_tensor, dtype)
broadcasted_tensor = hvd.broadcast(tensor, root_rank)
tensor, root_tensor, broadcasted_tensor = \
self.convert_cpu_fp16_to_fp32(tensor, root_tensor, broadcasted_tensor)
if rank != root_rank:
assert (tensor == root_tensor).max() == 0, \
'hvd.broadcast modifies source tensor'
assert (broadcasted_tensor.data == root_tensor).min() == 1, \
'hvd.broadcast produces incorrect broadcasted tensor'
def test_horovod_broadcast_inplace(self):
"""Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,
torch.HalfTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
root_ranks = list(range(size))
for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):
tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)
root_tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(root_rank)
tensor = self.cast_and_place(tensor, dtype)
root_tensor = self.cast_and_place(root_tensor, dtype)
broadcasted_tensor = hvd.broadcast_(tensor, root_rank)
tensor, root_tensor, broadcasted_tensor = \
self.convert_cpu_fp16_to_fp32(tensor, root_tensor, broadcasted_tensor)
assert (tensor == broadcasted_tensor).min() == 1, \
'hvd.broadcast does not modify source tensor'
assert (broadcasted_tensor == root_tensor).min() == 1, \
'hvd.broadcast produces incorrect broadcasted tensor'
def test_horovod_broadcast_process_sets(self):
"""Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors if restricted to process sets."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
if hvd.ccl_built():
self.skipTest("Multiple process sets currently do not support CCL.")
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
even_ranks = [rk for rk in range(0, size) if rk % 2 == 0]
odd_ranks = [rk for rk in range(0, size) if rk % 2 == 1]
even_set = hvd.add_process_set(even_ranks)
odd_set = hvd.add_process_set(odd_ranks)
if rank in even_ranks:
set_size = len(even_ranks)
set_ranks = even_ranks
this_set = even_set
elif rank in odd_ranks:
set_size = len(odd_ranks)
set_ranks = odd_ranks
this_set = odd_set
dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,
torch.HalfTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
root_ranks = list(set_ranks)
for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):
tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)
root_tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(root_rank)
tensor = self.cast_and_place(tensor, dtype)
root_tensor = self.cast_and_place(root_tensor, dtype)
broadcasted_tensor = hvd.broadcast(tensor, root_rank, process_set=this_set)
tensor, root_tensor, broadcasted_tensor = \
self.convert_cpu_fp16_to_fp32(tensor, root_tensor, broadcasted_tensor)
if rank != root_rank:
assert (tensor == root_tensor).max() == 0, \
'hvd.broadcast modifies source tensor'
assert (broadcasted_tensor.data == root_tensor).min() == 1, \
'hvd.broadcast produces incorrect broadcasted tensor'
hvd.remove_process_set(odd_set)
hvd.remove_process_set(even_set)
def test_horovod_broadcast_error(self):
"""Test that the broadcast returns an error if any dimension besides
the first is different among the tensors being broadcasted."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
tensor_size = [17] * 3
tensor_size[1] = 10 * (rank + 1)
tensor = torch.FloatTensor(*tensor_size).fill_(1).mul_(rank)
try:
hvd.broadcast(tensor, 0)
assert False, 'hvd.broadcast did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_broadcast_type_error(self):
"""Test that the broadcast returns an error if the types being broadcasted
differ among the processes"""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
tensor_size = [17] * 3
if rank % 2 == 0:
tensor = torch.IntTensor(*tensor_size)
else:
tensor = torch.FloatTensor(*tensor_size)
try:
hvd.broadcast(tensor, 0)
assert False, 'hvd.broadcast did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_broadcast_rank_error(self):
"""Test that the broadcast returns an error if different ranks
specify different root rank."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
tensor = torch.FloatTensor(*([17] * 3)).fill_(1)
try:
hvd.broadcast(tensor, rank)
assert False, 'hvd.broadcast did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_broadcast_duplicate_name_error(self):
"""Test that the broadcast raises an error if there are
two concurrent operations with the same name."""
hvd.init()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
dims = [17] * 3
tensor = torch.FloatTensor(*dims)
hvd.broadcast_async(tensor, root_rank=0, name='duplicate_name')
try:
for i in range(10):
hvd.broadcast_async(tensor, root_rank=0, name='duplicate_name')
assert False, 'hvd.broadcast_async did not throw error'
except (torch.FatalError, ValueError):
pass
if LooseVersion(torch.__version__) >= LooseVersion('1.10.0'):
# To fix https://github.com/horovod/horovod/issues/3149
hvd.join()
def test_horovod_broadcast_grad(self):
"""Test the correctness of the broadcast gradient."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# Only Tensors of floating point dtype can require gradients
dtypes = [torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
root_ranks = list(range(size))
for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):
tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)
tensor = self.cast_and_place(tensor, dtype)
tensor.requires_grad_()
broadcasted_tensor = hvd.broadcast(tensor, root_rank)
broadcasted_tensor.backward(self.cast_and_place(torch.ones([17] * dim), dtype))
grad_out = tensor.grad.data.cpu().numpy()
c = 1 if rank == root_rank else 0
expected = np.ones([17] * dim) * c
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_horovod_broadcast_grad_process_sets(self):
"""Test the correctness of the broadcast gradient if restricted to process sets."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
if hvd.ccl_built():
self.skipTest("Multiple process sets currently do not support CCL.")
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
even_ranks = [rk for rk in range(0, size) if rk % 2 == 0]
odd_ranks = [rk for rk in range(0, size) if rk % 2 == 1]
even_set = hvd.add_process_set(even_ranks)
odd_set = hvd.add_process_set(odd_ranks)
if rank in even_ranks:
set_size = len(even_ranks)
set_ranks = even_ranks
this_set = even_set
elif rank in odd_ranks:
set_size = len(odd_ranks)
set_ranks = odd_ranks
this_set = odd_set
# Only Tensors of floating point dtype can require gradients
dtypes = [torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
root_ranks = list(set_ranks)
for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):
tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)
tensor = self.cast_and_place(tensor, dtype)
tensor.requires_grad_()
broadcasted_tensor = hvd.broadcast(tensor, root_rank, process_set=this_set)
broadcasted_tensor.backward(self.cast_and_place(torch.ones([17] * dim), dtype))
grad_out = tensor.grad.data.cpu().numpy()
c = 1 if rank == root_rank else 0
expected = np.ones([17] * dim) * c
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
hvd.remove_process_set(odd_set)
hvd.remove_process_set(even_set)
def test_horovod_alltoall(self):
"""Test that the alltoall correctly distributes 1D, 2D, and 3D tensors."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
dtypes = self.filter_supported_types([torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
torch.IntTensor, torch.LongTensor, torch.FloatTensor,
torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
vals = []
for i in range(size):
vals += [i] * (rank + 1)
tensor = torch.Tensor(vals)
for _ in range(dim - 1):
tensor = tensor.unsqueeze(1)
tensor = torch.cat((tensor, tensor), dim=1)
splits = torch.tensor([rank + 1] * size, dtype=torch.int32)
tensor = self.cast_and_place(tensor, dtype)
collected, received_splits = hvd.alltoall(tensor, splits)
tensor, collected = self.convert_cpu_fp16_to_fp32(tensor, collected)
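# Every sender s contributes a segment of length (s + 1) filled with this rank's index,
# so all received elements equal `rank` and the element count is
# size * (size + 1) / 2 scaled by 2 ** (dim - 1) from the repeated torch.cat above.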
assert collected.data.min() == rank, 'hvd.alltoall produces incorrect collected tensor'
assert collected.data.max() == rank, 'hvd.alltoall produces incorrect collected tensor'
assert collected.numel() == size * (size + 1) // 2 * 2**(dim - 1), 'hvd.alltoall collected wrong number of values'
self.assertSequenceEqual(received_splits.tolist(), [rk + 1 for rk in range(size)],
"hvd.alltoall returned incorrect received_splits")
def test_horovod_alltoall_equal_split(self):
"""Test that the alltoall correctly distributes 1D tensors with default splitting."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
dtypes = self.filter_supported_types([torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
torch.IntTensor, torch.LongTensor, torch.FloatTensor,
torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
vals = []
for i in range(size):
vals += [i] * (rank + 1)
tensor = torch.Tensor(vals)
for _ in range(dim - 1):
tensor = tensor.unsqueeze(1)
tensor = torch.cat((tensor, tensor), dim=1)
tensor = self.cast_and_place(tensor, dtype)
collected = hvd.alltoall(tensor)
tensor, collected = self.convert_cpu_fp16_to_fp32(tensor, collected)
assert collected.data.min() == rank, 'hvd.alltoall produces incorrect collected tensor'
assert collected.data.max() == rank, 'hvd.alltoall produces incorrect collected tensor'
assert collected.numel() == size * (size + 1) // 2 * 2**(dim - 1), 'hvd.alltoall collected wrong number of values'
def test_horovod_alltoall_splits_on_gpu(self):
"""Test that the alltoall works correctly when the splits argument is a tensor on GPU."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
if not torch.cuda.is_available():
self.skipTest("No GPUs available")
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
dtypes = self.filter_supported_types([torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
torch.IntTensor, torch.LongTensor, torch.FloatTensor,
torch.DoubleTensor, torch.HalfTensor])
dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
vals = []
for i in range(size):
vals += [i] * (rank + 1)
tensor = torch.Tensor(vals)
for _ in range(dim - 1):
tensor = tensor.unsqueeze(1)
tensor = torch.cat((tensor, tensor), dim=1)
splits = torch.tensor([rank + 1] * size, dtype=torch.int32, device="cuda")
tensor = self.cast_and_place(tensor, dtype)
collected, received_splits = hvd.alltoall(tensor, splits)
tensor, collected = self.convert_cpu_fp16_to_fp32(tensor, collected)
assert collected.data.min() == rank, 'hvd.alltoall produces incorrect collected tensor'
assert collected.data.max() == rank, 'hvd.alltoall produces incorrect collected tensor'
assert collected.numel() == size * (size + 1) // 2 * 2**(dim - 1), 'hvd.alltoall collected wrong number of values'
self.assertEqual(received_splits.device.type, "cuda", "received_splits should be on GPU here")
self.assertSequenceEqual(received_splits.tolist(), [rk + 1 for rk in range(size)],
"hvd.alltoall returned incorrect received_splits")
def test_horovod_alltoall_process_sets(self):
"""Test that the alltoall correctly distributes 1D, 2D, and 3D tensors if restricted to process sets."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
if hvd.ccl_built():
self.skipTest("Multiple process sets currently do not support CCL.")
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
even_ranks = [rk for rk in range(0, size) if rk % 2 == 0]
odd_ranks = [rk for rk in range(0, size) if rk % 2 == 1]
even_set = hvd.add_process_set(even_ranks)
odd_set = hvd.add_process_set(odd_ranks)
if rank in even_ranks:
set_size = len(even_ranks)
set_ranks = even_ranks
this_set = even_set
elif rank in odd_ranks:
set_size = len(odd_ranks)
set_ranks = odd_ranks
this_set = odd_set
dtypes = self.filter_supported_types([torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
torch.IntTensor, torch.LongTensor, torch.FloatTensor,
torch.DoubleTensor, torch.HalfTensor])
if torch.cuda.is_available():
dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
vals = []
for i in set_ranks:
vals += [i] * (rank + 1)
tensor = torch.Tensor(vals)
for _ in range(dim - 1):
tensor = tensor.unsqueeze(1)
tensor = torch.cat((tensor, tensor), dim=1)
splits = torch.tensor([rank + 1] * set_size, dtype=torch.int32)
tensor = self.cast_and_place(tensor, dtype)
collected, received_splits = hvd.alltoall(tensor, splits, process_set=this_set)
tensor, collected = self.convert_cpu_fp16_to_fp32(tensor, collected)
assert collected.data.min() == rank, 'hvd.alltoall produces incorrect collected tensor'
assert collected.data.max() == rank, 'hvd.alltoall produces incorrect collected tensor'
assert collected.numel() == sum(rk + 1 for rk in set_ranks) * 2**(dim - 1), 'hvd.alltoall collected wrong number of values'
self.assertSequenceEqual(received_splits.tolist(), [rk + 1 for rk in set_ranks],
"hvd.alltoall returned incorrect received_splits")
hvd.remove_process_set(odd_set)
hvd.remove_process_set(even_set)
def test_horovod_alltoall_type_error(self):
"""Test that the alltoall returns an error if the tensor types differ
across the processes."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
if rank % 2:
tensor = torch.empty(size, dtype=torch.int32)
else:
tensor = torch.empty(size, dtype=torch.float32)
try:
hvd.alltoall(tensor)
assert False, 'hvd.alltoall did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_alltoall_equal_split_length_error(self):
"""Test that the alltoall with default splitting returns an error if the tensor length is not a multiple
of the number of workers."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
tensor = torch.empty(size + 1)
try:
hvd.alltoall(tensor)
assert False, 'hvd.alltoall did not throw error'
except (torch.FatalError, ValueError):
pass
def test_horovod_alltoall_splits_error(self):
"""Test that the alltoall returns an error if the sum of the splits entries exceeds
the first dimension of the input tensor."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
tensor = torch.empty(size - 1)
splits = torch.ones(size, dtype=torch.int32)
try:
hvd.alltoall(tensor, splits)
assert False, 'hvd.alltoall did not throw error'
except (torch.FatalError, ValueError):
pass
def test_horovod_alltoall_splits_type_error(self):
"""Test that the alltoall returns an error if the splits tensor does not
contain 32-bit integers."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
tensor = torch.empty(size)
splits = torch.empty(size, dtype=torch.float32)
try:
hvd.alltoall(tensor, splits)
assert False, 'hvd.alltoall did not throw error'
except (torch.FatalError, ValueError):
pass
def test_horovod_alltoall_rank_error(self):
"""Test that the alltoall returns an error if any dimension besides
the first is different among the tensors being processed."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
tensor_size = [2 * size] * 3
tensor_size[1] = 10 * (rank + 1)
tensor = torch.ones(tensor_size)
try:
hvd.alltoall(tensor)
assert False, 'hvd.alltoall did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_alltoall_grad(self):
"""Test the correctness of the alltoall gradient."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
# Only Tensors of floating point dtype can require gradients
dtypes = [torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
vals = []
for i in range(size):
vals += [i] * (rank + 1)
tensor = torch.Tensor(vals)
for _ in range(dim - 1):
tensor = tensor.unsqueeze(1)
tensor = torch.cat((tensor, tensor), dim=1)
tensor = self.cast_and_place(tensor, dtype)
tensor.requires_grad_()
splits = torch.tensor([rank + 1] * size, dtype=torch.int32)
collected, received_splits = hvd.alltoall(tensor, splits)
collected.backward(self.cast_and_place(torch.ones(collected.shape), dtype))
grad_out = tensor.grad.data.cpu().numpy()
expected = np.ones(tensor.shape)
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_horovod_alltoall_equal_split_grad(self):
"""Test the correctness of the alltoall gradient with default splitting."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
# Only Tensors of floating point dtype can require gradients
dtypes = [torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
vals = []
for i in range(size):
vals += [i] * (rank + 1)
tensor = torch.Tensor(vals)
for _ in range(dim - 1):
tensor = tensor.unsqueeze(1)
tensor = torch.cat((tensor, tensor), dim=1)
tensor = self.cast_and_place(tensor, dtype)
tensor.requires_grad_()
collected = hvd.alltoall(tensor)
collected.backward(self.cast_and_place(torch.ones(collected.shape), dtype))
grad_out = tensor.grad.data.cpu().numpy()
expected = np.ones(tensor.shape)
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
def test_horovod_alltoall_grad_process_sets(self):
"""Test the correctness of the alltoall gradient if restricted to process sets."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
if hvd.ccl_built():
self.skipTest("Multiple process sets currently do not support CCL.")
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
even_ranks = [rk for rk in range(0, size) if rk % 2 == 0]
odd_ranks = [rk for rk in range(0, size) if rk % 2 == 1]
even_set = hvd.add_process_set(even_ranks)
odd_set = hvd.add_process_set(odd_ranks)
if rank in even_ranks:
set_size = len(even_ranks)
set_ranks = even_ranks
this_set = even_set
elif rank in odd_ranks:
set_size = len(odd_ranks)
set_ranks = odd_ranks
this_set = odd_set
# Only Tensors of floating point dtype can require gradients
dtypes = [torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
vals = []
for i in set_ranks:
vals += [i] * (rank + 1)
tensor = torch.Tensor(vals)
for _ in range(dim - 1):
tensor = tensor.unsqueeze(1)
tensor = torch.cat((tensor, tensor), dim=1)
tensor = self.cast_and_place(tensor, dtype)
tensor.requires_grad_()
splits = torch.tensor([rank + 1] * set_size, dtype=torch.int32)
collected, received_splits = hvd.alltoall(tensor, splits, process_set=this_set)
collected.backward(self.cast_and_place(torch.ones(collected.shape), dtype))
grad_out = tensor.grad.data.cpu().numpy()
expected = np.ones(tensor.shape)
err = np.linalg.norm(expected - grad_out)
self.assertLess(err, 0.00000001,
"gradient %s differs from expected %s, "
"error: %s" % (grad_out, expected, str(err)))
hvd.remove_process_set(odd_set)
hvd.remove_process_set(even_set)
def test_broadcast_state(self):
hvd.init()
N, D_in, H, D_out = 64, 100, 10, 10
x = torch.randn(N, D_in).requires_grad_()
y = torch.randn(N, D_out).requires_grad_()
def new_optimizer(cls, opt_params, model):
p = {
k: v for k, v in opt_params.items()
if k in inspect.getargspec(cls.__init__).args
}
return cls(model.parameters(), **p)
def create_model(opt_class, opt_params):
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, D_out),
)
optimizer = new_optimizer(opt_class, opt_params, model)
optimizer = hvd.DistributedOptimizer(
optimizer, named_parameters=model.named_parameters())
return model, optimizer
def get_model_param_values(model):
params = sorted(model.state_dict().items())
return [(k, v.clone()) for k, v in params]
def get_optimizer_param_values(optimizer):
results = []
state_dict = optimizer.state_dict()
for group in state_dict['param_groups']:
for param_id in group['params']:
if param_id not in state_dict['state']:
continue
params = sorted(state_dict['state'][param_id].items())
for k, v in params:
results.append(
(k, v.clone() if torch.is_tensor(v) else v))
return results
# L-BFGS is currently unsupported, as are sparse tensors, which are
# required by SparseAdam optimizer
optimizers = [
(subclass.__name__, subclass)
for subclass in torch.optim.Optimizer.__subclasses__()
if subclass.__module__.startswith('torch.optim') and
subclass != torch.optim.LBFGS and
subclass != torch.optim.SparseAdam
]
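# Sort by class name so that every rank iterates the optimizers in the same order.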
optimizers.sort(key=lambda tup: tup[0])
opt_params_list = [
dict(lr=0.2, momentum=0.9, weight_decay=0.1, centered=True),
dict(lr=0.2)
]
for (opt_name, opt_class), opt_params in itertools.product(optimizers, opt_params_list):
model, optimizer = create_model(opt_class, opt_params)
y_pred = model(x)
loss = F.mse_loss(y_pred, y, size_average=False)
optimizer.zero_grad()
loss.backward()
optimizer.step()
model_param_values = get_model_param_values(model)
for name, model_param_value in model_param_values:
hvd.broadcast_(model_param_value, root_rank=0)
opt_param_values_updated = []
opt_param_values = get_optimizer_param_values(optimizer)
for name, opt_param_value in opt_param_values:
is_tensor = torch.is_tensor(opt_param_value)
if is_tensor:
hvd.broadcast_(opt_param_value, root_rank=0)
else:
opt_param_value = hvd.broadcast_object(opt_param_value, name=name)
opt_param_values_updated.append((name, opt_param_value))
opt_param_values = opt_param_values_updated
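# Save a checkpoint on rank 0, reload it there, and then broadcast the model parameters
# and optimizer state so that every rank ends up with identical values.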
with temppath() as fname:
if hvd.rank() == 0:
state = {
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
}
torch.save(state, fname)
model, optimizer = create_model(opt_class, opt_params)
if hvd.rank() == 0:
checkpoint = torch.load(fname)
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
model_param_value_after = get_model_param_values(model)
for before, after in zip(model_param_values,
model_param_value_after):
name, model_param_value = before
name_after, model_param_value_after = after
self.assertEqual(name, name_after)
self.assertEqual(type(model_param_value),
type(model_param_value_after))
self.assertTrue(
(model_param_value == model_param_value_after).all())
expected_tensors = hvd.broadcast_object(len(optimizer.state_dict()['state'].values()))
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
self.assertEqual(len(optimizer.state_dict()['state'].values()), expected_tensors)
opt_param_values_after = get_optimizer_param_values(optimizer)
for before, after in zip(opt_param_values, opt_param_values_after):
name, opt_param_value = before
name_after, opt_param_value_after = after
self.assertEqual(name, name_after)
self.assertEqual(type(opt_param_value),
type(opt_param_value_after))
if torch.is_tensor(opt_param_value):
self.assertTrue(
(opt_param_value == opt_param_value_after).all())
else:
self.assertEqual(opt_param_value, opt_param_value_after)
# TODO: investigate why this hangs on K80s
@unittest.skip
def test_broadcast_state_gpu(self):
# Only do this test if there are GPUs available.
if not torch.cuda.is_available():
self.skipTest("No GPUs available")
# Set default tensor type, ensuring optimizer tensor-wrapping is robust
# to this setting.
try:
torch.set_default_tensor_type(torch.cuda.FloatTensor)
self.test_broadcast_state()
finally:
torch.set_default_tensor_type(torch.FloatTensor)
def test_broadcast_state_options(self):
hvd.init()
N, D_in, H, D_out = 64, 100, 10, 10
x = torch.randn(N, D_in).requires_grad_()
y = torch.randn(N, D_out).requires_grad_()
params_0 = dict(lr=0.1, momentum=0.8, weight_decay=0.2, nesterov=True,
betas=(0.9, 0.999), etas=(0.8, 2.4), step_sizes=(1e-5, 100))
params_1 = dict(lr=0.2, momentum=0.9, weight_decay=0.1, nesterov=False,
betas=(0.8, 0.9), etas=(0.25, 1.75), step_sizes=(1e-7, 5))
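# Each rank starts from different hyperparameters; broadcasting the optimizer state from
# rank 0 should leave every worker with the params_0 option values (checked below).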
def create_model(opt_class):
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, D_out),
)
params = params_0 if hvd.rank() == 0 else params_1
p = {
k: v for k, v in params.items()
if k in inspect.getargspec(opt_class.__init__).args
}
opt = opt_class(model.parameters(), **p)
opt = hvd.DistributedOptimizer(opt, named_parameters=model.named_parameters())
return model, opt
# Include subclass name so we can sort them lexicographically, otherwise different
# ranks will have different optimizer orderings
optimizers = [
(subclass.__name__, subclass)
for subclass in torch.optim.Optimizer.__subclasses__()
if subclass.__module__.startswith('torch.optim') and
subclass != torch.optim.LBFGS and
subclass != torch.optim.SparseAdam
]
optimizers.sort(key=lambda tup: tup[0])
for _, opt_class in optimizers:
model, optimizer = create_model(opt_class)
y_pred = model(x)
loss = F.mse_loss(y_pred, y, size_average=False)
optimizer.zero_grad()
loss.backward()
optimizer.step()
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
p0 = {
k: v for k, v in params_0.items()
if k in inspect.getargspec(opt_class.__init__).args
}
for k, p in p0.items():
p_actual = optimizer.param_groups[0][k]
if not isinstance(p, Iterable):
p_actual = [p_actual]
p = [p]
for i in range(len(p)):
self.assertEqual(type(p_actual[i]), type(p[i]))
self.assertAlmostEqual(p_actual[i], p[i], delta=1e-5)
# Ensure that the parameter option types are compatible with ops
y_pred = model(x)
loss = F.mse_loss(y_pred, y, size_average=False)
optimizer.zero_grad()
loss.backward()
optimizer.step()
def test_broadcast_state_no_grad(self):
class ModelNoGrad(nn.Module):
def __init__(self, a, b):
super(ModelNoGrad, self).__init__()
self.a = nn.Parameter(a.int(), requires_grad=False)
self.b = nn.Parameter(b)
def forward(self, x):
return torch.index_select(self.b, 0, self.a.long()) * x
hvd.init()
a = torch.Tensor([1, 3])
b = torch.rand(4)
model = ModelNoGrad(a, b)
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, weight_decay=1e-6, momentum=0.9, nesterov=True)
optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
grad = optimizer.param_groups[0]['params'][1].grad
bgrad = hvd.broadcast(grad, root_rank=0)
assert optimizer.param_groups[0]['params'][0].grad is None
assert torch.all(torch.eq(grad, bgrad)).item()
def test_broadcast_object(self):
hvd.init()
expected_obj = {
'hello': 123,
0: [1, 2]
}
obj = expected_obj if hvd.rank() == 0 else {}
obj = hvd.broadcast_object(obj, root_rank=0)
self.assertDictEqual(obj, expected_obj)
def test_broadcast_object_process_sets(self):
hvd.init()
if hvd.ccl_built():
self.skipTest("Multiple process sets currently do not support CCL.")
# This test does not apply if there is only one worker.
if hvd.size() == 1:
self.skipTest("Only one worker available")
even_ranks = [rk for rk in range(0, hvd.size()) if rk % 2 == 0]
odd_ranks = [rk for rk in range(0, hvd.size()) if rk % 2 == 1]
even_set = hvd.add_process_set(even_ranks)
odd_set = hvd.add_process_set(odd_ranks)
if hvd.rank() in even_ranks:
set_ranks = even_ranks
this_set = even_set
elif hvd.rank() in odd_ranks:
set_ranks = odd_ranks
this_set = odd_set
expected_obj = {
'hello': 123,
0: [1, 2]
}
obj = expected_obj if hvd.rank() == set_ranks[0] else {}
obj = hvd.broadcast_object(obj, root_rank=set_ranks[0], process_set=this_set)
self.assertDictEqual(obj, expected_obj)
hvd.remove_process_set(odd_set)
hvd.remove_process_set(even_set)
def test_allgather_object(self):
hvd.init()
d = {'metric_val_1': hvd.rank()}
if hvd.rank() == 1:
d['metric_val_2'] = 42
results = hvd.allgather_object(d)
expected = [{'metric_val_1': i} for i in range(hvd.size())]
if hvd.size() > 1:
expected[1] = {'metric_val_1': 1, 'metric_val_2': 42}
self.assertEqual(len(results), hvd.size())
self.assertListEqual(results, expected)
def test_compression_fp16(self):
valid_dtypes = [torch.float32, torch.float64]
invalid_dtypes = [torch.uint8, torch.int8, torch.int16,
torch.int32, torch.int64]
tensor_size = [5] * 3
compression = hvd.Compression.fp16
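# The fp16 compressor casts float32/float64 tensors to float16 for transfer and restores
# the original dtype on decompression; non-floating-point tensors are passed through unchanged.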
for dtype in valid_dtypes:
tensor = torch.ones(tensor_size, dtype=dtype)
tensor_compressed, ctx = compression.compress(tensor)
self.assertEqual(tensor_compressed.dtype, torch.float16)
tensor_decompressed = compression.decompress(tensor_compressed, ctx)
self.assertEqual(tensor_decompressed.dtype, dtype)
expected = np.ones(tensor_size)
err = np.linalg.norm(expected - tensor_decompressed.data.numpy())
self.assertLess(err, 0.00000001)
for dtype in invalid_dtypes:
tensor = torch.ones(tensor_size, dtype=dtype)
tensor_compressed, ctx = compression.compress(tensor)
self.assertEqual(tensor_compressed.dtype, dtype)
tensor_decompressed = compression.decompress(tensor_compressed, ctx)
self.assertEqual(tensor_decompressed.dtype, dtype)
if dtype != torch.int8: # Cannot cast to NumPy with a CharTensor
expected = np.ones(tensor_size)
err = np.linalg.norm(expected - tensor_decompressed.data.numpy())
self.assertLess(err, 0.00000001)
def test_force_allreduce(self):
"""Test that allreduce is forced on all gradients during opt.step()."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
N, D_in, H, D_out = 64, 100, 10, 10
x = torch.randn(N, D_in).requires_grad_()
y = torch.randn(N, D_out).requires_grad_()
def new_optimizer(cls, opt_params, model):
p = {
k: v for k, v in opt_params.items()
if k in inspect.getargspec(cls.__init__).args
}
return cls(model.parameters(), **p)
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = torch.nn.Linear(D_in, H)
self.fc2 = torch.nn.Linear(H, D_out)
self.fc3 = torch.nn.Linear(D_out, D_out)
def forward(self, x_):
x_ = F.relu(self.fc1(x_))
x1_ = self.fc2(x_)
x2_ = self.fc3(F.relu(x1_))
return x1_, x2_
def create_model(opt_class, opt_params):
model = Net()
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
opt = new_optimizer(opt_class, opt_params, model)
opt = hvd.DistributedOptimizer(
opt, named_parameters=model.named_parameters())
return model, opt
# L-BFGS is currently unsupported, as are sparse tensors, which are
# required by SparseAdam optimizer
optimizers = [
(subclass.__name__, subclass)
for subclass in torch.optim.Optimizer.__subclasses__()
if subclass.__module__.startswith('torch.optim') and
subclass != torch.optim.LBFGS and
subclass != torch.optim.SparseAdam
]
optimizers.sort(key=lambda tup: tup[0])
opt_params_list = [
dict(lr=0.2, momentum=0.9, weight_decay=0.1, centered=True),
dict(lr=0.2)
]
for (opt_name, opt_class), opt_params in itertools.product(optimizers, opt_params_list):
model, optimizer = create_model(opt_class, opt_params)
y_pred1, y_pred2 = model(x)
if rank == 0:
loss = F.mse_loss(y_pred1, y, size_average=False)
else:
loss = F.mse_loss(y_pred2, y, size_average=False)
optimizer.zero_grad()
loss.backward()
optimizer.step()
def test_model_parallelism(self):
"""Test that tensors on different GPUs are supported."""
# Only do this test if there are GPUs available.
if not torch.cuda.is_available():
self.skipTest("No GPUs available")
hvd.init()
local_rank = hvd.local_rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# Skip the test if there are not enough GPUs.
if torch.cuda.device_count() < hvd.local_size() * 2:
self.skipTest("Not enough GPUs available")
first_device = local_rank * 2
second_device = local_rank * 2 + 1
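# Each worker owns two consecutive GPUs and places half of the model on each of them,
# combining model parallelism with Horovod's data-parallel allreduce.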
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
# Place parts of model on different GPUs.
self.conv1 = torch.nn.Conv2d(1, 100, 1).cuda(first_device)
self.conv2 = torch.nn.Conv2d(100, 1, 1).cuda(second_device)
def forward(self, x):
x = x.cuda(first_device)
x = self.conv1(x)
x = x.cuda(second_device)
x = self.conv2(x)
return x
model = Net()
inp = torch.rand([1, 1, 1000, 1000])
opt = torch.optim.SGD(model.parameters(), lr=0.1)
opt = hvd.DistributedOptimizer(opt, named_parameters=model.named_parameters())
loss = model(inp).sum()
opt.zero_grad()
loss.backward()
opt.step()
def test_delta_optimizer(self):
"""Test that delta optimizer."""
hvd.init()
# TODO support non-MPI Adasum operation
# Only do this test if there are GPUs available.
if not hvd.mpi_enabled() or not torch.cuda.is_available():
self.skipTest("No GPUs available")
local_rank = hvd.local_rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = torch.nn.Conv2d(1, 100, 1).cuda(local_rank)
self.conv2 = torch.nn.Conv2d(100, 1, 1).cuda(local_rank)
def forward(self, x):
x = x.cuda(local_rank)
x = self.conv1(x)
x = x.cuda(local_rank)
x = self.conv2(x)
return x
model = Net()
inp = torch.rand([1, 1, 1000, 1000])
opt = torch.optim.SGD(model.parameters(), lr=0.1)
opt = hvd.DistributedOptimizer(opt, named_parameters=model.named_parameters(), op=hvd.Adasum)
loss = model(inp).sum()
opt.zero_grad()
loss.backward()
opt.step()
def test_duplicate_names(self):
"""Test that passing duplicate names to optimizer will fail."""
net1 = torch.nn.Conv2d(1, 1, 1)
net2 = torch.nn.Conv2d(1, 1, 1)
parameters = itertools.chain(net1.parameters(), net2.parameters())
opt = torch.optim.SGD(parameters, lr=0.1)
# This will have duplicate names, since both net1 and net2 have 'weight' and 'bias'
named_parameters = itertools.chain(net1.named_parameters(), net2.named_parameters())
try:
hvd.DistributedOptimizer(opt, named_parameters=named_parameters)
assert False, 'hvd.DistributedOptimizer did not throw error'
except ValueError:
pass
def test_dynamic_requires_grad(self):
"""Test that makes sure that gradients can be turned off/on dynamically."""
hvd.init()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
gen = torch.nn.Conv2d(1, 10, 1)
disc = torch.nn.Conv2d(10, 1, 1)
inp = torch.rand([1, 1, 100, 100])
gen_opt = torch.optim.SGD(gen.parameters(), lr=0.1)
gen_opt = hvd.DistributedOptimizer(gen_opt, named_parameters=gen.named_parameters())
disc_opt = torch.optim.SGD(disc.parameters(), lr=0.1)
disc_opt = hvd.DistributedOptimizer(disc_opt, named_parameters=disc.named_parameters())
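# Alternate between training only the generator and only the discriminator; parameters
# with requires_grad=False must not receive gradients from the distributed optimizer.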
def train_step(train_generator=False, train_discriminator=False):
for p in gen.parameters():
p.requires_grad_(train_generator)
for p in disc.parameters():
p.requires_grad_(train_discriminator)
gen_opt.zero_grad()
disc_opt.zero_grad()
loss = disc(gen(inp)).sum()
loss.backward()
for p in gen.parameters():
assert train_generator == (p.grad is not None and p.grad.max().is_nonzero()), \
'Gradient for generator is zero but it should be trained or vice versa.'
for p in disc.parameters():
assert train_discriminator == (p.grad is not None and p.grad.max().is_nonzero()), \
'Gradient for discriminator is zero but it should be trained or vice versa.'
if train_generator:
gen_opt.step()
if train_discriminator:
disc_opt.step()
for x in range(10):
# Step 1: train generator.
train_step(train_generator=True)
# Step 2: train discriminator.
train_step(train_discriminator=True)
def test_gradient_clipping(self):
"""Test gradient clipping example."""
hvd.init()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
x = torch.ones(1, 1).requires_grad_()
y = torch.ones(1, 1).requires_grad_()
model = torch.nn.Linear(1, 1)
model.weight = torch.nn.Parameter(torch.zeros(1, 1) + 0.5)
model.bias = torch.nn.Parameter(torch.zeros(1))
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
optimizer = hvd.DistributedOptimizer(
optimizer, named_parameters=model.named_parameters())
y_pred = model(x)
loss = F.mse_loss(y_pred, y)
optimizer.zero_grad()
loss.backward()
optimizer.synchronize()
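# synchronize() completes the pending gradient allreduces so the gradients can be clipped
# in place; skip_synchronize() below keeps step() from synchronizing a second time.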
prior_grad = model.weight.grad.item()
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
clipped_grad = model.weight.grad.item()
assert abs(prior_grad) > abs(clipped_grad)
with optimizer.skip_synchronize():
optimizer.step()
def test_synchronize_step_warning(self):
"""
Test that .synchronize() followed by .step() without
optimizer.skip_synchronize() context will produce a warning.
"""
hvd.init()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
x = torch.zeros(1, 1).requires_grad_()
y = torch.ones(1, 1).requires_grad_()
model = torch.nn.Linear(1, 1)
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
optimizer = hvd.DistributedOptimizer(
optimizer, named_parameters=model.named_parameters())
y_pred = model(x)
loss = F.mse_loss(y_pred, y)
optimizer.zero_grad()
loss.backward()
optimizer.synchronize()
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
with warnings.catch_warnings(record=True) as ws:
optimizer.step()
assert len(ws) == 1
assert 'optimizer.step() called without optimizer.skip_synchronize()' \
in str(ws[0].message)
def test_no_named_parameters(self):
"""Test that leaving the default named_parameters=None will not throw an error."""
hvd.init()
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = torch.nn.Conv2d(1, 100, 1)
self.conv2 = torch.nn.Conv2d(100, 1, 1)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
return x
model = Net()
inp = torch.rand([1, 1, 1000, 1000])
opt = torch.optim.SGD(model.parameters(), lr=0.1)
opt = hvd.DistributedOptimizer(opt)
loss = model(inp).sum()
opt.zero_grad()
loss.backward()
opt.step()
def test_missing_named_parameters(self):
"""Test that naming half of the model parameters will throw an error."""
hvd.init()
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = torch.nn.Conv2d(1, 100, 1)
self.conv2 = torch.nn.Conv2d(100, 1, 1)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
return x
model = Net()
opt = torch.optim.SGD(model.parameters(), lr=0.1)
try:
hvd.DistributedOptimizer(opt,
named_parameters=list(model.named_parameters())[0:1])
assert False, 'hvd.DistributedOptimizer did not throw error'
except ValueError:
pass
def test_horovod_join_allreduce(self):
"""Test Join op with allreduce."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
dtypes = [torch.IntTensor, torch.LongTensor,
torch.FloatTensor, torch.DoubleTensor]
if torch.cuda.is_available():
dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
integral_types = [torch.IntTensor, torch.LongTensor, torch.cuda.IntTensor, torch.cuda.LongTensor]
dims = [1, 2, 3]
first_join_ranks = [0, 1]
cachings = [False, True]
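# When caching is enabled, every rank first runs one round of the named allreduces so the
# same operations have already been seen once before the designated rank joins.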
for dtype, dim, first_join_rank, caching in itertools.product(dtypes, dims, first_join_ranks, cachings):
torch.manual_seed(1234)
def div(t, s):
if _1_5_api and dtype in integral_types:
return t.floor_divide(s)
return t / s
# Use two tensors to test fusion
tensor_a = torch.FloatTensor(*([5] * dim)).random_(-100, 100)
tensor_a = self.cast_and_place(tensor_a, dtype)
tensor_b = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
tensor_b = self.cast_and_place(tensor_b, dtype)
if caching:
handle_a = hvd.allreduce_async(tensor_a, name="tensor_a", average=True)
handle_b = hvd.allreduce_async(tensor_b, name="tensor_b", average=True)
averaged_a = hvd.synchronize(handle_a)
averaged_b = hvd.synchronize(handle_b)
if rank == first_join_rank:
if dtype.is_cuda:
ret = hvd.join(hvd.local_rank())
else:
ret = hvd.join()
else:
handle_a = hvd.allreduce_async(tensor_a, name="tensor_a", average=True)
handle_b = hvd.allreduce_async(tensor_b, name="tensor_b", average=True)
averaged_a = hvd.synchronize(handle_a)
averaged_b = hvd.synchronize(handle_b)
if dtype.is_cuda:
ret = hvd.join(hvd.local_rank())
else:
ret = hvd.join()
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in integral_types:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert torch.allclose(averaged_a, div(tensor_a * (size - 1), size), threshold), \
'hvd.join with hvd.allreduce produces incorrect results'
assert torch.allclose(averaged_b, div(tensor_b * (size - 1), size), threshold), \
'hvd.join with hvd.allreduce produces incorrect results'
self.assertNotEqual(ret, first_join_rank,
msg="The return value of hvd.join() may not be equal to first_join_rank")
ret_values = hvd.allgather_object(ret)
self.assertSequenceEqual(ret_values, [ret] * size,
msg="hvd.join() did not return the same value on each rank")
def test_horovod_join_allgather(self):
"""Test Join op with allgather."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
dims = [17] * 3
tensor = torch.FloatTensor(*dims)
if rank == 0:
if torch.cuda.is_available():
ret = hvd.join(hvd.local_rank())
else:
ret = hvd.join()
else:
try:
hvd.allgather(tensor)
assert False, 'hvd.allgather did not throw error'
except (torch.FatalError, RuntimeError):
pass
ret = hvd.join(hvd.local_rank())
self.assertNotEqual(ret, 0,
msg="The return value of hvd.join() may not be equal to 0 because that would be the first rank to join")
ret_values = hvd.allgather_object(ret)
self.assertSequenceEqual(ret_values, [ret] * size,
msg="hvd.join() did not return the same value on each rank")
def test_horovod_join_broadcast(self):
"""Test Join op with broadcast."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
dims = [17] * 3
tensor = torch.FloatTensor(*dims)
if rank == 0:
ret = hvd.join(hvd.local_rank())
else:
try:
broadcasted_tensor = hvd.broadcast(tensor, 1, name="test_horovod_join_broadcast")
assert False, 'hvd.broadcast did not throw error'
except (torch.FatalError, RuntimeError):
pass
if torch.cuda.is_available():
ret = hvd.join(hvd.local_rank())
else:
ret = hvd.join()
self.assertNotEqual(ret, 0,
msg="The return value of hvd.join() may not be equal to 0 because that would be the first rank to join")
ret_values = hvd.allgather_object(ret)
self.assertSequenceEqual(ret_values, [ret] * size,
msg="hvd.join() did not return the same value on each rank")
def test_horovod_sync_batch_norm(self):
"""Tests Horovod version of SyncBatchNorm."""
if not torch.cuda.is_available():
self.skipTest("No GPUs available")
hvd.init()
ts_list = [
torch.stack([
torch.tensor([
[r, r + 1],
[r * 2, r * 2 + 1],
[r * 3, r * 3 + 1],
[r * 4, r * 4 + 1]
])
for r in range(hvd.size())
]),
torch.stack([
torch.tensor([
[r + 1],
[r * 2 + 1],
[r * 3 + 1],
[r * 4 + 1]
])
for r in range(hvd.size())
]),
]
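# Each rank feeds only its own slice to SyncBatchNorm while the plain BatchNorm1d sees the
# full stacked batch, so outputs, running statistics, and allreduced gradients should match.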
for ts in ts_list:
sync_bn = hvd.SyncBatchNorm(num_features=4)
sync_bn.cuda(hvd.local_rank())
bn = torch.nn.BatchNorm1d(num_features=4)
bn.cuda(hvd.local_rank())
ts = ts.cuda(hvd.local_rank()).float()
ts1 = ts.clone().requires_grad_()
ts2 = ts.clone().requires_grad_()
# Training
sync_bn_out = sync_bn(ts1[hvd.rank()].unsqueeze(0))
bn_out = bn(ts2)
assert torch.allclose(sync_bn_out, bn_out[hvd.rank()].unsqueeze(0), 1e-6)
assert torch.allclose(sync_bn.running_mean, bn.running_mean, 1e-6)
assert torch.allclose(sync_bn.running_var, bn.running_var, 1e-6)
# Gradients
sync_bn_out.sum().backward()
bn_out.mean(dim=0).sum().backward()
assert torch.allclose(hvd.allreduce(sync_bn.weight.grad, name='sync_bn.weight.grad'), bn.weight.grad, 1e-6)
assert torch.allclose(hvd.allreduce(sync_bn.bias.grad, name='sync_bn.bias.grad'), bn.bias.grad, 1e-6)
assert torch.allclose(hvd.allreduce(ts1.grad, name='ts1.grad'), ts2.grad, 1e-6)
@pytest.mark.skip(reason='https://github.com/horovod/horovod/issues/2496')
def test_timeline_api(self):
hvd.init()
def check_file(fname, check_cycle=True):
if hvd.rank() == 0:
with open(fname, 'r') as timeline_file:
timeline_text = timeline_file.read()
assert 'allreduce.test_allreduce' in timeline_text, timeline_text
assert 'start_time_since_epoch_in_micros' in timeline_text, timeline_text
assert 'NEGOTIATE_ALLREDUCE' in timeline_text, timeline_text
assert 'ALLREDUCE' in timeline_text, timeline_text
json_obj = json.loads(timeline_text)
assert json_obj is not None
if check_cycle:
assert 'CYCLE_START' in timeline_text, timeline_text
with temppath() as fname1:
hvd.start_timeline(fname1, mark_cycles=True)
hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy();
# stop_timeline() immediately stops new events from being registered in the timeline. We sleep
# briefly before calling stop so that the mark_cycle events can still be recorded in the timeline file.
time.sleep(0.2)
hvd.stop_timeline()
check_file(fname1)
# Test resuming with a different filename.
with temppath() as fname2:
hvd.start_timeline(fname2, mark_cycles=True)
time.sleep(0.2)
hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy();
# stop_timeline() immediately stops new events from being registered in the timeline. We sleep
# briefly before calling stop so that cycle events can still be recorded in the timeline file.
time.sleep(0.2)
hvd.stop_timeline()
check_file(fname2)
# Test resuming with a different filename, but mark_cycles=False
with temppath() as fname3:
# Make sure that last stop timeline has been processed.
hvd.start_timeline(fname3, mark_cycles=False)
time.sleep(0.2)
hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy();
# stop_timeline() immediately stops new events from being registered in the timeline. We allow some
# time before calling stop so that the pending events can still be recorded in the timeline file.
hvd.stop_timeline()
check_file(fname3, check_cycle=False)
# Test resuming with a different filename, but mark_cycles=True
with temppath() as fname4:
# Make sure that last stop timeline has been processed.
hvd.start_timeline(fname4, mark_cycles=True)
time.sleep(0.2)
hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy();
# stop_timeline() immediately stops new events from being registered in the timeline. We sleep
# briefly before calling stop so that cycle events can still be recorded in the timeline file.
time.sleep(0.2)
hvd.stop_timeline()
check_file(fname4, check_cycle=True)
with temppath() as fname5:
# Make sure that last stop timeline has been processed.
hvd.start_timeline(fname5, mark_cycles=False)
hvd.start_timeline(fname5, mark_cycles=False)
time.sleep(0.2)
hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy()
hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy()
time.sleep(0.2)
hvd.stop_timeline()
check_file(fname5, check_cycle=False)
hvd.shutdown()
def test_optimizer_no_named_parameters(self):
hvd.init()
model = nn.Sequential(nn.Linear(10, 10), nn.Linear(10, 10))
optimizer = torch.optim.SGD(
[{"params": model[0].parameters()}, {"params": model[1].parameters()}, ],
lr=0.001,
)
optimizer = hvd.DistributedOptimizer(optimizer)
params = optimizer._parameter_names
self.assertEqual(len(params), len(set(params.values())))
# Make sure all workers have the same set of parameter names
all_param_names = hvd.allgather_object(set(params.values()))
self.assertEqual(len(all_param_names), hvd.size())
for param_names in all_param_names:
self.assertEqual(all_param_names[0], param_names)
def test_sparse_embeddings(self):
"""Test that Horovod will correctly aggregate sparse gradients."""
hvd.init()
for sparse_as_dense in [False, True]:
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.embedding = nn.Embedding(10, 3, sparse=True)
def forward(self, x):
x = self.embedding(x)
return x
model = Net()
if hvd.rank() == 0:
inp = torch.LongTensor([[1, 2, 4, 5], [4, 3, 2, 9]])
else:
inp = torch.LongTensor([[1, 3, 4], [4, 7, 9]])
# Wrap the parameters in list(); see: https://github.com/pytorch/pytorch/issues/47594
opt = torch.optim.SparseAdam(list(model.parameters()), lr=0.1)
opt = hvd.DistributedOptimizer(opt, sparse_as_dense=sparse_as_dense)
loss = model(inp).sum()
opt.zero_grad()
loss.backward()
opt.step()
def test_async_sparse_allreduce(self):
"""Test that allgather over indices and values is equivalent to allreduce."""
hvd.init()
# Generate random tensors, then convert them to sparse
def random_sparse_tensor(*shape):
t = torch.rand(*shape)
t[t < 0.8] = 0
return t.to_sparse()
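# hvd.sparse_allreduce_async gathers the indices and values of each sparse tensor; the
# densified result should match a dense allreduce of the same tensors.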
tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5
tensors = [random_sparse_tensor(d0, 10) for d0 in tensor_sizes]
allreduced_tensors = [hvd.allreduce(t.to_dense()) for t in tensors]
handles = [hvd.sparse_allreduce_async(t, op=hvd.Average, name=str(i))
for i, t in enumerate(tensors)]
allgathered_tensors = [handle() for handle in handles]
for reduced, gathered in zip(allreduced_tensors, allgathered_tensors):
assert torch.allclose(reduced, gathered.to_dense(), 1e-6)
def test_async_sparse_allreduce_process_sets(self):
"""Test that allgather over indices and values is equivalent to allreduce if restricted to process sets."""
hvd.init()
if hvd.ccl_built():
self.skipTest("Multiple process sets currently do not support CCL.")
# This test does not apply if there is only one worker.
if hvd.size() == 1:
self.skipTest("Only one worker available")
even_ranks = [rk for rk in range(0, hvd.size()) if rk % 2 == 0]
odd_ranks = [rk for rk in range(0, hvd.size()) if rk % 2 == 1]
even_set = hvd.add_process_set(even_ranks)
odd_set = hvd.add_process_set(odd_ranks)
if hvd.rank() in even_ranks:
set_ranks = even_ranks
this_set = even_set
elif hvd.rank() in odd_ranks:
set_ranks = odd_ranks
this_set = odd_set
# Generate random tensors, then convert them to sparse
def random_sparse_tensor(*shape):
t = torch.rand(*shape)
t[t < 0.8] = 0
return t.to_sparse()
tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5
tensors = [random_sparse_tensor(d0, 10) for d0 in tensor_sizes]
allreduced_tensors = [hvd.allreduce(t.to_dense(), process_set=this_set) for t in tensors]
handles = [hvd.sparse_allreduce_async(t, op=hvd.Average, name=str(i), process_set=this_set)
for i, t in enumerate(tensors)]
allgathered_tensors = [handle() for handle in handles]
for reduced, gathered in zip(allreduced_tensors, allgathered_tensors):
assert torch.allclose(reduced, gathered.to_dense(), 1e-6)
hvd.remove_process_set(odd_set)
hvd.remove_process_set(even_set)
def test_optimizer_process_sets(self):
"""Test DistributedOptimizer restricted to a process set for an entire model.
Note that this test makes the most sense when running with > 2 processes."""
hvd.init()
if hvd.ccl_built():
self.skipTest("Multiple process sets currently do not support CCL.")
# This test does not apply if there is only one worker.
if hvd.size() == 1:
self.skipTest("Only one worker available")
even_ranks = [rk for rk in range(0, hvd.size()) if rk % 2 == 0]
odd_ranks = [rk for rk in range(0, hvd.size()) if rk % 2 == 1]
even_set = hvd.add_process_set(even_ranks)
odd_set = hvd.add_process_set(odd_ranks)
if hvd.rank() in even_ranks:
this_set = even_set
elif hvd.rank() in odd_ranks:
this_set = odd_set
N, D_in, H, D_out = 64, 100, 10, 10
torch.manual_seed(hvd.rank())
x = torch.randn(N, D_in).requires_grad_()
y = torch.randn(N, D_out).requires_grad_()
def new_optimizer(cls, opt_params, model):
p = {
k: v for k, v in opt_params.items()
if k in inspect.getargspec(cls.__init__).args
}
return cls(model.parameters(), **p)
def create_model(opt_class, opt_params, process_set):
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, D_out),
)
optimizer = new_optimizer(opt_class, opt_params, model)
optimizer = hvd.DistributedOptimizer(
optimizer, named_parameters=model.named_parameters(),
process_set=process_set)
return model, optimizer
model, optimizer = create_model(torch.optim.SGD, dict(lr=0.2, momentum=0.9, weight_decay=0.1, centered=True),
even_set)
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
y_pred = model(x)
loss = F.mse_loss(y_pred, y, size_average=False)
optimizer.zero_grad()
loss.backward()
optimizer.step()
v = model.state_dict()["2.weight"]
all_v = hvd.allgather(v, process_set=this_set)
if this_set == even_set:
for start in range(0, all_v.numel(), v.numel()):
assert torch.allclose(v.flatten(), all_v.flatten()[start:start+v.numel()])
else:
for start in range(0, all_v.numel(), v.numel()):
if start // v.numel() == this_set.rank():
continue
# They might randomly agree by chance, but that's extremely unlikely:
assert not torch.allclose(v.flatten(), all_v.flatten()[start:start + v.numel()])
hvd.remove_process_set(odd_set)
hvd.remove_process_set(even_set)
if __name__ == "__main__":
unittest.main()
| [
"torch.nn.Linear",
"torch.cat",
"torch.ones",
"torch.nn.Parameter",
"torch.cuda.is_available",
"torch.LongTensor",
"torch.cuda.FloatTensor",
"torch.load",
"torch.allclose",
"torch.IntTensor",
"torch.is_tensor",
"torch.FloatTensor",
"torch.manual_seed",
"torch.optim.Optimizer.__subclasses__",
"torch.tensor",
"torch.nn.functional.relu",
"torch.empty",
"torch.Tensor",
"torch.nn.Embedding",
"torch.zeros",
"torch.set_default_tensor_type",
"torch.optim.SGD",
"torch.save",
"torch.cuda.device_count",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.rand",
"torch.eq",
"torch.nn.functional.mse_loss",
"torch.nn.BatchNorm1d",
"torch.randn"
] | 1.4.0 | xinyual/horovod | 65ae9afd05b854bc0dc9719dc246454edadf9487 |
1.5 | """
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from itertools import product
from typing import Tuple
import onnx
import pytest
import torch
from nncf import NNCFConfig
from nncf.torch.quantization.layers import PTQuantizerSpec
from nncf.torch.quantization.layers import QUANTIZATION_MODULES
from nncf.torch.quantization.layers import QuantizationMode
from nncf.torch.quantization.layers import QuantizerExportMode
from tests.torch.helpers import get_nodes_by_type
from tests.torch.helpers import register_bn_adaptation_init_args
from tests.torch.helpers import resolve_constant_node_inputs_to_values
from tests.torch.test_helpers import TwoConvTestModel
from tests.torch.test_helpers import load_exported_onnx_version
def get_config_for_export_mode(should_be_onnx_standard: bool) -> NNCFConfig:
nncf_config = NNCFConfig()
nncf_config.update({
"input_info": {
"sample_size": [1, 1, 4, 4]
},
"compression": {
"algorithm": "quantization",
"export_to_onnx_standard_ops": should_be_onnx_standard
}
})
register_bn_adaptation_init_args(nncf_config)
return nncf_config
def test_onnx_export_to_fake_quantize(tmp_path):
model = TwoConvTestModel()
nncf_config = get_config_for_export_mode(should_be_onnx_standard=False)
onnx_model_proto = load_exported_onnx_version(nncf_config, model,
path_to_storage_dir=tmp_path)
num_fq = 0
num_model_nodes = 0
num_other_nodes = 0
# pylint:disable=no-member
for node in onnx_model_proto.graph.node:
op_type = node.op_type
if op_type == 'FakeQuantize':
num_fq += 1
elif op_type in ['Conv', 'Constant']:
num_model_nodes += 1
else:
num_other_nodes += 1
assert num_fq == 4
assert num_other_nodes == 0
def test_onnx_export_to_quantize_dequantize(tmp_path):
# This test does not work with the CPU target_device because
# per-channel quantization is not supported in onnxruntime.
model = TwoConvTestModel()
nncf_config = get_config_for_export_mode(should_be_onnx_standard=True)
nncf_config['target_device'] = 'TRIAL'
onnx_model_proto = load_exported_onnx_version(nncf_config, model,
path_to_storage_dir=tmp_path)
num_q = 0
num_dq = 0
num_model_nodes = 0
num_other_nodes = 0
# pylint:disable=no-member
for node in onnx_model_proto.graph.node:
op_type = node.op_type
if op_type == 'QuantizeLinear':
num_q += 1
elif op_type == 'DequantizeLinear':
num_dq += 1
elif op_type in ['Conv', 'Constant']:
num_model_nodes += 1
else:
num_other_nodes += 1
assert num_q == 4
assert num_q == num_dq
assert num_other_nodes == 0
INPUT_TENSOR_SHAPE = (2, 64, 15, 10)
PER_CHANNEL_AQ_SCALE_SHAPE = (1, INPUT_TENSOR_SHAPE[1], 1, 1)
@pytest.mark.parametrize('per_channel, qmode, export_mode',
product(
[True, False],
[QuantizationMode.SYMMETRIC, QuantizationMode.ASYMMETRIC],
[QuantizerExportMode.FAKE_QUANTIZE, QuantizerExportMode.ONNX_QUANTIZE_DEQUANTIZE_PAIRS]
))
def test_onnx_export_to_quantize_dequantize_per_channel(per_channel: bool,
qmode: QuantizationMode,
export_mode: QuantizerExportMode):
scale_shape = PER_CHANNEL_AQ_SCALE_SHAPE if per_channel else (1,)
qspec = PTQuantizerSpec(
scale_shape=scale_shape,
num_bits=8,
mode=qmode,
signedness_to_force=None,
logarithm_scale=False,
narrow_range=False,
half_range=False,
)
q_cls = QUANTIZATION_MODULES.get(qmode)
quantizer = q_cls(qspec)
if qmode is QuantizationMode.SYMMETRIC:
quantizer.scale = torch.nn.Parameter(torch.rand_like(quantizer.scale))
else:
quantizer.input_low = torch.nn.Parameter(torch.rand_like(quantizer.input_low))
quantizer.input_range = torch.nn.Parameter(torch.rand_like(quantizer.input_range))
# pylint: disable=protected-access
quantizer._export_mode = export_mode
x = torch.rand(INPUT_TENSOR_SHAPE)
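# Per-channel export through ONNX QuantizeLinear/DequantizeLinear pairs is expected to fail
# with a RuntimeError, while FakeQuantize export and all per-tensor cases should succeed.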
if quantizer.per_channel and export_mode is QuantizerExportMode.ONNX_QUANTIZE_DEQUANTIZE_PAIRS:
with pytest.raises(RuntimeError):
quantizer.run_export_quantization(x)
else:
quantizer.run_export_quantization(x)
class TargetCompressionIdxTestModel(torch.nn.Module):
CONV2D_TARGET_CHANNEL_COUNT = 5
CONV2D_TRANSPOSE_TARGET_CHANNEL_COUNT = 10
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(in_channels=1,
out_channels=self.CONV2D_TARGET_CHANNEL_COUNT,
kernel_size=(1, 1))
self.conv_t = torch.nn.ConvTranspose2d(in_channels=self.CONV2D_TARGET_CHANNEL_COUNT,
out_channels=self.CONV2D_TRANSPOSE_TARGET_CHANNEL_COUNT,
kernel_size=(1, 1))
def forward(self, x):
x = self.conv(x)
x = self.conv_t(x)
return x
def get_weight_fq_for_conv_node(node: onnx.NodeProto, graph: onnx.GraphProto):
weight_input_tensor_id = node.input[1]
matches = [x for x in graph.node if weight_input_tensor_id in x.output]
assert len(matches) == 1
match = next(iter(matches))
assert match.op_type == "FakeQuantize"
return match
def get_input_low_input_high_for_wfq_node(wfq_node: onnx.NodeProto, graph: onnx.GraphProto) \
-> Tuple[onnx.AttributeProto, onnx.AttributeProto]:
assert wfq_node.op_type == "FakeQuantize"
conv_wfq_inputs = list(resolve_constant_node_inputs_to_values(wfq_node, graph).values())
return conv_wfq_inputs[1], conv_wfq_inputs[2]
def test_target_compression_idx(tmp_path):
model = TargetCompressionIdxTestModel()
nncf_config = get_config_for_export_mode(should_be_onnx_standard=False)
onnx_model_proto = load_exported_onnx_version(nncf_config, model,
path_to_storage_dir=tmp_path)
onnx_graph = onnx_model_proto.graph # pylint:disable=no-member
conv_nodes = get_nodes_by_type(onnx_model_proto, "Conv")
assert len(conv_nodes) == 1
conv_node = next(iter(conv_nodes))
conv_wfq_node = get_weight_fq_for_conv_node(conv_node, onnx_graph)
input_low_attr, input_high_attr = get_input_low_input_high_for_wfq_node(conv_wfq_node,
onnx_graph)
assert input_low_attr.shape == (TargetCompressionIdxTestModel.CONV2D_TARGET_CHANNEL_COUNT, 1, 1, 1)
assert input_low_attr.shape == input_high_attr.shape
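# ConvTranspose2d stores its weights as (in_channels, out_channels, kH, kW), so the
# per-channel FakeQuantize ranges for it are laid out along dimension 1.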
conv_t_nodes = get_nodes_by_type(onnx_model_proto, "ConvTranspose")
assert len(conv_t_nodes) == 1
conv_t_node = next(iter(conv_t_nodes))
conv_t_wfq_node = get_weight_fq_for_conv_node(conv_t_node, onnx_graph)
input_low_t_attr, input_high_t_attr = get_input_low_input_high_for_wfq_node(conv_t_wfq_node,
onnx_graph)
assert input_low_t_attr.shape == (1, TargetCompressionIdxTestModel.CONV2D_TRANSPOSE_TARGET_CHANNEL_COUNT, 1, 1)
assert input_low_t_attr.shape == input_high_t_attr.shape
| [
"torch.rand",
"torch.nn.Conv2d",
"torch.nn.ConvTranspose2d",
"torch.rand_like"
] | 1.5.0 | sarthakpati/nncf | 29ad62c664c1dd53b3c8c50fc001a1b36bd1e8ac |
1.5 | """
Copyright (c) 2019-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import inspect
from collections import OrderedDict
from enum import Enum
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import TypeVar
import functools
import torch
from copy import deepcopy
from torch import nn
from nncf.common.graph.definitions import MODEL_INPUT_OP_NAME
from nncf.common.graph.definitions import MODEL_OUTPUT_OP_NAME
from nncf.common.graph import NNCFNode
from nncf.common.graph import NNCFNodeName
from nncf.common.graph.model_transformer import ModelTransformer
from nncf.common.graph.transformations.commands import TargetType
from nncf.common.graph.transformations.commands import TransformationPriority
from nncf.common.insertion_point_graph import InsertionPointGraph
from nncf.common.insertion_point_graph import PostHookInsertionPoint
from nncf.common.insertion_point_graph import PreHookInsertionPoint
from nncf.common.utils.logger import logger as nncf_logger
from nncf.common.utils.ordered_enum import OrderedEnum
from nncf.torch.debug import CombinedDebugInterface
from nncf.torch.debug import debuggable_forward
from nncf.common.utils.debug import is_debug
from nncf.torch.dynamic_graph.context import TracingContext
from nncf.torch.dynamic_graph.graph import DynamicGraph
from nncf.torch.dynamic_graph.graph import ShapeIgnoringTensorMetaComparator
from nncf.torch.dynamic_graph.graph_tracer import GraphTracer
from nncf.torch.dynamic_graph.graph_tracer import ModelInputInfo
from nncf.torch.dynamic_graph.graph_tracer import PostGraphBuildActing
from nncf.torch.dynamic_graph.graph_tracer import create_dummy_forward_fn
from nncf.torch.dynamic_graph.io_handling import InputInfoWrapManager
from nncf.torch.dynamic_graph.io_handling import replicate_same_tensors
from nncf.torch.dynamic_graph.io_handling import wrap_nncf_model_outputs_with_objwalk
from nncf.torch.dynamic_graph.operation_address import OperationAddress
from nncf.torch.dynamic_graph.patch_pytorch import ignore_scope
from nncf.torch.dynamic_graph.scope import Scope
from nncf.torch.dynamic_graph.trace_tensor import TracedTensor
from nncf.torch.dynamic_graph.transform_graph import replace_modules_by_nncf_modules
from nncf.torch.graph.graph import PTNNCFGraph
from nncf.torch.graph.graph_builder import GraphBuilder
from nncf.torch.graph.graph_builder import GraphConverter
from nncf.torch.graph.operator_metatypes import SplitMetatype
from nncf.torch.graph.transformations.commands import PTInsertionCommand
from nncf.torch.graph.transformations.commands import PTTargetPoint
from nncf.torch.graph.transformations.layout import PTTransformationLayout
from nncf.torch.knowledge_distillation.knowledge_distillation_handler import KnowledgeDistillationLossHandler
from nncf.torch.layers import NNCF_MODULES
from nncf.torch.layers import NNCF_WRAPPED_USER_MODULES_DICT
from nncf.torch.module_operations import UpdateWeight
from nncf.torch.quantization.layers import QUANTIZATION_MODULES
from nncf.torch.utils import compute_FLOPs_hook
from nncf.torch.utils import get_all_modules_by_type
from nncf.torch.utils import get_state_dict_names_with_modules
from nncf.torch.nested_objects_traversal import objwalk
MODEL_WRAPPED_BY_NNCF_ATTR_NAME = 'nncf_module'
LEGACY_ACT_STORAGE_NAME = "activation_quantizers"
EXTERNAL_QUANTIZERS_STORAGE_NAME = "external_quantizers"
Module = TypeVar('Module', bound=nn.Module)
class ExtraCompressionModuleType(Enum):
EXTERNAL_QUANTIZER = 0
class LoadStateListener:
"""
Resets the initialization flags (`initialized`) for all quantization modules on `load_state_dict` call.
    These flags are used to update parameters that were not loaded (from a checkpoint or the model's state)
    during the initialization stage of the algorithm.
    The flags must be reset on each call of `load_state_dict`, because the internal method (`build_graph`)
    restores the model state by calling this method.
"""
def __init__(self, model, all_quantizations):
# pylint: disable=protected-access
self.hook = model._register_load_state_dict_pre_hook(
functools.partial(self.hook_fn, quantize_modules=all_quantizations.values()))
def hook_fn(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs,
quantize_modules):
for module in quantize_modules:
module.initialized = False
def close(self):
self.hook.remove()
class PTInsertionType(OrderedEnum):
NNCF_MODULE_PRE_OP = 0
NNCF_MODULE_POST_OP = 1
OPERATOR_PRE_HOOK = 2
OPERATOR_POST_HOOK = 3
class PTInsertionPoint:
TARGET_TYPE_VS_PT_INSERTION_TYPE_DICT = {
TargetType.PRE_LAYER_OPERATION: PTInsertionType.NNCF_MODULE_PRE_OP,
TargetType.POST_LAYER_OPERATION: PTInsertionType.NNCF_MODULE_POST_OP,
TargetType.OPERATION_WITH_WEIGHTS: PTInsertionType.NNCF_MODULE_PRE_OP,
TargetType.OPERATOR_PRE_HOOK: PTInsertionType.OPERATOR_PRE_HOOK,
TargetType.OPERATOR_POST_HOOK: PTInsertionType.OPERATOR_POST_HOOK
}
def _get_pt_insertion_type(self, target_type: TargetType) -> PTInsertionType:
if target_type not in PTInsertionPoint.TARGET_TYPE_VS_PT_INSERTION_TYPE_DICT:
raise RuntimeError("Unsupported target type for PyTorch: {}".format(target_type))
return PTInsertionPoint.TARGET_TYPE_VS_PT_INSERTION_TYPE_DICT[target_type]
def __init__(self, target_type: TargetType, op_address: OperationAddress,
input_port_id: int = None):
self.insertion_type = self._get_pt_insertion_type(target_type)
self.op_address = op_address
self.module_scope = op_address.scope_in_model
self.input_port_id = input_port_id
def __eq__(self, other: 'PTInsertionPoint'):
return self.insertion_type == other.insertion_type and \
self.op_address == other.op_address and \
self.module_scope == other.module_scope and \
self.input_port_id == other.input_port_id
def __str__(self):
return ' '.join([str(v) for v in self.__dict__.values()])
def __hash__(self):
return hash(str(self))
# pylint: disable=too-many-public-methods
@ignore_scope
class NNCFNetwork(nn.Module, PostGraphBuildActing):
MODEL_STATE_VERSION_ATTR = '_nncf_model_state_version'
MODEL_STATE_VERSION = 1
def __init__(self, module, input_infos: List[ModelInputInfo],
dummy_forward_fn=None, wrap_inputs_fn=None, scopes_without_shape_matching=None,
ignored_scopes=None, target_scopes=None, reset: bool = False, wrap_outputs_fn=None,
original_model_accuracy=None):
super().__init__()
self._set_nncf_wrapped_model(module)
self._forward_signature = inspect.signature(module.forward)
self.input_infos = input_infos
self._original_model_accuracy = original_model_accuracy
self.ignored_scopes = ignored_scopes
self.target_scopes = target_scopes
self._user_dummy_forward_fn = dummy_forward_fn
self._kd_loss_handler = None
try:
device = next(module.parameters()).device
except StopIteration:
# Param-less model, assume CPU
device = 'cpu'
if wrap_inputs_fn is not None:
self._wrap_inputs_fn = wrap_inputs_fn
else:
self.__input_infos_based_input_wrapper = InputInfoWrapManager(self.input_infos,
self._forward_signature,
module_ref_for_device=self)
self._wrap_inputs_fn = self.__input_infos_based_input_wrapper.wrap_inputs
if wrap_outputs_fn is not None:
self._wrap_outputs_fn = wrap_outputs_fn
else:
self._wrap_outputs_fn = wrap_nncf_model_outputs_with_objwalk
self._nncf_module_scopes = [] # type: List[Scope]
self.scopes_without_shape_matching = scopes_without_shape_matching
self.debug_interface = CombinedDebugInterface() if is_debug() else None
self._extra_module_types = [] # type: List[ExtraCompressionModuleType]
# pylint:disable=line-too-long
self._insertions_into_original_graph = {} # type: Dict[PTTargetPoint, List[Tuple[Callable, TransformationPriority]]]
_orig_graph_build_forward_fn = self._get_dummy_forward_fn_for_graph_building(with_input_tracing=True,
with_output_tracing=True)
nncf_wrapped_model = self.get_nncf_wrapped_model()
eval_only_op_scopes = self._collect_eval_only_op_scopes(nncf_wrapped_model,
_orig_graph_build_forward_fn)
# all modules called in eval mode should be replaced prior to graph building
self._replace_modules_by_nncf_modules(device, eval_only_op_scopes, reset)
_orig_context = TracingContext()
_orig_context.add_node_comparators([MODEL_INPUT_OP_NAME], ShapeIgnoringTensorMetaComparator())
_orig_context.add_node_comparators([MODEL_OUTPUT_OP_NAME], ShapeIgnoringTensorMetaComparator())
if self.scopes_without_shape_matching:
_orig_context.add_node_comparators(scopes_without_shape_matching,
ShapeIgnoringTensorMetaComparator())
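        # Trace the original (uncompressed) model once in eval mode to build the reference dynamic
        # graph, then convert it into the NNCF graph that is later used for insertion point discovery.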
self._original_dynamic_graph = GraphTracer(_orig_graph_build_forward_fn).trace_graph(nncf_wrapped_model,
_orig_context,
as_eval=True)
self._original_graph = GraphConverter.convert(self._original_dynamic_graph,
input_infos=self.input_infos)
self._compressed_graph = None # type: PTNNCFGraph
self._compressed_context = TracingContext()
self._dummy_forward_fn = self._get_dummy_forward_fn_for_graph_building(with_input_tracing=False,
with_output_tracing=False)
self._in_user_dummy_forward = False
self._compressed_context.add_node_comparators([MODEL_INPUT_OP_NAME], ShapeIgnoringTensorMetaComparator())
self._compressed_context.add_node_comparators([MODEL_OUTPUT_OP_NAME], ShapeIgnoringTensorMetaComparator())
if self.scopes_without_shape_matching:
self._compressed_context.add_node_comparators(scopes_without_shape_matching,
ShapeIgnoringTensorMetaComparator())
self._load_listener = None
@debuggable_forward
def forward(self, *args, **kwargs):
with self._compressed_context as ctx: # type: TracingContext
ctx.base_module_thread_local_replica = self
args, kwargs = replicate_same_tensors((args, kwargs))
if not self._in_user_dummy_forward:
                # If a user supplies their own dummy forward function, they are responsible for
                # correctly wrapping inputs inside it as well.
args, kwargs = self._strip_traced_tensors(args, kwargs)
args, kwargs = self._wrap_inputs_fn(args, kwargs)
retval = self.get_nncf_wrapped_model()(*args, **kwargs)
retval = replicate_same_tensors(retval)
if not self._in_user_dummy_forward:
retval = self._wrap_outputs_fn(retval)
if self._kd_loss_handler is not None and self.get_nncf_wrapped_model().training:
self._kd_loss_handler(retval, *args, **kwargs)
return retval
def _strip_traced_tensors(self, args: Tuple, kwargs: Dict) -> Tuple[Tuple, Dict]:
"""
Required to guard against new forward calls on tensors that have already passed
through NNCF's forward once and got turned into TracedTensors by reference access.
"""
is_traced_tensor_predicate = lambda x: isinstance(x, TracedTensor)
def strip_fn(tensor: TracedTensor) -> torch.Tensor:
if hasattr(torch.Tensor, 'as_subclass'):
return torch.Tensor.as_subclass(tensor, torch.Tensor)
# Torch < 1.7.0 fallback
return torch.tensor(tensor, device=tensor.device, requires_grad=tensor.requires_grad)
args = objwalk(args, is_traced_tensor_predicate, strip_fn)
kwargs = objwalk(kwargs, is_traced_tensor_predicate, strip_fn)
return args, kwargs
def create_knowledge_distillation_loss_handler(self, kd_original_model: nn.Module, calculate_fn)\
-> KnowledgeDistillationLossHandler:
"""
Creates KnowledgeDistillationLossHandler instance for enabling Knowledge Distillation feature.
Also returns created KnowledgeDistillationLossHandler for control over Knowledge Distillation logic.
:param kd_original_model: original non compressed model used for distillation
:param calculate_fn: function used to parse model outputs and calculate knowledge distillation loss
:return: KnowledgeDistillationLossHandler instance
"""
device = next(self.get_nncf_wrapped_model().parameters()).device
self._kd_loss_handler = KnowledgeDistillationLossHandler(self._compressed_context,
kd_original_model,
calculate_fn,
device)
return self._kd_loss_handler
    # Cannot use property syntax here, otherwise the wrapped module will end up
    # stored twice in the same checkpoint under different prefixes
def get_nncf_wrapped_model(self):
return getattr(self, MODEL_WRAPPED_BY_NNCF_ATTR_NAME)
def _set_nncf_wrapped_model(self, value):
setattr(self, MODEL_WRAPPED_BY_NNCF_ATTR_NAME, value)
def get_clean_shallow_copy(self) -> 'NNCFNetwork':
# WARNING: Will reset pre- and post-ops of the underlying model. Use save_nncf_module_additions
# and load_nncf_module_additions to preserve these, or temporary_clean_view().
from nncf.torch.utils import save_module_state, load_module_state
saved_state = save_module_state(self)
model_copy = NNCFNetwork(self.get_nncf_wrapped_model(), self.input_infos,
self._user_dummy_forward_fn, self._wrap_inputs_fn,
self.scopes_without_shape_matching, self.ignored_scopes, self.target_scopes,
reset=True)
load_module_state(model_copy, saved_state)
return model_copy
def get_modules_in_nncf_modules_by_type(self, types) -> Dict[Scope, nn.Module]:
nncf_modules = self.get_nncf_modules()
retval = {}
for nncf_module_scope, nncf_module in nncf_modules.items():
nncf_module_scope.pop()
for relative_scope, target_module in get_all_modules_by_type(nncf_module, types).items():
retval[nncf_module_scope + relative_scope] = target_module
return retval
def insert_at_point(self, point: PTInsertionPoint, fn_list: List[Callable]):
if point.insertion_type == PTInsertionType.OPERATOR_PRE_HOOK:
self._compressed_context.register_pre_hooks(fn_list, point.op_address, point.input_port_id)
elif point.insertion_type == PTInsertionType.OPERATOR_POST_HOOK:
self._compressed_context.register_post_hooks(fn_list, point.op_address)
elif point.insertion_type in [PTInsertionType.NNCF_MODULE_PRE_OP,
PTInsertionType.NNCF_MODULE_POST_OP]:
norm_target_scope = self._normalize_variable_recurrent_scope(point.module_scope)
norm_nncf_scopes = [self._normalize_variable_recurrent_scope(x) for x in self._nncf_module_scopes]
assert norm_target_scope in norm_nncf_scopes # Required for proper Recurrent/VariableRecurrent addressing
nncf_module = self.get_module_by_scope(point.module_scope)
if point.insertion_type == PTInsertionType.NNCF_MODULE_PRE_OP:
for fn in fn_list:
nncf_module.register_pre_forward_operation(fn)
elif point.insertion_type == PTInsertionType.NNCF_MODULE_POST_OP:
for fn in fn_list:
nncf_module.register_post_forward_operation(fn)
else:
raise RuntimeError("Unsupported insertion type: {}".format(point.insertion_type))
def __getattr__(self, name):
wrapped_module = super().__getattr__(MODEL_WRAPPED_BY_NNCF_ATTR_NAME)
if hasattr(wrapped_module, name):
return getattr(wrapped_module, name)
return super().__getattr__(name)
def get_graph(self) -> PTNNCFGraph:
if self._compressed_context.graph.get_nodes_count() == 0 or self._compressed_graph is None:
self.rebuild_graph()
return self._compressed_graph
def get_dynamic_graph(self) -> DynamicGraph:
return self._compressed_context.graph
def get_original_graph(self) -> PTNNCFGraph:
return self._original_graph
def get_tracing_context(self) -> TracingContext:
return self._compressed_context
def enable_dynamic_graph_building(self):
self._compressed_context.enable_node_additions()
def disable_dynamic_graph_building(self):
self._compressed_context.disable_node_additions()
def _get_dummy_forward_fn_for_graph_building(self, with_input_tracing, with_output_tracing):
if self._user_dummy_forward_fn is None:
return create_dummy_forward_fn(self.input_infos,
with_input_tracing=with_input_tracing,
wrap_inputs_fn=self._wrap_inputs_fn,
wrap_outputs_fn=self._wrap_outputs_fn,
with_output_tracing=with_output_tracing)
def wrapped_user_dummy_forward_fn(*args, **kwargs):
self._in_user_dummy_forward = True
retval = self._user_dummy_forward_fn(*args, **kwargs)
self._in_user_dummy_forward = False
return retval
return wrapped_user_dummy_forward_fn
def _replace_modules_by_nncf_modules(self, device, eval_only_op_scopes: List[Scope] = None,
reset: bool = False):
module, self._nncf_module_scopes = replace_modules_by_nncf_modules(
self.get_nncf_wrapped_model(), ignored_scopes=self.ignored_scopes,
target_scopes=self.target_scopes, eval_op_scopes=eval_only_op_scopes,
reset=reset)
self._set_nncf_wrapped_model(module.to(device))
def get_nncf_module_scopes(self) -> List[Scope]:
return self._nncf_module_scopes
def get_nncf_modules(self) -> Dict[Scope, torch.nn.Module]:
nncf_module_names_list = NNCF_MODULES + [x.__name__ for x in NNCF_WRAPPED_USER_MODULES_DICT.values()]
return get_all_modules_by_type(self.get_nncf_wrapped_model(), nncf_module_names_list)
def get_weighted_original_graph_nodes(self, nncf_module_names: List[str] = None) -> List[NNCFNode]:
retval = []
for nncf_module_scope in self._nncf_module_scopes:
if nncf_module_names is not None:
module_name = nncf_module_scope[-1].calling_module_class_name
if module_name not in nncf_module_names:
continue
nodes_in_scope = self._original_graph.get_op_nodes_in_scope(nncf_module_scope)
for node in nodes_in_scope:
if node.layer_attributes is not None: # TODO(vshampor): implement more explicit filtering
retval.append(node)
return retval
def get_nncf_modules_by_module_names(self, nncf_module_names_list: List[str]) -> Dict["Scope", torch.nn.Module]:
return get_all_modules_by_type(self.get_nncf_wrapped_model(), nncf_module_names_list)
def rebuild_graph(self, *input_args):
self._compressed_context.reset_graph()
dummy_forward_fn = self._get_dummy_forward_fn_for_graph_building(with_input_tracing=False,
with_output_tracing=False)
builder = GraphBuilder(dummy_forward_fn)
self._compressed_graph = builder.build_graph(self, self._compressed_context,
input_infos=self.input_infos)
def post_build_graph_actions(self):
# Reset initialization flags (`initialized`) for all quantization modules
# after dummy `load_state_dict` call.
quantization_types = [class_type.__name__ for class_type in QUANTIZATION_MODULES.registry_dict.values()]
all_quantizations = get_state_dict_names_with_modules(self, quantization_types)
for module in all_quantizations.values():
module.initialized = False
def is_scope_in_nncf_module_scope(self, scope: Scope):
# TODO: optimize
norm_nncf_scopes = [self._normalize_variable_recurrent_scope(x) for x in self._nncf_module_scopes]
norm_op_scope = self._normalize_variable_recurrent_scope(scope)
for nncf_scope in norm_nncf_scopes:
if norm_op_scope in nncf_scope:
return True
return False
def register_compression_module_type(self, compression_module_type: ExtraCompressionModuleType):
attr_name = self._compression_module_type_to_attr_name(compression_module_type)
if compression_module_type in self._extra_module_types:
raise RuntimeError("Module type {} is already registered".format(compression_module_type))
self.__setattr__(attr_name, nn.ModuleDict())
self._extra_module_types.append(compression_module_type)
def add_compression_module(self, module_key: str, module: nn.Module,
compression_module_type: ExtraCompressionModuleType):
attr_name = self._compression_module_type_to_attr_name(compression_module_type)
if compression_module_type not in self._extra_module_types:
raise RuntimeError("Module type {} was not registered".format(compression_module_type))
storage = self.__getattr__(attr_name)
if module_key in storage:
raise RuntimeError("Module {} is already registered under {}".format(module_key, attr_name))
storage[module_key] = module
def get_compression_modules_by_type(self, compression_module_type: ExtraCompressionModuleType) -> nn.ModuleDict:
attr_name = self._compression_module_type_to_attr_name(compression_module_type)
if compression_module_type not in self._extra_module_types:
raise RuntimeError("Module type {} was not registered".format(compression_module_type))
return self.__getattr__(attr_name)
@staticmethod
def _compression_module_type_to_attr_name(compression_module_type: ExtraCompressionModuleType):
"""
Required for backward compatibility with checkpoints that store function and activation
quantizers directly under corresponding attributes of NNCFNetwork.
"""
if compression_module_type == ExtraCompressionModuleType.EXTERNAL_QUANTIZER:
return EXTERNAL_QUANTIZERS_STORAGE_NAME
raise RuntimeError("Unknown extra module type")
def sort_compression_modules(self, compression_module_type: ExtraCompressionModuleType):
attr_name = self._compression_module_type_to_attr_name(compression_module_type)
if compression_module_type not in self._extra_module_types:
raise RuntimeError("Module type {} was not registered".format(compression_module_type))
module_dict = self.__getattr__(attr_name)
# pylint: disable=protected-access
module_dict._modules = OrderedDict(sorted(module_dict._modules.items()))
self.__setattr__(attr_name, module_dict)
@staticmethod
def _normalize_variable_recurrent_scope(scope: Scope):
"""
Two scopes pointing to an NNCF module that only differ in a Recurrent/VariableRecurrent/VariableRecurrentReverse
scope node actually point to one and the same module.
"""
ret_scope = scope.copy()
for scope_element in ret_scope:
if scope_element.calling_module_class_name in ["Recurrent", "VariableRecurrent",
"VariableRecurrentReverse"]:
scope_element.calling_module_class_name = "NormalizedName_Recurrent"
return ret_scope
def do_dummy_forward(self, force_eval=False):
"""
Attention: If run with force_eval=False, this may spoil the batchnorm statistics,
and an eval run of the model will perform much worse than the train run.
"""
if force_eval:
train_mode = self.training
self.eval()
with torch.no_grad():
with self._compressed_context as ctx:
ctx.base_module_thread_local_replica = self
self._dummy_forward_fn(self)
if force_eval:
if train_mode:
self.train()
def get_insertion_point_graph(self) -> InsertionPointGraph:
        # Set up pre- and post-hooks on almost every op in PyTorch
nncf_graph = self.get_original_graph()
pre_hooks = [] # type: List[PreHookInsertionPoint]
post_hooks = [] # type: List[PostHookInsertionPoint]
for node in nncf_graph.get_all_nodes():
# Pre-hook insertion point nodes
# Will insert a pre-hook IP for each input edge. The input edge must be marked with
# a port ID attribute.
in_edges = nncf_graph.get_input_edges(node)
for edge in in_edges:
port_id = edge.input_port_id
pre_hook_ip = PreHookInsertionPoint(target_node_name=node.node_name,
input_port_id=port_id)
pre_hooks.append(pre_hook_ip)
if issubclass(node.metatype, SplitMetatype):
# chunk returns a tuple of tensors, which can only be handled in NNCF
# once post-hook ports are enabled. Work around it for now by disallowing post-hook
# insertion for chunks
# TODO: enable post-hook ports and remove this
continue
# Post-hook insertion point nodes
post_hook_ip = PostHookInsertionPoint(node.node_name)
post_hooks.append(post_hook_ip)
weighted_nodes = self.get_weighted_original_graph_nodes()
weighted_node_names = [weighted_node.node_name for weighted_node in weighted_nodes]
ip_graph = InsertionPointGraph(self._original_graph, weight_modifiable_node_names=weighted_node_names,
allowed_pre_hook_insertion_points=pre_hooks,
allowed_post_hook_insertion_points=post_hooks)
return ip_graph
def get_module_by_scope(self, scope: Scope) -> Optional[torch.nn.Module]:
curr_module = self.get_nncf_wrapped_model()
for scope_element in scope[1:]: # omit first scope element which corresponds to base module
if scope_element.calling_field_name is None:
                # The module is created in-place on every call and never stored in the model;
                # this happens, for example, for nn.Softmax in BERT implementations.
return None
# pylint: disable=protected-access
next_module = curr_module._modules.get(scope_element.calling_field_name)
if next_module is None:
raise RuntimeError("Could not find a {} module member in {} module of scope {} during node search"
.format(scope_element.calling_field_name,
scope_element.calling_module_class_name,
str(scope)))
curr_module = next_module
return curr_module
def get_containing_module(self, node_name: NNCFNodeName) -> torch.nn.Module:
if self._compressed_graph is not None:
try:
scope = self._compressed_graph.get_scope_by_node_name(node_name)
except RuntimeError:
                nncf_logger.debug("Node {} not found in compressed graph when trying to determine containing module, "
                                  "trying the original graph to see if the node was present there "
                                  "during graph building".format(node_name))
scope = self._original_graph.get_scope_by_node_name(node_name)
else:
scope = self._original_graph.get_scope_by_node_name(node_name)
return self.get_module_by_scope(scope)
def get_parameters_count_in_model(self):
"""
        Returns the total number of model parameters.
"""
count = 0
for param in self.parameters():
count = count + param.numel()
return count
def get_flops_per_module(self) -> Dict[NNCFNodeName, int]:
"""
Calculates FLOPS count for modules.
"""
model = self
flops_count_dict = {}
def get_hook(name):
return functools.partial(compute_FLOPs_hook, dict_to_save=flops_count_dict,
module_node_name=name)
hook_list = []
for nncf_node in self._original_graph.get_all_nodes():
node_module = self.get_containing_module(nncf_node.node_name)
hook_list.append(node_module.register_forward_hook(get_hook(nncf_node.node_name)))
model.do_dummy_forward(force_eval=True)
for h in hook_list:
h.remove()
return flops_count_dict
def get_MACs_in_model(self):
"""
Calculates MAC units count for model.
"""
flops_count_dict = self.get_flops_per_module()
total_MACs_count = sum(v // 2 for v in flops_count_dict.values())
return total_MACs_count
def get_input_infos(self) -> List[ModelInputInfo]:
return deepcopy(self.input_infos)
def save_nncf_module_additions(self) -> Dict[Scope, Tuple[torch.nn.ModuleDict, torch.nn.ModuleDict]]:
retval = {}
for module_scope, nncf_module in self.get_nncf_modules().items():
retval[module_scope] = (deepcopy(nncf_module.pre_ops), deepcopy(nncf_module.post_ops))
return retval
def load_nncf_module_additions(self,
scope_vs_pre_post_ops_dict: Dict[Scope, Tuple[torch.nn.ModuleDict,
torch.nn.ModuleDict]]):
for module_scope, nncf_module in self.get_nncf_modules().items():
nncf_module.pre_ops = scope_vs_pre_post_ops_dict[module_scope][0]
nncf_module.post_ops = scope_vs_pre_post_ops_dict[module_scope][1]
def temporary_clean_view(self):
class Mgr:
def __init__(self, model: NNCFNetwork):
self.model = model
self.storage_dict = {}
def __enter__(self):
self.storage_dict = self.model.save_nncf_module_additions()
clean_model = self.model.get_clean_shallow_copy()
return clean_model
def __exit__(self, exc_type, exc_val, exc_tb):
self.model.load_nncf_module_additions(self.storage_dict)
return Mgr(self)
def _collect_eval_only_op_scopes(self, model: nn.Module, dummy_forward_fn: Callable) -> List[Scope]:
"""
Returns scopes of the modules which are executed in evaluation mode only.
"""
tracer = GraphTracer(dummy_forward_fn)
result = []
eval_graph = tracer.trace_graph(model, as_eval=True)
for dyn_graph_node in eval_graph.get_all_nodes():
result.append(dyn_graph_node.op_exec_context.scope_in_model)
return result
@property
def original_model_accuracy(self):
return self._original_model_accuracy
def get_node_to_op_address_mapping(self) -> Dict[NNCFNodeName, OperationAddress]:
# The IDs of corresponding nodes of the original dynamic graph and original NNCF graph
# must be equal for this to work.
retval = {}
for node in self._original_dynamic_graph.get_all_nodes():
node_id = node.node_id
op_address = node.op_exec_context.op_address
nncf_node = self._original_graph.get_node_by_id(node_id)
retval[nncf_node.node_name] = op_address
return retval
class PTModelTransformer(ModelTransformer):
def __init__(self, model: NNCFNetwork):
super().__init__(model)
self._node_to_op_address_mapping = model.get_node_to_op_address_mapping()
def transform(self, transformation_layout: PTTransformationLayout) -> NNCFNetwork:
fns_grouped_by_points = {} # type: Dict[PTInsertionPoint, List[Tuple[Callable, TransformationPriority]]]
for transformation_command in transformation_layout.transformations: # type: PTInsertionCommand
target_point = transformation_command.target_point # type: PTTargetPoint
target_node_name = target_point.target_node_name
pt_ip = PTInsertionPoint(target_type=target_point.target_type,
op_address=self._node_to_op_address_mapping[target_node_name],
input_port_id=target_point.input_port_id)
fn = transformation_command.fn
if target_point.type is TargetType.OPERATION_WITH_WEIGHTS:
fn = UpdateWeight(fn)
tup = (fn, transformation_command.priority)
if pt_ip not in fns_grouped_by_points:
fns_grouped_by_points[pt_ip] = [tup]
else:
fns_grouped_by_points[pt_ip].append(tup)
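        # At each insertion point, apply the collected hooks in ascending priority order.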
for pt_ip, fn_list_with_priority in fns_grouped_by_points.items():
fn_list_with_priority = sorted(fn_list_with_priority, key=lambda x: x[1])
self._model.insert_at_point(pt_ip, [x[0] for x in fn_list_with_priority])
return self._model
| [
"torch.nn.ModuleDict",
"torch.no_grad",
"torch.tensor",
"torch.Tensor.as_subclass"
] | 1.5.0 | sarthakpati/nncf | 29ad62c664c1dd53b3c8c50fc001a1b36bd1e8ac |
1.5 | """
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import os
from functools import partial
from openvino.inference_engine import IENetwork, IEPlugin, get_version
from nncf.config import NNCFConfig
from nncf.torch.dynamic_graph.graph_tracer import create_input_infos
from tools.ir_utils import get_ir_paths
def getExecNet(plugin, net):
return plugin.load(network=net)
argparser = argparse.ArgumentParser()
argparser.add_argument("-m", "--model", help="input IR name", required=True)
argparser.add_argument("--bin", help="Input *.bin file name")
argparser.add_argument("-o", "--output-dir", help="Output directory to dump weights", required=True)
argparser.add_argument("-c", "--config", type=str, help="Model's config", required=True)
argparser.add_argument("--cuda", help="inference PyTorch model on CUDA", action='store_true')
argparser.add_argument('--data', metavar='DIR', help='path to dataset', required=True)
argparser.add_argument('--cpu-plugin-dir', metavar='DIR',
help='path to the directory with CPU Plugin and CPU Extension libraries', required=True)
argparser.add_argument("-n", "--num-layers", type=int, default=-1, help="Dump activations for given number of layers")
argparser.add_argument("--dump", action='store_true', help="Enables dump of activations")
args = argparser.parse_args()
def validate_torch_model(output_dir, config, num_layers, dump, val_loader=None, cuda=False):
from tools.debug.common import load_torch_model, register_print_hooks
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model = load_torch_model(config, cuda)
model_e = model.eval()
if dump:
register_print_hooks(output_dir, model_e, num_layers=num_layers, data_to_compare=None, dump_activations=True)
validate_general(val_loader, model_e, infer_pytorch_model, cuda)
def main():
model_bin, model_xml = get_ir_paths(args.model, args.bin)
config = NNCFConfig.from_json(args.config)
input_infos_list = create_input_infos(config)
image_size = input_infos_list[0].shape[-1]
size = int(image_size / 0.875)
print('IE version: {}'.format(get_version()))
    # NOTE: import torch only after loading the IE plugin to avoid an issue with the built-in MKLDNN of PyTorch
plugin = IEPlugin(device='CPU',
plugin_dirs=args.cpu_plugin_dir)
plugin.add_cpu_extension(os.path.join(args.cpu_plugin_dir, "libcpu_extension.so"))
net = IENetwork(model=model_xml, weights=model_bin)
exec_net = getExecNet(plugin, net)
from torch.utils.data import DataLoader
import torchvision.datasets as datasets
import torchvision.transforms as transforms
val_loader = DataLoader(
datasets.ImageFolder(args.data, transforms.Compose([
transforms.Resize(size),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])),
batch_size=1, shuffle=False, num_workers=4, pin_memory=True)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
config['log_dir'] = args.output_dir
infer_fn = partial(infer_ie_model, net=net)
validate_general(val_loader, exec_net, infer_fn)
validate_torch_model(os.path.join(args.output_dir, "PTH"), config=config, num_layers=args.num_layers,
dump=args.dump, val_loader=val_loader, cuda=args.cuda)
def infer_ie_model(exec_net, inputs, net):
input_cpu = inputs.numpy()
input_name = next(iter(net.inputs))
output_name = next(iter(net.outputs))
res = exec_net.infer(inputs={input_name: input_cpu})
output = res[output_name]
import torch
torch_output = torch.from_numpy(output)
return torch_output
def infer_pytorch_model(model, inputs):
return model(inputs)
def validate_general(val_loader, model, infer_model_fn, cuda=False):
from examples.torch.classification.main import AverageMeter, accuracy
top1 = AverageMeter()
top5 = AverageMeter()
for i, (input_, target) in enumerate(val_loader):
# compute output
output = infer_model_fn(model, input_)
if cuda:
target = target.cuda(None, non_blocking=True)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
top1.update(acc1, input_.size(0))
top5.update(acc5, input_.size(0))
if i % 10 == 0:
print('IE Test : [{0}/{1}]\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(i, len(val_loader), top1=top1, top5=top5))
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg, top5.avg
if __name__ == '__main__':
main()
| [
"torch.from_numpy"
] | 1.5.0 | sarthakpati/nncf | 29ad62c664c1dd53b3c8c50fc001a1b36bd1e8ac |
0.27 | # Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""
Tests a variety of export options with our surgery methods applied, including
torchscript, torch.fx, and ONNX.
"""
import os
import pathlib
from typing import Any, Callable, Type
import pytest
import torch
import torch.fx
from composer.algorithms.blurpool.blurpool import BlurPool
from composer.algorithms.channels_last.channels_last import ChannelsLast
from composer.algorithms.factorize.factorize import Factorize
from composer.algorithms.ghost_batchnorm.ghost_batchnorm import GhostBatchNorm
from composer.algorithms.squeeze_excite.squeeze_excite import SqueezeExcite
from composer.algorithms.stochastic_depth.stochastic_depth import StochasticDepth
from composer.core.algorithm import Algorithm
from composer.functional import (apply_blurpool, apply_channels_last, apply_factorization, apply_ghost_batchnorm,
apply_squeeze_excite, apply_stochastic_depth)
from tests.algorithms.algorithm_settings import get_alg_kwargs, get_alg_model, get_algs_with_marks
algo_kwargs = {
apply_stochastic_depth: {
'stochastic_method': 'block',
'target_layer_name': 'ResNetBottleneck'
},
apply_ghost_batchnorm: {
'ghost_batch_size': 2
}
}
@pytest.fixture
def input():
# input batch to ComposerModel is (input, target) tuple
return (torch.rand(4, 3, 112, 112), torch.Tensor())
torchscript_algs_with_marks = [
x for x in get_algs_with_marks()
if x.values[0] in (BlurPool, Factorize, GhostBatchNorm, SqueezeExcite, StochasticDepth, ChannelsLast)
]
# <--- torchscript export --->
def get_surgery_method(alg_cls: Type[Algorithm]) -> Callable:
if alg_cls is BlurPool:
return apply_blurpool
if alg_cls is Factorize:
return apply_factorization
if alg_cls is GhostBatchNorm:
return apply_ghost_batchnorm
if alg_cls is SqueezeExcite:
return apply_squeeze_excite
if alg_cls is StochasticDepth:
return apply_stochastic_depth
if alg_cls is ChannelsLast:
return apply_channels_last
raise ValueError(f'Unknown algorithm class {alg_cls}')
@pytest.mark.timeout(10)
@pytest.mark.parametrize('alg_cls', torchscript_algs_with_marks)
def test_surgery_torchscript_train(input: Any, alg_cls: Type[Algorithm]):
"""Tests torchscript model in train mode."""
if alg_cls in (Factorize, GhostBatchNorm, StochasticDepth):
pytest.xfail('Unsupported')
alg_kwargs = get_alg_kwargs(alg_cls)
model = get_alg_model(alg_cls)
surgery_method = get_surgery_method(alg_cls)
alg_kwargs = algo_kwargs.get(surgery_method, alg_kwargs)
surgery_method(model, **alg_kwargs)
scripted_func = torch.jit.script(model)
scripted_func.train() # type: ignore (third-party)
model.train()
torch.testing.assert_allclose(scripted_func(input), model(input)) # type: ignore (third-party)
@pytest.mark.timeout(10)
@pytest.mark.parametrize('alg_cls', torchscript_algs_with_marks)
def test_surgery_torchscript_eval(input: Any, alg_cls: Type[Algorithm]):
"""Tests torchscript model in eval mode."""
if alg_cls is Factorize:
pytest.xfail('Unsupported')
surgery_method = get_surgery_method(alg_cls)
alg_kwargs = get_alg_kwargs(alg_cls)
model = get_alg_model(alg_cls)
alg_kwargs = algo_kwargs.get(surgery_method, alg_kwargs)
surgery_method(model, **alg_kwargs)
scripted_func = torch.jit.script(model)
scripted_func.eval() # type: ignore (third-party)
model.eval()
torch.testing.assert_allclose(scripted_func(input), model(input)) # type: ignore (third-party)
# <--- torch.fx export --->
@pytest.mark.timeout(10)
@pytest.mark.parametrize('alg_cls', torchscript_algs_with_marks)
def test_surgery_torchfx_eval(
input: Any,
alg_cls: Type[Algorithm],
):
"""Tests torch.fx model in eval mode."""
alg_kwargs = get_alg_kwargs(alg_cls)
model = get_alg_model(alg_cls)
surgery_method = get_surgery_method(alg_cls)
if alg_cls in (BlurPool, GhostBatchNorm):
pytest.xfail('Control flow')
alg_kwargs = algo_kwargs.get(surgery_method, alg_kwargs)
surgery_method(model, **alg_kwargs)
model.eval()
traced_func = torch.fx.symbolic_trace(model)
torch.testing.assert_allclose(traced_func(input), model(input)) # type: ignore (third-party)
# <--- onnx export --->
@pytest.mark.timeout(10)
@pytest.mark.parametrize('alg_cls', torchscript_algs_with_marks)
@pytest.mark.filterwarnings(
r'ignore:Converting a tensor to a Python .* might cause the trace to be incorrect:torch.jit._trace.TracerWarning')
def test_surgery_onnx(
input: Any,
alg_cls: Type[Algorithm],
tmp_path: pathlib.Path,
):
"""Tests onnx export and runtime"""
pytest.importorskip('onnx')
import onnx # type: ignore
import onnxruntime as ort # type: ignore
surgery_method = get_surgery_method(alg_cls)
model = get_alg_model(alg_cls)
alg_kwargs = get_alg_kwargs(alg_cls)
alg_kwargs = algo_kwargs.get(surgery_method, alg_kwargs)
surgery_method(model, **alg_kwargs)
model.eval()
onnx_path = os.path.join(tmp_path, 'model.onnx')
torch.onnx.export(
model,
(input,),
onnx_path,
input_names=['input'],
output_names=['output'],
)
# check onnx model
onnx_model = onnx.load(onnx_path)
onnx.checker.check_model(onnx_model)
# run inference
ort_session = ort.InferenceSession(onnx_path)
outputs = ort_session.run(
None,
{'input': input[0].numpy()},
)
torch.testing.assert_allclose(
outputs[0],
model(input),
rtol=1e-4, # lower tolerance for ONNX
atol=1e-3, # lower tolerance for ONNX
)
| [
"torch.rand",
"torch.fx.symbolic_trace",
"torch.onnx.export",
"torch.jit.script",
"torch.Tensor"
] | 0.27 | moinnadeem/composer | bc3f41b766bd4450f05a99f44db4a6b3901ea1c8 |
0.27 | # Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Core Exponential Moving Average (EMA) classes and functions."""
from __future__ import annotations
import copy
import itertools
import logging
from typing import Any, Dict, List, Optional, Union
import torch
from composer.core import Algorithm, Event, State, Time, TimeUnit
from composer.loggers import Logger
log = logging.getLogger(__name__)
__all__ = ['EMA', 'compute_ema']
def compute_ema(model: T_Model, ema_model: T_Model, smoothing: float = 0.99):
r"""Updates the weights of ``ema_model`` to be closer to the weights of ``model`` according to an exponential
weighted average. Weights are updated according to
.. math::
W_{ema_model}^{(t+1)} = smoothing\times W_{ema_model}^{(t)}+(1-smoothing)\times W_{model}^{(t)}
The update to ``ema_model`` happens in place.
The half life of the weights for terms in the average is given by
.. math::
t_{1/2} = -\frac{\log(2)}{\log(smoothing)}
Therefore to set smoothing to obtain a target half life, set smoothing according to
.. math::
smoothing = \exp\left[-\frac{\log(2)}{t_{1/2}}\right]
Args:
model (torch.nn.Module): the model containing the latest weights to use to update the moving average weights.
ema_model (torch.nn.Module): the model containing the moving average weights to be updated.
smoothing (float, optional): the coefficient representing the degree to which older observations are kept.
Must be in the interval :math:`(0, 1)`. Default: ``0.99``.
Example:
.. testcode::
import composer.functional as cf
from torchvision import models
model = models.resnet50()
ema_model = models.resnet50()
cf.compute_ema(model, ema_model, smoothing=0.9)
"""
with torch.no_grad():
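        # Both parameters and buffers (e.g. batchnorm running statistics) are updated in place:
        # ema <- smoothing * ema + (1 - smoothing) * model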
model_params = itertools.chain(model.parameters(), model.buffers())
ema_model_params = itertools.chain(ema_model.parameters(), ema_model.buffers())
for ema_param, model_param in zip(ema_model_params, model_params):
model_param = model_param.detach()
ema_param.copy_(ema_param * smoothing + (1. - smoothing) * model_param)
class EMA(Algorithm):
r"""Maintains a shadow model with weights that follow the exponential moving average of the trained model weights.
Weights are updated according to
.. math::
W_{ema_model}^{(t+1)} = smoothing\times W_{ema_model}^{(t)}+(1-smoothing)\times W_{model}^{(t)}
Where the smoothing is determined from ``half_life`` according to
.. math::
smoothing = \exp\left[-\frac{\log(2)}{t_{1/2}}\right]
Model evaluation is done with the moving average weights, which can result in better generalization. Because of the
shadow models, EMA triples the model's memory consumption. Note that this does not mean that the total memory
required doubles, since stored activations and the optimizer state are not duplicated. EMA also uses a small
amount of extra compute to update the moving average weights.
See the :doc:`Method Card </method_cards/ema>` for more details.
Args:
half_life (str): The time string specifying the half life for terms in the average. A longer half life means
            old information is remembered longer, while a shorter half life means old information is discarded sooner.
            A half life of ``0`` means no averaging is done, an infinite half life means no update is done. Currently
            only units of epoch ('ep') and batch ('ba') are supported. Value must be an integer.
update_interval (str, optional): The time string specifying the period at which updates are done. For example,
an ``update_interval='1ep'`` means updates are done every epoch, while ``update_interval='10ba'`` means
updates are done once every ten batches. Units must match the units used to specify ``half_life``. If not
specified, ``update_interval`` will default to ``1`` in the units of ``half_life``. Value must be an
integer. Default: ``None``.
train_with_ema_weights (bool, optional): An experimental feature that uses the ema weights as the training
weights. In most cases should be left as ``False``. Default ``False``.
Example:
.. testcode::
from composer.algorithms import EMA
algorithm = EMA(half_life='50ba', update_interval='1ba')
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration="1ep",
algorithms=[algorithm],
optimizers=[optimizer]
)
"""
def __init__(self, half_life: str, update_interval: Optional[str] = None, train_with_ema_weights: bool = False):
self.half_life = half_life
self.update_interval = update_interval
self.train_with_ema_weights = train_with_ema_weights
self.ema_model = None
self.training_model = None
self.serialized_attributes = [
'ema_model',
'training_model',
]
# Check timestrings are parsable and convert into time object
try:
self.half_life = Time.from_timestring(half_life)
except ValueError as error:
raise ValueError(f'Invalid time string for parameter half_life') from error
# Create the update interval if none is specified
if self.update_interval is None:
self.update_interval = Time(1, self.half_life.unit)
elif type(update_interval) is str:
try:
self.update_interval = Time.from_timestring(update_interval)
except ValueError as error:
raise ValueError(f'Invalid time string for parameter update_interval') from error
else:
raise ValueError(f'update_interval must be None or a time string.')
# Verify that the units of half_life and update_interval are compatible
if self.half_life.unit != self.update_interval.unit:
raise ValueError(f'Units of half_life and update_interval must match.')
# Verify that the time strings have supported units.
if self.half_life.unit not in [TimeUnit.BATCH, TimeUnit.EPOCH]:
raise ValueError(f'Invalid time unit for parameter half_life: '
f'{self.update_interval.unit}')
# Calculate the appropriate weighting for the moving average
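        # Illustrative example: with half_life='50ba' and update_interval='1ba',
        # smoothing = 2 ** (-1 / 50), approximately 0.9862, so roughly 1.4% of the
        # current weights enter the moving average at every update.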
self.smoothing = 2**(-(self.update_interval.value / self.half_life.value))
# Construct the appropriate matching events
self.match_events = [Event.FIT_START, Event.EVAL_START, Event.EVAL_END]
if self.half_life.unit == TimeUnit.EPOCH:
self.match_events.append(Event.EPOCH_END)
if self.half_life.unit == TimeUnit.BATCH:
self.match_events.append(Event.BATCH_END)
def match(self, event: Event, state: State) -> bool:
return event in self.match_events
def apply(self, event: Event, state: State, logger: Logger) -> None:
assert isinstance(self.update_interval, Time)
if event == Event.FIT_START:
if self.ema_model is not None:
_move_shadow_model_to_device(self.ema_model, state.model)
if self.training_model is not None:
_move_shadow_model_to_device(self.training_model, state.model)
if event in [Event.BATCH_END, Event.EPOCH_END]:
# Check if an update should happen
if state.timestamp.get(self.update_interval.unit).value % self.update_interval.value == 0:
# Initialize the shadow models if they don't exist yet
if self.ema_model is None:
self.ema_model = ShadowModel(state.model)
if self.training_model is None and self.train_with_ema_weights is False:
self.training_model = ShadowModel(state.model)
# Update the ema model
compute_ema(state.model, self.ema_model, smoothing=self.smoothing)
if self.train_with_ema_weights:
# Use the ema weights for further training
_copy_model(self.ema_model, state.model)
if event == Event.EVAL_START and self.ema_model is not None and self.training_model is not None:
# Swap out the training model for the ema model in state
_copy_model(state.model, self.training_model)
_copy_model(self.ema_model, state.model)
if event == Event.EVAL_END and self.training_model is not None:
# Swap out the ema model for the training model in state
_copy_model(self.training_model, state.model)
def get_ema_model(self, model: torch.nn.Module):
"""Copies ema model parameters and buffers to the input model and returns it.
Args:
model (torch.nn.Module): the model to convert into the ema model.
Returns:
model (torch.nn.Module): the input model with parameters and buffers replaced with the averaged parameters
and buffers.
"""
if self.ema_model is None:
raise AttributeError('ema model has not been initialized yet')
_copy_model(self.ema_model, model)
return model
def state_dict(self) -> Dict[str, ShadowModel]:
state_dict = {}
for attribute_name in self.serialized_attributes:
shadow_model = getattr(self, attribute_name)
state_dict[attribute_name] = {}
state_dict[attribute_name]['parameters'] = shadow_model.parameters()
state_dict[attribute_name]['buffers'] = shadow_model.buffers()
return state_dict
def load_shadow_model(self, name, parameters: List, buffers: List):
shadow_model = ShadowModel(None)
shadow_model.param_list = parameters
shadow_model.buffer_list = buffers
setattr(self, name, shadow_model)
def load_state_dict(self, state: Dict[str, Any], strict: bool = False):
for attribute_name, serialized_value in state.items():
self.load_shadow_model(attribute_name, serialized_value['parameters'], serialized_value['buffers'])
class ShadowModel:
"""A shadow model that tracks parameters and buffers from an original source model.
Args:
model (torch.nn.Module): the source model containing the parameters and buffers to shadow.
"""
def __init__(self, model: Union[None, torch.nn.Module]):
if model is not None:
self.param_list = [copy.deepcopy(p.data) for p in model.parameters()]
self.buffer_list = [copy.deepcopy(b.data) for b in model.buffers()]
else:
self.param_list = []
self.buffer_list = []
def parameters(self):
return self.param_list
def buffers(self):
return self.buffer_list
T_Model = Union[torch.nn.Module, ShadowModel]
def _copy_model(source_model: T_Model, destination_model: T_Model):
"""Copies parameters and buffers from ``source_model`` to ``destination_model``"""
with torch.no_grad():
source_params = itertools.chain(source_model.parameters(), source_model.buffers())
destination_params = itertools.chain(destination_model.parameters(), destination_model.buffers())
for source_param, destination_param in zip(source_params, destination_params):
destination_param.data = source_param.data
def _move_shadow_model_to_device(shadow_model: ShadowModel, destination_model: torch.nn.Module):
"""Ensures the tensors of a shadow model are on the same device as a destination model"""
with torch.no_grad():
destination_params = destination_model.parameters()
shadow_params = shadow_model.parameters()
shadow_model.param_list = [s.to(d.device) for s, d in zip(shadow_params, destination_params)]
destination_buffers = destination_model.buffers()
shadow_buffers = shadow_model.buffers()
shadow_model.buffer_list = [s.to(d.device) for s, d in zip(shadow_buffers, destination_buffers)]
| [
"torch.no_grad"
] | 0.27 | moinnadeem/composer | bc3f41b766bd4450f05a99f44db4a6b3901ea1c8 |
1.4 | import torch
import torch.nn as nn
class CharbonnierLoss(nn.Module):
"""Charbonnier Loss (L1)"""
def __init__(self, eps=1e-6):
super(CharbonnierLoss, self).__init__()
self.eps = eps
def forward(self, x, y):
diff = x - y
loss = torch.sum(torch.sqrt(diff * diff + self.eps))
return loss
# Define GAN loss: [vanilla | lsgan | wgan-gp]
class GANLoss(nn.Module):
def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):
super(GANLoss, self).__init__()
self.gan_type = gan_type.lower()
self.real_label_val = real_label_val
self.fake_label_val = fake_label_val
if self.gan_type == 'gan' or self.gan_type == 'ragan':
self.loss = nn.BCEWithLogitsLoss()
elif self.gan_type == 'lsgan':
self.loss = nn.MSELoss()
elif self.gan_type == 'wgan-gp':
def wgan_loss(input, target):
# target is boolean
return -1 * input.mean() if target else input.mean()
self.loss = wgan_loss
else:
raise NotImplementedError('GAN type [{:s}] is not found'.format(self.gan_type))
def get_target_label(self, input, target_is_real):
if self.gan_type == 'wgan-gp':
return target_is_real
if target_is_real:
return torch.empty_like(input).fill_(self.real_label_val)
else:
return torch.empty_like(input).fill_(self.fake_label_val)
def forward(self, input, target_is_real):
target_label = self.get_target_label(input, target_is_real)
loss = self.loss(input, target_label)
return loss
class GradientPenaltyLoss(nn.Module):
def __init__(self, device=torch.device('cpu')):
super(GradientPenaltyLoss, self).__init__()
self.register_buffer('grad_outputs', torch.Tensor())
self.grad_outputs = self.grad_outputs.to(device)
def get_grad_outputs(self, input):
if self.grad_outputs.size() != input.size():
self.grad_outputs.resize_(input.size()).fill_(1.0)
return self.grad_outputs
def forward(self, interp, interp_crit):
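        # WGAN-GP penalty: take the gradient of the critic output w.r.t. the interpolated
        # samples and penalize the deviation of its per-sample L2 norm from 1.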
grad_outputs = self.get_grad_outputs(interp_crit)
grad_interp = torch.autograd.grad(outputs=interp_crit, inputs=interp,
grad_outputs=grad_outputs, create_graph=True,
retain_graph=True, only_inputs=True)[0]
grad_interp = grad_interp.view(grad_interp.size(0), -1)
grad_interp_norm = grad_interp.norm(2, dim=1)
loss = ((grad_interp_norm - 1)**2).mean()
return loss
class SpatialGradientLoss(nn.Module):
"""Super sharp Loss"""
def __init__(self):
super(SpatialGradientLoss, self).__init__()
def diffMap(self, A, alongX):
B, N, C, H = A.shape
if alongX:
return A[:, :, 1:C, :] - A[:, :, 0:C-1, :]
return A[:, :, :, 1:H] - A[:, :, :, 0:H-1]
def forward(self, A, B):
Amap = self.diffMap(A, alongX=True)
Bmap = self.diffMap(B, alongX=True)
loss = torch.sum((Amap - Bmap) ** 2)
Amap = self.diffMap(A, alongX=False)
Bmap = self.diffMap(B, alongX=False)
loss += torch.sum((Amap - Bmap) ** 2)
loss = torch.sqrt(loss)
return loss
class KLDivergence(nn.Module):
"""KL loss for VAE regularization"""
def __init__(self):
super(KLDivergence, self).__init__()
def forward(self, X):
B, N = X.shape
mean = X.mean(dim=0).to(X.device)
var = torch.zeros((N, N)).to(X.device)
for i in range(B):
y = X[i, :] - mean
var += torch.mm(y.resize(N, 1), y.resize(1, N))
for i in range(N):
if var[i, i] <= 0:
print(var[i][i])
var = var.clamp(min=1e-18) / N
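        # Closed-form KL(N(mean, var) || N(0, I)) = 0.5 * (-log det(var) + tr(var) - N + ||mean||^2).
        # Note: var.log().trace() only sums the log of the diagonal entries, so it equals
        # log det(var) exactly only when var is (approximately) diagonal.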
kl = 0.5 * (-(var.log().trace()) + torch.trace(var)
- N + mean.pow(2).sum())
return kl
class FrobeniousNorm(nn.Module):
def __init__(self, eps=1e-6):
super(FrobeniousNorm, self).__init__()
self.eps = eps
def forward(self, X):
B, C, H, W = X.shape
return torch.sqrt(torch.sum(X ** 2, dim=(1, 2, 3)) + self.eps)
| [
"torch.zeros",
"torch.device",
"torch.sqrt",
"torch.trace",
"torch.nn.MSELoss",
"torch.empty_like",
"torch.autograd.grad",
"torch.nn.BCEWithLogitsLoss",
"torch.Tensor",
"torch.sum"
] | 1.4.0 | P0lyFish/noise2-series | a21ad1b7cb20e44161393156efd7dcdab729b4a3 |
1.4 | #!/usr/bin/env python3
import argparse
import logging
import os
from typing import Tuple
import torch
import torchaudio
from torchaudio.models.wav2vec2.utils.import_huggingface import import_huggingface_model
from greedy_decoder import Decoder
TORCH_VERSION: Tuple[int, ...] = tuple(int(x) for x in torch.__version__.split(".")[:2])
if TORCH_VERSION >= (1, 10):
import torch.ao.quantization as tq
else:
import torch.quantization as tq
_LG = logging.getLogger(__name__)
def _parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
)
parser.add_argument(
'--model',
required=True,
help='Path to the input pretrained weight file.'
)
parser.add_argument(
'--output-path',
help='Path to the directory, where the Torchscript-ed pipelines are saved.',
)
parser.add_argument(
'--test-file',
help='Path to a test audio file.',
)
parser.add_argument(
'--quantize',
action='store_true',
help='Quantize the model.',
)
parser.add_argument(
'--debug',
action='store_true',
help=(
'When enabled, individual components are separately tested '
'for the numerical compatibility and TorchScript compatibility.'
)
)
return parser.parse_args()
class Loader(torch.nn.Module):
def forward(self, audio_path: str) -> torch.Tensor:
waveform, sample_rate = torchaudio.load(audio_path)
if sample_rate != 16000:
waveform = torchaudio.functional.resample(waveform, float(sample_rate), 16000.)
return waveform
class Encoder(torch.nn.Module):
def __init__(self, encoder: torch.nn.Module):
super().__init__()
self.encoder = encoder
def forward(self, waveform: torch.Tensor) -> torch.Tensor:
result, _ = self.encoder(waveform)
return result[0]
def _get_model(model_id):
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
tokenizer = Wav2Vec2Processor.from_pretrained(model_id).tokenizer
labels = [k for k, v in sorted(tokenizer.get_vocab().items(), key=lambda kv: kv[1])]
original = Wav2Vec2ForCTC.from_pretrained(model_id)
model = import_huggingface_model(original)
return model.eval(), labels
def _get_decoder(labels):
return Decoder(labels)
def _main():
args = _parse_args()
_init_logging(args.debug)
_LG.info('Loading model: %s', args.model)
model, labels = _get_model(args.model)
_LG.info('Labels: %s', labels)
_LG.info('Building pipeline')
loader = Loader()
encoder = Encoder(model)
decoder = _get_decoder(labels)
_LG.info(encoder)
if args.quantize:
_LG.info('Quantizing the model')
model.encoder.transformer.pos_conv_embed.__prepare_scriptable__()
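        # Dynamic quantization: nn.Linear weights are stored in int8 and activations are
        # quantized on the fly at inference time; other module types are left in float.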
encoder = tq.quantize_dynamic(
encoder, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
_LG.info(encoder)
# test
if args.test_file:
_LG.info('Testing with %s', args.test_file)
waveform = loader(args.test_file)
emission = encoder(waveform)
transcript = decoder(emission)
_LG.info(transcript)
torch.jit.script(loader).save(os.path.join(args.output_path, 'loader.zip'))
torch.jit.script(encoder).save(os.path.join(args.output_path, 'encoder.zip'))
torch.jit.script(decoder).save(os.path.join(args.output_path, 'decoder.zip'))
def _init_logging(debug=False):
level = logging.DEBUG if debug else logging.INFO
format_ = (
'%(message)s' if not debug else
'%(asctime)s: %(levelname)7s: %(funcName)10s: %(message)s'
)
logging.basicConfig(level=level, format=format_)
if __name__ == '__main__':
_main()
| [
"torch.jit.script",
"torch.__version__.split",
"torch.quantization.quantize_dynamic"
] | 1.4.0 | albertvillanova/audio | 0cd25093626d067e008e1f81ad76e072bd4a1edd |
1.4 | import math
from typing import Optional
from itertools import permutations
import torch
def sdr(
estimate: torch.Tensor,
reference: torch.Tensor,
mask: Optional[torch.Tensor] = None,
epsilon: float = 1e-8
) -> torch.Tensor:
"""Computes source-to-distortion ratio.
    1. scale the reference signal with power(s_est * s_ref) / power(s_ref * s_ref)
2. compute SNR between adjusted estimate and reference.
Args:
        estimate (torch.Tensor): Estimated signal.
Shape: [batch, speakers (can be 1), time frame]
reference (torch.Tensor): Reference signal.
Shape: [batch, speakers, time frame]
mask (torch.Tensor or None, optional): Binary mask to indicate padded value (0) or valid value (1).
Shape: [batch, 1, time frame]
epsilon (float, optional): constant value used to stabilize division.
Returns:
torch.Tensor: scale-invariant source-to-distortion ratio.
Shape: [batch, speaker]
References:
- Single-channel multi-speaker separation using deep clustering
Y. Isik, J. Le Roux, Z. Chen, S. Watanabe, and J. R. Hershey,
- Conv-TasNet: Surpassing Ideal Time--Frequency Magnitude Masking for Speech Separation
Luo, Yi and Mesgarani, Nima
https://arxiv.org/abs/1809.07454
Notes:
This function is tested to produce the exact same result as
https://github.com/naplab/Conv-TasNet/blob/e66d82a8f956a69749ec8a4ae382217faa097c5c/utility/sdr.py#L34-L56
"""
reference_pow = reference.pow(2).mean(axis=2, keepdim=True)
mix_pow = (estimate * reference).mean(axis=2, keepdim=True)
scale = mix_pow / (reference_pow + epsilon)
reference = scale * reference
error = estimate - reference
reference_pow = reference.pow(2)
error_pow = error.pow(2)
if mask is None:
reference_pow = reference_pow.mean(axis=2)
error_pow = error_pow.mean(axis=2)
else:
denom = mask.sum(axis=2)
reference_pow = (mask * reference_pow).sum(axis=2) / denom
error_pow = (mask * error_pow).sum(axis=2) / denom
return 10 * torch.log10(reference_pow) - 10 * torch.log10(error_pow)
class PIT(torch.nn.Module):
"""Applies utterance-level speaker permutation
    Computes the maximum possible value of the given utility function
over the permutations of the speakers.
Args:
utility_func (function):
Function that computes the utility (opposite of loss) with signature of
            (estimate: torch.Tensor, reference: torch.Tensor) -> torch.Tensor
where input Tensors are shape of [batch, speakers, frame] and
the output Tensor is shape of [batch, speakers].
References:
- Multi-talker Speech Separation with Utterance-level Permutation Invariant Training of
Deep Recurrent Neural Networks
Morten Kolbæk, Dong Yu, Zheng-Hua Tan and Jesper Jensen
https://arxiv.org/abs/1703.06284
"""
def __init__(self, utility_func):
super().__init__()
self.utility_func = utility_func
def forward(
self,
estimate: torch.Tensor,
reference: torch.Tensor,
mask: Optional[torch.Tensor] = None,
epsilon: float = 1e-8
) -> torch.Tensor:
"""Compute utterance-level PIT Loss
Args:
estimate (torch.Tensor): Estimated source signals.
                Shape: [batch, speakers, time frame]
reference (torch.Tensor): Reference (original) source signals.
Shape: [batch, speakers, time frame]
mask (torch.Tensor or None, optional): Binary mask to indicate padded value (0) or valid value (1).
Shape: [batch, 1, time frame]
epsilon (float, optional): constant value used to stabilize division.
Returns:
torch.Tensor: Maximum criterion over the speaker permutation.
Shape: [batch, ]
"""
assert estimate.shape == reference.shape
batch_size, num_speakers = reference.shape[:2]
num_permute = math.factorial(num_speakers)
util_mat = torch.zeros(
batch_size, num_permute, dtype=estimate.dtype, device=estimate.device
)
for i, idx in enumerate(permutations(range(num_speakers))):
util = self.utility_func(estimate, reference[:, idx, :], mask=mask, epsilon=epsilon)
util_mat[:, i] = util.mean(dim=1) # take the average over speaker dimension
return util_mat.max(dim=1).values
_sdr_pit = PIT(utility_func=sdr)
def sdr_pit(
estimate: torch.Tensor,
reference: torch.Tensor,
mask: Optional[torch.Tensor] = None,
epsilon: float = 1e-8):
"""Computes scale-invariant source-to-distortion ratio.
1. adjust both estimate and reference to have 0-mean
    2. scale the reference signal with power(s_est * s_ref) / power(s_ref * s_ref)
3. compute SNR between adjusted estimate and reference.
Args:
        estimate (torch.Tensor): Estimated signal.
Shape: [batch, speakers (can be 1), time frame]
reference (torch.Tensor): Reference signal.
Shape: [batch, speakers, time frame]
mask (torch.Tensor or None, optional): Binary mask to indicate padded value (0) or valid value (1).
Shape: [batch, 1, time frame]
epsilon (float, optional): constant value used to stabilize division.
Returns:
torch.Tensor: scale-invariant source-to-distortion ratio.
Shape: [batch, speaker]
References:
- Single-channel multi-speaker separation using deep clustering
Y. Isik, J. Le Roux, Z. Chen, S. Watanabe, and J. R. Hershey,
- Conv-TasNet: Surpassing Ideal Time--Frequency Magnitude Masking for Speech Separation
Luo, Yi and Mesgarani, Nima
https://arxiv.org/abs/1809.07454
Notes:
This function is tested to produce the exact same result as the reference implementation,
*when the inputs have 0-mean*
https://github.com/naplab/Conv-TasNet/blob/e66d82a8f956a69749ec8a4ae382217faa097c5c/utility/sdr.py#L107-L153
"""
return _sdr_pit(estimate, reference, mask, epsilon)
def sdri(
estimate: torch.Tensor,
reference: torch.Tensor,
mix: torch.Tensor,
mask: Optional[torch.Tensor] = None,
epsilon: float = 1e-8,
) -> torch.Tensor:
"""Compute the improvement of SDR (SDRi).
    This function computes how much SDR is improved if the estimation is changed from
the original mixture signal to the actual estimated source signals. That is,
``SDR(estimate, reference) - SDR(mix, reference)``.
For computing ``SDR(estimate, reference)``, PIT (permutation invariant training) is applied,
    so that the best combination of sources between the reference signals and the estimate signals
    is picked.
Args:
estimate (torch.Tensor): Estimated source signals.
Shape: [batch, speakers, time frame]
reference (torch.Tensor): Reference (original) source signals.
Shape: [batch, speakers, time frame]
        mix (torch.Tensor): Mixed source signals, from which the estimated signals were generated.
Shape: [batch, speakers == 1, time frame]
mask (torch.Tensor or None, optional): Binary mask to indicate padded value (0) or valid value (1).
Shape: [batch, 1, time frame]
epsilon (float, optional): constant value used to stabilize division.
Returns:
torch.Tensor: Improved SDR. Shape: [batch, ]
References:
- Conv-TasNet: Surpassing Ideal Time--Frequency Magnitude Masking for Speech Separation
Luo, Yi and Mesgarani, Nima
https://arxiv.org/abs/1809.07454
"""
sdr_ = sdr_pit(estimate, reference, mask=mask, epsilon=epsilon) # [batch, ]
base_sdr = sdr(mix, reference, mask=mask, epsilon=epsilon) # [batch, speaker]
return sdr_ - base_sdr.mean(dim=1)
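if __name__ == "__main__":
    # Minimal usage sketch; the batch/speaker/frame sizes below are arbitrary assumptions,
    # chosen only to illustrate the shapes documented in the docstrings above.
    est = torch.randn(4, 2, 16000)
    ref = torch.randn(4, 2, 16000)
    print(sdr(est, ref).shape)      # per-speaker SDR, torch.Size([4, 2])
    print(sdr_pit(est, ref).shape)  # best permutation, averaged over speakers, torch.Size([4])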
| [
"torch.zeros",
"torch.log10"
] | 1.4.0 | albertvillanova/audio | 0cd25093626d067e008e1f81ad76e072bd4a1edd |
1.10 | import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from mqbench.convert_deploy import convert_deploy
from mqbench.prepare_by_platform import prepare_by_platform, BackendType
from mqbench.utils.state import enable_calibration, enable_quantization, disable_all
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--train_data', metavar='DIR',
help='path to dataset', required=True)
parser.add_argument('--val_data', metavar='DIR',
help='path to dataset', required=True)
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=100, type=int,
                    metavar='N', help='print frequency (default: 100)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
parser.add_argument('--model_path', type=str, default=None)
parser.add_argument('--backend', type=str, choices=['tensorrt', 'nnie', 'ppl', 'snpe', 'vitis'], default='tensorrt')
parser.add_argument('--optim', type=str, default='sgd')
parser.add_argument('--not-quant', action='store_true')
parser.add_argument('--deploy', action='store_true')
BackendMap = {'tensorrt': BackendType.Tensorrt,
'nnie': BackendType.NNIE,
'ppl': BackendType.PPLW8A16,
'snpe': BackendType.SNPE,
'vitis': BackendType.Vitis}
best_acc1 = 0
def main():
args = parser.parse_args()
args.quant = not args.not_quant
args.backend = BackendMap[args.backend]
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
# for internal cluster
if args.model_path:
state_dict = torch.load(args.model_path)
print(f'load pretrained checkpoint from: {args.model_path}')
model.load_state_dict(state_dict)
# quantize model
if args.quant:
model = prepare_by_platform(model, args.backend)
if not torch.cuda.is_available():
print('using CPU, this will be slow')
elif args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
if args.optim == 'sgd':
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
elif args.optim == 'adam':
optimizer = torch.optim.Adam(model.parameters(), args.lr,
betas=(0.9, 0.999), eps=1e-08,
weight_decay=args.weight_decay,
amsgrad=False)
# prepare dataset
train_loader, train_sampler, val_loader, cali_loader = prepare_dataloader(args)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
state_dict = checkpoint['state_dict']
model_dict = model.state_dict()
if 'module.' in list(state_dict.keys())[0] and 'module.' not in list(model_dict.keys())[0]:
for k in list(state_dict.keys()):
state_dict[k[7:]] = state_dict.pop(k)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {}), acc = {}"
.format(args.resume, checkpoint['epoch'], best_acc1))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
elif args.quant:
enable_calibration(model)
calibrate(cali_loader, model, args)
cudnn.benchmark = True
if args.quant:
enable_quantization(model)
if args.quant and args.deploy:
convert_deploy(model.eval(), args.backend, input_shape_dict={'data': [10, 3, 224, 224]})
return
if args.evaluate:
if args.quant:
from mqbench.convert_deploy import convert_merge_bn
convert_merge_bn(model.eval())
validate(val_loader, model, criterion, args)
return
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args)
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, args)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer' : optimizer.state_dict(),
}, is_best)
def prepare_dataloader(args):
traindir = os.path.join(args.train_data, 'train')
valdir = os.path.join(args.val_data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
cali_batch_size = 10
cali_batch = 10
cali_dataset = torch.utils.data.Subset(train_dataset, indices=torch.arange(cali_batch_size * cali_batch))
cali_loader = torch.utils.data.DataLoader(cali_dataset, batch_size=cali_batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
return train_loader, train_sampler, val_loader, cali_loader
def calibrate(cali_loader, model, args):
model.eval()
print("Start calibration ...")
print("Calibrate images number = ", len(cali_loader.dataset))
with torch.no_grad():
for i, (images, target) in enumerate(cali_loader):
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
output = model(images)
print("Calibration ==> ", i+1)
print("End calibration.")
return
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
def validate(val_loader, model, criterion, args):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, top1, top5],
prefix='Test: ')
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
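# Summary of the quantization flow used above: prepare_by_platform inserts fake-quantize nodes
# for the chosen backend, enable_calibration + calibrate() let the observers collect activation
# statistics on a small subset of training data, enable_quantization switches the fake-quantize
# nodes on for training/evaluation, and convert_deploy exports the final deployable model.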
| [
"torch.arange",
"torch.distributed.init_process_group",
"torch.save",
"torch.no_grad",
"torch.multiprocessing.spawn",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.cuda.set_device",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.load",
"torch.utils.data.distributed.DistributedSampler",
"torch.nn.CrossEntropyLoss",
"torch.nn.DataParallel"
] | 1.10.0 | 415905716/MQBench | 3f8321ec9ab9fd05d99c21700a901b1ff6a90a1e |
1.8 | import torch
from torch.quantization import FakeQuantizeBase
from torch.quantization.observer import MovingAverageMinMaxObserver
from torch.quantization.fake_quantize import _is_per_channel, _is_per_tensor
from mqbench.utils import is_symmetric_quant
class QuantizeBase(FakeQuantizeBase):
r""" This is an extension of the FakeQuantize module in fake_quantize.py, which
    supports more generalized lower-bit quantization and supports learning of the scale
and zero point parameters through backpropagation. For literature references,
please see the class _LearnableFakeQuantizePerTensorOp.
In addition to the attributes in the original FakeQuantize module, the _LearnableFakeQuantize
module also includes the following attributes to support quantization parameter learning.
"""
def __init__(self, observer=MovingAverageMinMaxObserver, **observer_kwargs):
super().__init__()
self.activation_post_process = observer(**observer_kwargs)
self.dtype = self.activation_post_process.dtype
self.qscheme = self.activation_post_process.qscheme
self.quant_min = self.activation_post_process.quant_min
self.quant_max = self.activation_post_process.quant_max
assert self.quant_min <= self.quant_max, \
'quant_min must be less than or equal to quant_max'
self.pot_scale = self.activation_post_process.pot_scale
self.ch_axis = self.activation_post_process.ch_axis \
if hasattr(self.activation_post_process, 'ch_axis') else -1
assert _is_per_channel(self.qscheme) or \
_is_per_tensor(self.qscheme), \
'Only per channel and per tensor quantization are supported in fake quantize' + \
' got qscheme: ' + str(self.qscheme)
self.is_per_channel = _is_per_channel(self.qscheme)
bitrange = torch.tensor(self.quant_max - self.quant_min + 1).double()
self.bitwidth = int(torch.log2(bitrange).item())
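        # Worked example of the two lines above: for a signed 8-bit observer with
        # quant_min = -128 and quant_max = 127, bitrange = 256 and log2(256) = 8, so bitwidth = 8.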
self.is_symmetric_quant = is_symmetric_quant(self.qscheme)
@torch.jit.export
def calculate_qparams(self):
return self.activation_post_process.calculate_qparams()
@torch.jit.export
def extra_repr(self):
return 'fake_quant_enabled={}, observer_enabled={}, ' \
'quant_min={}, quant_max={}, dtype={}, qscheme={}, ch_axis={}, '.format(
self.fake_quant_enabled, self.observer_enabled,
self.quant_min, self.quant_max,
self.dtype, self.qscheme, self.ch_axis) | [
"torch.quantization.fake_quantize._is_per_channel",
"torch.tensor",
"torch.quantization.fake_quantize._is_per_tensor",
"torch.log2"
] | 1.8.1 | 415905716/MQBench | 3ac8928ef6641e0ea78f9a5f0524b574a835463e |
1.3 | from abc import ABCMeta, abstractmethod
import torch
from pfrl.agents.dqn import DQN
from pfrl.utils.recurrent import pack_and_forward
class AbstractDPP(DQN, metaclass=ABCMeta):
"""Dynamic Policy Programming.
See: https://arxiv.org/abs/1004.2027.
"""
@abstractmethod
def _l_operator(self, qout):
raise NotImplementedError()
def _compute_target_values(self, exp_batch):
batch_next_state = exp_batch["next_state"]
if self.recurrent:
target_next_qout, _ = pack_and_forward(
self.target_model,
batch_next_state,
exp_batch["next_recurrent_state"],
)
else:
target_next_qout = self.target_model(batch_next_state)
next_q_expect = self._l_operator(target_next_qout)
batch_rewards = exp_batch["reward"]
batch_terminal = exp_batch["is_state_terminal"]
return (
batch_rewards + exp_batch["discount"] * (1 - batch_terminal) * next_q_expect
)
def _compute_y_and_t(self, exp_batch):
batch_state = exp_batch["state"]
batch_size = len(exp_batch["reward"])
if self.recurrent:
qout, _ = pack_and_forward(
self.model,
batch_state,
exp_batch["recurrent_state"],
)
else:
qout = self.model(batch_state)
batch_actions = exp_batch["action"]
# Q(s_t,a_t)
batch_q = qout.evaluate_actions(batch_actions).reshape((batch_size, 1))
with torch.no_grad():
# Compute target values
if self.recurrent:
target_qout, _ = pack_and_forward(
self.target_model,
batch_state,
exp_batch["recurrent_state"],
)
else:
target_qout = self.target_model(batch_state)
# Q'(s_t,a_t)
target_q = target_qout.evaluate_actions(batch_actions).reshape(
(batch_size, 1)
)
# LQ'(s_t,a)
target_q_expect = self._l_operator(target_qout).reshape((batch_size, 1))
# r + g * LQ'(s_{t+1},a)
batch_q_target = self._compute_target_values(exp_batch).reshape(
(batch_size, 1)
)
# Q'(s_t,a_t) + r + g * LQ'(s_{t+1},a) - LQ'(s_t,a)
t = target_q + batch_q_target - target_q_expect
return batch_q, t
class DPP(AbstractDPP):
"""Dynamic Policy Programming with softmax operator.
Args:
eta (float): Positive constant.
For other arguments, see DQN.
"""
def __init__(self, *args, **kwargs):
self.eta = kwargs.pop("eta", 1.0)
super().__init__(*args, **kwargs)
def _l_operator(self, qout):
return qout.compute_expectation(self.eta)
class DPPL(AbstractDPP):
"""Dynamic Policy Programming with L operator.
Args:
eta (float): Positive constant.
For other arguments, see DQN.
"""
def __init__(self, *args, **kwargs):
self.eta = kwargs.pop("eta", 1.0)
super().__init__(*args, **kwargs)
def _l_operator(self, qout):
return torch.logsumexp(self.eta * qout.q_values, dim=1) / self.eta
class DPPGreedy(AbstractDPP):
"""Dynamic Policy Programming with max operator.
This algorithm corresponds to DPP with eta = infinity.
"""
def _l_operator(self, qout):
return qout.max
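if __name__ == "__main__":
    # Minimal illustration of the DPPL L operator: the scaled log-sum-exp is a soft maximum
    # that approaches the exact row-wise max as eta grows (the q-values below are arbitrary).
    q = torch.tensor([[1.0, 2.0, 3.0]])
    print(torch.logsumexp(1.0 * q, dim=1) / 1.0)      # ~3.41, soft maximum
    print(torch.logsumexp(100.0 * q, dim=1) / 100.0)  # ~3.00, close to max(q)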
| [
"torch.no_grad",
"torch.logsumexp"
] | 1.3.0 | ummavi/pfrl-1 | e856a7cca30fcc3871024cdf7522d066006a5f0c |
1.0 | import pathlib
import sys
import torch
here = pathlib.Path(__file__).resolve().parent
sys.path.append(str(here / '..' / '..'))
import controldiffeq
class NeuralCDE(torch.nn.Module):
"""A Neural CDE model. Provides a wrapper around the lower-level cdeint function, to get a flexible Neural CDE
model.
Specifically, considering the CDE
```
z_t = z_{t_0} + \int_{t_0}^t f(z_s)dX_s
```
    ```
    where X is determined by the data, and given some terminal time t_N, this model first computes z_{t_N},
    applies a linear map to it, and outputs the result.
It's known that linear functions on CDEs are universal approximators, so this is a very general type of model.
"""
def __init__(self, func, input_channels, hidden_channels, output_channels, initial=True):
"""
Arguments:
func: As cdeint.
input_channels: How many channels there are in the input.
hidden_channels: The number of hidden channels, i.e. the size of z_t.
output_channels: How many channels to perform a linear map to at the end.
initial: Whether to automatically construct the initial value from data (in which case z0 must not be passed
during forward()), or to use the one supplied during forward (in which case z0 must be passed during
forward()).
"""
if isinstance(func, ContinuousRNNConverter): # ugly hack
hidden_channels = hidden_channels + input_channels
super(NeuralCDE, self).__init__()
self.input_channels = input_channels
self.hidden_channels = hidden_channels
self.output_channels = output_channels
self.func = func
self.initial = initial
if initial and not isinstance(func, ContinuousRNNConverter): # very ugly hack
self.initial_network = torch.nn.Linear(input_channels, hidden_channels)
self.linear = torch.nn.Linear(hidden_channels, output_channels)
        self.sigmoid = torch.nn.Sigmoid()
def extra_repr(self):
return "input_channels={}, hidden_channels={}, output_channels={}, initial={}" \
"".format(self.input_channels, self.hidden_channels, self.output_channels, self.initial)
def forward(self, times, coeffs, final_index, z0=None, stream=False, **kwargs):
"""
Arguments:
times: The times of the observations for the input path X, e.g. as passed as an argument to
`controldiffeq.natural_cubic_spline_coeffs`.
coeffs: The coefficients describing the input path X, e.g. as returned by
`controldiffeq.natural_cubic_spline_coeffs`.
final_index: Each batch element may have a different final time. This defines the index within the tensor
`times` of where the final time for each batch element is.
z0: See the 'initial' argument to __init__.
stream: Whether to return the result of the Neural CDE model at all times (True), or just the final time
(False). Defaults to just the final time. The `final_index` argument is ignored if stream is True.
**kwargs: Will be passed to cdeint.
Returns:
If stream is False, then this will return the terminal time z_T. If stream is True, then this will return
all intermediate times z_t, for those t for which there was data.
"""
# Extract the sizes of the batch dimensions from the coefficients
coeff, _, _, _ = coeffs
batch_dims = coeff.shape[:-2]
if not stream:
assert batch_dims == final_index.shape, "coeff.shape[:-2] must be the same as final_index.shape. " \
"coeff.shape[:-2]={}, final_index.shape={}" \
"".format(batch_dims, final_index.shape)
cubic_spline = controldiffeq.NaturalCubicSpline(times, coeffs)
if z0 is None:
assert self.initial, "Was not expecting to be given no value of z0."
if isinstance(self.func, ContinuousRNNConverter): # still an ugly hack
z0 = torch.zeros(*batch_dims, self.hidden_channels, dtype=coeff.dtype, device=coeff.device)
else:
z0 = self.initial_network(cubic_spline.evaluate(times[0]))
else:
assert not self.initial, "Was expecting to be given a value of z0."
if isinstance(self.func, ContinuousRNNConverter): # continuing adventures in ugly hacks
z0_extra = torch.zeros(*batch_dims, self.input_channels, dtype=z0.dtype, device=z0.device)
z0 = torch.cat([z0_extra, z0], dim=-1)
# Figure out what times we need to solve for
if stream:
t = times
else:
# faff around to make sure that we're outputting at all the times we need for final_index.
sorted_final_index, inverse_final_index = final_index.unique(sorted=True, return_inverse=True)
if 0 in sorted_final_index:
sorted_final_index = sorted_final_index[1:]
final_index = inverse_final_index
else:
final_index = inverse_final_index + 1
if len(times) - 1 in sorted_final_index:
sorted_final_index = sorted_final_index[:-1]
t = torch.cat([times[0].unsqueeze(0), times[sorted_final_index], times[-1].unsqueeze(0)])
# Switch default solver
if 'method' not in kwargs:
kwargs['method'] = 'rk4'
if kwargs['method'] == 'rk4':
if 'options' not in kwargs:
kwargs['options'] = {}
options = kwargs['options']
if 'step_size' not in options and 'grid_constructor' not in options:
time_diffs = times[1:] - times[:-1]
options['step_size'] = time_diffs.min().item()
# Actually solve the CDE
z_t = controldiffeq.cdeint(dX_dt=cubic_spline.derivative,
z0=z0,
func=self.func,
t=t,
**kwargs)
# Organise the output
if stream:
# z_t is a tensor of shape (times, ..., channels), so change this to (..., times, channels)
for i in range(len(z_t.shape) - 2, 0, -1):
z_t = z_t.transpose(0, i)
else:
# final_index is a tensor of shape (...)
# z_t is a tensor of shape (times, ..., channels)
final_index_indices = final_index.unsqueeze(-1).expand(z_t.shape[1:]).unsqueeze(0)
z_t = z_t.gather(dim=0, index=final_index_indices).squeeze(0)
# Linear map and return
pred_y = self.linear(z_t)
pred_y = self.sigmoid(pred_y)
return pred_y
# Note that this relies on the first channel being time
class ContinuousRNNConverter(torch.nn.Module):
def __init__(self, input_channels, hidden_channels, model):
super(ContinuousRNNConverter, self).__init__()
self.input_channels = input_channels
self.hidden_channels = hidden_channels
self.model = model
out_base = torch.zeros(self.input_channels + self.hidden_channels, self.input_channels)
for i in range(self.input_channels):
out_base[i, i] = 1
self.register_buffer('out_base', out_base)
def extra_repr(self):
return "input_channels: {}, hidden_channels: {}".format(self.input_channels, self.hidden_channels)
def forward(self, z):
# z is a tensor of shape (..., input_channels + hidden_channels)
x = z[..., :self.input_channels]
h = z[..., self.input_channels:]
# In theory the hidden state must lie in this region. And most of the time it does anyway! Very occasionally
# it escapes this and breaks everything, though. (Even when using adaptive solvers or small step sizes.) Which
# is kind of surprising given how similar the GRU-ODE is to a standard negative exponential problem, we'd
# expect to get absolute stability without too much difficulty. Maybe there's a bug in the implementation
# somewhere, but not that I've been able to find... (and h does only escape this region quite rarely.)
h = h.clamp(-1, 1)
# model_out is a tensor of shape (..., hidden_channels)
model_out = self.model(x, h)
batch_dims = model_out.shape[:-1]
out = self.out_base.repeat(*batch_dims, 1, 1).clone()
out[..., self.input_channels:, 0] = model_out
return out
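# Rough usage sketch (in comments only; the exact coefficient construction depends on the
# controldiffeq helpers referenced in the docstrings above, and `func`, the shapes and the
# channel counts here are placeholders, not values from this file):
#
#     coeffs = controldiffeq.natural_cubic_spline_coeffs(times, x)  # x: [batch, len(times), input_channels]
#     model = NeuralCDE(func, input_channels, hidden_channels, output_channels)
#     pred_y = model(times, coeffs, final_index)                    # [batch, output_channels]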
| [
"torch.nn.Linear",
"torch.cat",
"torch.zeros"
] | 1.0.0 | dungxibo123/NeuralCDE | 19f7ed24223f5822142c676127c92d818d290903 |
0.4 | import torch
import torch.nn as nn
from saliency.saliency import Saliency
class DeconvSaliency(Saliency):
"""docstring for DeconvSaliency."""
def __init__(self, model):
super(DeconvSaliency, self).__init__(model)
def guided_relu_hook(self, module, grad_in, grad_out):
return (torch.nn.functional.relu(grad_in[0]), )
def generate_saliency(self, input, target):
input.requires_grad = True
self.model.zero_grad()
for module in self.model.modules():
if type(module) == nn.ReLU:
module.register_backward_hook(self.guided_relu_hook)
output = self.model(input)
grad_outputs = torch.zeros_like(output)
grad_outputs[:, target] = 1
output.backward(gradient = grad_outputs)
input.requires_grad = False
#print(input)
return (input.grad.clone()[0] * input)
| [
"torch.zeros_like",
"torch.nn.functional.relu"
] | 0.4.0 | dendisuhubdy/pytorch-saliency | dcb3499be127637435a577cb42161b3e096aa28d |
0.4 | import torch
from saliency.saliency import Saliency
import numpy as np
from scipy.ndimage import label
import torchvision
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import matplotlib.pyplot as plt
import random
import copy
class LimeSaliency(Saliency):
def __init__(self, model):
super(LimeSaliency, self).__init__(model)
def generate_saliency(self, input, target):
#self.model.zero_grad()
output = self.model(input)
#index: 0 layer: HP
# index: 1 layer: Ship
# index: 2 layer: Small Towers
# index: 3 layer: Big Towers
# index: 4 layer: Small Cities
# index: 5 layer: Big Cities
# index: 6 layer: Friend
# index: 7 layer: Enemy
#return (input.grad.clone()[0] * input)
input2 = input.clone()
image = np.zeros((40, 40))
input2 = input2.view(40, 40, 8)
input3 = input.clone()
#logical or of the input images to get the original image
#only do or over small big cities and towers to get proper coordinates of cities and towers
#otherwise it will include enemy ship too if enemy channel is included which will cause object to
#be merged with tower or city object
for i in range(2,6):
image = np.logical_or(image, input2[:, :, i].numpy())*1
#get the number of objects in the image
labeled_array, num_objects = label(image)
indices = []
for i in range(num_objects):
indices.append(np.argwhere(labeled_array == i+1))
#self.generate_file(labeled_array, 'labeled_array')
# print('object 1\n')
# print(indices[0])
#print('object 2\n')
#print(indices[1])
#print('object 3\n')
#print(indices[2])
#print('object 4\n')
#print(indices[3])
#print('object 5\n')
#print(indices[4])
#hp
hp_array, hp = label(input2[:, :, 0].numpy())
hp_indices = []
for i in range(hp):
hp_indices.append(np.argwhere(hp_array == i+1))
#self.generate_file(hp_array, 'hp_array')
#ship
#remove agent because we don't want to perturb that
ship_image = input2[:, :, 1].clone().numpy()
ship_image[19][20] = 0
ship_image[20][20] = 0
ship_image[21][20] = 0
ship_array, ships = label(ship_image)
# print('ships ', ships )
ship_indices = []
for i in range(ships):
ship_indices.append(np.argwhere(ship_array == i+1))
#self.generate_file(ship_array, 'ship_array')
values = torch.zeros(40*40*5)
values = values.view(40, 40, 5)
input2 = input.clone()
input2 = input2.view(40, 40, 8)
features = self.generate_features(input2)
#print(features)
outputs = output[:, target].data.numpy()
# index: 0 layer: HP
# index: 1 layer: Ship
# index: 2 layer: Small Towers
# index: 3 layer: Big Towers
# index: 4 layer: Small Cities
# index: 5 layer: Big Cities
# index: 6 layer: Friend
# index: 7 layer: Enemy
#output layers:
#0 HP
#1 agent
#2 size
#3 type
#4 friend/enemy
#here i refers to the output salient layers
#print('num_objects', num_objects)
for i in range(5):
if i==0:# output HP
for j in range(hp):
for k in range(hp_indices[j].shape[0]):
x = hp_indices[j][k][0]
y = hp_indices[j][k][1]
temp = 0.3*input2[:, :, 0][x][y]
input2[:, :, 0][x][y] += temp
perturbed_output = self.model(input2.view(1, 12800))
feature = self.generate_features(input2)
features = np.concatenate((features, feature))
outputs = np.concatenate((outputs, perturbed_output[:, target].data.numpy()))
input2 = input.clone()
input2 = input2.view(40, 40, 8)
elif i==1:#output agent
for j in range(ships):
for k in range(ship_indices[j].shape[0]):
x = ship_indices[j][k][0]
y = ship_indices[j][k][1]
if input2[:, :, 1][x][y] == 1:
input2[:, :, 1][x][y] = 0
perturbed_output = self.model(input2.view(1, 12800))
feature = self.generate_features(input2)
features = np.concatenate((features, feature))
outputs = np.concatenate((outputs, perturbed_output[:, target].data.numpy()))
input2 = input.clone()
input2 = input2.view(40, 40, 8)
elif i==2: #output size
#print('in i== 2')
for l in range(2, 6):
#print('layer: ',l)
for j in range(num_objects):
# print('object: ',j)
s = 0
for k in range(indices[j].shape[0]):
x = indices[j][k][0]
y = indices[j][k][1]
# print('x: '+str(x)+' y: '+str(y))
# print('Value of input: '+str(input2[:, :, i][x][y]))
# print(input2[:, :, l][x][y])
if l == 2 or l==4: #small tower/city
if input2[:, :, l][x][y] == 1:
s = 1
input2[:, :, l][x][y] = 0
input2[:, :, l+1][x][y] = 1
else: #big tower/city
if input2[:, :, l ][x][y] == 1:
s = 1
input2[:, :, l][x][y] = 0
input2[:, :, l-1][x][y] = 1
#print(saliency)
if s==1:
perturbed_output = self.model(input2.view(1, 12800))
feature = self.generate_features(input2)
features = np.concatenate((features, feature))
outputs = np.concatenate((outputs, perturbed_output[:, target].data.numpy()))
input2 = input.clone()
input2 = input2.view(40, 40, 8)
#print(saliency[0][target])
elif i==3: #output type
for l in range(2, 6):
for j in range(num_objects):
s = 0
for k in range(indices[j].shape[0]):
x = indices[j][k][0]
y = indices[j][k][1]
# print('x: '+str(x)+' y: '+str(y))
# print('Value of input: '+str(input2[:, :, i][x][y]))
if l == 2 or l == 3: #small tower/city
if input2[:, :, l][x][y] == 1:
s = 1
input2[:, :, l][x][y] = 0
input2[:, :, l+2][x][y] = 1
else: #big tower/city
if input2[:, :, l ][x][y] == 1:
s = 1
input2[:, :, l][x][y] = 0
input2[:, :, l-2][x][y] = 1
#print(saliency)
if s==1:
perturbed_output = self.model(input2.view(1, 12800))
feature = self.generate_features(input2)
features = np.concatenate((features, feature))
outputs = np.concatenate((outputs, perturbed_output[:, target].data.numpy()))
input2 = input.clone()
input2 = input2.view(40, 40, 8)
else:# output frenemy
for l in range(6, 8):
for j in range(num_objects):
s = 0
for k in range(indices[j].shape[0]):
x = indices[j][k][0]
y = indices[j][k][1]
if l == 6:
if input2[:, :, l][x][y] == 1:
s = 1
input2[:, :, l][x][y] = 0
input2[:, :, l+1][x][y] = 1
else:
if input2[:, :, l][x][y] == 1:
s = 1
input2[:, :, l][x][y] = 0
input2[:, :, l-1][x][y] = 1
if s==1:
perturbed_output = self.model(input2.view(1, 12800))
feature = self.generate_features(input2)
features = np.concatenate((features, feature))
outputs = np.concatenate((outputs, perturbed_output[:, target].data.numpy()))
input2 = input.clone()
input2 = input2.view(40, 40, 8)
#print(features)
#print(outputs)
linear_model = LinearRegressionModel(21, 1)
linear_model.train()
criterion = nn.L1Loss()
optimiser = torch.optim.SGD(linear_model.parameters(), lr = 0.01)
epochs = 5000
for epoch in range(epochs):
inputs = Variable(torch.from_numpy(features).float())
labels = Variable(torch.from_numpy(outputs))
optimiser.zero_grad()
pred = linear_model.forward(inputs)
loss = criterion(pred, labels)
loss.backward()
optimiser.step()
#print('epoch {}, loss {}'.format(epoch,loss.item()))
#train_loss = eval_net(features, outputs, linear_model)
# print('train_loss: %.5f ' %
# (train_loss))
# weights_ = []
# for name, param in linear_model.parameters() :
# weights_.append(param.data.numpy())
#new_model = copy.deepcopy(linear_model)
weights = linear_model.linear.weight.clone()
weights = weights.data.numpy()
#print(weights)
# print('weights')
# print(weights_)
# weights_ = np.asarray(weights_)
values = self.plot_saliency(weights, input3)
return (values.view(1, 40*40*5))
#0 HP
#1 enemy ship
#2 size
#3 type
#4 friend/enemy
def generate_features(self, input):
hp_array, hp = label(input[:, :, 0].numpy())
hp_indices = []
for i in range(hp):
hp_indices.append(np.argwhere(hp_array == i+1))
image = np.zeros((40, 40))
for i in range(2,6):
image = np.logical_or(image, input[:, :, i].numpy())*1
feature = np.zeros((1, 21))
#put hp of agent. agent will always be object 3 - 1
feature[0][20] = input[:, :, 0][hp_indices[2][0][0]][hp_indices[2][0][1]]
#print(feature[0][20])
#self.generate_file(hp_array, 'feature_hp_array')
#zero out the agent
ship_image = input[:, :, 1].clone().numpy()
ship_image[19][20] = 0
ship_image[20][20] = 0
ship_image[21][20] = 0
ship_image, _ = label(ship_image)
#self.generate_file(ship_array, 'mod_ship_array')
#self.generate_file(image, 'image')
#self.generate_file(ship_image, 'ship_image')
counter = 0
#slicing the hp_array quadrant vise
for i in range(2):
for j in range(2):
#array = hp_array[0 + 20*i :19 + 20*i, 0 + 20*j :19 + 20*j]
#labeled_array, num_objects = label(array)
indices = np.argwhere(image[0 + 20*i :20 + 20*i, 0 + 20*j :20 + 20*j] > 0)
# print(indices)
# print('\n\n')
# print(indices[0][0])
# print(indices[0][1])
x = indices[0][0] + 20*i
y = indices[0][1] + 20*j
# print('x ',x)
# print('y ',y)
#first feature will be HP
feature[0][counter + 0] = input[:, :, 0][x][y]
                #second feature will be checking presence of enemy ship
_, objs = label(ship_image[0 + 20*i :20 + 20*i, 0 + 20*j :20 + 20*j])
feature[0][counter + 1] = (1 if objs>0 else 0)
#third feature check size 1 if big 0 if small
feature[0][counter + 2] = (1 if input[:, :, 3][x][y] == 1
or input[:, :, 5][x][y] == 1 else 0)
#fourth feature will check type. 1 if city 0 if tower
feature[0][counter + 3] = (1 if input[:, :, 4][x][y] == 1
or input[:, :, 5][x][y] == 1 else 0)
                #fifth feature will check friend/enemy. 1 if friend 0 if enemy
feature[0][counter + 4] = (1 if input[:, :, 6][x][y] == 1 else 0)
counter += 5
return feature
def generate_file(self, array, name):
f = open(str(name)+'.txt', 'w')
f.write('\n\n\n')
for i in range(array.shape[0]):
for j in range(array.shape[1]):
f.write(str(array[i,j]))
f.write('\n')
f.close()
def plot_saliency(self, feature, input3):
print('in plot ')
values = torch.zeros(40*40*5)
values = values.view(40, 40, 5)
input3 = input3.view(40,40,8)
feature = torch.from_numpy(feature).float()
print('feature: ')
print(feature)
#this will give you dimensions of only objects
image = np.zeros((40, 40))
for i in range(2,6):
image = np.logical_or(image, input3[:, :, i].numpy())*1
labeled_array, num_objects = label(image)
self.generate_file(image, 'image')
ship_image = input3[:, :, 1].clone().numpy()
ship_image[19][20] = 0
ship_image[20][20] = 0
ship_image[21][20] = 0
ship_image, _ = label(ship_image)
self.generate_file(ship_image, 'ship_image')
counter = 0
#slicing the hp_array quadrant vise
for i in range(2):
for j in range(2):
#array = hp_array[0 + 20*i :19 + 20*i, 0 + 20*j :19 + 20*j]
#labeled_array, num_objects = label(array)
indices = np.argwhere(image[0 + 20*i :20 + 20*i, 0 + 20*j :20 + 20*j] > 0)
                #second feature will be checking presence of enemy ship
print('i ',i)
print('j ',j)
print(indices)
print('\n\n')
# print(indices[0][0])
# print(indices[0][1])
#first take care of HP
for k in range(indices.shape[0]):
x = indices[k][0] + 20*i
y = indices[k][1] + 20*j
print('x ',x)
print('y ',y)
#first feature will be HP
values[:, :, 0][x][y] = feature[0][counter + 0]
values[:, :, 2][x][y] = feature[0][counter + 2]
values[:, :, 3][x][y] = feature[0][counter + 3]
values[:, :, 4][x][y] = feature[0][counter + 4]
                #second feature will be checking presence of enemy ship
_, objs = label(ship_image[0 + 20*i :20 + 20*i, 0 + 20*j :20 + 20*j])
enemytank_indices = np.argwhere(ship_image[0 + 20*i :20 + 20*i, 0 + 20*j :20 + 20*j]>0)
if objs > 0:
print('objs ')
print(objs)
for k in range(enemytank_indices.shape[0]):
x = enemytank_indices[k][0] + 20*i
y = enemytank_indices[k][1] + 20*j
print('x ',x)
print('y ',y)
values[:, :, 1][x][y] = feature[0][counter + 1]
values[:, :, 0][x][y] = feature[0][counter + 0]
# #third feature check size 1 if big 0 if small
# feature[0][counter + 2] = (1 if input[:, :, 3][x][y] == 1
# or input[:, :, 5][x][y] == 1 else 0)
# #fourth feature will check type. 1 if city 0 if tower
# feature[0][counter + 3] = (1 if input[:, :, 4][x][y] == 1
# or input[:, :, 5][x][y] == 1 else 0)
# #fifth feature will check friend/enemy. 1 if friend 0 if enemy
# feature[0][counter + 4] = (1 if input[:, :, 6][x][y] == 1 else 0)
counter += 5
values[:, :, 0][19][20] = feature[0][20]
values[:, :, 0][20][20] = feature[0][20]
values[:, :, 0][21][20] = feature[0][20]
return values
class LinearRegressionModel(nn.Module):
def __init__(self, input_dim, output_dim):
super(LinearRegressionModel, self).__init__()
self.linear = nn.Linear(input_dim, output_dim)
def forward(self, x):
out = self.linear(x)
return out
def eval_net(x, y, model):
correct = 0
total = 0
total_loss = 0
model.eval() # set model to evaluation mode
criterion = nn.L1Loss()
for i, (x1, y1) in enumerate(zip(x, y)):
inputs = Variable(torch.from_numpy(x1).float())
labels = Variable(torch.from_numpy(y1))
pred = model.forward(inputs)
total += labels.size(0)
#correct += (pred == labels.data).sum()
loss = criterion(pred, labels)
total_loss += loss.item()
#total_loss += loss.item()
model.train() # set model back to train mode
return total_loss / total
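# Summary of the approach above: generate_saliency perturbs each detected object channel-wise
# (HP, enemy ship, size, type, friend/enemy), records how the model's output for the target
# action changes, fits a linear surrogate (trained with L1 loss) over the 21 hand-crafted
# features, and plot_saliency maps the learned weights back onto the 40x40x5 saliency tensor.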
| [
"torch.zeros",
"torch.nn.Linear",
"torch.nn.L1Loss",
"torch.from_numpy"
] | 0.4.0 | dendisuhubdy/pytorch-saliency | dcb3499be127637435a577cb42161b3e096aa28d |
1.7 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Mapping, Sequence, Type, Union
import torch
import torchvision
from pytorch_lightning.metrics import Accuracy
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from torch import nn
from torch.nn import functional as F
from flash.core.classification import ClassificationTask
from flash.vision.classification.backbones import torchvision_backbone_and_num_features
from flash.vision.classification.data import ImageClassificationData, ImageClassificationDataPipeline
class ImageClassifier(ClassificationTask):
"""Task that classifies images.
Args:
num_classes: Number of classes to classify.
backbone: A model to use to compute image features.
pretrained: Use a pretrained backbone.
loss_fn: Loss function for training, defaults to cross entropy.
optimizer: Optimizer to use for training, defaults to `torch.optim.SGD`.
metrics: Metrics to compute for training and evaluation.
learning_rate: Learning rate to use for training, defaults to `1e-3`
"""
def __init__(
self,
num_classes,
backbone="resnet18",
num_features: int = None,
pretrained=True,
loss_fn: Callable = F.cross_entropy,
optimizer: Type[torch.optim.Optimizer] = torch.optim.SGD,
metrics: Union[Callable, Mapping, Sequence, None] = (Accuracy()),
learning_rate: float = 1e-3,
):
super().__init__(
model=None,
loss_fn=loss_fn,
optimizer=optimizer,
metrics=metrics,
learning_rate=learning_rate,
)
self.save_hyperparameters()
self.backbone, num_features = torchvision_backbone_and_num_features(backbone, pretrained)
self.head = nn.Sequential(
nn.AdaptiveAvgPool2d((1, 1)),
nn.Flatten(),
nn.Linear(num_features, num_classes),
)
def forward(self, x) -> Any:
x = self.backbone(x)
return self.head(x)
@staticmethod
def default_pipeline() -> ImageClassificationDataPipeline:
return ImageClassificationData.default_pipeline()
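# Rough usage sketch (in comments; the 10-class setting and 224x224 input size are
# illustrative assumptions, not taken from this file):
#
#     model = ImageClassifier(num_classes=10, backbone="resnet18", pretrained=False)
#     logits = model(torch.randn(2, 3, 224, 224))  # expected shape: [2, 10]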
| [
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.Flatten"
] | 1.7.1 | billy-horn/lightning-flash | 61c741d37182d137f39b771879254db8fd20308f |
1.7 | from __future__ import print_function
import os
import argparse
import torch
import torch.backends.cudnn as cudnn
from PIL import Image
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
import numpy as np
import time
from data.config import cfg_mobilenetv2
from layers.functions.prior_box import PriorBox
from utils.nms.py_cpu_nms import py_cpu_nms
import cv2
from models.retinaface_mbv2 import RetinaFace
import math
from math import cos, sin
from utils.box_utils import decode, decode_landm
from utils.timer import Timer
import matplotlib.cm
import copy
from scipy.spatial.transform import Rotation
from matplotlib import pyplot as plt
parser = argparse.ArgumentParser(description='Test')
parser.add_argument('-m', '--trained_model', default='/home/lyp/mos_source_code_open/MOS-Multi-Task-Face-Detect/pt17_pose_weights_mbv2/mobilenetv2_epoch_249.pth',
type=str, help='Trained state_dict file path to open')
parser.add_argument('--network', default='shuffle_0.5', help='Backbone network mobile0.25 or slim or RFB')
parser.add_argument('--origin_size', default=True, type=str, help='Whether use origin image size to evaluate')
parser.add_argument('--long_side', default=840, help='when origin_size is false, long_side is scaled size(320 or 640 for long side)')
parser.add_argument('--save_folder', default='./widerface_evaluate/widerface_txt/', type=str, help='Dir to save txt results')
parser.add_argument('--cpu', action="store_true", default=True, help='Use cpu inference')
parser.add_argument('--confidence_threshold', default=0.55, type=float, help='confidence_threshold')
parser.add_argument('--top_k', default=5000, type=int, help='top_k')
parser.add_argument('--nms_threshold', default=0.4, type=float, help='nms_threshold')
parser.add_argument('--keep_top_k', default=750, type=int, help='keep_top_k')
parser.add_argument('--save_image', action="store_true", default=True, help='show detection results')
parser.add_argument('--vis_thres', default=0.55, type=float, help='visualization_threshold')
args = parser.parse_args()
def get_pose(vertices, twod_landmarks, camera_intrinsics, initial_pose=None):
threed_landmarks = vertices
twod_landmarks = np.asarray(twod_landmarks).astype("float32")
# if initial_pose is provided, use it as a guess to solve new pose
if initial_pose is not None:
initial_pose = np.asarray(initial_pose)
retval, rvecs, tvecs = cv2.solvePnP(
threed_landmarks,
twod_landmarks,
camera_intrinsics,
None,
rvec=initial_pose[:3],
tvec=initial_pose[3:],
flags=cv2.SOLVEPNP_EPNP,
useExtrinsicGuess=True,
)
else:
retval, rvecs, tvecs = cv2.solvePnP(
threed_landmarks,
twod_landmarks,
camera_intrinsics,
None,
flags=cv2.SOLVEPNP_EPNP,
)
rotation_mat = np.zeros(shape=(3, 3))
R = cv2.Rodrigues(rvecs, rotation_mat)[0]
RT = np.column_stack((R, tvecs))
P = np.matmul(camera_intrinsics, RT)
dof = np.append(rvecs, tvecs)
return P, dof
def bbox_is_dict(bbox):
# check if the bbox is a not dict and convert it if needed
if not isinstance(bbox, dict):
temp_bbox = {}
temp_bbox["left"] = bbox[0]
temp_bbox["top"] = bbox[1]
temp_bbox["right"] = bbox[2]
temp_bbox["bottom"] = bbox[3]
bbox = temp_bbox
return bbox
def get_bbox_intrinsics(image_intrinsics, bbox):
# crop principle point of view
bbox_center_x = bbox["left"] + ((bbox["right"] - bbox["left"]) // 2)
bbox_center_y = bbox["top"] + ((bbox["bottom"] - bbox["top"]) // 2)
# create a camera intrinsics from the bbox center
bbox_intrinsics = image_intrinsics.copy()
bbox_intrinsics[0, 2] = bbox_center_x
bbox_intrinsics[1, 2] = bbox_center_y
return bbox_intrinsics
def pose_bbox_to_full_image(pose, image_intrinsics, bbox):
# check if bbox is np or dict
bbox = bbox_is_dict(bbox)
# rotation vector
rvec = pose[:3].copy()
# translation and scale vector
tvec = pose[3:].copy()
# get camera intrinsics using bbox
bbox_intrinsics = get_bbox_intrinsics(image_intrinsics, bbox)
# focal length
focal_length = image_intrinsics[0, 0]
# bbox_size
bbox_width = bbox["right"] - bbox["left"]
bbox_height = bbox["bottom"] - bbox["top"]
bbox_size = bbox_width + bbox_height
# adjust scale
tvec[2] *= focal_length / bbox_size
# project crop points using the crop camera intrinsics
projected_point = bbox_intrinsics.dot(tvec.T)
# reverse the projected points using the full image camera intrinsics
tvec = projected_point.dot(np.linalg.inv(image_intrinsics.T))
# same for rotation
rmat = Rotation.from_rotvec(rvec).as_matrix()
# project crop points using the crop camera intrinsics
projected_point = bbox_intrinsics.dot(rmat)
# reverse the projected points using the full image camera intrinsics
rmat = np.linalg.inv(image_intrinsics).dot(projected_point)
rvec = Rotation.from_matrix(rmat).as_rotvec()
return np.concatenate([rvec, tvec])
def check_keys(model, pretrained_state_dict):
ckpt_keys = set(pretrained_state_dict.keys())
model_keys = set(model.state_dict().keys())
used_pretrained_keys = model_keys & ckpt_keys
unused_pretrained_keys = ckpt_keys - model_keys
missing_keys = model_keys - ckpt_keys
print('Missing keys:{}'.format(len(missing_keys)))
print('Unused checkpoint keys:{}'.format(len(unused_pretrained_keys)))
print('Used keys:{}'.format(len(used_pretrained_keys)))
assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint'
return True
def remove_prefix(state_dict, prefix):
''' Old style model is stored with all names of parameters sharing common prefix 'module.' '''
print('remove prefix \'{}\''.format(prefix))
f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
return {f(key): value for key, value in state_dict.items()}
def load_model(model, pretrained_path, load_to_cpu):
print('Loading pretrained model from {}'.format(pretrained_path))
if load_to_cpu:
pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
else:
device = torch.cuda.current_device()
pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device))
if "state_dict" in pretrained_dict.keys():
pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')
else:
pretrained_dict = remove_prefix(pretrained_dict, 'module.')
check_keys(model, pretrained_dict)
model.load_state_dict(pretrained_dict, strict=False)
return model
def plot_pose_cube(img, yaw, pitch, roll, tdx=None, tdy=None, size=150.):
# Input is a cv2 image
# pose_params: (pitch, yaw, roll, tdx, tdy)
# Where (tdx, tdy) is the translation of the face.
# For pose we have [pitch yaw roll tdx tdy tdz scale_factor]
p = pitch * np.pi / 180
y = -(yaw * np.pi / 180)
r = roll * np.pi / 180
    if tdx is not None and tdy is not None:
face_x = tdx - 0.50 * size
face_y = tdy - 0.50 * size
else:
height, width = img.shape[:2]
face_x = width / 2 - 0.5 * size
face_y = height / 2 - 0.5 * size
x1 = size * (cos(y) * cos(r)) + face_x
y1 = size * (cos(p) * sin(r) + cos(r) * sin(p) * sin(y)) + face_y
x2 = size * (-cos(y) * sin(r)) + face_x
y2 = size * (cos(p) * cos(r) - sin(p) * sin(y) * sin(r)) + face_y
x3 = size * (sin(y)) + face_x
y3 = size * (-cos(y) * sin(p)) + face_y
# Draw base in red
cv2.line(img, (int(face_x), int(face_y)), (int(x1),int(y1)),(0,0,255),3)
cv2.line(img, (int(face_x), int(face_y)), (int(x2),int(y2)),(0,0,255),3)
cv2.line(img, (int(x2), int(y2)), (int(x2+x1-face_x),int(y2+y1-face_y)),(0,0,255),3)
cv2.line(img, (int(x1), int(y1)), (int(x1+x2-face_x),int(y1+y2-face_y)),(0,0,255),3)
# Draw pillars in blue
cv2.line(img, (int(face_x), int(face_y)), (int(x3),int(y3)),(255,0,0),2)
cv2.line(img, (int(x1), int(y1)), (int(x1+x3-face_x),int(y1+y3-face_y)),(255,0,0),2)
cv2.line(img, (int(x2), int(y2)), (int(x2+x3-face_x),int(y2+y3-face_y)),(255,0,0),2)
cv2.line(img, (int(x2+x1-face_x),int(y2+y1-face_y)), (int(x3+x1+x2-2*face_x),int(y3+y2+y1-2*face_y)),(255,0,0),2)
# Draw top in green
cv2.line(img, (int(x3+x1-face_x),int(y3+y1-face_y)), (int(x3+x1+x2-2*face_x),int(y3+y2+y1-2*face_y)),(0,255,0),2)
cv2.line(img, (int(x2+x3-face_x),int(y2+y3-face_y)), (int(x3+x1+x2-2*face_x),int(y3+y2+y1-2*face_y)),(0,255,0),2)
cv2.line(img, (int(x3), int(y3)), (int(x3+x1-face_x),int(y3+y1-face_y)),(0,255,0),2)
cv2.line(img, (int(x3), int(y3)), (int(x3+x2-face_x),int(y3+y2-face_y)),(0,255,0),2)
return img
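# draw_axis: draw the head's X (red), Y (green) and Z (blue) axes from yaw/pitch/roll
# given in degrees; yaw is negated to match the image coordinate convention used here.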
def draw_axis(img, yaw, pitch, roll, tdx=None, tdy=None, size = 100):
pitch = pitch * np.pi / 180
yaw = -(yaw * np.pi / 180)
roll = roll * np.pi / 180
    if tdx is not None and tdy is not None:
tdx = tdx
tdy = tdy
else:
height, width = img.shape[:2]
tdx = width / 2
tdy = height / 2
# X-Axis pointing to right. drawn in red
x1 = size * (cos(yaw) * cos(roll)) + tdx
y1 = size * (cos(pitch) * sin(roll) + cos(roll) * sin(pitch) * sin(yaw)) + tdy
# Y-Axis | drawn in green
# v
x2 = size * (-cos(yaw) * sin(roll)) + tdx
y2 = size * (cos(pitch) * cos(roll) - sin(pitch) * sin(yaw) * sin(roll)) + tdy
# Z-Axis (out of the screen) drawn in blue
x3 = size * (sin(yaw)) + tdx
y3 = size * (-cos(yaw) * sin(pitch)) + tdy
cv2.line(img, (int(tdx), int(tdy)), (int(x1),int(y1)),(0,0,255),3)
cv2.line(img, (int(tdx), int(tdy)), (int(x2),int(y2)),(0,255,0),3)
cv2.line(img, (int(tdx), int(tdy)), (int(x3),int(y3)),(255,0,0),4)
return img
p_in = []
p_out = []
#
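# hook_fn can be registered on a module (module.register_forward_hook(hook_fn)) to
# capture intermediate inputs/outputs into p_in / p_out, e.g. for heatmap visualisation.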
def hook_fn(module, inputs, outputs):
p_in.append(inputs)
p_out.append(outputs)
def put_heatmap_on_image(ori_image, activation, colormap_name):
"""
    ori_image (PIL image): the original input image
    activation (numpy arr): the activation map (the p2_logits obtained above)
    colormap_name (str): which matplotlib.cm colormap to apply
"""
# colormap
color_map = matplotlib.cm.get_cmap(colormap_name)
    # Apply the colormap to the activation, i.e. choose how the activation is rendered
no_trans_heatmap = color_map(activation)
    # Add an alpha channel (transparency)
heatmap = copy.copy(no_trans_heatmap)
heatmap[:, :, 3] = 0.4
heatmap = Image.fromarray((heatmap*255).astype(np.uint8))
no_trans_heatmap = Image.fromarray((no_trans_heatmap*255).astype(np.uint8))
#
heatmap_on_image = Image.new("RGBA", ori_image.size)
heatmap_on_image = Image.alpha_composite(
heatmap_on_image, ori_image.convert("RGBA"))
heatmap_on_image = Image.alpha_composite(
heatmap_on_image, heatmap)
return no_trans_heatmap, heatmap_on_image
def rot2Euler(imgpath, rotation_vector):
# calculate rotation angles
theta = cv2.norm(rotation_vector, cv2.NORM_L2)
# transformed to quaterniond
w = math.cos(theta / 2)
x = math.sin(theta / 2) * rotation_vector[0][0] / theta
y = math.sin(theta / 2) * rotation_vector[1][0] / theta
z = math.sin(theta / 2) * rotation_vector[2][0] / theta
ysqr = y * y
# pitch (x-axis rotation)
t0 = 2.0 * (w * x + y * z)
t1 = 1.0 - 2.0 * (x * x + ysqr)
print('t0:{}, t1:{}'.format(t0, t1))
pitch = math.atan2(t0, t1) - 0.8356857
# yaw (y-axis rotation)
t2 = 2.0 * (w * y - z * x)
if t2 > 1.0:
t2 = 1.0
if t2 < -1.0:
t2 = -1.0
yaw = math.asin(t2) + 0.005409
# roll (z-axis rotation)
t3 = 2.0 * (w * z + x * y)
t4 = 1.0 - 2.0 * (ysqr + z * z)
roll = math.atan2(t3, t4) - 2.573345436
    # Unit conversion: radians to degrees
pitch_degree = int((pitch / math.pi) * 180)
yaw_degree = int((yaw / math.pi) * 180)
roll_degree = int((roll / math.pi) * 180)
#drawResult(imgpath, yaw, pitch, roll, save_dir)
print("Radians:")
print("Yaw:", yaw_degree)
print("Pitch:", pitch_degree)
print("Roll:", roll_degree)
str_angle=[yaw_degree,pitch_degree,roll_degree]
return str_angle
# img = cv2.imread(imgpath)
# draw = img.copy()
# cv2.putText(draw, "Yaw:" + str(yaw), (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0))
# cv2.putText(draw, "Pitch:" + str(pitch), (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0))
# cv2.putText(draw, "Roll:" + str(roll), (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0))
# cv2.waitKey()
# cv2.imwrite(os.path.splitext(imgpath)[0] + '_pose_estimate1.jpg', draw)
#
# print("Degrees:")
# draw = img.copy()
# if yaw_degree > 0:
# output_yaw = "face turns left:" + str(abs(yaw_degree)) + " degrees"
# cv2.putText(draw, output_yaw, (20, 40), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 255, 0))
# print(output_yaw)
# if yaw_degree < 0:
# output_yaw = "face turns right:" + str(abs(yaw_degree)) + " degrees"
# cv2.putText(draw, output_yaw, (20, 40), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 255, 0))
# print(output_yaw)
# if pitch_degree < 0:
# output_pitch = "face downwards:" + str(abs(pitch_degree)) + " degrees"
# cv2.putText(draw, output_pitch, (20, 80), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 255, 0))
# print(output_pitch)
# if pitch_degree > 0:
# output_pitch = "face upwards:" + str(abs(pitch_degree)) + " degrees"
# cv2.putText(draw, output_pitch, (20, 80), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 255, 0))
# print(output_pitch)
# if roll_degree < 0:
# output_roll = "face bends to the right:" + str(abs(roll_degree)) + " degrees"
# cv2.putText(draw, output_roll, (20, 120), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 255, 0))
# print(output_roll)
# if roll_degree > 0:
# output_roll = "face bends to the left:" + str(abs(roll_degree)) + " degrees"
# cv2.putText(draw, output_roll, (20, 120), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 255, 0))
# print(output_roll)
# if abs(yaw) < 0.00001 and abs(pitch) < 0.00001 and abs(roll) < 0.00001:
# cv2.putText(draw, "Initial ststus", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 255, 0))
# print("Initial ststus")
# cv2.imwrite(save_dir + os.path.splitext(imgpath)[0] + '_pose_estimate2.jpg', draw)
def headPosEstimate(imgpath, landmarks):
# 3D model points
model_3d_points = np.array(([-165.0, 170.0, -115.0], # Left eye
[165.0, 170.0, -115.0], # Right eye
[0.0, 0.0, 0.0], # Nose tip
[-150.0, -150.0, -125.0], # Left Mouth corner
[150.0, -150.0, -125.0]), dtype=np.double) # Right Mouth corner)
    landmarks = landmarks.astype(np.double)  # cast values; assigning to .dtype would only reinterpret the buffer
# Camera internals
img = cv2.imread(imgpath)
img_size = img.shape
focal_length = img_size[1]
center = [img_size[1]/2, img_size[0]/2]
camera_matrix = np.array(([focal_length, 0, center[0]],
[0, focal_length, center[1]],
[0, 0, 1]),dtype=np.double)
dist_coeffs = np.array([0,0,0,0], dtype=np.double)
found, rotation_vector, translation_vector = cv2.solvePnP(model_3d_points, landmarks, camera_matrix, dist_coeffs)
angle_result=rot2Euler(imgpath,rotation_vector)
return angle_result
if __name__ == '__main__':
torch.set_grad_enabled(False)
cfg = None
net = None
cfg = cfg_mobilenetv2
net = RetinaFace(cfg=cfg, phase='test')
net = load_model(net, args.trained_model, args.cpu)
net.eval()
print('Finished loading model!')
#print(net)
cudnn.benchmark = True
#device = torch.device("cpu" if args.cpu else "cuda")
device=torch.device("cuda")
net = net.to(device)
flag = True
##################################################
#derface_val_dir = "/home/lyp/paper_experiments/pre_data/code/real_test/shuffilenet_face/data/val/wider_val.txt"
#input1 = open(derface_val_dir)
#lines1 = input1.readlines()
time1 = 0
#cap=cv2.VideoCapture("/home/lyp/Downloads/333.mp4")
#fourcc = cv2.VideoWriter_fourcc(*'XVID')
#out = cv2.VideoWriter("/home/lyp/o11.avi", fourcc, 25.0, (640, 480))
#while(cap.isOpened()):
# ret,frame=cap.read()
#for i in range(1):
#for iii in range(len(lines1)):
#image_path = "/home/lyp/paper_experiments/pre_data/code/real_test/shuffilenet_face/data/val/images" + lines1[iii].strip()
image_path = "/home/lyp/mos_source_code_open/MOS-Multi-Task-Face-Detect/figures/4_Dancing_Dancing_4_85.jpg"
img_raw = cv2.imread(image_path, cv2.IMREAD_COLOR)
img = np.float32(img_raw)
# testing scale
target_size = args.long_side
max_size = args.long_side
im_shape = img.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
resize = float(target_size) / float(im_size_min)
# prevent bigger axis from being more than max_size:
if np.round(resize * im_size_max) > max_size:
resize = float(max_size) / float(im_size_max)
if args.origin_size:
resize = 1
if resize != 1:
img = cv2.resize(img, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
# img = cv2.resize(img, (640,480))
img_rgb = img_raw.copy()
im_height, im_width, _ = img.shape
print(im_height, im_width)
scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
img -= (104, 117, 123)
img = img.transpose(2, 0, 1)
img = torch.from_numpy(img).unsqueeze(0)
img = img.to(device)
scale = scale.to(device)
tic = time.time()
loc, conf, landms, head_cls_y, head_cls_p, head_cls_r = net(img) # forward pass
tic1 = time.time() - tic
head_cls_y = head_cls_y.squeeze(0)
head_cls_p = head_cls_p.squeeze(0)
head_cls_r = head_cls_r.squeeze(0)
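    # HopeNet-style soft-argmax decoding: each pose head scores 66 bins of 3 degrees; the
    # expected bin index is mapped to an angle in roughly [-99, 99] degrees below.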
idx_tensor = [idx for idx in range(66)]
idx_tensor = torch.FloatTensor(idx_tensor).to(device)
head_cls_y = torch.sum(head_cls_y * idx_tensor, 1).to(device) * 3 - 99
head_cls_p = torch.sum(head_cls_p * idx_tensor, 1).to(device) * 3 - 99
head_cls_r = torch.sum(head_cls_r * idx_tensor, 1).to(device) * 3 - 99
priorbox = PriorBox(cfg, image_size=(im_height, im_width))
priors = priorbox.forward()
priors = priors.to(device)
prior_data = priors.data
boxes = decode(loc.data.squeeze(0), prior_data, cfg['variance'])
boxes = boxes * scale / resize
boxes = boxes.cpu().numpy()
scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
landms = decode_landm(landms.data.squeeze(0), prior_data, cfg['variance'])
head_cls_y = head_cls_y.cpu().numpy()
head_cls_p = head_cls_p.cpu().numpy()
head_cls_r = head_cls_r.cpu().numpy()
scale1 = torch.Tensor([img.shape[3], img.shape[2], img.shape[3], img.shape[2],
img.shape[3], img.shape[2], img.shape[3], img.shape[2],
img.shape[3], img.shape[2]])
scale1 = scale1.to(device)
landms = landms * scale1 / resize
landms = landms.cpu().numpy()
# ignore low scores
inds = np.where(scores > args.confidence_threshold)[0]
boxes = boxes[inds]
landms = landms[inds]
scores = scores[inds]
head_cls_y = head_cls_y[inds]
head_cls_p = head_cls_p[inds]
head_cls_r = head_cls_r[inds]
# keep top-K before NMS
order = scores.argsort()[::-1][:args.top_k]
boxes = boxes[order]
landms = landms[order]
scores = scores[order]
head_cls_y = head_cls_y[order]
head_cls_p = head_cls_p[order]
head_cls_r = head_cls_r[order]
idx_tensor = [idx for idx in range(66)]
idx_tensor = np.array(idx_tensor)
# do NMS
dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
keep = py_cpu_nms(dets, args.nms_threshold)
# keep = nms(dets, args.nms_threshold,force_cpu=args.cpu)
dets = dets[keep, :]
landms = landms[keep]
yaw_predicted = head_cls_y[keep]
pitch_predicted = head_cls_p[keep]
roll_predicted = head_cls_r[keep]
dets = dets[:args.keep_top_k, :]
landms = landms[:args.keep_top_k, :]
yaw_predicted = yaw_predicted[:args.keep_top_k]
pitch_predicted = pitch_predicted[:args.keep_top_k]
roll_predicted = roll_predicted[:args.keep_top_k]
dets = np.concatenate((dets, landms), axis=1)
# fourcc = cv2.VideoWriter_fourcc(*'XVID')
# out = cv2.VideoWriter('/home/lyp/output.avi', fourcc, 20.0, (640, 480))
# img_cv = cv2.imread("/home/lyp/paper_experiments/plot_paper_curve/compare_withsota/10_People_Marching_People_Marching_2_514.jpg")
# image_new2 = Image.fromarray(cv2.cvtColor(img_cv, cv2.COLOR_BGR2RGB))
if args.save_image:
for i in range(len(dets)):
b = dets[i]
if b[4] < args.vis_thres:
continue
text = "{:.4f}".format(b[4])
b = list(map(int, b))
cv2.rectangle(img_rgb, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 3)
cx = b[0]
cy = b[1] + 12
# cv2.putText(img_raw, text, (cx, cy),
# cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))
################################# transfer landmark into pose ##################################
# landmark_array=np.asarray([[b[5], b[6]],[b[7], b[8]],[b[9], b[10]],[b[11], b[12]],[b[13], b[14]]]).astype(float)
# (w, h) = image_new2.size
# bbox_intrinsics = np.array([[w + h, 0, w // 2], [0, w + h, h // 2], [0, 0, 1]])
# P, pose = get_pose(threed_5_points, landmark_array, bbox_intrinsics)
# trans_vertices = renderer.transform_vertices(image_new2, [pose])
# image_new2 = renderer.render(image_new2, trans_vertices, alpha=1)
# image_new2 = Image.fromarray(image_new2)
text = "y:" + str(int(yaw_predicted[i])) # + "," + "p:" + str(int(pitch_predicted[i])) + "," + "r:" + str(
# int(roll_predicted[i]))
cv2.putText(img_rgb, text, (cx - 10, cy - 25),
cv2.FONT_HERSHEY_TRIPLEX, 0.6, (255, 0, 255))
fps_text = "FPS: " + str(int(1 / tic1))
cv2.putText(img_rgb, fps_text, (20, 40),
cv2.FONT_HERSHEY_TRIPLEX, 0.6, (0, 255, 0))
#
# # landms
# cv2.circle(img_rgb, (b[5], b[6]), 1, (0, 0, 255), 4)
# cv2.circle(img_rgb, (b[7], b[8]), 1, (0, 255, 255), 4)
# cv2.circle(img_rgb, (b[9], b[10]), 1, (255, 0, 255), 4)
# cv2.circle(img_rgb, (b[11], b[12]), 1, (0, 255, 0), 4)
# cv2.circle(img_rgb, (b[13], b[14]), 1, (255, 0, 0), 4)
draw_axis(img_rgb, int(yaw_predicted[i]), int(pitch_predicted[i]), int(roll_predicted[i]), tdx=b[9],
tdy=b[10], size=30)
# plot_pose_cube(img_raw, int(yaw_predicted[i]), int(pitch_predicted[i]), int(roll_predicted[i]), tdx=b[0]+0.5*(b[2]-b[0]), tdy=b[1]+0.5*(b[3]-b[1]), size=150.)
# landmark_array = np.asarray(
# [[b[5], b[6]], [b[7], b[8]], [b[9], b[10]], [b[11], b[12]], [b[13], b[14]]]).astype(float)
# #imgpath="/home/lyp/paper_experiments/pre_data/code/real_test/shuffilenet_face/data/val/images/1--Handshaking/1_Handshaking_Handshaking_1_567.jpg"
#
# angle_result=headPosEstimate(image_path, landmark_array)
# text = "y:" + str(int(angle_result[0]))# + "," + "p:" + str(int(angle_result[1])) + "," + "r:" + str(
# int(angle_result[2]))
# cv2.putText(img_rgb, text, (cx - 60, cy - 28),
# cv2.FONT_HERSHEY_TRIPLEX, 0.8, (255, 0, 0))
# out.write(frame)
cv2.imshow("frame", img_rgb)
# if ret == True:
# out.write(img_rgb)
# else:
# break
#cv2.imwrite("/home/lyp/paper_experiments/plot_paper_curve/suppm/img/widerface/results/r68.jpg", img_rgb)
| [
"torch.cuda.current_device",
"torch.load",
"torch.sum",
"torch.FloatTensor",
"torch.Tensor",
"torch.device",
"torch.from_numpy",
"torch.set_grad_enabled"
] | 1.7 | lyp-deeplearning/MOS-Multi-Task-Face-Detect | 1bea754752e13fafdeb06f5fedcba1bd08e836de |
1.10 | import torch
from torch.utils.data import RandomSampler
from pathlib import Path
import sys
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0].parents[0] # CogKGE root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add CogKGE root directory to PATH
from cogkge import *
device=init_cogkge(device_id="0,1,2,3",seed=1)
loader =FB15KLoader(dataset_path="../dataset",download=True)
train_data, valid_data, test_data = loader.load_all_data()
node_lut, relation_lut= loader.load_all_lut()
# loader.describe()
# train_data.describe()
# node_lut.describe()
processor = FB15KProcessor(node_lut, relation_lut,reprocess=True)
train_dataset = processor.process(train_data)
valid_dataset = processor.process(valid_data)
test_dataset = processor.process(test_data)
node_lut,relation_lut=processor.process_lut()
# node_lut.print_table(front=3)
# relation_lut.print_table(front=3)
train_sampler = RandomSampler(train_dataset)
valid_sampler = RandomSampler(valid_dataset)
test_sampler = RandomSampler(test_dataset)
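# DistMult scores a triple (h, r, t) as a trilinear product of head, relation and tail
# embeddings (a bilinear model with a diagonal relation matrix).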
model = DistMult(entity_dict_len=len(node_lut),
relation_dict_len=len(relation_lut),
embedding_dim=50)
loss = NegLogLikehoodLoss(C=0.1)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=0)
metric = Link_Prediction(link_prediction_raw=True,
link_prediction_filt=False,
batch_size=5000000,
reverse=True)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode='min', patience=3, threshold_mode='abs', threshold=5,
factor=0.5, min_lr=1e-9, verbose=True
)
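# Uniform ("unif") negative sampling: corrupt the head or tail entity of each training
# triple with entities drawn uniformly at random.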
negative_sampler = UnifNegativeSampler(triples=train_dataset,
entity_dict_len=len(node_lut),
relation_dict_len=len(relation_lut))
trainer = ScoreTrainer(
train_dataset=train_dataset,
valid_dataset=test_dataset,
train_sampler=train_sampler,
valid_sampler=test_sampler,
model=model,
loss=loss,
optimizer=optimizer,
negative_sampler=negative_sampler,
device=device,
output_path="../dataset",
lookuptable_E= node_lut,
lookuptable_R= relation_lut,
metric=metric,
lr_scheduler=lr_scheduler,
log=True,
trainer_batch_size=100000,
epoch=1000,
visualization=False,
apex=True,
dataloaderX=True,
num_workers=4,
pin_memory=True,
metric_step=100,
save_step=100,
metric_final_model=True,
save_final_model=True,
load_checkpoint= None
)
trainer.train()
evaluator = Evaluator(
test_dataset=test_dataset,
test_sampler=test_sampler,
model=model,
device=device,
metric=metric,
output_path="../dataset",
train_dataset=train_dataset,
valid_dataset=valid_dataset,
lookuptable_E= node_lut,
lookuptable_R= relation_lut,
log=True,
evaluator_batch_size=50000,
dataloaderX=True,
num_workers= 4,
pin_memory=True,
trained_model_path=None
)
evaluator.evaluate()
| [
"torch.utils.data.RandomSampler",
"torch.optim.lr_scheduler.ReduceLROnPlateau"
] | 1.10.1 | CogNLP/CogKGE | 70d851d6489600c1e90eb25b0388a3ceba2f078c |
1.7 | import torch
from torch import nn as nn
from torch.nn.functional import binary_cross_entropy_with_logits, cross_entropy
from bases.nn.conv2d import DenseConv2d
from bases.nn.linear import DenseLinear
from bases.nn.models.base_model import BaseModel
from bases.nn.sequential import DenseSequential
from .utils import is_conv, is_fc
__all__ = ["Conv2", "Conv4"]
class Conv2(BaseModel):
def __init__(self, dict_module: dict = None):
if dict_module is None:
dict_module = dict()
features = nn.Sequential(DenseConv2d(1, 32, kernel_size=5, padding=2), # 32x28x28
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2), # 32x14x14
DenseConv2d(32, 64, kernel_size=5, padding=2), # 64x14x14
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2)) # 64x7x7
classifier = DenseSequential(DenseLinear(64 * 7 * 7, 2048, mode="fan_out"),
nn.ReLU(inplace=True),
DenseLinear(2048, 62, mode="fan_out"))
dict_module["features"] = features
dict_module["classifier"] = classifier
super(Conv2, self).__init__(binary_cross_entropy_with_logits, dict_module)
def collect_layers(self):
self.get_param_layers(self.param_layers, self.param_layer_prefixes)
self.prunable_layers = self.param_layers
self.prunable_layer_prefixes = self.param_layer_prefixes
def forward(self, inputs):
outputs = self.features(inputs)
outputs = outputs.view(outputs.size(0), -1)
outputs = self.classifier(outputs)
return outputs
def loss(self, inputs, labels) -> torch.Tensor:
return self.loss_func(self(inputs), labels)
def to_sparse(self):
new_features = [ft.to_sparse() if isinstance(ft, DenseConv2d) else ft for ft in self.features]
new_module_dict = {"features": nn.Sequential(*new_features), "classifier": self.classifier.to_sparse()}
return self.__class__(new_module_dict)
def remove_empty_channels(self):
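        # Scan each prunable layer's mask: channels whose rows/columns are entirely zero are
        # dropped, and the following layer's input dimension is shrunk consistently.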
list_in_out = []
is_transition = False
prev_is_transition = False
for idx, (layer, next_layer) in enumerate(zip(self.prunable_layers, self.prunable_layers[1:] + [None])):
# works for both conv and fc
if is_conv(layer) and is_fc(next_layer):
is_transition = True
num_out, num_in = layer.weight.size()[:2]
if idx == 0 or prev_is_transition:
list_remain_in = "all"
else:
list_remain_in = set()
for in_id in range(num_in):
mask_slice = layer.mask.index_select(dim=1, index=torch.tensor([in_id]))
if not torch.equal(mask_slice, torch.zeros_like(mask_slice)):
list_remain_in.add(in_id)
if len(list_remain_in) == layer.weight.size()[1]:
list_remain_in = "all"
if next_layer is None or is_transition:
list_remain_out = "all"
else:
list_remain_out = set()
for out_id in range(num_out):
mask_slice = layer.mask.index_select(dim=0, index=torch.tensor([out_id]))
if not torch.equal(mask_slice, torch.zeros_like(mask_slice)):
list_remain_out.add(out_id)
if len(list_remain_out) == layer.weight.size()[0]:
list_remain_out = "all"
list_in_out.append((list_remain_in, list_remain_out))
if prev_is_transition:
prev_is_transition = False
if is_transition:
prev_is_transition = True
is_transition = False
for ((in_indices, out_indices),
(in_indices_next, out_indices_next),
layer,
next_layer) in zip(list_in_out[:-1], list_in_out[1:], self.prunable_layers[:-1],
self.prunable_layers[1:]):
if out_indices == "all" or in_indices_next == "all":
merged_indices = "all"
else:
merged_indices = list(out_indices.intersection(in_indices_next))
if merged_indices != "all":
layer.weight = nn.Parameter(layer.weight.index_select(dim=0, index=torch.tensor(merged_indices)))
layer.mask = layer.mask.index_select(dim=0, index=torch.tensor(merged_indices))
len_merged_indices = len(merged_indices)
if layer.bias is not None:
layer.bias = nn.Parameter(layer.bias[merged_indices])
if is_conv(layer):
layer.out_channels = len_merged_indices
elif is_fc(layer):
layer.out_features = len_merged_indices
next_layer.weight = nn.Parameter(
next_layer.weight.index_select(dim=1, index=torch.tensor(merged_indices)))
next_layer.mask = next_layer.mask.index_select(dim=1, index=torch.tensor(merged_indices))
if is_conv(next_layer):
next_layer.in_channels = len_merged_indices
elif is_fc(next_layer):
next_layer.in_features = len_merged_indices
# class FEMNISTModel(BaseModel):
# def __init__(self, dict_module: dict = None):
# if dict_module is None:
# dict_module = dict()
# features = nn.Sequential(DenseConv2d(1, 32, kernel_size=5, padding=2), # 32x28x28
# nn.ReLU(inplace=True),
# nn.MaxPool2d(2, stride=2), # 32x14x14
# DenseConv2d(32, 64, kernel_size=5, padding=2), # 64x14x14
# nn.ReLU(inplace=True),
# nn.MaxPool2d(2, stride=2)) # 64x7x7
#
# classifier = DenseSequential(DenseLinear(64 * 7 * 7, 2048, init_mode="fan_out"),
# nn.ReLU(inplace=True),
# DenseLinear(2048, 62, init_mode="fan_out"))
#
# dict_module["features"] = features
# dict_module["classifier"] = classifier
#
# super(FEMNISTModel, self).__init__(binary_cross_entropy_with_logits, dict_module)
#
# def collect_layers(self):
# self.get_param_layers(self.param_layers, self.param_layer_prefixes)
# self.prunable_layers = self.param_layers
# self.prunable_layer_prefixes = self.param_layer_prefixes
#
# def forward(self, inputs):
# outputs = self.features(inputs)
# outputs = outputs.view(outputs.size(0), -1)
# outputs = self.classifier(outputs)
# return outputs
#
# def loss(self, inputs, labels) -> torch.Tensor:
# return self.loss_func(self(inputs), labels)
#
# def to_sparse(self):
# new_features = [ft.to_sparse() if isinstance(ft, DenseConv2d) else ft for ft in self.features]
# new_module_dict = {"features": nn.Sequential(*new_features), "classifier": self.classifier.to_sparse()}
# return self.__class__(new_module_dict)
#
# def remove_empty_channels(self):
# list_in_out = []
# is_transition = False
# prev_is_transition = False
# for idx, (layer, next_layer) in enumerate(zip(self.prunable_layers, self.prunable_layers[1:] + [None])):
# # works for both conv and fc
# if is_conv(layer) and is_fc(next_layer):
# is_transition = True
#
# num_out, num_in = layer.weight.size()[:2]
#
# if idx == 0 or prev_is_transition:
# list_remain_in = "all"
# else:
# list_remain_in = set()
# for in_id in range(num_in):
# mask_slice = layer.mask.index_select(dim=1, index=torch.tensor([in_id]))
# if not torch.equal(mask_slice, torch.zeros_like(mask_slice)):
# list_remain_in.add(in_id)
# if len(list_remain_in) == layer.weight.size()[1]:
# list_remain_in = "all"
#
# if next_layer is None or is_transition:
# list_remain_out = "all"
# else:
# list_remain_out = set()
# for out_id in range(num_out):
# mask_slice = layer.mask.index_select(dim=0, index=torch.tensor([out_id]))
# if not torch.equal(mask_slice, torch.zeros_like(mask_slice)):
# list_remain_out.add(out_id)
# if len(list_remain_out) == layer.weight.size()[0]:
# list_remain_out = "all"
#
# list_in_out.append((list_remain_in, list_remain_out))
#
# if prev_is_transition:
# prev_is_transition = False
# if is_transition:
# prev_is_transition = True
# is_transition = False
#
# for ((in_indices, out_indices),
# (in_indices_next, out_indices_next),
# layer,
# next_layer) in zip(list_in_out[:-1], list_in_out[1:], self.prunable_layers[:-1],
# self.prunable_layers[1:]):
#
# if out_indices == "all" or in_indices_next == "all":
# merged_indices = "all"
# else:
# merged_indices = list(out_indices.intersection(in_indices_next))
#
# if merged_indices != "all":
# layer.weight = nn.Parameter(layer.weight.index_select(dim=0, index=torch.tensor(merged_indices)))
# layer.mask = layer.mask.index_select(dim=0, index=torch.tensor(merged_indices))
# len_merged_indices = len(merged_indices)
# if layer.bias is not None:
# layer.bias = nn.Parameter(layer.bias[merged_indices])
# if is_conv(layer):
# layer.out_channels = len_merged_indices
# elif is_fc(layer):
# layer.out_features = len_merged_indices
#
# next_layer.weight = nn.Parameter(
# next_layer.weight.index_select(dim=1, index=torch.tensor(merged_indices)))
# next_layer.mask = next_layer.mask.index_select(dim=1, index=torch.tensor(merged_indices))
# if is_conv(next_layer):
# next_layer.in_channels = len_merged_indices
# elif is_fc(next_layer):
# next_layer.in_features = len_merged_indices
class Conv4(BaseModel):
def __init__(self, dict_module: dict = None):
if dict_module is None:
dict_module = dict()
features = nn.Sequential(DenseConv2d(3, 32, kernel_size=3, padding=1),
nn.BatchNorm2d(32),
nn.MaxPool2d(2),
DenseConv2d(32, 32, kernel_size=3, padding=1),
nn.BatchNorm2d(32),
nn.MaxPool2d(2),
DenseConv2d(32, 32, kernel_size=3, padding=2),
nn.BatchNorm2d(32),
nn.MaxPool2d(2),
DenseConv2d(32, 32, kernel_size=3, padding=2),
nn.BatchNorm2d(32),
nn.MaxPool2d(2))
classifier = DenseLinear(in_features=32 * 6 * 6, out_features=2)
dict_module["features"] = features
dict_module["classifier"] = classifier
super(Conv4, self).__init__(cross_entropy, dict_module)
def collect_layers(self):
self.get_param_layers(self.param_layers, self.param_layer_prefixes)
prunable_ids = [idx for idx, layer in enumerate(self.param_layers) if not isinstance(layer, nn.BatchNorm2d)]
self.prunable_layers = list(self.param_layers[i] for i in prunable_ids)
self.prunable_layer_prefixes = list(self.param_layer_prefixes[i] for i in prunable_ids)
def forward(self, inputs):
outputs = self.features(inputs)
outputs = outputs.view(outputs.size(0), -1)
outputs = self.classifier(outputs)
return outputs
def loss(self, inputs, labels) -> torch.Tensor:
return self.loss_func(self(inputs), labels)
def to_sparse(self):
new_features = [ft.to_sparse() if isinstance(ft, DenseConv2d) else ft for ft in self.features]
new_module_dict = {"features": nn.Sequential(*new_features),
"classifier": self.classifier.to_sparse(transpose=True)}
return self.__class__(new_module_dict)
| [
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.Parameter",
"torch.nn.ReLU",
"torch.tensor",
"torch.zeros_like"
] | 1.7.1 | yeshwanthv5/PruneFL | ad1f7f33b0605d1d79abfbe42ef287fcc613a943 |
1.1 | # Copyright (c) 2019. TsumiNa. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import torch
from torch import nn
__all__ = ['ConvLayer', 'CrystalGraphConvNet']
class ConvLayer(nn.Module):
"""
Convolutional operation on graphs
"""
def __init__(self, atom_fea_len, nbr_fea_len):
"""
Initialize ConvLayer.
Parameters
----------
atom_fea_len: int
Number of atom hidden features.
nbr_fea_len: int
Number of bond features.
"""
super(ConvLayer, self).__init__()
self.atom_fea_len = atom_fea_len
self.nbr_fea_len = nbr_fea_len
self.fc_full = nn.Linear(2 * self.atom_fea_len + self.nbr_fea_len,
2 * self.atom_fea_len)
self.sigmoid = nn.Sigmoid()
self.softplus1 = nn.Softplus()
self.bn1 = nn.BatchNorm1d(2 * self.atom_fea_len)
self.bn2 = nn.BatchNorm1d(self.atom_fea_len)
self.softplus2 = nn.Softplus()
def forward(self, atom_in_fea, nbr_fea, nbr_fea_idx):
"""
Forward pass
N: Total number of atoms in the batch
M: Max number of neighbors
Parameters
----------
atom_in_fea: Variable(torch.Tensor) shape (N, atom_fea_len)
Atom hidden features before convolution
nbr_fea: Variable(torch.Tensor) shape (N, M, nbr_fea_len)
Bond features of each atom's M neighbors
nbr_fea_idx: torch.LongTensor shape (N, M)
Indices of M neighbors of each atom
Returns
-------
atom_out_fea: nn.Variable shape (N, atom_fea_len)
Atom hidden features after convolution
"""
# TODO will there be problems with the index zero padding?
N, M = nbr_fea_idx.shape
# convolution
atom_nbr_fea = atom_in_fea[nbr_fea_idx, :]
total_nbr_fea = torch.cat(
[atom_in_fea.unsqueeze(1).expand(N, M, self.atom_fea_len),
atom_nbr_fea, nbr_fea], dim=2)
total_gated_fea = self.fc_full(total_nbr_fea)
total_gated_fea = self.bn1(total_gated_fea.view(
-1, self.atom_fea_len * 2)).view(N, M, self.atom_fea_len * 2)
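        # Gated graph convolution: the 2*atom_fea_len features split into a sigmoid "filter"
        # gate and a softplus "core" message, multiplied elementwise and summed over the M neighbors.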
nbr_filter, nbr_core = total_gated_fea.chunk(2, dim=2)
nbr_filter = self.sigmoid(nbr_filter)
nbr_core = self.softplus1(nbr_core)
nbr_sumed = torch.sum(nbr_filter * nbr_core, dim=1)
nbr_sumed = self.bn2(nbr_sumed)
out = self.softplus2(atom_in_fea + nbr_sumed)
return out
class CrystalGraphConvNet(nn.Module):
"""
Create a crystal graph convolutional neural network for predicting total
material properties.
See Also: [CGCNN]_.
.. [CGCNN] `Crystal Graph Convolutional Neural Networks for an Accurate and Interpretable Prediction of Material Properties`__
__ https://doi.org/10.1103/PhysRevLett.120.145301
"""
def __init__(self, orig_atom_fea_len, nbr_fea_len,
atom_fea_len=64, n_conv=3, h_fea_len=128, n_h=1,
classification=False):
"""
Initialize CrystalGraphConvNet.
Parameters
----------
orig_atom_fea_len: int
Number of atom features in the input.
nbr_fea_len: int
Number of bond features.
atom_fea_len: int
Number of hidden atom features in the convolutional layers
n_conv: int
Number of convolutional layers
h_fea_len: int
Number of hidden features after pooling
n_h: int
Number of hidden layers after pooling
"""
super(CrystalGraphConvNet, self).__init__()
self.classification = classification
self.embedding = nn.Linear(orig_atom_fea_len, atom_fea_len)
self.convs = nn.ModuleList([ConvLayer(atom_fea_len=atom_fea_len,
nbr_fea_len=nbr_fea_len)
for _ in range(n_conv)])
self.conv_to_fc = nn.Linear(atom_fea_len, h_fea_len)
self.conv_to_fc_softplus = nn.Softplus()
if n_h > 1:
self.fcs = nn.ModuleList([nn.Linear(h_fea_len, h_fea_len)
for _ in range(n_h - 1)])
self.softpluses = nn.ModuleList([nn.Softplus()
for _ in range(n_h - 1)])
if self.classification:
self.fc_out = nn.Linear(h_fea_len, 2)
else:
self.fc_out = nn.Linear(h_fea_len, 1)
if self.classification:
            self.logsoftmax = nn.LogSoftmax(dim=1)
self.dropout = nn.Dropout()
def forward(self, atom_fea, nbr_fea, nbr_fea_idx, crystal_atom_idx):
"""
Forward pass
N: Total number of atoms in the batch
M: Max number of neighbors
N0: Total number of crystals in the batch
Parameters
----------
atom_fea: Variable(torch.Tensor) shape (N, orig_atom_fea_len)
Atom features from atom type
nbr_fea: Variable(torch.Tensor) shape (N, M, nbr_fea_len)
Bond features of each atom's M neighbors
nbr_fea_idx: torch.LongTensor shape (N, M)
Indices of M neighbors of each atom
crystal_atom_idx: list of torch.LongTensor of length N0
Mapping from the crystal idx to atom idx
Returns
-------
prediction: nn.Variable shape (N, )
Atom hidden features after convolution
"""
atom_fea = self.embedding(atom_fea)
for conv_func in self.convs:
atom_fea = conv_func(atom_fea, nbr_fea, nbr_fea_idx)
crys_fea = self.pooling(atom_fea, crystal_atom_idx)
crys_fea = self.conv_to_fc(self.conv_to_fc_softplus(crys_fea))
crys_fea = self.conv_to_fc_softplus(crys_fea)
if self.classification:
crys_fea = self.dropout(crys_fea)
if hasattr(self, 'fcs') and hasattr(self, 'softpluses'):
for fc, softplus in zip(self.fcs, self.softpluses):
crys_fea = softplus(fc(crys_fea))
out = self.fc_out(crys_fea)
if self.classification:
out = self.logsoftmax(out)
return out
@staticmethod
def pooling(atom_fea, crystal_atom_idx):
"""
Pooling the atom features to crystal features
N: Total number of atoms in the batch
N0: Total number of crystals in the batch
Parameters
----------
atom_fea: Variable(torch.Tensor) shape (N, atom_fea_len)
Atom feature vectors of the batch
crystal_atom_idx: list of torch.LongTensor of length N0
Mapping from the crystal idx to atom idx
"""
assert sum([len(idx_map) for idx_map in crystal_atom_idx]) == \
atom_fea.data.shape[0]
summed_fea = [torch.mean(atom_fea[idx_map], dim=0, keepdim=True)
for idx_map in crystal_atom_idx]
return torch.cat(summed_fea, dim=0)
| [
"torch.nn.Linear",
"torch.nn.LogSoftmax",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.Sigmoid",
"torch.nn.BatchNorm1d",
"torch.nn.Softplus",
"torch.mean",
"torch.sum"
] | 1.1.0 | qi-zh/XenonPy | e91c680c773022982b80686c9faaf962e304916d |
1.8 | import torch
from torch import nn
from transformers import BertTokenizer, VisualBertModel, VisualBertConfig
import numpy as np
class VisualBertClassifier(nn.Module):
def __init__(self,
visual_bert_model,
num_classes: int = 8,
initial_visual_embedding_dim: int = 96,
final_dropout_rate: float = 0.1):
"""
pooler_output (torch.FloatTensor of shape (batch_size, hidden_size))
— Last layer hidden-state of the first token of the sequence (classification token)
after further processing through the layers used for the auxiliary pretraining task.
E.g. for BERT-family of models, this returns the classification token after processing through
a linear layer and a tanh activation function.
The linear layer weights are trained from the next sentence prediction (classification) objective
during pretraining.
@param initial_visual_embedding_dim:
"""
super().__init__()
self.visual_embedding_projection = nn.Linear(initial_visual_embedding_dim, 2048)
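        # Project the low-dimensional face/body embeddings (96-d by default) up to 2048-d,
        # the visual_embedding_dim used by the pretrained VisualBERT checkpoints assumed here.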
self.visual_bert = visual_bert_model
self.final_dropout = nn.Dropout(final_dropout_rate)
self.out = nn.Linear(768, num_classes)
def forward(self,
text_input_ids,
text_token_type_ids,
text_attention_mask,
visual_embeds,
visual_token_type_ids,
visual_attention_mask
):
visual_embeds = self.visual_embedding_projection(visual_embeds)
output = self.visual_bert(input_ids=text_input_ids,
token_type_ids=text_token_type_ids,
attention_mask=text_attention_mask,
visual_embeds=visual_embeds,
visual_token_type_ids=visual_token_type_ids,
visual_attention_mask=visual_attention_mask)
output = self.final_dropout(output.pooler_output)
output = self.out(output)
return output
if __name__ == '__main__':
bert_text_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
inputs = bert_text_tokenizer("What is the man eating?", return_tensors="pt")
text_input_ids = inputs.data['input_ids'].to('cuda')
text_token_type_ids = inputs.data['token_type_ids'].to('cuda')
text_attention_mask = inputs.data['attention_mask'].to('cuda')
sample_face_body_embedding_path = "/home/gsoykan20/Desktop/self_development/emotion-recognition-drawings/data/emoreccom_face_body_embeddings_96d/train/0_3_4.jpg.npy"
sample_face_body_embedding = np.load(sample_face_body_embedding_path)
visual_embeds = torch.from_numpy(sample_face_body_embedding)
visual_embeds = visual_embeds.to('cuda')
visual_embeds = torch.unsqueeze(visual_embeds, 0)
visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long).to('cuda')
visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float).to('cuda')
    # NOTE: VisualBertClassifier requires a VisualBERT backbone as its first argument; the
    # checkpoint name below is an assumption for this standalone demo.
    visual_bert_model = VisualBertModel.from_pretrained("uclanlp/visualbert-vqa-coco-pre")
    classifier = VisualBertClassifier(visual_bert_model)
classifier.to('cuda')
classifier.forward(text_input_ids,
text_token_type_ids,
text_attention_mask,
visual_embeds,
visual_token_type_ids,
visual_attention_mask)
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.unsqueeze",
"torch.from_numpy",
"torch.ones"
] | 1.8.1 | inzva/emotion-recognition-drawings | 56435f42d76c10c10fa58149ccbcc8d05efccdc0 |
0.4 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import, division, print_function
import argparse
import csv
import logging
import os
import random
import sys
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from torch.nn import CrossEntropyLoss, MSELoss
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE, WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.modeling import BertForSequenceClassification, BertConfig
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv")))
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[3]
text_b = line[4]
label = line[0]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[8]
text_b = line[9]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliMismatchedProcessor(MnliProcessor):
"""Processor for the MultiNLI Mismatched data set (GLUE version)."""
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")),
"dev_matched")
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class Sst2Processor(DataProcessor):
"""Processor for the SST-2 data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[0]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class StsbProcessor(DataProcessor):
"""Processor for the STS-B data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return [None]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[7]
text_b = line[8]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QqpProcessor(DataProcessor):
"""Processor for the QQP data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
try:
text_a = line[3]
text_b = line[4]
label = line[5]
except IndexError:
continue
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QnliProcessor(DataProcessor):
"""Processor for the QNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")),
"dev_matched")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class RteProcessor(DataProcessor):
"""Processor for the RTE data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class WnliProcessor(DataProcessor):
"""Processor for the WNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class SCTProcessor(DataProcessor):
"""Processor for the SCT data set."""
def get_train_examples(self, data_dir):
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
return ["0", "1"]
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[0]
text_b = line[1]
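            # The SCT ending marked "True" is the correct one; map it to class "0", otherwise "1".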
label = "0" if line[2] == "True" else "1"
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer, output_mode):
"""Loads a data file into a list of `InputBatch`s."""
label_map = {label : i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if output_mode == "classification":
label_id = label_map[example.label]
elif output_mode == "regression":
label_id = float(example.label)
else:
raise KeyError(output_mode)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id))
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def acc_and_f1(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds)
return {
"acc": acc,
"f1": f1,
"acc_and_f1": (acc + f1) / 2,
}
def pearson_and_spearman(preds, labels):
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def compute_metrics(task_name, preds, labels):
assert len(preds) == len(labels)
if task_name == "cola":
return {"mcc": matthews_corrcoef(labels, preds)}
elif task_name == "sst-2":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mrpc":
return acc_and_f1(preds, labels)
elif task_name == "sts-b":
return pearson_and_spearman(preds, labels)
elif task_name == "qqp":
return acc_and_f1(preds, labels)
elif task_name == "mnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mnli-mm":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "qnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "rte":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "wnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "sct":
return {"acc": simple_accuracy(preds, labels)}
else:
raise KeyError(task_name)
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
"bert-base-multilingual-cased, bert-base-chinese.")
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=8,
type=int,
help="Total batch size for eval.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mnli-mm": MnliMismatchedProcessor,
"mrpc": MrpcProcessor,
"sst-2": Sst2Processor,
"sts-b": StsbProcessor,
"qqp": QqpProcessor,
"qnli": QnliProcessor,
"rte": RteProcessor,
"wnli": WnliProcessor,
"sct": SCTProcessor,
}
output_modes = {
"cola": "classification",
"mnli": "classification",
"mrpc": "classification",
"sst-2": "classification",
"sts-b": "regression",
"qqp": "classification",
"qnli": "classification",
"rte": "classification",
"wnli": "classification",
"sct": "classification",
}
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
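    # Note: each forward/backward pass now uses train_batch_size / gradient_accumulation_steps examples;
    # gradients are accumulated so one optimizer update still corresponds to the originally requested batch size.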
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
task_name = args.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
output_mode = output_modes[task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
train_examples = None
num_train_optimization_steps = None
if args.do_train:
train_examples = processor.get_train_examples(args.data_dir)
num_train_optimization_steps = int(
len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
if args.local_rank != -1:
num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
# Prepare model
cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank))
model = BertForSequenceClassification.from_pretrained(args.bert_model,
cache_dir=cache_dir,
num_labels=num_labels)
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
if args.do_train:
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
global_step = 0
nb_tr_steps = 0
tr_loss = 0
if args.do_train:
train_features = convert_examples_to_features(
train_examples, label_list, args.max_seq_length, tokenizer, output_mode)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
if output_mode == "classification":
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
elif output_mode == "regression":
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.float)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
# define a new function to compute loss values for both output_modes
logits = model(input_ids, segment_ids, input_mask, labels=None)
if output_mode == "classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
elif output_mode == "regression":
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), label_ids.view(-1))
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
# modify learning rate with special warm up BERT uses
# if args.fp16 is False, BertAdam is used that handles this automatically
lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step, args.warmup_proportion)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(args.output_dir)
# Load a trained model and vocabulary that you have fine-tuned
model = BertForSequenceClassification.from_pretrained(args.output_dir, num_labels=num_labels)
tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
else:
model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels)
model.to(device)
if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
eval_examples = processor.get_dev_examples(args.data_dir)
eval_features = convert_examples_to_features(
eval_examples, label_list, args.max_seq_length, tokenizer, output_mode)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
if output_mode == "classification":
all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
elif output_mode == "regression":
all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.float)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
model.eval()
eval_loss = 0
nb_eval_steps = 0
preds = []
for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
logits = model(input_ids, segment_ids, input_mask, labels=None)
        # create eval loss and other metrics required by the task
if output_mode == "classification":
loss_fct = CrossEntropyLoss()
tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
elif output_mode == "regression":
loss_fct = MSELoss()
tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1))
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if len(preds) == 0:
preds.append(logits.detach().cpu().numpy())
else:
preds[0] = np.append(
preds[0], logits.detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
preds = preds[0]
if output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif output_mode == "regression":
preds = np.squeeze(preds)
result = compute_metrics(task_name, preds, all_label_ids.numpy())
loss = tr_loss/global_step if args.do_train else None
result['eval_loss'] = eval_loss
result['global_step'] = global_step
result['loss'] = loss
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
# hack for MNLI-MM
if task_name == "mnli":
task_name = "mnli-mm"
processor = processors[task_name]()
if os.path.exists(args.output_dir + '-MM') and os.listdir(args.output_dir + '-MM') and args.do_train:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
if not os.path.exists(args.output_dir + '-MM'):
os.makedirs(args.output_dir + '-MM')
eval_examples = processor.get_dev_examples(args.data_dir)
eval_features = convert_examples_to_features(
eval_examples, label_list, args.max_seq_length, tokenizer, output_mode)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
model.eval()
eval_loss = 0
nb_eval_steps = 0
preds = []
for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
logits = model(input_ids, segment_ids, input_mask, labels=None)
loss_fct = CrossEntropyLoss()
tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if len(preds) == 0:
preds.append(logits.detach().cpu().numpy())
else:
preds[0] = np.append(
preds[0], logits.detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
preds = preds[0]
preds = np.argmax(preds, axis=1)
result = compute_metrics(task_name, preds, all_label_ids.numpy())
loss = tr_loss/global_step if args.do_train else None
result['eval_loss'] = eval_loss
result['global_step'] = global_step
result['loss'] = loss
output_eval_file = os.path.join(args.output_dir + '-MM', "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
| [
"torch.distributed.get_world_size",
"torch.utils.data.RandomSampler",
"torch.cuda.is_available",
"torch.nn.CrossEntropyLoss",
"torch.nn.DataParallel",
"torch.distributed.init_process_group",
"torch.manual_seed",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.distributed.get_rank",
"torch.device",
"torch.cuda.manual_seed_all",
"torch.utils.data.SequentialSampler",
"torch.cuda.device_count",
"torch.cuda.set_device",
"torch.utils.data.TensorDataset",
"torch.nn.MSELoss",
"torch.no_grad",
"torch.utils.data.distributed.DistributedSampler"
] | 0.4.1 | SebastianMuszynski/pytorch-pretrained-BERT | 1892015692a28096859a46243ae458f9f8aa003f |
1.3 | import numpy as np
from torchvision import datasets
import torchvision.transforms as transforms
import random
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data import DataLoader, Dataset
import torch
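# Channel-wise normalisation constants: the birds transform uses the standard ImageNet statistics,
# MNIST uses its single-channel dataset statistics, and the celebA/face transform maps [0, 1] pixels to [-1, 1].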
normalize_birds = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
normalize_mnist = transforms.Normalize(mean=[0.1307], std=[0.3081])
normalize_fiw = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
def get_data_loader(opt):
if opt.dataset == "birds":
my_transform = transforms.Compose([
transforms.Resize((opt.img_size, opt.img_size)),
transforms.ToTensor(),
normalize_birds
])
train_dataset = datasets.ImageFolder(root=opt.image_root, transform=my_transform)
train_loader = DataLoader(dataset=train_dataset, batch_size=opt.batch_size, shuffle=True, num_workers=opt.num_workers)
test_loader = None
val_loader = None
elif opt.dataset == "mnist":
my_transform = transforms.Compose([
transforms.Resize((opt.img_size, opt.img_size)),
transforms.ToTensor(),
normalize_mnist
])
train_loader = DataLoader(datasets.MNIST(opt.image_root, train=True, download=True, transform=my_transform),
batch_size=opt.batch_size, shuffle=True)
test_loader = None
val_loader = None
elif opt.dataset == "celebA" or opt.dataset == "celebA_reduced":
my_transform = transforms.Compose([
transforms.Resize((opt.img_size, opt.img_size)),
transforms.CenterCrop(opt.img_size),
transforms.ToTensor(),
normalize_fiw
])
train_dataset = datasets.ImageFolder(root=opt.image_root_train, transform=my_transform)
val_dataset = datasets.ImageFolder(root=opt.image_root_val, transform=my_transform)
test_dataset = datasets.ImageFolder(root=opt.image_root_test, transform=my_transform)
train_loader = DataLoader(dataset=train_dataset, batch_size=opt.batch_size_train, shuffle=True, num_workers=opt.num_workers)
val_loader = DataLoader(dataset=val_dataset, batch_size=opt.batch_size_val, shuffle=False, num_workers=opt.num_workers)
test_loader = DataLoader(dataset=test_dataset, batch_size=opt.batch_size_test, shuffle=False, num_workers=opt.num_workers)
return train_loader, val_loader, test_loader | [
"torch.utils.data.DataLoader"
] | 1.3.1 | RicoFio/disentangle_mlp | 1fb3b6070b5846051b8b9e9333e8ee61418f4893 |
1.6 | import pytest
import torch
import numpy as np
import gym
from d3rlpy.dataset import MDPDataset, Episode
from d3rlpy.preprocessing import create_action_scaler
from d3rlpy.preprocessing import MinMaxActionScaler
@pytest.mark.parametrize("scaler_type", ["min_max"])
def test_create_action_scaler(scaler_type):
scaler = create_action_scaler(scaler_type)
if scaler_type == "min_max":
assert isinstance(scaler, MinMaxActionScaler)
@pytest.mark.parametrize("action_size", [10])
@pytest.mark.parametrize("batch_size", [32])
def test_min_max_action_scaler(action_size, batch_size):
actions = np.random.random((batch_size, action_size)).astype("f4")
max = actions.max(axis=0)
min = actions.min(axis=0)
scaler = MinMaxActionScaler(maximum=max, minimum=min)
# check range
y = scaler.transform(torch.tensor(actions))
assert np.all(y.numpy() >= -1.0)
assert np.all(y.numpy() <= 1.0)
x = torch.rand((batch_size, action_size))
y = scaler.transform(x)
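    # MinMaxActionScaler is expected to rescale each action dimension to [0, 1] via (x - min) / (max - min)
    # and then map it to [-1, 1]; the reference computation below mirrors that.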
ref_y = (x.numpy() - min.reshape((1, -1))) / (max - min).reshape((1, -1))
assert np.allclose(y.numpy(), ref_y * 2.0 - 1.0)
assert scaler.get_type() == "min_max"
params = scaler.get_params()
assert np.all(params["minimum"] == min)
assert np.all(params["maximum"] == max)
assert torch.allclose(scaler.reverse_transform(y), x, atol=1e-6)
@pytest.mark.parametrize("observation_shape", [(100,)])
@pytest.mark.parametrize("action_size", [10])
@pytest.mark.parametrize("batch_size", [32])
def test_min_max_action_scaler_with_episode(
observation_shape, action_size, batch_size
):
shape = (batch_size,) + observation_shape
observations = np.random.random(shape)
actions = np.random.random((batch_size, action_size)).astype("f4")
rewards = np.random.random(batch_size)
terminals = np.random.randint(2, size=batch_size)
terminals[-1] = 1.0
dataset = MDPDataset(
observations=observations,
actions=actions,
rewards=rewards,
terminals=terminals,
)
max = actions.max(axis=0)
min = actions.min(axis=0)
scaler = MinMaxActionScaler()
scaler.fit(dataset.episodes)
x = torch.rand((batch_size, action_size))
y = scaler.transform(x)
ref_y = (x.numpy() - min.reshape((1, -1))) / (max - min).reshape((1, -1))
assert np.allclose(y.numpy(), ref_y * 2.0 - 1.0)
params = scaler.get_params()
assert np.all(params["minimum"] == min)
assert np.all(params["maximum"] == max)
def test_min_max_action_scaler_with_env():
env = gym.make("Pendulum-v0")
scaler = MinMaxActionScaler()
scaler.fit_with_env(env)
assert np.all(scaler._minimum == env.action_space.low)
assert np.all(scaler._maximum == env.action_space.high)
| [
"torch.rand",
"torch.tensor"
] | 1.6.0 | jamartinh/d3rlpy | 87f478451674ef769eb8ce74e3663c4d3b1c325d |
1.8 | import torch
import torch.nn as nn
from .midas.midas_net import MidasNet
class DoepdNet(torch.nn.Module):
"""
There are 3 run modes available for this model.
    1. yolo : trains/runs inference on only the YOLO decoder, ignoring the MiDaS and PlaneRCNN heads
    2. planercnn : trains/runs inference on only the PlaneRCNN decoder, ignoring the MiDaS and YOLO heads
    3. all : trains/runs inference on every layer
"""
midas_encoder_layered_output = []
def __init__(self, run_mode, midas_weights = "weights/model-f6b98070.pt", image_size=384):
super(DoepdNet, self).__init__()
self.run_mode = run_mode
self.midas_net = MidasNet(midas_weights)
midas_encoder_filters = [256, 256, 512, 512, 1024] # output filters from each layer of resnext 101
if self.run_mode == 'yolo' or self.run_mode == 'all':
from .yolo.yolo_decoder import YoloDecoder
# Each of the three layers in yolo takes input from last 3 layers of midas
self.yolo_decoder = YoloDecoder(midas_encoder_filters, (image_size, image_size))
self.yolo_layers = self.yolo_decoder.yolo_layers
self.midas_layer_2_to_yolo_small_obj = nn.Conv2d(in_channels= 512, out_channels = 256, kernel_size = 1, padding = 0)
self.midas_layer_3_to_yolo_med_obj = nn.Conv2d(in_channels= 1024, out_channels = 512, kernel_size = 1, padding = 0)
self.midas_layer_4_to_yolo_med_obj = nn.Conv2d(in_channels= 2048, out_channels = 512, kernel_size = 1, padding = 0)
self.midas_layer_4_to_yolo_large_obj = nn.Conv2d(in_channels= 2048, out_channels = 1024, kernel_size = 1, padding = 0)
# if self.run_mode == 'planercnn' or self.run_mode == 'all':
# from .planercnn.planercnn_decoder import MaskRCNN
# from utils.config import PlaneConfig
# import sys
# sys.argv=['']
# del sys
# from utils.options import parse_args
# args = parse_args()
# config = PlaneConfig(args)
# self.plane_rcnn_decoder = MaskRCNN(config)
# Freeze training for midas (encoder)
for param in self.midas_net.pretrained.parameters():
param.requires_grad = False
def forward(self, x, plane_rcnn_image_meta = None, augment=False, mode='inference_detection', use_nms=2, use_refinement=True, return_feature_map=False):
doepd_forward_output = [None, None, None]
encoder_layered_outputs = self.midas_net.forward_encoder(x)
if self.run_mode == 'yolo' or self.run_mode == 'all':
yolo_small = self.midas_layer_2_to_yolo_small_obj(encoder_layered_outputs[1]) # midas resnext 101 layer 2
yolo_med = self.midas_layer_3_to_yolo_med_obj(encoder_layered_outputs[2]) # midas resnext 101 layer 3
yolo_med_before_upsample = self.midas_layer_4_to_yolo_med_obj(encoder_layered_outputs[3]) # midas resnext 101 layer 4
yolo_large = self.midas_layer_4_to_yolo_large_obj(encoder_layered_outputs[3]) # midas resnext 101 layer 4
doepd_forward_output[0] = self.yolo_decoder.forward([yolo_small, yolo_med_before_upsample, yolo_med, yolo_large], augment=augment)
if self.run_mode == 'midas' or self.run_mode == 'all':
doepd_forward_output[1] = self.midas_net.forward_decoder(encoder_layered_outputs)
# if self.run_mode == 'planercnn' or self.run_mode == 'all':
# doepd_forward_output[2] = self.plane_rcnn_decoder.predict(x, plane_rcnn_image_meta, mode, encoder_layered_outputs = encoder_layered_outputs, return_feature_map= return_feature_map)
return doepd_forward_output
def load_doepd_weights(self, device='cpu', scratch=False, train_mode = False, load_mode='all'):
yolo_weights = []
chkpt = [None, None]
from .yolo.yolo_decoder import load_yolo_decoder_weights
if not scratch:
# loading yolo weights
if self.run_mode == 'yolo' or self.run_mode == 'all':
yolo_weight_file = None
# loading yolo weights from last/best based on train_mode. Will update to add planercnn weights
if train_mode:
yolo_weight_file = 'weights/doepd_yolo_last.pt'
else:
yolo_weight_file = 'weights/doepd_yolo_best.pt'
chkpt[0] = torch.load(yolo_weight_file, map_location = "cpu")
num_items = 0
for k, v in chkpt[0]['model'].items():
if num_items>=666 and num_items<756:
if not k.endswith('num_batches_tracked'):
yolo_weights.append(v.detach().numpy())
num_items = num_items + 1
load_yolo_decoder_weights(self.yolo_decoder, yolo_weights)
self.midas_layer_2_to_yolo_small_obj.weight = torch.nn.Parameter(chkpt[0]['model']['midas_layer_2_to_yolo_small_obj.weight'])
self.midas_layer_2_to_yolo_small_obj.bias = torch.nn.Parameter(chkpt[0]['model']['midas_layer_2_to_yolo_small_obj.bias'])
self.midas_layer_3_to_yolo_med_obj.weight = torch.nn.Parameter(chkpt[0]['model']['midas_layer_3_to_yolo_med_obj.weight'])
self.midas_layer_3_to_yolo_med_obj.bias = torch.nn.Parameter(chkpt[0]['model']['midas_layer_3_to_yolo_med_obj.bias'])
self.midas_layer_4_to_yolo_med_obj.weight = torch.nn.Parameter(chkpt[0]['model']['midas_layer_4_to_yolo_med_obj.weight'])
self.midas_layer_4_to_yolo_med_obj.bias = torch.nn.Parameter(chkpt[0]['model']['midas_layer_4_to_yolo_med_obj.bias'])
self.midas_layer_4_to_yolo_large_obj.weight = torch.nn.Parameter(chkpt[0]['model']['midas_layer_4_to_yolo_large_obj.weight'])
self.midas_layer_4_to_yolo_large_obj.bias = torch.nn.Parameter(chkpt[0]['model']['midas_layer_4_to_yolo_large_obj.bias'])
# elif self.run_mode == 'planercnn' or self.run_mode == 'all':
# planer_cnn_file = 'weights/planer_checkpoint.pth'
# chkpt[1] = torch.load(planer_cnn_file, map_location = "cpu")
# num_items = 0
# for k, v in chkpt[1].items():
# if num_items>=728:
# # eg k = depth.deconv1.2.running_var
# # we need plane_rcnn_decoder.depth.deconv1.2.running_var
# self.plane_rcnn_decoder.state_dict()[f'plane_rcnn_decoder.{k}'] = v.data
# num_items = num_items + 1
else:
# loading yolo_best weights : got from 300 epochs trained in Assignment 13
yolo_weight_file='weights/yolo_old_300.pt'
chkpt[0] = torch.load(yolo_weight_file, map_location = device)
num_items=0
for k, v in chkpt[0]['model'].items():
if num_items >= 354:
if not k.endswith('num_batches_tracked'):
if v.shape[0]!=255:
yolo_weights.append(v.detach().numpy())
num_items = num_items + 1
load_yolo_decoder_weights(self.yolo_decoder, yolo_weights)
return chkpt
| [
"torch.nn.Conv2d",
"torch.load",
"torch.nn.Parameter"
] | 1.8.1 | namanshrimali/doepd.ai | fc57af2e131965d9d6c89e39a3eeab41c8dff40b |
1.1 | import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, multiclass_nms_with_mask
from mmdet.ops import ModulatedDeformConvPack
from ..builder import build_loss
from ..registry import HEADS
from ..utils import ConvModule, Scale, bias_init_with_prob, build_norm_layer
from IPython import embed
import cv2
import numpy as np
import math
import time
INF = 1e8
@HEADS.register_module
class PolarMask_Head(nn.Module):
def __init__(self,
num_classes,
in_channels,
feat_channels=256,
stacked_convs=4,
strides=(4, 8, 16, 32, 64),
regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
(512, INF)),
use_dcn=False,
mask_nms=False,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_mask=dict(type='MaskIOULoss'),
loss_centerness=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0),
conv_cfg=None,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)):
super(PolarMask_Head, self).__init__()
self.num_classes = num_classes
self.cls_out_channels = num_classes - 1
self.in_channels = in_channels
self.feat_channels = feat_channels
self.stacked_convs = stacked_convs
self.strides = strides
self.regress_ranges = regress_ranges
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
self.loss_mask = build_loss(loss_mask)
self.loss_centerness = build_loss(loss_centerness)
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
# xez add for polarmask
self.use_dcn = use_dcn
self.mask_nms = mask_nms
# debug vis img
self.vis_num = 1000
self.count = 0
# test
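        # 36 ray angles at 10-degree intervals (0-350 degrees), converted to radians; one distance is regressed per ray.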
self.angles = torch.range(0, 350, 10).cuda() / 180 * math.pi
self._init_layers()
def _init_layers(self):
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
self.mask_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
if not self.use_dcn:
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.mask_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
else:
self.cls_convs.append(
ModulatedDeformConvPack(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
dilation=1,
deformable_groups=1,
))
if self.norm_cfg:
self.cls_convs.append(build_norm_layer(self.norm_cfg, self.feat_channels)[1])
self.cls_convs.append(nn.ReLU(inplace=True))
self.reg_convs.append(
ModulatedDeformConvPack(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
dilation=1,
deformable_groups=1,
))
if self.norm_cfg:
self.reg_convs.append(build_norm_layer(self.norm_cfg, self.feat_channels)[1])
self.reg_convs.append(nn.ReLU(inplace=True))
self.mask_convs.append(
ModulatedDeformConvPack(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
dilation=1,
deformable_groups=1,
))
if self.norm_cfg:
self.mask_convs.append(build_norm_layer(self.norm_cfg, self.feat_channels)[1])
self.mask_convs.append(nn.ReLU(inplace=True))
self.polar_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.polar_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
self.polar_mask = nn.Conv2d(self.feat_channels, 36, 3, padding=1)
self.polar_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
self.scales_bbox = nn.ModuleList([Scale(1.0) for _ in self.strides])
self.scales_mask = nn.ModuleList([Scale(1.0) for _ in self.strides])
def init_weights(self):
if not self.use_dcn:
for m in self.cls_convs:
normal_init(m.conv, std=0.01)
for m in self.reg_convs:
normal_init(m.conv, std=0.01)
for m in self.mask_convs:
normal_init(m.conv, std=0.01)
else:
pass
bias_cls = bias_init_with_prob(0.01)
normal_init(self.polar_cls, std=0.01, bias=bias_cls)
normal_init(self.polar_reg, std=0.01)
normal_init(self.polar_mask, std=0.01)
normal_init(self.polar_centerness, std=0.01)
def forward(self, feats):
return multi_apply(self.forward_single, feats, self.scales_bbox, self.scales_mask)
def forward_single(self, x, scale_bbox, scale_mask):
cls_feat = x
reg_feat = x
mask_feat = x
for cls_layer in self.cls_convs:
cls_feat = cls_layer(cls_feat)
cls_score = self.polar_cls(cls_feat)
centerness = self.polar_centerness(cls_feat)
for reg_layer in self.reg_convs:
reg_feat = reg_layer(reg_feat)
# scale the bbox_pred of different level
# float to avoid overflow when enabling FP16
bbox_pred = scale_bbox(self.polar_reg(reg_feat)).float().exp()
for mask_layer in self.mask_convs:
mask_feat = mask_layer(mask_feat)
mask_pred = scale_mask(self.polar_mask(mask_feat)).float().exp()
return cls_score, bbox_pred, centerness, mask_pred
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'mask_preds', 'centernesses'))
def loss(self,
cls_scores,
bbox_preds,
centernesses,
mask_preds,
gt_bboxes,
gt_labels,
img_metas,
cfg,
gt_masks,
gt_bboxes_ignore=None,
extra_data=None):
assert len(cls_scores) == len(bbox_preds) == len(centernesses) == len(mask_preds)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
labels, bbox_targets, mask_targets = self.polar_target(all_level_points, extra_data)
num_imgs = cls_scores[0].size(0)
# flatten cls_scores, bbox_preds and centerness
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
for cls_score in cls_scores]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
for bbox_pred in bbox_preds
]
flatten_centerness = [
centerness.permute(0, 2, 3, 1).reshape(-1)
for centerness in centernesses
]
flatten_mask_preds = [
mask_pred.permute(0, 2, 3, 1).reshape(-1, 36)
for mask_pred in mask_preds
]
flatten_cls_scores = torch.cat(flatten_cls_scores) # [num_pixel, 80]
flatten_bbox_preds = torch.cat(flatten_bbox_preds) # [num_pixel, 4]
flatten_mask_preds = torch.cat(flatten_mask_preds) # [num_pixel, 36]
flatten_centerness = torch.cat(flatten_centerness) # [num_pixel]
flatten_labels = torch.cat(labels).long() # [num_pixel]
flatten_bbox_targets = torch.cat(bbox_targets) # [num_pixel, 4]
flatten_mask_targets = torch.cat(mask_targets) # [num_pixel, 36]
flatten_points = torch.cat([points.repeat(num_imgs, 1)
for points in all_level_points]) # [num_pixel,2]
pos_inds = flatten_labels.nonzero().reshape(-1)
num_pos = len(pos_inds)
loss_cls = self.loss_cls(
flatten_cls_scores, flatten_labels,
avg_factor=num_pos + num_imgs) # avoid num_pos is 0
pos_bbox_preds = flatten_bbox_preds[pos_inds]
pos_centerness = flatten_centerness[pos_inds]
pos_mask_preds = flatten_mask_preds[pos_inds]
if num_pos > 0:
pos_bbox_targets = flatten_bbox_targets[pos_inds]
pos_mask_targets = flatten_mask_targets[pos_inds]
pos_centerness_targets = self.polar_centerness_target(pos_mask_targets)
pos_points = flatten_points[pos_inds]
pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)
pos_decoded_target_preds = distance2bbox(pos_points,
pos_bbox_targets)
# centerness weighted iou loss
loss_bbox = self.loss_bbox(
pos_decoded_bbox_preds,
pos_decoded_target_preds,
weight=pos_centerness_targets,
avg_factor=pos_centerness_targets.sum())
loss_mask = self.loss_mask(pos_mask_preds,
pos_mask_targets,
weight=pos_centerness_targets,
avg_factor=pos_centerness_targets.sum())
loss_centerness = self.loss_centerness(pos_centerness,
pos_centerness_targets)
else:
loss_bbox = pos_bbox_preds.sum()
loss_mask = pos_mask_preds.sum()
loss_centerness = pos_centerness.sum()
return dict(
loss_cls=loss_cls,
loss_bbox=loss_bbox,
loss_mask=loss_mask,
loss_centerness=loss_centerness)
def get_points(self, featmap_sizes, dtype, device):
"""Get points according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
dtype (torch.dtype): Type of points.
device (torch.device): Device of points.
Returns:
            list[Tensor]: Points of each feature map level.
"""
mlvl_points = []
for i in range(len(featmap_sizes)):
mlvl_points.append(
self.get_points_single(featmap_sizes[i], self.strides[i],
dtype, device))
return mlvl_points
def get_points_single(self, featmap_size, stride, dtype, device):
h, w = featmap_size
x_range = torch.arange(
0, w * stride, stride, dtype=dtype, device=device)
y_range = torch.arange(
0, h * stride, stride, dtype=dtype, device=device)
y, x = torch.meshgrid(y_range, x_range)
points = torch.stack(
(x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2
return points
def polar_target(self, points, extra_data):
assert len(points) == len(self.regress_ranges)
num_levels = len(points)
labels_list, bbox_targets_list, mask_targets_list = extra_data.values()
# split to per img, per level
num_points = [center.size(0) for center in points]
labels_list = [labels.split(num_points, 0) for labels in labels_list]
bbox_targets_list = [
bbox_targets.split(num_points, 0)
for bbox_targets in bbox_targets_list
]
mask_targets_list = [
mask_targets.split(num_points, 0)
for mask_targets in mask_targets_list
]
# concat per level image
concat_lvl_labels = []
concat_lvl_bbox_targets = []
concat_lvl_mask_targets = []
for i in range(num_levels):
concat_lvl_labels.append(
torch.cat([labels[i] for labels in labels_list]))
concat_lvl_bbox_targets.append(
torch.cat(
[bbox_targets[i] for bbox_targets in bbox_targets_list]))
concat_lvl_mask_targets.append(
torch.cat(
[mask_targets[i] for mask_targets in mask_targets_list]))
return concat_lvl_labels, concat_lvl_bbox_targets, concat_lvl_mask_targets
def polar_centerness_target(self, pos_mask_targets):
# only calculate pos centerness targets, otherwise there may be nan
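        # polar centerness = sqrt(min_i d_i / max_i d_i) over the 36 target ray lengths:
        # close to 1 for round, well-centred masks and close to 0 for elongated or off-centre ones.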
centerness_targets = (pos_mask_targets.min(dim=-1)[0] / pos_mask_targets.max(dim=-1)[0])
return torch.sqrt(centerness_targets)
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
def get_bboxes(self,
cls_scores,
bbox_preds,
centernesses,
mask_preds,
img_metas,
cfg,
rescale=None):
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
centerness_pred_list = [
centernesses[i][img_id].detach() for i in range(num_levels)
]
mask_pred_list = [
mask_preds[i][img_id].detach() for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
det_bboxes = self.get_bboxes_single(cls_score_list,
bbox_pred_list,
mask_pred_list,
centerness_pred_list,
mlvl_points, img_shape,
scale_factor, cfg, rescale)
result_list.append(det_bboxes)
return result_list
def get_bboxes_single(self,
cls_scores,
bbox_preds,
mask_preds,
centernesses,
mlvl_points,
img_shape,
scale_factor,
cfg,
rescale=False):
assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
mlvl_bboxes = []
mlvl_scores = []
mlvl_masks = []
mlvl_centerness = []
for cls_score, bbox_pred, mask_pred, centerness, points in zip(
cls_scores, bbox_preds, mask_preds, centernesses, mlvl_points):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
scores = cls_score.permute(1, 2, 0).reshape(
-1, self.cls_out_channels).sigmoid()
centerness = centerness.permute(1, 2, 0).reshape(-1).sigmoid()
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
mask_pred = mask_pred.permute(1, 2, 0).reshape(-1, 36)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
max_scores, _ = (scores * centerness[:, None]).max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
points = points[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
mask_pred = mask_pred[topk_inds, :]
scores = scores[topk_inds, :]
centerness = centerness[topk_inds]
bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape)
masks = distance2mask(points, mask_pred, self.angles, max_shape=img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_centerness.append(centerness)
mlvl_masks.append(masks)
mlvl_bboxes = torch.cat(mlvl_bboxes)
mlvl_masks = torch.cat(mlvl_masks)
if rescale:
_mlvl_bboxes = mlvl_bboxes / mlvl_bboxes.new_tensor(scale_factor)
try:
scale_factor = torch.Tensor(scale_factor)[:2].cuda().unsqueeze(1).repeat(1, 36)
_mlvl_masks = mlvl_masks / scale_factor
except:
_mlvl_masks = mlvl_masks / mlvl_masks.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)
mlvl_centerness = torch.cat(mlvl_centerness)
        centerness_factor = 0.5  # mask centerness is smaller than the original box centerness, so adding a constant is important or the score will be too low.
if self.mask_nms:
            '''1 mask->min_bbox->nms, performance is the same as the original box'''
a = _mlvl_masks
_mlvl_bboxes = torch.stack([a[:, 0].min(1)[0],a[:, 1].min(1)[0],a[:, 0].max(1)[0],a[:, 1].max(1)[0]],-1)
det_bboxes, det_labels, det_masks = multiclass_nms_with_mask(
_mlvl_bboxes,
mlvl_scores,
_mlvl_masks,
cfg.score_thr,
cfg.nms,
cfg.max_per_img,
score_factors=mlvl_centerness + centerness_factor)
else:
            '''2 original bbox->nms, performance is the same as mask->min_bbox'''
det_bboxes, det_labels, det_masks = multiclass_nms_with_mask(
_mlvl_bboxes,
mlvl_scores,
_mlvl_masks,
cfg.score_thr,
cfg.nms,
cfg.max_per_img,
score_factors=mlvl_centerness + centerness_factor)
return det_bboxes, det_labels, det_masks
# test
def distance2mask(points, distances, angles, max_shape=None):
    '''Decode distance predictions into 36 mask contour points.
    Args:
        points (Tensor): Shape (n, 2), [x, y] centre points.
        distances (Tensor): Distances from each centre point along 36 rays, for angles from 0 to 350 degrees.
        angles (Tensor): Ray angles in radians.
        max_shape (tuple): Shape of the image, used to clamp the decoded points.
Returns:
Tensor: Decoded masks.
'''
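    # Convert polar distances to Cartesian contour points: x = c_x + d * sin(theta), y = c_y + d * cos(theta)
    # for each of the 36 angles, then clamp to the image boundary if max_shape is given.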
num_points = points.shape[0]
points = points[:, :, None].repeat(1, 1, 36)
c_x, c_y = points[:, 0], points[:, 1]
sin = torch.sin(angles)
cos = torch.cos(angles)
sin = sin[None, :].repeat(num_points, 1)
cos = cos[None, :].repeat(num_points, 1)
x = distances * sin + c_x
y = distances * cos + c_y
if max_shape is not None:
x = x.clamp(min=0, max=max_shape[1] - 1)
y = y.clamp(min=0, max=max_shape[0] - 1)
res = torch.cat([x[:, None, :], y[:, None, :]], dim=1)
return res
| [
"torch.cos",
"torch.cat",
"torch.sqrt",
"torch.nn.ModuleList",
"torch.sin",
"torch.arange",
"torch.nn.ReLU",
"torch.range",
"torch.nn.Conv2d",
"torch.meshgrid",
"torch.Tensor"
] | 1.1 | PanAndy/PolarMask | 0421f03a66ad4cbf7bdfe7a17a2e47e9fcc53737 |
0.4 | import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision.transforms as transforms
import torch.nn as nn
import torch.utils.data
import numpy as np
from opt import opt
from dataloader import VideoLoader, DetectionLoader, DetectionProcessor, DataWriter, Mscoco
from yolo.util import write_results, dynamic_write_results
from SPPE.src.main_fast_inference import *
import os
import sys
from tqdm import tqdm
import time
from fn import getTime
import cv2
from pPose_nms import pose_nms, write_json
args = opt
args.dataset = 'coco'
if not args.sp:
torch.multiprocessing.set_start_method('forkserver', force=True)
torch.multiprocessing.set_sharing_strategy('file_system')
if __name__ == "__main__":
videofile = args.video
mode = args.mode
if not os.path.exists(args.outputpath):
os.mkdir(args.outputpath)
if not len(videofile):
raise IOError('Error: must contain --video')
# Load input video
data_loader = VideoLoader(videofile, batchSize=args.detbatch).start()
(fourcc,fps,frameSize) = data_loader.videoinfo()
# Load detection loader
print('Loading YOLO model..')
sys.stdout.flush()
det_loader = DetectionLoader(data_loader, batchSize=args.detbatch).start()
det_processor = DetectionProcessor(det_loader).start()
# Load pose model
pose_dataset = Mscoco()
if args.fast_inference:
pose_model = InferenNet_fast(4 * 1 + 1, pose_dataset)
else:
pose_model = InferenNet(4 * 1 + 1, pose_dataset)
pose_model.cuda()
pose_model.eval()
runtime_profile = {
'dt': [],
'pt': [],
'pn': []
}
# Data writer
save_path = os.path.join(args.outputpath, 'AlphaPose_'+videofile.split('/')[-1].split('.')[0]+'.avi')
writer = DataWriter(args.save_video, save_path, cv2.VideoWriter_fourcc(*'XVID'), fps, frameSize).start()
im_names_desc = tqdm(range(data_loader.length()))
batchSize = args.posebatch
for i in im_names_desc:
start_time = getTime()
with torch.no_grad():
(inps, orig_img, im_name, boxes, scores, pt1, pt2) = det_processor.read()
if orig_img is None:
break
if boxes is None or boxes.nelement() == 0:
writer.save(None, None, None, None, None, orig_img, im_name.split('/')[-1])
continue
ckpt_time, det_time = getTime(start_time)
runtime_profile['dt'].append(det_time)
# Pose Estimation
datalen = inps.size(0)
leftover = 0
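            # Run pose estimation in chunks of batchSize; the leftover flag adds one final,
            # smaller batch when the number of detections does not divide evenly.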
if (datalen) % batchSize:
leftover = 1
num_batches = datalen // batchSize + leftover
hm = []
for j in range(num_batches):
inps_j = inps[j*batchSize:min((j + 1)*batchSize, datalen)].cuda()
hm_j = pose_model(inps_j)
hm.append(hm_j)
hm = torch.cat(hm)
ckpt_time, pose_time = getTime(ckpt_time)
runtime_profile['pt'].append(pose_time)
hm = hm.cpu().data
writer.save(boxes, scores, hm, pt1, pt2, orig_img, im_name.split('/')[-1])
ckpt_time, post_time = getTime(ckpt_time)
runtime_profile['pn'].append(post_time)
if args.profile:
# TQDM
im_names_desc.set_description(
'det time: {dt:.3f} | pose time: {pt:.2f} | post processing: {pn:.4f}'.format(
dt=np.mean(runtime_profile['dt']), pt=np.mean(runtime_profile['pt']), pn=np.mean(runtime_profile['pn']))
)
print('===========================> Finish Model Running.')
if (args.save_img or args.save_video) and not args.vis_fast:
print('===========================> Rendering remaining images in the queue...')
print('===========================> If this step takes too long, you can enable the --vis_fast flag to use fast rendering (real-time).')
while(writer.running()):
pass
writer.stop()
final_result = writer.results()
write_json(final_result, args.outputpath)
| [
"torch.cat",
"torch.no_grad",
"torch.multiprocessing.set_start_method",
"torch.multiprocessing.set_sharing_strategy"
] | 0.4.0 | LangwenH/AlphaPose-for-Mice-Behavior | 357923f5993a521507fe7359fa763d2b5d2493f7 |
1.1 | import torch.utils.data as data
import torch
import albumentations
import cv2
import numpy as np
import random
import math
from settings import train_png_dir
def generate_transforms(image_size):
IMAGENET_SIZE = image_size
train_transform = albumentations.Compose([
albumentations.Resize(IMAGENET_SIZE, IMAGENET_SIZE),
albumentations.Normalize(mean=(0.456, 0.456, 0.456), std=(0.224, 0.224, 0.224), max_pixel_value=255.0, p=1.0)
])
val_transform = albumentations.Compose([
albumentations.Resize(IMAGENET_SIZE, IMAGENET_SIZE),
albumentations.Normalize(mean=(0.456, 0.456, 0.456), std=(0.224, 0.224, 0.224), max_pixel_value=255.0, p=1.0)
])
return train_transform, val_transform
def generate_random_list(length):
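    # Build a weighted index pool: indices near the middle of a study get larger weights,
    # so central slices are sampled more often than the first and last slices.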
new_list = []
for i in range(length):
if i <= length/2:
weight = int(i/4)
else:
weight = int((length - i)/4)
weight = np.max([1, weight])
new_list += [i]*weight
return new_list
class RSNA_Dataset_train_by_study_context(data.Dataset):
def __init__(self,
df = None,
name_list = None,
transform = None
):
self.df = df[df['study_instance_uid'].isin(name_list)]
self.name_list = name_list
self.transform = transform
def __getitem__(self, idx):
study_name = self.name_list[idx % len(self.name_list)]
study_train_df = self.df[self.df['study_instance_uid']==study_name]
print(study_train_df.head())
study_index = random.choice(generate_random_list(study_train_df.shape[0]-1))
slice_id = study_name + '_' + str(study_index)
filename = study_train_df[study_train_df['slice_id']==slice_id]['filename'].values[0]
if study_index == (study_train_df.shape[0]-1):
filename_up = filename
else:
slice_id_up = study_name + '_' + str(study_index+1)
filename_up = study_train_df[study_train_df['slice_id']==slice_id_up]['filename'].values[0]
if study_index == 0:
filename_down = filename
else:
slice_id_down = study_name + '_' + str(study_index-1)
filename_down = study_train_df[study_train_df['slice_id']==slice_id_down]['filename'].values[0]
# print(train_png_dir)
# print("\n")
# print(filename)
image = cv2.imread(train_png_dir + filename, 0)
image = cv2.resize(image, (512, 512))
image_up = cv2.imread(train_png_dir + filename_up, 0)
image_up = cv2.resize(image_up, (512, 512))
image_down = cv2.imread(train_png_dir + filename_down, 0)
image_down = cv2.resize(image_down, (512, 512))
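        # Stack the neighbouring slices as channels (slice above, current slice, slice below)
        # so the 2D model sees some volumetric context around the sampled slice.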
image_cat = np.concatenate([image_up[:,:,np.newaxis], image[:,:,np.newaxis], image_down[:,:,np.newaxis]],2)
label = torch.FloatTensor(study_train_df[study_train_df['filename']==filename].loc[:, 'any': 'subdural'].values)
if random.random() < 0.5:
image_cat = cv2.cvtColor(image_cat, cv2.COLOR_BGR2RGB)
image_cat = aug_image(image_cat, is_infer=False)
if self.transform is not None:
augmented = self.transform(image=image_cat)
image_cat = augmented['image'].transpose(2, 0, 1)
# print(label)
# exit(0)
return image_cat, label
def __len__(self):
return len(self.name_list) * 4
class RSNA_Dataset_val_by_study_context(data.Dataset):
def __init__(self,
df = None,
name_list = None,
transform = None
):
self.df = df
self.name_list = name_list
self.transform = transform
def __getitem__(self, idx):
filename = self.name_list[idx % len(self.name_list)]
filename_train_df = self.df[self.df['filename']==filename]
study_name = filename_train_df['study_instance_uid'].values[0]
study_index = int(filename_train_df['slice_id'].values[0].split('_')[-1])
study_train_df = self.df[self.df['study_instance_uid']==study_name]
if study_index == (study_train_df.shape[0]-1):
filename_up = filename
else:
slice_id_up = study_name + '_' + str(study_index+1)
filename_up = study_train_df[study_train_df['slice_id']==slice_id_up]['filename'].values[0]
if study_index == 0:
filename_down = filename
else:
slice_id_down = study_name + '_' + str(study_index-1)
filename_down = study_train_df[study_train_df['slice_id']==slice_id_down]['filename'].values[0]
image = cv2.imread(train_png_dir + filename, 0)
image = cv2.resize(image, (512, 512))
image_up = cv2.imread(train_png_dir + filename_up, 0)
image_up = cv2.resize(image_up, (512, 512))
image_down = cv2.imread(train_png_dir + filename_down, 0)
image_down = cv2.resize(image_down, (512, 512))
image_cat = np.concatenate([image_up[:,:,np.newaxis], image[:,:,np.newaxis], image_down[:,:,np.newaxis]],2)
label = torch.FloatTensor(study_train_df[study_train_df['filename']==filename].loc[:, 'any':'subdural'].values)
image_cat = aug_image(image_cat, is_infer=True)
if self.transform is not None:
augmented = self.transform(image=image_cat)
image_cat = augmented['image'].transpose(2, 0, 1)
return image_cat, label
def __len__(self):
return len(self.name_list)
def randomHorizontalFlip(image, u=0.5):
if np.random.random() < u:
image = cv2.flip(image, 1)
return image
def randomVerticleFlip(image, u=0.5):
if np.random.random() < u:
image = cv2.flip(image, 0)
return image
def randomRotate90(image, u=0.5):
if np.random.random() < u:
image[:,:,0:3] = np.rot90(image[:,:,0:3])
return image
#===================================================origin=============================================================
def random_cropping(image, ratio=0.8, is_random = True):
height, width, _ = image.shape
target_h = int(height*ratio)
target_w = int(width*ratio)
if is_random:
start_x = random.randint(0, width - target_w)
start_y = random.randint(0, height - target_h)
else:
start_x = ( width - target_w ) // 2
start_y = ( height - target_h ) // 2
zeros = image[start_y:start_y+target_h,start_x:start_x+target_w,:]
zeros = cv2.resize(zeros ,(width,height))
return zeros
def cropping(image, ratio=0.8, code = 0):
height, width, _ = image.shape
target_h = int(height*ratio)
target_w = int(width*ratio)
if code==0:
start_x = ( width - target_w ) // 2
start_y = ( height - target_h ) // 2
elif code == 1:
start_x = 0
start_y = 0
elif code == 2:
start_x = width - target_w
start_y = 0
elif code == 3:
start_x = 0
start_y = height - target_h
elif code == 4:
start_x = width - target_w
start_y = height - target_h
elif code == -1:
return image
zeros = image[start_y:start_y+target_h,start_x:start_x+target_w,:]
zeros = cv2.resize(zeros ,(width,height))
return zeros
def random_erasing(img, probability=0.5, sl=0.02, sh=0.4, r1=0.3):
if random.uniform(0, 1) > probability:
return img
for attempt in range(100):
area = img.shape[0] * img.shape[1]
target_area = random.uniform(sl, sh) * area
aspect_ratio = random.uniform(r1, 1 / r1)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < img.shape[1] and h < img.shape[0]:
x1 = random.randint(0, img.shape[0] - h)
y1 = random.randint(0, img.shape[1] - w)
if img.shape[2] == 3:
img[x1:x1 + h, y1:y1 + w, :] = 0.0
else:
print('!!!!!!!! random_erasing dim wrong!!!!!!!!!!!')
return
return img
return img
def randomShiftScaleRotate(image,
shift_limit=(-0.0, 0.0),
scale_limit=(-0.0, 0.0),
rotate_limit=(-0.0, 0.0),
aspect_limit=(-0.0, 0.0),
borderMode=cv2.BORDER_CONSTANT, u=0.5):
if np.random.random() < u:
height, width, channel = image.shape
angle = np.random.uniform(rotate_limit[0], rotate_limit[1])
scale = np.random.uniform(1 + scale_limit[0], 1 + scale_limit[1])
aspect = np.random.uniform(1 + aspect_limit[0], 1 + aspect_limit[1])
sx = scale * aspect / (aspect ** 0.5)
sy = scale / (aspect ** 0.5)
dx = round(np.random.uniform(shift_limit[0], shift_limit[1]) * width)
dy = round(np.random.uniform(shift_limit[0], shift_limit[1]) * height)
cc = np.math.cos(angle / 180 * np.math.pi) * sx
ss = np.math.sin(angle / 180 * np.math.pi) * sy
rotate_matrix = np.array([[cc, -ss], [ss, cc]])
box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
box1 = box0 - np.array([width / 2, height / 2])
box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx, height / 2 + dy])
box0 = box0.astype(np.float32)
box1 = box1.astype(np.float32)
mat = cv2.getPerspectiveTransform(box0, box1)
image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
borderValue=(
0, 0,
0,))
return image
def aug_image(image, is_infer=False):
if is_infer:
image = randomHorizontalFlip(image, u=0)
image = np.asarray(image)
image = cropping(image, ratio=0.8, code=0)
return image
else:
image = randomHorizontalFlip(image)
height, width, _ = image.shape
image = randomShiftScaleRotate(image,
shift_limit=(-0.1, 0.1),
scale_limit=(-0.1, 0.1),
aspect_limit=(-0.1, 0.1),
rotate_limit=(-30, 30))
image = cv2.resize(image, (width, height))
image = random_erasing(image, probability=0.5, sl=0.02, sh=0.4, r1=0.3)
ratio = random.uniform(0.6,0.99)
image = random_cropping(image, ratio=ratio, is_random=True)
return image
def generate_dataset_loader(df_all, c_train, train_transform, train_batch_size, c_val, val_transform, val_batch_size, workers):
train_dataset = RSNA_Dataset_train_by_study_context(df_all, c_train, train_transform)
val_dataset = RSNA_Dataset_val_by_study_context(df_all, c_val, val_transform)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=train_batch_size,
shuffle=True,
num_workers=workers,
pin_memory=True,
drop_last=True)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=val_batch_size,
shuffle=False,
num_workers=workers,
pin_memory=True,
drop_last=False)
return train_loader, val_loader
| [
"torch.FloatTensor",
"torch.utils.data.DataLoader"
] | 1.1.0 | BhaveshJP25/RSNA | 48d85faf82651b1ae4fdcd829ce2d4978a858d3f |
0.4 | import torch as th
from torch.distributions import Categorical
from .epsilon_schedules import DecayThenFlatSchedule
REGISTRY = {}
class MultinomialActionSelector():
def __init__(self, args):
self.args = args
self.schedule = DecayThenFlatSchedule(args.epsilon_start, args.epsilon_finish, args.epsilon_anneal_time,
decay="linear")
self.epsilon = self.schedule.eval(0)
self.test_greedy = getattr(args, "test_greedy", True)
def select_action(self, agent_inputs, avail_actions, t_env, test_mode=False):
masked_policies = agent_inputs.clone()
masked_policies[avail_actions == 0.0] = 0.0
self.epsilon = self.schedule.eval(t_env)
if test_mode and self.test_greedy:
picked_actions = masked_policies.max(dim=2)[1]
else:
picked_actions = Categorical(masked_policies).sample().long()
return picked_actions
REGISTRY["multinomial"] = MultinomialActionSelector
class EpsilonGreedyActionSelector():
def __init__(self, args):
self.args = args
self.schedule = DecayThenFlatSchedule(args.epsilon_start, args.epsilon_finish, args.epsilon_anneal_time,
decay="linear")
self.epsilon = self.schedule.eval(0)
def select_action(self, agent_inputs, avail_actions, t_env, test_mode=False):
        # Assuming agent_inputs is a batch of Q-values for each agent
self.epsilon = self.schedule.eval(t_env)
if test_mode:
# Greedy action selection only
self.epsilon = 0.0
# mask actions that are excluded from selection -> use avail_actions for this
masked_q_values = agent_inputs.clone()
masked_q_values[avail_actions == 0.0] = -float("inf") # should never be selected!
# Randomly decide for each agent to go with random action and explore
random_numbers = th.rand_like(agent_inputs[:, :, 0])
pick_random = (random_numbers < self.epsilon).long() # pick can either be 0 or 1 (=use random)
# Generate random action from available ones
random_actions = Categorical(avail_actions.float()).sample().long()
# If pick_random = 1 use the random action else use the action chosen by q value (masked by available ones)
picked_actions = pick_random * random_actions + (1 - pick_random) * masked_q_values.max(dim=2)[1]
return picked_actions
REGISTRY["epsilon_greedy"] = EpsilonGreedyActionSelector
| [
"torch.distributions.Categorical",
"torch.rand_like"
] | 0.4.1 | PMatthaei/pymarl | eeec978e930c9e36d8102724c3b4d0459547cb36 |
1.3 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import pytest
from sklearn.metrics import accuracy_score as sk_accuracy
from torch import tensor
from tests.classification.inputs import _input_binary, _input_binary_prob
from tests.classification.inputs import _input_multiclass as _input_mcls
from tests.classification.inputs import _input_multiclass_prob as _input_mcls_prob
from tests.classification.inputs import _input_multidim_multiclass as _input_mdmc
from tests.classification.inputs import _input_multidim_multiclass_prob as _input_mdmc_prob
from tests.classification.inputs import _input_multilabel as _input_mlb
from tests.classification.inputs import _input_multilabel_multidim as _input_mlmd
from tests.classification.inputs import _input_multilabel_multidim_prob as _input_mlmd_prob
from tests.classification.inputs import _input_multilabel_prob as _input_mlb_prob
from tests.helpers import seed_all
from tests.helpers.testers import THRESHOLD, MetricTester
from torchmetrics import Accuracy
from torchmetrics.functional import accuracy
from torchmetrics.utilities.checks import _input_format_classification
from torchmetrics.utilities.enums import DataType
seed_all(42)
def _sk_accuracy(preds, target, subset_accuracy):
sk_preds, sk_target, mode = _input_format_classification(preds, target, threshold=THRESHOLD)
sk_preds, sk_target = sk_preds.numpy(), sk_target.numpy()
if mode == DataType.MULTIDIM_MULTICLASS and not subset_accuracy:
sk_preds, sk_target = np.transpose(sk_preds, (0, 2, 1)), np.transpose(sk_target, (0, 2, 1))
sk_preds, sk_target = sk_preds.reshape(-1, sk_preds.shape[2]), sk_target.reshape(-1, sk_target.shape[2])
elif mode == DataType.MULTIDIM_MULTICLASS and subset_accuracy:
return np.all(sk_preds == sk_target, axis=(1, 2)).mean()
elif mode == DataType.MULTILABEL and not subset_accuracy:
sk_preds, sk_target = sk_preds.reshape(-1), sk_target.reshape(-1)
return sk_accuracy(y_true=sk_target, y_pred=sk_preds)
@pytest.mark.parametrize(
"preds, target, subset_accuracy",
[
(_input_binary_prob.preds, _input_binary_prob.target, False),
(_input_binary.preds, _input_binary.target, False),
(_input_mlb_prob.preds, _input_mlb_prob.target, True),
(_input_mlb_prob.preds, _input_mlb_prob.target, False),
(_input_mlb.preds, _input_mlb.target, True),
(_input_mlb.preds, _input_mlb.target, False),
(_input_mcls_prob.preds, _input_mcls_prob.target, False),
(_input_mcls.preds, _input_mcls.target, False),
(_input_mdmc_prob.preds, _input_mdmc_prob.target, False),
(_input_mdmc_prob.preds, _input_mdmc_prob.target, True),
(_input_mdmc.preds, _input_mdmc.target, False),
(_input_mdmc.preds, _input_mdmc.target, True),
(_input_mlmd_prob.preds, _input_mlmd_prob.target, True),
(_input_mlmd_prob.preds, _input_mlmd_prob.target, False),
(_input_mlmd.preds, _input_mlmd.target, True),
(_input_mlmd.preds, _input_mlmd.target, False),
],
)
class TestAccuracies(MetricTester):
@pytest.mark.parametrize("ddp", [False, True])
@pytest.mark.parametrize("dist_sync_on_step", [False, True])
def test_accuracy_class(self, ddp, dist_sync_on_step, preds, target, subset_accuracy):
self.run_class_metric_test(
ddp=ddp,
preds=preds,
target=target,
metric_class=Accuracy,
sk_metric=partial(_sk_accuracy, subset_accuracy=subset_accuracy),
dist_sync_on_step=dist_sync_on_step,
metric_args={
"threshold": THRESHOLD,
"subset_accuracy": subset_accuracy
},
)
def test_accuracy_fn(self, preds, target, subset_accuracy):
self.run_functional_metric_test(
preds,
target,
metric_functional=accuracy,
sk_metric=partial(_sk_accuracy, subset_accuracy=subset_accuracy),
metric_args={
"threshold": THRESHOLD,
"subset_accuracy": subset_accuracy
},
)
def test_accuracy_differentiability(self, preds, target, subset_accuracy):
self.run_differentiability_test(
preds=preds,
target=target,
metric_module=Accuracy,
metric_functional=accuracy,
metric_args={
"threshold": THRESHOLD,
"subset_accuracy": subset_accuracy
}
)
_l1to4 = [0.1, 0.2, 0.3, 0.4]
_l1to4t3 = np.array([_l1to4, _l1to4, _l1to4])
_l1to4t3_mcls = [_l1to4t3.T, _l1to4t3.T, _l1to4t3.T]
# The preds in these examples always put highest probability on class 3, second highest on class 2,
# third highest on class 1, and lowest on class 0
_topk_preds_mcls = tensor([_l1to4t3, _l1to4t3]).float()
_topk_target_mcls = tensor([[1, 2, 3], [2, 1, 0]])
# Same as the MC case above, but one sample in each batch is sabotaged with a class-0 prediction :)
_topk_preds_mdmc = tensor([_l1to4t3_mcls, _l1to4t3_mcls]).float()
_topk_target_mdmc = tensor([[[1, 1, 0], [2, 2, 2], [3, 3, 3]], [[2, 2, 0], [1, 1, 1], [0, 0, 0]]])
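# Worked example (illustrative) for the multiclass rows below: the top-1 prediction
# is always class 3, so only the single target equal to 3 is a hit -> 1/6; the
# top-2 predictions are {3, 2}, matching the three targets in {2, 3} -> 3/6; the
# top-3 predictions are {3, 2, 1}, matching everything except the one class-0
# target -> 5/6.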
# Replace with a proper sk_metric test once sklearn 0.24 hits :)
@pytest.mark.parametrize(
"preds, target, exp_result, k, subset_accuracy",
[
(_topk_preds_mcls, _topk_target_mcls, 1 / 6, 1, False),
(_topk_preds_mcls, _topk_target_mcls, 3 / 6, 2, False),
(_topk_preds_mcls, _topk_target_mcls, 5 / 6, 3, False),
(_topk_preds_mcls, _topk_target_mcls, 1 / 6, 1, True),
(_topk_preds_mcls, _topk_target_mcls, 3 / 6, 2, True),
(_topk_preds_mcls, _topk_target_mcls, 5 / 6, 3, True),
(_topk_preds_mdmc, _topk_target_mdmc, 1 / 6, 1, False),
(_topk_preds_mdmc, _topk_target_mdmc, 8 / 18, 2, False),
(_topk_preds_mdmc, _topk_target_mdmc, 13 / 18, 3, False),
(_topk_preds_mdmc, _topk_target_mdmc, 1 / 6, 1, True),
(_topk_preds_mdmc, _topk_target_mdmc, 2 / 6, 2, True),
(_topk_preds_mdmc, _topk_target_mdmc, 3 / 6, 3, True),
],
)
def test_topk_accuracy(preds, target, exp_result, k, subset_accuracy):
topk = Accuracy(top_k=k, subset_accuracy=subset_accuracy)
for batch in range(preds.shape[0]):
topk(preds[batch], target[batch])
assert topk.compute() == exp_result
# Test functional
total_samples = target.shape[0] * target.shape[1]
preds = preds.view(total_samples, 4, -1)
target = target.view(total_samples, -1)
assert accuracy(preds, target, top_k=k, subset_accuracy=subset_accuracy) == exp_result
# Only MC and MDMC with probs input type should be accepted for top_k
@pytest.mark.parametrize(
"preds, target",
[
(_input_binary_prob.preds, _input_binary_prob.target),
(_input_binary.preds, _input_binary.target),
(_input_mlb_prob.preds, _input_mlb_prob.target),
(_input_mlb.preds, _input_mlb.target),
(_input_mcls.preds, _input_mcls.target),
(_input_mdmc.preds, _input_mdmc.target),
(_input_mlmd_prob.preds, _input_mlmd_prob.target),
(_input_mlmd.preds, _input_mlmd.target),
],
)
def test_topk_accuracy_wrong_input_types(preds, target):
topk = Accuracy(top_k=1)
with pytest.raises(ValueError):
topk(preds[0], target[0])
with pytest.raises(ValueError):
accuracy(preds[0], target[0], top_k=1)
@pytest.mark.parametrize("top_k, threshold", [(0, 0.5), (None, 1.5)])
def test_wrong_params(top_k, threshold):
preds, target = _input_mcls_prob.preds, _input_mcls_prob.target
with pytest.raises(ValueError):
acc = Accuracy(threshold=threshold, top_k=top_k)
acc(preds, target)
acc.compute()
with pytest.raises(ValueError):
accuracy(preds, target, threshold=threshold, top_k=top_k)
_ignoreindex_binary_preds = tensor([1, 0, 1, 1, 0, 1, 0])
_ignoreindex_target_preds = tensor([1, 1, 0, 1, 1, 1, 1])
_ignoreindex_binary_preds_prob = tensor([0.3, 0.6, 0.1, 0.3, 0.7, 0.9, 0.4])
_ignoreindex_mc_target = tensor([0, 1, 2])
_ignoreindex_mc_preds = tensor([[0.35, 0.4, 0.25], [0.1, 0.5, 0.4], [0.2, 0.1, 0.7]])
_ignoreindex_ml_target = tensor([[0, 1, 0], [1, 1, 0], [0, 0, 0]])
_ignoreindex_ml_preds = tensor([[0.9, 0.8, 0.75], [0.6, 0.7, 0.1], [0.6, 0.1, 0.2]])
@pytest.mark.parametrize(
"preds, target, ignore_index, exp_result, subset_accuracy",
[
(_ignoreindex_binary_preds, _ignoreindex_target_preds, 0, 3 / 6, False),
(_ignoreindex_binary_preds, _ignoreindex_target_preds, 1, 0, False),
(_ignoreindex_binary_preds, _ignoreindex_target_preds, None, 3 / 6, False),
(_ignoreindex_binary_preds_prob, _ignoreindex_target_preds, 0, 3 / 6, False),
(_ignoreindex_binary_preds_prob, _ignoreindex_target_preds, 1, 1, False),
(_ignoreindex_mc_preds, _ignoreindex_mc_target, 0, 1, False),
(_ignoreindex_mc_preds, _ignoreindex_mc_target, 1, 1 / 2, False),
(_ignoreindex_mc_preds, _ignoreindex_mc_target, 2, 1 / 2, False),
(_ignoreindex_ml_preds, _ignoreindex_ml_target, 0, 2 / 3, False),
(_ignoreindex_ml_preds, _ignoreindex_ml_target, 1, 2 / 3, False),
]
)
def test_ignore_index(preds, target, ignore_index, exp_result, subset_accuracy):
ignoreindex = Accuracy(ignore_index=ignore_index, subset_accuracy=subset_accuracy)
for batch in range(preds.shape[0]):
ignoreindex(preds[batch], target[batch])
assert ignoreindex.compute() == exp_result
assert accuracy(preds, target, ignore_index=ignore_index, subset_accuracy=subset_accuracy) == exp_result
| [
"torch.tensor"
] | 1.3.1 | avinashsai/metrics | e383af24085bf7c0bd4e08db2757c25ff4feccdc |
1.7 | import torch.nn as nn
from wilds.common.metrics.loss import ElementwiseLoss, Loss, MultiTaskLoss
from wilds.common.metrics.all_metrics import MSE
def initialize_loss(config, d_out):
if config.get('loss_function') == 'cross_entropy':
return ElementwiseLoss(loss_fn=nn.CrossEntropyLoss(reduction='none'))
elif config.get('loss_function') == 'lm_cross_entropy':
return MultiTaskLoss(loss_fn=nn.CrossEntropyLoss(reduction='none'))
elif config.get('loss_function') == 'mse':
return MSE(name='loss')
elif config.get('loss_function') == 'multitask_bce':
return MultiTaskLoss(loss_fn=nn.BCEWithLogitsLoss(reduction='none'))
elif config.get('loss_function') == 'fasterrcnn_criterion':
from models.detection.fasterrcnn import FasterRCNNLoss
return ElementwiseLoss(loss_fn=FasterRCNNLoss(config.get('device')))
else:
        raise ValueError(f'loss_function {config.get("loss_function")!r} not recognized')
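# Hedged usage sketch (not part of the original module): `config` only needs a
# dict-like .get('loss_function'); `d_out` is accepted for interface
# compatibility and not used by this helper.
def _example_initialize_loss():
    return initialize_loss({'loss_function': 'multitask_bce'}, d_out=128)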
| [
"torch.nn.CrossEntropyLoss",
"torch.nn.BCEWithLogitsLoss"
] | 1.7.0 | itsjohnward/wilds | aeafefd01456840c7bd5173d714b184ec86758af |
1.10 | from abc import ABC, abstractmethod
import random
import hashlib
import os
import traceback
import pathlib
import h5py
import torch
import numpy as np
from nn_analysis import utils
def attach_hooks(model, layer_names, get_hook):
handles = []
for layer_name, module in model.named_modules():
if layer_name in layer_names:
hook = get_hook(layer_name)
handle = module.register_forward_hook(hook)
handles.append(handle)
return handles
def remove_hooks(handles):
for handle in handles:
handle.remove()
def compute_sizes(model, layer_names, dataset, device='cpu'):
dataloader = torch.utils.data.DataLoader(dataset, batch_size=2)
images = next(iter(dataloader))[0].to(device)
sizes = {}
def get_hook(layer_name):
def hook(_0, _1, out):
sizes[layer_name] = utils.prod(out.size()[1:])
return hook
try:
handles = attach_hooks(model, layer_names, get_hook)
model.eval()
with torch.no_grad():
model(images)
finally:
remove_hooks(handles)
targets = next(iter(dataloader))[1]
assert targets.ndim <= 2
if targets.ndim == 1:
sizes['target'] = 1
else:
sizes['target'] = targets.size()[1]
sizes['dataset'] = dataset.shape
return sizes
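# Hedged usage sketch (illustrative only): a toy dataset that yields
# (image, target) pairs and exposes the `.shape` attribute compute_sizes reads;
# the layer names '0' and '1' refer to the nn.Sequential submodules.
def _example_compute_sizes():
    import torch.nn as nn
    class _ToyDataset(torch.utils.data.Dataset):
        shape = (8,)                       # single index dimension with 8 items
        def __len__(self):
            return 8
        def __getitem__(self, i):
            return torch.randn(3, 32, 32), torch.tensor(0)
    model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))
    return compute_sizes(model, ['0', '1'], _ToyDataset())
    # expected result: {'0': 3072, '1': 10, 'target': 1, 'dataset': (8,)}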
def create_group_datasets(grp, model, layer_names, sizes, meta_dicts=None, dtype='float32'):
for layer_name, module in model.named_modules():
if layer_name in layer_names:
layer_grp = grp.create_group(layer_name) # Must be new, cannot overwrite
if meta_dicts is not None:
for k, v in meta_dicts[0].items():
if layer_name in v.keys():
layer_grp.attrs[k] = v[layer_name]
for k, v in meta_dicts[1].items():
if layer_name in v.keys():
layer_grp[k] = v[layer_name] # Too large to fit in as attribute
layer_grp.create_dataset('y', shape=(*sizes['dataset'],sizes[layer_name]), dtype=dtype) # Must be new, cannot overwrite
grp.create_dataset('x', shape=(*sizes['dataset'],sizes['target']), dtype=dtype) # Must be new, cannot overwrite
def save_dataset(filename, path, model, layer_names, dataset, device='cpu', batch_size=128, postprocessor=None, dtype='float32', log=False):
sizes = compute_sizes(model, layer_names, dataset, device=device)
if postprocessor is None:
postprocess = lambda y, *args, **kwargs: y
else:
sizes = postprocessor.configure(sizes)
postprocess = postprocessor.process
meta_dicts = postprocessor.meta_dicts if postprocessor is not None else None
with h5py.File(filename, 'a') as f:
grp = f[path]
create_group_datasets(grp, model, layer_names, sizes, meta_dicts=meta_dicts, dtype=dtype)
model.eval()
def get_hook(layer_name):
def hook(_0, _1, out):
y = out.detach()
y = y.reshape(y.size(0),-1)
activations = postprocess(y,layer_name,device=device,dtype=dtype).cpu()
with h5py.File(filename, 'a') as f:
# print(f"Activations size: {activations.size()}")
# print(f"file size: {os.path.getsize(filename)}")
try:
f[path][layer_name]['y'][indices] = activations
except TypeError as err:
                        # Fancy indexing cannot handle multi-dimensional individual element indexing
for j, index in enumerate(zip(*indices)):
f[path][layer_name]['y'][index] = activations[j]
return hook
try:
handles = attach_hooks(model, layer_names, get_hook)
dl = torch.utils.data.DataLoader(dataset,batch_size=batch_size,shuffle=False)
print_freq = len(dl)//10 if len(dl) > 10 else 1
for i, (images, targets, indices) in enumerate(dl):
if i % print_freq == 0:
print(f"Processing batch {i}/{len(dl)}")
images = images.to(device)
if indices.ndim == 1:
indices = indices.view(-1,1)
indices = tuple(indices.t().long().numpy())
if targets.ndim == 1:
targets = targets.view(-1,1)
with h5py.File(filename, 'a') as f:
try:
f[path]['x'][indices] = targets
except TypeError as err:
                    # Fancy indexing cannot handle multi-dimensional individual element indexing
for j, index in enumerate(zip(*indices)):
f[path]['x'][index] = targets[j]
with torch.no_grad():
model(images)
finally:
remove_hooks(handles)
def save(filename, model, model_name, epoch, layer_names, datasets, dataset_names, seeds=None, device='cpu', batch_size=256, postprocessor=None, dtype='float32', log=False):
    assert seeds is None or len(dataset_names) == len(seeds)
pathlib.Path(filename).parent.mkdir(parents=True, exist_ok=True)
if epoch is None:
model_version = '0000'
else:
model_version = f'{epoch:04d}'
with h5py.File(filename, 'a') as f:
model_grp = f.require_group(model_name)
model_version_grp = model_grp.require_group(model_version)
for i, dataset in enumerate(datasets):
print(f"Processing dataset {i}: {dataset_names[i]}")
dataset_grp = model_version_grp.require_group(dataset_names[i])
if seeds is not None:
dataset_grp.attrs['seed'] = seeds[i]
for i, dataset in enumerate(datasets):
with h5py.File(filename, 'a') as f:
path = f[model_name][model_version][dataset_names[i]].name
if seeds is not None:
with utils.set_seed(seeds[i]):
save_dataset(filename, path, model, layer_names, dataset, device=device, batch_size=batch_size, postprocessor=postprocessor, dtype=dtype, log=log)
else:
save_dataset(filename, path, model, layer_names, dataset, device=device, batch_size=batch_size, postprocessor=postprocessor, dtype=dtype, log=log)
def load(filename, model_name, epoch, dataset_name, layer_name):
if epoch is None:
model_version = '0000'
else:
model_version = f'{epoch:04d}'
with h5py.File(filename, 'r') as f:
grp = f[model_name][model_version][dataset_name]
y = grp[layer_name]['y'][...]
return y
def load_x(filename, model_name, epoch, dataset_name):
if epoch is None:
model_version = '0000'
else:
model_version = f'{epoch:04d}'
with h5py.File(filename, 'r') as f:
grp = f[model_name][model_version][dataset_name]
x = grp['x'][...]
return x
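# Hedged usage sketch: the file, model and dataset names below are hypothetical;
# `load` returns activations shaped (*dataset_shape, n_units) and `load_x` the
# stored targets shaped (*dataset_shape, target_dim).
def _example_load_activations(filename):
    y = load(filename, 'resnet50', 10, 'imagenet_val', 'layer4')
    x = load_x(filename, 'resnet50', 10, 'imagenet_val')
    return y, x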
class Processor(ABC):
@property
@abstractmethod
def meta_dicts(self):
# List of two dicts, the first one containing meta attributes and the second one containing meta datasets
pass
@abstractmethod
def configure(self, layer_sizes):
pass
@abstractmethod
def process(self, tensor, layer_name, **kwargs):
pass
class Compose(Processor):
def __init__(self, processors):
self.processors = processors
@property
def meta_dicts(self):
out = [{},{}]
        for processor in self.processors:
out[0].update(processor.meta_dicts[0])
out[1].update(processor.meta_dicts[1])
return out
def configure(self, layer_sizes):
for processor in self.processors:
layer_sizes = processor.configure(layer_sizes)
return layer_sizes
def process(self, tensor, layer_name, **kwargs):
for processor in self.processors:
tensor = processor.process(tensor, layer_name, **kwargs)
return tensor
class Sampler(Processor):
def __init__(self, n_samples, set_seed=True):
self.n_samples = n_samples
self.indices = {}
self.configured = False
if set_seed:
self.seeds = {}
self.set_seed = set_seed
@property
def meta_dicts(self):
if self.set_seed:
return [{'seed': self.seeds}, {'indices': self.indices}]
return [{}, {'indices': self.indices}]
def configure(self, sizes):
layer_sizes = {k: v for k, v in sizes.items() if k not in ['target', 'dataset']}
output_sizes = {}
for layer_name, size in layer_sizes.items():
if self.n_samples > size:
self.indices[layer_name] = torch.arange(size)
output_sizes[layer_name] = size
else:
if self.set_seed:
seed = int(hashlib.sha256(layer_name.encode('utf-8')).hexdigest(), 16) % (2**32) # Get seed corresponding to layer
self.seeds[layer_name] = seed
with utils.set_seed(seed):
self.indices[layer_name] = torch.from_numpy(np.random.choice(size,size=self.n_samples,replace=False)).long()
else:
self.indices[layer_name] = torch.from_numpy(np.random.choice(size,size=self.n_samples,replace=False)).long()
output_sizes[layer_name] = self.n_samples
self.configured = True
output_sizes.update({'target': sizes['target'], 'dataset': sizes['dataset']})
return output_sizes
def process(self, tensor, layer_name, **kwargs):
"""
tensor - (batch_size, N)
"""
assert self.configured
assert tensor.ndim == 2
layer_indices = self.indices[layer_name]
if tensor.is_cuda:
            layer_indices = layer_indices.to(tensor.get_device())
return tensor[:,layer_indices]
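# Hedged usage sketch: subsample 100 of 512 units from one layer's activations.
def _example_sampler():
    sampler = Sampler(n_samples=100)
    sampler.configure({'layer1': 512, 'target': 1, 'dataset': (8,)})
    activations = torch.randn(4, 512)              # a batch of layer1 activations
    return sampler.process(activations, 'layer1')  # -> shape (4, 100)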
| [
"torch.no_grad",
"torch.utils.data.DataLoader",
"torch.arange"
] | 1.10.1 | hchau630/nn-analysis | 0fbe7ad7b2b4566b9f88d8f21413a6d405f96bdc |
1.10 | from argparse import Namespace
import torch
import torch.nn as nn
import torchvision.models as models
def off_diagonal(x):
# return a flattened view of the off-diagonal elements of a square matrix
n, m = x.shape
assert n == m
return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()
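# Hedged sanity check (illustrative only): for a 3x3 matrix this returns the
# six off-diagonal entries in row-major order.
def _example_off_diagonal():
    m = torch.arange(9.).view(3, 3)
    return off_diagonal(m)   # tensor([1., 2., 3., 5., 6., 7.])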
class BarlowTwins(nn.Module):
def __init__(self, args):
super().__init__()
self.args = args
self.backbone = models.resnet50(zero_init_residual=True)
self.backbone.fc = nn.Identity()
# projector
sizes = [2048] + list(map(int, args.projector.split('-')))
layers = []
for i in range(len(sizes) - 2):
layers.append(nn.Linear(sizes[i], sizes[i + 1], bias=False))
layers.append(nn.BatchNorm1d(sizes[i + 1]))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Linear(sizes[-2], sizes[-1], bias=False))
self.projector = nn.Sequential(*layers)
self.projector_sizes = sizes
# normalization layer for the representations z1 and z2
self.bn = nn.BatchNorm1d(sizes[-1], affine=False)
def forward(self, x):
return self.bn(self.projector(self.backbone(x)))
def loss_forward(self, y1, y2):
z1 = self.projector(self.backbone(y1))
z2 = self.projector(self.backbone(y2))
# empirical cross-correlation matrix
c = self.bn(z1).T @ self.bn(z2)
# sum the cross-correlation matrix between all gpus
c.div_(self.args.batch_size)
torch.distributed.all_reduce(c)
# use --scale-loss to multiply the loss by a constant factor
# see the Issues section of the readme
on_diag = torch.diagonal(c).add_(-1).pow_(2).sum().mul(self.args.scale_loss)
off_diag = off_diagonal(c).pow_(2).sum().mul(self.args.scale_loss)
loss = on_diag + self.args.lambd * off_diag
return loss
def barlowtwins(projector='8192-8192-8192', batch_size=1024, scale_loss=0.024, lambd=0.0051, **kwargs):
args = Namespace(projector=projector, batch_size=batch_size, scale_loss=scale_loss, lambd=lambd, **kwargs)
    return BarlowTwins(args)
| [
"torch.nn.Linear",
"torch.nn.Identity",
"torch.diagonal",
"torch.nn.Sequential",
"torch.nn.ReLU",
"torch.nn.BatchNorm1d",
"torch.distributed.all_reduce"
] | 1.10.1 | hchau630/nn-analysis | 0fbe7ad7b2b4566b9f88d8f21413a6d405f96bdc |
1.0 | from distutils.version import LooseVersion
import logging
import math
import random
import six
import numpy as np
import torch
import torch.nn.functional as F
from argparse import Namespace
from espnet.nets.ctc_prefix_score import CTCPrefixScore
from espnet.nets.ctc_prefix_score import CTCPrefixScoreTH
from espnet.nets.e2e_asr_common import end_detect
from espnet.nets.pytorch_backend.rnn.attentions import att_to_numpy
from espnet.nets.pytorch_backend.nets_utils import mask_by_length
from espnet.nets.pytorch_backend.nets_utils import pad_list
from espnet.nets.pytorch_backend.nets_utils import th_accuracy
from espnet.nets.pytorch_backend.nets_utils import to_device
from espnet.nets.scorer_interface import ScorerInterface
MAX_DECODER_OUTPUT = 5
CTC_SCORING_RATIO = 1.5
class Decoder(torch.nn.Module, ScorerInterface):
"""Decoder module
:param int eprojs: # encoder projection units
:param int odim: dimension of outputs
:param str dtype: gru or lstm
:param int dlayers: # decoder layers
:param int dunits: # decoder units
:param int sos: start of sequence symbol id
:param int eos: end of sequence symbol id
:param torch.nn.Module att: attention module
:param int verbose: verbose level
:param list char_list: list of character strings
:param ndarray labeldist: distribution of label smoothing
:param float lsm_weight: label smoothing weight
:param float sampling_probability: scheduled sampling probability
:param float dropout: dropout rate
    :param bool context_residual: if True, use context vector for token generation
    :param bool replace_sos: use for multilingual (speech/text) translation
"""
def __init__(self, eprojs, odim, dtype, dlayers, dunits, sos, eos, att, verbose=0,
char_list=None, labeldist=None, lsm_weight=0., sampling_probability=0.0,
dropout=0.0, context_residual=False, replace_sos=False, num_encs=1):
torch.nn.Module.__init__(self)
self.dtype = dtype
self.dunits = dunits
self.dlayers = dlayers
self.context_residual = context_residual
self.embed = torch.nn.Embedding(odim, dunits)
self.dropout_emb = torch.nn.Dropout(p=dropout)
self.decoder = torch.nn.ModuleList()
self.dropout_dec = torch.nn.ModuleList()
self.decoder += [
torch.nn.LSTMCell(dunits + eprojs, dunits) if self.dtype == "lstm" else torch.nn.GRUCell(dunits + eprojs,
dunits)]
self.dropout_dec += [torch.nn.Dropout(p=dropout)]
for _ in six.moves.range(1, self.dlayers):
self.decoder += [
torch.nn.LSTMCell(dunits, dunits) if self.dtype == "lstm" else torch.nn.GRUCell(dunits, dunits)]
self.dropout_dec += [torch.nn.Dropout(p=dropout)]
# NOTE: dropout is applied only for the vertical connections
# see https://arxiv.org/pdf/1409.2329.pdf
self.ignore_id = -1
if context_residual:
self.output = torch.nn.Linear(dunits + eprojs, odim)
else:
self.output = torch.nn.Linear(dunits, odim)
self.loss = None
self.att = att
self.dunits = dunits
self.sos = sos
self.eos = eos
self.odim = odim
self.verbose = verbose
self.char_list = char_list
# for label smoothing
self.labeldist = labeldist
self.vlabeldist = None
self.lsm_weight = lsm_weight
self.sampling_probability = sampling_probability
self.dropout = dropout
self.num_encs = num_encs
# for multilingual translation
self.replace_sos = replace_sos
self.logzero = -10000000000.0
def zero_state(self, hs_pad):
return hs_pad.new_zeros(hs_pad.size(0), self.dunits)
def rnn_forward(self, ey, z_list, c_list, z_prev, c_prev):
if self.dtype == "lstm":
z_list[0], c_list[0] = self.decoder[0](ey, (z_prev[0], c_prev[0]))
for l in six.moves.range(1, self.dlayers):
z_list[l], c_list[l] = self.decoder[l](
self.dropout_dec[l - 1](z_list[l - 1]), (z_prev[l], c_prev[l]))
else:
z_list[0] = self.decoder[0](ey, z_prev[0])
for l in six.moves.range(1, self.dlayers):
z_list[l] = self.decoder[l](self.dropout_dec[l - 1](z_list[l - 1]), z_prev[l])
return z_list, c_list
def forward(self, hs_pad, hlens, ys_pad, strm_idx=0, tgt_lang_ids=None):
"""Decoder forward
:param torch.Tensor hs_pad: batch of padded hidden state sequences (B, Tmax, D)
[in multi-encoder case,
list of torch.Tensor, [(B, Tmax_1, D), (B, Tmax_2, D), ..., ] ]
:param torch.Tensor hlens: batch of lengths of hidden state sequences (B)
[in multi-encoder case, list of torch.Tensor, [(B), (B), ..., ]
:param torch.Tensor ys_pad: batch of padded character id sequence tensor (B, Lmax)
:param int strm_idx: stream index indicates the index of decoding stream.
:param torch.Tensor tgt_lang_ids: batch of target language id tensor (B, 1)
:return: attention loss value
:rtype: torch.Tensor
:return: accuracy
:rtype: float
"""
        # to support multiple-encoder ASR mode; in single-encoder mode, convert torch.Tensor to a list of torch.Tensor
if self.num_encs == 1:
hs_pad = [hs_pad]
hlens = [hlens]
# TODO(kan-bayashi): need to make more smart way
ys = [y[y != self.ignore_id] for y in ys_pad] # parse padded ys
# attention index for the attention module
# in SPA (speaker parallel attention), att_idx is used to select attention module. In other cases, it is 0.
att_idx = min(strm_idx, len(self.att) - 1)
# hlens should be list of list of integer
hlens = [list(map(int, hlens[idx])) for idx in range(self.num_encs)]
self.loss = None
# prepare input and output word sequences with sos/eos IDs
eos = ys[0].new([self.eos])
sos = ys[0].new([self.sos])
if self.replace_sos:
ys_in = [torch.cat([idx, y], dim=0) for idx, y in zip(tgt_lang_ids, ys)]
else:
ys_in = [torch.cat([sos, y], dim=0) for y in ys]
ys_out = [torch.cat([y, eos], dim=0) for y in ys]
# padding for ys with -1
# pys: utt x olen
ys_in_pad = pad_list(ys_in, self.eos)
ys_out_pad = pad_list(ys_out, self.ignore_id)
# get dim, length info
batch = ys_out_pad.size(0)
olength = ys_out_pad.size(1)
for idx in range(self.num_encs):
logging.info(
self.__class__.__name__ + 'Number of Encoder:{}; enc{}: input lengths: {}.'.format(self.num_encs,
idx + 1, hlens[idx]))
logging.info(self.__class__.__name__ + ' output lengths: ' + str([y.size(0) for y in ys_out]))
# initialization
c_list = [self.zero_state(hs_pad[0])]
z_list = [self.zero_state(hs_pad[0])]
for _ in six.moves.range(1, self.dlayers):
c_list.append(self.zero_state(hs_pad[0]))
z_list.append(self.zero_state(hs_pad[0]))
z_all = []
if self.num_encs == 1:
att_w = None
self.att[att_idx].reset() # reset pre-computation of h
else:
att_w_list = [None] * (self.num_encs + 1) # atts + han
att_c_list = [None] * (self.num_encs) # atts
for idx in range(self.num_encs + 1):
self.att[idx].reset() # reset pre-computation of h in atts and han
# pre-computation of embedding
eys = self.dropout_emb(self.embed(ys_in_pad)) # utt x olen x zdim
# loop for an output sequence
for i in six.moves.range(olength):
if self.num_encs == 1:
att_c, att_w = self.att[att_idx](hs_pad[0], hlens[0], self.dropout_dec[0](z_list[0]), att_w)
else:
for idx in range(self.num_encs):
att_c_list[idx], att_w_list[idx] = self.att[idx](hs_pad[idx], hlens[idx],
self.dropout_dec[0](z_list[0]), att_w_list[idx])
hs_pad_han = torch.stack(att_c_list, dim=1)
hlens_han = [self.num_encs] * len(ys_in)
att_c, att_w_list[self.num_encs] = self.att[self.num_encs](hs_pad_han, hlens_han,
self.dropout_dec[0](z_list[0]),
att_w_list[self.num_encs])
if i > 0 and random.random() < self.sampling_probability:
logging.info(' scheduled sampling ')
z_out = self.output(z_all[-1])
z_out = np.argmax(z_out.detach().cpu(), axis=1)
z_out = self.dropout_emb(self.embed(to_device(self, z_out)))
ey = torch.cat((z_out, att_c), dim=1) # utt x (zdim + hdim)
else:
ey = torch.cat((eys[:, i, :], att_c), dim=1) # utt x (zdim + hdim)
z_list, c_list = self.rnn_forward(ey, z_list, c_list, z_list, c_list)
if self.context_residual:
z_all.append(torch.cat((self.dropout_dec[-1](z_list[-1]), att_c), dim=-1)) # utt x (zdim + hdim)
else:
z_all.append(self.dropout_dec[-1](z_list[-1])) # utt x (zdim)
z_all = torch.stack(z_all, dim=1).view(batch * olength, -1)
# compute loss
y_all = self.output(z_all)
if LooseVersion(torch.__version__) < LooseVersion('1.0'):
reduction_str = 'elementwise_mean'
else:
reduction_str = 'mean'
self.loss = F.cross_entropy(y_all, ys_out_pad.view(-1),
ignore_index=self.ignore_id,
reduction=reduction_str)
# compute perplexity
ppl = math.exp(self.loss.item())
# -1: eos, which is removed in the loss computation
self.loss *= (np.mean([len(x) for x in ys_in]) - 1)
acc = th_accuracy(y_all, ys_out_pad, ignore_label=self.ignore_id)
logging.info('att loss:' + ''.join(str(self.loss.item()).split('\n')))
# show predicted character sequence for debug
if self.verbose > 0 and self.char_list is not None:
ys_hat = y_all.view(batch, olength, -1)
ys_true = ys_out_pad
for (i, y_hat), y_true in zip(enumerate(ys_hat.detach().cpu().numpy()),
ys_true.detach().cpu().numpy()):
if i == MAX_DECODER_OUTPUT:
break
idx_hat = np.argmax(y_hat[y_true != self.ignore_id], axis=1)
idx_true = y_true[y_true != self.ignore_id]
seq_hat = [self.char_list[int(idx)] for idx in idx_hat]
seq_true = [self.char_list[int(idx)] for idx in idx_true]
seq_hat = "".join(seq_hat)
seq_true = "".join(seq_true)
logging.info("groundtruth[%d]: " % i + seq_true)
logging.info("prediction [%d]: " % i + seq_hat)
if self.labeldist is not None:
if self.vlabeldist is None:
self.vlabeldist = to_device(self, torch.from_numpy(self.labeldist))
loss_reg = - torch.sum((F.log_softmax(y_all, dim=1) * self.vlabeldist).view(-1), dim=0) / len(ys_in)
self.loss = (1. - self.lsm_weight) * self.loss + self.lsm_weight * loss_reg
return self.loss, acc, ppl
def recognize_beam(self, h, lpz, recog_args, char_list, rnnlm=None, strm_idx=0):
"""beam search implementation
:param torch.Tensor h: encoder hidden state (T, eprojs)
[in multi-encoder case, list of torch.Tensor, [(T1, eprojs), (T2, eprojs), ...] ]
:param torch.Tensor lpz: ctc log softmax output (T, odim)
[in multi-encoder case, list of torch.Tensor, [(T1, odim), (T2, odim), ...] ]
:param Namespace recog_args: argument Namespace containing options
:param char_list: list of character strings
:param torch.nn.Module rnnlm: language module
:param int strm_idx: stream index for speaker parallel attention in multi-speaker case
:return: N-best decoding results
:rtype: list of dicts
"""
        # to support multiple-encoder ASR mode; in single-encoder mode, convert torch.Tensor to a list of torch.Tensor
if self.num_encs == 1:
h = [h]
lpz = [lpz]
if self.num_encs > 1 and lpz is None:
lpz = [lpz] * self.num_encs
for idx in range(self.num_encs):
logging.info('Number of Encoder:{}; enc{}: input lengths: {}.'.format(self.num_encs, idx + 1, h[0].size(0)))
att_idx = min(strm_idx, len(self.att) - 1)
# initialization
c_list = [self.zero_state(h[0].unsqueeze(0))]
z_list = [self.zero_state(h[0].unsqueeze(0))]
for _ in six.moves.range(1, self.dlayers):
c_list.append(self.zero_state(h[0].unsqueeze(0)))
z_list.append(self.zero_state(h[0].unsqueeze(0)))
if self.num_encs == 1:
a = None
self.att[att_idx].reset() # reset pre-computation of h
else:
a = [None] * (self.num_encs + 1) # atts + han
att_w_list = [None] * (self.num_encs + 1) # atts + han
att_c_list = [None] * (self.num_encs) # atts
for idx in range(self.num_encs + 1):
self.att[idx].reset() # reset pre-computation of h in atts and han
        # search params
beam = recog_args.beam_size
penalty = recog_args.penalty
ctc_weight = getattr(recog_args, "ctc_weight", False) # for NMT
if lpz[0] is not None and self.num_encs > 1:
# weights-ctc, e.g. ctc_loss = w_1*ctc_1_loss + w_2 * ctc_2_loss + w_N * ctc_N_loss
weights_ctc_dec = recog_args.weights_ctc_dec / np.sum(recog_args.weights_ctc_dec) # normalize
logging.info('ctc weights (decoding): ' + ' '.join([str(x) for x in weights_ctc_dec]))
else:
weights_ctc_dec = [1.0]
        # prepare sos
if self.replace_sos and recog_args.tgt_lang:
y = char_list.index(recog_args.tgt_lang)
else:
y = self.sos
logging.info('<sos> index: ' + str(y))
logging.info('<sos> mark: ' + char_list[y])
vy = h[0].new_zeros(1).long()
maxlen = np.amin([h[idx].size(0) for idx in range(self.num_encs)])
if recog_args.maxlenratio != 0:
# maxlen >= 1
maxlen = max(1, int(recog_args.maxlenratio * maxlen))
minlen = int(recog_args.minlenratio * maxlen)
logging.info('max output length: ' + str(maxlen))
logging.info('min output length: ' + str(minlen))
# initialize hypothesis
if rnnlm:
hyp = {'score': 0.0, 'yseq': [y], 'c_prev': c_list,
'z_prev': z_list, 'a_prev': a, 'rnnlm_prev': None}
else:
hyp = {'score': 0.0, 'yseq': [y], 'c_prev': c_list, 'z_prev': z_list, 'a_prev': a}
if lpz[0] is not None:
ctc_prefix_score = [CTCPrefixScore(lpz[idx].detach().numpy(), 0, self.eos, np) for idx in
range(self.num_encs)]
hyp['ctc_state_prev'] = [ctc_prefix_score[idx].initial_state() for idx in range(self.num_encs)]
hyp['ctc_score_prev'] = [0.0] * self.num_encs
if ctc_weight != 1.0:
# pre-pruning based on attention scores
ctc_beam = min(lpz[0].shape[-1], int(beam * CTC_SCORING_RATIO))
else:
ctc_beam = lpz[0].shape[-1]
hyps = [hyp]
ended_hyps = []
for i in six.moves.range(maxlen):
logging.debug('position ' + str(i))
hyps_best_kept = []
for hyp in hyps:
vy.unsqueeze(1)
vy[0] = hyp['yseq'][i]
ey = self.dropout_emb(self.embed(vy)) # utt list (1) x zdim
ey.unsqueeze(0)
if self.num_encs == 1:
att_c, att_w = self.att[att_idx](h[0].unsqueeze(0), [h[0].size(0)],
self.dropout_dec[0](hyp['z_prev'][0]), hyp['a_prev'])
else:
for idx in range(self.num_encs):
att_c_list[idx], att_w_list[idx] = self.att[idx](h[idx].unsqueeze(0), [h[idx].size(0)],
self.dropout_dec[0](hyp['z_prev'][0]),
hyp['a_prev'][idx])
h_han = torch.stack(att_c_list, dim=1)
att_c, att_w_list[self.num_encs] = self.att[self.num_encs](h_han, [self.num_encs],
self.dropout_dec[0](hyp['z_prev'][0]),
hyp['a_prev'][self.num_encs])
ey = torch.cat((ey, att_c), dim=1) # utt(1) x (zdim + hdim)
z_list, c_list = self.rnn_forward(ey, z_list, c_list, hyp['z_prev'], hyp['c_prev'])
# get nbest local scores and their ids
if self.context_residual:
logits = self.output(torch.cat((self.dropout_dec[-1](z_list[-1]), att_c), dim=-1))
else:
logits = self.output(self.dropout_dec[-1](z_list[-1]))
local_att_scores = F.log_softmax(logits, dim=1)
if rnnlm:
rnnlm_state, local_lm_scores = rnnlm.predict(hyp['rnnlm_prev'], vy)
local_scores = local_att_scores + recog_args.lm_weight * local_lm_scores
else:
local_scores = local_att_scores
if lpz[0] is not None:
local_best_scores, local_best_ids = torch.topk(
local_att_scores, ctc_beam, dim=1)
ctc_scores, ctc_states = [None] * self.num_encs, [None] * self.num_encs
for idx in range(self.num_encs):
ctc_scores[idx], ctc_states[idx] = ctc_prefix_score[idx](
hyp['yseq'], local_best_ids[0], hyp['ctc_state_prev'][idx])
local_scores = \
(1.0 - ctc_weight) * local_att_scores[:, local_best_ids[0]]
if self.num_encs == 1:
local_scores += ctc_weight * torch.from_numpy(ctc_scores[0] - hyp['ctc_score_prev'][0])
else:
for idx in range(self.num_encs):
local_scores += ctc_weight * weights_ctc_dec[idx] * torch.from_numpy(
ctc_scores[idx] - hyp['ctc_score_prev'][idx])
if rnnlm:
local_scores += recog_args.lm_weight * local_lm_scores[:, local_best_ids[0]]
local_best_scores, joint_best_ids = torch.topk(local_scores, beam, dim=1)
local_best_ids = local_best_ids[:, joint_best_ids[0]]
else:
local_best_scores, local_best_ids = torch.topk(local_scores, beam, dim=1)
for j in six.moves.range(beam):
new_hyp = {}
# [:] is needed!
new_hyp['z_prev'] = z_list[:]
new_hyp['c_prev'] = c_list[:]
if self.num_encs == 1:
new_hyp['a_prev'] = att_w[:]
else:
new_hyp['a_prev'] = [att_w_list[idx][:] for idx in range(self.num_encs + 1)]
new_hyp['score'] = hyp['score'] + local_best_scores[0, j]
new_hyp['yseq'] = [0] * (1 + len(hyp['yseq']))
new_hyp['yseq'][:len(hyp['yseq'])] = hyp['yseq']
new_hyp['yseq'][len(hyp['yseq'])] = int(local_best_ids[0, j])
if rnnlm:
new_hyp['rnnlm_prev'] = rnnlm_state
if lpz[0] is not None:
new_hyp['ctc_state_prev'] = [ctc_states[idx][joint_best_ids[0, j]] for idx in
range(self.num_encs)]
new_hyp['ctc_score_prev'] = [ctc_scores[idx][joint_best_ids[0, j]] for idx in
range(self.num_encs)]
# will be (2 x beam) hyps at most
hyps_best_kept.append(new_hyp)
hyps_best_kept = sorted(
hyps_best_kept, key=lambda x: x['score'], reverse=True)[:beam]
# sort and get nbest
hyps = hyps_best_kept
logging.debug('number of pruned hypotheses: ' + str(len(hyps)))
logging.debug(
'best hypo: ' + ''.join([char_list[int(x)] for x in hyps[0]['yseq'][1:]]))
# add eos in the final loop to avoid that there are no ended hyps
if i == maxlen - 1:
logging.info('adding <eos> in the last position in the loop')
for hyp in hyps:
hyp['yseq'].append(self.eos)
            # add ended hypotheses to the final list, and remove them from the current hypotheses
# (this will be a problem, number of hyps < beam)
remained_hyps = []
for hyp in hyps:
if hyp['yseq'][-1] == self.eos:
# only store the sequence that has more than minlen outputs
# also add penalty
if len(hyp['yseq']) > minlen:
hyp['score'] += (i + 1) * penalty
if rnnlm: # Word LM needs to add final <eos> score
hyp['score'] += recog_args.lm_weight * rnnlm.final(
hyp['rnnlm_prev'])
ended_hyps.append(hyp)
else:
remained_hyps.append(hyp)
# end detection
if end_detect(ended_hyps, i) and recog_args.maxlenratio == 0.0:
logging.info('end detected at %d', i)
break
hyps = remained_hyps
if len(hyps) > 0:
logging.debug('remaining hypotheses: ' + str(len(hyps)))
else:
logging.info('no hypothesis. Finish decoding.')
break
for hyp in hyps:
logging.debug(
'hypo: ' + ''.join([char_list[int(x)] for x in hyp['yseq'][1:]]))
logging.debug('number of ended hypotheses: ' + str(len(ended_hyps)))
nbest_hyps = sorted(
ended_hyps, key=lambda x: x['score'], reverse=True)[:min(len(ended_hyps), recog_args.nbest)]
# check number of hypotheses
if len(nbest_hyps) == 0:
            logging.warning('there are no N-best results, performing recognition again with a smaller minlenratio.')
# should copy because Namespace will be overwritten globally
recog_args = Namespace(**vars(recog_args))
recog_args.minlenratio = max(0.0, recog_args.minlenratio - 0.1)
if self.num_encs == 1:
return self.recognize_beam(h[0], lpz[0], recog_args, char_list, rnnlm)
else:
return self.recognize_beam(h, lpz, recog_args, char_list, rnnlm)
logging.info('total log probability: ' + str(nbest_hyps[0]['score']))
logging.info('normalized log probability: ' + str(nbest_hyps[0]['score'] / len(nbest_hyps[0]['yseq'])))
# remove sos
return nbest_hyps
def recognize_beam_batch(self, h, hlens, lpz, recog_args, char_list, rnnlm=None,
normalize_score=True, strm_idx=0, tgt_lang_ids=None):
        # to support multiple-encoder ASR mode; in single-encoder mode, convert torch.Tensor to a list of torch.Tensor
if self.num_encs == 1:
h = [h]
hlens = [hlens]
lpz = [lpz]
if self.num_encs > 1 and lpz is None:
lpz = [lpz] * self.num_encs
att_idx = min(strm_idx, len(self.att) - 1)
for idx in range(self.num_encs):
logging.info(
'Number of Encoder:{}; enc{}: input lengths: {}.'.format(self.num_encs, idx + 1, h[idx].size(1)))
h[idx] = mask_by_length(h[idx], hlens[idx], 0.0)
# search params
batch = len(hlens[0])
beam = recog_args.beam_size
penalty = recog_args.penalty
ctc_weight = getattr(recog_args, "ctc_weight", 0) # for NMT
att_weight = 1.0 - ctc_weight
ctc_margin = getattr(recog_args, "ctc_window_margin", 0) # use getattr to keep compatibility
# weights-ctc, e.g. ctc_loss = w_1*ctc_1_loss + w_2 * ctc_2_loss + w_N * ctc_N_loss
if lpz[0] is not None and self.num_encs > 1:
weights_ctc_dec = recog_args.weights_ctc_dec / np.sum(recog_args.weights_ctc_dec) # normalize
logging.info('ctc weights (decoding): ' + ' '.join([str(x) for x in weights_ctc_dec]))
else:
weights_ctc_dec = [1.0]
n_bb = batch * beam
pad_b = to_device(self, torch.arange(batch) * beam).view(-1, 1)
max_hlen = np.amin([max(hlens[idx]) for idx in range(self.num_encs)])
if recog_args.maxlenratio == 0:
maxlen = max_hlen
else:
maxlen = max(1, int(recog_args.maxlenratio * max_hlen))
minlen = int(recog_args.minlenratio * max_hlen)
logging.info('max output length: ' + str(maxlen))
logging.info('min output length: ' + str(minlen))
# initialization
c_prev = [to_device(self, torch.zeros(n_bb, self.dunits)) for _ in range(self.dlayers)]
z_prev = [to_device(self, torch.zeros(n_bb, self.dunits)) for _ in range(self.dlayers)]
c_list = [to_device(self, torch.zeros(n_bb, self.dunits)) for _ in range(self.dlayers)]
z_list = [to_device(self, torch.zeros(n_bb, self.dunits)) for _ in range(self.dlayers)]
vscores = to_device(self, torch.zeros(batch, beam))
rnnlm_state = None
if self.num_encs == 1:
a_prev = [None]
att_w_list, ctc_scorer, ctc_state = [None], [None], [None]
self.att[att_idx].reset() # reset pre-computation of h
else:
a_prev = [None] * (self.num_encs + 1) # atts + han
att_w_list = [None] * (self.num_encs + 1) # atts + han
att_c_list = [None] * (self.num_encs) # atts
ctc_scorer, ctc_state = [None] * (self.num_encs), [None] * (self.num_encs)
for idx in range(self.num_encs + 1):
self.att[idx].reset() # reset pre-computation of h in atts and han
if self.replace_sos and recog_args.tgt_lang:
logging.info('<sos> index: ' + str(char_list.index(recog_args.tgt_lang)))
logging.info('<sos> mark: ' + recog_args.tgt_lang)
yseq = [[char_list.index(recog_args.tgt_lang)] for _ in six.moves.range(n_bb)]
elif tgt_lang_ids is not None:
# NOTE: used for evaluation during training
yseq = [[tgt_lang_ids[b // recog_args.beam_size]] for b in six.moves.range(n_bb)]
else:
logging.info('<sos> index: ' + str(self.sos))
logging.info('<sos> mark: ' + char_list[self.sos])
yseq = [[self.sos] for _ in six.moves.range(n_bb)]
accum_odim_ids = [self.sos for _ in six.moves.range(n_bb)]
stop_search = [False for _ in six.moves.range(batch)]
nbest_hyps = [[] for _ in six.moves.range(batch)]
ended_hyps = [[] for _ in range(batch)]
exp_hlens = [hlens[idx].repeat(beam).view(beam, batch).transpose(0, 1).contiguous() for idx in
range(self.num_encs)]
exp_hlens = [exp_hlens[idx].view(-1).tolist() for idx in range(self.num_encs)]
exp_h = [h[idx].unsqueeze(1).repeat(1, beam, 1, 1).contiguous() for idx in range(self.num_encs)]
exp_h = [exp_h[idx].view(n_bb, h[idx].size()[1], h[idx].size()[2]) for idx in range(self.num_encs)]
if lpz[0] is not None:
scoring_ratio = CTC_SCORING_RATIO if att_weight > 0.0 and not lpz[0].is_cuda else 0
ctc_scorer = [CTCPrefixScoreTH(lpz[idx], hlens[idx], 0, self.eos, beam,
scoring_ratio, margin=ctc_margin) for idx in range(self.num_encs)]
for i in six.moves.range(maxlen):
logging.debug('position ' + str(i))
vy = to_device(self, torch.LongTensor(self._get_last_yseq(yseq)))
ey = self.dropout_emb(self.embed(vy))
if self.num_encs == 1:
att_c, att_w = self.att[att_idx](exp_h[0], exp_hlens[0], self.dropout_dec[0](z_prev[0]), a_prev[0])
att_w_list = [att_w]
else:
for idx in range(self.num_encs):
att_c_list[idx], att_w_list[idx] = self.att[idx](exp_h[idx], exp_hlens[idx],
self.dropout_dec[0](z_prev[0]), a_prev[idx])
exp_h_han = torch.stack(att_c_list, dim=1)
att_c, att_w_list[self.num_encs] = self.att[self.num_encs](exp_h_han, [self.num_encs] * n_bb,
self.dropout_dec[0](z_prev[0]),
a_prev[self.num_encs])
ey = torch.cat((ey, att_c), dim=1)
# attention decoder
z_list, c_list = self.rnn_forward(ey, z_list, c_list, z_prev, c_prev)
if self.context_residual:
logits = self.output(torch.cat((self.dropout_dec[-1](z_list[-1]), att_c), dim=-1))
else:
logits = self.output(self.dropout_dec[-1](z_list[-1]))
local_scores = att_weight * F.log_softmax(logits, dim=1)
# rnnlm
if rnnlm:
rnnlm_state, local_lm_scores = rnnlm.buff_predict(rnnlm_state, vy, n_bb)
local_scores = local_scores + recog_args.lm_weight * local_lm_scores
# ctc
if ctc_scorer[0]:
for idx in range(self.num_encs):
att_w = att_w_list[idx]
att_w_ = att_w if isinstance(att_w, torch.Tensor) else att_w[0]
ctc_state[idx], local_ctc_scores = ctc_scorer[idx](yseq, ctc_state[idx], local_scores, att_w_)
local_scores = local_scores + ctc_weight * weights_ctc_dec[idx] * local_ctc_scores
local_scores = local_scores.view(batch, beam, self.odim)
if i == 0:
local_scores[:, 1:, :] = self.logzero
# accumulate scores
eos_vscores = local_scores[:, :, self.eos] + vscores
vscores = vscores.view(batch, beam, 1).repeat(1, 1, self.odim)
vscores[:, :, self.eos] = self.logzero
vscores = (vscores + local_scores).view(batch, -1)
# global pruning
accum_best_scores, accum_best_ids = torch.topk(vscores, beam, 1)
accum_odim_ids = torch.fmod(accum_best_ids, self.odim).view(-1).data.cpu().tolist()
accum_padded_beam_ids = (torch.div(accum_best_ids, self.odim) + pad_b).view(-1).data.cpu().tolist()
y_prev = yseq[:][:]
yseq = self._index_select_list(yseq, accum_padded_beam_ids)
yseq = self._append_ids(yseq, accum_odim_ids)
vscores = accum_best_scores
vidx = to_device(self, torch.LongTensor(accum_padded_beam_ids))
a_prev = []
num_atts = self.num_encs if self.num_encs == 1 else self.num_encs + 1
for idx in range(num_atts):
if isinstance(att_w_list[idx], torch.Tensor):
_a_prev = torch.index_select(att_w_list[idx].view(n_bb, *att_w_list[idx].shape[1:]), 0, vidx)
elif isinstance(att_w_list[idx], list):
# handle the case of multi-head attention
_a_prev = [torch.index_select(att_w_one.view(n_bb, -1), 0, vidx) for att_w_one in att_w_list[idx]]
else:
# handle the case of location_recurrent when return is a tuple
_a_prev_ = torch.index_select(att_w_list[idx][0].view(n_bb, -1), 0, vidx)
_h_prev_ = torch.index_select(att_w_list[idx][1][0].view(n_bb, -1), 0, vidx)
_c_prev_ = torch.index_select(att_w_list[idx][1][1].view(n_bb, -1), 0, vidx)
_a_prev = (_a_prev_, (_h_prev_, _c_prev_))
a_prev.append(_a_prev)
z_prev = [torch.index_select(z_list[li].view(n_bb, -1), 0, vidx) for li in range(self.dlayers)]
c_prev = [torch.index_select(c_list[li].view(n_bb, -1), 0, vidx) for li in range(self.dlayers)]
# pick ended hyps
if i > minlen:
k = 0
penalty_i = (i + 1) * penalty
thr = accum_best_scores[:, -1]
for samp_i in six.moves.range(batch):
if stop_search[samp_i]:
k = k + beam
continue
for beam_j in six.moves.range(beam):
if eos_vscores[samp_i, beam_j] > thr[samp_i]:
yk = y_prev[k][:]
yk.append(self.eos)
if len(yk) < min(hlens[idx][samp_i] for idx in range(self.num_encs)):
_vscore = eos_vscores[samp_i][beam_j] + penalty_i
if rnnlm:
_vscore += recog_args.lm_weight * rnnlm.final(rnnlm_state, index=k)
_score = _vscore.data.cpu().numpy()
ended_hyps[samp_i].append({'yseq': yk, 'vscore': _vscore, 'score': _score})
k = k + 1
# end detection
stop_search = [stop_search[samp_i] or end_detect(ended_hyps[samp_i], i)
for samp_i in six.moves.range(batch)]
stop_search_summary = list(set(stop_search))
if len(stop_search_summary) == 1 and stop_search_summary[0]:
break
if rnnlm:
rnnlm_state = self._index_select_lm_state(rnnlm_state, 0, vidx)
if ctc_scorer[0]:
for idx in range(self.num_encs):
ctc_state[idx] = ctc_scorer[idx].index_select_state(ctc_state[idx], accum_best_ids)
torch.cuda.empty_cache()
dummy_hyps = [{'yseq': [self.sos, self.eos], 'score': np.array([-float('inf')])}]
ended_hyps = [ended_hyps[samp_i] if len(ended_hyps[samp_i]) != 0 else dummy_hyps
for samp_i in six.moves.range(batch)]
if normalize_score:
for samp_i in six.moves.range(batch):
for x in ended_hyps[samp_i]:
x['score'] /= len(x['yseq'])
nbest_hyps = [sorted(ended_hyps[samp_i], key=lambda x: x['score'],
reverse=True)[:min(len(ended_hyps[samp_i]), recog_args.nbest)]
for samp_i in six.moves.range(batch)]
return nbest_hyps
def calculate_all_attentions(self, hs_pad, hlen, ys_pad, strm_idx=0, tgt_lang_ids=None):
"""Calculate all of attentions
:param torch.Tensor hs_pad: batch of padded hidden state sequences (B, Tmax, D)
[in multi-encoder case,
list of torch.Tensor, [(B, Tmax_1, D), (B, Tmax_2, D), ..., ] ]
:param torch.Tensor hlen: batch of lengths of hidden state sequences (B)
[in multi-encoder case, list of torch.Tensor, [(B), (B), ..., ]
:param torch.Tensor ys_pad: batch of padded character id sequence tensor (B, Lmax)
:param int strm_idx: stream index for parallel speaker attention in multi-speaker case
:param torch.Tensor tgt_lang_ids: batch of target language id tensor (B, 1)
:return: attention weights with the following shape,
1) multi-head case => attention weights (B, H, Lmax, Tmax),
2) multi-encoder case => [(B, Lmax, Tmax1), (B, Lmax, Tmax2), ..., (B, Lmax, NumEncs)]
3) other case => attention weights (B, Lmax, Tmax).
:rtype: float ndarray
"""
        # to support multiple-encoder ASR mode; in single-encoder mode, convert torch.Tensor to a list of torch.Tensor
if self.num_encs == 1:
hs_pad = [hs_pad]
hlen = [hlen]
# TODO(kan-bayashi): need to make more smart way
ys = [y[y != self.ignore_id] for y in ys_pad] # parse padded ys
att_idx = min(strm_idx, len(self.att) - 1)
# hlen should be list of list of integer
hlen = [list(map(int, hlen[idx])) for idx in range(self.num_encs)]
self.loss = None
# prepare input and output word sequences with sos/eos IDs
eos = ys[0].new([self.eos])
sos = ys[0].new([self.sos])
if self.replace_sos:
ys_in = [torch.cat([idx, y], dim=0) for idx, y in zip(tgt_lang_ids, ys)]
else:
ys_in = [torch.cat([sos, y], dim=0) for y in ys]
ys_out = [torch.cat([y, eos], dim=0) for y in ys]
# padding for ys with -1
# pys: utt x olen
ys_in_pad = pad_list(ys_in, self.eos)
ys_out_pad = pad_list(ys_out, self.ignore_id)
# get length info
olength = ys_out_pad.size(1)
# initialization
c_list = [self.zero_state(hs_pad[0])]
z_list = [self.zero_state(hs_pad[0])]
for _ in six.moves.range(1, self.dlayers):
c_list.append(self.zero_state(hs_pad[0]))
z_list.append(self.zero_state(hs_pad[0]))
att_ws = []
if self.num_encs == 1:
att_w = None
self.att[att_idx].reset() # reset pre-computation of h
else:
att_w_list = [None] * (self.num_encs + 1) # atts + han
att_c_list = [None] * (self.num_encs) # atts
for idx in range(self.num_encs + 1):
self.att[idx].reset() # reset pre-computation of h in atts and han
# pre-computation of embedding
eys = self.dropout_emb(self.embed(ys_in_pad)) # utt x olen x zdim
# loop for an output sequence
for i in six.moves.range(olength):
if self.num_encs == 1:
att_c, att_w = self.att[att_idx](hs_pad[0], hlen[0], self.dropout_dec[0](z_list[0]), att_w)
att_ws.append(att_w)
else:
for idx in range(self.num_encs):
att_c_list[idx], att_w_list[idx] = self.att[idx](hs_pad[idx], hlen[idx],
self.dropout_dec[0](z_list[0]), att_w_list[idx])
hs_pad_han = torch.stack(att_c_list, dim=1)
hlen_han = [self.num_encs] * len(ys_in)
att_c, att_w_list[self.num_encs] = self.att[self.num_encs](hs_pad_han, hlen_han,
self.dropout_dec[0](z_list[0]),
att_w_list[self.num_encs])
att_ws.append(att_w_list)
ey = torch.cat((eys[:, i, :], att_c), dim=1) # utt x (zdim + hdim)
z_list, c_list = self.rnn_forward(ey, z_list, c_list, z_list, c_list)
if self.num_encs == 1:
# convert to numpy array with the shape (B, Lmax, Tmax)
att_ws = att_to_numpy(att_ws, self.att[att_idx])
else:
_att_ws = []
for idx, ws in enumerate(zip(*att_ws)):
ws = att_to_numpy(ws, self.att[idx])
_att_ws.append(ws)
att_ws = _att_ws
return att_ws
@staticmethod
def _get_last_yseq(exp_yseq):
last = []
for y_seq in exp_yseq:
last.append(y_seq[-1])
return last
@staticmethod
def _append_ids(yseq, ids):
if isinstance(ids, list):
for i, j in enumerate(ids):
yseq[i].append(j)
else:
for i in range(len(yseq)):
yseq[i].append(ids)
return yseq
@staticmethod
def _index_select_list(yseq, lst):
new_yseq = []
for l in lst:
new_yseq.append(yseq[l][:])
return new_yseq
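    # Illustrative note (added): the yseq helpers above operate on plain lists
    # of token-id lists; e.g. with yseq = [[1, 2], [3]]:
    #   _get_last_yseq(yseq)             -> [2, 3]
    #   _append_ids(yseq, [4, 5])        -> [[1, 2, 4], [3, 5]]
    #   _index_select_list(yseq, [1, 0]) -> [[3, 5], [1, 2, 4]]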
@staticmethod
def _index_select_lm_state(rnnlm_state, dim, vidx):
if isinstance(rnnlm_state, dict):
new_state = {}
for k, v in rnnlm_state.items():
new_state[k] = [torch.index_select(vi, dim, vidx) for vi in v]
elif isinstance(rnnlm_state, list):
new_state = []
for i in vidx:
new_state.append(rnnlm_state[int(i)][:])
return new_state
# scorer interface methods
def init_state(self, x):
        # to support multiple-encoder ASR mode; in single-encoder mode, convert torch.Tensor to a list of torch.Tensor
if self.num_encs == 1:
x = [x]
c_list = [self.zero_state(x[0].unsqueeze(0))]
z_list = [self.zero_state(x[0].unsqueeze(0))]
for _ in six.moves.range(1, self.dlayers):
c_list.append(self.zero_state(x[0].unsqueeze(0)))
z_list.append(self.zero_state(x[0].unsqueeze(0)))
# TODO(karita): support strm_index for `asr_mix`
strm_index = 0
att_idx = min(strm_index, len(self.att) - 1)
if self.num_encs == 1:
a = None
self.att[att_idx].reset() # reset pre-computation of h
else:
a = [None] * (self.num_encs + 1) # atts + han
for idx in range(self.num_encs + 1):
self.att[idx].reset() # reset pre-computation of h in atts and han
return dict(c_prev=c_list[:], z_prev=z_list[:], a_prev=a, workspace=(att_idx, z_list, c_list))
def score(self, yseq, state, x):
        # to support multiple-encoder ASR mode; in single-encoder mode, convert torch.Tensor to a list of torch.Tensor
if self.num_encs == 1:
x = [x]
att_idx, z_list, c_list = state["workspace"]
vy = yseq[-1].unsqueeze(0)
ey = self.dropout_emb(self.embed(vy)) # utt list (1) x zdim
if self.num_encs == 1:
att_c, att_w = self.att[att_idx](
x[0].unsqueeze(0), [x[0].size(0)],
self.dropout_dec[0](state['z_prev'][0]), state['a_prev'])
else:
att_w = [None] * (self.num_encs + 1) # atts + han
att_c_list = [None] * (self.num_encs) # atts
for idx in range(self.num_encs):
att_c_list[idx], att_w[idx] = self.att[idx](x[idx].unsqueeze(0), [x[idx].size(0)],
self.dropout_dec[0](state['z_prev'][0]),
state['a_prev'][idx])
h_han = torch.stack(att_c_list, dim=1)
att_c, att_w[self.num_encs] = self.att[self.num_encs](h_han, [self.num_encs],
self.dropout_dec[0](state['z_prev'][0]),
state['a_prev'][self.num_encs])
ey = torch.cat((ey, att_c), dim=1) # utt(1) x (zdim + hdim)
z_list, c_list = self.rnn_forward(ey, z_list, c_list, state['z_prev'], state['c_prev'])
if self.context_residual:
logits = self.output(torch.cat((self.dropout_dec[-1](z_list[-1]), att_c), dim=-1))
else:
logits = self.output(self.dropout_dec[-1](z_list[-1]))
logp = F.log_softmax(logits, dim=1).squeeze(0)
return logp, dict(c_prev=c_list[:], z_prev=z_list[:], a_prev=att_w, workspace=(att_idx, z_list, c_list))
def decoder_for(args, odim, sos, eos, att, labeldist):
return Decoder(args.eprojs, odim, args.dtype, args.dlayers, args.dunits, sos, eos, att, args.verbose,
args.char_list, labeldist,
args.lsm_weight, args.sampling_probability, args.dropout_rate_decoder,
getattr(args, "context_residual", False), # use getattr to keep compatibility
getattr(args, "replace_sos", False), # use getattr to keep compatibility
getattr(args, "num_encs", 1)) # use getattr to keep compatibility
| [
"torch.nn.Linear",
"torch.cat",
"torch.stack",
"torch.nn.ModuleList",
"torch.LongTensor",
"torch.topk",
"torch.index_select",
"torch.div",
"torch.zeros",
"torch.nn.LSTMCell",
"torch.fmod",
"torch.nn.Module.__init__",
"torch.nn.functional.log_softmax",
"torch.cuda.empty_cache",
"torch.nn.GRUCell",
"torch.nn.Dropout",
"torch.arange",
"torch.from_numpy",
"torch.nn.Embedding"
] | 1.0.1 | koso019003/espnet | 7735c992b3d71fabbc0f0c48c1d8f78d72785e17 |
1.5 | import logging
import os
import torch
import dill
import json
import pickle
import msgpack
from eisen.utils import EisenModuleWrapper
from eisen_deploy.utils import encode_data, decode_data
logger = logging.getLogger(__name__)
def json_file_to_dict(json_file):
if not os.path.exists(json_file):
raise FileNotFoundError('The JSON file {} cannot be read'.format(json_file))
with open(json_file) as json_file:
dictionary = json.load(json_file)
return dictionary
class EisenServingHandler(object):
"""
EisenServingHandler is a custom object to handle inference request within TorchServing. It is usually included
automatically in the MAR.
"""
def __init__(self):
self.model = None
self.device = None
self.pre_process_tform = None
self.post_process_tform = None
self.metadata = None
self.initialized = False
self.input_name_list = []
self.output_name_list = []
def initialize(self, ctx):
"""
Initializes the fields of the EisenServingHandler object based on the context.
:param ctx: context of an inference request
:return: None
"""
properties = ctx.system_properties
self.device = torch.device("cuda:" + str(properties.get("gpu_id")) if torch.cuda.is_available() else "cpu")
model_dir = properties.get("model_dir")
# Model file
model_pt_path = os.path.join(model_dir, "model.pt")
# Pre processing chain
pre_processing_pkl = os.path.join(model_dir, "pre_process_tform.pkl")
# Post processing chain
post_processing_pkl = os.path.join(model_dir, "post_process_tform.pkl")
# unpickle serialized transform chain
with open(pre_processing_pkl, "rb") as f:
self.pre_process_tform = dill.load(f)
with open(post_processing_pkl, "rb") as f:
self.post_process_tform = dill.load(f)
# Metadata about the model
metadata_json = os.path.join(model_dir, "metadata.json")
self.metadata = json_file_to_dict(metadata_json)
self.input_name_list = self.metadata['model_input_list']
self.output_name_list = self.metadata['model_output_list']
# deserialize pytorch model
base_model = torch.load(model_pt_path, map_location=self.device)
self.model = EisenModuleWrapper(base_model, self.input_name_list, self.output_name_list)
# put model in eval mode
self.model.eval()
logger.debug('Model file {0} loaded successfully'.format(model_pt_path))
self.initialized = True
def get_metadata(self):
"""
This function returns metadata about the model as JSON
:return: list
"""
return [json.dumps(self.metadata)]
def pre_process(self, data):
"""
Applies pre-processing transform using de-pickled transform chain in the MAR.
:param data: dictionary containing a collated batch of data
:type data: dict
"""
input_dict = self.pre_process_tform(data)
return input_dict
def inference(self, input_dict):
"""
Performs prediction using the model. Feeds the necessary information to the model starting from the
received data and creates an output dictionary as a result.
:param input_dict: input batch, in form of a dictionary of collated datapoints
:type input_dict: dict
:return: dict
"""
for name in self.model.input_names:
input_dict[name] = torch.Tensor(input_dict[name]).to(self.device)
output_dict = self.model(**input_dict)
for name in self.model.output_names:
output_dict[name] = output_dict[name].data.cpu().numpy()
return output_dict
def post_process(self, output_dict):
"""
Applies post-processing transform using de-pickled transform chain in the MAR.
:param output_dict: dictionary containing the result of inference on a collated batch of data
:type output_dict: dict
"""
prediction = self.post_process_tform(output_dict)
return prediction
def handle(self, data):
"""
Handles one request.
:param data: dictionary of data
:type data: dict
:return: list
"""
input_data = {}
for input in self.metadata['inputs']:
input_data[input['name']] = data[input['name']]
model_input = self.pre_process(input_data)
model_out = self.inference(model_input)
model_out.update(model_input) # output dictionary still contains inputs (which may be useful for tforms)
prediction = self.post_process(model_out)
output_data = {}
for output in self.metadata['outputs']:
output_data[output['name']] = prediction[output['name']]
        buffer = msgpack.packb(output_data, default=encode_data, use_bin_type=True)  # serialize only the declared outputs
return [buffer]
_service = EisenServingHandler()
def handle(data, context):
if not _service.initialized:
_service.initialize(context)
if data is not None and hasattr(data, '__getitem__') and 'body' in data[0].keys() and len(data[0]['body']) > 0:
data = data[0]['body']
else:
return _service.get_metadata()
data = msgpack.unpackb(data, object_hook=decode_data, raw=False)
if not all([key in data.keys() for key in _service.input_name_list]):
return _service.get_metadata()
else:
return _service.handle(data)
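# Illustrative sketch (not part of the original module): how a client could build the
# msgpack payload that `handle` above consumes. The input name "images" and the array
# shape are assumptions for this example; the real names come from metadata.json.
def _example_request_payload():
    import numpy as np
    batch = {"images": np.zeros((1, 1, 32, 32, 32), dtype=np.float32)}
    # encode_data (imported above) makes numpy arrays serializable for msgpack transport
    return msgpack.packb(batch, default=encode_data, use_bin_type=True)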
| [
"torch.cuda.is_available",
"torch.Tensor",
"torch.load"
] | 1.5.0 | eisen-ai/eisen-deploy | ab1cdf0f8726cbfbdc7029616b1c753706b0039c |
1.2 | import csv
import errno
import hashlib
import logging
import os
import sys
import tarfile
import threading
import zipfile
from queue import Queue
import torch
import urllib
from torch.utils.data import Dataset
from torch.utils.model_zoo import tqdm
def unicode_csv_reader(unicode_csv_data, **kwargs):
r"""Since the standard csv library does not handle unicode in Python 2, we need a wrapper.
Borrowed and slightly modified from the Python docs:
https://docs.python.org/2/library/csv.html#csv-examples
Arguments:
unicode_csv_data: unicode csv data (see example below)
Examples:
>>> from torchaudio.datasets.utils import unicode_csv_reader
>>> import io
>>> with io.open(data_path, encoding="utf8") as f:
>>> reader = unicode_csv_reader(f)
"""
# Fix field larger than field limit error
maxInt = sys.maxsize
while True:
# decrease the maxInt value by factor 10
# as long as the OverflowError occurs.
try:
csv.field_size_limit(maxInt)
break
except OverflowError:
maxInt = int(maxInt / 10)
csv.field_size_limit(maxInt)
for line in csv.reader(unicode_csv_data, **kwargs):
yield line
def makedir_exist_ok(dirpath):
"""
Python2 support for os.makedirs(.., exist_ok=True)
"""
try:
os.makedirs(dirpath)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
def stream_url(url, start_byte=None, block_size=32 * 1024, progress_bar=True):
"""Stream url by chunk
Args:
url (str): Url.
start_byte (Optional[int]): Start streaming at that point.
block_size (int): Size of chunks to stream.
progress_bar (bool): Display a progress bar.
"""
# If we already have the whole file, there is no need to download it again
req = urllib.request.Request(url, method="HEAD")
url_size = int(urllib.request.urlopen(req).info().get("Content-Length", -1))
if url_size == start_byte:
return
req = urllib.request.Request(url)
if start_byte:
req.headers["Range"] = "bytes={}-".format(start_byte)
with urllib.request.urlopen(req) as upointer, tqdm(
unit="B",
unit_scale=True,
unit_divisor=1024,
total=url_size,
disable=not progress_bar,
) as pbar:
num_bytes = 0
while True:
chunk = upointer.read(block_size)
if not chunk:
break
yield chunk
num_bytes += len(chunk)
pbar.update(len(chunk))
def download_url(
url,
download_folder,
filename=None,
hash_value=None,
hash_type="sha256",
progress_bar=True,
resume=False,
):
"""Download file to disk.
Args:
url (str): Url.
download_folder (str): Folder to download file.
filename (str): Name of downloaded file. If None, it is inferred from the url.
hash_value (str): Hash for url.
hash_type (str): Hash type, among "sha256" and "md5".
progress_bar (bool): Display a progress bar.
resume (bool): Enable resuming download.
"""
req = urllib.request.Request(url, method="HEAD")
req_info = urllib.request.urlopen(req).info()
# Detect filename
filename = filename or req_info.get_filename() or os.path.basename(url)
filepath = os.path.join(download_folder, filename)
if resume and os.path.exists(filepath):
mode = "ab"
local_size = os.path.getsize(filepath)
elif not resume and os.path.exists(filepath):
raise RuntimeError(
"{} already exists. Delete the file manually and retry.".format(filepath)
)
else:
mode = "wb"
local_size = None
if hash_value and local_size == int(req_info.get("Content-Length", -1)):
with open(filepath, "rb") as file_obj:
if validate_file(file_obj, hash_value, hash_type):
return
raise RuntimeError(
"The hash of {} does not match. Delete the file manually and retry.".format(
filepath
)
)
with open(filepath, mode) as fpointer:
for chunk in stream_url(url, start_byte=local_size, progress_bar=progress_bar):
fpointer.write(chunk)
with open(filepath, "rb") as file_obj:
if hash_value and not validate_file(file_obj, hash_value, hash_type):
raise RuntimeError(
"The hash of {} does not match. Delete the file manually and retry.".format(
filepath
)
)
def validate_file(file_obj, hash_value, hash_type="sha256"):
"""Validate a given file object with its hash.
Args:
file_obj: File object to read from.
hash_value (str): Hash for url.
hash_type (str): Hash type, among "sha256" and "md5".
"""
if hash_type == "sha256":
hash_func = hashlib.sha256()
elif hash_type == "md5":
hash_func = hashlib.md5()
else:
raise ValueError
while True:
# Read by chunk to avoid filling memory
chunk = file_obj.read(1024 ** 2)
if not chunk:
break
hash_func.update(chunk)
return hash_func.hexdigest() == hash_value
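# Illustrative sketch (not part of the original module): typical use of download_url
# together with checksum validation. The URL and checksum below are placeholders.
def _example_download(url="https://example.com/dataset.tar.gz", sha256=None):
    download_url(url, download_folder=".", hash_value=sha256,
                 hash_type="sha256", progress_bar=True, resume=True)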
def extract_archive(from_path, to_path=None, overwrite=False):
"""Extract archive.
Arguments:
from_path: the path of the archive.
        to_path: the root path of the extracted files (directory of from_path)
overwrite: overwrite existing files (False)
Returns:
List of paths to extracted files even if not overwritten.
Examples:
>>> url = 'http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz'
>>> from_path = './validation.tar.gz'
>>> to_path = './'
        >>> torchaudio.datasets.utils.download_url(url, to_path)
>>> torchaudio.datasets.utils.extract_archive(from_path, to_path)
"""
if to_path is None:
to_path = os.path.dirname(from_path)
try:
with tarfile.open(from_path, "r") as tar:
logging.info("Opened tar file {}.".format(from_path))
files = []
for file_ in tar:
file_path = os.path.join(to_path, file_.name)
if file_.isfile():
files.append(file_path)
if os.path.exists(file_path):
logging.info("{} already extracted.".format(file_path))
if not overwrite:
continue
tar.extract(file_, to_path)
return files
except tarfile.ReadError:
pass
try:
with zipfile.ZipFile(from_path, "r") as zfile:
logging.info("Opened zip file {}.".format(from_path))
files = zfile.namelist()
for file_ in files:
file_path = os.path.join(to_path, file_)
if os.path.exists(file_path):
logging.info("{} already extracted.".format(file_path))
if not overwrite:
continue
zfile.extract(file_, to_path)
return files
except zipfile.BadZipFile:
pass
    raise NotImplementedError("We currently only support tar.gz, tgz, and zip archives.")
def walk_files(root, suffix, prefix=False, remove_suffix=False):
"""List recursively all files ending with a suffix at a given root
Args:
root (str): Path to directory whose folders need to be listed
suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
It uses the Python "str.endswith" method and is passed directly
prefix (bool, optional): If true, prepends the full path to each result, otherwise
only returns the name of the files found (Default: ``False``)
remove_suffix (bool, optional): If true, removes the suffix to each result defined in suffix,
otherwise will return the result as found (Default: ``False``).
"""
root = os.path.expanduser(root)
for dirpath, _, files in os.walk(root):
for f in files:
if f.endswith(suffix):
if remove_suffix:
f = f[: -len(suffix)]
if prefix:
f = os.path.join(dirpath, f)
yield f
class _DiskCache(Dataset):
"""
Wrap a dataset so that, whenever a new item is returned, it is saved to disk.
"""
def __init__(self, dataset, location=".cached"):
self.dataset = dataset
self.location = location
self._id = id(self)
self._cache = [None] * len(dataset)
def __getitem__(self, n):
if self._cache[n]:
f = self._cache[n]
return torch.load(f)
f = str(self._id) + "-" + str(n)
f = os.path.join(self.location, f)
item = self.dataset[n]
self._cache[n] = f
makedir_exist_ok(self.location)
torch.save(item, f)
return item
def __len__(self):
return len(self.dataset)
def diskcache_iterator(dataset, location=".cached"):
return _DiskCache(dataset, location)
class _ThreadedIterator(threading.Thread):
"""
Prefetch the next queue_length items from iterator in a background thread.
Example:
        >>> for i in bg_iterator(range(10)):
        >>>     print(i)
"""
class _End:
pass
def __init__(self, generator, maxsize):
threading.Thread.__init__(self)
self.queue = Queue(maxsize)
self.generator = generator
self.daemon = True
self.start()
def run(self):
for item in self.generator:
self.queue.put(item)
self.queue.put(self._End)
def __iter__(self):
return self
def __next__(self):
next_item = self.queue.get()
if next_item == self._End:
raise StopIteration
return next_item
# Required for Python 2.7 compatibility
def next(self):
return self.__next__()
def bg_iterator(iterable, maxsize):
return _ThreadedIterator(iterable, maxsize=maxsize)
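# Illustrative sketch (not part of the original module): caching dataset items to disk on
# first access and prefetching them in a background thread. `dataset` stands in for any
# torch.utils.data.Dataset instance.
def _example_cache_and_prefetch(dataset, location=".cached"):
    cached = diskcache_iterator(dataset, location=location)
    for item in bg_iterator(cached, maxsize=4):
        yield item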
| [
"torch.utils.model_zoo.tqdm",
"torch.save",
"torch.load"
] | 1.2.0 | tomassosorio/audio | 0f8fa5f82af47543a68f1d3fb8921f8f9b6b15f8 |
1.0 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Batch acquisition functions using the reparameterization trick in combination
with (quasi) Monte-Carlo sampling. See [Rezende2014reparam]_ and
[Wilson2017reparam]_
.. [Rezende2014reparam]
D. J. Rezende, S. Mohamed, and D. Wierstra. Stochastic backpropagation and
approximate inference in deep generative models. ICML 2014.
.. [Wilson2017reparam]
J. T. Wilson, R. Moriconi, F. Hutter, and M. P. Deisenroth.
The reparameterization trick for acquisition functions. ArXiv 2017.
"""
import math
from abc import ABC, abstractmethod
from typing import Optional, Union
import torch
from torch import Tensor
from ..exceptions.errors import UnsupportedError
from ..models.model import Model
from ..sampling.samplers import MCSampler, SobolQMCNormalSampler
from ..utils.transforms import (
concatenate_pending_points,
match_batch_shape,
t_batch_mode_transform,
)
from .acquisition import AcquisitionFunction
from .objective import IdentityMCObjective, MCAcquisitionObjective
from .utils import prune_inferior_points
class MCAcquisitionFunction(AcquisitionFunction, ABC):
r"""Abstract base class for Monte-Carlo based batch acquisition functions."""
def __init__(
self,
model: Model,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""Constructor for the MCAcquisitionFunction base class.
Args:
model: A fitted model.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have been
                submitted for function evaluation but have not yet been evaluated.
"""
super().__init__(model=model)
if sampler is None:
sampler = SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)
self.add_module("sampler", sampler)
if objective is None:
if model.num_outputs != 1:
raise UnsupportedError(
"Must specify an objective when using a multi-output model."
)
objective = IdentityMCObjective()
elif not isinstance(objective, MCAcquisitionObjective):
raise UnsupportedError(
"Only objectives of type MCAcquisitionObjective are supported for "
"MC acquisition functions."
)
self.add_module("objective", objective)
self.set_X_pending(X_pending)
@abstractmethod
def forward(self, X: Tensor) -> Tensor:
r"""Takes in a `(b) x q x d` X Tensor of `(b)` t-batches with `q` `d`-dim
design points each, and returns a one-dimensional Tensor with
`(b)` elements. Should utilize the result of set_X_pending as needed
to account for pending function evaluations.
"""
pass # pragma: no cover
class qExpectedImprovement(MCAcquisitionFunction):
r"""MC-based batch Expected Improvement.
This computes qEI by
(1) sampling the joint posterior over q points
(2) evaluating the improvement over the current best for each sample
(3) maximizing over q
(4) averaging over the samples
`qEI(X) = E(max(max Y - best_f, 0)), Y ~ f(X), where X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> best_f = train_Y.max()[0]
>>> sampler = SobolQMCNormalSampler(1000)
>>> qEI = qExpectedImprovement(model, best_f, sampler)
>>> qei = qEI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""q-Expected Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless).
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have been
                submitted for function evaluation but have not yet been evaluated.
                Concatenated into X upon forward call. Copied and set to have no
                gradient.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if not torch.is_tensor(best_f):
best_f = torch.tensor(float(best_f))
self.register_buffer("best_f", best_f)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qExpectedImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Expected Improvement values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
obj = (obj - self.best_f.unsqueeze(-1)).clamp_min(0)
q_ei = obj.max(dim=-1)[0].mean(dim=0)
return q_ei
class qNoisyExpectedImprovement(MCAcquisitionFunction):
r"""MC-based batch Noisy Expected Improvement.
This function does not assume a `best_f` is known (which would require
noiseless observations). Instead, it uses samples from the joint posterior
over the `q` test points and previously observed points. The improvement
over previously observed points is computed for each sample and averaged.
`qNEI(X) = E(max(max Y - max Y_baseline, 0))`, where
`(Y, Y_baseline) ~ f((X, X_baseline)), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qNEI = qNoisyExpectedImprovement(model, train_X, sampler)
>>> qnei = qNEI(test_X)
"""
def __init__(
self,
model: Model,
X_baseline: Tensor,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
prune_baseline: bool = False,
) -> None:
r"""q-Noisy Expected Improvement.
Args:
model: A fitted model.
X_baseline: A `r x d`-dim Tensor of `r` design points that have
already been observed. These points are considered as the
potential best design point.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have been
                submitted for function evaluation but have not yet been evaluated.
                Concatenated into X upon forward call. Copied and set to have no
                gradient.
prune_baseline: If True, remove points in `X_baseline` that are
highly unlikely to be the best point. This can significantly
improve performance and is generally recommended. In order to
customize pruning parameters, instead manually call
`botorch.acquisition.utils.prune_inferior_points` on `X_baseline`
before instantiating the acquisition function.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if prune_baseline:
X_baseline = prune_inferior_points(
model=model, X=X_baseline, objective=objective
)
self.register_buffer("X_baseline", X_baseline)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qNoisyExpectedImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Noisy Expected Improvement values at the given
design points `X`.
"""
q = X.shape[-2]
X_full = torch.cat([X, match_batch_shape(self.X_baseline, X)], dim=-2)
# TODO (T41248036): Implement more efficient way to compute posterior
# over both training and test points in GPyTorch
posterior = self.model.posterior(X_full)
samples = self.sampler(posterior)
obj = self.objective(samples)
diffs = obj[:, :, :q].max(dim=-1)[0] - obj[:, :, q:].max(dim=-1)[0]
return diffs.clamp_min(0).mean(dim=0)
class qProbabilityOfImprovement(MCAcquisitionFunction):
r"""MC-based batch Probability of Improvement.
Estimates the probability of improvement over the current best observed
value by sampling from the joint posterior distribution of the q-batch.
    MC-based estimation of a probability involves taking the expectation of an
    indicator function; to support auto-differentiation, the indicator is
replaced with a sigmoid function with temperature parameter `tau`.
`qPI(X) = P(max Y >= best_f), Y ~ f(X), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> best_f = train_Y.max()[0]
>>> sampler = SobolQMCNormalSampler(1000)
>>> qPI = qProbabilityOfImprovement(model, best_f, sampler)
>>> qpi = qPI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
tau: float = 1e-3,
) -> None:
r"""q-Probability of Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless).
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have been
                submitted for function evaluation but have not yet been evaluated.
                Concatenated into X upon forward call. Copied and set to have no
                gradient.
tau: The temperature parameter used in the sigmoid approximation
of the step function. Smaller values yield more accurate
approximations of the function, but result in gradients
estimates with higher variance.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if not torch.is_tensor(best_f):
best_f = torch.tensor(float(best_f))
self.register_buffer("best_f", best_f)
if not torch.is_tensor(tau):
tau = torch.tensor(float(tau))
self.register_buffer("tau", tau)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qProbabilityOfImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Probability of Improvement values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
max_obj = obj.max(dim=-1)[0]
val = torch.sigmoid((max_obj - self.best_f) / self.tau).mean(dim=0)
return val
class qSimpleRegret(MCAcquisitionFunction):
r"""MC-based batch Simple Regret.
Samples from the joint posterior over the q-batch and computes the simple
regret.
`qSR(X) = E(max Y), Y ~ f(X), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qSR = qSimpleRegret(model, sampler)
>>> qsr = qSR(test_X)
"""
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qSimpleRegret on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Simple Regret values at the given design
points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
val = obj.max(dim=-1)[0].mean(dim=0)
return val
class qUpperConfidenceBound(MCAcquisitionFunction):
r"""MC-based batch Upper Confidence Bound.
Uses a reparameterization to extend UCB to qUCB for q > 1 (See Appendix A
    of [Wilson2017reparam]_.)
`qUCB = E(max(mu + |Y_tilde - mu|))`, where `Y_tilde ~ N(mu, beta pi/2 Sigma)`
and `f(X)` has distribution `N(mu, Sigma)`.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qUCB = qUpperConfidenceBound(model, 0.1, sampler)
>>> qucb = qUCB(test_X)
"""
def __init__(
self,
model: Model,
beta: float,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""q-Upper Confidence Bound.
Args:
model: A fitted model.
beta: Controls tradeoff between mean and standard deviation in UCB.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have been
                submitted for function evaluation but have not yet been evaluated.
                Concatenated into X upon forward call. Copied and set to have no
                gradient.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
self.beta_prime = math.sqrt(beta * math.pi / 2)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qUpperConfidenceBound on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Upper Confidence Bound values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
mean = obj.mean(dim=0)
ucb_samples = mean + self.beta_prime * (obj - mean).abs()
return ucb_samples.max(dim=-1)[0].mean(dim=0)
| [
"torch.is_tensor",
"torch.sigmoid"
] | 1.0.0 | shalijiang/bo | af13f0a38b579ab504f49a01f1ced13532a3ad49 |
1.0 | import torch
from torch import Tensor
import numpy as np
import matplotlib.pyplot as plt
from botorch.acquisition.analytic import ExpectedImprovement
from botorch.models import SingleTaskGP
from rollout import rollout, rollout_quad
import warnings
import time
import pickle
warnings.filterwarnings("ignore")
bound = 1.0
n = 50
x = torch.linspace(-bound, bound, n).view(-1, 1)
train_idx = [np.round(n / 3), np.round(n * 2 / 3)]
train_idx = [0, n - 1]
train_x = x[train_idx]
model = SingleTaskGP(train_x, Tensor([0, 0]))
model.covar_module.base_kernel.lengthscale = 0.4
model.covar_module.outputscale = 1.0
model.likelihood.noise = 0.0001
model.eval()
y_prior = model(x)
torch.manual_seed(0)
y = y_prior.sample()
train_y = y[train_idx]
y_best = torch.max(train_y).item()
print(train_x)
print(train_y)
model.set_train_data(train_x, train_y, strict=False)
model.eval()
y_post = model(x)
f, ax = plt.subplots(2, 1, figsize=(6, 12))
with torch.no_grad():
# Initialize plot
# Get upper and lower confidence bounds
lower, upper = y_post.confidence_region()
# Plot training data as black stars
ax[0].plot(x.squeeze().numpy(), y.numpy(), "r")
ax[0].plot(train_x.squeeze().numpy(), train_y.numpy(), "k*")
# Plot predictive means as blue line
ax[0].plot(x.squeeze().numpy(), y_post.mean.detach().numpy(), "b")
# Shade between the lower and upper confidence bounds
ax[0].fill_between(x.squeeze().numpy(), lower.numpy(), upper.numpy(), alpha=0.5)
ax[0].set_ylim([-3, 3])
ax[0].legend(["True func", "Observed Data", "Mean", "Confidence"])
## compute EI
expected_improvement = ExpectedImprovement(model, best_f=y_best)
with torch.no_grad():
ei_values = expected_improvement(x.unsqueeze(1))
ax[1].plot(x.squeeze().numpy(), ei_values.numpy(), label="EI")
## compute two-step EI
two_step_ei = np.zeros((3, n))
times = np.zeros((3, n))
num_y_samples = 100
samples, weights = np.polynomial.hermite.hermgauss(num_y_samples)
for i in range(n):
print("point", i)
this_x = x[i]
# start = time.time()
# two_step_ei[0, i] = rollout(this_x, model,
# best_f=y_best,
# bounds=Tensor([-bound, bound]).view(-1, 1),
# horizon=2,
# quadrature='qmc',
# num_y_samples=num_y_samples,
# )
# end = time.time()
# times[0, i] = end - start
# print('qmc', end - start)
start = time.time()
two_step_ei[1, i] = rollout(
this_x,
model,
best_f=y_best,
bounds=Tensor([-bound, bound]).view(-1, 1),
horizon=2,
x_grid=x,
idx=i,
quadrature=(samples, weights),
num_y_samples=num_y_samples,
)
end = time.time()
times[1, i] = end - start
print("gauss-hermite", end - start)
# start = time.time()
# two_step_ei[2, i] = rollout_quad(this_x, model,
# best_f=y_best,
# bounds=Tensor([-bound, bound]).view(-1, 1),
# horizon=2,
# )
# end = time.time()
# times[2, i] = end - start
# print('adap-gauss', end - start)
mean_time = times.mean(axis=1)
with torch.no_grad():
# ax[1].plot(x.squeeze().numpy(), two_step_ei[0], label='two-step EI qmc %.2fs' % mean_time[0])
ax[1].plot(
x.squeeze().numpy(),
two_step_ei[1],
label="two-step EI gauss-hermite %.2fs" % mean_time[1],
)
# ax[1].plot(x.squeeze().numpy(), two_step_ei[2], label='two-step EI adap-gauss %.2fs' % mean_time[2])
ax[1].legend()
print(times.mean(axis=1))
with open("rollout_test_results", "wb") as f:
pickle.dump({"time": times, "ei": two_step_ei}, f)
plt.show()
| [
"torch.max",
"torch.no_grad",
"torch.linspace",
"torch.manual_seed",
"torch.Tensor"
] | 1.0.0 | shalijiang/bo | af13f0a38b579ab504f49a01f1ced13532a3ad49 |
1.7 | import torch
def corner_to_center(xmin, ymin, xmax, ymax):
cx, cy = (xmin + xmax) / 2, (ymin + ymax) / 2
w = xmax - xmin
h = ymax - ymin
return cx, cy, w, h
def center_to_corner(cx, cy, w, h):
xmin, ymin = cx - 0.5 * w, cy - 0.5 * h
xmax, ymax = cx + 0.5 * w, cy + 0.5 * h
return xmin, ymin, xmax, ymax
def cells_to_bboxes(preds, anchors, S, is_pred):
    """Convert per-cell YOLO predictions (or targets) on an S x S grid into
    image-relative [best_class, score, x, y, w, h] boxes, one list per image."""
    batch_size = preds.shape[0]
num_anchors = len(anchors)
x_pred, y_pred = preds[..., 1:2], preds[..., 2:3]
w_pred, h_pred = preds[..., 3:4], preds[..., 4:5]
if is_pred:
anchors = anchors.reshape(1, len(anchors), 1, 1, 2)
x_pred, y_pred = torch.sigmoid(x_pred), torch.sigmoid(y_pred)
        # scale each predicted size by the matching anchor dimension (w by anchor width, h by anchor height)
        w_pred, h_pred = torch.exp(w_pred) * anchors[..., 0:1], torch.exp(h_pred) * anchors[..., 1:2]
scores = preds[..., 0:1]
best_class = preds[..., 5:6]
cell_indices = (
torch.arange(S)
.repeat(batch_size, num_anchors, S, 1)
.unsqueeze(-1)
.to(preds.device)
)
x = 1 / S * (x_pred + cell_indices)
y = 1 / S * (y_pred + cell_indices.permute(0, 1, 3, 2, 4))
w, h = 1 / S * w_pred, 1 / S * h_pred
converted_bboxes = torch.cat((best_class, scores, x, y, w, h), dim=-1).reshape(batch_size, num_anchors * S * S, 6)
return converted_bboxes.tolist()
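# Illustrative sketch (not part of the original module): decoding raw predictions for one
# scale into [best_class, score, x, y, w, h] boxes. The grid size, anchor values, and
# class count below are assumptions chosen only for this example.
def _example_decode_scale():
    S, num_anchors, num_classes = 13, 3, 20
    anchors = torch.tensor([[0.28, 0.22], [0.38, 0.48], [0.90, 0.78]])
    preds = torch.randn(2, num_anchors, S, S, 5 + num_classes)
    boxes = cells_to_bboxes(preds, anchors, S, is_pred=True)
    return boxes  # one list of num_anchors * S * S boxes per image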
| [
"torch.sigmoid",
"torch.cat",
"torch.exp",
"torch.arange"
] | 1.7.0 | DavianYang/yolo.ai | 0856d4f1e84428667046ee27270ff1bf742e658a |
1.7 | from typing import Sequence, Union, Callable, AnyStr, Any
import torch
from torch import nn
import torch.nn.functional as F
from torch import Tensor
from yolo.models.modules.activation import get_activation_layer
class ConvBlock(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Sequence],
stride: Union[int, Sequence] = 1,
padding: int = 0,
dilation: int = 1,
groups: int = 1,
bias: bool = False,
activation: Union[Callable, AnyStr] = (lambda: nn.ReLU(inplace=True))
) -> None:
super().__init__()
self.activate = (activation is not None)
self.conv = nn.Conv2d(in_channels, out_channels,
kernel_size, stride,
padding, dilation,
groups, bias
)
self.bn = nn.BatchNorm2d(out_channels)
if self.activate:
self.act = get_activation_layer(activation)
def forward(self, x: Tensor) -> Tensor:
x = self.bn(self.conv(x))
if self.activate:
x = self.act(x)
return x
class ResBlock(nn.Module):
def __init__(
self,
channels: int,
blocks: list
) -> None:
super().__init__()
conv1 = blocks[0]
conv2 = blocks[1]
self.resblock = nn.Sequential(
ConvBlock(channels, conv1.filters,
kernel_size=conv1.kernel_size,
stride=conv1.stride, padding=conv1.padding),
ConvBlock(conv1.filters, conv2.filters,
kernel_size=conv2.kernel_size,
stride=conv2.stride, padding=conv2.padding)
)
def forward(self, x: Tensor) -> Tensor:
x = self.resblock(x) + x
return x
class Upsample(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Sequence],
stride: Union[int, Sequence] = 1,
padding: int = 0,
):
super().__init__()
self.upsample = nn.Sequential(
ConvBlock(in_channels, out_channels, kernel_size, stride, padding),
nn.Upsample(scale_factor=2, mode="nearest")
)
def forward(self, x: Tensor) -> Tensor:
return self.upsample(x)
class ScalePrediction(nn.Module):
def __init__(
self,
in_channels,
num_classes,
num_anchors
) -> Any:
super().__init__()
self.num_classes = num_classes
self.num_anchors = num_anchors
self.pred = nn.Sequential(
ConvBlock(in_channels, 2*in_channels, kernel_size=3, padding=1),
nn.Conv2d(2*in_channels, (num_classes + 5) * num_anchors, kernel_size=1)
)
def forward(self, x: Tensor) -> Tensor:
return (
self.pred(x)
.reshape(x.shape[0], self.num_anchors, self.num_classes + 5, x.shape[2], x.shape[3])
.permute(0, 1, 3, 4, 2) # N x num_anchors x 13 x 13 x 5 + num_classes
)
class SEBlock(nn.Module):
def __init__(
self,
in_channels: int,
squeeze_channels: int,
activation: Union[Callable, AnyStr] = (lambda: nn.SiLU())
) -> None:
super().__init__()
self.se = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_channels, squeeze_channels, kernel_size=1),
get_activation_layer(activation),
nn.Conv2d(squeeze_channels, in_channels, kernel_size=1),
nn.Sigmoid()
)
def forward(self, x: Tensor) -> Tensor:
return x * self.se(x)
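# Illustrative sketch (not part of the original module): the squeeze-and-excitation block
# re-weights channels while preserving the input shape. The sizes below are assumptions.
def _example_se_block():
    block = SEBlock(in_channels=64, squeeze_channels=16)
    x = torch.randn(2, 64, 32, 32)
    return block(x).shape  # torch.Size([2, 64, 32, 32])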
class MBConvBlock(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Sequence],
stride: Union[int, Sequence],
padding: int,
expand_ratio: float,
reduction: int = 4, # squeeze excitation
survival_prob: float = 0.8 # for stochastic depth
) -> None:
super().__init__()
self.survival_prob = survival_prob
self.use_residual = in_channels == out_channels and stride == 1
hidden_dim = in_channels * expand_ratio
self.expand = in_channels != hidden_dim
squeeze_dim = int(in_channels / reduction)
if self.expand:
self.expand_conv = ConvBlock(
in_channels,
hidden_dim,
kernel_size=3,
stride=1,
padding=1,
activation='silu'
)
self.conv = nn.Sequential(
ConvBlock(
hidden_dim,
hidden_dim,
kernel_size,
stride,
padding,
groups=hidden_dim,
activation='silu'
),
SEBlock(hidden_dim, squeeze_dim),
nn.Conv2d(hidden_dim, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels)
)
def forward(self, inputs: Tensor) -> Tensor:
x = self.expand_conv(inputs) if self.expand else inputs
x = self.stochastic_depth(self.conv(x)) + inputs if self.use_residual else self.conv(x)
return x
    def stochastic_depth(self, x: Tensor) -> Tensor:
        # randomly drop the whole residual branch per sample during training,
        # rescaling survivors by 1 / survival_prob so the expected value is unchanged
        if not self.training:
            return x
        binary_tensor = torch.rand(x.shape[0], 1, 1, 1, device=x.device) < self.survival_prob
return torch.div(x, self.survival_prob) * binary_tensor | [
"torch.rand",
"torch.nn.Sigmoid",
"torch.div",
"torch.nn.SiLU",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Upsample",
"torch.nn.Conv2d",
"torch.nn.AdaptiveAvgPool2d"
] | 1.7.0 | DavianYang/yolo.ai | 0856d4f1e84428667046ee27270ff1bf742e658a |
1.10 | import numpy as np
import scipy.stats.stats as sciStats
import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
from volsim.params import *
class CorrelationLoss(nn.modules.loss._Loss):
def __init__(self, params:Params, useGPU:bool):
super(CorrelationLoss, self).__init__()
self.useGPU = useGPU
if useGPU:
self.epsilon = torch.tensor(0.0000001).cuda()
else:
self.epsilon = torch.tensor(0.0000001)
self.params = params
self.corHistoryMode = params.corHistoryMode
self.weightMSE = params.lossFacMSE
self.weightRelMSE = params.lossFacRelMSE
self.weightPearsonCorr = params.lossFacPearsonCorr
self.weightSlConvReg = params.lossFacSlConvReg
self.weightSizeReg = params.lossFacSizeReg
self.sizeRegExp = params.lossSizeExp
self.useOnlineMean = params.lossOnlineMean
self.aggregateCorr = params.lossCorrAggregate
self.resetCorrAcc()
self.stepHist = np.zeros(6)
self.stepHistCount = 0
self.lastSampleSliceCorr = 0
self.epochHist = {"pred":[], "targ":[], "path":[], "enstd":[], "tempPred":[], "tempTarg":[], "tempPath":[], "tempEnstd":[]}
# has to be called after all simulation pairs of one sample are processed
# to ensure correct loss computation for next sample
def resetCorrAcc(self):
self.accX = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])
self.accY = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])
self.count = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])
self.accFinal = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])
self.countFinal = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])
self.accX.requires_grad = False
self.accY.requires_grad = False
self.count.requires_grad = False
self.accFinal.requires_grad = False
self.countFinal.requires_grad = False
def forward(self, prediction:torch.Tensor, target:torch.Tensor, path:np.ndarray) -> torch.Tensor:
if self.useGPU:
prediction = prediction.cuda()
target = target.cuda()
corr = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])
correlation = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])
# pearson correlation
if self.weightPearsonCorr > 0:
corr = self.pearsonCorrOnline(prediction, target)
self.lastSampleSliceCorr = torch.mean(corr).item()
correlation = self.weightPearsonCorr * 0.5 * (1-corr)
# mse
l2 = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])
if self.weightMSE > 0:
l2 = self.weightMSE * self.distanceL2(prediction, target)
# relative mse
relL2 = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])
if self.weightRelMSE > 0:
predMean = self.accX.detach() / self.count.detach()
targMean = self.accY.detach() / self.count.detach()
relL2 = self.weightRelMSE * self.distanceL2(prediction-predMean, target-targMean)
# size regularization
sizeReg = torch.tensor([0.0]).cuda() if self.useGPU else torch.tensor([0.0])
if self.weightSizeReg > 0:
temp = torch.where(prediction > 1, torch.pow(prediction-1, self.sizeRegExp), torch.zeros_like(prediction))
sizeReg = self.weightSizeReg * torch.sum(temp, dim=1)
# step history
self.stepHist = self.stepHist + np.array([
torch.mean(l2+relL2+correlation+sizeReg).item(),
torch.mean(l2).item(),
torch.mean(correlation).item(),
torch.mean(corr).item(),
torch.mean(relL2).item(),
torch.mean(sizeReg).item(),
])
self.stepHistCount = self.stepHistCount + 1
# epoch history
self.epochHist["tempPred"] += [prediction.cpu().detach().numpy()]
self.epochHist["tempTarg"] += [target.cpu().detach().numpy()]
self.epochHist["tempPath"] += [np.repeat(path[:,None], target.shape[1], axis=1)]
result = torch.mean(l2 + relL2 + correlation + sizeReg)
if torch.isnan(result):
logging.error("NAN in loss!")
logging.error("L2 " + str(l2))
logging.error("Rel L2 " + str(relL2))
logging.error("Corr " + str(corr))
logging.error("Correlation " + str(correlation))
raise ValueError("NAN in loss!")
return result
def updateMeanAccs(self, x:torch.Tensor, y:torch.Tensor):
if self.useGPU:
x = x.cuda()
y = y.cuda()
self.count = self.count + x.shape[1]
self.accX = self.accX + torch.sum(x, dim=1, keepdim=True)
self.accY = self.accY + torch.sum(y, dim=1, keepdim=True)
def pearsonCorrOnline(self, x:torch.Tensor, y:torch.Tensor) -> torch.Tensor:
if self.useOnlineMean:
self.updateMeanAccs(x, y)
if self.count <= 1:
return torch.zeros_like(self.accFinal)
meanX = self.accX.detach() / self.count.detach()
meanY = self.accY.detach() / self.count.detach()
xm = x - meanX
ym = y - meanY
rNum = torch.sum(xm*ym, dim=1, keepdim=True) #manual dot product
rDen = torch.norm(xm, 2, dim=1, keepdim=True) * torch.norm(ym, 2, dim=1, keepdim=True)
rVal = rNum / torch.max(rDen, self.epsilon) #epsilon for numerical stability
if any(torch.isnan(rVal)):
logging.error("NAN in correlation computation!")
logging.error("x " + str(x))
logging.error("y " + str(y))
logging.error("accX " + str(self.accX))
logging.error("accY " + str(self.accY))
logging.error("count " + str(self.count))
logging.error("meanX " + str(meanX))
logging.error("meanY " + str(meanY))
logging.error("rNum " + str(rNum))
logging.error("rDen " + str(rDen))
logging.error("rVal " + str(rVal))
raise ValueError("NAN in correlation computation!")
if self.aggregateCorr:
# average over previous pairs from same sample for better stability
self.accFinal = self.accFinal.detach() + rVal
self.countFinal = self.countFinal.detach() + 1
return self.accFinal / self.countFinal
else:
return rVal
def getStepHistory(self) -> np.ndarray:
result = self.stepHist / self.stepHistCount
self.stepHist = np.zeros(6)
self.stepHistCount = 0
self.resetCorrAcc()
# normalize all step distances to [0.1, 1.0]
predStep = np.concatenate(self.epochHist["tempPred"], axis=1) #[3,55]
dMax = np.max(predStep, axis=1, keepdims=True) #[3,1]
dMin = np.min(predStep, axis=1, keepdims=True) #[3,1]
if (dMin == dMax).all():
predStep = predStep - dMin + 0.1
elif (dMin == dMax).any():
for i in range(dMin.shape[0]):
if dMin[i] == dMax[i]:
predStep[i] = predStep[i] - dMin[i] + 0.1
else:
predStep[i] = 0.9 * ((predStep[i] - dMin[i]) / (dMax[i] - dMin[i])) + 0.1
else:
predStep = 0.9 * ((predStep - dMin) / (dMax - dMin)) + 0.1
self.epochHist["pred"] += [predStep]
self.epochHist["targ"] += [np.concatenate(self.epochHist["tempTarg"], axis=1)]
self.epochHist["path"] += [np.concatenate(self.epochHist["tempPath"], axis=1)]
self.epochHist["tempPred"] = []
self.epochHist["tempTarg"] = []
self.epochHist["tempPath"] = []
return result
def getEpochHistory(self, splits:dict=None) -> tuple:
predEpoch = np.concatenate(self.epochHist["pred"], axis=0)
targEpoch = np.concatenate(self.epochHist["targ"], axis=0)
pathEpoch = np.concatenate(self.epochHist["path"], axis=0)
corrSplit = {}
if splits:
for split in splits:
idx = np.core.defchararray.find(pathEpoch.astype(str), splits[split]) >= 0
stacked = np.stack([predEpoch[idx], targEpoch[idx]])
if self.corHistoryMode == "pearson":
corr = np.corrcoef(stacked)[0,1]
elif self.corHistoryMode == "spearman":
corr, _ = sciStats.spearmanr(stacked.transpose((1,0)))
else:
raise ValueError("Invalid ground ")
corrSplit[split] = corr
stackedAll = np.stack([predEpoch.flatten(), targEpoch.flatten()])
if self.corHistoryMode == "pearson":
corrAll = np.corrcoef(stackedAll)[0,1]
elif self.corHistoryMode == "spearman":
corrAll, _ = sciStats.spearmanr(stackedAll.transpose((1,0)))
else:
raise ValueError("Invalid ground ")
self.epochHist["pred"] = []
self.epochHist["targ"] = []
self.epochHist["path"] = []
return corrAll, corrSplit
def distanceL2(self, x:torch.Tensor, y:torch.Tensor) -> torch.Tensor:
return F.mse_loss(x, y, reduction='none')
def distanceL1(self, x:torch.Tensor, y:torch.Tensor) -> torch.Tensor:
return F.l1_loss(x, y, reduction='none')
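    # Illustrative sketch (not part of the original class): for a single batch the online
    # Pearson correlation above reduces to the standard formula; a quick check against
    # numpy's corrcoef on randomly generated scores.
    @staticmethod
    def _example_pearson_check():
        x, y = torch.rand(1, 50), torch.rand(1, 50)
        xm, ym = x - x.mean(dim=1, keepdim=True), y - y.mean(dim=1, keepdim=True)
        r = torch.sum(xm * ym, dim=1) / (torch.norm(xm, 2, dim=1) * torch.norm(ym, 2, dim=1))
        reference = np.corrcoef(x.numpy(), y.numpy())[0, 1]
        return r.item(), reference  # the two values should agree up to numerical precision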
| [
"torch.isnan",
"torch.nn.functional.l1_loss",
"torch.norm",
"torch.max",
"torch.zeros_like",
"torch.pow",
"torch.nn.functional.mse_loss",
"torch.tensor",
"torch.mean",
"torch.sum"
] | 1.10.0 | tum-pbs/VOLSIM | 795a31c813bf072eb88289126d7abd9fba8b0e54 |
1.9 | from gettext import find
import torch
from ezflow.utils import (
AverageMeter,
coords_grid,
endpointerror,
find_free_port,
forward_interpolate,
is_port_available,
upflow,
)
def test_endpointerror():
pred = torch.rand(4, 2, 256, 256)
target = torch.rand(4, 2, 256, 256)
_ = endpointerror(pred, target)
multi_magnitude_epe = endpointerror(pred, target, multi_magnitude=True)
assert isinstance(multi_magnitude_epe, dict)
target = torch.rand(
4, 3, 256, 256
) # Ignore valid mask for EPE calculation if target contains it
_ = endpointerror(pred, target)
def test_forward_interpolate():
flow = torch.rand(2, 256, 256)
_ = forward_interpolate(flow)
def test_upflow():
flow = torch.rand(2, 2, 256, 256)
_ = upflow(flow)
def test_coords_grid():
_ = coords_grid(2, 256, 256)
def test_AverageMeter():
meter = AverageMeter()
meter.update(1)
assert meter.avg == 1
meter.reset()
assert meter.avg == 0
def test_find_free_port():
assert len(find_free_port()) == 5
def test_is_port_available():
port = find_free_port()
assert is_port_available(int(port)) is True
| [
"torch.rand"
] | 1.9.0 | neu-vig/ezflow | 1eb6f675e72b1de6db7b35d61ca4ef0082bae890 |
0.4 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import math
import random
import numpy
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as Data
import torch.nn.functional as F
import torch.autograd as autograd
import torchvision.transforms as T
import torch.optim as optim
from conf import *
"""PyTorch BERT model."""
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from pytorch_pretrained_bert.file_utils import cached_path
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
'bert-base-multilingual': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual.tar.gz",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
}
CONFIG_NAME = 'bert_config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
# if gpu is to be used
use_cuda = torch.cuda.is_available()
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor
Tensor = FloatTensor
random.seed(0)
numpy.random.seed(0)
torch.manual_seed(args.random_seed)
torch.cuda.manual_seed(args.random_seed)
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
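# Illustrative sketch (not part of the original module): comparing the exact GELU above with
# the tanh approximation mentioned in its docstring; the difference stays small (well below
# 1e-2 over typical activation ranges).
def _example_gelu_comparison():
    x = torch.linspace(-3.0, 3.0, steps=7)
    approx = 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    return torch.max(torch.abs(gelu(x) - approx))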
class BertConfig(object):
"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02):
"""Constructs BertConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file, "r") as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class BertLayerNorm(nn.Module):
def __init__(self, config, variance_epsilon=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.gamma = nn.Parameter(torch.ones(config.hidden_size))
self.beta = nn.Parameter(torch.zeros(config.hidden_size))
self.variance_epsilon = variance_epsilon
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.gamma * x + self.beta
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
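# Illustrative sketch (not part of the original module): shape flow through BertSelfAttention
# for a toy configuration (the sizes below are assumptions, not the released BERT sizes).
def _example_self_attention_shapes():
    config = BertConfig(vocab_size_or_config_json_file=100, hidden_size=16,
                        num_hidden_layers=1, num_attention_heads=4, intermediate_size=32)
    attention = BertSelfAttention(config)
    hidden_states = torch.rand(2, 8, 16)      # (batch, seq_len, hidden_size)
    attention_mask = torch.zeros(2, 1, 1, 8)  # additive mask; zeros keep every position
    return attention(hidden_states, attention_mask).shape  # torch.Size([2, 8, 16])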
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
self.intermediate_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask):
attention_output = self.attention(hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
layer = BertLayer(config)
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
all_encoder_layers = []
for layer_module in self.layer:
hidden_states = layer_module(hidden_states, attention_mask)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
self.LayerNorm = BertLayerNorm(config)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
bert_model_embedding_weights.size(0),
bias=False)
self.decoder.weight = bert_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super(BertOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class PreTrainedBertModel(nn.Module):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(PreTrainedBertModel, self).__init__()
if not isinstance(config, BertConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_bert_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.beta.data.normal_(mean=0.0, std=self.config.initializer_range)
module.gamma.data.normal_(mean=0.0, std=self.config.initializer_range)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, pretrained_model_name, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-base-multilingual`
. `bert-base-chinese`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
if pretrained_model_name in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name]
else:
archive_file = pretrained_model_name
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
except FileNotFoundError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name,
', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
pretrained_model_name))
return None
if resolved_archive_file == archive_file:
logger.info("loading archive file {}".format(archive_file))
else:
logger.info("loading archive file {} from cache at {}".format(
archive_file, resolved_archive_file))
tempdir = None
if os.path.isdir(resolved_archive_file):
serialization_dir = resolved_archive_file
else:
# Extract archive to temp dir
tempdir = tempfile.mkdtemp()
logger.info("extracting archive file {} to temp dir {}".format(
resolved_archive_file, tempdir))
with tarfile.open(resolved_archive_file, 'r:gz') as archive:
archive.extractall(tempdir)
serialization_dir = tempdir
# Load config
config_file = os.path.join(serialization_dir, CONFIG_NAME)
config = BertConfig.from_json_file(config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
state_dict = torch.load(weights_path)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if tempdir:
# Clean up temp dir
shutil.rmtree(tempdir)
return model
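# Typical usage (a minimal sketch; 'bert-base-uncased' is one of the archive names
# listed in the docstring above, and the cache directory is an illustrative choice):
# model = BertForPreTraining.from_pretrained('bert-base-uncased', cache_dir='/tmp/bert_cache')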
class BertModel(PreTrainedBertModel):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Params:
config: a BertConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
`encoded_layers`: controlled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
to the last attention block,
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
classifier pretrained on top of the hidden state associated with the first token of the
input (`[CLS]`) to train on the Next-Sentence task (see BERT's paper).
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
model = modeling.BertModel(config=config)
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
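# Worked example (illustrative): for a row attention_mask = [1, 1, 0] (last position padded),
# the unsqueezes give [[[1., 1., 0.]]] and (1.0 - mask) * -10000.0 gives [[[0., 0., -10000.]]],
# so once added to the raw attention scores the padded position receives ~0 probability
# after the softmax.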
embedding_output = self.embeddings(input_ids, token_type_ids)
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output
class BertForPreTraining(PreTrainedBertModel):
"""BERT model with pre-training heads.
This module comprises the BERT model followed by the two pre-training heads:
- the masked language modeling head, and
- the next sentence classification head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
`next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
Outputs:
if `masked_lm_labels` and `next_sentence_label` are not `None`:
Outputs the total_loss which is the sum of the masked language modeling loss and the next
sentence classification loss.
if `masked_lm_labels` or `next_sentence_label` is `None`:
Outputs a tuple comprising
- the masked language modeling logits, and
- the next sentence classification logits.
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size=32000, hidden_size=512,
num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
model = BertForPreTraining(config)
masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForPreTraining, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
next_sentence_label=None):
sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False)
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
if masked_lm_labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
return total_loss
else:
return prediction_scores, seq_relationship_score
class BertForMaskedLM(PreTrainedBertModel):
"""BERT model with the masked language modeling head.
This module comprises the BERT model followed by the masked language modeling head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
Outputs:
if `masked_lm_labels` is not `None`:
Outputs the masked language modeling loss.
if `masked_lm_labels` is `None`:
Outputs the masked language modeling logits.
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size=32000, hidden_size=512,
num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
model = BertForMaskedLM(config)
masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForMaskedLM, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False)
prediction_scores = self.cls(sequence_output)
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
return masked_lm_loss
else:
return prediction_scores
class Network(PreTrainedBertModel):
def __init__(self, config,hidden_dimention, output_dimention):
super(Network, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
# self.embedding_layer = nn.Embedding(embedding_size, embedding_dimention)
# self.embedding_layer.weight.data.copy_(torch.from_numpy(numpy.array(embedding_matrix)))
# self.inpt_layer_np = nn.Linear(embedding_dimention,hidden_dimention)
# self.inpt_layer_np_bert = nn.Linear(config.hidden_size, hidden_dimention)
self.inpt_layer_np_bert = nn.Linear(hidden_dimention, hidden_dimention)
self.hidden_layer_np = nn.Linear(hidden_dimention,hidden_dimention)
nh = hidden_dimention*2
# self.zp_bert_to_att_layer = nn.Linear(config.hidden_size, nh)#--------------------------------
self.zp_representation_layer = nn.Linear(nh, nh) # --------------------------------
self.np_representation_layer = nn.Linear(hidden_dimention,nh)
self.nps_representation_layer = nn.Linear(hidden_dimention,nh)
self.feature_representation_layer = nn.Linear(nnargs["feature_dimention"],nh)
self.representation_hidden_layer = nn.Linear(hidden_dimention*2,hidden_dimention*2)
self.output_layer = nn.Linear(hidden_dimention*2,output_dimention)
self.hidden_size = hidden_dimention
self.activate = nn.Tanh()
self.Attention_np = nn.Linear(256,1,bias=False)
self.Attention_zp = nn.Linear(nh,1,bias=False)
def forward_zp_pre(self, word_index, hiden_layer,dropout=0.0):
dropout_layer = nn.Dropout(dropout)
word_embedding = self.embedding_layer(word_index)#.view(-1,word_embedding_rep_dimention)
word_embedding = dropout_layer(word_embedding)
this_hidden = self.inpt_layer_zp_pre(word_embedding) + self.hidden_layer_zp_pre(hiden_layer)
this_hidden = self.activate(this_hidden)
this_hidden = dropout_layer(this_hidden)
return this_hidden
def forward_zp_post(self, word_index, hiden_layer,dropout=0.0):
dropout_layer = nn.Dropout(dropout)
word_embedding = self.embedding_layer(word_index)#.view(-1,word_embedding_rep_dimention)
this_hidden = self.inpt_layer_zp_post(word_embedding) + self.hidden_layer_zp_post(hiden_layer)
this_hidden = self.activate(this_hidden)
this_hidden = dropout_layer(this_hidden)
return this_hidden
def forward_np(self, word_index, hiden_layer,dropout=0.0):
dropout_layer = nn.Dropout(dropout)
word_embedding = self.embedding_layer(word_index)
this_hidden = self.inpt_layer_np(word_embedding) + self.hidden_layer_np(hiden_layer)
this_hidden = self.activate(this_hidden)
this_hidden = dropout_layer(this_hidden)
return this_hidden
def forward_np_bert(self, bert_out, hiden_layer,dropout=0.0):
dropout_layer = nn.Dropout(dropout)
# word_embedding = self.embedding_layer(word_index)
this_hidden = self.inpt_layer_np_bert(bert_out) + self.hidden_layer_np(hiden_layer)
this_hidden = self.activate(this_hidden)
this_hidden = dropout_layer(this_hidden)
return this_hidden
def generate_score(self,zp_pre,zp_post,np,feature,dropout=0.0):
dropout_layer = nn.Dropout(dropout)
x = self.zp_pre_representation_layer(zp_pre) + self.zp_post_representation_layer(zp_post) + self.np_representation_layer(np)\
+ self.feature_representation_layer(feature)
x = self.activate(x)
x = dropout_layer(x)
x = self.representation_hidden_layer(x)
x = self.activate(x)
x = dropout_layer(x)
x = self.output_layer(x)
xs = F.softmax(x)
return x,xs
def generate_score_bert(self,zp,np,feature,dropout=0.0):
dropout_layer = nn.Dropout(dropout)
x = self.zp_representation_layer(zp) + self.np_representation_layer(np)\
+ self.feature_representation_layer(feature)
x = self.activate(x)
x = dropout_layer(x)
x = self.representation_hidden_layer(x)
x = self.activate(x)
x = dropout_layer(x)
x = self.output_layer(x)
xs = F.softmax(x,dim=0)
return x,xs
def initHidden(self,batch=1):
return torch.tensor(numpy.zeros((batch, self.hidden_size))).type(torch.cuda.FloatTensor)
def get_attention_pre(self,inpt):
return self.selfAttentionB_pre(self.activate(self.selfAttentionA_pre(inpt)))
def get_attention_post(self,inpt):
return self.selfAttentionB_post(self.activate(self.selfAttentionA_post(inpt)))
def forward(self,data,dropout=0.0, attention_mask=None, masked_lm_labels=None):
# token_type_ids = None#----------
zp_reindex = torch.tensor(data["zp_reindex"]).type(torch.cuda.LongTensor)
# zps_sent_bert = []
# zp_i = 0
# for i, zp_reidx in enumerate(zp_reindex):
# if zp_i != zp_reidx:
# zp_i += 1
# zps_sent_bert.append(data["zp_sent_bert"][zp_i])
#
# zps_sent_mask_bert = []
# zp_i = 0
# for i, zp_reidx in enumerate(zp_reindex):
# if zp_i != zp_reidx:
# zp_i += 1
# zps_sent_mask_bert.append(data["zp_sent_mask_bert"][zp_i])
#
# # zp_sent_bert = torch.tensor(data["zp_sent_bert"]).type(torch.cuda.LongTensor)
# # zp_sent_mask_bert = torch.tensor(data["zp_sent_mask_bert"]).type(torch.cuda.FloatTensor)
# zp_sent_bert = torch.tensor(zps_sent_bert).type(torch.cuda.LongTensor)
# zp_sent_mask_bert = torch.tensor(zps_sent_mask_bert).type(torch.cuda.FloatTensor)
#
# # zp_orig_to_tok_bert = torch.tensor(data["zp_orig_to_tok_bert"]).type(torch.cuda.LongTensor)
# #input_ids
# sequence_output, _ = self.bert(zp_sent_bert, token_type_ids, zp_sent_mask_bert,output_all_encoded_layers=False)
#
# # for sent in zp_orig_to_tok_bert:
# # for i,ci in enumerate(sent):
#
# zp_representation = self.zp_bert_to_att_layer(torch.squeeze(sequence_output.narrow(1,0,1),1))
zps_sent_cls_output_bert = []
zp_i = 0
for i, zp_reidx in enumerate(zp_reindex):
if zp_i != zp_reidx:
zp_i += 1
zps_sent_cls_output_bert.append(data["zp_sent_cls_output_bert"][zp_i])
zp_representation = torch.tensor(zps_sent_cls_output_bert).type(torch.cuda.FloatTensor)[:,:512]#x*768->x*512
candi_reindex = torch.tensor(data["candi_reindex"]).type(torch.cuda.LongTensor)
# candi = torch.tensor(data["candi"]).type(torch.cuda.LongTensor)
# candi_mask = torch.tensor(data["candi_mask"]).type(torch.cuda.FloatTensor)
candi_bert = torch.tensor(data["candi_bert"]).type(torch.cuda.FloatTensor)[:,:,:256]#x*40*768->x*40*256
candi_mask_bert = torch.tensor(data["candi_mask_bert"]).type(torch.cuda.FloatTensor)#why not LongTensor
feature = torch.tensor(data["fl"]).type(torch.cuda.FloatTensor)
candi_bert = torch.transpose(candi_bert,0,1)#40*x*256
# mask_candi_bert = torch.transpose(candi_mask_bert,0,1)
# hidden_candi = self.initHidden()
# hiddens_candi = []
# for i in range(len(mask_candi_bert)):#40
# #hidden_candi = self.forward_np_bert(candi_bert[i], hidden_candi, dropout=dropout) * torch.transpose(mask_candi_bert[i:i + 1], 0, 1)#RNN
# hidden_candi = candi_bert[i]#choose the max
# hiddens_candi.append(hidden_candi)
# hiddens_candi = torch.cat(hiddens_candi,1)
# hiddens_candi = hiddens_candi.view(-1,len(mask_candi_bert),nnargs["hidden_dimention"])
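# (Descriptive note added for clarity.) The loop below performs attention pooling: for each
# zero-pronoun representation zpt, every 256-d row of the matching candi_bert slice gets a
# scalar score tanh(Attention_np(row) + Attention_zp(zpt)); the scores are softmax-normalized
# and used to take a weighted sum of the rows, yielding one 256-d pooled vector per zero pronoun.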
nps = []
# for npt, zpt in zip(hiddens_candi, zp_representation): # 5*40*256
for npt, zpt in zip(torch.transpose(candi_bert,0,1), zp_representation):#5*40*256,5*512
attention = F.softmax(torch.squeeze(self.activate(self.Attention_np(npt) + self.Attention_zp(zpt))),dim=0)
#[8*256]*[256*1]+[1*256]*[256*1]+[1*256]*[256*1]=[8*1]+[1*1]+[1*1]=[8*1]-->[8]
average_np = torch.transpose(npt,0,1)*attention
average_np = torch.sum(average_np,1,keepdim=True)
nps.append(average_np)
nps = torch.transpose(torch.cat(nps,1),0,1)
candi_representation = nps[candi_reindex]
output, softmax_out = self.generate_score_bert(zp_representation, candi_representation,feature)
output = torch.squeeze(output)
return output,softmax_out
| [
"torch.nn.Linear",
"torch.cat",
"torch.cuda.manual_seed",
"torch.ones",
"torch.squeeze",
"torch.cuda.is_available",
"torch.load",
"torch.transpose",
"torch.nn.CrossEntropyLoss",
"torch.sum",
"torch.sigmoid",
"torch.sqrt",
"torch.nn.Softmax",
"torch.manual_seed",
"torch.tensor",
"torch.zeros_like",
"torch.zeros",
"torch.nn.Tanh",
"torch.nn.functional.softmax",
"torch.matmul",
"torch.nn.Dropout",
"torch.arange",
"torch.ones_like",
"torch.nn.Embedding"
] | 0.4.1 | mjj1094/Attention_BERT_62 | 22cae03ab7bcb09cfd3f8b0b9f2239f8e3ba56ce |
1.5 | import logging
import math
import os
import pickle
import random
import sys
import time
from math import ceil, log
from pathlib import Path
from typing import Dict, List, Set, Tuple
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn import metrics
from torch.autograd import Variable
from torch.nn.utils.rnn import (pack_padded_sequence, pack_sequence,
pad_packed_sequence, pad_sequence)
from torch.utils.data import DataLoader, Dataset, TensorDataset, random_split
from src.data import SOURCE_ASSIST0910_ORIG, SOURCE_ASSIST0910_SELF
from src.utils import sAsMinutes, timeSince
# =========================
# Model
# =========================
class _Encoder(nn.Module):
def __init__(self, num_embeddings, emb_dim, hid_dim, n_layers, dropout):
super().__init__()
self.num_embeddings = num_embeddings
self.emb_dim = emb_dim
self.hid_dim = hid_dim
self.n_layers = n_layers
self.dropout = dropout
# Layers
self.embedding = nn.Embedding(num_embeddings, emb_dim)
self.rnn = nn.LSTM(emb_dim, hid_dim, n_layers, dropout=dropout)
self.dropout = nn.Dropout(dropout)
def forward(self, input):
embedded = self.dropout(self.embedding(input))
outputs, (hidden, cell) = self.rnn(embedded)
return hidden, cell
class _Decoder(nn.Module):
def __init__(self, output_dim, emb_dim, hid_dim, n_layers, dropout):
super().__init__()
self.emb_dim = emb_dim
self.hid_dim = hid_dim
self.output_dim = output_dim
self.n_layers = n_layers
self.dropout = dropout
# Layers
self.embedding = nn.Embedding(output_dim, emb_dim) # 250->6
self.rnn = nn.LSTM(emb_dim, hid_dim, n_layers, dropout=dropout) # 6, 100, 1
self.out = nn.Linear(hid_dim, output_dim) # 100, 250
self.dropout = nn.Dropout(dropout)
def forward(self, input, hidden, cell):
# print(input.shape) # 1, 100
# input = input.unsqueeze(0)
# If only a single x_trg step is sliced out and passed in, its shape is the one above, so unsqueeze it.
embedded = self.dropout(self.embedding(input))
output, (hidden, cell) = self.rnn(embedded, (hidden, cell))
# prediction = self.out(output.squeeze(0))
prediction = self.out(output)
return prediction, hidden, cell
class Seq2Seq(nn.Module):
def __init__(self, encoder, decoder, dev):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.device = dev
assert encoder.hid_dim == decoder.hid_dim, \
"Hidden dimensions of encoder and decoder must be equal!"
assert encoder.n_layers == decoder.n_layers, \
"Encoder and decoder must have equal number of layers!"
def forward(self, src, trg, teacher_forcing_ratio=0.5):
max_len = trg.shape[0] # should be 1
batch_size = trg.shape[1]
trg_vocab_size = self.decoder.output_dim
hidden, cell = self.encoder(src)
# print(trg.shape) # 2, 100
# input_trg = trg[-1,:] # should be 1, 100, ?
input_trg = trg
output, hidden, cell = self.decoder(input_trg, hidden, cell)
# print(output.shape) # 1, 100, 250
# outputs = output.unsqueeze(0)
outputs = output
# Knowledge State
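# (Descriptive note added for clarity.) The decoder output is sliced so that columns
# [2, 2+124) are treated as "answered wrong" logits and columns [2+124, ...) as
# "answered correct" logits, apparently one pair per skill (124 skills is an assumption
# read off the slicing below); the per-skill mastery probability is then
# sigmoid(correct) / (sigmoid(correct) + sigmoid(wrong)).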
o_wro = torch.sigmoid(output[:,:, 2:2+124])
o_cor = torch.sigmoid(output[:,:, 2+124:])
outputs_prob = (o_cor / (o_cor + o_wro))
return outputs, outputs_prob
def get_loss_batch_seq2seq(extend_forward, ks_loss):
def loss_batch_encdec(model, loss_func, *args, opt=None):
# Unpack data from DataLoader
xs_src, xs_trg, ys, yq, ya, yp = args
input_src = xs_src
input_trg = xs_trg
target = ys
input_src = input_src.permute(1, 0)
input_trg = input_trg.permute(1, 0)
target = target.permute(1, 0)
out, out_prob = model(input_src, input_trg)
# print(out.shape, out_prob.shape) # 1, 100, 250; 1, 100, 124
out = out.permute(1, 0, 2)
out_prob = out_prob.permute(1, 0, 2)
pred = torch.sigmoid(out) # squash into the [0, 1] interval
# --- data used for metric evaluation
prob = torch.max(out_prob * yq, 2)[0]
predicted = prob[:,-1 - extend_forward]
actual = ya[:,-1 - extend_forward]
predicted_ks = out_prob[:,-1,:].unsqueeze(1)
loss = loss_func(prob, ya)
if opt:
# backpropagation
opt.zero_grad()
loss.backward()
opt.step()
# Returns loss number, batch size
return loss.item(), len(ys), predicted, actual, predicted_ks
return loss_batch_encdec
def get_Seq2Seq(NUM_EMBEDDIGNS, ENC_EMB_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT, OUTPUT_DIM, DEC_EMB_DIM, DEC_DROPOUT, dev):
enc = _Encoder(NUM_EMBEDDIGNS, ENC_EMB_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT)
dec = _Decoder(OUTPUT_DIM, DEC_EMB_DIM, HID_DIM, N_LAYERS, DEC_DROPOUT)
model = Seq2Seq(enc, dec, dev).to(dev)
return model
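# Minimal construction sketch (the dimension values below are illustrative assumptions,
# not taken from the project's configuration):
# dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# model = get_Seq2Seq(NUM_EMBEDDIGNS=250, ENC_EMB_DIM=6, HID_DIM=100, N_LAYERS=1,
# ENC_DROPOUT=0.6, OUTPUT_DIM=250, DEC_EMB_DIM=6, DEC_DROPOUT=0.6, dev=dev)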
| [
"torch.nn.Linear",
"torch.sigmoid",
"torch.nn.Dropout",
"torch.nn.LSTM",
"torch.max",
"torch.nn.Embedding"
] | 1.5.0 | qqhann/KnowledgeTracing | cecdb9af0c44efffd1ce3359f331d7d7782f551b |
1.6 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import csv
import os
import shutil
from PIL import Image
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision
import cv2
import numpy as np
import time
import _init_paths
import models
from config import cfg
from config import update_config
from core.function import get_final_preds
from utils.transforms import get_affine_transform
from models.pose_hrnet import get_pose_net
COCO_KEYPOINT_INDEXES = {
0: 'nose',
1: 'left_eye',
2: 'right_eye',
3: 'left_ear',
4: 'right_ear',
5: 'left_shoulder',
6: 'right_shoulder',
7: 'left_elbow',
8: 'right_elbow',
9: 'left_wrist',
10: 'right_wrist',
11: 'left_hip',
12: 'right_hip',
13: 'left_knee',
14: 'right_knee',
15: 'left_ankle',
16: 'right_ankle'
}
COCO_INSTANCE_CATEGORY_NAMES = [
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
SKELETON = [
[1,3],[1,0],[2,4],[2,0],[0,5],[0,6],[5,7],[7,9],[6,8],[8,10],[5,11],[6,12],[11,12],[11,13],[13,15],[12,14],[14,16]
]
CocoColors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0],
[0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255],
[170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
NUM_KPTS = 17
CTX = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
def draw_pose(keypoints, img):
"""draw the keypoints and the skeletons.
:param keypoints: the shape should be equal to [17,2]
:param img:
"""
assert keypoints.shape == (NUM_KPTS, 2)
for i in range(len(SKELETON)):
kpt_a, kpt_b = SKELETON[i][0], SKELETON[i][1]
x_a, y_a = keypoints[kpt_a][0], keypoints[kpt_a][1]
x_b, y_b = keypoints[kpt_b][0], keypoints[kpt_b][1]
cv2.circle(img, (int(x_a), int(y_a)), 6, CocoColors[i], -1)
cv2.circle(img, (int(x_b), int(y_b)), 6, CocoColors[i], -1)
cv2.line(img, (int(x_a), int(y_a)), (int(x_b), int(y_b)), CocoColors[i], 2)
def draw_bbox(box, img):
"""draw the detected bounding box on the image.
:param box: [(x1, y1), (x2, y2)] corners of the detected box
:param img: image to draw on
"""
cv2.rectangle(img, box[0], box[1], color=(0, 255, 0), thickness=3)
def get_person_detection_boxes(model, img, threshold=0.5):
pred = model(img)
pred_classes = [COCO_INSTANCE_CATEGORY_NAMES[i]
for i in list(pred[0]['labels'].cpu().numpy())] # Predicted class names
pred_boxes = [[(i[0], i[1]), (i[2], i[3])]
for i in list(pred[0]['boxes'].detach().cpu().numpy())] # Bounding boxes
pred_score = list(pred[0]['scores'].detach().cpu().numpy())
if not pred_score or max(pred_score)<threshold:
return []
# Get list of index with score greater than threshold
pred_t = [pred_score.index(x) for x in pred_score if x > threshold][-1]
pred_boxes = pred_boxes[:pred_t+1]
pred_classes = pred_classes[:pred_t+1]
person_boxes = []
for idx, box in enumerate(pred_boxes):
if pred_classes[idx] == 'person':
person_boxes.append(box)
return person_boxes
def get_pose_estimation_prediction(pose_model, image, center, scale):
rotation = 0
# pose estimation transformation
trans = get_affine_transform(center, scale, rotation, cfg.MODEL.IMAGE_SIZE)
model_input = cv2.warpAffine(
image,
trans,
(int(cfg.MODEL.IMAGE_SIZE[0]), int(cfg.MODEL.IMAGE_SIZE[1])),
flags=cv2.INTER_LINEAR)
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
# pose estimation inference
model_input = transform(model_input).unsqueeze(0)
# switch to evaluate mode
pose_model.eval()
with torch.no_grad():
# compute output heatmap
output = pose_model(model_input)
preds, _ = get_final_preds(
cfg,
output.clone().cpu().numpy(),
np.asarray([center]),
np.asarray([scale]))
return preds
def box_to_center_scale(box, model_image_width, model_image_height):
"""convert a box to center,scale information required for pose transformation
Parameters
----------
box : list of tuple
list of length 2 with two tuples of floats representing
bottom left and top right corner of a box
model_image_width : int
model_image_height : int
Returns
-------
(numpy array, numpy array)
Two numpy arrays, coordinates for the center of the box and the scale of the box
"""
center = np.zeros((2), dtype=np.float32)
bottom_left_corner = box[0]
top_right_corner = box[1]
box_width = top_right_corner[0]-bottom_left_corner[0]
box_height = top_right_corner[1]-bottom_left_corner[1]
bottom_left_x = bottom_left_corner[0]
bottom_left_y = bottom_left_corner[1]
center[0] = bottom_left_x + box_width * 0.5
center[1] = bottom_left_y + box_height * 0.5
aspect_ratio = model_image_width * 1.0 / model_image_height
pixel_std = 200
if box_width > aspect_ratio * box_height:
box_height = box_width * 1.0 / aspect_ratio
elif box_width < aspect_ratio * box_height:
box_width = box_height * aspect_ratio
scale = np.array(
[box_width * 1.0 / pixel_std, box_height * 1.0 / pixel_std],
dtype=np.float32)
if center[0] != -1:
scale = scale * 1.25
return center, scale
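# Worked example (illustrative numbers): for box = [(0, 0), (100, 200)] and a 192x256 model
# input (aspect_ratio = 0.75), box_width (100) < 0.75 * box_height (150), so box_width is
# widened to 150; center = (50.0, 100.0) and scale = (150/200, 200/200) * 1.25 = (0.9375, 1.25).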
def parse_args():
parser = argparse.ArgumentParser(description='Train keypoints network')
# general
parser.add_argument('--cfg', type=str, default='./inference-config.yaml')
parser.add_argument('--video', type=str)
parser.add_argument('--webcam', action='store_true')
parser.add_argument('--image', type=str)
parser.add_argument('--write', action='store_true')
parser.add_argument('--showFps', action='store_true')
parser.add_argument('opts',
help='Modify config options using the command-line',
default=None,
nargs=argparse.REMAINDER)
args = parser.parse_args()
# args expected by supporting codebase
args.modelDir = ''
args.logDir = ''
args.dataDir = ''
args.prevModelDir = ''
return args
def main():
# cudnn related setting
cudnn.benchmark = cfg.CUDNN.BENCHMARK
torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED
args = parse_args()
update_config(cfg, args)
box_model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
box_model.to(CTX)
box_model.eval()
pose_model = eval('models.'+cfg.MODEL.NAME+'.get_pose_net')(
cfg, is_train=False
)
if cfg.TEST.MODEL_FILE:
print('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
pose_model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
else:
print('expected model defined in config at TEST.MODEL_FILE')
# pose_model = get_pose_net(cfg, True)
pose_model = torch.nn.DataParallel(pose_model, device_ids=cfg.GPUS)
pose_model.to(CTX)
pose_model.eval()
# Loading an video or an image or webcam
if args.webcam:
vidcap = cv2.VideoCapture(0)
elif args.video:
vidcap = cv2.VideoCapture(args.video)
elif args.image:
image_bgr = cv2.imread(args.image)
else:
print('please use --video or --webcam or --image to define the input.')
return
if args.webcam or args.video:
if args.write:
save_path = 'output.avi'
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(save_path,fourcc, 24.0, (int(vidcap.get(3)),int(vidcap.get(4))))
while True:
ret, image_bgr = vidcap.read()
if ret:
last_time = time.time()
image = image_bgr[:, :, [2, 1, 0]]
input = []
img = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
img_tensor = torch.from_numpy(img/255.).permute(2,0,1).float().to(CTX)
input.append(img_tensor)
# object detection box
pred_boxes = get_person_detection_boxes(box_model, input, threshold=0.9)
# pose estimation
if len(pred_boxes) >= 1:
for box in pred_boxes:
center, scale = box_to_center_scale(box, cfg.MODEL.IMAGE_SIZE[0], cfg.MODEL.IMAGE_SIZE[1])
image_pose = image.copy() if cfg.DATASET.COLOR_RGB else image_bgr.copy()
pose_preds = get_pose_estimation_prediction(pose_model, image_pose, center, scale)
if len(pose_preds)>=1:
for kpt in pose_preds:
draw_pose(kpt,image_bgr) # draw the poses
if args.showFps:
fps = 1/(time.time()-last_time)
img = cv2.putText(image_bgr, 'fps: '+ "%.2f"%(fps), (25, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 255, 0), 2)
if args.write:
out.write(image_bgr)
cv2.imshow('demo',image_bgr)
if cv2.waitKey(1) & 0XFF==ord('q'):
break
else:
print('cannot load the video.')
break
cv2.destroyAllWindows()
vidcap.release()
if args.write:
print('video has been saved as {}'.format(save_path))
out.release()
else:
# estimate on the image
last_time = time.time()
image = image_bgr[:, :, [2, 1, 0]]
input = []
img = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
img_tensor = torch.from_numpy(img/255.).permute(2,0,1).float().to(CTX)
input.append(img_tensor)
# object detection box
pred_boxes = get_person_detection_boxes(box_model, input, threshold=0.9)
# pose estimation
if len(pred_boxes) >= 1:
for box in pred_boxes:
center, scale = box_to_center_scale(box, cfg.MODEL.IMAGE_SIZE[0], cfg.MODEL.IMAGE_SIZE[1])
image_pose = image.copy() if cfg.DATASET.COLOR_RGB else image_bgr.copy()
pose_preds = get_pose_estimation_prediction(pose_model, image_pose, center, scale)
if len(pose_preds)>=1:
for kpt in pose_preds:
draw_pose(kpt,image_bgr) # draw the poses
if args.showFps:
fps = 1/(time.time()-last_time)
img = cv2.putText(image_bgr, 'fps: '+ "%.2f"%(fps), (25, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 255, 0), 2)
if args.write:
save_path = 'output.jpg'
cv2.imwrite(save_path,image_bgr)
print('the result image has been saved as {}'.format(save_path))
cv2.imshow('demo',image_bgr)
if cv2.waitKey(0) & 0XFF==ord('q'):
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
| [
"torch.device",
"torch.no_grad",
"torch.from_numpy",
"torch.cuda.is_available",
"torch.load",
"torch.nn.DataParallel"
] | 1.6.0 | Zhou1993napolun/Pose_enabler | 669fffd6cea57fec5fa9bd95868cc48347700f42 |
1.7 | import torch
import torch.nn as nn
from torchvision import models, transforms
from dataset import HCPDataset
# from torch.utils.data import DataLoader
from torch_geometric.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
EPOCHS = 120
IS_SEX = True # comment: declare it only here
THRESHOLD = 0.3
LR = 0.003
HIDDEN_CHANNELS = 64
COMMENT = 'TEST'
class CNN_HCP(nn.Module):
def __init__(self, num_classes):
super(CNN_HCP, self).__init__()
self.conv1 = nn.Conv2d(1, 3, kernel_size=3, stride=1, padding=1, dilation=1, groups=1, bias=True)
self.conv2 = nn.Conv2d(3, 6, kernel_size=3, stride=1, padding=1, dilation=1, groups=1, bias=True)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
self.fc1 = nn.Linear(in_features=6*7*7, out_features=64)
self.fc2 = nn.Linear(64, 16)
self.fc3 = nn.Linear(16, num_classes)
def forward(self, x):
x = self.conv1(x)
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 6*7*7)
x = F.relu(self.fc1(x))
x = F.dropout(x, p=0.5, training=self.training)
x = F.relu(self.fc2(x))
x = F.dropout(x, p=0.5, training=self.training)
x = F.relu(self.fc3(x))
return x
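# Shape trace for the forward pass above (assuming the 15x15 node-feature matrices produced by
# the reshape in train()/val() below): [N, 1, 15, 15] -> conv1 -> [N, 3, 15, 15]
# -> conv2 + pool -> [N, 6, 7, 7] -> flatten -> [N, 294] -> fc1 -> [N, 64] -> fc2 -> [N, 16]
# -> fc3 -> [N, num_classes].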
def get_data_hcp(batch_size=64):
# load the HCP data and apply the normalization required by pretrained CNN models; values lie in the [0, 1] range
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# use unweighted binary graph
dataset = HCPDataset(root='data/hcp', is_sex=IS_SEX, threshold=THRESHOLD, bin=True)
# split dataset for training and test
dataset_for_training = dataset[:802]
dataset_for_training = dataset_for_training.shuffle()
test_dataset = dataset[802:]
# split the training dataset for validation
graphs_training = 702
train_dataset = dataset_for_training[:graphs_training]
val_dataset = dataset_for_training[graphs_training:]
train_loader = DataLoader(train_dataset, batch_size, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size, shuffle=False)
test_loader = DataLoader(test_dataset, batch_size, shuffle=False)
return train_loader, val_loader, test_loader
def train(train_loader, model, criterion, optimizer, batch_size=64):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.train()
for data in train_loader: # Iterate in batches over the training dataset.
data = data.to(device)
if data.x.dim() < 3:
# change the dimension of node features into [batch_size, 15, 15]
data.x = torch.reshape(data.x, (-1, 15, 15))
# dimension required for conv2d [batch_size, channel_in, h, w]
data.x = torch.unsqueeze(data.x, dim=1)
# uncomment only for vgg16
# data.x = transforms.Resize((224, 224))(data.x)
# dimension of edge to [batch_size, channel_in, h, w]
out = model(data.x) # Perform a single forward pass.
loss = criterion(out, data.y.long()) # Compute the loss.
loss.backward() # Derive gradients.
optimizer.step() # Update parameters based on gradients.
optimizer.zero_grad() # Clear gradients.
def val(loader, model, device=None):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.eval()
correct = 0
for data in loader: # Iterate in batches over the training/test dataset.
data = data.to(device)
if data.x.dim() < 3:
# change the dimension into [batch_size, 15, 15]
data.x = torch.reshape(data.x, (-1, 15, 15))
data.x = torch.unsqueeze(data.x, dim=1)
# uncomment only for vgg16
# data.x = transforms.Resize((224, 224))(data.x)
out = model(data.x)
pred = out.argmax(dim=1) # Use the class with highest probability.
correct += int((pred == data.y).sum()) # Check against ground-truth labels.
return correct / len(loader.dataset) # Derive ratio of correct predictions.
def main():
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
train_loader, val_loader, test_loader = get_data_hcp(batch_size=16)
model = CNN_HCP(num_classes=2 if IS_SEX else 4).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=LR)
criterion = torch.nn.CrossEntropyLoss()
max_validation_accuracy = 0
tb = SummaryWriter(comment='{}_{}_th={}_lr={}_comment={}'.format(
model.__class__.__name__, 'sex' if IS_SEX else 'age', THRESHOLD, LR, COMMENT))
for epoch in range(1, EPOCHS):
train(train_loader=train_loader,
model=model,
criterion=criterion,
optimizer=optimizer)
train_acc = val(loader=train_loader, model=model)
val_acc = val(loader=val_loader, model=model)
print(f'Epoch: {epoch:03d}, Train Acc: {train_acc:.4f}, Val Acc: {val_acc:.4f}')
max_validation_accuracy = max(max_validation_accuracy, val_acc)
tb.add_scalars(f'accuracies', {
'train_acc': train_acc,
'val_acc': val_acc,
}, epoch)
print('Max validation accuracy: ', max_validation_accuracy)
if __name__ == "__main__":
main() | [
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.functional.dropout",
"torch.unsqueeze",
"torch.nn.Conv2d",
"torch.cuda.is_available",
"torch.nn.CrossEntropyLoss",
"torch.reshape"
] | 1.7.1 | vimarkova/dggru | 019106a491f28f15aa33a3ae1b575794f1a6e1af |
1.9 | import torch
from torch import Tensor, log
from ..neko_module import NekoModule
class Log(NekoModule):
"""
The module version of :func:`torch.log` operation.
Args:
eps (``float``, optional): A bias applied to the input to avoid ``-inf``. Default ``0``.
Examples::
>>> log = Log()
>>> a = torch.randn(5)
>>> a
tensor([ 2.3020, -0.8679, -0.2174, 2.4228, -1.2341])
>>> log(a)
tensor([0.8338, nan, nan, 0.8849, nan])
"""
def __init__(self, eps: float = 0.):
super().__init__()
self.eps: float = eps
def forward(self, x: Tensor) -> Tensor:
return log(x) if self.eps == 0 else log(x + self.eps)
| [
"torch.log"
] | 1.9.0 | ControlNet/tensorneko | 70dfb2f6395e1703dbdf5d5adcfed7b1334efb8f |
1.9 | from typing import Union
import torch
from numpy import ndarray
from torch import Tensor
def iou_1d(pred: Union[Tensor, ndarray], real: Union[Tensor, ndarray]) -> Tensor:
"""
Calculate 1D IOU for N proposals with L labels.
Args:
pred (:class:`~torch.Tensor` | :class:`~numpy.ndarray`): The predicted array with [N, 2]. First column is begin, second column
is end.
real (:class:`~torch.Tensor` | :class:`~numpy.ndarray`): The label array with [L, 2]. First column is begin, second column
is end.
Returns:
:class:`~torch.Tensor`: The iou result with [N, L].
"""
if type(pred) is ndarray:
pred = torch.tensor(pred)
if type(real) is ndarray:
real = torch.tensor(real)
pred_begin = pred[:, 0].unsqueeze(0).T
pred_end = pred[:, 1].unsqueeze(0).T
real_begin = real[:, 0]
real_end = real[:, 1]
inner_begin = torch.maximum(pred_begin, real_begin)
inner_end = torch.minimum(pred_end, real_end)
outer_begin = torch.minimum(pred_begin, real_begin)
outer_end = torch.maximum(pred_end, real_end)
inter = torch.clamp(inner_end - inner_begin, min=0.)
union = outer_end - outer_begin
return inter / union
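# Small sanity check (not part of the original module):
# >>> iou_1d(torch.tensor([[0., 2.]]), torch.tensor([[1., 3.], [4., 5.]]))
# tensor([[0.3333, 0.0000]])
# The first proposal overlaps [1, 3] by 1 over a union of 3, and misses [4, 5] entirely.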
| [
"torch.clamp",
"torch.tensor",
"torch.minimum",
"torch.maximum"
] | 1.9.0 | ControlNet/tensorneko | 70dfb2f6395e1703dbdf5d5adcfed7b1334efb8f |
1.8 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
from collections import namedtuple
from typing import Any, Dict, List, Optional
import torch
from layers import LayerType
Memory = namedtuple("Memory", "prev_max_mem, cur_mem")
def reset_peak_memory_stats(device: torch.device) -> Memory:
"""Safely resets CUDA peak memory statistics of device if it is
a CUDA device.
Notes: ``torch.cuda.reset_peak_memory_stats(device)`` will error
if no CUDA memory has been allocated to the device.
Args:
device: A torch.device
Returns:
max_memory_allocated before resetting the statistics and
memory_allocated, both in bytes
"""
prev_max_memory = torch.cuda.max_memory_allocated(device)
memory_allocated = torch.cuda.memory_allocated(device)
if prev_max_memory != memory_allocated and prev_max_memory > 0:
# raises RuntimeError if no previous allocation occurred
torch.cuda.reset_peak_memory_stats(device)
assert torch.cuda.max_memory_allocated(device) == memory_allocated
return Memory(prev_max_memory, memory_allocated)
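# Typical usage sketch: call this right before a measured region to record a baseline,
# run the workload, then compare torch.cuda.max_memory_allocated(device) against the
# returned cur_mem to estimate the peak memory attributable to that region.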
def get_layer_set(layer: str) -> str:
"""Layers in the same layer set share a config.
Args:
layer: Full name of the layer. This will be the PyTorch or Opacus
name of the layer in lower case (e.g. linear, rnn, dprnn), prefixed with
gsm_ (e.g. gsm_linear, gsm_dprnn) if DP is enabled. MultiheadAttention
is abbreviated to mha.
Returns:
The name of the layer set, where a set of layers are defined as layers
that share the same __init__ signature.
Notes:
All RNN-based models share a config.
"""
layer_set = layer.replace("gsm_dp", "").replace("gsm_", "").replace("dp", "")
# all RNN-based models use the same config
if layer_set in ["rnn", "gru", "lstm"]:
layer_set = "rnn_base"
return layer_set
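# Illustrative mappings:
# get_layer_set("gsm_linear") -> "linear"
# get_layer_set("gsm_dplstm") -> "rnn_base"
# get_layer_set("mha") -> "mha"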
def get_path(
layer: LayerType,
batch_size: int,
num_runs: int,
num_repeats: int,
random_seed: Optional[int] = None,
forward_only: bool = False,
root: str = "./results/raw/",
suffix: str = "",
) -> str:
"""Gets the path to the file where the corresponding results are located.
File is presumed to be a pickle file.
Args:
layer: full layer name
batch_size: batch size
num_runs: number of runs per benchmark
num_repeats: how many benchmarks were run
random_seed: the initial random seed
forward_only: whether backward passes were skipped
root: directory to write results to
suffix: optional string to append to file name
Returns:
Path to results pickle file
"""
pickle_name = f"{layer}_bs_{batch_size}_runs_{num_runs}_repeats_{num_repeats}_seed_{random_seed}"
if forward_only:
pickle_name += "_forward_only"
if len(suffix) and not suffix.startswith("_"):
suffix = f"_{suffix}"
return f"{root}{pickle_name}{suffix}.pkl"
def save_results(
layer: LayerType,
batch_size: int,
num_runs: int,
num_repeats: int,
results: List[Dict[str, Any]],
config: Dict[str, Any],
random_seed: Optional[int] = None,
forward_only: bool = False,
root: str = "./results/raw/",
suffix: str = "",
) -> None:
"""Saves the corresponding results as a pickle file.
Args:
layer: full layer name
batch_size: batch size
num_runs: number of runs per benchmark
num_repeats: how many benchmarks were run
results: list of benchmark result dicts (runtime and memory stats) of length num_repeats
config: layer config
random_seed: the initial random seed
forward_only: whether backward passes were skipped
root: directory to write results to
suffix: optional string to append to file name
"""
path = get_path(
layer=layer,
batch_size=batch_size,
num_runs=num_runs,
num_repeats=num_repeats,
random_seed=random_seed,
forward_only=forward_only,
root=root,
suffix=suffix,
)
with open(path, "wb") as handle:
pickle.dump(
{
"layer": layer,
"batch_size": batch_size,
"num_runs": num_runs,
"num_repeats": num_repeats,
"random_seed": random_seed,
"forward_only": forward_only,
"results": results,
"config": config,
},
handle,
protocol=pickle.HIGHEST_PROTOCOL,
)
| [
"torch.cuda.reset_peak_memory_stats",
"torch.cuda.max_memory_allocated",
"torch.cuda.memory_allocated"
] | 1.8 | facebookresearch/opacus | 5cc574ff877b0be5634dde8fdd5130b7090491a6 |
1.0 | import argparse
import atexit
import copy
import datetime
import numpy as np
import os
import torch
import tensorboardX
from functools import partial
from prob_mbrl import utils, models, algorithms, envs
if __name__ == '__main__':
parser = argparse.ArgumentParser("Deep-PILCO with moment matching")
parser.add_argument('-e', '--env', type=str, default="Cartpole")
parser.add_argument('-o',
'--output_folder',
type=str,
default="~/.prob_mbrl/")
parser.add_argument('-s', '--seed', type=int, default=1)
parser.add_argument('--num_threads', type=int, default=1)
parser.add_argument('--n_initial_epi', type=int, default=0)
parser.add_argument('--load_from', type=str, default=None)
parser.add_argument('--pred_H', type=int, default=15)
parser.add_argument('--control_H', type=int, default=40)
parser.add_argument('--discount_factor', type=str, default=None)
parser.add_argument('--prioritized_replay', action='store_true')
parser.add_argument('--timesteps_to_sample',
type=utils.load_csv,
default=0)
parser.add_argument('--mm_groups', type=int, default=None)
parser.add_argument('--debug', action='store_true')
parser.add_argument('--dyn_lr', type=float, default=1e-4)
parser.add_argument('--dyn_opt_iters', type=int, default=2000)
parser.add_argument('--dyn_batch_size', type=int, default=100)
parser.add_argument('--dyn_drop_rate', type=float, default=0.1)
parser.add_argument('--dyn_components', type=int, default=1)
parser.add_argument('--dyn_shape', type=utils.load_csv, default=[200, 200])
parser.add_argument('--pol_lr', type=float, default=1e-3)
parser.add_argument('--pol_clip', type=float, default=1.0)
parser.add_argument('--pol_drop_rate', type=float, default=0.1)
parser.add_argument('--pol_opt_iters', type=int, default=1000)
parser.add_argument('--pol_batch_size', type=int, default=100)
parser.add_argument('--ps_iters', type=int, default=100)
parser.add_argument('--pol_shape', type=utils.load_csv, default=[200, 200])
parser.add_argument('--plot_level', type=int, default=0)
parser.add_argument('--render', action='store_true')
parser.add_argument('--use_cuda', action='store_true')
parser.add_argument('--learn_reward', action='store_true')
parser.add_argument('--keep_best', action='store_true')
parser.add_argument('--stop_when_done', action='store_true')
parser.add_argument('--expl_noise', type=float, default=0.0)
# parameters
args = parser.parse_args()
loaded_from = args.load_from
if loaded_from is not None:
args = torch.load(os.path.join(loaded_from, 'args.pth.tar'))
# initialize environment
torch.set_num_threads(args.num_threads)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
torch.set_flush_denormal(True)
if args.env in envs.__all__:
env = envs.__dict__[args.env]()
else:
import gym
env = gym.make(args.env)
env_name = env.spec.id if env.spec is not None else env.__class__.__name__
output_folder = os.path.expanduser(args.output_folder)
results_folder = os.path.join(
output_folder, "mc_pilco_mm", env_name,
datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S.%f"))
try:
os.makedirs(results_folder)
except OSError:
pass
results_filename = os.path.join(results_folder, "experience.pth.tar")
torch.save(args, os.path.join(results_folder, 'args.pth.tar'))
D = env.observation_space.shape[0]
U = env.action_space.shape[0]
maxU = env.action_space.high
minU = env.action_space.low
# initialize reward/cost function
if (args.learn_reward or not hasattr(env, 'reward_func')
or env.reward_func is None):
reward_func = None
args.learn_reward = True
else:
reward_func = env.reward_func
# initialize to max episode steps if available
if hasattr(env, 'spec'):
if hasattr(env.spec, 'max_episode_steps'):
args.control_H = env.spec.max_episode_steps
args.stop_when_done = True
initial_experience = args.control_H * args.n_initial_epi
# initialize discount factor
if args.discount_factor is not None:
if args.discount_factor == 'auto':
args.discount_factor = (1.0 / args.control_H)**(2.0 /
args.control_H)
else:
args.discount_factor = float(args.discount_factor)
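# Worked example: with the default control_H = 40, the 'auto' rule gives
# (1 / 40) ** (2 / 40) = exp(-(2 / 40) * ln(40)) ≈ exp(-0.184) ≈ 0.83.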
# initialize dynamics model
dynE = 2 * (D + 1) if args.learn_reward else 2 * D
if args.dyn_components > 1:
output_density = models.GaussianMixtureDensity(dynE / 2,
args.dyn_components)
dynE = (dynE + 1) * args.dyn_components + 1
else:
output_density = models.DiagGaussianDensity(dynE / 2)
dyn_model = models.mlp(
D + U,
dynE,
args.dyn_shape,
dropout_layers=[
models.modules.CDropout(args.dyn_drop_rate * np.ones(hid))
if args.dyn_drop_rate > 0 else None for hid in args.dyn_shape
],
nonlin=torch.nn.ReLU)
dyn = models.DynamicsModel(dyn_model,
reward_func=reward_func,
output_density=output_density).float()
# initialize policy
pol_model = models.mlp(D,
2 * U,
args.pol_shape,
dropout_layers=[
models.modules.BDropout(args.pol_drop_rate)
if args.pol_drop_rate > 0 else None
for hid in args.pol_shape
],
biases_initializer=None,
nonlin=torch.nn.ReLU,
output_nonlin=partial(models.DiagGaussianDensity,
U))
pol = models.Policy(pol_model, maxU, minU).float()
print('args\n', args)
print('Dynamics model\n', dyn)
print('Policy\n', pol)
# initialize experience dataset
exp = utils.ExperienceDataset()
if loaded_from is not None:
utils.load_checkpoint(loaded_from, dyn, pol, exp)
# initialize dynamics optimizer
opt1 = torch.optim.Adam(dyn.parameters(), args.dyn_lr)
# initialize policy optimizer
opt2 = torch.optim.Adam(pol.parameters(), args.pol_lr)
if args.use_cuda and torch.cuda.is_available():
dyn = dyn.cuda()
pol = pol.cuda()
writer = tensorboardX.SummaryWriter(
logdir=os.path.join(results_folder, "logs"))
# callbacks
def on_close():
writer.close()
atexit.register(on_close)
# initial experience data collection
env.seed(args.seed)
rnd = lambda x, t: env.action_space.sample() # noqa: E731
while exp.n_samples() < initial_experience:
ret = utils.apply_controller(
env,
rnd,
min(args.control_H, initial_experience - exp.n_samples() + 1),
stop_when_done=args.stop_when_done)
exp.append_episode(*ret, policy_params=[])
if initial_experience > 0:
exp.policy_parameters[-1] = copy.deepcopy(pol.state_dict())
exp.save(results_filename)
# policy learning loop
expl_pol = lambda x, t: ( # noqa: E731
pol(x) + args.expl_noise * rnd(x, t)).clip(minU, maxU)
render_fn = (lambda *args, **kwargs: env.render()) if args.render else None
for ps_it in range(args.ps_iters):
# apply policy
new_exp = exp.n_samples() + args.control_H
while exp.n_samples() < new_exp:
ret = utils.apply_controller(env,
expl_pol,
min(args.control_H,
new_exp - exp.n_samples() + 1),
stop_when_done=args.stop_when_done,
callback=render_fn)
exp.append_episode(*ret, policy_params=[])
exp.policy_parameters[-1] = copy.deepcopy(pol.state_dict())
exp.save(results_filename)
# train dynamics
X, Y = exp.get_dynmodel_dataset(deltas=True,
return_costs=args.learn_reward)
dyn.set_dataset(X.to(dyn.X.device, dyn.X.dtype),
Y.to(dyn.X.device, dyn.X.dtype))
utils.train_regressor(dyn,
args.dyn_opt_iters,
args.dyn_batch_size,
True,
opt1,
log_likelihood=dyn.output_density.log_prob,
prioritized_sampling=args.prioritized_replay,
summary_writer=writer,
summary_scope='model_learning/episode_%d' %
ps_it)
torch.save(dyn.state_dict(),
os.path.join(results_folder, 'latest_dynamics.pth.tar'))
# sample initial states for policy optimization
x0 = exp.sample_states(args.pol_batch_size,
timestep=0).to(dyn.X.device,
dyn.X.dtype).detach()
if args.plot_level > 0:
utils.plot_rollout(x0[:25], dyn, pol, args.pred_H * 2)
# train policy
def on_iteration(i, loss, states, actions, rewards, discount):
writer.add_scalar('mc_pilco/episode_%d/training loss' % ps_it,
loss, i)
print("Policy search iteration %d" % (ps_it + 1))
algorithms.mc_pilco(x0,
dyn,
pol,
args.pred_H,
opt2,
exp,
args.pol_opt_iters,
discount=args.discount_factor,
pegasus=True,
mm_states=True,
mm_rewards=True,
mm_groups=args.mm_groups,
maximize=True,
clip_grad=args.pol_clip,
step_idx_to_sample=args.timesteps_to_sample,
init_state_noise=1e-1 * x0.std(0),
prioritized_replay=args.prioritized_replay,
on_iteration=on_iteration,
debug=args.debug)
torch.save(pol.state_dict(),
os.path.join(results_folder, 'latest_policy.pth.tar'))
if args.plot_level > 0:
utils.plot_rollout(x0[:25], dyn, pol, args.pred_H * 2)
writer.add_scalar('robot/evaluation_loss',
torch.tensor(ret[2]).sum(), ps_it + 1)
| [
"torch.manual_seed",
"torch.cuda.is_available",
"torch.tensor",
"torch.set_flush_denormal",
"torch.set_num_threads"
] | 1.0 | gamerDecathlete/prob_mbrl | efba089bb066f32ad9133ac2504099e05aac5846 |
1.2 | #!/usr/bin/env python3
import typing
from enum import Enum
from inspect import signature
from typing import Any, Callable, List, Tuple, Union, cast, overload
import torch
from torch import Tensor, device
from torch.nn import Module
from .._utils.typing import Literal, TargetType
class ExpansionTypes(Enum):
repeat = 1
repeat_interleave = 2
def safe_div(denom: Tensor, quotient: float, default_value: Tensor) -> Tensor:
r"""
A simple utility function to perform `denom / quotient`
if the statement is undefined => result will be `default_value`
"""
return denom / quotient if quotient != 0.0 else default_value
@typing.overload
def _is_tuple(inputs: Tensor) -> Literal[False]:
...
@typing.overload
def _is_tuple(inputs: Tuple[Tensor, ...]) -> Literal[True]:
...
def _is_tuple(inputs: Union[Tensor, Tuple[Tensor, ...]]) -> bool:
return isinstance(inputs, tuple)
def _validate_target(num_samples: int, target: TargetType) -> None:
if isinstance(target, list) or (
isinstance(target, torch.Tensor) and torch.numel(target) > 1
):
assert num_samples == len(target), (
"The number of samples provied in the"
"input {} does not match with the number of targets. {}".format(
num_samples, len(target)
)
)
@overload
def _format_tensor_into_tuples(inputs: None) -> None:
...
@overload
def _format_tensor_into_tuples(
inputs: Union[Tensor, Tuple[Tensor, ...]]
) -> Tuple[Tensor, ...]:
...
def _format_tensor_into_tuples(
inputs: Union[None, Tensor, Tuple[Tensor, ...]]
) -> Union[None, Tuple[Tensor, ...]]:
if inputs is None:
return None
if not isinstance(inputs, tuple):
assert isinstance(
inputs, torch.Tensor
), "`inputs` must have type " "torch.Tensor but {} found: ".format(type(inputs))
inputs = (inputs,)
return inputs
def _format_input(inputs: Union[Tensor, Tuple[Tensor, ...]]) -> Tuple[Tensor, ...]:
return _format_tensor_into_tuples(inputs)
@overload
def _format_additional_forward_args(additional_forward_args: None) -> None:
...
@overload
def _format_additional_forward_args(
additional_forward_args: Union[Tensor, Tuple]
) -> Tuple:
...
@overload
def _format_additional_forward_args(additional_forward_args: Any) -> Union[None, Tuple]:
...
def _format_additional_forward_args(additional_forward_args: Any) -> Union[None, Tuple]:
if additional_forward_args is not None and not isinstance(
additional_forward_args, tuple
):
additional_forward_args = (additional_forward_args,)
return additional_forward_args
def _expand_additional_forward_args(
additional_forward_args: Any,
n_steps: int,
expansion_type: ExpansionTypes = ExpansionTypes.repeat,
) -> Union[None, Tuple]:
def _expand_tensor_forward_arg(
additional_forward_arg: Tensor,
n_steps: int,
expansion_type: ExpansionTypes = ExpansionTypes.repeat,
) -> Tensor:
if len(additional_forward_arg.size()) == 0:
return additional_forward_arg
if expansion_type == ExpansionTypes.repeat:
return torch.cat([additional_forward_arg] * n_steps, dim=0)
elif expansion_type == ExpansionTypes.repeat_interleave:
return additional_forward_arg.repeat_interleave(n_steps, dim=0)
else:
raise NotImplementedError(
"Currently only `repeat` and `repeat_interleave`"
" expansion_types are supported"
)
if additional_forward_args is None:
return None
return tuple(
_expand_tensor_forward_arg(additional_forward_arg, n_steps, expansion_type)
if isinstance(additional_forward_arg, torch.Tensor)
else additional_forward_arg
for additional_forward_arg in additional_forward_args
)
def _expand_target(
target: TargetType,
n_steps: int,
expansion_type: ExpansionTypes = ExpansionTypes.repeat,
) -> TargetType:
if isinstance(target, list):
if expansion_type == ExpansionTypes.repeat:
return target * n_steps
elif expansion_type == ExpansionTypes.repeat_interleave:
expanded_target = []
for i in target:
expanded_target.extend([i] * n_steps)
return cast(Union[List[Tuple[int, ...]], List[int]], expanded_target)
else:
raise NotImplementedError(
"Currently only `repeat` and `repeat_interleave`"
" expansion_types are supported"
)
elif isinstance(target, torch.Tensor) and torch.numel(target) > 1:
if expansion_type == ExpansionTypes.repeat:
return torch.cat([target] * n_steps, dim=0)
elif expansion_type == ExpansionTypes.repeat_interleave:
return target.repeat_interleave(n_steps, dim=0)
else:
raise NotImplementedError(
"Currently only `repeat` and `repeat_interleave`"
" expansion_types are supported"
)
return target
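# Illustrative sketch (not part of the original file): how the two
# ExpansionTypes expand a (2, 1) tensor argument for n_steps=3 using the
# helpers defined above.
def _example_expansion_types() -> Tuple[Tensor, Tensor]:
    arg = torch.tensor([[1.0], [2.0]])
    repeated = _expand_additional_forward_args((arg,), 3)[0]
    # ExpansionTypes.repeat tiles whole batches: rows follow 1, 2, 1, 2, 1, 2
    interleaved = _expand_additional_forward_args(
        (arg,), 3, ExpansionTypes.repeat_interleave
    )[0]
    # ExpansionTypes.repeat_interleave repeats each row in place: 1, 1, 1, 2, 2, 2
    return repeated, interleaved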
def _run_forward(
forward_func: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
target: TargetType = None,
additional_forward_args: Any = None,
) -> Tensor:
forward_func_args = signature(forward_func).parameters
if len(forward_func_args) == 0:
output = forward_func()
return output if target is None else _select_targets(output, target)
# make everything a tuple so that it is easy to unpack without
# using if-statements
inputs = _format_input(inputs)
additional_forward_args = _format_additional_forward_args(additional_forward_args)
output = forward_func(
*(*inputs, *additional_forward_args)
if additional_forward_args is not None
else inputs
)
return _select_targets(output, target)
def _select_targets(output: Tensor, target: TargetType) -> Tensor:
if target is None:
return output
num_examples = output.shape[0]
dims = len(output.shape)
device = output.device
if isinstance(target, (int, tuple)):
return _verify_select_column(output, target)
elif isinstance(target, torch.Tensor):
if torch.numel(target) == 1 and isinstance(target.item(), int):
return _verify_select_column(output, cast(int, target.item()))
elif len(target.shape) == 1 and torch.numel(target) == num_examples:
assert dims == 2, "Output must be 2D to select tensor of targets."
return torch.gather(output, 1, target.reshape(len(output), 1))
else:
raise AssertionError(
"Tensor target dimension %r is not valid. %r"
% (target.shape, output.shape)
)
elif isinstance(target, list):
assert len(target) == num_examples, "Target list length does not match output!"
if isinstance(target[0], int):
assert dims == 2, "Output must be 2D to select tensor of targets."
return torch.gather(
output, 1, torch.tensor(target, device=device).reshape(len(output), 1)
)
elif isinstance(target[0], tuple):
return torch.stack(
[
output[(i,) + cast(Tuple, targ_elem)]
for i, targ_elem in enumerate(target)
]
)
else:
raise AssertionError("Target element type in list is not valid.")
else:
raise AssertionError("Target type %r is not valid." % target)
def _verify_select_column(
output: Tensor, target: Union[int, Tuple[int, ...]]
) -> Tensor:
target = cast(Tuple[int, ...], (target,) if isinstance(target, int) else target)
assert (
len(target) <= len(output.shape) - 1
), "Cannot choose target column with output shape %r." % (output.shape,)
return output[(slice(None), *target)]
def _extract_device(
module: Module,
hook_inputs: Union[None, Tensor, Tuple[Tensor, ...]],
hook_outputs: Union[None, Tensor, Tuple[Tensor, ...]],
) -> device:
params = list(module.parameters())
if (
(hook_inputs is None or len(hook_inputs) == 0)
and (hook_outputs is None or len(hook_outputs) == 0)
and len(params) == 0
):
raise RuntimeError(
"""Unable to extract device information for the module
{}. Both inputs and outputs to the forward hook and
`module.parameters()` are empty.
The reason that the inputs to the forward hook are empty
could be due to the fact that the arguments to that
module {} are all named and are passed as named
variables to its forward function.
""".format(
module, module
)
)
if hook_inputs is not None and len(hook_inputs) > 0:
return hook_inputs[0].device
if hook_outputs is not None and len(hook_outputs) > 0:
return hook_outputs[0].device
return params[0].device
| [
"torch.cat",
"torch.tensor",
"torch.numel"
] | 1.2 | vinnamkim/captum | b7429d1561b6018e0d53d68eaafc6632e97ac164 |
1.10 | import torch
import pandas as pd
class ESXDataset(torch.utils.data.Dataset):
def __init__(self, train_X, train_Y, train_T, train_E):
self.train_X = torch.from_numpy(train_X).float()
self.train_T = torch.from_numpy(train_T).float() # label of treatment status
self.train_Y = torch.from_numpy(train_Y).float() # label of conversion
self.train_E = torch.from_numpy(train_E).float() # label of randomized
self.data_num = len(train_X)
def __len__(self):
return self.data_num
def __getitem__(self, idx):
out_x = self.train_X[idx]
out_t = self.train_T[idx]
out_y = self.train_Y[idx]
out_e = self.train_E[idx]
return out_x, out_t, out_y, out_e | [
"torch.from_numpy"
] | 1.10.1 | kailiang-zhong/DESCN | 2aab9da518f1426d8bc753e82e2be6d8d54ce537 |
1.7 | import argparse
import os
import pickle
from types import SimpleNamespace
from typing import OrderedDict
from pytorch_lightning import callbacks
from torch._C import device
from utils.vocab import build_vocab, load_vocab
from utils.data_loader import get_loader
from utils import NLGEval
from torchvision.transforms import transforms
from copy import deepcopy
from models import IQ
import math
import numpy as np
import torch
from torch import nn
import pytorch_lightning as pl
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
import torch.multiprocessing
import math as m
# os.environ['CUDA_VISIBLE_DEVICES'] = '1'
torch.multiprocessing.set_sharing_strategy('file_system')
class TrainIQ(pl.LightningModule):
def __init__(self, vocab, args):
super().__init__()
self.latent_transformer = False
self.vocab = vocab
self.args = args
self.hp_string = "{}_{}_{}_{}_{}_{}_{}_{}_{}_{}. {}".format(
args.input_mode, args.emb_dim, "True", args.hidden_dim, args.latent_dim, args.pwffn_dim, args.num_layers, args.num_heads, args.lr, args.batch_size, args.print_note
)
self.iter = 0
self.kliter = 0
self.nlge = NLGEval(no_glove=True, no_skipthoughts=True)
metrics = {
"loss": [],
"img": [],
"ppl": [],
"kld": [],
"aux": [],
"elbo": [],
"rec": [],
}
self.val_metrics = deepcopy(metrics)
self.model = IQ(self.latent_transformer, vocab, args)
self.criterion = nn.CrossEntropyLoss(
ignore_index=self.vocab.word2idx[self.vocab.SYM_PAD])
self.image_recon_criterion = nn.MSELoss()
def token_decode(self, tokenized_tensor_of_ints, sample=5):
for i, batch_item in enumerate(tokenized_tensor_of_ints):
if i == sample:
break
sentence_string = " ".join(
[self.vocab.idx2word[token.item()] for token in batch_item])
print(sentence_string)
print()
def forward(self, batch):
images, _, questions, posteriors, answers, _, answer_types_for_input, _ = batch.values()
images, questions, posteriors, answers, answer_types_for_input = images.cuda(
), questions.to(self.args.device), posteriors.to(self.args.device), answers.to(self.args.device), answer_types_for_input.to(self.args.device)
if self.args.input_mode == "ans":
output, z, kld_loss, image_recon = self.model(
images, answers, posteriors, questions)
elif self.args.input_mode == "cat":
output, z, kld_loss, image_recon = self.model(
images, answer_types_for_input, posteriors, questions)
return output, z, kld_loss, image_recon
def calculate_losses(self, output, image_recon, kld_loss, z_logit, target):
loss_rec = self.criterion(
output.reshape(-1, output.size(-1)), target.reshape(-1))
loss_img = self.image_recon_criterion(image_recon[0], image_recon[1])
if not self.latent_transformer:
kld_loss = torch.tensor([0])
loss = loss_rec + self.args.image_recon_lambda * loss_img
elbo = loss_rec
aux = 0
else:
z_logit = z_logit.unsqueeze(1).repeat(1, output.size(1), 1)
loss_aux = self.criterion(
z_logit.reshape(-1, z_logit.size(-1)), target.reshape(-1))
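            # anneal the KL weight from ~0 towards 1 with a tanh schedule tied to
            # the number of latent-training iterations (self.kliter) and args.full_kl_step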
kl_weight = min(math.tanh(6 * self.kliter /
self.args.full_kl_step - 3) + 1, 1)
aux = loss_aux.item()
elbo = loss_rec + kld_loss
loss = loss_rec + self.args.kl_ceiling * kl_weight * kld_loss + \
self.args.aux_ceiling*loss_aux + self.args.image_recon_lambda * loss_img
return loss, loss_rec.item(), loss_img.item(), math.exp(min(loss_rec.item(), 100)), kld_loss.item(), aux, elbo.item()
def training_step(self, batch, batch_idx):
# switch to latent transformer if we've reached num_pretraining_steps
if self.iter == self.args.num_pretraining_steps:
self.latent_transformer = True
self.model.switch_GVT_train_mode(self.latent_transformer)
self.configure_optimizers() # restart ADAM optimizer
output, z_logit, kld_loss, image_recon = self(batch)
target = batch["questions"].cuda()
loss, loss_rec, loss_img, ppl, kld_loss, aux, elbo = self.calculate_losses(
output, image_recon, kld_loss, z_logit, target)
if self.latent_transformer:
self.kliter += 1
self.log('train loss', loss)
self.log('train rec loss', loss_rec)
self.log('image recon loss', loss_img)
self.log('perplexity', ppl)
self.log('kld loss', kld_loss)
self.log('aux loss', aux)
self.log('elbo', elbo)
self.custom_optimizer(self.iter)
self.iter += 1
return loss
def validation_step(self, batch, batch_idx):
target = batch["questions"].cuda()
output, z_logit, kld_loss, image_recon = self(batch)
loss, loss_rec, loss_img, ppl, kld_loss, aux, elbo = self.calculate_losses(
output, image_recon, kld_loss, z_logit, target)
self.val_metrics["loss"].append(loss.item())
self.val_metrics["img"].append(self.args.image_recon_lambda * loss_img)
self.val_metrics["ppl"].append(ppl)
self.val_metrics["kld"].append(kld_loss)
self.val_metrics["aux"].append(aux)
self.val_metrics["elbo"].append(elbo)
self.val_metrics["rec"].append(loss_rec)
self.log("val_loss", loss.item())
self.log("val_loss_rec", loss_rec)
self.log("val_img_loss", loss_img)
self.log("val_ppl", ppl)
self.log("val_kld_loss", kld_loss)
self.log("val_aux", aux)
self.log("val_elbo", elbo)
return batch
def validation_epoch_end(self, batch) -> None:
print("##### End of Epoch validation #####")
batch = batch[0]
categories = batch["answer_types"].cuda().unsqueeze(-1)
images = batch["images"].cuda()
image_ids = batch["image_ids"]
print("VALIDATION SAMPLE")
preds = []
gts = []
decoded_sentences, top_args, top_vals = self.model.decode_greedy(
images, categories, max_decode_length=50)
for i, greedy_sentence in enumerate(decoded_sentences):
list_gt = self.filter_special_tokens(
[self.vocab.idx2word[word] for word in batch["questions"][i].tolist()])
list_pred = self.filter_special_tokens(greedy_sentence.split())
gt = " ".join(list_gt)
pred = " ".join(list_pred)
gts.append(gt)
preds.append(pred)
if i < 10:
print("Image ID:\t", image_ids[i])
print("Context:\t", " ".join(
[self.vocab.idx2word[category] for category in categories[i].tolist()]))
print("Generated: \t", pred)
print("Reference: \t", gt)
for j, word in enumerate(greedy_sentence.split()):
near_tokens = [self.vocab.idx2word[token.item()] for token in top_args[i, j]]
near_tokens_vals = [np.round(val.item(), 4) for val in top_vals[i, j]]
print(word, "\t \t", [(token, val) for token, val in list(zip(near_tokens, near_tokens_vals))])
print()
scores = self.nlge.compute_metrics(ref_list=[gts], hyp_list=preds)
for k, v in self.val_metrics.items():
print(k, "\t", np.round(np.mean(v), 4))
self.val_metrics[k] = [] # reset v
for k, v in scores.items():
print(k, "\t", np.round(np.mean(v), 4) * 100)
print()
print(self.hp_string)
def filter_special_tokens(self, decoded_sentence_list):
filtered = []
special_tokens = ["<start>", "<end>", "<pad>"]
for token in decoded_sentence_list:
if token not in special_tokens:
filtered.append(token)
return filtered
def test_step(self, batch, batch_idx):
images, questions, answers, categories = batch["images"], batch[
"questions"], batch["answers"], batch["answer_types"]
images, questions, answers, categories = images.to(self.args.device), questions.to(
self.args.device), answers.to(self.args.device), categories.to(self.args.device)
categories = categories.unsqueeze(1)
preds = []
gts = []
decoded_sentences = self.model.decode_greedy(
images, categories, max_decode_length=50)
for i, greedy_sentence in enumerate(decoded_sentences):
list_gt = self.filter_special_tokens(
[self.vocab.idx2word[word] for word in batch["questions"][i].tolist()])
list_pred = self.filter_special_tokens(greedy_sentence.split())
gt = " ".join(list_gt)
pred = " ".join(list_pred)
gts.append(gt)
preds.append(pred)
scores = self.nlge.compute_metrics(ref_list=[gts], hyp_list=preds)
for k, v in scores.items():
scores[k] = torch.tensor(v)
return scores
def test_end(self, all_scores):
for k, scores in all_scores.items():
all_scores[k] = scores.detach().cpu().numpy()
all_scores[k] = np.mean(all_scores[k])
print(all_scores)
print(self.hp_string)
return all_scores
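    # Inverse-square-root learning-rate schedule with linear warmup (the
    # schedule from "Attention Is All You Need"), applied manually every step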
def custom_optimizer(self, step, warmup_steps=4000):
min_arg1 = m.sqrt(1/(step+1))
min_arg2 = step * (warmup_steps**-1.5)
lr = m.sqrt(1/self.args.hidden_dim) * min(min_arg1, min_arg2)
self.trainer.lightning_optimizers[0].param_groups[0]["lr"] = lr
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.args.lr)
return optimizer
transform = transforms.Compose([
transforms.ToTensor(),
transforms.ToPILImage(),
transforms.RandomResizedCrop(224,
scale=(1.00, 1.2),
ratio=(0.75, 1.3333333333333333)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])])
class CheckpointEveryNSteps(pl.Callback):
"""
Save a checkpoint every N steps, instead of Lightning's default that checkpoints
based on validation loss.
"""
def __init__(
self,
save_step_frequency,
prefix="N-Step-Checkpoint",
use_modelcheckpoint_filename=False,
):
"""
Args:
save_step_frequency: how often to save in steps
prefix: add a prefix to the name, only used if
use_modelcheckpoint_filename=False
use_modelcheckpoint_filename: just use the ModelCheckpoint callback's
default filename, don't use ours.
"""
self.save_step_frequency = save_step_frequency
self.prefix = prefix
self.use_modelcheckpoint_filename = use_modelcheckpoint_filename
def on_batch_end(self, trainer: pl.Trainer, _):
""" Check if we should save a checkpoint after every train batch """
epoch = trainer.current_epoch
global_step = trainer.global_step
if global_step % self.save_step_frequency == 0:
if self.use_modelcheckpoint_filename:
filename = trainer.checkpoint_callback.filename
else:
filename = f"{self.prefix}_{epoch=}_{global_step=}.ckpt"
ckpt_path = os.path.join(trainer.checkpoint_callback.dirpath, filename)
trainer.save_checkpoint(ckpt_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Model hyperparameters
parser.add_argument("--emb_dim", type=int, default=300,
help="Embedding dimensionality of the model")
parser.add_argument("--hidden_dim", type=int, default=300,
help="Hidden dimensionality of the model")
parser.add_argument("--latent_dim", type=int, default=300,
help="Size of latent dimension")
parser.add_argument("--pwffn_dim", type=int, default=600,
help="Size of postionwise feedforward network in transformer")
parser.add_argument("--num_layers", type=int, default=4,
help="Number of transformer layers in encoder and decoder")
parser.add_argument("--num_heads", type=int, default=4,
help="Number of heads in the multi-head attention")
parser.add_argument("--lr", type=float, default=3e-5,
help="Learning rate of the network")
parser.add_argument("--num_pretraining_steps", type=float, default=12000,
help="Number of pretraining steps before turning on latent transformer")
parser.add_argument("--total_training_steps", type=int, default=35000,
help="Total number of training steps for the model")
parser.add_argument("--full_kl_step", type=int, default=15000,
help="Number of steps until KLD is annealed")
parser.add_argument("--kl_ceiling", type=float, default=0.5)
parser.add_argument("--aux_ceiling", type=float, default=1.0)
parser.add_argument("--image_recon_lambda", type=float, default=0.1,
help="How much to scale the image reconstruction loss by")
parser.add_argument("--batch_size", type=int, default=128)
# Data args
parser.add_argument("--emb_file", type=str, default="vectors/glove.6B.300d.txt",
help="Filepath for pretrained embeddings")
parser.add_argument("--dataset", type=str,
default="data/processed/iq_dataset.hdf5")
parser.add_argument("--val_dataset", type=str,
default="data/processed/iq_val_dataset.hdf5")
parser.add_argument("--vocab", type=str, default="vocab.pkl")
parser.add_argument("--use_gpu", type=bool, default=True)
parser.add_argument("--num_gpus", type=int, default=1)
parser.add_argument("--print_note", type=str, default="")
parser.add_argument("--input_mode", type=str, default="ans")
args = parser.parse_args()
device = torch.device('cuda' if torch.cuda.is_available()
and args.use_gpu else 'cpu')
args.device = device
args.root_dir = os.getcwd()
if os.path.exists(args.vocab):
vocab = pickle.load(open(args.vocab, "rb"))
else:
vocab = build_vocab(
'data/vqa/v2_OpenEnded_mscoco_train2014_questions.json', 'data/vqa/iq_dataset.json', 4)
data_loader = get_loader(os.path.join(
os.getcwd(), args.dataset), transform, 128, shuffle=True, num_workers=8)
val_data_loader = get_loader(os.path.join(
os.getcwd(), args.val_dataset), transform, 128, shuffle=True, num_workers=8)
trainGVT = TrainIQ(vocab, args).to(args.device)
trainer = pl.Trainer(max_steps=args.total_training_steps, gradient_clip_val=5,
val_check_interval=500, limit_val_batches=100, gpus=args.num_gpus, callbacks=[CheckpointEveryNSteps(400)])
trainer.fit(trainGVT, data_loader, val_data_loader)
test_data_loader = get_loader(os.path.join(os.getcwd(), args.val_dataset), transform, 128, shuffle=False, num_workers=8)
trainer.test(trainGVT, test_dataloaders=test_data_loader)
| [
"torch.nn.MSELoss",
"torch.cuda.is_available",
"torch.tensor",
"torch.nn.CrossEntropyLoss",
"torch.multiprocessing.set_sharing_strategy"
] | 1.7.0 | nihirv/blt-vqg | 73ce8510fb2a696b44b686e38418cc0a11982162 |
0.4 | import numpy as np
from sklearn.cluster import KMeans
import torch
import torch.nn as nn
from torch.utils.data.dataloader import DataLoader, default_collate
from typing import Tuple, Callable, Optional, Union
from tqdm import tqdm
from ptdec.utils import target_distribution, cluster_accuracy
def train(
dataset: torch.utils.data.Dataset,
model: torch.nn.Module,
epochs: int,
batch_size: int,
optimizer: torch.optim.Optimizer,
stopping_delta: Optional[float] = None,
collate_fn=default_collate,
cuda: bool = True,
sampler: Optional[torch.utils.data.sampler.Sampler] = None,
silent: bool = False,
update_freq: int = 10,
evaluate_batch_size: int = 1024,
    update_callback: Optional[Callable[[float, float, float], None]] = None,
epoch_callback: Optional[Callable[[int, torch.nn.Module], None]] = None,
) -> None:
"""
Train the DEC model given a dataset, a model instance and various configuration parameters.
:param dataset: instance of Dataset to use for training
:param model: instance of DEC model to train
:param epochs: number of training epochs
:param batch_size: size of the batch to train with
:param optimizer: instance of optimizer to use
:param stopping_delta: label delta as a proportion to use for stopping, None to disable, default None
:param collate_fn: function to merge a list of samples into mini-batch
:param cuda: whether to use CUDA, defaults to True
:param sampler: optional sampler to use in the DataLoader, defaults to None
:param silent: set to True to prevent printing out summary statistics, defaults to False
:param update_freq: frequency of batches with which to update counter, None disables, default 10
:param evaluate_batch_size: batch size for evaluation stage, default 1024
    :param update_callback: optional function of accuracy, loss and label delta, default None
:param epoch_callback: optional function of epoch and model, default None
:return: None
"""
static_dataloader = DataLoader(
dataset,
batch_size=batch_size,
collate_fn=collate_fn,
pin_memory=False,
sampler=sampler,
shuffle=False,
)
train_dataloader = DataLoader(
dataset,
batch_size=batch_size,
collate_fn=collate_fn,
sampler=sampler,
shuffle=True,
)
data_iterator = tqdm(
static_dataloader,
leave=True,
unit="batch",
postfix={
"epo": -1,
"acc": "%.4f" % 0.0,
"lss": "%.8f" % 0.0,
"dlb": "%.4f" % -1,
},
disable=silent,
)
kmeans = KMeans(n_clusters=model.cluster_number, n_init=20)
model.train()
features = []
actual = []
# form initial cluster centres
for index, batch in enumerate(data_iterator):
if (isinstance(batch, tuple) or isinstance(batch, list)) and len(batch) == 2:
batch, value = batch # if we have a prediction label, separate it to actual
actual.append(value)
if cuda:
batch = batch.cuda(non_blocking=True)
features.append(model.encoder(batch).detach().cpu())
actual = torch.cat(actual).long()
predicted = kmeans.fit_predict(torch.cat(features).numpy())
predicted_previous = torch.tensor(np.copy(predicted), dtype=torch.long)
_, accuracy = cluster_accuracy(predicted, actual.cpu().numpy())
cluster_centers = torch.tensor(
kmeans.cluster_centers_, dtype=torch.float, requires_grad=True
)
if cuda:
cluster_centers = cluster_centers.cuda(non_blocking=True)
with torch.no_grad():
# initialise the cluster centers
model.state_dict()["assignment.cluster_centers"].copy_(cluster_centers)
loss_function = nn.KLDivLoss(reduction="sum")
#loss_function = nn.KLDivLoss(size_average=False)
delta_label = None
for epoch in range(epochs):
features = []
data_iterator = tqdm(
train_dataloader,
leave=True,
unit="batch",
postfix={
"epo": epoch,
"acc": "%.4f" % (accuracy or 0.0),
"lss": "%.8f" % 0.0,
"dlb": "%.4f" % (delta_label or 0.0),
},
disable=silent,
)
model.train()
for index, batch in enumerate(data_iterator):
if (isinstance(batch, tuple) or isinstance(batch, list)) and len(
batch
) == 2:
batch, _ = batch # if we have a prediction label, strip it away
if cuda:
batch = batch.cuda(non_blocking=True)
output = model(batch)
target = target_distribution(output).detach()
loss = loss_function(output.log(), target) / output.shape[0]
data_iterator.set_postfix(
epo=epoch,
acc="%.4f" % (accuracy or 0.0),
lss="%.8f" % float(loss.item()),
dlb="%.4f" % (delta_label or 0.0),
)
optimizer.zero_grad()
loss.backward()
optimizer.step(closure=None)
features.append(model.encoder(batch).detach().cpu())
if update_freq is not None and index % update_freq == 0:
loss_value = float(loss.item())
data_iterator.set_postfix(
epo=epoch,
acc="%.4f" % (accuracy or 0.0),
lss="%.8f" % loss_value,
dlb="%.4f" % (delta_label or 0.0),
)
if update_callback is not None:
update_callback(accuracy, loss_value, delta_label)
predicted, actual = predict(
dataset,
model,
batch_size=evaluate_batch_size,
collate_fn=collate_fn,
silent=True,
return_actual=True,
cuda=cuda,
)
delta_label = (
float((predicted != predicted_previous).float().sum().item())
/ predicted_previous.shape[0]
)
if stopping_delta is not None and delta_label < stopping_delta:
print(
'Early stopping as label delta "%1.5f" less than "%1.5f".'
% (delta_label, stopping_delta)
)
break
predicted_previous = predicted
_, accuracy = cluster_accuracy(predicted.cpu().numpy(), actual.cpu().numpy())
data_iterator.set_postfix(
epo=epoch,
acc="%.4f" % (accuracy or 0.0),
lss="%.8f" % 0.0,
dlb="%.4f" % (delta_label or 0.0),
)
if epoch_callback is not None:
epoch_callback(epoch, model)
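# Illustrative usage sketch (not part of the original module): wiring train()
# to a DEC model; `dec_model` is assumed to expose the `cluster_number`,
# `encoder` and `assignment.cluster_centers` attributes that train() relies on.
def _example_train_dec(dataset, dec_model):
    optimizer = torch.optim.SGD(dec_model.parameters(), lr=0.01, momentum=0.9)
    train(dataset, dec_model, epochs=100, batch_size=256, optimizer=optimizer,
          stopping_delta=0.001, cuda=torch.cuda.is_available())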
def predict(
dataset: torch.utils.data.Dataset,
model: torch.nn.Module,
batch_size: int = 1024,
collate_fn=default_collate,
cuda: bool = True,
silent: bool = False,
return_actual: bool = False,
) -> Union[Tuple[torch.Tensor, torch.Tensor], torch.Tensor]:
"""
Predict clusters for a dataset given a DEC model instance and various configuration parameters.
:param dataset: instance of Dataset to use for training
:param model: instance of DEC model to predict
:param batch_size: size of the batch to predict with, default 1024
:param collate_fn: function to merge a list of samples into mini-batch
:param cuda: whether CUDA is used, defaults to True
:param silent: set to True to prevent printing out summary statistics, defaults to False
:param return_actual: return actual values, if present in the Dataset
:return: tuple of prediction and actual if return_actual is True otherwise prediction
"""
dataloader = DataLoader(
dataset, batch_size=batch_size, collate_fn=collate_fn, shuffle=False
)
data_iterator = tqdm(dataloader, leave=True, unit="batch", disable=silent,)
features = []
actual = []
model.eval()
for batch in data_iterator:
if (isinstance(batch, tuple) or isinstance(batch, list)) and len(batch) == 2:
batch, value = batch # unpack if we have a prediction label
if return_actual:
actual.append(value)
elif return_actual:
raise ValueError(
"Dataset has no actual value to unpack, but return_actual is set."
)
if cuda:
batch = batch.cuda(non_blocking=True)
features.append(
model(batch).detach().cpu()
) # move to the CPU to prevent out of memory on the GPU
if return_actual:
return torch.cat(features).max(1)[1], torch.cat(actual).long()
else:
return torch.cat(features).max(1)[1]
| [
"torch.cat",
"torch.utils.data.dataloader.DataLoader",
"torch.no_grad",
"torch.tensor",
"torch.nn.KLDivLoss"
] | 0.4.0 | giorgosVardakas/pt-dec | c29b9634eb74c828efd9d2b87c613cdb0ddd1dd5 |
1.9 | from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.svm import SVC
import numpy as np
import pandas as pd
import pickle as pi
from operator import itemgetter
import torch
import torch.nn as nn
import numpy as np
import sys
sys.path.append("src/models/")
from early_stopping import EarlyStopping
class Classifier:
def __init__(self, X_train, X_test, y_train, y_test):
        # Array for all results
self.ergebnis = []
self.X_train = X_train
self.X_test = X_test
self.y_train = y_train
self.y_test = y_test
def train_models(self, models):
for model in models:
#-----------------------
#Knn-Classifier
#-----------------------
if model == 'knn':
                # Determine the optimal KNN classifier
error = []
for i in range(1, 250):
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(self.X_train, self.y_train)
pred_i = knn.predict(self.X_test)
error.append([i, np.mean(pred_i != self.y_test)])
#Debug-Print
print()
print("Debug KNN-Classifier")
print("knn n: {}".format(sorted(error, key=itemgetter(1), reverse=False)[0][0]))
print("knn error: {}".format(sorted(error, key=itemgetter(1), reverse=False)[0][1]))
print()
                # Use the optimal number of n_neighbors found
optimal_n = sorted(error, key=itemgetter(1), reverse=False)[0][0]
                # Train the KNN classifier
knnclf = KNeighborsClassifier(n_neighbors=optimal_n)
knnclf.fit(self.X_train, self.y_train)
                # Determine the KNN classifier's accuracy
score = knnclf.score(self.X_test,self.y_test)
self.ergebnis.append([knnclf.__class__.__name__, score, knnclf])
#-----------------------
#-----------------------
#Decision Tree
#-----------------------
elif model == 'dt':
                # use class_weight for DT and RF
                # Determine the optimal decision tree
                # Decision tree parameters to test
dt = DecisionTreeClassifier()
tree_para = {'criterion':['gini','entropy'],'max_depth':[i for i in range(1,20)]
, 'min_samples_split':[i for i in range (2,10)]}
#GridSearchCV
grd_clf = GridSearchCV(dt, tree_para, cv=5)
grd_clf.fit(self.X_train, self.y_train)
                # Use the best decision tree found
dt_clf = grd_clf.best_estimator_
#Debug-Print
print()
print("Debug DecisionTreeClassifier")
print("dt best parameters: {}".format(grd_clf.best_params_))
print()
score = dt_clf.score(self.X_test, self.y_test)
self.ergebnis.append([dt_clf.__class__.__name__, score, dt_clf])
#-----------------------
#-----------------------
#Random Forest
#-----------------------
elif model == 'rf':
#rf = RandomForestClassifier(max_depth=8, criterion="entropy", min_samples_split=9)
rf = RandomForestClassifier(n_estimators=100)
rf.fit(self.X_train, self.y_train)
score = rf.score(self.X_test, self.y_test)
self.ergebnis.append([rf.__class__.__name__, score, rf])
#-----------------------
#-----------------------
#Support Vector Machine
#-----------------------
elif model == 'svm':
svm = SVC(kernel = 'poly', probability=True)
svm.fit(self.X_train, self.y_train)
score = svm.score(self.X_test, self.y_test)
self.ergebnis.append([svm.__class__.__name__, score, svm])
#-----------------------
#MLP
#-----------------------
elif model == 'mlp':
mlp = MLPClassifier(hidden_layer_sizes=[100,100], max_iter=5000, solver='sgd'
, learning_rate='adaptive', learning_rate_init=0.01, n_iter_no_change=200, early_stopping=True)
mlp.fit(self.X_train, self.y_train)
score = mlp.score(self.X_test, self.y_test)
self.ergebnis.append([mlp.__class__.__name__, score, mlp])
#Debug-Print
print()
print("Debug MLPClassifier")
print("iterations: {}; layers: {}; loss: {}".format(mlp.n_iter_, mlp.n_layers_, mlp.loss_))
print()
#epochs = np.linspace(1,mlp.n_iter_, mlp.n_iter_)
return self.ergebnis
def ensemble_model(self):
        # All included models are loaded into a list, which is then passed
        # to the voting classifier as a parameter.
models = list()
for model in self.ergebnis:
models.append([model[0], model[2]])
voting_clf = VotingClassifier(estimators=models, voting='soft')
voting_clf.fit(self.X_train, self.y_train)
score = voting_clf.score(self.X_test, self.y_test)
self.ergebnis.append([voting_clf.__class__.__name__, score, voting_clf])
return self.ergebnis
def neuronal_network(self, epochs, patience_early_stopping, threshold_for_early_stopping):
        # Function for setting up and running the neural network with PyTorch
        # Standard PyTorch output showing which processing unit the computation runs on
        # In my case only CPU is available
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('This Computation is running on {}'.format(device))
        # Initialize the neural network
nn_model = NN_Model()
        # CrossEntropyLoss is used as the loss function
loss_func = torch.nn.CrossEntropyLoss()
        # Adam is used as the optimizer with a learning rate of 0.0001
optimizer = torch.optim.Adam(nn_model.parameters(), lr=0.0001)
        # Empty arrays for storing loss/accuracy values across the epochs
epoch_errors = []
epoch_train_accuracy = []
epoch_test_accuracy = []
        # Initialize early stopping
early_stopping = EarlyStopping(patience=patience_early_stopping)
        # Convert the training and test data into the required tensor formats
X_Train = torch.from_numpy(self.X_train).float()
y_Train = torch.tensor(self.y_train, dtype=torch.long)
X_Test = torch.from_numpy(self.X_test).float()
y_Test = torch.from_numpy(np.array(self.y_test)).long()
        # Train the neural network; the maximum number of epochs is passed as a function parameter
for epoch in range(epochs):
            # Prepare the outputs of the neural network
            # LogSoftmax is applied explicitly here, since the loss function
            # (CrossEntropyLoss) applies it automatically during training!
log_sm = torch.nn.LogSoftmax(dim=1)
train_nn_model = log_sm(nn_model(X_Train))
test_nn_model = log_sm(nn_model(X_Test))
            # Create empty arrays for storing the individual results computed by the model,
            # plus a counter (initialized to 0) for summing up the correctly predicted results
train_pred_ergebnis = []
train_running_correct = 0
test_pred_ergebnis = []
test_running_correct = 0
            # Disable autograd while computing the results for validation purposes
with torch.no_grad():
                # Training accuracy
                # Fill the empty array with results from the output tensor
                # The probabilistic values are compared and the most likely outcome is stored,
                # encoded as 0 - home win, 1 - draw, 2 - away win
for i in range(train_nn_model.shape[0]):
ergebnis = 0 if (train_nn_model[i][0] > train_nn_model[i][1] and train_nn_model[i][0] > train_nn_model[i][2]) else 1 if (train_nn_model[i][1] > train_nn_model[i][0] and train_nn_model[i][1] > train_nn_model[i][2]) else 2
train_pred_ergebnis.append(ergebnis)
                # Test accuracy
                # Fill the empty array with results from the output tensor
                # The probabilistic values are compared and the most likely outcome is stored,
                # encoded as 0 - home win, 1 - draw, 2 - away win
for i in range(test_nn_model.shape[0]):
ergebnis = 0 if (test_nn_model[i][0] > test_nn_model[i][1] and test_nn_model[i][0] > test_nn_model[i][2]) else 1 if (test_nn_model[i][1] > test_nn_model[i][0] and test_nn_model[i][1] > test_nn_model[i][2]) else 2
test_pred_ergebnis.append(ergebnis)
                # Convert the arrays into tensors
train_pred_tensor = torch.tensor(train_pred_ergebnis, dtype=torch.float)
test_pred_tensor = torch.tensor(test_pred_ergebnis, dtype=torch.float)
                # The correct results from the training data set are summed up, the accuracy for this
                # epoch is computed from them and appended to epoch_train_accuracy for later evaluation
train_running_correct += (train_pred_tensor == y_Train).sum().item()
train_accuracy = train_running_correct*100./y_Train.shape[0]
epoch_train_accuracy.append(train_accuracy)
                # The correct results from the test data set are summed up, the accuracy for this
                # epoch is computed from them and appended to epoch_test_accuracy for later evaluation
test_running_correct += (test_pred_tensor == y_Test).sum().item()
test_accuracy = test_running_correct*100./y_Test.shape[0]
epoch_test_accuracy.append(test_accuracy)
#---------------------------------------------------------------------------------------
            # The decisive steps for training the NN model are performed here
#---------------------------------------------------------------------------------------
error = loss_func(nn_model(X_Train),y_Train)
optimizer.zero_grad()
error.backward()
epoch_errors.append(error.item())
optimizer.step()
#---------------------------------------------------------------------------------------
            # Debug print of the epoch with accuracies
print("Epoche: {}/{} mit Train-Akkuranz: {} und Test-Akkuranz: {}".format(epoch, epochs, train_accuracy, test_accuracy))
#-----------------------------
#Early Stopping
#-----------------------------
            # Compute the loss on the test data
error_Test = loss_func(nn_model(X_Test),y_Test)
            # Call the early stopping function
            #  The loss on the test data serves as the central criterion here:
            #  if it does NOT decrease at rate "delta" for a certain number of
            #  consecutive steps ("patience"), training is stopped.
            #  In addition, a threshold is given so that early stopping is only
            #  activated once a certain accuracy has been reached.
early_stopping(error_Test, nn_model, train_accuracy > threshold_for_early_stopping)
            # If an early stop has been triggered, the epoch loop is interrupted
if early_stopping.early_stop:
print("Early stopping")
break
#-----------------------------
        # Debug print of the final loss result
        #print('Loss after {} epochs: {}'.format(epoch+1,error.item()))
        # Pass the result data to the central results array
self.ergebnis.append([nn_model.__class__.__name__, test_accuracy/100, nn_model])
        # Return values for further use (output, testing) in the main program
return self.ergebnis, epoch_errors, epoch_train_accuracy, epoch_test_accuracy, test_pred_tensor
class NN_Model(torch.nn.Module):
def __init__(self):
super(NN_Model, self).__init__()
self.fc1 = nn.Linear(75,120)
self.fc2 = nn.Linear(120,180)
self.fc3 = nn.Linear(180,100)
self.fc4 = nn.Linear(100,40)
self.output = nn.Linear(40,3)
def forward(self,x):
x = torch.relu(self.fc1(x))
x = torch.sigmoid(self.fc2(x))
x = torch.relu(self.fc3(x))
x = torch.sigmoid(self.fc4(x))
        # No softmax function is needed on the output, since CrossEntropyLoss
        # as the loss function does this automatically
        # Note: softmax must, however, be applied when validating/testing!
x = self.output(x)
return x | [
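# Illustrative usage sketch (not part of the original module); assumes
# pre-split feature and label arrays from the surrounding project.
def _example_usage(X_train, X_test, y_train, y_test):
    clf = Classifier(X_train, X_test, y_train, y_test)
    results = clf.train_models(['knn', 'dt', 'rf', 'svm', 'mlp'])
    results = clf.ensemble_model()
    return results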
"torch.nn.Linear",
"torch.nn.LogSoftmax",
"torch.no_grad",
"torch.from_numpy",
"torch.cuda.is_available",
"torch.tensor",
"torch.nn.CrossEntropyLoss"
] | 1.9.0 | ChristianCKKoch/Projektarbeit_Digethic | 80999e48de29106545398252bbc6cea2b8b953ce |
1.2 | """
This includes: LossComputeBase and the standard NMTLossCompute, and
sharded loss compute stuff.
"""
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
import onmt
from onmt.modules.sparse_losses import SparsemaxLoss
from onmt.modules.sparse_activations import LogSparsemax
def build_loss_compute(model, tgt_field, opt, train=True):
"""
Returns a LossCompute subclass which wraps around an nn.Module subclass
(such as nn.NLLLoss) which defines the loss criterion. The LossCompute
object allows this loss to be computed in shards and passes the relevant
data to a Statistics object which handles training/validation logging.
Currently, the NMTLossCompute class handles all loss computation except
for when using a copy mechanism.
"""
device = torch.device("cuda" if onmt.utils.misc.use_gpu(opt) else "cpu")
padding_idx = tgt_field.vocab.stoi[tgt_field.pad_token]
unk_idx = tgt_field.vocab.stoi[tgt_field.unk_token]
if opt.lambda_coverage != 0:
assert opt.coverage_attn, "--coverage_attn needs to be set in " \
"order to use --lambda_coverage != 0"
if opt.copy_attn:
criterion = onmt.modules.CopyGeneratorLoss(
len(tgt_field.vocab), opt.copy_attn_force,
unk_index=unk_idx, ignore_index=padding_idx
)
elif opt.label_smoothing > 0 and train:
criterion = LabelSmoothingLoss(
opt.label_smoothing, len(tgt_field.vocab), ignore_index=padding_idx
)
elif opt.adalab and train:
eos_idx = tgt_field.vocab.stoi[tgt_field.eos_token]
criterion = AdaLabLoss(
len(tgt_field.vocab),
opt.batch_size, ignore_index=padding_idx, reduction='sum',
temperature=opt.ada_temp, eos_index=eos_idx
)
elif isinstance(model.generator[-1], LogSparsemax):
criterion = SparsemaxLoss(ignore_index=padding_idx, reduction='sum')
else:
criterion = nn.NLLLoss(ignore_index=padding_idx, reduction='sum')
# if the loss function operates on vectors of raw logits instead of
# probabilities, only the first part of the generator needs to be
# passed to the NMTLossCompute. At the moment, the only supported
# loss function of this kind is the sparsemax loss.
use_raw_logits = isinstance(criterion, SparsemaxLoss)
loss_gen = model.generator[0] if use_raw_logits else model.generator
if opt.copy_attn:
compute = onmt.modules.CopyGeneratorLossCompute(
criterion, loss_gen, tgt_field.vocab, opt.copy_loss_by_seqlength,
lambda_coverage=opt.lambda_coverage
)
else:
bidecoder_loss_gen = model.bidecoder_generator
compute = AdaLabLossCompute(
criterion, loss_gen, bidecoder_loss_gen, lambda_coverage=opt.lambda_coverage)
# compute = NMTLossCompute(
# criterion, loss_gen, lambda_coverage=opt.lambda_coverage)
compute.to(device)
return compute
class LossComputeBase(nn.Module):
"""
Class for managing efficient loss computation. Handles
sharding next step predictions and accumulating multiple
loss computations
Users can implement their own loss computation strategy by making
subclass of this one. Users need to implement the _compute_loss()
and make_shard_state() methods.
Args:
generator (:obj:`nn.Module`) :
module that maps the output of the decoder to a
distribution over the target vocabulary.
tgt_vocab (:obj:`Vocab`) :
torchtext vocab object representing the target output
        normalization (str): normalize by "sents" or "tokens"
"""
def __init__(self, criterion, generator):
super(LossComputeBase, self).__init__()
self.criterion = criterion
self.generator = generator
@property
def padding_idx(self):
return self.criterion.ignore_index
def _make_shard_state(self, batch, output, range_, attns=None):
"""
Make shard state dictionary for shards() to return iterable
shards for efficient loss computation. Subclass must define
this method to match its own _compute_loss() interface.
Args:
batch: the current batch.
output: the predict output from the model.
range_: the range of examples for computing, the whole
batch or a trunc of it?
attns: the attns dictionary returned from the model.
"""
return NotImplementedError
def _compute_loss(self, batch, output, target, **kwargs):
"""
Compute the loss. Subclass must define this method.
Args:
batch: the current batch.
output: the predict output from the model.
target: the validate target to compare output with.
**kwargs(optional): additional info for computing loss.
"""
return NotImplementedError
def __call__(self,
batch,
output,
attns,
normalization=1.0,
shard_size=0,
trunc_start=0,
trunc_size=None):
"""Compute the forward loss, possibly in shards in which case this
method also runs the backward pass and returns ``None`` as the loss
value.
Also supports truncated BPTT for long sequences by taking a
range in the decoder output sequence to back propagate in.
Range is from `(trunc_start, trunc_start + trunc_size)`.
Note sharding is an exact efficiency trick to relieve memory
required for the generation buffers. Truncation is an
approximate efficiency trick to relieve the memory required
in the RNN buffers.
Args:
batch (batch) : batch of labeled examples
output (:obj:`FloatTensor`) :
output of decoder model `[tgt_len x batch x hidden]`
attns (dict) : dictionary of attention distributions
`[tgt_len x batch x src_len]`
normalization: Optional normalization factor.
shard_size (int) : maximum number of examples in a shard
trunc_start (int) : starting position of truncation window
trunc_size (int) : length of truncation window
Returns:
A tuple with the loss and a :obj:`onmt.utils.Statistics` instance.
"""
if trunc_size is None:
trunc_size = batch.tgt.size(0) - trunc_start
trunc_range = (trunc_start, trunc_start + trunc_size)
shard_state = self._make_shard_state(batch, output, trunc_range, attns)
if shard_size == 0:
loss, stats = self._compute_loss(batch, **shard_state)
return loss / float(normalization), stats
batch_stats = onmt.utils.Statistics()
for shard in shards(shard_state, shard_size):
loss, stats = self._compute_loss(batch, **shard)
loss.div(float(normalization)).backward()
batch_stats.update(stats)
return None, batch_stats
def _stats(self, loss, scores, target):
"""
Args:
loss (:obj:`FloatTensor`): the loss computed by the loss criterion.
scores (:obj:`FloatTensor`): a score for each possible output
target (:obj:`FloatTensor`): true targets
Returns:
:obj:`onmt.utils.Statistics` : statistics for this batch.
"""
pred = scores.max(1)[1]
non_padding = target.ne(self.padding_idx)
num_correct = pred.eq(target).masked_select(non_padding).sum().item()
num_non_padding = non_padding.sum().item()
return onmt.utils.Statistics(loss.item(), num_non_padding, num_correct)
def _bottle(self, _v):
return _v.view(-1, _v.size(2))
def _unbottle(self, _v, batch_size):
return _v.view(-1, batch_size, _v.size(1))
class LabelSmoothingLoss(nn.Module):
"""
With label smoothing,
KL-divergence between q_{smoothed ground truth prob.}(w)
and p_{prob. computed by model}(w) is minimized.
"""
def __init__(self, label_smoothing, tgt_vocab_size, ignore_index=-100):
assert 0.0 < label_smoothing <= 1.0
self.ignore_index = ignore_index
super(LabelSmoothingLoss, self).__init__()
smoothing_value = label_smoothing / (tgt_vocab_size - 2)
one_hot = torch.full((tgt_vocab_size,), smoothing_value)
one_hot[self.ignore_index] = 0
self.register_buffer('one_hot', one_hot.unsqueeze(0))
self.confidence = 1.0 - label_smoothing
def forward(self, output, target):
"""
output (FloatTensor): batch_size x n_classes
target (LongTensor): batch_size
"""
model_prob = self.one_hot.repeat(target.size(0), 1)
model_prob.scatter_(1, target.unsqueeze(1), self.confidence)
model_prob.masked_fill_((target == self.ignore_index).unsqueeze(1), 0)
return F.kl_div(output, model_prob, reduction='sum')
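# Illustrative example (not in the original file) of the smoothed target built
# above: with tgt_vocab_size=5, label_smoothing=0.1, ignore_index=0 and gold
# class 3, the model_prob row for that example is
# [0.0, 0.0333, 0.0333, 0.9, 0.0333] -- the 0.1 of smoothing mass is spread
# over the non-target, non-padding classes.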
class NMTLossCompute(LossComputeBase):
"""
Standard NMT Loss Computation.
"""
def __init__(self, criterion, generator, normalization="sents",
lambda_coverage=0.0):
super(NMTLossCompute, self).__init__(criterion, generator)
self.lambda_coverage = lambda_coverage
    def _make_shard_state(self, batch, output, range_, attns=None):
shard_state = {
"output": output,
"target": batch.tgt[range_[0] + 1: range_[1], :, 0],
}
if self.lambda_coverage != 0.0:
coverage = attns.get("coverage", None)
std = attns.get("std", None)
assert attns is not None
assert std is not None, "lambda_coverage != 0.0 requires " \
"attention mechanism"
assert coverage is not None, "lambda_coverage != 0.0 requires " \
"coverage attention"
shard_state.update({
"std_attn": attns.get("std"),
"coverage_attn": coverage
})
return shard_state
def _compute_loss(self, batch, output, target, std_attn=None,
coverage_attn=None):
bottled_output = self._bottle(output)
scores = self.generator(bottled_output)
gtruth = target.view(-1)
loss = self.criterion(scores, gtruth)
if self.lambda_coverage != 0.0:
coverage_loss = self._compute_coverage_loss(
std_attn=std_attn, coverage_attn=coverage_attn)
loss += coverage_loss
stats = self._stats(loss.clone(), scores, gtruth)
return loss, stats
def _compute_coverage_loss(self, std_attn, coverage_attn):
covloss = torch.min(std_attn, coverage_attn).sum()
covloss *= self.lambda_coverage
return covloss
def filter_shard_state(state, shard_size=None):
for k, v in state.items():
if shard_size is None:
yield k, v
if v is not None:
v_split = []
if isinstance(v, torch.Tensor):
for v_chunk in torch.split(v, shard_size):
v_chunk = v_chunk.data.clone()
v_chunk.requires_grad = v.requires_grad
v_split.append(v_chunk)
yield k, (v, v_split)
def shards(state, shard_size, eval_only=False):
"""
Args:
state: A dictionary which corresponds to the output of
*LossCompute._make_shard_state(). The values for
those keys are Tensor-like or None.
shard_size: The maximum size of the shards yielded by the model.
eval_only: If True, only yield the state, nothing else.
Otherwise, yield shards.
Yields:
Each yielded shard is a dict.
Side effect:
After the last shard, this function does back-propagation.
"""
if eval_only:
yield filter_shard_state(state)
else:
# non_none: the subdict of the state dictionary where the values
# are not None.
non_none = dict(filter_shard_state(state, shard_size))
# Now, the iteration:
# state is a dictionary of sequences of tensor-like but we
# want a sequence of dictionaries of tensors.
# First, unzip the dictionary into a sequence of keys and a
# sequence of tensor-like sequences.
keys, values = zip(*((k, [v_chunk for v_chunk in v_split])
for k, (_, v_split) in non_none.items()))
# Now, yield a dictionary for each shard. The keys are always
# the same. values is a sequence of length #keys where each
# element is a sequence of length #shards. We want to iterate
# over the shards, not over the keys: therefore, the values need
# to be re-zipped by shard and then each shard can be paired
# with the keys.
for shard_tensors in zip(*values):
yield dict(zip(keys, shard_tensors))
# Assumed backprop'd
variables = []
for k, (v, v_split) in non_none.items():
if isinstance(v, torch.Tensor) and state[k].requires_grad:
variables.extend(zip(torch.split(state[k], shard_size),
[v_chunk.grad for v_chunk in v_split]))
inputs, grads = zip(*variables)
torch.autograd.backward(inputs, grads)
class AdaLabLossCompute(LossComputeBase):
"""
Standard NMT Loss Computation.
"""
def __init__(self, criterion, generator, bidecoder_generator, normalization="sents",
lambda_coverage=0.0):
super(AdaLabLossCompute, self).__init__(criterion, generator)
self.lambda_coverage = lambda_coverage
self.bidecoder_generator = bidecoder_generator
def __call__(self,
batch,
output,
attns,
bidec_outputs,
normalization=1.0,
shard_size=0,
trunc_start=0,
trunc_size=None):
"""Compute the forward loss, possibly in shards in which case this
method also runs the backward pass and returns ``None`` as the loss
value.
Also supports truncated BPTT for long sequences by taking a
range in the decoder output sequence to back propagate in.
Range is from `(trunc_start, trunc_start + trunc_size)`.
Note sharding is an exact efficiency trick to relieve memory
required for the generation buffers. Truncation is an
approximate efficiency trick to relieve the memory required
in the RNN buffers.
Args:
batch (batch) : batch of labeled examples
output (:obj:`FloatTensor`) :
output of decoder model `[tgt_len x batch x hidden]`
attns (dict) : dictionary of attention distributions
`[tgt_len x batch x src_len]`
normalization: Optional normalization factor.
shard_size (int) : maximum number of examples in a shard
trunc_start (int) : starting position of truncation window
trunc_size (int) : length of truncation window
Returns:
A tuple with the loss and a :obj:`onmt.utils.Statistics` instance.
"""
if trunc_size is None:
trunc_size = batch.tgt.size(0) - trunc_start
trunc_range = (trunc_start, trunc_start + trunc_size)
shard_state = self._make_shard_state(batch, output, bidec_outputs, trunc_range, attns)
if shard_size == 0:
loss, stats = self._compute_loss(batch, **shard_state)
return loss / float(normalization), stats
batch_stats = onmt.utils.Statistics()
for shard in shards(shard_state, shard_size):
loss, stats = self._compute_loss(batch, **shard)
loss.div(float(normalization)).backward()
batch_stats.update(stats)
return None, batch_stats
def _make_shard_state(self, batch, output, bidec_output, range_, attns=None):
shard_state = {
"output": output,
"target": batch.tgt[range_[0] + 1: range_[1], :, 0],
"bidec_output": bidec_output,
}
if self.lambda_coverage != 0.0:
coverage = attns.get("coverage", None)
std = attns.get("std", None)
assert attns is not None
assert std is not None, "lambda_coverage != 0.0 requires " \
"attention mechanism"
assert coverage is not None, "lambda_coverage != 0.0 requires " \
"coverage attention"
shard_state.update({
"std_attn": attns.get("std"),
"coverage_attn": coverage
})
return shard_state
def _compute_loss(self, batch, output, bidec_output, target, std_attn=None,
coverage_attn=None):
bottled_output = self._bottle(output)
scores = self.generator(bottled_output)
gtruth = target.view(-1)
if self.bidecoder_generator is not None and bidec_output is not None:
bidec_bottled_output = self._bottle(bidec_output)
bidec_scores = self.bidecoder_generator(bidec_bottled_output)
bidec_loss = F.cross_entropy(bidec_scores, gtruth,
ignore_index=self.criterion.ignore_index, reduction="sum")
else:
bidec_scores = None
bidec_loss = torch.tensor(0, device=gtruth.device)
if isinstance(self.criterion, AdaLabLoss):
loss = self.criterion(scores, gtruth, target, bidec_scores)
nll_loss = F.nll_loss(scores, gtruth,
ignore_index=self.criterion.ignore_index, reduction="sum")
else:
loss = self.criterion(scores, gtruth)
nll_loss = loss
# loss = self.criterion(scores, gtruth)
if self.lambda_coverage != 0.0:
coverage_loss = self._compute_coverage_loss(
std_attn=std_attn, coverage_attn=coverage_attn)
loss += coverage_loss
# stats = self._stats(loss.clone(), scores, gtruth)
stats = self._stats(loss.clone(), scores, gtruth,
bidec_loss.clone(), bidec_scores, nll_loss.clone())
return loss + bidec_loss, stats
def _compute_coverage_loss(self, std_attn, coverage_attn):
covloss = torch.min(std_attn, coverage_attn).sum()
covloss *= self.lambda_coverage
return covloss
def _stats(self, loss, scores, target, bidec_loss, bidec_scores, nll_loss):
pred = scores.max(1)[1]
non_padding = target.ne(self.padding_idx)
num_correct = pred.eq(target).masked_select(non_padding).sum().item()
num_non_padding = non_padding.sum().item()
if bidec_scores is None:
bidec_num_correct = 0
else:
bidec_pred = bidec_scores.max(1)[1]
bidec_num_correct = bidec_pred.eq(target).masked_select(non_padding).sum().item()
return onmt.utils.Statistics(loss.item(), num_non_padding, num_correct,
bidec_loss.item(), bidec_num_correct, nll_loss.item())
class AdaLabLoss(nn.Module):
"""
With adaptive label smoothing,
KL-divergence between q_{smoothed ground truth prob.}(w)
and p_{prob. computed by model}(w) is minimized.
"""
def __init__(self, tgt_vocab_size, batch_size, ignore_index=-100, device="cuda", reduction='sum',
temperature=1, eos_index=3):
        super(AdaLabLoss, self).__init__()
        self.ignore_index = ignore_index
        self.eos_index = eos_index
        self.tgt_vocab_size = tgt_vocab_size
self.device = device
self.batch_size = batch_size
self.reduction = reduction
self.step = 0
self.temperature = temperature
self.top_head = 2
self.top_tail = 500
self.margin = 0.2
self.alpha_param = 2
self.topk = 5
def forward(self, output, target, tgt_batch=None, label_scores=None):
"""
output (FloatTensor): batch_size x n_classes
target (LongTensor): batch_size
"""
v = self._get_v(label_scores, target)
epsilon = self._get_epsilon(output, target, v)
confidence = 1 - epsilon
smoothing_penalty = epsilon.unsqueeze(-1) * v
model_prob = torch.zeros_like(output, device=output.device, dtype=torch.float)
model_prob.scatter_(1, target.unsqueeze(1), confidence.unsqueeze(1))
model_prob = model_prob + smoothing_penalty
model_prob.masked_fill_((target == self.ignore_index).unsqueeze(1), 0)
return F.kl_div(output, model_prob, reduction=self.reduction)
def _bottle(self, _v):
return _v.view(-1, _v.size(2))
def _unbottle(self, _v, batch_size):
return _v.view(-1, batch_size, _v.size(1))
def _get_epsilon(self, output, target, v):
probs = output.detach().clone().exp()
prob_max = probs.max(dim=1)[0]
prob_gtruth = probs.gather(dim=1, index=target.unsqueeze(1)).squeeze()
epsilon = 1 - prob_max
maxv = v.max(dim=-1)[0]
        up_bound = 1 / (1 + maxv) - self.margin
        mask = epsilon.gt(up_bound)
        epsilon[mask] = up_bound[mask]
alpha = (prob_gtruth / prob_max).pow(self.alpha_param)
epsilon = alpha * epsilon
return epsilon
def _get_v(self, label_scores, target):
v = label_scores.detach().clone()
v = v / self.temperature
v.scatter_(1, target.unsqueeze(1), -float('inf'))
v[:, self.ignore_index] = -float('inf')
# truncate tail
upper_values, upper_indices = torch.topk(v, self.top_tail, dim=1)
kth_upper = upper_values[:, -1].view([-1, 1])
kth_upper = kth_upper.repeat([1, v.shape[1]]).float()
upper_ignore = torch.lt(v, kth_upper)
v = v.masked_fill(upper_ignore, -10000)
# truncate head
lower_values, lower_indices = torch.topk(v, self.top_head, dim=1)
kth_lower = lower_values[:, -1].view([-1, 1])
kth_lower = kth_lower.repeat([1, v.shape[1]]).float()
lower_ignore = torch.gt(v, kth_lower)
v = v.masked_fill(lower_ignore, -10000)
v = v.softmax(dim=-1)
return v
def _compute_entropy(self, output):
entropy = -torch.sum(output.exp() * output, -1)
return entropy
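# Minimal usage sketch for AdaLabLoss (illustrative only: the vocabulary size, batch size
# and random tensors below are assumptions for demonstration, not values from this repository).
if __name__ == "__main__":
    vocab_size, batch = 1000, 4
    criterion = AdaLabLoss(vocab_size, batch, ignore_index=0, device="cpu")
    log_probs = F.log_softmax(torch.randn(batch, vocab_size), dim=-1)  # generator output (log-probs)
    label_scores = torch.randn(batch, vocab_size)                      # bidirectional-decoder scores
    target = torch.randint(1, vocab_size, (batch,))
    print(criterion(log_probs, target, None, label_scores))            # summed adaptive-label-smoothing KL loss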
| [
"torch.nn.NLLLoss",
"torch.autograd.backward",
"torch.gt",
"torch.min",
"torch.lt",
"torch.split",
"torch.nn.functional.kl_div",
"torch.full",
"torch.nn.functional.cross_entropy",
"torch.tensor",
"torch.zeros_like",
"torch.nn.functional.nll_loss",
"torch.topk"
] | 1.2 | silverriver/AdaLabel | baefd765d79d90869ed4d28f76418c1a39eb0ae8 |
1.7 | """Internal utilities."""
import functools
import numbers
import numpy as np
import scipy.sparse
import torch
from pymde.average_distortion import _project_gradient
_DEVICE = torch.device("cpu")
class SolverError(Exception):
pass
def get_default_device():
return str(_DEVICE)
def _canonical_device(device):
if isinstance(device, str):
device = torch.device(device)
elif not isinstance(device, torch.device):
raise ValueError("device must be a str or a torch.device object.")
if device.type == "cuda" and device.index is None:
device = torch.device("cuda", torch.cuda.current_device())
return device
def set_default_device(device):
global _DEVICE
_DEVICE = _canonical_device(device)
def _module_device(module):
data = list(module.buffers())
if not data:
return None
device = str(data[0].device)
if any(str(datum.device) != device for datum in data):
return None
return device
def _is_numeric(arg):
return (
isinstance(arg, numbers.Number)
or isinstance(arg, np.ndarray)
or isinstance(arg, np.matrix)
or isinstance(arg, torch.Tensor)
)
def to_tensor(args, device=None):
"""Convert an arg or sequence of args to torch Tensors
"""
singleton = not isinstance(args, (list, tuple))
if singleton:
args = [args]
tensor_args = []
for arg in args:
if isinstance(arg, torch.Tensor):
tensor_args.append(arg)
elif _is_numeric(arg):
if isinstance(arg, np.ndarray) and arg.dtype == np.float64:
tensor_args.append(
torch.tensor(arg, dtype=torch.float32, device=device)
)
else:
tensor_args.append(torch.tensor(arg, device=device))
else:
raise ValueError("Received non-numeric argument ", arg)
return tensor_args[0] if singleton else tensor_args
def tensor_arguments(func):
"""Cast numeric args and kwargs of func to Tensors."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
tensor_args = to_tensor(args)
tensor_kwargs = {}
for key, arg in kwargs.items():
if isinstance(arg, torch.Tensor):
tensor_kwargs[key] = arg
elif _is_numeric(arg):
tensor_kwargs[key] = torch.tensor(arg, device=_DEVICE)
else:
raise ValueError(
"Received non-numeric argument (name %s, value %s)"
% (key, arg)
)
return func(*tensor_args, **tensor_kwargs)
return wrapper
def all_edges(n):
"""Return a tensor of all (n choose 2) edges
Constructs all possible edges among n items. For example, if ``n`` is 4,
the return value will be equal to
.. code:: python3
torch.Tensor([[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]])
"""
return torch.triu_indices(n, n, 1).T
@tensor_arguments
def natural_length(n, m):
return (2.0 * n * m / (n - 1)).sqrt()
def in_stdemb(X):
cov = (1.0 / X.shape[0]) * X.T @ X
eye = torch.eye(2, dtype=X.dtype, device=X.device)
mean = X.mean(axis=0)
zero = torch.tensor(0.0, dtype=X.dtype, device=X.device)
return torch.isclose(cov, eye).all() and torch.isclose(mean, zero).all()
def proj_standardized(X, demean=False, inplace=False):
if demean:
if inplace:
X.sub_(X.mean(axis=0))
else:
X -= X.mean(axis=0)
# pytorch 1.8.0 has a bug in which torch.svd fails when requires_grad
# is true on the input (even if called under torch.no_grad)
requires_grad = X.requires_grad
X.requires_grad_(False)
n = torch.tensor(X.shape[0], dtype=X.dtype, device=X.device)
m = X.shape[1]
# TODO: Gracefully handle the rare svd failure
# TODO: debug alternative eigenvec approach ...
# (evals, V = torch.eig(X.T @ X, eigenvectors=True)
# proj = X @ V @ torch.diag(evals[:, 0].sqrt().pow(-1)) ...
# proj *= torch.sqrt(n)
if inplace:
s = torch.zeros(m, device=X.device, dtype=X.dtype)
V = torch.zeros((m, m), device=X.device, dtype=X.dtype)
try:
U, _, V = torch.svd(X, out=(X, s, V))
except RuntimeError as e:
X.requires_grad_(requires_grad)
raise SolverError(str(e))
torch.matmul(U[:, :m], V.T[:, :m], out=X)
X.mul_(torch.sqrt(n))
X.requires_grad_(requires_grad)
return X
else:
try:
U, _, V = torch.svd(X)
except RuntimeError as e:
X.requires_grad_(requires_grad)
raise SolverError(str(e))
proj = torch.sqrt(n) * U[:, :m] @ V.T[:, :m]
X.requires_grad_(requires_grad)
return proj
def adjacency_matrix(n, m, edges, weights):
if isinstance(weights, torch.Tensor):
weights = weights.detach().cpu().numpy()
if isinstance(edges, torch.Tensor):
edges = edges.detach().cpu().numpy()
A = scipy.sparse.coo_matrix(
(weights, (edges[:, 0], edges[:, 1])), shape=(n, n), dtype=np.float32
)
A = A + A.T
return A.tocoo()
@tensor_arguments
def procrustes(X_source, X_target):
"""min |X_source Q - X_target|_F s.t. Q^TQ = I"""
U, _, V = torch.svd(X_target.T @ X_source)
return V @ U.T
@tensor_arguments
def _rotate_2d(X, degrees):
theta = torch.deg2rad(degrees)
rot = torch.tensor(
[
[torch.cos(theta), -torch.sin(theta)],
[torch.sin(theta), torch.cos(theta)],
],
device=X.device,
)
return X @ rot
@tensor_arguments
def _rotate_3d(X, alpha, beta, gamma):
alpha = torch.deg2rad(alpha.float())
beta = torch.deg2rad(beta.float())
gamma = torch.deg2rad(gamma.float())
rot_x = torch.tensor(
[
[1, 0, 0],
[0, torch.cos(alpha), torch.sin(alpha)],
[0, -torch.sin(alpha), torch.cos(alpha)],
],
device=X.device,
)
rot_y = torch.tensor(
[
[torch.cos(beta), 0.0, -torch.sin(beta)],
[0, 1, 0],
[torch.sin(beta), 0.0, torch.cos(beta)],
],
device=X.device,
)
rot_z = torch.tensor(
[
[torch.cos(gamma), torch.sin(gamma), 0.0],
[-torch.sin(gamma), torch.cos(gamma), 0.0],
[0, 0, 1],
],
device=X.device,
)
rot_3d = rot_x @ rot_y @ rot_z
return X @ rot_3d
@tensor_arguments
def rotate(X, degrees):
"""Rotate a 2 or 3D embedding
Rotates a 2/3D embedding by ``degrees``. If ``X`` is a 2D embedding,
``degrees`` should be a scalar; if it is 3D, ``degrees`` should be
a length-3 ``torch.Tensor``, with one angle for each axis (the embedding
will be rotated along the x-axis first, then the y-axis, then the z-axis).
Arguments
---------
X : torch.Tensor
The embedding to rotate.
degrees: torch.Tensor
The angles of rotation.
Returns
-------
torch.Tensor
The rotated embedding
"""
if X.shape[1] not in [2, 3]:
raise ValueError(
"Only 2 or 3 dimensional embeddings can be "
"rotated using this method."
)
if X.shape[1] == 2:
if degrees.numel() != 1:
raise ValueError("`degrees` must be a scalar.")
return _rotate_2d(X, degrees)
else:
if degrees.numel() != 3:
raise ValueError("`degrees` must be a length-3 tensor.")
return _rotate_3d(X, degrees[0], degrees[1], degrees[2])
@tensor_arguments
def center(X):
"""Center an embedding
Returns a new embedding, equal to the given embedding minus the mean
of its rows.
"""
return X - X.mean(dim=0)[None, :]
@tensor_arguments
def align(source, target):
"""Align an source embedding to a target embedding
Align the source embedding to another target embedding, via
rotation. The alignment is done by solving an
orthogonal Procrustes problem.
Arguments
---------
source: torch.Tensor
The embedding to be aligned.
target: torch.Tensor
The target embedding, of the same shape as source.
Returns
-------
torch.Tensor
The rotation of source best aligned to the target.
"""
source_mean = source.mean(dim=0)
source = source - source_mean[None, :]
source_col_rms = source.norm(dim=0)
source = source / source_col_rms[None, :]
target = center(target)
target = target / target.norm(dim=0)
Q = procrustes(source, target)
rotated = source @ Q
return (rotated * source_col_rms[None, :]) + source_mean
@tensor_arguments
def scale_delta(delta, d_nat):
# scale delta so RMS(delta) == d_nat
N = delta.nelement()
rms = torch.sqrt(1 / N * torch.sum(delta ** 2))
return delta * d_nat / rms
class LinearOperator(object):
def __init__(self, matvec, device):
self._matvec = matvec
self.device = device
def matvec(self, vecs):
return self._matvec(vecs)
def make_hvp(f, edges, X, constraint):
X_shape = X.shape
def avg_dist_flat(X_flat):
X_reshaped = X_flat.view(X_shape)
if constraint is not None:
# a noop in the forward pass, but projects the gradient onto
# the tangent space of the constraint in the backward pass
X_reshaped = _project_gradient(X_reshaped, constraint)
# using custom average distortion yields a zero for hvp, since
# gradient graph is disconnected
differences = X_reshaped[edges[:, 0]] - X_reshaped[edges[:, 1]]
norms = differences.pow(2).sum(dim=1).sqrt()
return f(norms).mean()
X_flat = X.view(-1).detach()
def hvp(vecs):
vecs = torch.split(vecs, 1, dim=1)
products = []
for v in vecs:
_, product = torch.autograd.functional.vhp(
avg_dist_flat, X_flat, v.squeeze()
)
products.append(product)
return torch.stack(products, dim=1)
return hvp
def hutchpp(linear_operator, dimension, n_queries):
A = linear_operator
d = dimension
m = n_queries
S = torch.randn(d, m // 3, device=A.device)
G = torch.randn(d, m // 3, device=A.device)
Q, _ = torch.qr(A.matvec(S))
proj = G - Q @ (Q.T @ G)
return torch.trace(Q.T @ A.matvec(Q)) + (3.0 / m) * torch.trace(
proj.T @ A.matvec(proj)
)
def random_edges(n, p, seed=0):
randomstate = np.random.default_rng(seed)
edge_idx = randomstate.choice(
int(n * (n - 1) / 2), p, replace=False, shuffle=False
)
u = (
n
- 2
- np.floor(np.sqrt(-8 * edge_idx + 4 * n * (n - 1) - 7) / 2.0 - 0.5)
)
v = edge_idx + u + 1 - n * (n - 1) / 2 + (n - u) * ((n - u) - 1) / 2
return torch.tensor(np.stack([u, v], axis=1).astype(np.int64))
class Distortion(torch.autograd.Function):
"""Manual implementation of the average distortion gradient, for testing"""
@staticmethod
def forward(ctx, X, f, A, lhs, rhs):
distances = A.T @ X
norms = distances.norm(dim=1)
with torch.enable_grad():
X.requires_grad_(False)
norms.requires_grad_(True)
norms.grad = None
distortion = f(norms).mean()
distortion.backward()
g = norms.grad / norms
X.requires_grad_(True)
D = g.diag()
grad_E = A @ (D @ (A.T @ X))
ctx.grad_E = grad_E
return distortion
def backward(ctx, grad_output):
return ctx.grad_E * grad_output, None, None, None, None
_distortion = Distortion.apply
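# Illustrative sketch of a few helpers above (the shapes and the random test matrix are
# assumptions made for demonstration, not part of the library API):
if __name__ == "__main__":
    embedding = torch.randn(100, 2)
    rotated = rotate(embedding, 30.0)      # rotate a 2D embedding by a scalar angle in degrees
    realigned = align(rotated, embedding)  # best rotation of `rotated` onto `embedding` (Procrustes)
    print(realigned.shape)
    # hutchpp estimates the trace of an operator from matrix-vector products only
    M = torch.randn(50, 50)
    M = M @ M.T
    op = LinearOperator(lambda vecs: M @ vecs, device=M.device)
    print(hutchpp(op, dimension=50, n_queries=30), torch.trace(M))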
| [
"torch.stack",
"torch.triu_indices",
"torch.enable_grad",
"torch.cuda.current_device",
"torch.eye",
"torch.sum",
"torch.sqrt",
"torch.tensor",
"torch.zeros",
"torch.device",
"torch.cos",
"torch.isclose",
"torch.matmul",
"torch.sin",
"torch.split",
"torch.deg2rad",
"torch.randn",
"torch.svd"
] | 1.7.1 | kruus/pymde | 0bfa9c308660bda2fa5161ffce00ce22ef6e773b |
1.6 | from flash_pytorch import FLASHTransformer
from flash_pytorch.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 1024
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate GPT-like decoder model
model = FLASHTransformer(
num_tokens = 256,
dim = 512,
depth = 8,
causal = True,
group_size = 256,
shift_tokens = True
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
    X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()  # np.fromstring is deprecated for binary data; copy() keeps the array writable
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
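# Note: this script expects the enwik8 archive at ./data/enwik8.gz (the first 100MB of the
# English Wikipedia dump used for the Hutter Prize). The first 95M bytes are loaded and split
# 90M/5M into train/validation. With the constants above, each optimizer step sees
# BATCH_SIZE * GRADIENT_ACCUMULATE_EVERY = 16 sequences of SEQ_LEN = 1024 tokens.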
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
loss.backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
        print('%s \n\n %s' % (prime, '*' * 100))
sample = model.generate(inp[None, ...], GENERATE_LENGTH)
output_str = decode_tokens(sample[0])
print(output_str)
| [
"torch.no_grad",
"torch.utils.data.DataLoader",
"torch.from_numpy"
] | 1.6 | dumpmemory/FLASH-pytorch | 8e0d2fd7925c0de9703d666ea2cc004327f6e544 |
1.0 | import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
import numpy as np
import argparse
import librosa
import matplotlib.pyplot as plt
import torch
from utilities import create_folder, get_filename
from models import *
from pytorch_utils import move_data_to_device
import config
def audio_tagging(args):
"""Inference audio tagging result of an audio clip.
"""
    # Arguments & parameters
sample_rate = args.sample_rate
window_size = args.window_size
hop_size = args.hop_size
mel_bins = args.mel_bins
fmin = args.fmin
fmax = args.fmax
model_type = args.model_type
checkpoint_path = args.checkpoint_path
audio_path = args.audio_path
device = torch.device('cuda') if args.cuda and torch.cuda.is_available() else torch.device('cpu')
classes_num = config.classes_num
labels = config.labels
# Model
Model = eval(model_type)
model = Model(sample_rate=sample_rate, window_size=window_size,
hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax,
classes_num=classes_num)
checkpoint = torch.load(checkpoint_path, map_location=device)
model.load_state_dict(checkpoint['model'])
# Parallel
if 'cuda' in str(device):
model.to(device)
print('GPU number: {}'.format(torch.cuda.device_count()))
model = torch.nn.DataParallel(model)
else:
print('Using CPU.')
# Load audio
(waveform, _) = librosa.core.load(audio_path, sr=sample_rate, mono=True)
waveform = waveform[None, :] # (1, audio_length)
waveform = move_data_to_device(waveform, device)
# Forward
with torch.no_grad():
model.eval()
batch_output_dict = model(waveform, None)
print('batch_output_dict\n', batch_output_dict)
clipwise_output = batch_output_dict['clipwise_output'].data.cpu().numpy()[0]
"""(classes_num,)"""
print('clipwise_output:\n', clipwise_output)
sorted_indexes = np.argsort(clipwise_output)[::-1]
# Print audio tagging top probabilities
for k in range(10):
print('{}: {:.3f}'.format(np.array(labels)[sorted_indexes[k]],
clipwise_output[sorted_indexes[k]]))
# Print embedding
if 'embedding' in batch_output_dict.keys():
embedding = batch_output_dict['embedding'].data.cpu().numpy()[0]
print('embedding: {}'.format(embedding.shape))
return clipwise_output, labels
def sound_event_detection(args):
"""Inference sound event detection result of an audio clip.
"""
    # Arguments & parameters
sample_rate = args.sample_rate
window_size = args.window_size
hop_size = args.hop_size
mel_bins = args.mel_bins
fmin = args.fmin
fmax = args.fmax
model_type = args.model_type
checkpoint_path = args.checkpoint_path
audio_path = args.audio_path
device = torch.device('cuda') if args.cuda and torch.cuda.is_available() else torch.device('cpu')
classes_num = config.classes_num
labels = config.labels
frames_per_second = sample_rate // hop_size
print('frames_per_second', frames_per_second)
# Paths
fig_path = os.path.join('results', '{}.png'.format(get_filename(audio_path)))
create_folder(os.path.dirname(fig_path))
# Model
Model = eval(model_type)
model = Model(sample_rate=sample_rate, window_size=window_size,
hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax,
classes_num=classes_num)
checkpoint = torch.load(checkpoint_path, map_location=device)
model.load_state_dict(checkpoint['model'])
# Parallel
print('GPU number: {}'.format(torch.cuda.device_count()))
model = torch.nn.DataParallel(model)
if 'cuda' in str(device):
model.to(device)
# Load audio
(waveform, _) = librosa.core.load(audio_path, sr=sample_rate, mono=True)
waveform = waveform[None, :] # (1, audio_length)
waveform = move_data_to_device(waveform, device)
# Forward
with torch.no_grad():
model.eval()
batch_output_dict = model(waveform, None)
framewise_output = batch_output_dict['framewise_output'].data.cpu().numpy()[0]
"""(time_steps, classes_num)"""
print('framewise_output:', framewise_output[0].argmax())
print('Sound event detection result (time_steps x classes_num): {}'.format(
framewise_output.shape))
sorted_indexes = np.argsort(np.max(framewise_output, axis=0))[::-1]
top_k = 10 # Show top results
top_result_mat = framewise_output[:, sorted_indexes[0: top_k]]
print('top result mat', top_result_mat)
print('frame number', len(top_result_mat))
for idx, frame in enumerate(top_result_mat):
if idx % (frames_per_second//2) == 0:
if frame[frame.argmax()] > 0.03:
print('frame_label', idx/frames_per_second, np.array(labels)[sorted_indexes[0: top_k]][frame.argmax()])
else:
print('frame_label', 'None')
print(np.array(labels)[sorted_indexes[0: top_k]])
"""(time_steps, top_k)"""
# Plot result
stft = librosa.core.stft(y=waveform[0].data.cpu().numpy(), n_fft=window_size,
hop_length=hop_size, window='hann', center=True)
frames_num = stft.shape[-1]
fig, axs = plt.subplots(2, 1, sharex=True, figsize=(10, 4))
axs[0].matshow(np.log(np.abs(stft)), origin='lower', aspect='auto', cmap='jet')
axs[0].set_ylabel('Frequency bins')
axs[0].set_title('Log spectrogram')
axs[1].matshow(top_result_mat.T, origin='upper', aspect='auto', cmap='jet', vmin=0, vmax=1)
axs[1].xaxis.set_ticks(np.arange(0, frames_num, frames_per_second))
axs[1].xaxis.set_ticklabels(np.arange(0, frames_num / frames_per_second))
axs[1].yaxis.set_ticks(np.arange(0, top_k))
axs[1].yaxis.set_ticklabels(np.array(labels)[sorted_indexes[0: top_k]])
axs[1].yaxis.grid(color='k', linestyle='solid', linewidth=0.3, alpha=0.3)
axs[1].set_xlabel('Seconds')
axs[1].xaxis.set_ticks_position('bottom')
plt.tight_layout()
plt.savefig(fig_path)
print('Save sound event detection visualization to {}'.format(fig_path))
return framewise_output, labels
if __name__ == '__main__':
import time
start = time.time()
parser = argparse.ArgumentParser(description='Example of parser. ')
subparsers = parser.add_subparsers(dest='mode')
parser_at = subparsers.add_parser('audio_tagging')
parser_at.add_argument('--sample_rate', type=int, default=32000)
parser_at.add_argument('--window_size', type=int, default=1024)
parser_at.add_argument('--hop_size', type=int, default=320)
parser_at.add_argument('--mel_bins', type=int, default=64)
parser_at.add_argument('--fmin', type=int, default=50)
parser_at.add_argument('--fmax', type=int, default=14000)
parser_at.add_argument('--model_type', type=str, required=True)
parser_at.add_argument('--checkpoint_path', type=str, required=True)
parser_at.add_argument('--audio_path', type=str, required=True)
parser_at.add_argument('--cuda', action='store_true', default=False)
parser_sed = subparsers.add_parser('sound_event_detection')
parser_sed.add_argument('--sample_rate', type=int, default=32000)
parser_sed.add_argument('--window_size', type=int, default=1024)
parser_sed.add_argument('--hop_size', type=int, default=320)
parser_sed.add_argument('--mel_bins', type=int, default=64)
parser_sed.add_argument('--fmin', type=int, default=50)
parser_sed.add_argument('--fmax', type=int, default=14000)
parser_sed.add_argument('--model_type', type=str, required=True)
parser_sed.add_argument('--checkpoint_path', type=str, required=True)
parser_sed.add_argument('--audio_path', type=str, required=True)
parser_sed.add_argument('--cuda', action='store_true', default=False)
args = parser.parse_args()
if args.mode == 'audio_tagging':
audio_tagging(args)
elif args.mode == 'sound_event_detection':
sound_event_detection(args)
else:
raise Exception('Error argument!')
print("time :", time.time() - start)
| [
"torch.device",
"torch.no_grad",
"torch.cuda.device_count",
"torch.cuda.is_available",
"torch.load",
"torch.nn.DataParallel"
] | 1.0.1 | rollingman1/audioset_tagging_cnn | 5036f772dfa8dd05fbfb0b6fa5bfedcea10cfb10 |
1.4 | """ Split BatchNorm
A PyTorch BatchNorm layer that splits input batch into N equal parts and passes each through
a separate BN layer. The first split is passed through the parent BN layers with weight/bias
keys the same as the original BN. All other splits pass through BN sub-layers under the '.aux_bn'
namespace.
This allows easily removing the auxiliary BN layers after training to efficiently
achieve the 'Auxiliary BatchNorm' as described in the AdvProp Paper, section 4.2,
'Disentangled Learning via An Auxiliary BN'
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn
class SplitBatchNorm2d(torch.nn.BatchNorm2d):
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
track_running_stats=True, num_splits=2):
super().__init__(num_features, eps, momentum, affine, track_running_stats)
assert num_splits > 1, 'Should have at least one aux BN layer (num_splits at least 2)'
self.num_splits = num_splits
self.aux_bn = nn.ModuleList([
nn.BatchNorm2d(num_features, eps, momentum, affine, track_running_stats) for _ in range(num_splits - 1)])
def forward(self, input: torch.Tensor):
if self.training: # aux BN only relevant while training
split_size = input.shape[0] // self.num_splits
assert input.shape[0] == split_size * self.num_splits, "batch size must be evenly divisible by num_splits"
split_input = input.split(split_size)
x = [super().forward(split_input[0])]
for i, a in enumerate(self.aux_bn):
x.append(a(split_input[i + 1]))
return torch.cat(x, dim=0)
else:
return super().forward(input)
def convert_splitbn_model(module, num_splits=2):
"""
Recursively traverse module and its children to replace all instances of
    ``torch.nn.modules.batchnorm._BatchNorm`` with `SplitBatchNorm2d`.
Args:
module (torch.nn.Module): input module
num_splits: number of separate batchnorm layers to split input across
Example::
>>> # model is an instance of torch.nn.Module
>>> model = mytimm.models.convert_splitbn_model(model, num_splits=2)
"""
mod = module
if isinstance(module, torch.nn.modules.instancenorm._InstanceNorm):
return module
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
mod = SplitBatchNorm2d(
module.num_features, module.eps, module.momentum, module.affine,
module.track_running_stats, num_splits=num_splits)
mod.running_mean = module.running_mean
mod.running_var = module.running_var
mod.num_batches_tracked = module.num_batches_tracked
if module.affine:
# print("model is affine")
mod.weight.data = module.weight.data.clone().detach()
mod.bias.data = module.bias.data.clone().detach()
# else:
# print("ooooooooooooooooooooooooooops")
for aux in mod.aux_bn:
aux.running_mean = module.running_mean.clone()
aux.running_var = module.running_var.clone()
aux.num_batches_tracked = module.num_batches_tracked.clone()
if module.affine:
aux.weight.data = module.weight.data.clone().detach()
aux.bias.data = module.bias.data.clone().detach()
for name, child in module.named_children():
# print(name, child)
mod.add_module(name, convert_splitbn_model(child, num_splits=num_splits))
del module
return mod
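# Small self-contained check (illustrative only; the toy network and input sizes below are
# assumptions for demonstration, not part of the library):
if __name__ == "__main__":
    net = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.BatchNorm2d(8), nn.ReLU())
    net = convert_splitbn_model(net, num_splits=2)
    net.train()
    out = net(torch.randn(4, 3, 16, 16))  # batch size must be evenly divisible by num_splits
    print(type(net[1]).__name__, out.shape)  # SplitBatchNorm2d torch.Size([4, 8, 16, 16])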
| [
"torch.cat",
"torch.nn.BatchNorm2d"
] | 1.4.0 | wenh18/OnDeviceNAS | d6e39500b794ddd9737ef4bc631cf4f977b47617 |
1.4 | """ EfficientNet, MobileNetV3, etc Blocks
Hacked together by / Copyright 2019, Ross Wightman
"""
import torch
import torch.nn as nn
from torch.nn import functional as F
from .layers import create_conv2d, drop_path, make_divisible, create_act_layer
from .layers.activations import sigmoid
__all__ = [
'SqueezeExcite', 'ConvBnAct', 'DepthwiseSeparableConv', 'InvertedResidual', 'CondConvResidual', 'EdgeResidual']
class SqueezeExcite(nn.Module):
""" Squeeze-and-Excitation w/ specific features for EfficientNet/MobileNet family
Args:
in_chs (int): input channels to layer
rd_ratio (float): ratio of squeeze reduction
act_layer (nn.Module): activation layer of containing block
gate_layer (Callable): attention gate function
force_act_layer (nn.Module): override block's activation fn if this is set/bound
rd_round_fn (Callable): specify a fn to calculate rounding of reduced chs
"""
def __init__(
self, in_chs, rd_ratio=0.25, rd_channels=None, act_layer=nn.ReLU,
gate_layer=nn.Sigmoid, force_act_layer=None, rd_round_fn=None):
super(SqueezeExcite, self).__init__()
if rd_channels is None:
rd_round_fn = rd_round_fn or round
rd_channels = rd_round_fn(in_chs * rd_ratio)
act_layer = force_act_layer or act_layer
self.conv_reduce = nn.Conv2d(in_chs, rd_channels, 1, bias=True)
self.act1 = create_act_layer(act_layer, inplace=True)
self.conv_expand = nn.Conv2d(rd_channels, in_chs, 1, bias=True)
self.gate = create_act_layer(gate_layer)
def forward(self, x):
x_se = x.mean((2, 3), keepdim=True)
x_se = self.conv_reduce(x_se)
x_se = self.act1(x_se)
x_se = self.conv_expand(x_se)
return x * self.gate(x_se)
class ConvBnAct(nn.Module):
""" Conv + Norm Layer + Activation w/ optional skip connection
"""
def __init__(
self, in_chs, out_chs, kernel_size, stride=1, dilation=1, pad_type='',
skip=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, drop_path_rate=0.):
super(ConvBnAct, self).__init__()
self.has_residual = skip and stride == 1 and in_chs == out_chs
self.drop_path_rate = drop_path_rate
self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, padding=pad_type)
self.bn1 = norm_layer(out_chs)
self.act1 = act_layer(inplace=True)
def feature_info(self, location):
        if location == 'expansion':  # output of conv after act, same as block output
info = dict(module='act1', hook_type='forward', num_chs=self.conv.out_channels)
else: # location == 'bottleneck', block output
info = dict(module='', hook_type='', num_chs=self.conv.out_channels)
return info
def forward(self, x):
shortcut = x
x = self.conv(x)
x = self.bn1(x)
x = self.act1(x)
if self.has_residual:
if self.drop_path_rate > 0.:
x = drop_path(x, self.drop_path_rate, self.training)
x += shortcut
return x
class DepthwiseSeparableConv(nn.Module):
""" DepthwiseSeparable block
Used for DS convs in MobileNet-V1 and in the place of IR blocks that have no expansion
    (factor of 1.0). This is an alternative to having an IR with an optional first pw conv.
"""
def __init__(
self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, pad_type='',
noskip=False, pw_kernel_size=1, pw_act=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d,
se_layer=None, drop_path_rate=0.):
super(DepthwiseSeparableConv, self).__init__()
self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip
self.has_pw_act = pw_act # activation after point-wise conv
self.drop_path_rate = drop_path_rate
self.conv_dw = create_conv2d(
in_chs, in_chs, dw_kernel_size, stride=stride, dilation=dilation, padding=pad_type, depthwise=True)
self.bn1 = norm_layer(in_chs)
self.act1 = act_layer(inplace=True)
# Squeeze-and-excitation
self.se = se_layer(in_chs, act_layer=act_layer) if se_layer else nn.Identity()
self.conv_pw = create_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type)
self.bn2 = norm_layer(out_chs)
self.act2 = act_layer(inplace=True) if self.has_pw_act else nn.Identity()
def feature_info(self, location):
if location == 'expansion': # after SE, input to PW
info = dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels)
else: # location == 'bottleneck', block output
info = dict(module='', hook_type='', num_chs=self.conv_pw.out_channels)
return info
def forward(self, x):
shortcut = x
x = self.conv_dw(x)
x = self.bn1(x)
x = self.act1(x)
x = self.se(x)
x = self.conv_pw(x)
x = self.bn2(x)
x = self.act2(x)
if self.has_residual:
if self.drop_path_rate > 0.:
x = drop_path(x, self.drop_path_rate, self.training)
x += shortcut
return x
class InvertedResidual(nn.Module):
""" Inverted residual block w/ optional SE
Originally used in MobileNet-V2 - https://arxiv.org/abs/1801.04381v4, this layer is often
referred to as 'MBConv' for (Mobile inverted bottleneck conv) and is also used in
* MNasNet - https://arxiv.org/abs/1807.11626
* EfficientNet - https://arxiv.org/abs/1905.11946
* MobileNet-V3 - https://arxiv.org/abs/1905.02244
"""
def __init__(
self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, pad_type='',
noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d, se_layer=None, conv_kwargs=None, drop_path_rate=0.):
super(InvertedResidual, self).__init__()
conv_kwargs = conv_kwargs or {}
self.conv_kwargs = conv_kwargs
self.pad_type = pad_type
self.dilation = dilation
mid_chs = make_divisible(in_chs * exp_ratio)
self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
self.drop_path_rate = drop_path_rate
self.mid_chs = mid_chs
# Point-wise expansion
self.conv_pw = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs)
self.bn1 = norm_layer(mid_chs)
self.act1 = act_layer(inplace=True)
# Depth-wise convolution
self.conv_dw = create_conv2d(
mid_chs, mid_chs, dw_kernel_size, stride=stride, dilation=dilation,
padding=pad_type, depthwise=True, **conv_kwargs)
self.bn2 = norm_layer(mid_chs)
self.act2 = act_layer(inplace=True)
# Squeeze-and-excitation
self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity()
# Point-wise linear projection
self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs)
self.bn3 = norm_layer(out_chs)
def feature_info(self, location):
if location == 'expansion': # after SE, input to PWL
info = dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels)
else: # location == 'bottleneck', block output
info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels)
return info
def forward(self, x):
shortcut = x
# Point-wise expansion
x = self.conv_pw(x)
x = self.bn1(x)
x = self.act1(x)
# Depth-wise convolution
x = self.conv_dw(x)
x = self.bn2(x)
x = self.act2(x)
# Squeeze-and-excitation
x = self.se(x)
# Point-wise linear projection
x = self.conv_pwl(x)
x = self.bn3(x)
if self.has_residual:
if self.drop_path_rate > 0.:
x = drop_path(x, self.drop_path_rate, self.training)
x += shortcut
return x
class CondConvResidual(InvertedResidual):
""" Inverted residual block w/ CondConv routing"""
def __init__(
self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, pad_type='',
noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d, se_layer=None, num_experts=0, drop_path_rate=0.):
self.num_experts = num_experts
conv_kwargs = dict(num_experts=self.num_experts)
super(CondConvResidual, self).__init__(
in_chs, out_chs, dw_kernel_size=dw_kernel_size, stride=stride, dilation=dilation, pad_type=pad_type,
act_layer=act_layer, noskip=noskip, exp_ratio=exp_ratio, exp_kernel_size=exp_kernel_size,
pw_kernel_size=pw_kernel_size, se_layer=se_layer, norm_layer=norm_layer, conv_kwargs=conv_kwargs,
drop_path_rate=drop_path_rate)
self.routing_fn = nn.Linear(in_chs, self.num_experts)
def forward(self, x):
shortcut = x
# CondConv routing
pooled_inputs = F.adaptive_avg_pool2d(x, 1).flatten(1)
routing_weights = torch.sigmoid(self.routing_fn(pooled_inputs))
# Point-wise expansion
x = self.conv_pw(x, routing_weights)
x = self.bn1(x)
x = self.act1(x)
# Depth-wise convolution
x = self.conv_dw(x, routing_weights)
x = self.bn2(x)
x = self.act2(x)
# Squeeze-and-excitation
x = self.se(x)
# Point-wise linear projection
x = self.conv_pwl(x, routing_weights)
x = self.bn3(x)
if self.has_residual:
if self.drop_path_rate > 0.:
x = drop_path(x, self.drop_path_rate, self.training)
x += shortcut
return x
class EdgeResidual(nn.Module):
""" Residual block with expansion convolution followed by pointwise-linear w/ stride
Originally introduced in `EfficientNet-EdgeTPU: Creating Accelerator-Optimized Neural Networks with AutoML`
- https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html
This layer is also called FusedMBConv in the MobileDet, EfficientNet-X, and EfficientNet-V2 papers
* MobileDet - https://arxiv.org/abs/2004.14525
* EfficientNet-X - https://arxiv.org/abs/2102.05610
* EfficientNet-V2 - https://arxiv.org/abs/2104.00298
"""
def __init__(
self, in_chs, out_chs, exp_kernel_size=3, stride=1, dilation=1, pad_type='',
force_in_chs=0, noskip=False, exp_ratio=1.0, pw_kernel_size=1, act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d, se_layer=None, drop_path_rate=0.):
super(EdgeResidual, self).__init__()
if force_in_chs > 0:
mid_chs = make_divisible(force_in_chs * exp_ratio)
else:
mid_chs = make_divisible(in_chs * exp_ratio)
self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
self.drop_path_rate = drop_path_rate
self.pad_type = pad_type
self.dilation = dilation
self.mid_chs = mid_chs
self.noskip = noskip
# Expansion convolution
self.conv_exp = create_conv2d(
in_chs, mid_chs, exp_kernel_size, stride=stride, dilation=dilation, padding=pad_type)
self.bn1 = norm_layer(mid_chs)
self.act1 = act_layer(inplace=True)
# Squeeze-and-excitation
self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity()
# Point-wise linear projection
self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type)
self.bn2 = norm_layer(out_chs)
def feature_info(self, location):
if location == 'expansion': # after SE, before PWL
info = dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels)
else: # location == 'bottleneck', block output
info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels)
return info
def forward(self, x):
shortcut = x
# Expansion convolution
x = self.conv_exp(x)
x = self.bn1(x)
x = self.act1(x)
# Squeeze-and-excitation
x = self.se(x)
# Point-wise linear projection
x = self.conv_pwl(x)
x = self.bn2(x)
if self.has_residual:
if self.drop_path_rate > 0.:
x = drop_path(x, self.drop_path_rate, self.training)
x += shortcut
return x
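# Illustrative shape check (run within the package so the relative `.layers` imports resolve;
# the channel sizes and expansion ratio below are arbitrary example values, not model configs):
if __name__ == "__main__":
    block = InvertedResidual(16, 16, exp_ratio=4.0, se_layer=SqueezeExcite)
    x = torch.randn(2, 16, 32, 32)
    print(block(x).shape)  # torch.Size([2, 16, 32, 32]); residual is active (stride 1, in_chs == out_chs)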
| [
"torch.nn.Linear",
"torch.nn.Conv2d",
"torch.nn.Identity",
"torch.nn.functional.adaptive_avg_pool2d"
] | 1.4.0 | wenh18/OnDeviceNAS | d6e39500b794ddd9737ef4bc631cf4f977b47617 |
0.1 | import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, config):
super(Model, self).__init__()
self.conv1 = nn.Conv2d(3, 16, (3, 3), padding=1)
self.conv1_bn = nn.BatchNorm2d(16)
self.conv2 = nn.Conv2d(16, 32, (3, 3), padding=1, stride=2)
self.conv2_bn = nn.BatchNorm2d(32)
self.conv3 = nn.Conv2d(32, 64, (3, 3), padding=1, stride=2)
self.conv3_bn = nn.BatchNorm2d(64)
self.conv4 = nn.Conv2d(64, 128, (3, 3), padding=1, stride=2)
self.conv4_bn = nn.BatchNorm2d(128)
self.conv5 = nn.Conv2d(128, 256, (3, 3), padding=1, stride=2)
self.conv5_bn = nn.BatchNorm2d(256)
self.fc1 = nn.Linear(256 * 2 * 2, 2000)
self.fc2 = nn.Linear(2000, 2000)
self.fc3 = nn.Linear(2000, config['n_classes'])
def forward(self, x):
# [3, 32, 32] -> [16, 32, 32]
x = F.relu(self.conv1_bn(self.conv1(x)))
# [16, 32, 32] -> [32, 16, 16]
x = F.relu(self.conv2_bn(self.conv2(x)))
# [32, 16, 16] -> [64, 8, 8]
x = F.relu(self.conv3_bn(self.conv3(x)))
# [64, 8, 8] -> [128, 4, 4]
x = F.relu(self.conv4_bn(self.conv4(x)))
# [128, 4, 4] -> [256, 2, 2]
x = F.relu(self.conv5_bn(self.conv5(x)))
        # [256, 2, 2] -> [1024]
x = x.view(-1, 256 * 2 * 2)
# 1024 -> 2000
x = F.relu(F.dropout((self.fc1(x)), 0.0))
# 2000 -> 2000
# x = F.relu(F.dropout((self.fc2(x)), 0.5))
        # 2000 -> n_classes
x = self.fc3(x)
return x
def freeze_conv(self):
self.conv1.weight.requires_grad = False
self.conv1_bn.weight.requires_grad = False
self.conv1_bn.bias.requires_grad = False
self.conv2.weight.requires_grad = False
self.conv2_bn.weight.requires_grad = False
self.conv2_bn.bias.requires_grad = False
self.conv3.weight.requires_grad = False
self.conv3_bn.weight.requires_grad = False
self.conv3_bn.bias.requires_grad = False
self.conv4.weight.requires_grad = False
self.conv4_bn.weight.requires_grad = False
self.conv4_bn.bias.requires_grad = False
self.conv5.weight.requires_grad = False
self.conv5_bn.weight.requires_grad = False
self.conv5_bn.bias.requires_grad = False | [
"torch.nn.Linear",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d"
] | 0.1.2 | andrearosasco/DistilledReplay | 2a4efa88d22b9afc7016f07549114688f346dbe8 |
0.1 | import importlib
import os
from collections import OrderedDict
import torch
from torchvision.transforms import transforms
model_config = OrderedDict([
('arch', 'mlp2'),
('n_classes', 10),
('dropout', 0.5)
])
data_config = OrderedDict([
('dataset', 'PermutedMNIST'),
('valid', 0.0),
('num_workers', 4),
('train_transform', transforms.Compose([
lambda x: torch.FloatTensor(x),
lambda x: x / 255.0,
lambda x: (x - 0.1307) / 0.3081,
])),
('test_transform', transforms.Compose([
lambda x: torch.FloatTensor(x),
lambda x: x / 255.0,
lambda x: (x - 0.1307) / 0.3081,
]))
])
run_config = OrderedDict([
('experiment', 'run'), # This configuration will be executed by distill.py
('device', 'cuda'),
('tasks', list(range(10))),
('save', 'task1.distilled'), # Path for the distilled dataset
('seed', 1234),
])
log_config = OrderedDict([
('wandb', True),
('wandb_name', 'joint'),
('print', True),
('images', True), # Save the distilled images
])
param_config = OrderedDict([
('no_steps', 3), # Training epoch performed by the model on the distilled dataset
('steps', 'epoch'), # epoch or minibatch
('meta_lr', 0.1), # Learning rate for distilling images
('model_lr', 0.05), # Base learning rate for the model
('lr_lr', 0.0), # Learning rate for the lrs of the model at each optimization step
('outer_steps', 0), # Distillation epochs
('inner_steps', 0), # Optimization steps of the model
('batch_size', 128), # Minibatch size used during distillation
('distill_batch_size', 128),
('buffer_size', -1), # Number of examples per class kept in the buffer
])
config = OrderedDict([
('model_config', model_config),
('param_config', param_config),
('data_config', data_config),
('run_config', run_config),
('log_config', log_config),
])
if __name__ == '__main__':
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
experiment = importlib.import_module(config['run_config']['experiment'])
experiment.run(config) | [
"torch.FloatTensor"
] | 0.1.2 | andrearosasco/DistilledReplay | 2a4efa88d22b9afc7016f07549114688f346dbe8 |
1.7 | from typing import Optional, Any
import torch
import numpy as np
from abc import ABC, abstractmethod
from functools import lru_cache
from slam.common.utils import check_tensor, assert_debug
def torch__spherical_projection(t_pointcloud: torch.Tensor,
height: int,
width: int,
min_vertical_fov: float,
max_vertical_fov: float,
min_horizontal_fov: float = -180.0,
max_horizontal_fov: float = 180.0) -> tuple:
"""
Computes a spherical projection of the points of a point cloud
It will compute the pixel values of the points in t_pointcloud
Parameters
----------
t_pointcloud: torch.Tensor [B, N, 3]
        A batch of point clouds to project to spherical coordinates
height: int
the height of the destination image
width: int
the width of the destination image
min_vertical_fov: float (angle in degrees)
the field of view up of the image
max_vertical_fov: float (angle in degrees)
the field of view down of the image
min_horizontal_fov: float (optional)
the horizontal field of view left (of the image)
max_horizontal_fov: float (optional)
the horizontal field of view right (of the image)
Returns
-------
(t_row, t_col) : pair of torch.Tensor of size [B, N] (torch.float32)
t_row : the pixels' rows as a float for each point in the point cloud
t_col : the pixels' cols as a float for each point in the point cloud
"""
check_tensor(t_pointcloud, [-1, -1, 3])
fov_up = min_vertical_fov / 180.0 * np.pi
fov_down = max_vertical_fov / 180.0 * np.pi
fov = abs(fov_down) + abs(fov_up)
# get depth of all points
r = torch.norm(t_pointcloud, p=2, dim=2)
# Define a mask of validity to avoid nan
mask_0 = (r == 0.0).to(dtype=t_pointcloud.dtype)
mask_valid = 1.0 - mask_0
r = mask_0 * 0.001 + mask_valid * r
x = t_pointcloud[:, :, 0]
y = t_pointcloud[:, :, 1]
z = t_pointcloud[:, :, 2]
# compute angles
theta = - torch.atan2(y, x)
phi = torch.asin(z / r)
proj_col = 0.5 * (theta / np.pi + 1.0)
proj_row = 1.0 - (phi + abs(fov_down)) / fov
proj_col *= width
proj_row *= height
return proj_row * mask_valid - mask_0, proj_col * mask_valid - mask_0, r * mask_valid
def xyz_conversion(t_point_cloud: (torch.Tensor, np.ndarray)) -> torch.Tensor:
"""
Extracts the xyz fields of a point cloud
Parameters
----------
t_point_cloud : A [B, N, C >= 3] or a [N, C >= 3] array
Extracts the first three channels of a tensor
>>> assert (xyz_conversion(np.array([[1.0, 2.0, 3.0, 4.0]])) - np.array([[1.0, 2.0, 3.0]]) == 0.0).all()
"""
if len(list(t_point_cloud.shape)) == 2:
n, c = t_point_cloud.shape
assert_debug(c >= 3)
return t_point_cloud[:, :3]
else:
check_tensor(t_point_cloud, [-1, -1, -1])
b, n, c = t_point_cloud.shape
assert_debug(c >= 3)
return t_point_cloud[:, :, :3]
def depth_conversion(t_point_cloud: (torch.Tensor, np.ndarray)) -> (torch.Tensor, np.ndarray):
"""
Extracts the depth from a point cloud
Parameters
----------
t_point_cloud : (torch.Tensor, np.ndarray) [B, N, 3] of [N, 3]
A Point cloud which can be either a tensor or a numpy ndarray
Returns
-------
(torch.Tensor, np.ndarray) : [B, N, 1]
A Tensor of the same type as the input tensor
>>> check_tensor(depth_conversion(torch.randn(4, 10, 3)), [4, 10, 1])
>>> check_tensor(depth_conversion(np.random.randn(4, 10, 3)), [4, 10, 1])
>>> check_tensor(depth_conversion(np.random.randn(40, 3)), [40, 1])
"""
if len(t_point_cloud.shape) == 2:
assert_debug(isinstance(t_point_cloud, np.ndarray) and t_point_cloud.shape[1] >= 3)
return np.linalg.norm(t_point_cloud, ord=2, axis=1, keepdims=True)
else:
check_tensor(t_point_cloud, [-1, -1, -1])
if isinstance(t_point_cloud, np.ndarray):
return np.linalg.norm(t_point_cloud[:, :, :3], ord=2, axis=2, keepdims=True)
else:
return torch.norm(t_point_cloud[:, :, :3], p=2, dim=2, keepdim=True)
def build_spherical_image(t_point_cloud: torch.Tensor,
destination: torch.Tensor,
min_vertical_fov: float,
max_vertical_fov: float,
min_horizontal_fov: float = -180.0,
max_horizontal_fov: float = 180.0,
conversion_function: callable = lambda x: x):
"""
Builds a Spherical Image from a Point Cloud in place
Parameters
----------
t_point_cloud: torch.Tensor [B, N, C >= 3]
The first 3 channels corresponding to the coordinates X, Y, Z
destination: torch.Tensor [B, C_dest, H, W]
In which the image will be projected. The projection is done in place
min_vertical_fov: float in [0.0, 180.0]
The angle in degrees of the upward boundary of the fov
max_vertical_fov: float in [min_vertical_fov, 180.0]
The angle in degrees of the downward boundary of the fov
min_horizontal_fov: float in [-180.0, 180.0]
The angle in degrees of the leftward boundary of the fov
max_horizontal_fov: float in [min_horizontal_fov, 180.0]
The angle in degrees of the rightward boundary of the fov
conversion_function: callable
The function to convert a point cloud [B, N, C] into
a point cloud with the specific channels to put in the image [B, N, C_dest]
"""
check_tensor(destination, [-1, 3, -1, -1])
check_tensor(t_point_cloud, [-1, -1, -1])
# Extract channels to put in destination
channels_extracted = conversion_function(t_point_cloud)
b, n, c = t_point_cloud.shape
assert_debug(c >= 3, "The point cloud must have at least 3 channels")
bp, c_dest, height, width = destination.shape
assert_debug(bp == b, "Mismatch between the batch size of the destination and the source point cloud")
proj_row, proj_col, depth = torch__spherical_projection(t_point_cloud[:, :, :3],
height,
width,
min_vertical_fov,
max_vertical_fov,
min_horizontal_fov,
max_horizontal_fov)
proj_row = torch.floor(proj_row)
proj_row = proj_row.clamp(min=0, max=height - 1)
proj_col = torch.floor(proj_col)
proj_col = proj_col.clamp(min=0, max=width - 1)
b_idx = torch.arange(b, dtype=torch.int64, device=t_point_cloud.device).view(b, 1).expand(b, n).reshape(b * n)
order = torch.argsort(depth, dim=1).reshape(b * n)
proj_row = proj_row[b_idx, order].to(torch.int64)
proj_col = proj_col[b_idx, order].to(torch.int64)
destination[b_idx, :, proj_row, proj_col] = channels_extracted[b_idx, order, :]
class Projector(ABC):
"""
A Projector is an object which can project a PointCloud in an image
And construct a PointCloud from a Depth image
"""
def __init__(self,
transform: callable = lambda x: x,
height: Optional[int] = None,
width: Optional[int] = None):
# The transform mapping a pointcloud to a array or tensor of color values
# Used to construct an image from the point cloud
self.transform = transform
self.height: Optional[int] = height
self.width: Optional[int] = width
@abstractmethod
    def project_pointcloud(self, pointcloud: torch.Tensor, **kwargs) -> torch.Tensor:
"""
Projects the points of a PointCloud tensor in the image plane
Parameters
----------
pointcloud : torch.Tensor [B, N, 3]
A Pointcloud tensor (X, Y, Z) to be projected in the image plane
kwargs :
Additional arguments required for the projection
Returns
-------
torch.Tensor
The tensor of size [B, N, 2] of pixel values (as float) in the image plane
The first coordinate is the pixel of the row, and the second the pixel coordinate in the columns
When considering the image as a matrix. (The values can be outside of the image plane dimension)
"""
raise NotImplementedError("")
def project_normalized(self, pointcloud: torch.Tensor, height=None, width=None, **kwargs) -> torch.Tensor:
"""
Parameters
----------
pointcloud : torch.Tensor
The point cloud tensor [B, N, 3] to project in the image plane
height : int
The optional height of the image
Uses member height if it is None
width :
The optional width of the image
Uses member width if it is None
kwargs
Returns
-------
torch.Tensor [B, N, 2]
A Tensor of pixels normalized between -1, 1
"""
height = self.swap(height=height)
width = self.swap(width=width)
pixels = self.project_pointcloud(pointcloud, height=height, width=width, **kwargs)
rows = pixels[:, :, 0] * 2.0 / height
cols = pixels[:, :, 1] * 2.0 / width
pixels: torch.Tensor = (-1.0 + torch.cat([rows.unsqueeze(2), cols.unsqueeze(2)], dim=2))
return pixels
@abstractmethod
def rescaled_projector(self, new_height: int, new_width: int):
"""
Parameters
----------
new_height : int
The new height of the projector
new_width
The new width of the projector
Returns
-------
Projector
A similar Projector, with its dimension reset to new_height and new_width
And its appropriate parameters reset (intrinsics)
"""
raise NotImplementedError("")
def rescale_intrinsics(self, new_height: int, new_width: int, **kwargs) -> Any:
"""
Rescales the intrinsics parameters of the projection from the arguments
Parameters
----------
new_height : int
The height of the new image
new_width : int
The width of the new image
kwargs
arguments to rescale
(Depends on the type of the projector)
Returns
-------
Any
The intrinsics rescaled : depends on the type of Projector
"""
raise NotImplementedError("")
@abstractmethod
def set_projection_params(self, height: int = None, width: int = None, transform: callable = None, **kwargs):
"""
Reads projection params from the arguments and set the appropriate parameters
All named arguments are optional, and will only be set if they are not None
Parameters
----------
height : int
The height of the image created from a point cloud
width : int
The width of the image created from a point cloud
transform : callable
The transformation applied to a pointcloud to extract color channels to build
The projection image from
**kwargs : other variables
"""
if height is not None:
self.height = height
if width is not None:
self.width = width
if transform is not None:
self.transform = transform
def swap(self, **kwargs):
for key, value in kwargs.items():
assert_debug(hasattr(self, key))
if value is None:
member_value = getattr(self, key)
assert_debug(member_value is not None)
value = member_value
return value
def build_projection_map(self,
pointcloud: torch.Tensor,
default_value: float = 0.0,
height: Optional[int] = None,
width: Optional[int] = None,
transform: Optional[callable] = None,
**kwargs) -> torch.Tensor:
"""
Builds a projection image from a PointCloud (torch.Tensor)
Parameters
----------
pointcloud : torch.Tensor
A [B, N, C>=3] torch.Tensor with the first 3 channels the cartesian coordinates X, Y, Z
default_value : float
The default value for the image being built
height : int
Optional value of the height of the image created
(If it is None, the member height will be used)
width : int
Optional value of the width of the image created
(If it is None, the member height will be used)
transform : Optional callable
The function called on a point cloud which maps the input pointcloud
to the channels desired in the image created.
Transforms a [B, N, C] pointcloud to a [B, N, C_dest] point cloud
kwargs
Returns
-------
torch.Tensor : [B, C_dest, height, width]
An image of size (height, width)
(Either the height and width of the parameters or the member height and width)
"""
height = self.swap(height=height)
width = self.swap(width=width)
transform = self.swap(transform=transform)
check_tensor(pointcloud, [-1, -1, -1])
b, n, _ = pointcloud.shape
image_channels = pointcloud
if transform is not None:
image_channels = transform(image_channels)
c_dest = image_channels.shape[2]
# Build destination tensor
if default_value == 0.:
destination_image = torch.zeros(pointcloud.size(0),
c_dest,
height,
width,
device=pointcloud.device,
dtype=pointcloud.dtype)
else:
destination_image = torch.ones(pointcloud.size(0),
c_dest,
height,
width,
device=pointcloud.device,
dtype=pointcloud.dtype) * default_value
pixels = self.project_pointcloud(pointcloud, height=height, width=width, **kwargs)
r = pointcloud.norm(dim=2)
pixel_rows = pixels[:, :, 0].round()
pixel_cols = pixels[:, :, 1].round()
invalidity_mask = ~((pixel_rows[:] >= 0.0) * \
(pixel_rows[:] <= (height - 1)) * \
(pixel_cols[:] >= 0.0) * \
(pixel_cols[:] <= (width - 1)))
b_idx = torch.arange(b, dtype=torch.int64, device=pointcloud.device).view(b, 1).expand(b, n)
r[invalidity_mask] = -1.0
order = torch.argsort(r, dim=1, descending=True)
order = order.reshape(b, n)
b_idx = b_idx.reshape(b, n)
mask = r[b_idx, order] > 0.0
order = order[mask]
b_idx = b_idx[mask]
proj_row = pixel_rows[b_idx, order].to(torch.int64)
proj_col = pixel_cols[b_idx, order].to(torch.int64)
destination_image[b_idx, :, proj_row, proj_col] = image_channels[b_idx, order, :]
# TODO DEAL WITH [0, 0] coordinates clamping problem
return destination_image
@lru_cache(maxsize=10)
def torch_ones(b: int, n: int, dtype: torch.dtype, device: torch.device):
return torch.ones(b, n, 1, dtype=dtype, device=device)
class SphericalProjector(Projector):
"""
A SphericalProjector projects a pointcloud in a spherical image
Parameters
----------
up_fov : float
The field of view upward in degrees [-90, 90]
down_fov : float
The field of view downward in degrees [-90, up_vertical_fov]
"""
def __init__(self,
height: Optional[int] = None,
width: Optional[int] = None,
num_channels: Optional[int] = None,
up_fov: Optional[float] = None,
down_fov: Optional[float] = None,
conversion: Optional[callable] = xyz_conversion):
super().__init__(transform=conversion, height=height, width=width)
self.num_channels = num_channels
self.up_fov = up_fov
self.down_fov = down_fov
self.conversion = conversion
def project_pointcloud(self,
pointcloud: torch.Tensor,
height: Optional[int] = None,
width: Optional[int] = None,
up_fov: Optional[float] = None,
                           down_fov: Optional[float] = None, **kwargs) -> torch.Tensor:
"""
Project the pointcloud in the Spherical image
Parameters
----------
pointcloud : torch.Tensor [B, N, K>=3]
height: Optional[int]
The height of the spherical image built
width: Optional[int]
The width of the spherical image built
up_fov: Optional[float]
down_fov: Optional[float]
Returns
-------
pixel_tensor : torch.Tensor [B, N, 2]
The pixel tensor of the pointcloud projected in the Spherical image plane
First coordinates are the row values, Second are the column values
"""
check_tensor(pointcloud, [-1, -1, -1])
height: int = self.swap(height=height)
width = self.swap(width=width)
up_fov = self.swap(up_fov=up_fov)
down_fov = self.swap(down_fov=down_fov)
t_rows, t_cols, r = torch__spherical_projection(pointcloud[:, :, :3], height, width, up_fov, down_fov)
return torch.cat([t_rows.unsqueeze(2), t_cols.unsqueeze(2)], dim=2)
def rescaled_projector(self, new_height: int, new_width: int):
"""
Returns a rescaled Spherical projector
"""
return SphericalProjector(height=new_height,
width=new_width,
num_channels=self.num_channels,
up_fov=self.up_fov,
down_fov=self.down_fov,
conversion=self.conversion)
def rescale_intrinsics(self, new_height: int, new_width: int, **kwargs) -> Any:
"""
The Spherical projection does not need to rescale its intrinsics parameters
"""
raise NotImplementedError("")
def set_projection_params(self, up_fov: float = None, down_fov: float = None, **kwargs):
super().set_projection_params(**kwargs)
if up_fov is not None:
self.up_fov = up_fov
if down_fov is not None:
self.down_fov = down_fov
| [
"torch.arange",
"torch.norm",
"torch.argsort",
"torch.ones",
"torch.floor",
"torch.atan2",
"torch.asin"
] | 1.7.1 | Pandinosaurus/pyLiDAR-SLAM | 1baa21a67bd32f144f8e17583251ac777f81345e |
1.4 | '''
A module which implements the basic Transformer
'''
import uuid
import threading
import pdb
import torch
from torch import nn
from models.new_attention import NewAttention
from models.attention import MultiHeadedAttention
from models.embeddings import PositionEmbedding, TokenEmbedding
from models.utils import LabelSmoothingLoss, Translator
from utils import left_shift, right_shift, triu
class TransformerSublayer(nn.Module):
'''
Implements a sub layer of the transformer model, which consists of:
1) A sub layer module
2) Followed by dropout
3) Plus a residual connection
4) With layer normalization
'''
def __init__(self, sublayer, sublayer_shape, dropout_p=0.1):
''' Initialize the transformer sublayer '''
super(TransformerSublayer, self).__init__()
self.sublayer = sublayer
self.norm = nn.LayerNorm(sublayer_shape)
self.dropout = nn.Dropout(dropout_p, inplace=True)
self.reset_parameters()
def reset_parameters(self):
        ''' Reset parameters using xavier initialization '''
self.norm.reset_parameters()
def forward(self, inputs, *sublayer_args, **sublayer_kwargs): # pylint:disable=arguments-differ
''' The forward pass of the sublayer '''
return self.norm(inputs + self.dropout(self.sublayer(*sublayer_args, **sublayer_kwargs)))
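# --- Illustrative sketch (not part of the original file) ---
# Wraps a plain Linear layer in the residual + dropout + LayerNorm pattern
# implemented above; the sizes are arbitrary example values.
def _example_sublayer():
    dim = 8
    sublayer = TransformerSublayer(nn.Linear(dim, dim), dim, dropout_p=0.1)
    inputs = torch.randn(2, 5, dim)  # [batch, time, dim]
    # the residual input comes first, the remaining args feed the wrapped module
    return sublayer(inputs, inputs)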
class TransformerFFN(nn.Module):
''' Implements the Transformer feed-forward network '''
def __init__(self, embedding_size, hidden_dim):
super(TransformerFFN, self).__init__()
self.relu = nn.ReLU()
self.hidden = nn.Linear(embedding_size, hidden_dim)
self.output = nn.Linear(hidden_dim, embedding_size)
self.reset_parameters()
def reset_parameters(self):
        ''' Reset parameters using xavier initialization '''
gain = nn.init.calculate_gain('relu')
nn.init.xavier_uniform_(self.hidden.weight, gain)
nn.init.constant_(self.hidden.bias, 0.)
gain = nn.init.calculate_gain('linear')
nn.init.xavier_uniform_(self.output.weight, gain)
nn.init.constant_(self.output.bias, 0.)
def forward(self, inputs): # pylint:disable=arguments-differ
''' The forward pass of the feed-forward network '''
return self.output(self.relu(self.hidden(inputs)))
class TransformerEncoderLayer(nn.Module):
''' Implements a single encoder layer in a transformer encoder stack '''
def __init__(self, attn_config, num_heads, dim, hidden_dim, layer_i, dropout_p=0.1):
''' Initialize the transformer layer '''
super(TransformerEncoderLayer, self).__init__()
if attn_config['ffn_layer'][layer_i]:
self.ffn = TransformerSublayer(
TransformerFFN(dim, hidden_dim),
dim, dropout_p
)
print('enc layer %i has ffn' % layer_i)
self.self_attention = TransformerSublayer(
NewAttention(attn_config, dim, num_heads),
dim, dropout_p
)
def reset_parameters(self):
''' Reset the parameters of the module '''
self.ffn.reset_parameters()
self.self_attention.reset_parameters()
def forward(self, inputs, layer_i): # pylint:disable=arguments-differ
''' The forward pass '''
mask = inputs['mask']
state = inputs['state']
# print("encoder self attention")
state = self.self_attention(
state, # residual
state, state, state, mask, # passed to multiheaded attention
layer_i=layer_i
)
if hasattr(self, 'ffn'):
state = self.ffn(
state, # residual
state # passed to feed-forward network
)
return {'state': state, 'mask': mask}
class TransformerDecoderLayer(nn.Module):
''' Implements a single decoder layer in a transformer decoder stack '''
def __init__(self, dec_attn_config, enc_dec_attn_config, num_heads, dim, hidden_dim, layer_i, causal=True,
dropout_p=0.1):
''' Initialize the transformer layer '''
super(TransformerDecoderLayer, self).__init__()
self.causal = causal
self.uuid = uuid.uuid4()
self.enc_dec_attn_config = enc_dec_attn_config
if dec_attn_config['ffn_layer'][layer_i]:
self.ffn = TransformerSublayer(
TransformerFFN(dim, hidden_dim),
dim, dropout_p
)
print('dec layer %i has ffn' % layer_i)
self.self_attention = TransformerSublayer(
NewAttention(dec_attn_config, dim, num_heads),
dim, dropout_p
)
        if self.enc_dec_attn_config['enc_dec_attn_layer'] == 1 or \
                (type(self.enc_dec_attn_config['enc_dec_attn_layer']) is list and
                 self.enc_dec_attn_config['enc_dec_attn_layer'][layer_i] == 1):
if self.enc_dec_attn_config['enc_dec_attn_num_heads'] == -1:
src_num_heads = num_heads
elif type(self.enc_dec_attn_config['enc_dec_attn_num_heads']) is not list:
src_num_heads = self.enc_dec_attn_config['enc_dec_attn_num_heads']
else:
src_num_heads = self.enc_dec_attn_config['enc_dec_attn_num_heads'][layer_i]
assert src_num_heads != 0
self.source_attention = TransformerSublayer(
NewAttention(enc_dec_attn_config, dim, src_num_heads),
dim, dropout_p
)
print('layer %i num of src heads %i' % (layer_i, src_num_heads))
def reset_parameters(self):
''' Reset the parameters of the module '''
self.ffn.reset_parameters()
self.self_attention.reset_parameters()
if hasattr(self, 'source_attention'):
self.source_attention.reset_parameters()
def forward(self, inputs, sources, layer_i): # pylint:disable=arguments-differ
''' The forward pass '''
mask = inputs['mask']
state = inputs['state']
cache = inputs.get('cache')
decoder_position = state.shape[1] - 1
kwargs = {'layer_i': layer_i}
if self.causal and cache is not None:
            # If caching, we only need the last sequence element, which requires no causal masking.
residual = state[:, -1:]
kwargs['decoder_position'] = decoder_position
else:
# If not caching, use the full sequence and ensure an appropriate causal mask
residual = state
kwargs['key_mask'] = mask
kwargs['attention_mask'] = self.mask(state)
# print("decoder self attention")
state = self.self_attention(
residual, # residual
state, state, state, **kwargs # passed to multiheaded attention
)
source = sources['state']
# print("source", source)
kwargs = {'key_mask': sources['mask'], 'layer_i': layer_i}
if self.causal and cache is not None:
kwargs['decoder_position'] = decoder_position
# print("decoder source attention")
if hasattr(self, 'source_attention'):
# print("in source, state", state.shape)
state = self.source_attention(
state, # residual
source, source, state, **kwargs # passed to multiheaded attention
)
if hasattr(self, 'ffn'):
state = self.ffn(
state, # residual
state # passed to feed-forward network
)
if self.causal and cache is not None:
cached = cache.get(self.uuid)
if cached is None:
cache[self.uuid] = state
else:
# print("cached", cached.shape)
# print("state", state.shape)
try:
state = cache[self.uuid] = torch.cat((cached, state), 1)
except:
pdb.set_trace()
return {'state': state, 'mask': mask, 'cache': cache}
_masks = threading.local()
def mask(self, inputs):
'''
Get a self-attention mask
The mask will be of shape [T x T] containing elements from the set {0, -inf}
Input shape: (B x T x E)
Output shape: (T x T)
'''
if not self.causal:
return None
dim = inputs.shape[1]
device = inputs.device
mask_store = TransformerDecoderLayer._masks.__dict__
if device not in mask_store:
mask = inputs.new_full((dim, dim), float('-inf'))
mask_store[device] = triu(mask, 1, 1, 1)
mask = mask_store[device]
if mask.shape[0] < dim:
mask = mask.resize_(dim, dim).fill_(float('-inf'))
mask_store[device] = triu(mask, 1, 1, 1)
mask = mask_store[device]
return mask[None, :dim, :dim]
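# --- Illustrative sketch (not part of the original file) ---
# Shows the kind of causal mask mask() produces: zeros on and below the
# diagonal, -inf above it, so a position cannot attend to later positions.
# torch.triu is used here instead of the project's own triu() helper.
def _example_causal_mask(dim=4):
    mask = torch.full((dim, dim), float('-inf'))
    return torch.triu(mask, diagonal=1)  # [T, T]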
class NewTransformer(nn.Module):
''' The New Transformer module '''
def __init__(self, config, dataset):
''' Initialize the Transformer '''
super(NewTransformer, self).__init__()
self.dataset = dataset
self.embedding = TokenEmbedding(
dataset.vocab_size,
config.embedding_size,
padding_idx=self.padding_idx
)
self.position_embedding = PositionEmbedding(config.embedding_size)
self.dropout = nn.Dropout(config.dropout_p, inplace=True)
# Uniq attn attributes
self.attn_ofs_uniq = list(set(
config.enc_attn_offset + config.dec_attn_offset + config.enc_dec_attn_offset))
self.attn_std_uniq = list(set(
config.enc_attn_std + config.dec_attn_std + config.enc_dec_attn_std))
        # Allow for overriding the encoders and decoders in derived classes
self.encoders = self.create_encoders(config)
self.decoders = self.create_decoders(config)
self.label_smoothing = LabelSmoothingLoss(
config.label_smoothing or 0,
ignore_index=self.padding_idx,
reduction='none'
)
self.cross_entropy = nn.CrossEntropyLoss(
ignore_index=self.padding_idx,
reduction='none'
)
def create_encoders(self, config):
''' Create the transformer encoders '''
kwargs = {'dropout_p': config.dropout_p}
if config.ffn_layer == -1:
config.ffn_layer = [1] * config.num_layers
assert len(config.ffn_layer) == config.num_layers
attn_config = {'attn_type': config.enc_attn_type,
'attn_std': config.enc_attn_std,
'attn_offset': config.enc_attn_offset,
'num_layers': config.num_layers,
'num_heads': config.num_heads,
'which_attn': 'encoder',
'attn_threshold': config.enc_attn_threshold,
'attn_window': config.enc_attn_window,
'attn_impl': config.enc_attn_impl,
'ffn_layer': config.ffn_layer,
'attn_ofs_uniq': self.attn_ofs_uniq,
'attn_std_uniq': self.attn_std_uniq}
args = [attn_config, config.num_heads, config.embedding_size, config.hidden_dim]
encoders = nn.ModuleList([
TransformerEncoderLayer(*args, layer_i, **kwargs)
for layer_i in range(config.num_layers)
])
return encoders
def create_decoders(self, config):
''' Create the transformer decoders '''
kwargs = {'dropout_p': config.dropout_p}
if config.ffn_layer == -1:
config.ffn_layer = [1] * config.num_layers
assert len(config.ffn_layer) == config.num_layers
dec_attn_config = {'attn_type': config.dec_attn_type,
'attn_std': config.dec_attn_std,
'attn_offset': config.dec_attn_offset,
'num_layers': config.num_layers,
'num_heads': config.num_heads,
'which_attn': 'decoder',
'attn_threshold': config.dec_attn_threshold,
'attn_window': config.dec_attn_window,
'attn_impl': config.dec_attn_impl,
'ffn_layer': config.ffn_layer,
'attn_ofs_uniq': self.attn_ofs_uniq,
'attn_std_uniq': self.attn_std_uniq
}
enc_dec_attn_config = {'attn_type': config.enc_dec_attn_type,
'attn_std': config.enc_dec_attn_std,
'attn_offset': config.enc_dec_attn_offset,
'num_layers': config.num_layers,
'num_heads': config.num_heads,
'word_count_ratio': self.dataset.word_count_ratio,
'which_attn': 'source',
'enc_dec_attn_layer': config.enc_dec_attn_layer,
'enc_dec_attn_num_heads': config.enc_dec_attn_num_heads,
'attn_threshold': config.enc_dec_attn_threshold,
'attn_window': config.enc_dec_attn_window,
'attn_impl': config.enc_dec_attn_impl,
'ffn_layer': config.ffn_layer,
'attn_ofs_uniq': self.attn_ofs_uniq,
'attn_std_uniq': self.attn_std_uniq
}
args = [dec_attn_config, enc_dec_attn_config, config.num_heads, config.embedding_size, config.hidden_dim]
decoders = nn.ModuleList([
TransformerDecoderLayer(*args, layer_i, **kwargs)
for layer_i in range(config.num_layers)
])
return decoders
@property
def sos_idx(self):
''' Return the sos index '''
return self.dataset.sos_idx
@property
def padding_idx(self):
''' Return the padding index '''
return self.dataset.padding_idx
def translator(self, config):
''' Get a translator for this model '''
return Translator(config, self, self.dataset)
def reset_named_parameters(self, modules):
        ''' Reset the parameters of the specified modules '''
if 'encoder' in modules:
for encoder in self.encoders:
encoder.reset_parameters()
if 'decoder' in modules:
for decoder in self.decoders:
decoder.reset_parameters()
if 'embeddings' in modules:
self.embedding.reset_parameters()
def forward(self, batch): # pylint:disable=arguments-differ
''' A batch of inputs and targets '''
decoded = self.decode(
self.encode(batch['inputs']),
right_shift(batch['targets']),
input_lens=batch['input_lens']
)
logits = decoded['logits']
dims = list(range(1, logits.dim()))
targets = left_shift(batch['targets'])
nll = self.cross_entropy(logits, targets).sum(dims[:-1])
smoothed_nll = self.label_smoothing(logits, targets).sum(dims)
return smoothed_nll, nll
def encode(self, inputs):
''' Encode the inputs '''
word_embedding = self.embed(inputs, self.embedding)
encoded = {
'state': word_embedding,
'mask': inputs.eq(self.padding_idx)
}
for i, encoder in enumerate(self.encoders):
encoded = encoder(encoded, i)
return encoded
def decode(self, encoded, targets, decoders=None, embedding=None, cache=None, mask=None, input_lens=None):
''' Decode the encoded sequence to the targets '''
if decoders is None:
decoders = self.decoders
if embedding is None:
embedding = self.embedding
word_embedding = self.embed(targets, embedding)
decoded = {
'cache': cache,
'state': word_embedding,
'mask': targets.eq(self.padding_idx) if mask is None else mask
}
for i, decoder in enumerate(decoders):
# print("i", i)
decoded = decoder(decoded, encoded, i)
# compute projection to the vocabulary
state = decoded['state']
if cache is not None:
state = state[:, -1:]
return {
'cache': decoded.get('cache'),
'logits': embedding(state, transpose=True).transpose(2, 1), # transpose to B x C x ...
}
def embed(self, inputs, token_embedding):
''' Embed the given inputs '''
return self.dropout(token_embedding(inputs) + self.position_embedding(inputs))
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.cat",
"torch.nn.init.constant_",
"torch.nn.init.xavier_uniform_",
"torch.nn.ReLU",
"torch.nn.init.calculate_gain",
"torch.nn.CrossEntropyLoss"
] | 1.4.0 | Khoale1096/stupidNMT | 894536c16dc7ff958aa5571828a89ecabfcb72d7 |
1.0 | import torchvision
from torchvision.models import resnet as vrn
import torch.utils.model_zoo as model_zoo
from .utils import register
class ResNet(vrn.ResNet):
'Deep Residual Network - https://arxiv.org/abs/1512.03385'
def __init__(self, layers=[3, 4, 6, 3], bottleneck=vrn.Bottleneck, outputs=[5], groups=1, width_per_group=64, url=None):
self.stride = 128
self.bottleneck = bottleneck
self.outputs = outputs
self.url = url
kwargs = {'block': bottleneck, 'layers': layers, 'groups': groups, 'width_per_group': width_per_group}
super().__init__(**kwargs)
self.unused_modules = ['fc']
def initialize(self):
if self.url:
self.load_state_dict(model_zoo.load_url(self.url))
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
outputs = []
for i, layer in enumerate([self.layer1, self.layer2, self.layer3, self.layer4]):
level = i + 2
if level > max(self.outputs):
break
x = layer(x)
if level in self.outputs:
outputs.append(x)
return outputs
@register
def ResNet18C4():
return ResNet(layers=[2, 2, 2, 2], bottleneck=vrn.BasicBlock, outputs=[4], url=vrn.model_urls['resnet18'])
@register
def ResNet34C4():
return ResNet(layers=[3, 4, 6, 3], bottleneck=vrn.BasicBlock, outputs=[4], url=vrn.model_urls['resnet34'])
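# --- Illustrative usage sketch (not part of the original file) ---
# Assumes the @register decorator returns the constructor unchanged. Builds the
# C4 ResNet-18 backbone and runs a dummy batch; calling initialize() would
# additionally download the torchvision weights.
def _example_backbone():
    import torch
    backbone = ResNet18C4()
    images = torch.randn(1, 3, 224, 224)
    features = backbone(images)  # list holding the level-4 feature map
    return [f.shape for f in features]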
| [
"torch.utils.model_zoo.load_url"
] | 1.0.0 | Mo5mami/retinanet-examples | f7ad4ff6a99fe3e66f8a9c8e8a6e03b870f84700 |
1.8 | import argparse
import random
import numpy as np
import torch
from tsformer.exp_autoformer import Exp_Main
fix_seed = 2021
random.seed(fix_seed)
torch.manual_seed(fix_seed)
np.random.seed(fix_seed)
parser = argparse.ArgumentParser(
description='Autoformer & Transformer family for Time Series Forecasting')
# basic config
parser.add_argument(
'--is_training', type=int, required=True, default=1, help='status')
parser.add_argument(
'--model_id', type=str, required=True, default='test', help='model id')
parser.add_argument(
'--model',
type=str,
required=True,
default='Autoformer',
help='model name, options: [Autoformer, Informer, Transformer]')
# data loader
parser.add_argument(
'--data', type=str, required=True, default='ETTm1', help='dataset type')
parser.add_argument(
'--root_path',
type=str,
default='./data/ETT/',
help='root path of the data file')
parser.add_argument(
'--data_path', type=str, default='ETTh1.csv', help='data file')
parser.add_argument(
'--features',
type=str,
default='M',
help='forecasting task, options:[M, S, MS]')
parser.add_argument(
'--target', type=str, default='OT', help='target feature in S or MS task')
parser.add_argument(
'--freq',
type=str,
default='h',
help='freq for time features encoding, options:[s:secondly]')
parser.add_argument(
'--checkpoints',
type=str,
default='./checkpoints/',
help='location of model checkpoints')
# forecasting task
parser.add_argument(
'--seq_len', type=int, default=96, help='input sequence length')
parser.add_argument(
'--label_len', type=int, default=48, help='start token length')
parser.add_argument(
'--pred_len', type=int, default=96, help='prediction sequence length')
# model define
parser.add_argument('--enc_in', type=int, default=7, help='encoder input size')
parser.add_argument('--dec_in', type=int, default=7, help='decoder input size')
parser.add_argument('--c_out', type=int, default=7, help='output size')
parser.add_argument(
'--d_model', type=int, default=512, help='dimension of model')
parser.add_argument('--n_heads', type=int, default=8, help='num of heads')
parser.add_argument(
'--e_layers', type=int, default=2, help='num of encoder layers')
parser.add_argument(
'--d_layers', type=int, default=1, help='num of decoder layers')
parser.add_argument('--d_ff', type=int, default=2048, help='dimension of fcn')
parser.add_argument(
'--moving_avg', type=int, default=25, help='window size of moving average')
parser.add_argument('--factor', type=int, default=1, help='attn factor')
parser.add_argument(
'--distil',
action='store_false',
help='whether to use distilling in encoder,',
default=True)
parser.add_argument('--dropout', type=float, default=0.05, help='dropout')
parser.add_argument(
'--embed',
type=str,
default='timeF',
help='time features encoding, options:[timeF, fixed, learned]')
parser.add_argument(
'--activation', type=str, default='gelu', help='activation')
parser.add_argument(
'--output_attention',
action='store_true',
    help='whether to output attention in encoder')
parser.add_argument(
'--do_predict',
action='store_true',
help='whether to predict unseen future data')
# optimization
parser.add_argument(
'--num_workers', type=int, default=10, help='data loader num workers')
parser.add_argument('--itr', type=int, default=2, help='experiments times')
parser.add_argument(
'--train_epochs', type=int, default=10, help='train epochs')
parser.add_argument(
'--batch_size',
type=int,
default=32,
help='batch size of train input data')
parser.add_argument(
'--patience', type=int, default=3, help='early stopping patience')
parser.add_argument(
'--learning_rate',
type=float,
default=0.0001,
help='optimizer learning rate')
parser.add_argument('--des', type=str, default='test', help='exp description')
parser.add_argument('--loss', type=str, default='mse', help='loss function')
parser.add_argument(
'--lradj', type=str, default='type1', help='adjust learning rate')
parser.add_argument(
'--use_amp',
action='store_true',
help='use automatic mixed precision training',
default=False)
# GPU
parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')
parser.add_argument('--gpu', type=int, default=0, help='gpu')
parser.add_argument(
'--use_multi_gpu',
action='store_true',
help='use multiple gpus',
default=False)
parser.add_argument(
'--devices',
type=str,
default='0,1,2,3',
    help='device ids of multiple gpus')
args = parser.parse_args()
args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False
if args.use_gpu and args.use_multi_gpu:
    args.devices = args.devices.replace(' ', '')
device_ids = args.devices.split(',')
args.device_ids = [int(id_) for id_ in device_ids]
args.gpu = args.device_ids[0]
print('Args in experiment:')
print(args)
Exp = Exp_Main
if args.is_training:
for ii in range(args.itr):
# setting record of experiments
setting = '{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format(
args.model_id, args.model, args.data, args.features, args.seq_len,
args.label_len, args.pred_len, args.d_model, args.n_heads,
args.e_layers, args.d_layers, args.d_ff, args.factor, args.embed,
args.distil, args.des, ii)
exp = Exp(args) # set experiments
print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(
setting))
exp.train(setting)
print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(
setting))
exp.test(setting)
if args.do_predict:
print('>>>>>>>predicting : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.
format(setting))
exp.predict(setting, True)
torch.cuda.empty_cache()
else:
ii = 0
setting = '{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format(
args.model_id, args.model, args.data, args.features, args.seq_len,
args.label_len, args.pred_len, args.d_model, args.n_heads,
args.e_layers, args.d_layers, args.d_ff, args.factor, args.embed,
args.distil, args.des, ii)
exp = Exp(args) # set experiments
print(
'>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))
exp.test(setting, test=1)
torch.cuda.empty_cache()
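# --- Illustrative invocation (not part of the original script) ---
# Hypothetical example; the script name, paths and flag values are assumptions:
#   python run.py --is_training 1 --model_id test --model Autoformer \
#       --data ETTh1 --root_path ./data/ETT/ --data_path ETTh1.csv \
#       --seq_len 96 --label_len 48 --pred_len 96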
| [
"torch.manual_seed",
"torch.cuda.empty_cache",
"torch.cuda.is_available"
] | 1.8.0 | Fanxingye/TsFormer | da6e7eee1bddb44e2e98f07c9f0d374793e80da6 |
1.0 | import cv2
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
class GuidedBackProp():
def __init__(self, model, use_cuda):
self.model = model.eval()
self.use_cuda = use_cuda
if self.use_cuda:
self.model = self.model.cuda()
for module in self.model.named_modules():
module[1].register_backward_hook(self.bp_relu)
def bp_relu(self, module, grad_in, grad_out):
if isinstance(module, nn.ReLU):
return (torch.clamp(grad_in[0], min=0.0), )
def __call__(self, x, index=None):
x = x.clone()
if self.use_cuda:
x = x.cuda()
x.requires_grad_()
output = self.model(x)
        if index is None:
index = np.argmax(output.cpu().data.numpy())
one_hot = np.zeros((1, output.size()[-1]), dtype = np.float32)
one_hot[0][index] = 1
one_hot = torch.from_numpy(one_hot)
one_hot.requires_grad_()
if self.use_cuda:
one_hot = torch.sum(one_hot.cuda() * output)
else:
one_hot = torch.sum(one_hot * output)
one_hot.backward()
result = x.grad.cpu().numpy()[0]
result = np.transpose(result, (1,2,0))
return result, index
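# --- Illustrative usage sketch (not part of the original file) ---
# The classifier and input size are arbitrary example choices; any model whose
# ReLU activations should be filtered during backprop would do.
def _example_guided_backprop():
    import torchvision
    model = torchvision.models.vgg16()
    gbp = GuidedBackProp(model, use_cuda=False)
    image = torch.randn(1, 3, 224, 224)  # preprocessed input batch
    grad_map, class_idx = gbp(image)     # [H, W, 3] gradient image, predicted class
    return arrange_img(grad_map), class_idx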
def arrange_img(img):
img = np.maximum(img, 0)
res = img - img.min()
res /= res.max()
res = np.uint8(res*255)
return res | [
"torch.clamp",
"torch.from_numpy",
"torch.sum"
] | 1.0.0 | kamata1729/visualize-pytorch | ec1b3fe0952c5db187a5d4875cd1539a1b7a1270 |
0.4 | import torch as th
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class PointLikeMixer(nn.Module):
def __init__(self, args):
super(PointLikeMixer, self).__init__()
self.args = args
self.n_agents = args.n_agents
self.n_groups = args.mixing_group_dim
self.state_dim = int(np.prod(args.state_shape))
self.embed_dim = args.mixing_embed_dim
self.group_dim = args.mixing_group_dim
self.hyper_w_1 = nn.Linear(self.state_dim, self.embed_dim * self.n_groups)
self.hyper_w_final = nn.Linear(self.state_dim, self.embed_dim)
# State dependent bias for hidden layer
self.hyper_b_1 = nn.Linear(self.state_dim, self.embed_dim)
# V(s) instead of a bias for the last layers
self.V = nn.Sequential(nn.Linear(self.state_dim, self.embed_dim),
nn.ReLU(),
nn.Linear(self.embed_dim, 1))
def forward(self, agent_qs, states):
bs = agent_qs.size(0)
states = states.reshape(-1, self.state_dim)
agent_qs = agent_qs.view(-1, self.group_dim, self.n_agents)
group_qs = agent_qs.sum(dim=2).view(-1, 1, self.group_dim)
# First layer
w1 = th.abs(self.hyper_w_1(states))
b1 = self.hyper_b_1(states)
w1 = w1.view(-1, self.n_groups, self.embed_dim)
b1 = b1.view(-1, 1, self.embed_dim)
hidden = F.elu(th.bmm(group_qs, w1) + b1)
# Second layer
w_final = th.abs(self.hyper_w_final(states))
w_final = w_final.view(-1, self.embed_dim, 1)
# State-dependent bias
v = self.V(states).view(-1, 1, 1)
# Compute final output
y = th.bmm(hidden, w_final) + v
# Reshape and return
q_tot = y.view(bs, -1, 1)
return q_tot
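# --- Illustrative usage sketch (not part of the original file) ---
# SimpleNamespace stands in for the real config object; the attribute names
# match what the mixer reads, the values are arbitrary examples.
def _example_mixer():
    from types import SimpleNamespace
    args = SimpleNamespace(n_agents=4, mixing_group_dim=2,
                           state_shape=(10,), mixing_embed_dim=32)
    mixer = PointLikeMixer(args)
    agent_qs = th.rand(3, 5, args.mixing_group_dim * args.n_agents)  # [bs, T, groups*agents]
    states = th.rand(3, 5, 10)                                       # [bs, T, state_dim]
    return mixer(agent_qs, states)                                   # [bs, T, 1]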
| [
"torch.nn.Linear",
"torch.bmm",
"torch.nn.ReLU"
] | 0.4.1 | wjh720/pymarl | 9392407568d440c4808a1c7c98ddf1ef52e0c009 |
1.8 | import torch
from pytorchfi.core import fault_injection as pfi_core
from .util_test import helper_setUp_CIFAR10_same
class TestWeightFIcpu:
"""
Testing focuses on weight perturbations.
"""
def setup_class(self):
torch.manual_seed(0)
self.BATCH_SIZE = 1
self.WORKERS = 1
self.channels = 3
self.img_size = 32
self.USE_GPU = False
self.model, self.dataset = helper_setUp_CIFAR10_same(
self.BATCH_SIZE, self.WORKERS
)
self.dataiter = iter(self.dataset)
self.images, self.labels = self.dataiter.next()
self.model.eval()
with torch.no_grad():
self.output = self.model(self.images)
self.p = pfi_core(
self.model,
self.BATCH_SIZE,
input_shape=[self.channels, self.img_size, self.img_size],
use_cuda=self.USE_GPU,
)
def test_neuronFI_singleElement(self):
layer_i = 1
k = 15
c_i = 20
h_i = 2
w_i = 3
inj_value_i = 10000.0
self.inj_model = self.p.declare_weight_fi(
layer_num=layer_i, k=k, dim1=c_i, dim2=h_i, dim3=w_i, value=inj_value_i
)
self.inj_model.eval()
with torch.no_grad():
corrupted_output_1 = self.inj_model(self.images)
if torch.all(corrupted_output_1.eq(self.output)):
raise AssertionError
self.inj_model = self.p.declare_weight_fi(
layer_num=layer_i,
k=k,
dim1=c_i,
dim2=h_i,
dim3=w_i,
value=0.01388985849916935,
)
self.inj_model.eval()
with torch.no_grad():
uncorrupted_output = self.inj_model(self.images)
if not torch.all(uncorrupted_output.eq(self.output)):
raise AssertionError
self.inj_model = self.p.declare_weight_fi(
layer_num=layer_i,
k=k,
dim1=c_i,
dim2=h_i,
dim3=w_i,
value=inj_value_i * 2,
)
self.inj_model.eval()
with torch.no_grad():
corrupted_output_2 = self.inj_model(self.images)
if torch.all(corrupted_output_2.eq(self.output)):
raise AssertionError
if not torch.all(corrupted_output_2.eq(corrupted_output_2)):
raise AssertionError
def test_neuronFI_singleElement_noErr(self):
layer_i = 4
k = 153
c_i = 254
h_i = 0
w_i = 0
inj_value_i = 10000.0
self.inj_model = self.p.declare_weight_fi(
layer_num=layer_i, k=k, dim1=c_i, dim2=h_i, dim3=w_i, value=inj_value_i
)
self.inj_model.eval()
with torch.no_grad():
corrupted_output_1 = self.inj_model(self.images)
if not torch.all(corrupted_output_1.eq(self.output)):
raise AssertionError
| [
"torch.manual_seed",
"torch.no_grad"
] | 1.8.1 | TarekAloui/pytorchfi | 29915e158941a21fc786e6a59c958ec751a59167 |
0.4 | import json
import math
import logging
import string
import nltk
import scipy
import torch
from nltk.stem.porter import *
import numpy as np
from collections import Counter
import os
from torch.autograd import Variable
import config
import pykp
from utils import Progbar
from pykp.metric.bleu import bleu
stemmer = PorterStemmer()
def process_predseqs(pred_seqs, oov, id2word, opt):
'''
:param pred_seqs:
:param src_str:
:param oov:
:param id2word:
:param opt:
:return:
'''
processed_seqs = []
if_valid = []
for seq in pred_seqs:
# print('-' * 50)
# print('seq.sentence: ' + str(seq.sentence))
# print('oov: ' + str(oov))
#
# for x in seq.sentence[:-1]:
# if x >= opt.vocab_size and len(oov)==0:
# print('ERROR')
# convert to words and remove the EOS token
seq_sentence_np = [int(x) for x in seq.sentence]
processed_seq = [id2word[x] if x < opt.vocab_size else oov[x - opt.vocab_size] for x in seq_sentence_np[:-1]]
# print('processed_seq: ' + str(processed_seq))
# print('%s - %s' % (str(seq.sentence[:-1]), str(processed_seq)))
keep_flag = True
if len(processed_seq) == 0:
keep_flag = False
if keep_flag and any([w == pykp.io.UNK_WORD for w in processed_seq]):
keep_flag = False
if keep_flag and any([w == '.' or w == ',' for w in processed_seq]):
keep_flag = False
if_valid.append(keep_flag)
processed_seqs.append((seq, processed_seq, seq.score))
unzipped = list(zip(*(processed_seqs)))
processed_seqs, processed_str_seqs, processed_scores = unzipped if len(processed_seqs) > 0 and len(unzipped) == 3 else ([], [], [])
assert len(processed_seqs) == len(processed_str_seqs) == len(processed_scores) == len(if_valid)
return if_valid, processed_seqs, processed_str_seqs, processed_scores
def post_process_predseqs(seqs, num_oneword_seq=1):
processed_seqs = []
# -1 means no filter applied
if num_oneword_seq == -1:
return seqs
for seq, str_seq, score in zip(*seqs):
keep_flag = True
if len(str_seq) == 1 and num_oneword_seq <= 0:
keep_flag = False
if keep_flag:
processed_seqs.append((seq, str_seq, score))
            # update the number of one-word sequences to keep
if len(str_seq) == 1:
num_oneword_seq -= 1
unzipped = list(zip(*(processed_seqs)))
if len(unzipped) != 3:
return ([], [], [])
else:
return unzipped
def if_present_phrase(src_str_tokens, phrase_str_tokens):
"""
:param src_str_tokens: a list of strings (words) of source text
:param phrase_str_tokens: a list of strings (words) of a phrase
:return:
"""
match_pos_idx = -1
for src_start_idx in range(len(src_str_tokens) - len(phrase_str_tokens) + 1):
match_flag = True
# iterate each word in target, if one word does not match, set match=False and break
for seq_idx, seq_w in enumerate(phrase_str_tokens):
src_w = src_str_tokens[src_start_idx + seq_idx]
if src_w != seq_w:
match_flag = False
break
if match_flag:
match_pos_idx = src_start_idx
break
return match_flag, match_pos_idx
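# --- Illustrative example (not part of the original file) ---
# Checks whether the phrase "neural network" occurs as a contiguous span in a
# tokenised source text; returns the match flag and the start position.
def _example_if_present_phrase():
    src = ['a', 'deep', 'neural', 'network', 'model']
    phrase = ['neural', 'network']
    return if_present_phrase(src, phrase)  # (True, 2)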
def if_present_duplicate_phrases(src_str, trgs_str, do_stemming=True, check_duplicate=True):
if do_stemming:
src_to_match = stem_word_list(src_str)
else:
src_to_match = src_str
present_indices = []
present_flags = []
    phrase_set = set()  # some phrases collapse to the same form after stemming (e.g. "model" and "models"), so only the first occurrence is kept
for trg_str in trgs_str:
if do_stemming:
trg_to_match = stem_word_list(trg_str)
else:
trg_to_match = trg_str
# check if the phrase appears in source text
# iterate each word in source
match_flag, match_pos_idx = if_present_phrase(src_to_match, trg_to_match)
# check if it is duplicate, if true then ignore it
if check_duplicate and '_'.join(trg_to_match) in phrase_set:
present_flags.append(False)
present_indices.append(match_pos_idx)
continue
else:
# if it reaches the end of source and no match, means it doesn't appear in the source
present_flags.append(match_flag)
present_indices.append(match_pos_idx)
phrase_set.add('_'.join(trg_to_match))
assert len(present_flags) == len(present_indices)
return present_flags, present_indices
def evaluate_beam_search(generator, data_loader, opt, title='', epoch=1, predict_save_path=None):
logger = config.init_logging(title, predict_save_path + '/%s.log' % title, redirect_to_stdout=False)
progbar = Progbar(logger=logger, title=title, target=len(data_loader.dataset.examples), batch_size=data_loader.batch_size,
total_examples=len(data_loader.dataset.examples))
topk_range = [5, 10]
score_names = ['precision', 'recall', 'f_score']
example_idx = 0
score_dict = {} # {'precision@5':[],'recall@5':[],'f1score@5':[], 'precision@10':[],'recall@10':[],'f1score@10':[]}
for i, batch in enumerate(data_loader):
# if i > 5:
# break
one2many_batch, one2one_batch = batch
src_list, src_len, trg_list, _, trg_copy_target_list, src_oov_map_list, oov_list, src_str_list, trg_str_list = one2many_batch
if torch.cuda.is_available():
src_list = src_list.cuda()
src_oov_map_list = src_oov_map_list.cuda()
print("batch size - %s" % str(src_list.size(0)))
print("src size - %s" % str(src_list.size()))
print("target size - %s" % len(trg_copy_target_list))
pred_seq_list = generator.beam_search(src_list, src_len, src_oov_map_list, oov_list, opt.word2id)
'''
process each example in current batch
'''
for src, src_str, trg, trg_str_seqs, trg_copy, pred_seq, oov in zip(src_list, src_str_list, trg_list, trg_str_list, trg_copy_target_list, pred_seq_list, oov_list):
logger.info('====================== %d =========================' % (i))
print_out = ''
print_out += '[Source][%d]: %s \n' % (len(src_str), ' '.join(src_str))
src = src.cpu().data.numpy() if torch.cuda.is_available() else src.data.numpy()
print_out += '\nSource Input: \n %s\n' % (' '.join([opt.id2word[x] for x in src[:len(src_str) + 5]]))
print_out += 'Real Target String [%d] \n\t\t%s \n' % (len(trg_str_seqs), trg_str_seqs)
print_out += 'Real Target Input: \n\t\t%s \n' % str([[opt.id2word[x] for x in t] for t in trg])
print_out += 'Real Target Copy: \n\t\t%s \n' % str([[opt.id2word[x] if x < opt.vocab_size else oov[x - opt.vocab_size] for x in t] for t in trg_copy])
trg_str_is_present_flags, _ = if_present_duplicate_phrases(src_str, trg_str_seqs)
# ignore the cases that there's no present phrases
if opt.must_appear_in_src and np.sum(trg_str_is_present_flags) == 0:
logger.error('found no present targets')
continue
print_out += '[GROUND-TRUTH] #(present)/#(all targets)=%d/%d\n' % (sum(trg_str_is_present_flags), len(trg_str_is_present_flags))
print_out += '\n'.join(['\t\t[%s]' % ' '.join(phrase) if is_present else '\t\t%s' % ' '.join(phrase) for phrase, is_present in zip(trg_str_seqs, trg_str_is_present_flags)])
print_out += '\noov_list: \n\t\t%s \n' % str(oov)
# 1st filtering
pred_is_valid_flags, processed_pred_seqs, processed_pred_str_seqs, processed_pred_score = process_predseqs(pred_seq, oov, opt.id2word, opt)
# 2nd filtering: if filter out phrases that don't appear in text, and keep unique ones after stemming
if opt.must_appear_in_src:
pred_is_present_flags, _ = if_present_duplicate_phrases(src_str, processed_pred_str_seqs)
filtered_trg_str_seqs = np.asarray(trg_str_seqs)[trg_str_is_present_flags]
            else:
                pred_is_present_flags = [True] * len(processed_pred_str_seqs)
                filtered_trg_str_seqs = np.asarray(trg_str_seqs)
valid_and_present = np.asarray(pred_is_valid_flags) * np.asarray(pred_is_present_flags)
match_list = get_match_result(true_seqs=filtered_trg_str_seqs, pred_seqs=processed_pred_str_seqs)
print_out += '[PREDICTION] #(valid)=%d, #(present)=%d, #(retained&present)=%d, #(all)=%d\n' % (sum(pred_is_valid_flags), sum(pred_is_present_flags), sum(valid_and_present), len(pred_seq))
print_out += ''
'''
Print and export predictions
'''
preds_out = ''
for p_id, (seq, word, score, match, is_valid, is_present) in enumerate(
zip(processed_pred_seqs, processed_pred_str_seqs, processed_pred_score, match_list, pred_is_valid_flags, pred_is_present_flags)):
# if p_id > 5:
# break
preds_out += '%s\n' % (' '.join(word))
if is_present:
print_phrase = '[%s]' % ' '.join(word)
else:
print_phrase = ' '.join(word)
if is_valid:
print_phrase = '*%s' % print_phrase
if match == 1.0:
correct_str = '[correct!]'
else:
correct_str = ''
if any([t >= opt.vocab_size for t in seq.sentence]):
copy_str = '[copied!]'
else:
copy_str = ''
print_out += '\t\t[%.4f]\t%s \t %s %s%s\n' % (-score, print_phrase, str(seq.sentence), correct_str, copy_str)
'''
Evaluate predictions w.r.t different filterings and metrics
'''
processed_pred_seqs = np.asarray(processed_pred_seqs)[valid_and_present]
filtered_processed_pred_str_seqs = np.asarray(processed_pred_str_seqs)[valid_and_present]
filtered_processed_pred_score = np.asarray(processed_pred_score)[valid_and_present]
# 3rd round filtering (one-word phrases)
num_oneword_seq = -1
filtered_pred_seq, filtered_pred_str_seqs, filtered_pred_score = post_process_predseqs((processed_pred_seqs, filtered_processed_pred_str_seqs, filtered_processed_pred_score), num_oneword_seq)
match_list_exact = get_match_result(true_seqs=filtered_trg_str_seqs, pred_seqs=filtered_pred_str_seqs, type='exact')
match_list_soft = get_match_result(true_seqs=filtered_trg_str_seqs, pred_seqs=filtered_pred_str_seqs, type='partial')
assert len(filtered_pred_seq) == len(filtered_pred_str_seqs) == len(filtered_pred_score) == len(match_list_exact) == len(match_list_soft)
print_out += "\n ======================================================="
print_pred_str_seqs = [" ".join(item) for item in filtered_pred_str_seqs]
print_trg_str_seqs = [" ".join(item) for item in filtered_trg_str_seqs]
# print_out += "\n PREDICTION: " + " / ".join(print_pred_str_seqs)
# print_out += "\n GROUND TRUTH: " + " / ".join(print_trg_str_seqs)
for topk in topk_range:
results_exact = evaluate(match_list_exact, filtered_pred_str_seqs, filtered_trg_str_seqs, topk=topk)
for k, v in zip(score_names, results_exact):
if '%s@%d_exact' % (k, topk) not in score_dict:
score_dict['%s@%d_exact' % (k, topk)] = []
score_dict['%s@%d_exact' % (k, topk)].append(v)
print_out += "\n ------------------------------------------------- EXACT, k=%d" % (topk)
print_out += "\n --- batch precision, recall, fscore: " + str(results_exact[0]) + " , " + str(results_exact[1]) + " , " + str(results_exact[2])
print_out += "\n --- total precision, recall, fscore: " + str(np.average(score_dict['precision@%d_exact' % (topk)])) + " , " +\
str(np.average(score_dict['recall@%d_exact' % (topk)])) + " , " +\
str(np.average(score_dict['f_score@%d_exact' % (topk)]))
for topk in topk_range:
results_soft = evaluate(match_list_soft, filtered_pred_str_seqs, filtered_trg_str_seqs, topk=topk)
for k, v in zip(score_names, results_soft):
if '%s@%d_soft' % (k, topk) not in score_dict:
score_dict['%s@%d_soft' % (k, topk)] = []
score_dict['%s@%d_soft' % (k, topk)].append(v)
print_out += "\n ------------------------------------------------- SOFT, k=%d" % (topk)
print_out += "\n --- batch precision, recall, fscore: " + str(results_soft[0]) + " , " + str(results_soft[1]) + " , " + str(results_soft[2])
print_out += "\n --- total precision, recall, fscore: " + str(np.average(score_dict['precision@%d_soft' % (topk)])) + " , " +\
str(np.average(score_dict['recall@%d_soft' % (topk)])) + " , " +\
str(np.average(score_dict['f_score@%d_soft' % (topk)]))
print_out += "\n ======================================================="
logger.info(print_out)
'''
write predictions to disk
'''
if predict_save_path:
if not os.path.exists(os.path.join(predict_save_path, title + '_detail')):
os.makedirs(os.path.join(predict_save_path, title + '_detail'))
with open(os.path.join(predict_save_path, title + '_detail', str(example_idx) + '_print.txt'), 'w') as f_:
f_.write(print_out)
with open(os.path.join(predict_save_path, title + '_detail', str(example_idx) + '_prediction.txt'), 'w') as f_:
f_.write(preds_out)
out_dict = {}
out_dict['src_str'] = src_str
out_dict['trg_str'] = trg_str_seqs
out_dict['trg_present_flag'] = trg_str_is_present_flags
out_dict['pred_str'] = processed_pred_str_seqs
out_dict['pred_score'] = [float(s) for s in processed_pred_score]
out_dict['present_flag'] = pred_is_present_flags
out_dict['valid_flag'] = pred_is_valid_flags
out_dict['match_flag'] = [float(m) for m in match_list]
for k,v in out_dict.items():
out_dict[k] = list(v)
# print('len(%s) = %d' % (k, len(v)))
# print(out_dict)
assert len(out_dict['trg_str']) == len(out_dict['trg_present_flag'])
assert len(out_dict['pred_str']) == len(out_dict['present_flag']) \
== len(out_dict['valid_flag']) == len(out_dict['match_flag']) == len(out_dict['pred_score'])
with open(os.path.join(predict_save_path, title + '_detail', str(example_idx) + '.json'), 'w') as f_:
f_.write(json.dumps(out_dict))
progbar.update(epoch, example_idx, [('f_score@5_exact', np.average(score_dict['f_score@5_exact'])),
('f_score@5_soft', np.average(score_dict['f_score@5_soft'])),
('f_score@10_exact', np.average(score_dict['f_score@10_exact'])),
('f_score@10_soft', np.average(score_dict['f_score@10_soft'])),])
example_idx += 1
# print('#(f_score@5#oneword=-1)=%d, sum=%f' % (len(score_dict['f_score@5#oneword=-1']), sum(score_dict['f_score@5#oneword=-1'])))
# print('#(f_score@10#oneword=-1)=%d, sum=%f' % (len(score_dict['f_score@10#oneword=-1']), sum(score_dict['f_score@10#oneword=-1'])))
# print('#(f_score@5#oneword=1)=%d, sum=%f' % (len(score_dict['f_score@5#oneword=1']), sum(score_dict['f_score@5#oneword=1'])))
# print('#(f_score@10#oneword=1)=%d, sum=%f' % (len(score_dict['f_score@10#oneword=1']), sum(score_dict['f_score@10#oneword=1'])))
if predict_save_path:
# export scores. Each row is scores (precision, recall and f-score) of different way of filtering predictions (how many one-word predictions to keep)
with open(predict_save_path + os.path.sep + title + '_result.csv', 'w') as result_csv:
csv_lines = []
for mode in ["exact", "soft"]:
for topk in topk_range:
csv_line = ""
for k in score_names:
csv_line += ',%f' % np.average(score_dict['%s@%d_%s' % (k, topk, mode)])
csv_lines.append(csv_line + '\n')
result_csv.writelines(csv_lines)
# precision, recall, f_score = macro_averaged_score(precisionlist=score_dict['precision'], recalllist=score_dict['recall'])
# logging.info("Macro@5\n\t\tprecision %.4f\n\t\tmacro recall %.4f\n\t\tmacro fscore %.4f " % (np.average(score_dict['precision@5']), np.average(score_dict['recall@5']), np.average(score_dict['f1score@5'])))
# logging.info("Macro@10\n\t\tprecision %.4f\n\t\tmacro recall %.4f\n\t\tmacro fscore %.4f " % (np.average(score_dict['precision@10']), np.average(score_dict['recall@10']), np.average(score_dict['f1score@10'])))
# precision, recall, f_score = evaluate(true_seqs=target_all, pred_seqs=prediction_all, topn=5)
# logging.info("micro precision %.4f , micro recall %.4f, micro fscore %.4f " % (precision, recall, f_score))
for k,v in score_dict.items():
print('#(%s) = %d' % (k, len(v)))
return score_dict
def predict_beam_search(generator, data_loader, opt, title='', epoch=1, predict_save_path=None):
if predict_save_path:
logger = config.init_logging(title, predict_save_path + '/%s.log' % title, redirect_to_stdout=False)
else:
logger = config.init_logging(title, '', redirect_to_stdout=False)
progbar = Progbar(logger=logger, title=title, target=len(data_loader.dataset.examples), batch_size=data_loader.batch_size,
total_examples=len(data_loader.dataset.examples))
topk_range = [5, 10]
score_names = ['precision', 'recall', 'f_score']
example_idx = 0
score_dict = {} # {'precision@5':[],'recall@5':[],'f1score@5':[], 'precision@10':[],'recall@10':[],'f1score@10':[]}
for i, batch in enumerate(data_loader):
# if i > 5:
# break
one2many_batch, one2one_batch = batch
src_list, src_len, trg_list, _, trg_copy_target_list, src_oov_map_list, oov_list, src_str_list, trg_str_list = one2many_batch
if torch.cuda.is_available():
src_list = src_list.cuda()
src_oov_map_list = src_oov_map_list.cuda()
print("batch size - %s" % str(src_list.size(0)))
print("src size - %s" % str(src_list.size()))
print("target size - %s" % len(trg_copy_target_list))
pred_seq_list = generator.beam_search(src_list, src_len, src_oov_map_list, oov_list, opt.word2id)
'''
process each example in current batch
'''
for src, src_str, trg, trg_str_seqs, trg_copy, pred_seq, oov in zip(src_list, src_str_list, trg_list, trg_str_list, trg_copy_target_list, pred_seq_list, oov_list):
logger.info('====================== %d =========================' % (i))
print_out = ''
print_out += '[Source][%d]: %s \n' % (len(src_str), ' '.join(src_str))
src = src.cpu().data.numpy() if torch.cuda.is_available() else src.data.numpy()
print_out += '\nSource Input: \n %s\n' % (' '.join([opt.id2word[x] for x in src[:len(src_str) + 5]]))
print_out += 'Real Target String [%d] \n\t\t%s \n' % (len(trg_str_seqs), trg_str_seqs)
print_out += 'Real Target Input: \n\t\t%s \n' % str([[opt.id2word[x] for x in t] for t in trg])
print_out += 'Real Target Copy: \n\t\t%s \n' % str([[opt.id2word[x] if x < opt.vocab_size else oov[x - opt.vocab_size] for x in t] for t in trg_copy])
trg_str_is_present_flags, _ = if_present_duplicate_phrases(src_str, trg_str_seqs)
# ignore the cases that there's no present phrases
# if opt.must_appear_in_src and np.sum(trg_str_is_present_flags) == 0:
# logger.error('found no present targets')
# continue
print_out += '[GROUND-TRUTH] #(present)/#(all targets)=%d/%d\n' % (sum(trg_str_is_present_flags), len(trg_str_is_present_flags))
print_out += '\n'.join(['\t\t[%s]' % ' '.join(phrase) if is_present else '\t\t%s' % ' '.join(phrase) for phrase, is_present in zip(trg_str_seqs, trg_str_is_present_flags)])
print_out += '\noov_list: \n\t\t%s \n' % str(oov)
# 1st filtering
pred_is_valid_flags, processed_pred_seqs, processed_pred_str_seqs, processed_pred_score = process_predseqs(pred_seq, oov, opt.id2word, opt)
# 2nd filtering: if filter out phrases that don't appear in text, and keep unique ones after stemming
if opt.must_appear_in_src:
pred_is_present_flags, _ = if_present_duplicate_phrases(src_str, processed_pred_str_seqs)
filtered_trg_str_seqs = np.asarray(trg_str_seqs)[trg_str_is_present_flags]
            else:
                pred_is_present_flags = [True] * len(processed_pred_str_seqs)
                filtered_trg_str_seqs = np.asarray(trg_str_seqs)
valid_and_present = np.asarray(pred_is_valid_flags) * np.asarray(pred_is_present_flags)
match_list = get_match_result(true_seqs=filtered_trg_str_seqs, pred_seqs=processed_pred_str_seqs)
print_out += '[PREDICTION] #(valid)=%d, #(present)=%d, #(retained&present)=%d, #(all)=%d\n' % (sum(pred_is_valid_flags), sum(pred_is_present_flags), sum(valid_and_present), len(pred_seq))
print_out += ''
'''
Print and export predictions
'''
preds_out = ''
output_keywords = list(map(lambda x:' '.join(x),processed_pred_str_seqs))
return output_keywords
return []
def evaluate_greedy(model, data_loader, test_examples, opt):
model.eval()
logging.info('====================== Checking GPU Availability =========================')
if torch.cuda.is_available():
logging.info('Running on GPU!')
model.cuda()
else:
logging.info('Running on CPU!')
logging.info('====================== Start Predicting =========================')
progbar = Progbar(title='Testing', target=len(data_loader), batch_size=data_loader.batch_size,
total_examples=len(data_loader.dataset))
'''
Note here each batch only contains one data example, thus decoder_probs is flattened
'''
for i, (batch, example) in enumerate(zip(data_loader, test_examples)):
src = batch.src
logging.info('====================== %d =========================' % (i + 1))
logging.info('\nSource text: \n %s\n' % (' '.join([opt.id2word[wi] for wi in src.data.numpy()[0]])))
if torch.cuda.is_available():
src.cuda()
# trg = Variable(torch.from_numpy(np.zeros((src.size(0), opt.max_sent_length), dtype='int64')))
trg = Variable(torch.LongTensor([[opt.word2id[pykp.io.BOS_WORD]] * opt.max_sent_length]))
max_words_pred = model.greedy_predict(src, trg)
progbar.update(None, i, [])
sentence_pred = [opt.id2word[x] for x in max_words_pred]
sentence_real = example['trg_str']
if '</s>' in sentence_real:
index = sentence_real.index('</s>')
sentence_pred = sentence_pred[:index]
logging.info('\t\tPredicted : %s ' % (' '.join(sentence_pred)))
logging.info('\t\tReal : %s ' % (sentence_real))
def stem_word_list(word_list):
return [stemmer.stem(w.strip().lower()) for w in word_list]
def macro_averaged_score(precisionlist, recalllist):
precision = np.average(precisionlist)
recall = np.average(recalllist)
f_score = 0
if(precision or recall):
f_score = round((2 * (precision * recall)) / (precision + recall), 2)
return precision, recall, f_score
def get_match_result(true_seqs, pred_seqs, do_stem=True, type='exact'):
'''
:param true_seqs:
:param pred_seqs:
:param do_stem:
:param topn:
:param type: 'exact' or 'partial'
:return:
'''
micro_metrics = []
micro_matches = []
    # compute a match score for each predicted phrase against the ground truth
match_score = np.asarray([0.0] * len(pred_seqs), dtype='float32')
target_number = len(true_seqs)
predicted_number = len(pred_seqs)
metric_dict = {'target_number': target_number, 'prediction_number': predicted_number, 'correct_number': match_score}
# convert target index into string
if do_stem:
true_seqs = [stem_word_list(seq) for seq in true_seqs]
pred_seqs = [stem_word_list(seq) for seq in pred_seqs]
for pred_id, pred_seq in enumerate(pred_seqs):
if type == 'exact':
match_score[pred_id] = 0
for true_id, true_seq in enumerate(true_seqs):
match = True
if len(pred_seq) != len(true_seq):
continue
for pred_w, true_w in zip(pred_seq, true_seq):
                    # if any pair of corresponding words differs, the match fails
if pred_w != true_w:
match = False
break
# if every word in pred_seq matches one true_seq exactly, match succeeds
if match:
match_score[pred_id] = 1
break
elif type == 'partial':
max_similarity = 0.
pred_seq_set = set(pred_seq)
# use the jaccard coefficient as the degree of partial match
for true_id, true_seq in enumerate(true_seqs):
true_seq_set = set(true_seq)
jaccard = len(set.intersection(*[set(true_seq_set), set(pred_seq_set)])) / float(len(set.union(*[set(true_seq_set), set(pred_seq_set)])))
if jaccard > max_similarity:
max_similarity = jaccard
match_score[pred_id] = max_similarity
elif type == 'bleu':
# account for the match of subsequences, like n-gram-based (BLEU) or LCS-based
match_score[pred_id] = bleu(pred_seq, true_seqs, [0.1, 0.3, 0.6])
return match_score
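# --- Illustrative example (not part of the original file) ---
# Scores two predictions against one gold keyphrase: the first matches exactly
# after stemming, the second only gets partial (Jaccard) credit.
def _example_get_match_result():
    gold = [['neural', 'networks']]
    preds = [['neural', 'network'], ['recurrent', 'network']]
    exact = get_match_result(gold, preds, type='exact')      # array([1., 0.])
    partial = get_match_result(gold, preds, type='partial')  # Jaccard overlap per prediction
    return exact, partial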
def evaluate(match_list, predicted_list, true_list, topk=5):
if len(match_list) > topk:
match_list = match_list[:topk]
if len(predicted_list) > topk:
predicted_list = predicted_list[:topk]
# Micro-Averaged Method
micropk = float(sum(match_list)) / float(len(predicted_list)) if len(predicted_list) > 0 else 0.0
micrork = float(sum(match_list)) / float(len(true_list)) if len(true_list) > 0 else 0.0
if micropk + micrork > 0:
microf1 = float(2 * (micropk * micrork)) / (micropk + micrork)
else:
microf1 = 0.0
return micropk, micrork, microf1
def f1_score(prediction, ground_truth):
    # both prediction and ground_truth should be lists of words
common = Counter(prediction) & Counter(ground_truth)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction)
recall = 1.0 * num_same / len(ground_truth)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def self_redundancy(_input):
    # _input should be a list of lists of words
if len(_input) == 0:
return None
_len = len(_input)
scores = np.ones((_len, _len), dtype="float32") * -1.0
for i in range(_len):
for j in range(_len):
if scores[i][j] != -1:
continue
elif i == j:
scores[i][j] = 0.0
else:
f1 = f1_score(_input[i], _input[j])
scores[i][j] = f1
scores[j][i] = f1
res = np.max(scores, 1)
res = np.mean(res)
return res
| [
"torch.cuda.is_available",
"torch.LongTensor"
] | 0.4.0 | malarinv/seq2seq-keyphrase-pytorch | 14350477867bbaafe285d6ac0e7a814f4cda1bdf |
1.4 | import torch
from torch import Tensor
EPS = torch.tensor(1e-8)
@torch.jit.script
def dist_iou_ab(box_a: Tensor, box_b: Tensor, eps=EPS):
"""
Args:
box_a: tensor of shape [batch_size, boxes_a, 4]
box_b: tensor of shape [batch_size, boxes_b, 4]
gamma: float
eps: float
Original:
https://github.com/Zzh-tju/CIoU/blob/8995056b1e93b86d03c384f042514391b70e58e0/layers/functions/detection.py#L162
https://github.com/Zzh-tju/CIoU/blob/8995056b1e93b86d03c384f042514391b70e58e0/layers/box_utils.py#L82
"""
assert box_a.dim() == 3
assert box_b.dim() == 3
assert box_a.size(0) == box_b.size(0)
A, B = box_a.size(1), box_b.size(1)
    # broadcast both sets to the pairwise shape [batch, A, B, 4]
    box_a = box_a.unsqueeze(2).expand(-1, -1, B, -1)
    box_b = box_b.unsqueeze(1).expand(-1, A, -1, -1)
inter_yx0 = torch.max(box_a[..., :2], box_b[..., :2])
inter_yx1 = torch.min(box_a[..., 2:4], box_b[..., 2:4])
inter_hw = torch.clamp_min_(inter_yx1 - inter_yx0, 0)
inter_area = torch.prod(inter_hw, dim=-1)
# del inter_hw, inter_yx0, inter_yx1
hw_a = box_a[..., 2:4] - box_a[..., :2]
hw_b = box_b[..., 2:4] - box_b[..., :2]
area_a = torch.prod(hw_a, dim=-1)
area_b = torch.prod(hw_b, dim=-1)
union_area = area_a + area_b - inter_area
iou = inter_area / (union_area + eps)
# del inter_area, union_area, area_a, area_b, hw_a, hw_b
center_a = (box_a[..., :2] + box_a[..., 2:4]) / 2
center_b = (box_b[..., :2] + box_b[..., 2:4]) / 2
inter_diag = torch.pow(center_b - center_a, 2).sum(dim=-1)
clos_yx0 = torch.min(box_a[..., :2], box_b[..., :2])
clos_yx1 = torch.max(box_a[..., 2:4], box_b[..., 2:4])
clos_hw = torch.clamp_min_(clos_yx1 - clos_yx0, 0)
clos_diag = torch.pow(clos_hw, 2).sum(dim=-1)
# del clos_yx0, clos_yx1, clos_hw, center_a, center_b
dist = inter_diag / (clos_diag + eps)
return iou - dist ** 0.9
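# --- Illustrative usage sketch (not part of the original file) ---
# One box per batch in (y0, x0, y1, x1) order: identical boxes score close to
# 1, while separated boxes are penalised by the centre-distance term.
def _example_dist_iou():
    box_a = torch.tensor([[[0., 0., 10., 10.]]])  # [batch, boxes_a, 4]
    box_b = torch.tensor([[[0., 0., 10., 10.]]])  # [batch, boxes_b, 4]
    return dist_iou_ab(box_a, box_b)              # [1, 1, 1], ~1.0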
def cluster_nms_dist_iou(boxes: Tensor, scores: Tensor, iou_threshold=0.5, top_k=200):
assert boxes.dim() == 2
assert scores.dim() == 2
assert boxes.size(0) == scores.size(0)
scores, classes = torch.max(scores, dim=1)
# scores: [detections]
_, idx = scores.sort(descending=True)
idx = idx[:top_k]
# add batch dim
top_k_boxes = boxes[idx][None, ...]
# [1, top_k, top_k] -> [top_k, top_k]
iou = dist_iou_ab(top_k_boxes, top_k_boxes)[0]
iou = iou.triu_(diagonal=1)
best_iou = torch.zeros_like(idx)
iou_b = iou
for i in range(top_k):
iou_a = iou_b
best_iou, _ = torch.max(iou_b, dim=0)
# keep far away boxes
keep = (best_iou <= iou_threshold)[:, None].expand_as(iou_b)
iou_b = torch.where(keep, iou, torch.zeros_like(iou_b))
if iou_b.eq(iou_a).all():
break
idx = idx[best_iou <= iou_threshold]
return boxes[idx], scores[idx], classes[idx]
| [
"torch.prod",
"torch.min",
"torch.max",
"torch.clamp_min_",
"torch.tensor",
"torch.zeros_like",
"torch.pow"
] | 1.4.0 | dmitry-vorobiev/kaggle-global-wheat-detection | adf75b73f5955848488477c361c66f1b0510b2bb |
0.4 | # -*- coding: utf-8 -*-
"""
@author:XuMing([email protected])
@description:
"""
import os
import sys
import torch
import torch.nn as nn
from torch.utils.data.dataset import TensorDataset
sys.path.append("..")
from rater.datasets.criteo import Criteo
from rater.models.ctr.flen import FLEN
from rater.models.model import train_model
pwd_path = os.path.abspath(os.path.dirname(__file__))
def train(x_idx, x_value, label, features, out_type='binary'):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
X_idx_tensor = torch.LongTensor(x_idx).to(device)
X_value_tensor = torch.Tensor(x_value).to(device)
y_tensor = torch.Tensor(label).to(device)
y_tensor = y_tensor.reshape(-1, 1)
X = TensorDataset(X_idx_tensor, y_tensor)
model = FLEN(features.feature_size(), features.field_size(), features.feature_size(), field_ranges=features.field_range(),
out_type=out_type).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
model_path = os.path.join(pwd_path, 'flen_model.pt')
model, loss_history = train_model(model=model, model_path=model_path, dataset=X, loss_func=nn.BCELoss(),
optimizer=optimizer, device=device, val_size=0.2, batch_size=32, epochs=10)
print(loss_history)
if __name__ == '__main__':
# load criteo sample dataset
dataset = Criteo(n_samples=100)
features, X_idx, X_value, y, category_index, continuous_value = dataset.get_features(use_continuous_columns=True,
use_category_columns=True)
print(features.feature_size(), features.field_size())
print("X_idx[0], X_value[0], y[0] :\n", X_idx[0], X_value[0], y[0])
train(X_idx, X_value, y, features)
| [
"torch.utils.data.dataset.TensorDataset",
"torch.cuda.is_available",
"torch.LongTensor",
"torch.nn.BCELoss",
"torch.Tensor"
] | 0.4.1 | shibing624/rater | 8437dea8baf0137ab3c07dd19c5f2bb8c15b4435 |
0.4 | # -*- coding: utf-8 -*-
"""
@author:XuMing([email protected])
@description:
@reference: https://github.com/tkipf/pygcn; https://github.com/dawnranger/pytorch-AGNN
"""
from __future__ import division
from __future__ import print_function
import argparse
import time
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from rater.models.graph.agnn import AGNN
from rater.models.graph.reader import load_data, accuracy
def train():
t_total = time.time()
for epoch in range(args.epochs):
t = time.time()
model.train()
optimizer.zero_grad()
output = model(features, adj)
loss_train = F.nll_loss(output[idx_train], labels[idx_train])
acc_train = accuracy(output[idx_train], labels[idx_train])
loss_train.backward()
optimizer.step()
if not args.fastmode:
# Evaluate validation set performance separately,
# deactivates dropout during validation run.
model.eval()
output = model(features, adj)
loss_val = F.nll_loss(output[idx_val], labels[idx_val])
acc_val = accuracy(output[idx_val], labels[idx_val])
print('Epoch: {:04d}'.format(epoch + 1),
'loss_train: {:.4f}'.format(loss_train.item()),
'acc_train: {:.4f}'.format(acc_train.item()),
'loss_val: {:.4f}'.format(loss_val.item()),
'acc_val: {:.4f}'.format(acc_val.item()),
'time: {:.4f}s'.format(time.time() - t))
print("Optimization Finished!")
print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
def test():
model.eval()
output = model(features, adj)
loss_test = F.nll_loss(output[idx_test], labels[idx_test])
acc_test = accuracy(output[idx_test], labels[idx_test])
print("Test set results:",
"loss= {:.4f}".format(loss_test.item()),
"accuracy= {:.4f}".format(acc_test.item()))
if __name__ == "__main__":
# Training settings
parser = argparse.ArgumentParser()
parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables CUDA training.')
parser.add_argument('--fastmode', action='store_true', default=True, help='Validate during training pass.')
parser.add_argument('--seed', type=int, default=42, help='Random seed.')
parser.add_argument('--epochs', type=int, default=500, help='Number of epochs to train.')
parser.add_argument('--lr', type=float, default=0.01, help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-4, help='Weight decay (L2 loss on parameters).')
parser.add_argument('--hidden', type=int, default=16, help='Number of hidden units.')
parser.add_argument('--layers', type=int, default=3, help='Number of attention layers.')
parser.add_argument('--dropout_rate', type=float, default=0.5, help='Dropout rate (1 - keep probability).')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# Load data
adj, features, labels, idx_train, idx_val, idx_test = load_data()
# Model and optimizer
model = AGNN(nfeat=features.shape[1],
nhid=args.hidden,
nclass=labels.max() + 1,
nlayers=args.layers,
dropout_rate=args.dropout_rate)
# print(model)
optimizer = optim.Adam(model.parameters(),
lr=args.lr, weight_decay=args.weight_decay)
if args.cuda:
model.cuda()
features = features.cuda()
adj = adj.cuda()
labels = labels.cuda()
idx_train = idx_train.cuda()
idx_val = idx_val.cuda()
idx_test = idx_test.cuda()
features, adj, labels = Variable(features), Variable(adj), Variable(labels)
train()
test()
| [
"torch.cuda.manual_seed",
"torch.autograd.Variable",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.nn.functional.nll_loss"
] | 0.4.1 | shibing624/rater | 8437dea8baf0137ab3c07dd19c5f2bb8c15b4435 |
1.4 | # encoding: utf-8
"""
@author: liaoxingyu
@contact: [email protected]
"""
import torch
import torch.nn.functional as F
from fastreid.utils import comm
from fastreid.layers import GatherLayer
from .utils import concat_all_gather, euclidean_dist, normalize
def softmax_weights(dist, mask):
max_v = torch.max(dist * mask, dim=1, keepdim=True)[0]
diff = dist - max_v
Z = torch.sum(torch.exp(diff) * mask, dim=1, keepdim=True) + 1e-6 # avoid division by zero
W = torch.exp(diff) * mask / Z
return W
def hard_example_mining(dist_mat, is_pos, is_neg):
"""For each anchor, find the hardest positive and negative sample.
Args:
dist_mat: pair wise distance between samples, shape [N, M]
is_pos: positive index with shape [N, M]
is_neg: negative index with shape [N, M]
Returns:
dist_ap: pytorch Variable, distance(anchor, positive); shape [N]
dist_an: pytorch Variable, distance(anchor, negative); shape [N]
p_inds: pytorch LongTensor, with shape [N];
indices of selected hard positive samples; 0 <= p_inds[i] <= N - 1
n_inds: pytorch LongTensor, with shape [N];
indices of selected hard negative samples; 0 <= n_inds[i] <= N - 1
NOTE: Only consider the case in which all labels have same num of samples,
thus we can cope with all anchors in parallel.
"""
assert len(dist_mat.size()) == 2
N = dist_mat.size(0)
# `dist_ap` means distance(anchor, positive)
# both `dist_ap` and `relative_p_inds` with shape [N, 1]
dist_ap, relative_p_inds = torch.max(
dist_mat[is_pos].contiguous().view(N, -1), 1, keepdim=True)
# `dist_an` means distance(anchor, negative)
# both `dist_an` and `relative_n_inds` with shape [N, 1]
dist_an, relative_n_inds = torch.min(
dist_mat[is_neg].contiguous().view(N, -1), 1, keepdim=True)
# shape [N]
dist_ap = dist_ap.squeeze(1)
dist_an = dist_an.squeeze(1)
return dist_ap, dist_an
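# Note added for illustration (not part of the original repository file): a small commented example
# of the selection rule implemented by hard_example_mining. The numbers are made up, and the example
# assumes every anchor has the same number of positives and negatives, as required by the NOTE above.
#   dist_mat = torch.tensor([[0.1, 0.9, 0.4, 0.7],
#                            [0.8, 0.2, 0.3, 0.6]])
#   is_pos = torch.tensor([[True, False, True, False],
#                          [False, True, False, True]])
#   is_neg = ~is_pos
#   dist_ap, dist_an = hard_example_mining(dist_mat, is_pos, is_neg)
#   # dist_ap -> [0.4, 0.6]  (largest distance to a positive per anchor)
#   # dist_an -> [0.7, 0.3]  (smallest distance to a negative per anchor)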
def weighted_example_mining(dist_mat, is_pos, is_neg):
"""For each anchor, find the weighted positive and negative sample.
Args:
dist_mat: pytorch Variable, pair wise distance between samples, shape [N, N]
is_pos:
is_neg:
Returns:
dist_ap: pytorch Variable, distance(anchor, positive); shape [N]
dist_an: pytorch Variable, distance(anchor, negative); shape [N]
"""
assert len(dist_mat.size()) == 2
is_pos = is_pos.float()
is_neg = is_neg.float()
dist_ap = dist_mat * is_pos
dist_an = dist_mat * is_neg
weights_ap = softmax_weights(dist_ap, is_pos)
weights_an = softmax_weights(-dist_an, is_neg)
dist_ap = torch.sum(dist_ap * weights_ap, dim=1)
dist_an = torch.sum(dist_an * weights_an, dim=1)
return dist_ap, dist_an
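# Note added for illustration (not part of the original repository file): unlike the hard variant
# above, weighted_example_mining averages all positive (and negative) distances with softmax weights
# from softmax_weights. For a single anchor with positive distances [0.4, 0.1] and an all-ones mask,
# the weights are roughly [0.57, 0.43], giving dist_ap ~ 0.57 * 0.4 + 0.43 * 0.1 ~ 0.27 instead of
# the hard maximum 0.4; the negative side uses softmax_weights(-dist_an, is_neg), so closer negatives
# receive larger weights.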
def triplet_loss(embedding, targets, margin, norm_feat, hard_mining):
r"""Modified from Tong Xiao's open-reid (https://github.com/Cysu/open-reid).
Related Triplet Loss theory can be found in paper 'In Defense of the Triplet
Loss for Person Re-Identification'."""
if norm_feat: embedding = normalize(embedding, axis=-1)
# For distributed training, gather all features from different process.
if comm.get_world_size() > 1:
all_embedding = torch.cat(GatherLayer.apply(embedding), dim=0)
all_targets = concat_all_gather(targets)
else:
all_embedding = embedding
all_targets = targets
dist_mat = euclidean_dist(embedding, all_embedding)
N, M = dist_mat.size()
is_pos = targets.view(N, 1).expand(N, M).eq(all_targets.view(M, 1).expand(M, N).t())
is_neg = targets.view(N, 1).expand(N, M).ne(all_targets.view(M, 1).expand(M, N).t())
if hard_mining:
dist_ap, dist_an = hard_example_mining(dist_mat, is_pos, is_neg)
else:
dist_ap, dist_an = weighted_example_mining(dist_mat, is_pos, is_neg)
y = dist_an.new().resize_as_(dist_an).fill_(1)
if margin > 0:
loss = F.margin_ranking_loss(dist_an, dist_ap, y, margin=margin)
else:
loss = F.soft_margin_loss(dist_an - dist_ap, y)
# fmt: off
if loss == float('Inf'): loss = F.margin_ranking_loss(dist_an, dist_ap, y, margin=0.3)
# fmt: on
return loss
| [
"torch.max",
"torch.nn.functional.margin_ranking_loss",
"torch.nn.functional.soft_margin_loss",
"torch.exp",
"torch.sum"
] | 1.4.0 | tenghehan/reid_without_id | d1d0ff273b1ef19fc6da8cbbf210527779b37455 |
0.4 | # pylint: disable=invalid-name,no-self-use,protected-access
from collections import namedtuple
import os
import pytest
from flaky import flaky
from numpy.testing import assert_almost_equal
import torch
from allennlp.common.testing import ModelTestCase
from allennlp.training.metrics.wikitables_accuracy import SEMPRE_ABBREVIATIONS_PATH, SEMPRE_GRAMMAR_PATH
@pytest.mark.java
class WikiTablesMmlSemanticParserTest(ModelTestCase):
def setUp(self):
self.should_remove_sempre_abbreviations = not os.path.exists(SEMPRE_ABBREVIATIONS_PATH)
self.should_remove_sempre_grammar = not os.path.exists(SEMPRE_GRAMMAR_PATH)
# The model tests are run with respect to the module root, so check if abbreviations
# and grammar already exist there (since we want to clean up module root after test)
self.module_root_abbreviations_path = self.MODULE_ROOT / "data" / "abbreviations.tsv"
self.module_root_grammar_path = self.MODULE_ROOT / "data" / "grow.grammar"
self.should_remove_root_sempre_abbreviations = not os.path.exists(self.module_root_abbreviations_path)
self.should_remove_root_sempre_grammar = not os.path.exists(self.module_root_grammar_path)
super(WikiTablesMmlSemanticParserTest, self).setUp()
self.set_up_model(str(self.FIXTURES_ROOT / "semantic_parsing" / "wikitables" / "experiment.json"),
str(self.FIXTURES_ROOT / "data" / "wikitables" / "sample_data.examples"))
def tearDown(self):
super().tearDown()
# We don't want to leave generated files around just from running tests...
if self.should_remove_sempre_abbreviations and os.path.exists(SEMPRE_ABBREVIATIONS_PATH):
os.remove(SEMPRE_ABBREVIATIONS_PATH)
if self.should_remove_sempre_grammar and os.path.exists(SEMPRE_GRAMMAR_PATH):
os.remove(SEMPRE_GRAMMAR_PATH)
if self.should_remove_root_sempre_abbreviations and os.path.exists(self.module_root_abbreviations_path):
os.remove(self.module_root_abbreviations_path)
if self.should_remove_root_sempre_grammar and os.path.exists(self.module_root_grammar_path):
os.remove(self.module_root_grammar_path)
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
def test_mixture_no_features_model_can_train_save_and_load(self):
param_file = self.FIXTURES_ROOT / 'semantic_parsing' / 'wikitables' / 'experiment-mixture.json'
self.ensure_model_can_train_save_and_load(param_file)
@flaky
def test_elmo_no_features_can_train_save_and_load(self):
param_file = self.FIXTURES_ROOT / 'semantic_parsing' / 'wikitables' / 'experiment-elmo-no-features.json'
self.ensure_model_can_train_save_and_load(param_file, tolerance=1e-2)
def test_get_neighbor_indices(self):
worlds, num_entities = self.get_fake_worlds()
tensor = torch.LongTensor([])
neighbor_indices = self.model._get_neighbor_indices(worlds, num_entities, tensor)
# Checks for the correct shape meaning dimension 2 has size num_neighbors,
# padding of -1 is used, and correct neighbor indices.
assert_almost_equal(neighbor_indices.data.numpy(), [[[-1, -1],
[3, 4],
[3, 4],
[1, 2],
[1, 2]],
[[-1, -1],
[2, -1],
[1, -1],
[-1, -1],
[-1, -1]]])
def test_get_type_vector(self):
worlds, num_entities = self.get_fake_worlds()
tensor = torch.LongTensor([])
type_vector, _ = self.model._get_type_vector(worlds, num_entities, tensor)
        # Verify that both types are present and padding is used for non-existent entities.
assert_almost_equal(type_vector.data.numpy(), [[0, 1, 1, 3, 3],
[0, 1, 3, 0, 0]])
def test_get_linking_probabilities(self):
worlds, num_entities = self.get_fake_worlds()
# (batch_size, num_question_tokens, num_entities)
linking_scores = [[[-2, 1, 0, -3, 2],
[4, -1, 5, -3, 4]],
[[0, 1, 8, 10, 10],
[3, 2, -1, -2, 1]]]
linking_scores = torch.FloatTensor(linking_scores)
question_mask = torch.LongTensor([[1, 1], [1, 0]])
_, entity_type_dict = self.model._get_type_vector(worlds, num_entities, linking_scores)
# (batch_size, num_question_tokens, num_entities)
entity_probability = self.model._get_linking_probabilities(worlds, linking_scores, question_mask,
entity_type_dict)
# The following properties in entity_probability are tested for by true_probability:
# (1) It has all 0.0 probabilities when there is no question token, as seen for the
# second word in the second batch.
# (2) It has 0.0 probabilities when an entity is masked, as seen in the last two entities
# for the second batch instance.
# (3) The probabilities for entities of the same type with the same question token should
# sum to at most 1, but not necessarily 1, because some probability mass goes to the
# null entity. We have three entity types here, so each row should sum to at most 3,
# and that number will approach 3 as the unnormalized linking scores for each entity
# get higher.
true_probability = [[[0.1192029, 0.5761169, 0.2119416, 0.0058998, 0.8756006],
[0.9820138, 0.0024561, 0.9908675, 0.0008947, 0.9811352]],
[[0.5, 0.7310586, 0.9996647, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0]]]
assert_almost_equal(entity_probability.detach().cpu().numpy(), true_probability)
def get_fake_worlds(self):
# Generate a toy WikitablesWorld.
FakeTable = namedtuple('FakeTable', ['entities', 'neighbors'])
FakeWorld = namedtuple('FakeWorld', ['table_graph'])
entities = [['0', 'fb:cell.2010', 'fb:cell.2011', 'fb:row.row.year', 'fb:row.row.year2'],
['1', 'fb:cell.2012', 'fb:row.row.year']]
neighbors = [{'fb:cell.2010': ['fb:row.row.year', 'fb:row.row.year2'],
'fb:cell.2011': ['fb:row.row.year', 'fb:row.row.year2'],
'fb:row.row.year': ['fb:cell.2010', 'fb:cell.2011'],
'fb:row.row.year2': ['fb:cell.2010', 'fb:cell.2011'],
'0': [],
},
{'fb:cell.2012': ['fb:row.row.year'],
'fb:row.row.year': ['fb:cell.2012'],
'1': [],
}]
worlds = [FakeWorld(FakeTable(entity_list, entity2neighbors))
for entity_list, entity2neighbors in zip(entities, neighbors)]
num_entities = max([len(entity_list) for entity_list in entities])
return worlds, num_entities
| [
"torch.FloatTensor",
"torch.LongTensor"
] | 0.4.1 | csbhagav/allennlp | 4c99f8e82f7fd70c86652109bfca5282d470e981 |
1.8 | """Test cases for datahandling."""
import unittest
import torch
from dfadetect.datasets import AudioDataset
from dfadetect.utils import find_wav_files
from tests.utils import REAL_PATH, load_real, load_special
class TestAudioDataset(unittest.TestCase):
def test_loading_audio(self):
dataset = load_real()
# found all files
self.assertEqual(len(dataset), 5)
# returns sample rate
self.assertEqual(len(dataset[0]), 2)
def test_resampling(self):
new_rate = 24_000
dataset = load_real(sample_rate=new_rate)
for _, sample_rate in dataset:
self.assertEqual(sample_rate, new_rate)
def test_loading_audio_triming(self):
# trimmed
dataset = load_real()
trim_time = 0.
for waveform, _ in dataset:
trim_time += waveform.shape[1]
# not trimmed
dataset = load_real(trim=False)
orig_time = 0.
for waveform, _ in dataset:
orig_time += waveform.shape[1]
self.assertGreater(orig_time, trim_time)
def test_trimming_entire_file(self):
dataset = load_special()
# check that we do not trim entire file
for waveform, _sr in dataset:
self.assertGreater(waveform.size()[1], 0)
def test_phone_call(self):
dataset = load_special(phone_call=True)
for _waveform, sr in dataset:
self.assertEqual(sr, 8_000)
def test_phone_call_reassigned(self):
dataset = load_special()
for _waveform, sr in dataset:
self.assertEqual(sr, 16_000)
dataset.phone_call = True
for _waveform, sr in dataset:
self.assertEqual(sr, 8_000)
def test_list_of_paths(self):
ref = load_real()
paths = find_wav_files(REAL_PATH)
from_paths = AudioDataset(paths)
for (file_1, sr_1), (file_2, sr_2) in zip(ref, from_paths):
self.assertTrue(torch.allclose(file_1, file_2))
self.assertEqual(sr_1, sr_2)
if __name__ == "__main__":
unittest.main()
| [
"torch.allclose"
] | 1.8.1 | RUB-SysSec/WaveFake | d52d51b9ccdb0cec3f484e84b228791f06b955be |
1.1 | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch.utils import data
import numpy as np
from functools import reduce
from egg.zoo.objects_game.util import compute_binomial
import itertools
import os
import pathlib
class VectorsLoader:
def __init__(self,
perceptual_dimensions=[4, 4, 4, 4, 4],
n_distractors=1,
batch_size=32,
train_samples=128000,
validation_samples=4096,
test_samples=1024,
shuffle_train_data=False,
dump_data_folder=None,
load_data_path =None,
seed=None):
self.perceptual_dimensions = perceptual_dimensions
self._n_features = len(self.perceptual_dimensions)
self.n_distractors = n_distractors
self.batch_size = batch_size
self.train_samples = train_samples
self.validation_samples = validation_samples
self.test_samples = test_samples
self.shuffle_train_data = shuffle_train_data
self.load_data_path = load_data_path
self.dump_data_folder = pathlib.Path(dump_data_folder) if dump_data_folder is not None else None
seed = seed if seed else np.random.randint(0, 2 ** 31)
self.random_state = np.random.RandomState(seed)
@property
def n_features(self):
return self._n_features
@n_features.setter
def n_features(self, n_features):
self._n_features = n_features
def upd_cl_options(self, opts):
opts.perceptual_dimensions = self.perceptual_dimensions
opts.train_samples = self.train_samples
opts.validation_samples = self.validation_samples
opts.test_samples = self.test_samples
opts.n_distractors = self.n_distractors
def load_data(self, data_file):
data = np.load(data_file)
train, train_labels = data['train'], data['train_labels']
valid, valid_labels = data['valid'], data['valid_labels']
test, test_labels = data['test'], data['test_labels']
# train valid and test are of shape b_size X n_distractors+1 X n_features
self.train_samples = train.shape[0]
self.validation_samples = valid.shape[0]
self.test_samples = test.shape[0]
self.n_distractors = train.shape[1] - 1
self.perceptual_dimensions = [-1] * train.shape[-1]
self._n_features = len(self.perceptual_dimensions)
return (train, train_labels), (valid, valid_labels), (test, test_labels)
def _fill_split(self, all_vectors, n_samples, tuple_dict):
split_list = []
len_all_vectors = len(all_vectors)
tuple_dim = self.n_distractors+1
done = 0
while done < n_samples:
candidates_tuple = self.random_state.choice(len_all_vectors, replace=False, size=tuple_dim)
key = ''
for vector_idx in candidates_tuple:
key += f'{str(vector_idx)}-'
key = key[:-1]
if key not in tuple_dict:
tuple_dict[key] = True
possible_batch = all_vectors[candidates_tuple]
split_list.append(possible_batch)
done += 1
else:
continue
target_idxs = self.random_state.choice(self.n_distractors+1, n_samples)
return (np.array(split_list), target_idxs), tuple_dict
def generate_tuples(self, data):
data = np.array(data)
train_data, tuple_dict = self._fill_split(data, self.train_samples, {})
valid_data, tuple_dict = self._fill_split(data, self.validation_samples, tuple_dict)
test_data, _ = self._fill_split(data, self.test_samples, tuple_dict)
return train_data, valid_data, test_data
def collate(self, batch):
tuples, target_idxs = [elem[0] for elem in batch], [elem[1] for elem in batch]
receiver_input = np.reshape(tuples, (self.batch_size, self.n_distractors+1, -1))
labels = np.array(target_idxs)
targets = receiver_input[np.arange(self.batch_size), labels]
return torch.from_numpy(targets).float(), torch.from_numpy(labels).long(), torch.from_numpy(receiver_input).float()
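    # Note added for illustration (not part of the original repository file): a commented walk-through
    # of the shapes produced by collate, using hypothetical sizes. With batch_size=32, n_distractors=1
    # and perceptual_dimensions of length 5, each batch element holds 2 candidate vectors of length 5:
    #   targets        -> (32, 5)   float tensor, receiver_input[arange(32), labels]
    #   labels         -> (32,)     long tensor, position of the target among the candidates
    #   receiver_input -> (32, 2, 5) float tensor shown to the receiver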
def get_iterators(self):
if self.load_data_path:
train, valid, test = self.load_data(self.load_data_path)
        else:  # if load_data_path wasn't given, generate the tuples here
world_dim = reduce(lambda x, y: x*y, self.perceptual_dimensions)
possible_tuples = compute_binomial(world_dim, self.n_distractors+1)
list_of_dim = [range(1, elem+1) for elem in self.perceptual_dimensions]
all_vectors = list(itertools.product(*list_of_dim))
assert self.train_samples > 0 and self.validation_samples > 0 and self.test_samples > 0, 'Train size, validation size and test size must all be greater than 0'
            assert possible_tuples > self.train_samples + self.validation_samples + self.test_samples, 'Not enough data for requested split sizes. Reduce split sizes or increase perceptual_dimensions'
train, valid, test = self.generate_tuples(data=all_vectors)
assert self.train_samples >= self.batch_size and self.validation_samples >= self.batch_size and self.test_samples >= self.batch_size, 'Batch size cannot be smaller than any split size'
train_dataset = TupleDataset(*train)
valid_dataset = TupleDataset(*valid)
test_dataset = TupleDataset(*test)
train_it = data.DataLoader(train_dataset, batch_size=self.batch_size, collate_fn=self.collate, drop_last=True, shuffle=self.shuffle_train_data)
validation_it = data.DataLoader(valid_dataset, batch_size=self.batch_size, collate_fn=self.collate, drop_last=True)
test_it = data.DataLoader(test_dataset, batch_size=self.batch_size, collate_fn=self.collate, drop_last=True)
if self.dump_data_folder:
self.dump_data_folder.mkdir(exist_ok=True)
path = self.dump_data_folder / f'{self.perceptual_dimensions}_{self.n_distractors}_distractors'
np.savez_compressed(path,
train=train[0],
train_labels=train[1],
valid=valid[0],
valid_labels=valid[1],
test=test[0],
test_labels=test[1],
n_distractors=self.n_distractors)
return train_it, validation_it, test_it
class TupleDataset(data.Dataset):
def __init__(self, tuples, target_idxs):
self.list_of_tuples = tuples
self.target_idxs = target_idxs
def __len__(self):
return len(self.list_of_tuples)
def __getitem__(self, idx):
if idx < 0 or idx >= len(self.list_of_tuples):
raise RuntimeError('Accessing dataset through wrong index: < 0 or >= max_len')
return self.list_of_tuples[idx], self.target_idxs[idx]
| [
"torch.from_numpy"
] | 1.1.0 | Shawn-Guo-CN/EGG | 0a5b258108e2cd1c873d7f67e8c92551bb3d809c |
1.0 | import sys
import pytest
import numpy as np
import torch
from numpy.testing import assert_
sys.path.append("../../../")
from pycroscopy.learn import Trainer, models
def assert_weights_equal(m1, m2):
eq_w = []
for p1, p2 in zip(m1.values(), m2.values()):
eq_w.append(np.array_equal(
p1.detach().cpu().numpy(),
p2.detach().cpu().numpy()))
return all(eq_w)
@pytest.mark.parametrize("dim, size", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])
def test_trainer(dim, size):
# Initialize a model
in_dim = (1, *size)
model = models.AutoEncoder(
in_dim, layers_per_block=[1, 1], nfilters=2)
weights_before = model.state_dict()
# Create dummy train set
X_train = torch.randn(5, *in_dim)
# Initialize trainer
t = Trainer(model, X_train, X_train, batch_size=2)
# train and compare model params before and after
t.fit(num_epochs=2)
weights_after = model.state_dict()
assert_(not assert_weights_equal(weights_before, weights_after))
@pytest.mark.parametrize("dim, size", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])
def test_trainer_determenism(dim, size):
in_dim = (1, *size)
# Create dummy train set
torch.manual_seed(0)
X_train = torch.randn(5, *in_dim)
# Initialize a model
model1 = models.AutoEncoder(
in_dim, layers_per_block=[1, 1], nfilters=2,
upsampling_mode="nearest")
# Initialize trainer
t = Trainer(model1, X_train, X_train, batch_size=2)
# train
t.fit(num_epochs=4)
# Reininitiaize model and train again
torch.manual_seed(0)
X_train = torch.randn(5, *in_dim)
model2 = models.AutoEncoder(
in_dim, layers_per_block=[1, 1], nfilters=2,
upsampling_mode="nearest")
t = Trainer(model2, X_train, X_train, batch_size=2)
t.fit(num_epochs=4)
assert_(assert_weights_equal(model1.state_dict(), model2.state_dict()))
| [
"torch.manual_seed",
"torch.randn"
] | 1.0.0 | itsalexis962/pycroscopy | 8a6557408ffdc332cef102616be16e26a396532f |
1.0 | import logging
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchgeometry as tgm
logger = logging.getLogger(__name__)
class TestIntegrationFocalLoss:
# optimization
thresh = 1e-1
lr = 1e-3
num_iterations = 1000
num_classes = 2
# focal loss
alpha = 2.0
gamma = 2.0
def generate_sample(self, base_target, std_val=0.1):
target = base_target.float() / base_target.max()
noise = std_val * torch.rand(1, 1, 6, 5)
return target + noise
@staticmethod
def init_weights(m):
if isinstance(m, nn.Conv2d):
torch.nn.init.xavier_uniform_(m.weight)
def test_conv2d_relu(self):
# we generate base sample
target = torch.LongTensor(1, 6, 5).fill_(0)
for i in range(1, self.num_classes):
target[..., i:-i, i:-i] = i
m = nn.Sequential(
nn.Conv2d(1, self.num_classes, kernel_size=3, padding=1),
nn.ReLU(True),
)
m.apply(self.init_weights)
optimizer = optim.Adam(m.parameters(), lr=self.lr)
criterion = tgm.losses.FocalLoss(
alpha=self.alpha, gamma=self.gamma, reduction='mean')
# NOTE: uncomment to compare against vanilla cross entropy
# criterion = nn.CrossEntropyLoss()
for iter_id in range(self.num_iterations):
sample = self.generate_sample(target)
output = m(sample)
loss = criterion(output, target)
logger.debug("Loss: {}".format(loss.item()))
optimizer.zero_grad()
loss.backward()
optimizer.step()
sample = self.generate_sample(target)
output_argmax = torch.argmax(m(sample), dim=1)
logger.debug("Output argmax: \n{}".format(output_argmax))
# TODO(edgar): replace by IoU or find a more stable solution
# for this test. The issue is that depending on
# the seed to initialize the weights affects the
# final results and slows down the convergence of
# the algorithm.
val = F.mse_loss(output_argmax.float(), target.float())
if not val.item() < self.thresh:
pytest.xfail("Wrong seed or initial weight values.")
| [
"torch.rand",
"torch.nn.init.xavier_uniform_",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.LongTensor"
] | 1.0.0 | fkluger/torchgeometry | 5f1a4dc8ff3647a60901b79aa90a4e799829a7a2 |
1.0 | from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from .one_hot import one_hot
# based on:
# https://github.com/kevinzakka/pytorch-goodies/blob/master/losses.py
class TverskyLoss(nn.Module):
r"""Criterion that computes Tversky Coeficient loss.
According to [1], we compute the Tversky Coefficient as follows:
.. math::
\text{S}(P, G, \alpha; \beta) =
          \frac{|PG|}{|PG| + \alpha |P \setminus G| + \beta |G \setminus P|}
where:
- :math:`P` and :math:`G` are the predicted and ground truth binary
labels.
- :math:`\alpha` and :math:`\beta` control the magnitude of the
penalties for FPs and FNs, respectively.
Notes:
- :math:`\alpha = \beta = 0.5` => dice coeff
- :math:`\alpha = \beta = 1` => tanimoto coeff
- :math:`\alpha + \beta = 1` => F beta coeff
Shape:
- Input: :math:`(N, C, H, W)` where C = number of classes.
- Target: :math:`(N, H, W)` where each value is
:math:`0 ≤ targets[i] ≤ C−1`.
Examples:
>>> N = 5 # num_classes
>>> loss = tgm.losses.TverskyLoss(alpha=0.5, beta=0.5)
>>> input = torch.randn(1, N, 3, 5, requires_grad=True)
>>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)
>>> output = loss(input, target)
>>> output.backward()
References:
[1]: https://arxiv.org/abs/1706.05721
"""
def __init__(self, alpha, beta) -> None:
super(TverskyLoss, self).__init__()
self.alpha = alpha
self.beta = beta
self.eps = 1e-6
def forward(
self,
input: torch.Tensor,
target: torch.Tensor) -> torch.Tensor:
if not torch.is_tensor(input):
raise TypeError("Input type is not a torch.Tensor. Got {}"
.format(type(input)))
if not len(input.shape) == 4:
raise ValueError("Invalid input shape, we expect BxNxHxW. Got: {}"
.format(input.shape))
if not input.shape[-2:] == target.shape[-2:]:
raise ValueError("input and target shapes must be the same. Got: {}"
.format(input.shape, input.shape))
if not input.device == target.device:
raise ValueError(
"input and target must be in the same device. Got: {}" .format(
input.device, target.device))
# compute softmax over the classes axis
input_soft = F.softmax(input, dim=1)
# create the labels one hot tensor
target_one_hot = one_hot(target, num_classes=input.shape[1],
device=input.device, dtype=input.dtype)
# compute the actual dice score
dims = (1, 2, 3)
intersection = torch.sum(input_soft * target_one_hot, dims)
fps = torch.sum(input_soft * (1. - target_one_hot), dims)
fns = torch.sum((1. - input_soft) * target_one_hot, dims)
numerator = intersection
denominator = intersection + self.alpha * fps + self.beta * fns
tversky_loss = numerator / (denominator + self.eps)
return torch.mean(1. - tversky_loss)
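# Note added for illustration (not part of the original library file): a commented sanity check of the
# docstring's claim that alpha = beta = 0.5 recovers the Dice coefficient. Writing the intersection as
# I = |PG|, the false positives as FP = |P \ G| and the false negatives as FN = |G \ P|:
#   I / (I + 0.5 * FP + 0.5 * FN) = 2 * I / (2 * I + FP + FN) = 2 * |PG| / (|P| + |G|),
# which is the usual (soft) Dice score, so the returned mean(1 - tversky_loss) behaves like a Dice
# loss in that setting.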
######################
# functional interface
######################
def tversky_loss(
input: torch.Tensor,
target: torch.Tensor,
alpha,
beta) -> torch.Tensor:
r"""Function that computes Tversky loss.
See :class:`~torchgeometry.losses.TverskyLoss` for details.
"""
return TverskyLoss(alpha, beta)(input, target)
| [
"torch.sum",
"torch.is_tensor",
"torch.mean",
"torch.nn.functional.softmax"
] | 1.0.0 | fkluger/torchgeometry | 5f1a4dc8ff3647a60901b79aa90a4e799829a7a2 |
1.6 | import torch
from torch.optim import SGD
from d3rlpy.models.torch.v_functions import create_value_function
from d3rlpy.models.torch.policies import squash_action, create_normal_policy
from d3rlpy.models.torch.policies import create_categorical_policy
from .utility import torch_api, train_api, eval_api
from .utility import compute_augemtation_mean
from .base import TorchImplBase
class AWRImpl(TorchImplBase):
def __init__(self, observation_shape, action_size, actor_learning_rate,
critic_learning_rate, momentum, use_batch_norm, use_gpu,
scaler, augmentation, n_augmentations, encoder_params):
self.observation_shape = observation_shape
self.action_size = action_size
self.actor_learning_rate = actor_learning_rate
self.critic_learning_rate = critic_learning_rate
self.use_batch_norm = use_batch_norm
self.momentum = momentum
self.scaler = scaler
self.augmentation = augmentation
self.n_augmentations = n_augmentations
self.encoder_params = encoder_params
self.use_gpu = use_gpu
def build(self):
# setup torch models
self._build_critic()
self._build_actor()
if self.use_gpu:
self.to_gpu(self.use_gpu)
else:
self.to_cpu()
# setup optimizer after the parameters move to GPU
self._build_critic_optim()
self._build_actor_optim()
def _build_critic(self):
self.v_func = create_value_function(self.observation_shape,
use_batch_norm=self.use_batch_norm,
encoder_params=self.encoder_params)
def _build_critic_optim(self):
self.critic_optim = SGD(self.v_func.parameters(),
lr=self.critic_learning_rate,
momentum=self.momentum)
def _build_actor(self):
self.policy = create_normal_policy(self.observation_shape,
self.action_size,
self.use_batch_norm,
encoder_params=self.encoder_params)
def _build_actor_optim(self):
self.actor_optim = SGD(self.policy.parameters(),
lr=self.actor_learning_rate,
momentum=self.momentum)
@train_api
@torch_api(scaler_targets=['observation'])
def update_critic(self, observation, value):
loss = compute_augemtation_mean(self.augmentation,
self.n_augmentations,
self._compute_critic_loss, {
'observation': observation,
'value': value
}, ['observation'])
self.critic_optim.zero_grad()
loss.backward()
self.critic_optim.step()
return loss.cpu().detach().numpy()
def _compute_critic_loss(self, observation, value):
return self.v_func.compute_error(observation, value)
@train_api
@torch_api(scaler_targets=['observation'])
def update_actor(self, observation, action, weight):
loss = compute_augemtation_mean(self.augmentation,
self.n_augmentations,
self._compute_actor_loss, {
'observation': observation,
'action': action,
'weight': weight
}, ['observation'])
self.actor_optim.zero_grad()
loss.backward()
self.actor_optim.step()
return loss.cpu().detach().numpy()
def _compute_actor_loss(self, observation, action, weight):
dist = self.policy.dist(observation)
# unnormalize action via inverse tanh function
unnormalized_action = torch.atanh(action.clamp(-0.999999, 0.999999))
# compute log probability
_, log_probs = squash_action(dist, unnormalized_action)
return -(weight * log_probs).mean()
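    # Note added for illustration (not part of the original repository file): the buffer stores
    # tanh-squashed actions, so atanh (with clamping for numerical safety) maps them back to the
    # pre-squash space before squash_action evaluates their log-probabilities. The `weight` argument
    # is computed outside this class; in the AWR paper it is an exponentiated, clipped advantage such
    # as exp(advantage / beta), but that is an assumption about the caller and is not shown here.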
def _predict_best_action(self, x):
return self.policy.best_action(x)
@eval_api
@torch_api(scaler_targets=['x'])
def predict_value(self, x, *args, **kwargs):
with torch.no_grad():
return self.v_func(x).view(-1).cpu().detach().numpy()
@eval_api
@torch_api(scaler_targets=['x'])
def sample_action(self, x):
with torch.no_grad():
return self.policy.sample(x).cpu().detach().numpy()
class DiscreteAWRImpl(AWRImpl):
def _build_actor(self):
self.policy = create_categorical_policy(
self.observation_shape,
self.action_size,
self.use_batch_norm,
encoder_params=self.encoder_params)
def _compute_actor_loss(self, observation, action, weight):
dist = self.policy.dist(observation)
log_probs = dist.log_prob(action).view(observation.shape[0], -1)
return -(weight * log_probs.sum(dim=1, keepdims=True)).mean()
| [
"torch.no_grad"
] | 1.6.0 | DenDen047/d3rlpy | 6184518d52f961ba6ca9f045761f810706110aa7 |
1.4 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from contextlib import contextmanager, suppress
from copy import copy, deepcopy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.plugins import ParallelPlugin
from pytorch_lightning.trainer.supporters import TensorRunningAccum
from pytorch_lightning.utilities import _TPU_AVAILABLE, AMPType, DeviceType
from pytorch_lightning.utilities.distributed import rank_zero_info
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.finite_checks import detect_nan_parameters
from pytorch_lightning.utilities.grads import grad_norm
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.parsing import AttributeDict
from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature
from pytorch_lightning.utilities.warnings import WarningCache
class TrainLoop:
def __init__(
self,
trainer,
multiple_trainloader_mode: str,
max_epochs: Optional[int],
min_epochs: Optional[int],
max_steps: Optional[int],
min_steps: Optional[int],
num_sanity_val_steps: int,
):
self.trainer = trainer
self.accumulated_loss = None
self.warning_cache = WarningCache()
self._teardown_already_run = False
self.running_loss = TensorRunningAccum(window_length=20)
self._multiple_trainloader_mode = multiple_trainloader_mode
self._skip_backward = False
self.trainer._multiple_trainloader_mode = multiple_trainloader_mode
self._optimizer_freq_cumsum = None
self.global_step = 0
self.current_epoch = 0
self.trainer.should_stop = False
self.total_batch_idx = 0
self.batch_idx = 0
self.trainer.num_training_batches = 0
self.trainer.train_dataloader = None
# If neither max_epochs or max_steps is set, then use existing default of max_epochs = 1000
self.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs
# If neither min_epochs or min_steps is set, then use existing default of min_epochs = 1
self.min_epochs = 1 if (min_epochs is None and min_steps is None) else min_epochs
self.max_steps = max_steps
self.min_steps = min_steps
if num_sanity_val_steps == -1:
self.trainer.num_sanity_val_steps = float("inf")
else:
self.trainer.num_sanity_val_steps = num_sanity_val_steps
@property
def num_optimizers(self):
num_optimizers = len(self.get_optimizers_iterable())
return num_optimizers
@property
def optimizer_freq_cumsum(self):
if self._optimizer_freq_cumsum is None:
self._optimizer_freq_cumsum = np.cumsum(self.trainer.optimizer_frequencies)
return self._optimizer_freq_cumsum
def should_skip_training(self) -> bool:
should_by_max_steps = self.max_steps is not None and self.global_step >= self.max_steps
should_by_epoch = self.max_epochs is not None and self.current_epoch >= self.max_epochs
return should_by_max_steps or should_by_epoch or self.trainer.num_training_batches == 0
def on_train_start(self):
# hook
self.trainer.call_hook("on_train_start")
def on_train_end(self):
if self._teardown_already_run:
return
self._teardown_already_run = True
# trigger checkpoint check. need to temporarily decrease the global step to avoid saving duplicates
# when a checkpoint was saved at the last step
self.global_step -= 1
self.check_checkpoint_callback(should_update=True, is_last=True)
self.global_step += 1
# hook
self.trainer.call_hook("on_train_end")
# todo: TPU 8 cores hangs in flush with TensorBoard. Might do for all loggers.
# It might be related to xla tensors blocked when moving the cpu
# kill loggers
if self.trainer.logger is not None:
self.trainer.logger.finalize("success")
# summarize profile results
self.trainer.profiler.describe()
# give accelerators a chance to finish
self.trainer.accelerator.on_train_end()
# reset bookkeeping
self.trainer.state.stage = None
def check_checkpoint_callback(self, should_update, is_last=False):
# TODO bake this logic into the ModelCheckpoint callback
if should_update and self.trainer.checkpoint_connector.has_trained:
callbacks = self.trainer.checkpoint_callbacks
if is_last and any(cb.save_last and cb.verbose for cb in callbacks):
rank_zero_info("Saving latest checkpoint...")
model = self.trainer.lightning_module
for cb in callbacks:
cb.on_validation_end(self.trainer, model)
def on_train_epoch_start(self, epoch):
# update training progress in trainer
self.current_epoch = epoch
model = self.trainer.lightning_module
# reset train dataloader
if epoch != 0 and self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_train_dataloader(model)
# todo: specify the possible exception
with suppress(Exception):
# set seed for distributed sampler (enables shuffling for each epoch)
self.trainer.train_dataloader.sampler.set_epoch(epoch)
# changing gradient according accumulation_scheduler
self.trainer.accumulation_scheduler.on_train_epoch_start(self.trainer, self.trainer.lightning_module)
# stores accumulated grad fractions per batch
self.accumulated_loss = TensorRunningAccum(window_length=self.trainer.accumulate_grad_batches)
# hook
self.trainer.call_hook("on_epoch_start")
self.trainer.call_hook("on_train_epoch_start")
def on_train_batch_end(self, epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx):
batch_end_outputs = [opt_idx_out for opt_idx_out in batch_end_outputs if len(opt_idx_out)]
processed_batch_end_outputs = TrainLoop._prepare_outputs(batch_end_outputs, batch_mode=True)
# hook
self.trainer.call_hook('on_train_batch_end', processed_batch_end_outputs, batch, batch_idx, dataloader_idx)
self.trainer.call_hook('on_batch_end')
# figure out what to track for epoch end
self.track_epoch_end_reduce_metrics(epoch_output, batch_end_outputs)
# reset batch logger internals
self.trainer.logger_connector.on_train_batch_end()
def reset_train_val_dataloaders(self, model) -> None:
"""
Resets train and val dataloaders if none are attached to the trainer.
The val dataloader must be initialized before training loop starts, as the training loop
inspects the val dataloader to determine whether to run the evaluation loop.
"""
if self.trainer.train_dataloader is None:
self.trainer.reset_train_dataloader(model)
if self.trainer.val_dataloaders is None:
self.trainer.reset_val_dataloader(model)
def track_epoch_end_reduce_metrics(self, epoch_output, batch_end_outputs):
hook_overridden = self._should_add_batch_output_to_epoch_output()
# track the outputs to reduce at the end of the epoch
for opt_idx, opt_outputs in enumerate(batch_end_outputs):
sample_output = opt_outputs[-1]
# decide if we need to reduce at the end of the epoch automatically
auto_reduce_tng_result = isinstance(sample_output, Result) and sample_output.should_reduce_on_epoch_end
# only track when a) it needs to be autoreduced OR b) the user wants to manually reduce on epoch end
if not (hook_overridden or auto_reduce_tng_result):
continue
# with 1 step (no tbptt) don't use a sequence at epoch end
if isinstance(opt_outputs, list) and len(opt_outputs) == 1 and not isinstance(opt_outputs[0], Result):
opt_outputs = opt_outputs[0]
epoch_output[opt_idx].append(opt_outputs)
def _should_add_batch_output_to_epoch_output(self) -> bool:
# We add to the epoch outputs if
# 1. The model defines training_epoch_end OR
# 2. The model overrides on_train_epoch_end which has `outputs` in the signature
# TODO: in v1.5 this only needs to check if training_epoch_end is overridden
lightning_module = self.trainer.lightning_module
if is_overridden("training_epoch_end", model=lightning_module):
return True
if is_overridden("on_train_epoch_end", model=lightning_module):
model_hook_fx = getattr(lightning_module, "on_train_epoch_end")
if is_param_in_hook_signature(model_hook_fx, "outputs"):
return True
return False
def get_optimizers_iterable(self, batch_idx=None):
"""
Generates an iterable with (idx, optimizer) for each optimizer.
"""
if not self.trainer.optimizer_frequencies:
# call training_step once per optimizer
return list(enumerate(self.trainer.optimizers))
if batch_idx is None:
batch_idx = self.total_batch_idx
optimizers_loop_length = self.optimizer_freq_cumsum[-1]
current_place_in_loop = batch_idx % optimizers_loop_length
        # find optimizer index by looking for the first {item > current_place} in the cumsum list
opt_idx = np.argmax(self.optimizer_freq_cumsum > current_place_in_loop)
return [[opt_idx, self.trainer.optimizers[opt_idx]]]
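    # Note added for illustration (not part of the original source file): a commented example of the
    # frequency-based cycling above, with hypothetical values. With optimizer_frequencies = [2, 1] the
    # cumulative sum is [2, 3], so the loop length is 3: batch indices 0 and 1 fall before the first
    # cumsum entry and select optimizer 0, batch index 2 selects optimizer 1, and the pattern repeats
    # every 3 batches.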
def on_after_backward(self, training_step_output, batch_idx, untouched_loss):
training_step_output.detach()
# insert after step hook
self.trainer.call_hook("on_after_backward")
# when in dev debugging track the losses
self.trainer.dev_debugger.track_train_loss_history(batch_idx, untouched_loss.detach())
def _check_training_step_output(self, training_step_output):
if isinstance(training_step_output, torch.Tensor) and not self.trainer.lightning_module.automatic_optimization:
if training_step_output.grad_fn is None:
# TODO: Find why - RuntimeError: Expected to mark a variable ready only once ...
raise MisconfigurationException("In manual optimization, `training_step` should not return a Tensor")
def training_step(self, split_batch, batch_idx, opt_idx, hiddens):
# give the PL module a result for logging
model_ref = self.trainer.lightning_module
with self.trainer.profiler.profile("model_forward"):
step_kwargs = self._build_kwargs(split_batch, batch_idx, opt_idx, hiddens)
# manually capture logged metrics
model_ref._current_fx_name = 'training_step'
model_ref._results = Result()
with self.trainer.profiler.profile("training_step"):
training_step_output = self.trainer.accelerator.training_step(step_kwargs)
self.trainer.accelerator.post_training_step()
self.trainer.logger_connector.cache_logged_metrics()
self._check_training_step_output(training_step_output)
training_step_output = self.trainer.call_hook("training_step_end", training_step_output)
training_step_output_for_epoch_end, training_step_output = self._process_training_step_output(
training_step_output, split_batch
)
if training_step_output_for_epoch_end is None:
return
# enable empty loss when using manual opt
closure_loss = None
untouched_loss = None
if self.trainer.lightning_module.automatic_optimization:
# accumulate loss. if accumulate_grad_batches==1, no effect
closure_loss = training_step_output.minimize / self.trainer.accumulate_grad_batches
# the loss will get scaled for amp. avoid any modifications to it
untouched_loss = closure_loss.detach().clone()
# result
result = AttributeDict(
closure_loss=closure_loss,
loss=untouched_loss,
training_step_output=training_step_output,
training_step_output_for_epoch_end=training_step_output_for_epoch_end,
)
return result
def _process_training_step_output(self, training_step_output, split_batch):
training_step_output_for_epoch_end = training_step_output
# enable validation_step return None
if training_step_output_for_epoch_end is None:
return None, None
result = self.trainer.lightning_module._results
loss = None
hiddens = None
result["extra"] = {}
# handle dict return
if isinstance(training_step_output, dict):
loss = training_step_output.pop("loss", None)
hiddens = training_step_output.pop("hiddens", None)
if hiddens is not None:
hiddens = hiddens.detach()
result["extra"] = training_step_output
# handle scalar return
elif isinstance(training_step_output, torch.Tensor):
loss = training_step_output
# map to results under the hood
result.minimize = loss
self.trainer.hiddens = hiddens
# track batch for manual reduction with result
result.track_batch_size(len(split_batch))
# track metrics without grads for epoch reduction
training_step_output_for_epoch_end = copy(result)
training_step_output_for_epoch_end = training_step_output_for_epoch_end.detach()
if self.trainer.move_metrics_to_cpu:
training_step_output_for_epoch_end = training_step_output_for_epoch_end.cpu()
return training_step_output_for_epoch_end, result
@staticmethod
def _prepare_outputs(
outputs: List[List[List[Result]]],
batch_mode: bool,
) -> Union[List[List[List[Dict]]], List[List[Dict]], List[Dict], Dict]:
"""
Extract required information from batch or epoch end results.
Args:
outputs: A 3-dimensional list of ``Result`` objects with dimensions:
[optimizer outs][batch outs][tbptt steps].
batch_mode: If True, ignore the batch output dimension.
Returns:
The cleaned outputs with ``Result`` objects converted to dictionaries. All list dimensions of size one will
be collapsed.
"""
processed_outputs = []
for opt_outputs in outputs:
# handle an edge case where an optimizer output is the empty list
if len(opt_outputs) == 0:
continue
processed_batch_outputs = []
if batch_mode:
opt_outputs = [opt_outputs]
for batch_outputs in opt_outputs:
processed_tbptt_outputs = []
for tbptt_output in batch_outputs:
out = tbptt_output.extra
out['loss'] = tbptt_output.minimize
processed_tbptt_outputs.append(out)
# if there was only one tbptt step then we can collapse that dimension
if len(processed_tbptt_outputs) == 1:
processed_tbptt_outputs = processed_tbptt_outputs[0]
processed_batch_outputs.append(processed_tbptt_outputs)
# batch_outputs should be just one dict (or a list of dicts if using tbptt) per optimizer
if batch_mode:
processed_batch_outputs = processed_batch_outputs[0]
processed_outputs.append(processed_batch_outputs)
# if there is only one optimiser then we collapse that dimension
if len(processed_outputs) == 1:
processed_outputs = processed_outputs[0]
return processed_outputs
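    # Note added for illustration (not part of the original source file): a commented example of the
    # collapsing performed above, with hypothetical sizes. Given 2 optimizers, 3 batches and 1 tbptt
    # step, the epoch-mode input is a [2][3][1] nested list of Result objects and the output is a
    # [2][3] nested list of dicts (the size-one tbptt dimension is collapsed). With a single optimizer
    # and a single tbptt step, batch_mode=True reduces one batch's outputs all the way to a plain dict.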
def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure):
model_ref = self.trainer.lightning_module
is_lbfgs = isinstance(optimizer, torch.optim.LBFGS)
using_native_amp = self.trainer.amp_backend == AMPType.NATIVE
# native amp + lbfgs is a no go right now
if using_native_amp and is_lbfgs:
raise MisconfigurationException(
'native PyTorch amp and lbfgs are not compatible.'
' To request, please file a Github issue in PyTorch and tag @mcarilli'
)
# wraps into LightningOptimizer only for running step
optimizer = LightningOptimizer._to_lightning_optimizer(optimizer, self.trainer, opt_idx)
# model hook
model_ref.optimizer_step(
self.trainer.current_epoch,
batch_idx,
optimizer,
opt_idx,
train_step_and_backward_closure,
on_tpu=self.trainer._device_type == DeviceType.TPU and _TPU_AVAILABLE,
using_native_amp=using_native_amp,
using_lbfgs=is_lbfgs,
)
def on_before_zero_grad(self, optimizer):
self.trainer.call_hook('on_before_zero_grad', optimizer)
def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):
self.trainer.accelerator.optimizer_zero_grad(self.trainer.current_epoch, batch_idx, optimizer, opt_idx)
def track_and_norm_grad(self, optimizer) -> dict:
# track gradient norms
grad_norm_dict = self._track_gradient_norm()
# clip gradients
self.trainer.accelerator.clip_gradients(
optimizer, self.trainer.gradient_clip_val, gradient_clip_algorithm=self.trainer.gradient_clip_algorithm
)
return grad_norm_dict
def _track_gradient_norm(self):
grad_norm_dict = {}
if (self.global_step + 1) % self.trainer.log_every_n_steps == 0:
if float(self.trainer.track_grad_norm) > 0:
model = self.trainer.lightning_module
grad_norm_dict = grad_norm(model, self.trainer.track_grad_norm)
return grad_norm_dict
def _tbptt_split_batch(self, batch: Any) -> List[Any]:
splits = [batch]
truncated_bptt_enabled = self._truncated_bptt_enabled()
if truncated_bptt_enabled:
model_ref = self.trainer.lightning_module
with self.trainer.profiler.profile("tbptt_split_batch"):
splits = model_ref.tbptt_split_batch(batch, self._truncated_bptt_steps())
return splits
def run_training_epoch(self):
# modify dataloader if needed (ddp, etc...)
train_dataloader = self.trainer.accelerator.process_dataloader(self.trainer.train_dataloader)
# track epoch output
epoch_output = [[] for _ in range(self.num_optimizers)]
train_dataloader = self.trainer.data_connector.get_profiled_train_dataloader(train_dataloader)
dataloader_idx = 0
val_loop_called = False
batch_idx = None
is_last_batch = None
for batch_idx, (batch, is_last_batch) in train_dataloader:
self.batch_idx = batch_idx
self.trainer.is_last_batch = is_last_batch
# ------------------------------------
# TRAINING_STEP + TRAINING_STEP_END
# ------------------------------------
with self.trainer.profiler.profile("run_training_batch"):
batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
# when returning -1 from train_step, we end epoch early
if batch_output.signal == -1:
break
# hook
# TODO: add outputs to batches
self.on_train_batch_end(
epoch_output,
batch_output.training_step_output_for_epoch_end,
batch,
batch_idx,
dataloader_idx,
)
# -----------------------------------------
# SAVE METRICS TO LOGGERS
# -----------------------------------------
self.trainer.logger_connector.log_train_step_metrics(batch_output)
# -----------------------------------------
# VALIDATE IF NEEDED
# -----------------------------------------
should_check_val = self._should_check_val_fx(batch_idx, is_last_batch)
if should_check_val:
self.trainer.validating = True
self.trainer._run_evaluation()
self.trainer.training = True
val_loop_called = True
# -----------------------------------------
# SAVE LOGGERS (ie: Tensorboard, etc...)
# -----------------------------------------
self.save_loggers_on_train_batch_end()
# update LR schedulers
monitor_metrics = deepcopy(self.trainer.logger_connector.callback_metrics)
self.update_train_loop_lr_schedulers(monitor_metrics=monitor_metrics)
self.trainer.checkpoint_connector.has_trained = True
# max steps reached, end training
if (
self.max_steps is not None and self.max_steps <= self.global_step + 1
and self._accumulated_batches_reached()
):
break
# end epoch early
# stop when the flag is changed or we've gone past the amount
# requested in the batches
if self.trainer.should_stop:
break
self.total_batch_idx += 1
# stop epoch if we limited the number of training batches
if self._num_training_batches_reached(is_last_batch):
break
# progress global step according to grads progress
self.increment_accumulated_grad_global_step()
if batch_idx is None:
# dataloader/iterator did not produce a batch
return
# handle epoch_output on epoch end
self.on_train_epoch_end(epoch_output)
# log epoch metrics
self.trainer.logger_connector.log_train_epoch_end_metrics(epoch_output)
should_check_val = self._should_check_val_fx(batch_idx, is_last_batch, on_epoch=True)
should_skip_eval = self.trainer.evaluation_loop.should_skip_evaluation(self.trainer.num_val_batches)
should_train_only = self.trainer.disable_validation or should_skip_eval
# update epoch level lr_schedulers if no val loop outside train loop is triggered
if (val_loop_called and not should_check_val) or should_train_only:
self.trainer.optimizer_connector.update_learning_rates(interval='epoch')
if should_train_only:
self.check_checkpoint_callback(True)
if should_check_val:
self.trainer.validating = True
self.trainer._run_evaluation(on_epoch=True)
self.trainer.training = True
# increment the global step once
# progress global step according to grads progress
self.increment_accumulated_grad_global_step()
def on_train_epoch_end(self, epoch_output: List[List[List[Result]]]) -> None:
# inform logger the batch loop has finished
self.trainer.logger_connector.on_train_epoch_end()
# prepare epoch output
processed_epoch_output = TrainLoop._prepare_outputs(epoch_output, batch_mode=False)
# get the model and call model.training_epoch_end
model = self.trainer.lightning_module
if is_overridden('training_epoch_end', model=model):
# run training_epoch_end
# refresh the result for custom logging at the epoch level
model._current_fx_name = 'training_epoch_end'
# lightningmodule hook
training_epoch_end_output = model.training_epoch_end(processed_epoch_output)
if training_epoch_end_output is not None:
raise MisconfigurationException(
'training_epoch_end expects a return of None. '
'HINT: remove the return statement in training_epoch_end'
)
# capture logging
self.trainer.logger_connector.cache_logged_metrics()
# call train epoch end hooks
self._on_train_epoch_end_hook(processed_epoch_output)
self.trainer.call_hook('on_epoch_end')
def _on_train_epoch_end_hook(self, processed_epoch_output) -> None:
# We cannot rely on Trainer.call_hook because the signatures might be different across
# lightning module and callback
# As a result, we need to inspect if the module accepts `outputs` in `on_train_epoch_end`
# This implementation is copied from Trainer.call_hook
hook_name = "on_train_epoch_end"
# set hook_name to model + reset Result obj
skip = self.trainer._reset_result_and_set_hook_fx_name(hook_name)
# always profile hooks
with self.trainer.profiler.profile(hook_name):
# first call trainer hook
if hasattr(self.trainer, hook_name):
trainer_hook = getattr(self.trainer, hook_name)
trainer_hook(processed_epoch_output)
# next call hook in lightningModule
model_ref = self.trainer.lightning_module
if is_overridden(hook_name, model_ref):
hook_fx = getattr(model_ref, hook_name)
if is_param_in_hook_signature(hook_fx, "outputs"):
self.warning_cache.warn(
"The signature of `ModelHooks.on_train_epoch_end` has changed in v1.3."
" `outputs` parameter has been deprecated."
" Support for the old signature will be removed in v1.5", DeprecationWarning
)
model_ref.on_train_epoch_end(processed_epoch_output)
else:
model_ref.on_train_epoch_end()
# if the PL module doesn't have the hook then call the accelerator
# used to auto-reduce things for the user with Results obj
elif hasattr(self.trainer.accelerator, hook_name):
accelerator_hook = getattr(self.trainer.accelerator, hook_name)
accelerator_hook()
if not skip:
self.trainer._cache_logged_metrics()
def run_training_batch(self, batch, batch_idx, dataloader_idx):
# track grad norms
grad_norm_dict = {}
# bookkeeping
self.trainer.hiddens = None
optimizers = self.prepare_optimizers()
# track all outputs across time and num of optimizers
batch_outputs = [[] for _ in range(len(optimizers))]
if batch is None:
self.warning_cache.warn("train_dataloader yielded None. If this was on purpose, ignore this warning...")
return AttributeDict(
signal=0,
grad_norm_dict={},
training_step_output_for_epoch_end=batch_outputs,
)
# hook
response = self.trainer.call_hook("on_batch_start")
if response == -1:
return AttributeDict(signal=-1, grad_norm_dict={})
# hook
response = self.trainer.call_hook("on_train_batch_start", batch, batch_idx, dataloader_idx)
if response == -1:
return AttributeDict(signal=-1, grad_norm_dict={})
# lightning module hook
splits = self._tbptt_split_batch(batch)
for split_idx, split_batch in enumerate(splits):
# create an iterable for optimizers and loop over them
for opt_idx, optimizer in optimizers:
# toggle model params + set info to logger_connector
self.run_train_split_start(split_idx, split_batch, opt_idx, optimizer)
result = AttributeDict()
if self.should_accumulate():
# For gradient accumulation
# -------------------
# calculate loss (train step + train step end)
# -------------------
# automatic_optimization=True: perform dpp sync only when performing optimizer_step
# automatic_optimization=False: don't block synchronization here
with self.block_ddp_sync_behaviour():
result = self.training_step_and_backward(
split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens
)
# ------------------------------
# BACKWARD PASS
# ------------------------------
# gradient update with accumulated gradients
else:
if self.trainer.lightning_module.automatic_optimization:
def train_step_and_backward_closure():
nonlocal result
result = self.training_step_and_backward(
split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens
)
return None if result is None else result.loss
# optimizer step
self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
else:
result = self.training_step(split_batch, batch_idx, opt_idx, self.trainer.hiddens)
if not result:
# user decided to skip optimization
# make sure to zero grad.
continue
                # todo: Properly aggregate grad_norm across opt_idx and split_idx
grad_norm_dict = result.get("grad_norm_dict", {})
# update running loss + reset accumulated loss
self.update_running_loss(result.loss)
batch_outputs = self._process_closure_result(
opt_closure_result=result,
batch_outputs=batch_outputs,
opt_idx=opt_idx,
)
result = AttributeDict(
signal=0,
grad_norm_dict=grad_norm_dict,
training_step_output_for_epoch_end=batch_outputs,
)
return result
@contextmanager
def block_ddp_sync_behaviour(self, should_block_sync: bool = False):
"""
automatic_optimization = True
Blocks ddp sync gradients behaviour on backwards pass.
This is useful for skipping sync when accumulating gradients, reducing communication overhead
automatic_optimization = False
do not block ddp gradient sync when using manual optimization
as gradients are needed within the training step
Returns:
context manager with sync behaviour off
"""
if (
isinstance(self.trainer.training_type_plugin, ParallelPlugin)
and (self.trainer.lightning_module.automatic_optimization or should_block_sync)
):
with self.trainer.training_type_plugin.block_backward_sync():
yield None
else:
yield None
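    # Note added for illustration (not part of the original source file): under DDP every backward
    # pass normally all-reduces gradients across processes. While gradients are still being
    # accumulated that communication is wasted, so block_backward_sync() on the training type plugin
    # (which, for the DDP plugins, wraps DistributedDataParallel.no_sync()) postpones the all-reduce
    # until the closing accumulation step.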
def _process_closure_result(
self, opt_closure_result: Optional[AttributeDict], batch_outputs: list, opt_idx: int
) -> list:
if opt_closure_result:
# cache metrics
self.trainer.logger_connector.cache_training_step_metrics(opt_closure_result)
# check if loss or model weights are nan
if self.trainer.terminate_on_nan:
self._check_finite(opt_closure_result.loss)
# track all the outputs across all steps
batch_opt_idx = opt_idx if len(batch_outputs) > 1 else 0
batch_outputs[batch_opt_idx].append(opt_closure_result.training_step_output_for_epoch_end)
return batch_outputs
def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens):
"""Wrap forward, zero_grad and backward in a closure so second order methods work"""
with self.trainer.profiler.profile("training_step_and_backward"):
# lightning module hook
result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)
if not self._skip_backward and self.trainer.lightning_module.automatic_optimization:
is_first_batch_to_accumulate = batch_idx % self.trainer.accumulate_grad_batches == 0
if is_first_batch_to_accumulate:
self.on_before_zero_grad(optimizer)
self.optimizer_zero_grad(batch_idx, optimizer, opt_idx)
# backward pass
if result is not None:
with self.trainer.profiler.profile("backward"):
self.backward(result, optimizer, opt_idx)
# hook - call this hook only
# when gradients have finished to accumulate
if not self.should_accumulate():
self.on_after_backward(result.training_step_output, batch_idx, result.loss)
# check if loss or model weights are nan
if self.trainer.terminate_on_nan:
self._check_finite(result.loss)
else:
self.warning_cache.warn(
"training_step returned None. If this was on purpose, ignore this warning..."
)
if len(self.trainer.optimizers) > 1:
# revert back to previous state
self.trainer.lightning_module.untoggle_optimizer(opt_idx)
return result
def _check_finite(self, loss: torch.Tensor) -> None:
if not torch.isfinite(loss).all():
raise ValueError(f'The loss returned in `training_step` is {loss}.')
model = self.trainer.lightning_module
detect_nan_parameters(model)
def backward(self, result, optimizer, opt_idx, *args, **kwargs):
self.trainer.dev_debugger.track_event("backward_call")
should_accumulate = self.should_accumulate()
# backward can be called manually in the training loop
if isinstance(result, torch.Tensor):
self.trainer.accelerator.backward(result, optimizer, opt_idx, should_accumulate, *args, **kwargs)
else:
result.closure_loss = self.trainer.accelerator.backward(
result.closure_loss, optimizer, opt_idx, should_accumulate, *args, **kwargs
)
if not self.should_accumulate():
# track gradients
result.grad_norm_dict = self.track_and_norm_grad(optimizer=optimizer)
def update_train_loop_lr_schedulers(self, monitor_metrics=None):
num_accumulated_batches_reached = self._accumulated_batches_reached()
num_training_batches_reached = self._num_training_batches_reached()
if num_accumulated_batches_reached or num_training_batches_reached:
# update lr
self.trainer.optimizer_connector.update_learning_rates(
interval="step",
monitor_metrics=monitor_metrics,
opt_indices=[opt_idx for opt_idx, _ in self.get_optimizers_iterable()],
)
def increment_accumulated_grad_global_step(self):
num_accumulated_batches_reached = self._accumulated_batches_reached()
num_training_batches_reached = self._num_training_batches_reached()
# progress global step according to grads progress
if num_accumulated_batches_reached or num_training_batches_reached:
self.global_step = self.trainer.accelerator.update_global_step(self.total_batch_idx, self.global_step)
def _accumulated_batches_reached(self):
return (self.batch_idx + 1) % self.trainer.accumulate_grad_batches == 0
def _num_training_batches_reached(self, is_last_batch=False):
return (self.batch_idx + 1) == self.trainer.num_training_batches or is_last_batch
def should_accumulate(self):
# checks if backward or backward + optimizer step (via closure)
accumulation_done = self._accumulated_batches_reached()
is_final_batch = self._num_training_batches_reached()
return not (accumulation_done or is_final_batch)
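# Worked example (illustrative values): with accumulate_grad_batches = 4 and
# num_training_batches = 10, should_accumulate() returns True for batch_idx in
# {0, 1, 2, 4, 5, 6, 8} (backward only, gradients keep accumulating) and False for
# batch_idx in {3, 7, 9}, where the optimizer step runs (batch 9 is the last batch
# of the epoch, so it always steps).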
def _should_check_val_fx(self, batch_idx: int, is_last_batch: bool, on_epoch: bool = False) -> bool:
""" Decide if we should run validation. """
if not self.trainer.enable_validation:
return False
# check if this epoch is eligible to run validation
if (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch != 0:
return False
# val_check_batch is inf for iterable datasets with no length defined
# TODO: let training/eval loop handle logic around limit_*_batches and val_check_batch
is_val_check_batch = False
if isinstance(self.trainer.limit_train_batches, int) and self.trainer.val_check_batch == float('inf'):
is_val_check_batch = (batch_idx + 1) % self.trainer.limit_train_batches == 0
elif self.trainer.val_check_batch != float('inf'):
is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0
# Note: num_training_batches is also inf for iterable datasets with no length defined
epoch_end_val_check = (batch_idx + 1) % self.trainer.num_training_batches == 0
is_last_batch_for_infinite_dataset = is_last_batch and self.trainer.val_check_batch == float("inf")
if on_epoch:
return (
is_val_check_batch and epoch_end_val_check
) or self.trainer.should_stop or is_last_batch_for_infinite_dataset
else:
return is_val_check_batch and not epoch_end_val_check
def _build_kwargs(self, batch, batch_idx, opt_idx, hiddens):
# enable not needing to add opt_idx to training_step
step_kwargs = OrderedDict([('batch', batch), ('batch_idx', batch_idx)])
lightning_module = self.trainer.lightning_module
if len(self.trainer.optimizers) > 1:
training_step_fx = getattr(lightning_module, "training_step")
has_opt_idx_in_train_step = is_param_in_hook_signature(training_step_fx, "optimizer_idx")
if has_opt_idx_in_train_step:
if not lightning_module.automatic_optimization:
self.warning_cache.warn(
"`training_step` hook signature has changed in v1.3."
" `optimizer_idx` argument has been removed in case of manual optimization. Support for"
" the old signature will be removed in v1.5", DeprecationWarning
)
step_kwargs['optimizer_idx'] = opt_idx
elif not has_opt_idx_in_train_step and self.trainer.lightning_module.automatic_optimization:
raise ValueError(
f"Your LightningModule defines {len(self.trainer.optimizers)} optimizers but"
' `training_step` is missing the `optimizer_idx` argument.'
)
# pass hiddens if using tbptt
if self._truncated_bptt_enabled():
step_kwargs['hiddens'] = hiddens
return step_kwargs
def _truncated_bptt_enabled(self) -> bool:
""" Temporary tbptt utilities until this flag is fully migrated to the lightning module. """
return self._truncated_bptt_steps() > 0
def _truncated_bptt_steps(self) -> int:
lightning_module = self.trainer.lightning_module
# Give precedence to the LightningModule as the Trainer flag will be removed in v1.5
if lightning_module.truncated_bptt_steps > 0:
return lightning_module.truncated_bptt_steps
return self.trainer.truncated_bptt_steps or 0
def save_loggers_on_train_batch_end(self):
# when loggers should save to disk
should_flush_logs = self.trainer.logger_connector.should_flush_logs
if should_flush_logs and self.trainer.is_global_zero and self.trainer.logger is not None:
self.trainer.logger.save()
def prepare_optimizers(self):
# in manual optimization we loop over all optimizers at once
optimizers = self.get_optimizers_iterable()
if not self.trainer.lightning_module.automatic_optimization:
optimizers = [optimizers[0]]
return optimizers
def run_train_split_start(self, split_idx, split_batch, opt_idx, optimizer):
# set split_idx to trainer for tracking
self.trainer.split_idx = split_idx
# make sure only the gradients of the current optimizer's parameters are calculated
# in the training step to prevent dangling gradients in multiple-optimizer setup.
if self.trainer.lightning_module.automatic_optimization and len(self.trainer.optimizers) > 1:
model = self.trainer.lightning_module
model.toggle_optimizer(optimizer, opt_idx)
# use to track metrics internally
self.trainer.logger_connector.on_train_split_start(split_idx, opt_idx, split_batch)
def update_running_loss(self, current_loss: torch.Tensor) -> None:
if self.trainer.lightning_module.automatic_optimization:
# track total loss for logging (avoid mem leaks)
self.accumulated_loss.append(current_loss)
accumulated_loss = self.accumulated_loss.mean()
if accumulated_loss is not None:
# calculate running loss for display
self.running_loss.append(self.accumulated_loss.mean() * self.trainer.accumulate_grad_batches)
# reset for next set of accumulated grads
self.accumulated_loss.reset()
| [
"torch.isfinite"
] | 1.4 | loic-beheshti/pytorch-lightning | 6ac16ff34822cef9b3c16e54f872655b585a066a |
1.8 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
class _InfiniteSampler(torch.utils.data.Sampler):
"""Wraps another Sampler to yield an infinite stream."""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
for batch in self.sampler:
yield batch
class InfiniteDataLoader:
def __init__(self, dataset, weights, batch_size, num_workers):
super().__init__()
if weights is not None:
sampler = torch.utils.data.WeightedRandomSampler(weights, replacement=True, num_samples=batch_size)
else:
sampler = torch.utils.data.RandomSampler(dataset, replacement=True)
if weights is None:
weights = torch.ones(len(dataset))
batch_sampler = torch.utils.data.BatchSampler(sampler, batch_size=batch_size, drop_last=True)
self._infinite_iterator = iter(
torch.utils.data.DataLoader(
dataset, num_workers=num_workers, batch_sampler=_InfiniteSampler(batch_sampler)
)
)
def __iter__(self):
while True:
yield next(self._infinite_iterator)
def __len__(self):
raise ValueError
class FastDataLoader:
"""DataLoader wrapper with slightly improved speed by not respawning worker
processes at every epoch."""
def __init__(self, dataset, batch_size, num_workers):
super().__init__()
batch_sampler = torch.utils.data.BatchSampler(
torch.utils.data.RandomSampler(dataset, replacement=False), batch_size=batch_size, drop_last=False
)
self._infinite_iterator = iter(
torch.utils.data.DataLoader(
dataset, num_workers=num_workers, batch_sampler=_InfiniteSampler(batch_sampler)
)
)
self._length = len(batch_sampler)
def __iter__(self):
for _ in range(len(self)):
yield next(self._infinite_iterator)
def __len__(self):
return self._length
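# Minimal usage sketch (illustrative only; the dataset, batch size and worker count
# below are placeholders rather than values taken from the original project).
if __name__ == "__main__":
    from torch.utils.data import TensorDataset

    toy = TensorDataset(torch.randn(100, 3), torch.randint(0, 2, (100,)))

    # InfiniteDataLoader yields forever, so bound the loop explicitly.
    infinite = InfiniteDataLoader(toy, weights=None, batch_size=8, num_workers=0)
    for step, (x, y) in zip(range(5), infinite):
        print(step, x.shape, y.shape)

    # FastDataLoader behaves like a normal epoch-length loader.
    fast = FastDataLoader(toy, batch_size=8, num_workers=0)
    print(len(fast))  # number of batches per epoch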
| [
"torch.utils.data.WeightedRandomSampler",
"torch.utils.data.RandomSampler",
"torch.utils.data.BatchSampler"
] | 1.8.1 | VinAIResearch/mDSDI | 8ec49085d8389ab490ec633c3ae4bf66be085366 |
1.5 | import unittest
import torch
from models import VanillaVAE
from torchsummary import summary
class TestVAE(unittest.TestCase):
def setUp(self) -> None:
# self.model2 = VAE(3, 10)
self.model = VanillaVAE(3, 10)
def test_summary(self):
print(summary(self.model, (3, 64, 64), device="cpu"))
# print(summary(self.model2, (3, 64, 64), device='cpu'))
def test_forward(self):
x = torch.randn(16, 3, 64, 64)
y = self.model(x)
print("Model Output size:", y[0].size())
# print("Model2 Output size:", self.model2(x)[0].size())
def test_loss(self):
x = torch.randn(16, 3, 64, 64)
result = self.model(x)
loss = self.model.loss_function(*result, M_N=0.005)
print(loss)
if __name__ == "__main__":
unittest.main()
| [
"torch.randn"
] | 1.5.0 | threewisemonkeys-as/PyTorch-VAE | 4ed0fc7581d4792b435134aa9e06d5e35a5db118 |
1.5 | from typing import List, Optional
import torch
from torch import nn
from torch.nn import functional as F
from torchvision.models import vgg19_bn
from .base import BaseVAE
class DFCVAE(BaseVAE):
def __init__(
self,
in_channels: int,
latent_dim: int,
hidden_dims: List = None,
alpha: float = 1,
beta: float = 0.5,
lr: float = 0.005,
weight_decay: Optional[float] = 0,
scheduler_gamma: Optional[float] = 0.95,
) -> None:
super(DFCVAE, self).__init__(
lr=lr, weight_decay=weight_decay, scheduler_gamma=scheduler_gamma
)
self.latent_dim = latent_dim
self.alpha = alpha
self.beta = beta
modules = []
if hidden_dims is None:
hidden_dims = [32, 64, 128, 256, 512]
# Build Encoder
for h_dim in hidden_dims:
modules.append(
nn.Sequential(
nn.Conv2d(
in_channels,
out_channels=h_dim,
kernel_size=3,
stride=2,
padding=1,
),
nn.BatchNorm2d(h_dim),
nn.LeakyReLU(),
)
)
in_channels = h_dim
self.encoder = nn.Sequential(*modules)
self.fc_mu = nn.Linear(hidden_dims[-1] * 4, latent_dim)
self.fc_var = nn.Linear(hidden_dims[-1] * 4, latent_dim)
# Build Decoder
modules = []
self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)
hidden_dims.reverse()
for i in range(len(hidden_dims) - 1):
modules.append(
nn.Sequential(
nn.ConvTranspose2d(
hidden_dims[i],
hidden_dims[i + 1],
kernel_size=3,
stride=2,
padding=1,
output_padding=1,
),
nn.BatchNorm2d(hidden_dims[i + 1]),
nn.LeakyReLU(),
)
)
self.decoder = nn.Sequential(*modules)
self.final_layer = nn.Sequential(
nn.ConvTranspose2d(
hidden_dims[-1],
hidden_dims[-1],
kernel_size=3,
stride=2,
padding=1,
output_padding=1,
),
nn.BatchNorm2d(hidden_dims[-1]),
nn.LeakyReLU(),
nn.Conv2d(hidden_dims[-1], out_channels=3, kernel_size=3, padding=1),
nn.Tanh(),
)
self.feature_network = vgg19_bn(pretrained=True)
# Freeze the pretrained feature network
for param in self.feature_network.parameters():
param.requires_grad = False
self.feature_network.eval()
def encode(self, input: torch.Tensor) -> List[torch.Tensor]:
"""
Encodes the input by passing through the encoder network
and returns the latent codes.
:param input: (torch.Tensor) Input tensor to encoder [N x C x H x W]
:return: (torch.Tensor) List of latent codes
"""
result = self.encoder(input)
result = torch.flatten(result, start_dim=1)
# Split the result into mu and var components
# of the latent Gaussian distribution
mu = self.fc_mu(result)
log_var = self.fc_var(result)
return [mu, log_var]
def decode(self, z: torch.Tensor) -> torch.Tensor:
"""
Maps the given latent codes
onto the image space.
:param z: (torch.Tensor) [B x D]
:return: (torch.Tensor) [B x C x H x W]
"""
result = self.decoder_input(z)
result = result.view(-1, 512, 2, 2)
result = self.decoder(result)
result = self.final_layer(result)
return result
def reparameterize(self, mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
"""
Reparameterization trick to sample from N(mu, var) from
N(0,1).
:param mu: (torch.Tensor) Mean of the latent Gaussian [B x D]
:param logvar: (torch.Tensor) Standard deviation of the latent Gaussian [B x D]
:return: (torch.Tensor) [B x D]
"""
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps * std + mu
def forward(self, input: torch.Tensor, **kwargs) -> List[torch.Tensor]:
mu, log_var = self.encode(input)
z = self.reparameterize(mu, log_var)
recons = self.decode(z)
recons_features = self.extract_features(recons)
input_features = self.extract_features(input)
return [recons, input, recons_features, input_features, mu, log_var]
def extract_features(
self, input: torch.Tensor, feature_layers: List = None
) -> List[torch.Tensor]:
"""
Extracts the features from the pretrained model
at the layers indicated by feature_layers.
:param input: (torch.Tensor) [B x C x H x W]
:param feature_layers: List of string of IDs
:return: List of the extracted features
"""
if feature_layers is None:
feature_layers = ["14", "24", "34", "43"]
features = []
result = input
for (key, module) in self.feature_network.features._modules.items():
result = module(result)
if key in feature_layers:
features.append(result)
return features
def loss_function(self, *args, **kwargs) -> dict:
"""
Computes the VAE loss function.
KL(N(\mu, \sigma), N(0, 1)) = \log \frac{1}{\sigma} + \frac{\sigma^2 + \mu^2}{2} - \frac{1}{2}
:param args:
:param kwargs:
:return:
"""
recons = args[0]
input = args[1]
recons_features = args[2]
input_features = args[3]
mu = args[4]
log_var = args[5]
kld_weight = kwargs["M_N"] # Account for the minibatch samples from the dataset
recons_loss = F.mse_loss(recons, input)
feature_loss = 0.0
for (r, i) in zip(recons_features, input_features):
feature_loss += F.mse_loss(r, i)
kld_loss = torch.mean(
-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1), dim=0
)
loss = (
self.beta * (recons_loss + feature_loss)
+ self.alpha * kld_weight * kld_loss
)
return {"loss": loss, "Reconstruction_Loss": recons_loss, "KLD": -kld_loss}
def sample(self, num_samples: int, current_device: int, **kwargs) -> torch.Tensor:
"""
Samples from the latent space and return the corresponding
image space map.
:param num_samples: (Int) Number of samples
:param current_device: (Int) Device to run the model
:return: (torch.Tensor)
"""
z = torch.randn(num_samples, self.latent_dim)
z = z.to(current_device)
samples = self.decode(z)
return samples
def generate(self, x: torch.Tensor, **kwargs) -> torch.Tensor:
"""
Given an input image x, returns the reconstructed image
:param x: (torch.Tensor) [B x C x H x W]
:return: (torch.Tensor) [B x C x H x W]
"""
return self.forward(x)[0]
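# Minimal smoke-test sketch (not from the original repo): latent size, batch size and
# the M_N weight are illustrative; constructing the model downloads pretrained
# VGG-19-BN weights for the feature network.
if __name__ == "__main__":
    model = DFCVAE(in_channels=3, latent_dim=128)
    x = torch.randn(4, 3, 64, 64)
    outputs = model(x)  # [recons, input, recons_features, input_features, mu, log_var]
    losses = model.loss_function(*outputs, M_N=0.005)
    print(outputs[0].shape, losses["loss"].item())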
| [
"torch.nn.Linear",
"torch.nn.Sequential",
"torch.nn.Tanh",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.nn.LeakyReLU",
"torch.nn.functional.mse_loss",
"torch.randn_like",
"torch.nn.Conv2d",
"torch.flatten",
"torch.exp",
"torch.randn"
] | 1.5.0 | threewisemonkeys-as/PyTorch-VAE | 4ed0fc7581d4792b435134aa9e06d5e35a5db118 |
1.5 | from typing import List, Optional
import torch
from torch import nn
from torch.distributions import Normal
from torch.nn import functional as F
from .base import BaseVAE
class MIWAE(BaseVAE):
def __init__(
self,
in_channels: int,
latent_dim: int,
hidden_dims: List = None,
num_samples: int = 5,
num_estimates: int = 5,
lr: float = 0.005,
weight_decay: Optional[float] = 0,
scheduler_gamma: Optional[float] = 0.95,
) -> None:
super(MIWAE, self).__init__(
lr=lr, weight_decay=weight_decay, scheduler_gamma=scheduler_gamma
)
self.latent_dim = latent_dim
self.num_samples = num_samples # K
self.num_estimates = num_estimates # M
modules = []
if hidden_dims is None:
hidden_dims = [32, 64, 128, 256, 512]
# Build Encoder
for h_dim in hidden_dims:
modules.append(
nn.Sequential(
nn.Conv2d(
in_channels,
out_channels=h_dim,
kernel_size=3,
stride=2,
padding=1,
),
nn.BatchNorm2d(h_dim),
nn.LeakyReLU(),
)
)
in_channels = h_dim
self.encoder = nn.Sequential(*modules)
self.fc_mu = nn.Linear(hidden_dims[-1] * 4, latent_dim)
self.fc_var = nn.Linear(hidden_dims[-1] * 4, latent_dim)
# Build Decoder
modules = []
self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)
hidden_dims.reverse()
for i in range(len(hidden_dims) - 1):
modules.append(
nn.Sequential(
nn.ConvTranspose2d(
hidden_dims[i],
hidden_dims[i + 1],
kernel_size=3,
stride=2,
padding=1,
output_padding=1,
),
nn.BatchNorm2d(hidden_dims[i + 1]),
nn.LeakyReLU(),
)
)
self.decoder = nn.Sequential(*modules)
self.final_layer = nn.Sequential(
nn.ConvTranspose2d(
hidden_dims[-1],
hidden_dims[-1],
kernel_size=3,
stride=2,
padding=1,
output_padding=1,
),
nn.BatchNorm2d(hidden_dims[-1]),
nn.LeakyReLU(),
nn.Conv2d(hidden_dims[-1], out_channels=3, kernel_size=3, padding=1),
nn.Tanh(),
)
def encode(self, input: torch.Tensor) -> List[torch.Tensor]:
"""
Encodes the input by passing through the encoder network
and returns the latent codes.
:param input: (torch.Tensor) Input tensor to encoder [N x C x H x W]
:return: (torch.Tensor) List of latent codes
"""
result = self.encoder(input)
result = torch.flatten(result, start_dim=1)
# Split the result into mu and var components
# of the latent Gaussian distribution
mu = self.fc_mu(result)
log_var = self.fc_var(result)
return [mu, log_var]
def decode(self, z: torch.Tensor) -> torch.Tensor:
"""
Maps the given latent codes of S samples
onto the image space.
:param z: (torch.Tensor) [B x S x D]
:return: (torch.Tensor) [B x S x C x H x W]
"""
B, M, S, D = z.size()
z = z.view(-1, self.latent_dim) # [BMS x D]
result = self.decoder_input(z)
result = result.view(-1, 512, 2, 2)
result = self.decoder(result)
result = self.final_layer(result) # [BMS x C x H x W ]
result = result.view(
[B, M, S, result.size(-3), result.size(-2), result.size(-1)]
) # [B x M x S x C x H x W]
return result
def reparameterize(self, mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
"""
:param mu: (torch.Tensor) Mean of the latent Gaussian
:param logvar: (torch.Tensor) Standard deviation of the latent Gaussian
:return:
"""
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps * std + mu
def forward(self, input: torch.Tensor, **kwargs) -> List[torch.Tensor]:
mu, log_var = self.encode(input)
mu = mu.repeat(self.num_estimates, self.num_samples, 1, 1).permute(
2, 0, 1, 3
) # [B x M x S x D]
log_var = log_var.repeat(self.num_estimates, self.num_samples, 1, 1).permute(
2, 0, 1, 3
) # [B x M x S x D]
z = self.reparameterize(mu, log_var) # [B x M x S x D]
eps = (z - mu) / log_var # Prior samples
return [self.decode(z), input, mu, log_var, z, eps]
def loss_function(self, *args, **kwargs) -> dict:
"""
KL(N(\mu, \sigma), N(0, 1)) = \log \frac{1}{\sigma} + \frac{\sigma^2 + \mu^2}{2} - \frac{1}{2}
:param args:
:param kwargs:
:return:
"""
recons = args[0]
input = args[1]
mu = args[2]
log_var = args[3]
z = args[4]
eps = args[5]
input = input.repeat(self.num_estimates, self.num_samples, 1, 1, 1, 1).permute(
2, 0, 1, 3, 4, 5
) # [B x M x S x C x H x W]
kld_weight = kwargs["M_N"] # Account for the minibatch samples from the dataset
log_p_x_z = (
((recons - input) ** 2).flatten(3).mean(-1)
) # Reconstruction Loss # [B x M x S]
kld_loss = -0.5 * torch.sum(
1 + log_var - mu ** 2 - log_var.exp(), dim=3
) # [B x M x S]
# Get importance weights
log_weight = log_p_x_z + kld_weight * kld_loss # .detach().data
# Rescale the weights (along the sample dim) to lie in [0, 1] and sum to 1
weight = F.softmax(log_weight, dim=-1) # [B x M x S]
loss = torch.mean(
torch.mean(torch.sum(weight * log_weight, dim=-1), dim=-2), dim=0
)
return {
"loss": loss,
"Reconstruction_Loss": log_p_x_z.mean(),
"KLD": -kld_loss.mean(),
}
def sample(self, num_samples: int, current_device: int, **kwargs) -> torch.Tensor:
"""
Samples from the latent space and return the corresponding
image space map.
:param num_samples: (Int) Number of samples
:param current_device: (Int) Device to run the model
:return: (torch.Tensor)
"""
z = torch.randn(num_samples, 1, 1, self.latent_dim)
z = z.to(current_device)
samples = self.decode(z).squeeze()
return samples
def generate(self, x: torch.Tensor, **kwargs) -> torch.Tensor:
"""
Given an input image x, returns the reconstructed image.
Returns only the first reconstructed sample
:param x: (torch.Tensor) [B x C x H x W]
:return: (torch.Tensor) [B x C x H x W]
"""
return self.forward(x)[0][:, 0, 0, :]
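# Shape sketch (illustrative values): with num_estimates = M and num_samples = S the
# decoder output and the latent codes are organised as [B x M x S x ...], which is
# what the importance-weighted loss above reduces over.
if __name__ == "__main__":
    model = MIWAE(in_channels=3, latent_dim=128, num_samples=4, num_estimates=2)
    x = torch.randn(2, 3, 64, 64)
    recons, _, mu, log_var, z, eps = model(x)
    print(recons.shape)  # [2, 2, 4, 3, 64, 64] -> B x M x S x C x H x W
    print(z.shape)       # [2, 2, 4, 128]       -> B x M x S x D
    losses = model.loss_function(recons, x, mu, log_var, z, eps, M_N=0.005)
    print(losses["loss"].item())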
| [
"torch.nn.Linear",
"torch.nn.Sequential",
"torch.nn.Tanh",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.nn.LeakyReLU",
"torch.randn_like",
"torch.nn.Conv2d",
"torch.nn.functional.softmax",
"torch.flatten",
"torch.exp",
"torch.randn",
"torch.sum"
] | 1.5.0 | threewisemonkeys-as/PyTorch-VAE | 4ed0fc7581d4792b435134aa9e06d5e35a5db118 |
3 | import torch
import torch.nn as nn
from torch3d.nn import functional as F
from torch3d.nn.utils import _single
class FeaturePropagation(nn.Sequential):
"""
The feature propagation from the `"PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space" <https://arxiv.org/abs/1706.02413>`_ paper.
Args:
in_channels (int): Number of channels in the input point set
out_channels (int): Number of channels produced by the convolution
kernel_size (int, optional): Neighborhood size of the convolution kernel. Default: 1
bias (bool, optional): If True, adds a learnable bias to the output. Default: ``True``
""" # noqa
def __init__(self, in_channels, out_channels, kernel_size=1, bias=True):
self.kernel_size = kernel_size
in_channels = in_channels
out_channels = _single(out_channels)
modules = []
for channels in out_channels:
modules.append(nn.Conv1d(in_channels, channels, 1, bias=bias))
modules.append(nn.BatchNorm1d(channels))
modules.append(nn.ReLU(True))
in_channels = channels
super(FeaturePropagation, self).__init__(*modules)
def forward(self, x, y):
p, x = x[:, :3], x[:, 3:]
q, y = y[:, :3], y[:, 3:]
x = F.interpolate(p, q, x, self.kernel_size)
x = torch.cat([x, y], dim=1)
x = super(FeaturePropagation, self).forward(x)
x = torch.cat([q, x], dim=1)
return x
class PointDeconv(nn.Module):
"""
The point deconvolution layer from the `"PointConv: Deep Convolutional Networks on 3D Point Clouds" <https://arxiv.org/abs/1811.07246>`_ paper.
Args:
in_channels (int): Number of channels in the input point set
out_channels (int): Number of channels produced by the convolution
kernel_size (int, optional): Neighborhood size of the convolution kernel. Default: 1
bandwidth (float, optional): Bandwidth of kernel density estimation. Default: 1.0
bias (bool, optional): If True, adds a learnable bias to the output. Default: ``True``
""" # noqa
def __init__(
self, in_channels, out_channels, kernel_size=1, bandwidth=1.0, bias=True
):
super(PointDeconv, self).__init__()
self.kernel_size = kernel_size
self.bandwidth = bandwidth
self.scale = nn.Sequential(
nn.Conv1d(1, 8, 1, bias=bias),
nn.BatchNorm1d(8),
nn.ReLU(True),
nn.Conv1d(8, 8, 1, bias=bias),
nn.BatchNorm1d(8),
nn.ReLU(True),
nn.Conv1d(8, 1, 1, bias=bias),
nn.Sigmoid(),
)
self.weight = nn.Sequential(
nn.Conv2d(3, 8, 1, bias=bias),
nn.BatchNorm2d(8),
nn.ReLU(True),
nn.Conv2d(8, 8, 1, bias=bias),
nn.BatchNorm2d(8),
nn.ReLU(True),
nn.Conv2d(8, 16, 1, bias=bias),
)
in_channels = in_channels
out_channels = _single(out_channels)
modules = []
for channels in out_channels[:-1]:
modules.append(nn.Conv2d(in_channels, channels, 1, bias=bias))
modules.append(nn.BatchNorm2d(channels))
modules.append(nn.ReLU(True))
in_channels = channels
self.mlp = nn.Sequential(*modules)
self.lin = nn.Sequential(
nn.Conv2d(in_channels, out_channels[-1], [16, 1], bias=bias),
nn.BatchNorm2d(out_channels[-1]),
nn.ReLU(True),
)
def forward(self, x, y):
batch_size = x.shape[0]
p, x = x[:, :3], x[:, 3:]
q, y = y[:, :3], y[:, 3:]
x = F.interpolate(p, q, x, self.kernel_size)
x = torch.cat([x, y], dim=1)
in_channels = x.shape[1]
s = F.kernel_density(q, self.bandwidth).unsqueeze(1)
s = self.scale(torch.reciprocal(s)) # calculate scaling factor
_, index = F.knn(q, q, self.kernel_size)
index = index.view(batch_size, -1).unsqueeze(1)
# Point and density grouping
p = torch.gather(q, 2, index.expand(-1, 3, -1))
x = torch.gather(x, 2, index.expand(-1, in_channels, -1))
s = torch.gather(s, 2, index)
p = p.view(batch_size, 3, self.kernel_size, -1)
x = x.view(batch_size, in_channels, self.kernel_size, -1)
s = s.view(batch_size, 1, self.kernel_size, -1)
p = p - q.unsqueeze(2).expand(-1, -1, self.kernel_size, -1)
w = self.weight(p)
x = self.mlp(x * s)
x = torch.matmul(w.permute(0, 3, 1, 2), x.permute(0, 3, 2, 1))
x = x.permute(0, 3, 2, 1)
x = self.lin(x).squeeze(2)
x = torch.cat([q, x], dim=1)
return x
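# Minimal usage sketch (shapes are illustrative): both inputs carry xyz in their first
# three channels and features after that, so with 64 feature channels on the source
# set and 32 on the target set the layer sees 64 + 32 = 96 input channels. Running it
# requires torch3d's compiled ops (knn / interpolate).
if __name__ == "__main__":
    fp = FeaturePropagation(in_channels=96, out_channels=[128], kernel_size=3)
    sparse = torch.randn(2, 3 + 64, 256)   # [B, 3 + C_x, N] source set
    dense = torch.randn(2, 3 + 32, 1024)   # [B, 3 + C_y, M] target set
    out = fp(sparse, dense)
    print(out.shape)  # expected [2, 3 + 128, 1024]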
| [
"torch.cat",
"torch.nn.Conv1d",
"torch.nn.Sequential",
"torch.gather",
"torch.nn.Sigmoid",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d",
"torch.reciprocal"
] | 3 | zhangmozhe/torch3d | d47e9b243e520f9c0c72a26c271d2c7ad242cb65 |
1.1 | # -*- coding: utf-8 -*-
# @Date : 2019-07-25
# @Author : Xinyu Gong ([email protected])
# @Link : None
# @Version : 0.0
import comet_ml
import os
import numpy as np
import torch
import torch.nn as nn
from torchvision.utils import make_grid
import torch.nn.functional as F
from imageio import imsave
from tqdm import tqdm
from copy import deepcopy
import logging
from torch.autograd import Variable
from utils.inception_score import get_inception_score
from utils.fid_score import calculate_fid_given_paths
import models
class Log_loss(torch.nn.Module):
def __init__(self):
# negation is true when you minimize -log(val)
super(Log_loss, self).__init__()
def forward(self, x, negation=True):
# shape of x will be [batch size]
log_val = torch.log(x)
loss = torch.sum(log_val)
if negation:
loss = torch.neg(loss)
return loss
class Itself_loss(torch.nn.Module):
def __init__(self):
super(Itself_loss, self).__init__()
def forward(self, x, negation=True):
# shape of x will be [batch size]
loss = torch.sum(x)
if negation:
loss = torch.neg(loss)
return loss
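# Illustration (not used below): for discriminator outputs d = torch.tensor([0.5, 2.0]),
#   Log_loss()(d)    == -(log 0.5 + log 2.0) == 0.0   (minimises -sum(log d))
#   Itself_loss()(d) == -(0.5 + 2.0)         == -2.5  (minimises -sum(d))
# and passing negation=False flips the sign of either loss.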
logger = logging.getLogger(__name__)
def train_d2(args, gen_net: nn.Module, dis_net1: nn.Module, dis_net2: nn.Module, gen_optimizer, dis_optimizer1, dis_optimizer2, gen_avg_param, train_loader, epoch,
writer_dict, schedulers=None, experiment=None):
writer = writer_dict['writer']
gen_step = 0
criterion_log = Log_loss()
criterion_itself = Itself_loss()
# train mode
gen_net = gen_net.train()
dis_net1 = dis_net1.train()
dis_net2 = dis_net2.train()
d_loss1 = 0.0
d_loss2 = 0.0
g_loss = 0.0
for iter_idx, (imgs, _) in enumerate(tqdm(train_loader)):
global_steps = writer_dict['train_global_steps']
# Adversarial ground truths
real_imgs = imgs.type(torch.cuda.FloatTensor)
# Sample noise as generator input
z = torch.cuda.FloatTensor(np.random.normal(0, 1, (imgs.shape[0], args.latent_dim)))
# ---------------------
# Train Discriminator
# ---------------------
dis_optimizer1.zero_grad()
dis_optimizer2.zero_grad()
real_validity1 = dis_net1(real_imgs)
real_validity2 = dis_net2(real_imgs)
fake_imgs = gen_net(z).detach()
assert fake_imgs.size() == real_imgs.size()
fake_validity1 = dis_net1(fake_imgs.detach())
fake_validity2 = dis_net2(fake_imgs.detach())
d_loss1 = 0.2 * criterion_log(real_validity1) + criterion_itself(fake_validity1, False)
d_loss1.backward()
d_loss2 = criterion_itself(real_validity2, False) + 0.1*criterion_log(fake_validity2, False)
d_loss2.backward()
dis_optimizer1.step()
dis_optimizer2.step()
writer.add_scalar('d_loss1', d_loss1.item(), global_steps)
writer.add_scalar('d_loss2', d_loss2.item(), global_steps)
# -----------------
# Train Generator
# -----------------
if global_steps % args.n_critic == 0:
gen_optimizer.zero_grad()
gen_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (args.gen_batch_size, args.latent_dim)))
gen_imgs = gen_net(gen_z)
fake_validity1 = dis_net1(gen_imgs)
fake_validity2 = dis_net2(gen_imgs)
# cal loss
g_loss = criterion_itself(fake_validity1) + 0.1*criterion_log(fake_validity2)
g_loss.backward()
gen_optimizer.step()
# adjust learning rate
# if schedulers:
# gen_scheduler, dis_scheduler = schedulers
# g_lr = gen_scheduler.step(global_steps)
# d_lr = dis_scheduler.step(global_steps)
# writer.add_scalar('LR/g_lr', g_lr, global_steps)
# writer.add_scalar('LR/d_lr', d_lr, global_steps)
# moving average weight
for p, avg_p in zip(gen_net.parameters(), gen_avg_param):
avg_p.mul_(0.999).add_(0.001, p.data)
writer.add_scalar('g_loss', g_loss.item(), global_steps)
gen_step += 1
# verbose
if gen_step and iter_idx % args.print_freq == 0:
tqdm.write(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]" %
(epoch, args.max_epoch, iter_idx % len(train_loader), len(train_loader), d_loss1.item(), g_loss.item()))
if experiment is not None:
experiment.log_metric("gen_loss", g_loss.item())
experiment.log_metric("dis_loss1", d_loss1.item())
experiment.log_metric("dis_loss2", d_loss2.item())
writer_dict['train_global_steps'] = global_steps + 1
def train_chainer(args, gen_net: nn.Module, dis_net: nn.Module, gen_optimizer, dis_optimizer, gen_avg_param, train_loader, epoch,
writer_dict, schedulers=None, experiment=None):
writer = writer_dict['writer']
gen_step = 0
# train mode
gen_net = gen_net.train()
dis_net = dis_net.train()
d_loss = 0.0
g_loss = 0.0
for iter_idx, (imgs, _) in enumerate(tqdm(train_loader)):
global_steps = writer_dict['train_global_steps']
# Adversarial ground truths
real_imgs = imgs.type(torch.cuda.FloatTensor)
# Sample noise as generator input
z = torch.cuda.FloatTensor(np.random.normal(0, 1, (imgs.shape[0], args.latent_dim)))
# ---------------------
# Train Discriminator
# ---------------------
dis_optimizer.zero_grad()
real_validity = dis_net(real_imgs)
fake_imgs = gen_net(z).detach()
assert fake_imgs.size() == real_imgs.size()
fake_validity = dis_net(fake_imgs)
# cal loss
d_loss = torch.mean(F.softplus(-real_validity)) + \
torch.mean(F.softplus(fake_validity))
d_loss.backward()
dis_optimizer.step()
writer.add_scalar('d_loss', d_loss.item(), global_steps)
# -----------------
# Train Generator
# -----------------
if global_steps % args.n_critic == 0:
gen_optimizer.zero_grad()
gen_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (args.gen_batch_size, args.latent_dim)))
gen_imgs = gen_net(gen_z)
fake_validity = dis_net(gen_imgs)
# cal loss
g_loss = torch.mean(F.softplus(-fake_validity))
g_loss.backward()
gen_optimizer.step()
# adjust learning rate
if schedulers:
print("schedulars?")
gen_scheduler, dis_scheduler = schedulers
g_lr = gen_scheduler.step(global_steps)
d_lr = dis_scheduler.step(global_steps)
writer.add_scalar('LR/g_lr', g_lr, global_steps)
writer.add_scalar('LR/d_lr', d_lr, global_steps)
# moving average weight
# for p, avg_p in zip(gen_net.parameters(), gen_avg_param):
# avg_p.mul_(0.999).add_(0.001, p.data)
writer.add_scalar('g_loss', g_loss.item(), global_steps)
gen_step += 1
# verbose
if gen_step and iter_idx % args.print_freq == 0:
tqdm.write(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]" %
(epoch, args.max_epoch, iter_idx % len(train_loader), len(train_loader), d_loss.item(), g_loss.item()))
if experiment is not None:
experiment.log_metric("gen_loss", g_loss.item())
experiment.log_metric("dis_loss", d_loss.item())
writer_dict['train_global_steps'] = global_steps + 1
def train_wgan(args, gen_net: nn.Module, multiD, gen_optimizer, multiD_opt, gen_avg_param, train_loader, epoch,
writer_dict, schedulers=None, experiment=None):
writer = writer_dict['writer']
gen_step = 0
n_dis = len(multiD)
# train mode
gen_net = gen_net.train()
d_loss = 0.0
g_loss = 0.0
for iter_idx, (imgs, _) in enumerate(tqdm(train_loader)):
global_steps = writer_dict['train_global_steps']
for i in range(n_dis):
multiD[i].train()
for p in multiD[i].parameters():
p.requires_grad = True
multiD_opt[i].zero_grad()
for p in gen_net.parameters():
p.requires_grad = True
# Adversarial ground truths
x_real = imgs.type(torch.cuda.FloatTensor)
y_real = torch.ones(imgs.shape[0], 1).cuda()
# Sample noise as generator input
z = torch.cuda.FloatTensor(np.random.normal(0, 1, (imgs.shape[0], args.latent_dim)))
y_fake = torch.zeros(x_real.size()[0], 1).cuda()
# ---------------------
# Train Discriminator
# ---------------------
for i in range(n_dis):
multiD_opt[i].zero_grad()
gen_optimizer.zero_grad()
x_fake = gen_net(z)
# assert x_fake.size() == x_real.size()
flag = True
for i in range(n_dis):
if flag:
D_fake = multiD[i](x_fake)
D_real = multiD[i](x_real)
flag = False
else:
D_fake = torch.cat((D_fake, multiD[i](x_fake)), dim = 1)
D_real = torch.cat((D_real, multiD[i](x_real)), dim = 1)
ind = torch.argmin(D_fake, dim = 1)
mask = torch.zeros((x_real.size()[0], n_dis)).cuda()
mask2 = torch.zeros((x_real.size()[0], n_dis)).cuda()
for i in range(mask.size()[0]):
random_checker = np.random.randint(0,10)
if random_checker > 7:  # 20% chance of a random assignment; raise the threshold to 100 to disable it
index = np.random.randint(0,n_dis)
mask[i][index] = 1.0
mask2[i][index] = 1.0
else:
mask[i][ind[i]] = 1.0
mask2[i][ind[i]] = 1.0
for i in range(mask.size()[0], mask2.size()[0]):
mask2[i][np.random.randint(0,n_dis)] = 1.0
alpha = Variable(torch.rand(x_real.size()))
alpha = alpha.cuda()
x_hat = alpha*x_fake + (1-alpha)*x_real
flag = True
for i in range(n_dis):
if flag:
d_x_hat = multiD[i](x_hat)
flag = False
else:
d_x_hat = torch.cat((d_x_hat, multiD[i](x_hat)), dim = 1)
d_x_hat = torch.sum(mask*d_x_hat, dim=1)
# d_x_hat = multiD[0](x_hat)
gradients = torch.autograd.grad(outputs=d_x_hat, inputs=x_hat,
grad_outputs=torch.ones(d_x_hat.size()).cuda(),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradients = gradients.reshape(imgs.shape[0], -1)
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
LAMBDA=10
loss = LAMBDA*gradient_penalty
D_fake_output = torch.sum(mask2*D_fake, dim = 1)
D_real_output = torch.sum(mask*D_real, dim = 1)
# cal loss
d_loss = -(torch.mean(D_real_output) - torch.mean(D_fake_output))
d_loss += loss
# d_loss = criterion(real_validity, y_real) + criterion(fake_validity, y_fake)
d_loss.backward()
for i in range(n_dis):
multiD_opt[i].step()
# for i in range(n_dis):
# for p in multiD[i].parameters():
# p.data.clamp_(-0.01, 0.01)
writer.add_scalar('d_loss', d_loss.item(), global_steps)
# -----------------
# Train Generator
# -----------------
if global_steps % args.n_critic == 0:
gen_optimizer.zero_grad()
gen_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (args.gen_batch_size, args.latent_dim)))
fake_img = gen_net(gen_z)
critic_fakes = []
lit = np.zeros(n_dis)
for i in range(n_dis):
for p in multiD[i].parameters():
p.requires_grad = False
critic_fake = multiD[i](fake_img)
critic_fakes.append(critic_fake)
lit[i] = torch.sum(critic_fake).item()
loss_sort = np.argsort(lit)
weights = np.random.dirichlet(np.ones(n_dis))
weights = np.sort(weights)[::-1]
flag = False
for i in range(len(critic_fakes)):
if flag == False:
critic_fake = weights[i]*critic_fakes[loss_sort[i]]
flag = True
else:
critic_fake = torch.add(critic_fake, weights[i]*critic_fakes[loss_sort[i]])
# cal loss
g_loss = -torch.mean(critic_fake)
# g_loss = criterion(fake_validity, y_fake)
g_loss.backward()
gen_optimizer.step()
# adjust learning rate
if schedulers:
gen_scheduler, dis_scheduler = schedulers
g_lr = gen_scheduler.step(global_steps)
d_lr = dis_scheduler.step(global_steps)
writer.add_scalar('LR/g_lr', g_lr, global_steps)
writer.add_scalar('LR/d_lr', d_lr, global_steps)
# moving average weight
for p, avg_p in zip(gen_net.parameters(), gen_avg_param):
avg_p.mul_(0.999).add_(0.001, p.data)
writer.add_scalar('g_loss', g_loss.item(), global_steps)
gen_step += 1
# verbose
if gen_step and iter_idx % args.print_freq == 0:
tqdm.write(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]" %
(epoch, args.max_epoch, iter_idx % len(train_loader), len(train_loader), d_loss.item(), g_loss.item()))
if experiment is not None:
experiment.log_metric("gen_loss", g_loss.item())
experiment.log_metric("dis_loss", d_loss.item())
writer_dict['train_global_steps'] = global_steps + 1
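# Standalone sketch of the WGAN-GP term computed inline in train_wgan above. The
# helper name is illustrative; it draws one interpolation coefficient per sample,
# whereas the loop above draws an element-wise alpha.
def _gradient_penalty_sketch(critic, x_real, x_fake, lambda_gp=10.0):
    alpha = torch.rand(x_real.size(0), 1, 1, 1, device=x_real.device)
    x_hat = (alpha * x_fake + (1 - alpha) * x_real).requires_grad_(True)
    d_x_hat = critic(x_hat)
    grads = torch.autograd.grad(
        outputs=d_x_hat, inputs=x_hat,
        grad_outputs=torch.ones_like(d_x_hat),
        create_graph=True, retain_graph=True, only_inputs=True)[0]
    grads = grads.reshape(x_real.size(0), -1)
    return lambda_gp * ((grads.norm(2, dim=1) - 1) ** 2).mean()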
# def train_multi(args, gen_net: nn.Module, multiD, gen_optimizer, multiD_opt, gen_avg_param, train_loader, epoch,
# writer_dict, schedulers=None, experiment=None):
# writer = writer_dict['writer']
# gen_step = 0
# criterion = nn.BCELoss()
# n_dis = len(multiD)
# # train mode
# gen_net = gen_net.train()
# d_loss = 0.0
# g_loss = 0.0
# for iter_idx, (imgs, _) in enumerate(tqdm(train_loader)):
# global_steps = writer_dict['train_global_steps']
# for i in range(n_dis):
# multiD[i].train()
# for p in multiD[i].parameters():
# p.requires_grad = True
# multiD_opt[i].zero_grad()
# for p in gen_net.parameters():
# p.requires_grad = True
# # Adversarial ground truths
# x_real = imgs.type(torch.cuda.FloatTensor)
# y_real = torch.ones(imgs.shape[0], 1).cuda()
# # Sample noise as generator input
# z = torch.cuda.FloatTensor(np.random.normal(0, 1, (imgs.shape[0], args.latent_dim)))
# y_fake = torch.zeros(x_real.size()[0], 1).cuda()
# # ---------------------
# # Train Discriminator
# # ---------------------
# for i in range(n_dis):
# multiD_opt[i].zero_grad()
# gen_optimizer.zero_grad()
# x_fake = gen_net(z).detach()
# # assert x_fake.size() == x_real.size()
# flag = True
# for i in range(n_dis):
# if flag:
# D_fake = multiD[i](x_fake)
# D_real = multiD[i](x_real)
# flag = False
# else:
# D_fake = torch.cat((D_fake, multiD[i](x_fake)), dim = 1)
# D_real = torch.cat((D_real, multiD[i](x_real)), dim = 1)
# ind = torch.argmin(D_fake, dim = 1)
# mask = torch.zeros((x_real.size()[0], n_dis)).cuda()
# mask2 = torch.zeros((x_fake.size()[0], n_dis)).cuda()
# for i in range(mask2.size()[0]):
# random_checker = np.random.randint(0,10)
# if random_checker > 7: #100 for no random thingie
# index = np.random.randint(0,n_dis)
# mask[i][index] = 1.0
# mask2[i][index] = 1.0
# else:
# mask[i][ind[i]] = 1.0
# mask2[i][ind[i]] = 1.0
# # for i in range(mask2.size()[0], mask.size()[0]):
# # id = np.random.randint(0,n_dis)
# # if id != ind[i - mask2.size()[0]]:
# # mask[i][id] = 1.0
# D_fake_output = torch.sum(mask2*D_fake, dim = 1)
# D_real_output = torch.sum(mask*D_real, dim = 1)
# #cos = torch.nn.CosineSimilarity()
# #dot = cos(D_fake[0], D_fake[1])
# # cal loss
# d_loss = torch.mean(nn.ReLU(inplace=True)(1.0 - D_real_output)) + \
# torch.mean(nn.ReLU(inplace=True)(1 + D_fake_output))
# # d_loss = criterion(real_validity, y_real) + criterion(fake_validity, y_fake)
# d_loss.backward()
# for i in range(n_dis):
# multiD_opt[i].step()
# writer.add_scalar('d_loss', d_loss.item(), global_steps)
# # -----------------
# # Train Generator
# # -----------------
# if global_steps % args.n_critic == 0:
# gen_optimizer.zero_grad()
# gen_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (args.gen_batch_size, args.latent_dim)))
# fake_img = gen_net(gen_z)
# critic_fakes = []
# lit = np.zeros(n_dis)
# for i in range(n_dis):
# for p in multiD[i].parameters():
# p.requires_grad = False
# critic_fake = multiD[i](fake_img)
# critic_fakes.append(critic_fake)
# lit[i] = torch.sum(critic_fake).item()
# loss_sort = np.argsort(lit)
# weights = np.random.dirichlet(np.ones(n_dis))
# weights = np.sort(weights)[::-1]
# flag = False
# for i in range(len(critic_fakes)):
# if flag == False:
# critic_fake = weights[i]*critic_fakes[loss_sort[i]]
# flag = True
# else:
# critic_fake = torch.add(critic_fake, weights[i]*critic_fakes[loss_sort[i]])
# # cal loss
# g_loss = -torch.mean(critic_fake)
# # g_loss = criterion(fake_validity, y_fake)
# g_loss.backward()
# gen_optimizer.step()
# # adjust learning rate
# if schedulers:
# gen_scheduler, dis_scheduler = schedulers
# g_lr = gen_scheduler.step(global_steps)
# d_lr = dis_scheduler.step(global_steps)
# writer.add_scalar('LR/g_lr', g_lr, global_steps)
# writer.add_scalar('LR/d_lr', d_lr, global_steps)
# # moving average weight
# for p, avg_p in zip(gen_net.parameters(), gen_avg_param):
# avg_p.mul_(0.999).add_(0.001, p.data)
# writer.add_scalar('g_loss', g_loss.item(), global_steps)
# gen_step += 1
# # verbose
# if gen_step and iter_idx % args.print_freq == 0:
# tqdm.write(
# "[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]" %
# (epoch, args.max_epoch, iter_idx % len(train_loader), len(train_loader), d_loss.item(), g_loss.item()))
# if experiment != None:
# experiment.log_metric("gen_loss", g_loss.item())
# experiment.log_metric("dis_loss", d_loss.item())
# writer_dict['train_global_steps'] = global_steps + 1
def train_multi(args, gen_net: nn.Module, multiD, gen_optimizer, multiD_opt, gen_avg_param, train_loader, epoch,
writer_dict, alpha_m, t, check_ep, alpha, schedulers=None, experiment=None):
writer = writer_dict['writer']
gen_step = 0
n_dis = len(multiD)
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv2d') != -1:
if args.init_type == 'normal':
nn.init.normal_(m.weight.data, 0.0, 0.02)
elif args.init_type == 'orth':
nn.init.orthogonal_(m.weight.data)
elif args.init_type == 'xavier_uniform':
nn.init.xavier_uniform(m.weight.data, 1.)
else:
raise NotImplementedError('{} unknown inital type'.format(args.init_type))
elif classname.find('BatchNorm2d') != -1:
nn.init.normal_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0.0)
for imgs,_ in train_loader:
exemplar = imgs[:15].type(torch.cuda.FloatTensor)
break
addno = False
# check_ep = 10
# check_ep = int(check_ep*t)
if n_dis == 1:
check_ep = 5
if epoch > 1 and epoch % check_ep == 0:
check_ep = int(check_ep*t)
exemplar_flag = True
with torch.no_grad():
for dis_index in range(n_dis):
if exemplar_flag:
exemplar_res = multiD[dis_index](exemplar).unsqueeze(0)
exemplar_flag = False
else:
exemplar_res = torch.cat((multiD[dis_index](exemplar).unsqueeze(0), exemplar_res), dim=0)
print(exemplar_res.size())
alpha = 1.5
if n_dis > 2:
alpha = alpha*alpha_m
print('\n',exemplar_res, torch.mean(exemplar_res, dim = 1))
exemplar_max,_ = torch.max(exemplar_res, dim = 1)
exemplar_min,_ = torch.min(exemplar_res, dim = 1)
print('\n',exemplar_min)
# for i in range(n_dis):
# if exemplar_min[i].item() > alpha[0]*torch.mean(exemplar_res[i]).item():
# addno = True
# print(exemplar_min[i].item(), torch.mean(exemplar_res[i]).item())
# if n_dis > 3:
# addno = False
# "\nAdd True but N_dis > 4\n"
# break
# break
for i in range(n_dis):
if addno:
break
if exemplar_max[i].item() > alpha*torch.mean(exemplar_res[i]).item():
addno = True
print(exemplar_min[i].item(), torch.mean(exemplar_res[i]).item())
# if n_dis > 3:
# addno = False
# "\nAdd True but N_dis > 4\n"
# break
break
if addno:
# print('\n adding D \n')
addno = False
d_new = eval('models.'+args.model+'.Discriminator')(args=args).cuda()
# d_new.apply(weights_init)
multiD.append(d_new)
multiD_opt.append(torch.optim.Adam(filter(lambda p: p.requires_grad, multiD[n_dis].parameters()),
args.d_lr, (args.beta1, args.beta2)))
n_dis +=1
# print('\nn_dis: ', n_dis)
# dcopy = deepcopy(multiD[n_dis-1]).cpu()
# sdict = dcopy.state_dict()
# for i, p in enumerate(sdict):
# if i <4:
# continue
# # print(p)
# sdict[p] = 0.01*torch.randn(sdict[p].size())
# dcopy.load_state_dict(sdict)
# multiD.append(dcopy.cuda())
# sdict = multiD[n_dis-1].state_dict()
# for i, p in enumerate(sdict):
# # if i <4:
# # continue
# # print(p)
# sdict[p] = sdict[p] + 0.1*torch.randn(sdict[p].size()).cuda()
# multiD[n_dis-1].load_state_dict(sdict)
# multiD_opt.append(torch.optim.Adam(multiD[n_dis].parameters(), lr = args.lr, betas = (0.5,0.999)))
# n_dis = n_dis + 1
# train mode
gen_net = gen_net.train()
d_loss = 0.0
g_loss = 0.0
for iter_idx, (imgs, _) in enumerate(tqdm(train_loader)):
global_steps = writer_dict['train_global_steps']
for i in range(n_dis):
multiD[i].train()
for p in multiD[i].parameters():
p.requires_grad = True
multiD_opt[i].zero_grad()
for p in gen_net.parameters():
p.requires_grad = True
# Adversarial ground truths
x_real = imgs.type(torch.cuda.FloatTensor)
y_real = torch.ones(imgs.shape[0], 1).cuda()
# Sample noise as generator input
z = torch.cuda.FloatTensor(np.random.normal(0, 1, (imgs.shape[0], args.latent_dim)))
y_fake = torch.zeros(x_real.size()[0], 1).cuda()
# ---------------------
# Train Discriminator
# ---------------------
for i in range(n_dis):
multiD_opt[i].zero_grad()
gen_optimizer.zero_grad()
x_fake = gen_net(z).detach()
# assert x_fake.size() == x_real.size()
flag = True
for i in range(n_dis):
if flag:
D_fake = multiD[i](x_fake)
D_real = multiD[i](x_real)
flag = False
else:
D_fake = torch.cat((D_fake, multiD[i](x_fake)), dim = 1)
D_real = torch.cat((D_real, multiD[i](x_real)), dim = 1)
ind = torch.argmin(D_fake, dim = 1)
mask = torch.zeros((x_real.size()[0], n_dis)).cuda()
mask2 = torch.zeros((x_real.size()[0], n_dis)).cuda()
for i in range(mask.size()[0]):
random_checker = np.random.randint(0,10)
if random_checker > 7:  # 20% chance of a random assignment; raise the threshold to 100 to disable it
index = np.random.randint(0,n_dis)
mask[i][index] = 1.0
mask2[i][index] = 1.0
else:
mask[i][ind[i]] = 1.0
mask2[i][ind[i]] = 1.0
# for i in range(mask.size()[0], mask2.size()[0]):
# mask2[i][np.random.randint(0,n_dis)] = 1.0
D_fake_output = torch.sum(mask2*D_fake, dim = 1)
D_real_output = torch.sum(mask*D_real, dim = 1)
# cal loss
d_loss = torch.mean(nn.ReLU(inplace=True)(1.0 - D_real_output)) + \
torch.mean(nn.ReLU(inplace=True)(1 + D_fake_output))
# d_loss = criterion(real_validity, y_real) + criterion(fake_validity, y_fake)
d_loss.backward()
for i in range(n_dis):
multiD_opt[i].step()
writer.add_scalar('d_loss', d_loss.item(), global_steps)
# -----------------
# Train Generator
# -----------------
if global_steps % args.n_critic == 0:
gen_optimizer.zero_grad()
gen_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (args.gen_batch_size, args.latent_dim)))
fake_img = gen_net(gen_z)
critic_fakes = []
lit = np.zeros(n_dis)
for i in range(n_dis):
for p in multiD[i].parameters():
p.requires_grad = False
critic_fake = multiD[i](fake_img)
critic_fakes.append(critic_fake)
lit[i] = torch.sum(critic_fake).item()
loss_sort = np.argsort(lit)
weights = np.random.dirichlet(np.ones(n_dis))
weights = np.sort(weights)[::-1]
flag = False
for i in range(len(critic_fakes)):
if flag == False:
critic_fake = weights[i]*critic_fakes[loss_sort[i]]
flag = True
else:
critic_fake = torch.add(critic_fake, weights[i]*critic_fakes[loss_sort[i]])
# cal loss
g_loss = -torch.mean(critic_fake)
# g_loss = criterion(fake_validity, y_fake)
g_loss.backward()
gen_optimizer.step()
# adjust learning rate
if schedulers:
gen_scheduler, dis_scheduler = schedulers
g_lr = gen_scheduler.step(global_steps)
d_lr = dis_scheduler.step(global_steps)
writer.add_scalar('LR/g_lr', g_lr, global_steps)
writer.add_scalar('LR/d_lr', d_lr, global_steps)
# moving average weight
for p, avg_p in zip(gen_net.parameters(), gen_avg_param):
avg_p.mul_(0.999).add_(0.001, p.data)
writer.add_scalar('g_loss', g_loss.item(), global_steps)
gen_step += 1
# verbose
if gen_step and iter_idx % args.print_freq == 0:
tqdm.write(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]" %
(epoch, args.max_epoch, iter_idx % len(train_loader), len(train_loader), d_loss.item(), g_loss.item()))
if experiment is not None:
experiment.log_metric("gen_loss", g_loss.item())
experiment.log_metric("dis_loss", d_loss.item())
writer_dict['train_global_steps'] = global_steps + 1
return multiD, multiD_opt, check_ep, alpha
def train(args, gen_net: nn.Module, dis_net: nn.Module, gen_optimizer, dis_optimizer, gen_avg_param, train_loader, epoch,
writer_dict, schedulers=None, experiment=None):
writer = writer_dict['writer']
gen_step = 0
criterion = nn.BCELoss()
# train mode
gen_net = gen_net.train()
dis_net = dis_net.train()
d_loss = 0.0
g_loss = 0.0
for iter_idx, (imgs, _) in enumerate(tqdm(train_loader)):
global_steps = writer_dict['train_global_steps']
# Adversarial ground truths
real_imgs = imgs.type(torch.cuda.FloatTensor)
y_real = torch.ones(imgs.shape[0], 1).cuda()
# Sample noise as generator input
z = torch.cuda.FloatTensor(np.random.normal(0, 1, (imgs.shape[0], args.latent_dim)))
y_fake = torch.zeros(real_imgs.size()[0], 1).cuda()
# ---------------------
# Train Discriminator
# ---------------------
dis_optimizer.zero_grad()
real_validity = dis_net(real_imgs)
fake_imgs = gen_net(z).detach()
assert fake_imgs.size() == real_imgs.size()
fake_validity = dis_net(fake_imgs)
# cal loss
d_loss = torch.mean(nn.ReLU(inplace=True)(1.0 - real_validity)) + \
torch.mean(nn.ReLU(inplace=True)(1 + fake_validity))
# d_loss = criterion(real_validity, y_real) + criterion(fake_validity, y_fake)
d_loss.backward()
dis_optimizer.step()
writer.add_scalar('d_loss', d_loss.item(), global_steps)
# -----------------
# Train Generator
# -----------------
if global_steps % args.n_critic == 0:
gen_optimizer.zero_grad()
gen_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (args.gen_batch_size, args.latent_dim)))
gen_imgs = gen_net(gen_z)
fake_validity = dis_net(gen_imgs)
y_fake = torch.zeros(args.gen_batch_size, 1).cuda()
# cal loss
g_loss = -torch.mean(fake_validity)
# g_loss = criterion(fake_validity, y_fake)
g_loss.backward()
gen_optimizer.step()
# adjust learning rate
if schedulers:
gen_scheduler, dis_scheduler = schedulers
g_lr = gen_scheduler.step(global_steps)
d_lr = dis_scheduler.step(global_steps)
writer.add_scalar('LR/g_lr', g_lr, global_steps)
writer.add_scalar('LR/d_lr', d_lr, global_steps)
# moving average weight
for p, avg_p in zip(gen_net.parameters(), gen_avg_param):
avg_p.mul_(0.999).add_(0.001, p.data)
writer.add_scalar('g_loss', g_loss.item(), global_steps)
gen_step += 1
# verbose
if gen_step and iter_idx % args.print_freq == 0:
tqdm.write(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]" %
(epoch, args.max_epoch, iter_idx % len(train_loader), len(train_loader), d_loss.item(), g_loss.item()))
if experiment is not None:
experiment.log_metric("gen_loss", g_loss.item())
experiment.log_metric("dis_loss", d_loss.item())
writer_dict['train_global_steps'] = global_steps + 1
def validate(args, fixed_z, fid_stat, gen_net: nn.Module, writer_dict):
writer = writer_dict['writer']
global_steps = writer_dict['valid_global_steps']
# eval mode
gen_net = gen_net.eval()
# generate images
sample_imgs = gen_net(fixed_z)
img_grid = make_grid(sample_imgs, nrow=8, normalize=True, scale_each=True)
# get fid and inception score
fid_buffer_dir = os.path.join(args.path_helper['sample_path'], 'fid_buffer')
try:
os.makedirs(fid_buffer_dir)
except:
pass
eval_iter = args.num_eval_imgs // args.eval_batch_size
img_list = list()
with torch.no_grad():
for iter_idx in tqdm(range(eval_iter), desc='sample images'):
z = torch.cuda.FloatTensor(np.random.normal(0, 1, (args.eval_batch_size, args.latent_dim)))
# Generate a batch of images
gen_imgs = gen_net(z).mul_(127.5).add_(127.5).clamp_(0.0, 255.0).permute(0, 2, 3, 1).to('cpu', torch.uint8).numpy()
for img_idx, img in enumerate(gen_imgs):
file_name = os.path.join(fid_buffer_dir, f'iter{iter_idx}_b{img_idx}.png')
imsave(file_name, img)
img_list.extend(list(gen_imgs))
# get inception score
logger.info('=> calculate inception score')
torch.cuda.empty_cache()
mean, std = get_inception_score(img_list)
# get fid score
logger.info('=> calculate fid score')
fid_score = 0 #calculate_fid_given_paths([fid_buffer_dir, fid_stat], inception_path=None)
# os.system('rm -r {}'.format(fid_buffer_dir))
writer.add_image('sampled_images', img_grid, global_steps)
writer.add_scalar('Inception_score/mean', mean, global_steps)
writer.add_scalar('Inception_score/std', std, global_steps)
# writer.add_scalar('FID_score', fid_score, global_steps)
writer_dict['valid_global_steps'] = global_steps + 1
return mean, fid_score, sample_imgs
class LinearLrDecay(object):
def __init__(self, optimizer, start_lr, end_lr, decay_start_step, decay_end_step):
assert start_lr > end_lr
self.optimizer = optimizer
self.delta = (start_lr - end_lr) / (decay_end_step - decay_start_step)
self.decay_start_step = decay_start_step
self.decay_end_step = decay_end_step
self.start_lr = start_lr
self.end_lr = end_lr
def step(self, current_step):
if current_step <= self.decay_start_step:
lr = self.start_lr
elif current_step >= self.decay_end_step:
lr = self.end_lr
else:
lr = self.start_lr - self.delta * (current_step - self.decay_start_step)
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
return lr
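# Usage sketch (values illustrative): anneal the generator LR linearly from 2e-4 to 0
# between global step 0 and step 100000, matching how the schedulers are stepped in
# the training loops above.
#
#     gen_scheduler = LinearLrDecay(gen_optimizer, 2e-4, 0.0, 0, 100000)
#     g_lr = gen_scheduler.step(global_steps)  # also writes the new LR into the optimizer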
def load_params(model, new_param):
for p, new_p in zip(model.parameters(), new_param):
p.data.copy_(new_p)
def copy_params(model):
flatten = deepcopy(list(p.data for p in model.parameters()))
return flatten
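# Minimal sketch of how copy_params / load_params pair with the moving-average update
# used in the training loops above (the model and decay constant are illustrative).
if __name__ == "__main__":
    net = nn.Linear(4, 4)
    gen_avg_param = copy_params(net)             # EMA state, detached from the model
    for p, avg_p in zip(net.parameters(), gen_avg_param):
        avg_p.mul_(0.999).add_(0.001, p.data)    # same update as inside train()
    backup = copy_params(net)                    # remember the raw training weights
    load_params(net, gen_avg_param)              # evaluate/sample with averaged weights
    load_params(net, backup)                     # restore the training weights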
| [
"torch.argmin",
"torch.ones",
"torch.sum",
"torch.nn.init.constant_",
"torch.nn.init.normal_",
"torch.nn.BCELoss",
"torch.nn.init.orthogonal_",
"torch.neg",
"torch.zeros",
"torch.nn.functional.softplus",
"torch.min",
"torch.max",
"torch.nn.ReLU",
"torch.cuda.empty_cache",
"torch.log",
"torch.nn.init.xavier_uniform",
"torch.no_grad",
"torch.add",
"torch.mean"
] | 1.1.0 | gargrohin/sngan.pytorch | 58d200c731935360f1b0fdcb1865c366c633e56c |
1.6 | import argparse
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.utils import data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
import torch.nn.functional as F
from copy import deepcopy
import matplotlib.pyplot as plt
import time
import loss
from torch.utils.tensorboard import SummaryWriter
from event_ae import EventAE
from chamfer import ChamferDistance, ChamferLoss
from dataloader import EventStreamDataset, EventStreamArray
from data_utils import *
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_file",
type=str,
default="data/MSTrain_bytestream.txt",
help="training data filename",
)
parser.add_argument("--batch_size", type=int,
default=1000, help="input batch size")
parser.add_argument("--batch_num", type=int, default=50,
help="number of batches")
parser.add_argument("--data_len", type=int, default=2,
help="event element length")
parser.add_argument("--tcode", type=bool, default=False,
help="consider timestamps")
parser.add_argument(
"--nepoch", type=int, default=5000, help="number of epochs to train for"
)
parser.add_argument("--latent_size", type=int, default=8)
parser.add_argument(
"--rec_loss",
type=str,
default="huber",
help="type of loss: mse, huber, bce, chamfer",
)
parser.add_argument(
"--decoder", type=str, default="image", help="decoder type: stream or image"
)
parser.add_argument("--outf", type=str, default="weights",
help="output folder")
parser.add_argument("--model", type=str, default="", help="model path")
parser.add_argument(
"--norm_type",
type=str,
default="none",
help="normalization type: scale: [0, 1]; center: [-1, 1]",
)
parser.add_argument("--arch", type=str, default="vanilla")
opt = parser.parse_args()
print(opt)
def blue(x): return "\033[94m" + x + "\033[0m"
opt.manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
try:
os.makedirs(opt.outf)
except OSError:
pass
writer = SummaryWriter("runs/str_to_img_test")
# Params:
# n_events, data_size for stream decoder
# Height, width for image decoder
H = 32
W = 32
params = [H, W]
event_dataset = EventStreamArray(
opt.input_file, opt.batch_num, opt.batch_size, opt.data_len
)
"""
batch_size_total = opt.batch_size * opt.batch_num
train_loader = data.DataLoader(
event_dataset,
batch_size=batch_size_total,
shuffle=False,
num_workers=0,
drop_last=True,
)
"""
data_utils = EventDataUtils(32, 32, opt.norm_type)
enet = EventAE(
opt.data_len, opt.latent_size, params, decoder=opt.decoder, norm_type=opt.norm_type
)
if opt.model != "":
enet.load_state_dict(torch.load(opt.model))
enet.cuda()
init = True
event_np_stack = np.empty([opt.batch_num, opt.batch_size, 4], dtype=np.float32)
frames_stack = np.empty([opt.batch_num, H * W], dtype=np.float32)
if opt.data_len == 3:
pol = True
else:
pol = False
# NOTE: `test_loader` and `event_array` are referenced below but are not defined in this
# excerpt; they are assumed to come from the surrounding project (e.g. a DataLoader and
# an EventStreamArray built from the same input file as `event_dataset` above).
with torch.no_grad():
for i, data in enumerate(test_loader, 0):
# events = data_utils.normalize(EventExtractor(data, batch_num=1))
idx = random.randint(0, 1000000)
events = data_utils.normalize(event_array.get_event_stack(idx))
events = Variable(events)
events = events.transpose(2, 1)
events = events.cuda()
recon, z = enet(events)
events = events.transpose(2, 1).contiguous()
if opt.decoder == "stream":
recon = recon.transpose(2, 1).contiguous()
data_utils.compare_frames(events, recon)
| [
"torch.autograd.Variable",
"torch.no_grad",
"torch.manual_seed",
"torch.load",
"torch.utils.tensorboard.SummaryWriter"
] | 1.6.0 | microsoft/event-vae-rl | cb64c2809bcbfec81e84fff93a912f65c72f73d3 |
1.10 | import torch
from torch.utils.data import Dataset
import cv2
import numpy as np
import pandas as pd
__all__ = ['VideoDataset', 'VideoLabelDataset']
class VideoDataset(Dataset):
""" Video Dataset for loading video.
It will output only path of video (neither video file path or video folder path).
However, you can load video as torch.Tensor (C x L x H x W).
See below for an example of how to read video as torch.Tensor.
Your video dataset can be image frames or video files.
Args:
csv_file (str): path fo csv file which store path of video file or video folder.
the format of csv_file should like:
# example_video_file.csv (if the videos of dataset is saved as video file)
path
~/path/to/video/file1.mp4
~/path/to/video/file2.mp4
~/path/to/video/file3.mp4
~/path/to/video/file4.mp4
# example_video_folder.csv (if the videos of dataset is saved as image frames)
path
~/path/to/video/folder1/
~/path/to/video/folder2/
~/path/to/video/folder3/
~/path/to/video/folder4/
Example:
if the videos of dataset is saved as video file
>>> import torch
>>> from datasets import VideoDataset
>>> import transforms
>>> dataset = VideoDataset(
>>> "example_video_file.csv",
>>> transform = transforms.VideoFilePathToTensor() # See more options at transforms.py
>>> )
>>> data_loader = torch.utils.data.DataLoader(dataset, batch_size = 1, shuffle = True)
>>> for videos in data_loader:
>>> print(videos.size())
        if the videos of the dataset are saved as frames in video folders
        The tree looks like this (the image names are arranged in ascending frame order):
~/path/to/video/folder1
├── frame-001.jpg
├── frame-002.jpg
├── frame-003.jpg
└── frame-004.jpg
>>> import torch
>>> from datasets import VideoDataset
>>> import transforms
>>> dataset = VideoDataset(
>>> "example_video_folder.csv",
>>> transform = transforms.VideoFolderPathToTensor() # See more options at transforms.py
>>> )
>>> data_loader = torch.utils.data.DataLoader(dataset, batch_size = 1, shuffle = True)
>>> for videos in data_loader:
>>> print(videos.size())
"""
def __init__(self, csv_file, transform=None):
self.dataframe = pd.read_csv(csv_file)
self.transform = transform
def __len__(self):
"""
Returns:
            int: number of rows of the csv file (not including the header).
"""
return len(self.dataframe)
def __getitem__(self, index):
""" get a video """
video = self.dataframe.iloc[index].path
if self.transform:
video = self.transform(video)
return video
class VideoLabelDataset(Dataset):
""" Dataset Class for Loading Video.
        It will output the video path and its label. However, you can load the video as a torch.Tensor (C x L x H x W).
        See below for an example of how to read a video as a torch.Tensor.
        You can load the tensor from a video file or a video folder in the same way as with VideoDataset.
Args:
        csv_file (str): path of the csv file which stores the path and label of each video file (or video folder).
            the format of csv_file should be like:
path, label
~/path/to/video/file1.mp4, 0
~/path/to/video/file2.mp4, 1
~/path/to/video/file3.mp4, 0
~/path/to/video/file4.mp4, 2
Example:
>>> import torch
>>> import transforms
        >>> dataset = VideoLabelDataset(
>>> "example_video_file_with_label.csv",
>>> transform = transforms.VideoFilePathToTensor() # See more options at transforms.py
>>> )
>>> data_loader = torch.utils.data.DataLoader(dataset, batch_size = 1, shuffle = True)
>>> for videos, labels in data_loader:
>>> print(videos.size())
"""
def __init__(self, csv_file, transform=None):
self.dataframe = pd.read_csv(csv_file)
self.transform = transform
def __len__(self):
"""
Returns:
            int: number of rows of the csv file (not including the header).
"""
return len(self.dataframe)
def __getitem__(self, index):
""" get a video and its label """
video = self.dataframe.iloc[index].path
label = self.dataframe.iloc[index].label
if self.transform:
video = self.transform(video)
return video, label
if __name__ == '__main__':
import torchvision
import PIL
import transforms
# test for VideoDataset
dataset = VideoDataset(
'./data/example_video_file.csv',
)
path = dataset[0]
print(path)
test_loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True)
for video in test_loader:
print(video)
# test for VideoLabelDataset
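    # The transform pipeline below reads the video file into a (C x L x H x W) tensor
    # (at most 50 frames sampled at 10 fps, padding with the last frame), takes a random
    # 512x512 crop, and resizes each frame to 256x256.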
dataset = VideoLabelDataset(
'./data/example_video_file_with_label.csv',
transform=torchvision.transforms.Compose([
transforms.VideoFilePathToTensor(max_len=50, fps=10, padding_mode='last'),
transforms.VideoRandomCrop([512, 512]),
transforms.VideoResize([256, 256]),
])
)
video, label = dataset[0]
print(video.size(), label)
frame1 = torchvision.transforms.ToPILImage()(video[:, 29, :, :])
frame2 = torchvision.transforms.ToPILImage()(video[:, 39, :, :])
frame1.show()
frame2.show()
test_loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True)
for videos, labels in test_loader:
        print(videos.size(), labels)
| [
"torch.utils.data.DataLoader"
] | 1.10.0 | Jo951128/2021-2-MIP | 511e0a38816d16fdba9631f76cf913ba51c43138 |
1.1 | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
from torch.nn import functional as F
import torch.utils.data
from torchvision import datasets, transforms
import torch.distributions
import egg.core as core
from egg.zoo.language_bottleneck.mnist_adv.archs import Sender, Receiver
from egg.zoo.language_bottleneck.relaxed_channel import AlwaysRelaxedWrapper
from egg.core import EarlyStopperAccuracy
from egg.zoo.language_bottleneck.mnist_classification.data import DoubleMnist
def diff_loss_symbol(_sender_input, _message, _receiver_input, receiver_output, labels):
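    # Per-sample NLL loss on the receiver's log-probabilities, averaged for training;
    # per-sample accuracy is returned for logging.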
loss = F.nll_loss(receiver_output, labels, reduction='none').mean()
acc = (receiver_output.argmax(dim=1) == labels).float()
return loss, {'acc': acc}
def get_params(params):
parser = argparse.ArgumentParser()
parser.add_argument('--temperature', type=float, default=1.0,
help="GS temperature for the sender (default: 1)")
parser.add_argument('--early_stopping_thr', type=float, default=1.0,
help="Early stopping threshold on accuracy (default: 1.0)")
parser.add_argument('--softmax_non_linearity', type=int, default=0,
help="Disable GS training, treat channel as softmax non-linearity (default: 0)")
parser.add_argument('--linear_channel', type=int, default=0,
help="Disable GS training, treat channel as a linear connection (default: 0)")
args = core.init(parser, params)
assert not (args.softmax_non_linearity == 1 and args.linear_channel == 1)
return args
def main(params):
opts = get_params(params)
print(opts)
kwargs = {'num_workers': 1, 'pin_memory': True} if opts.cuda else {}
transform = transforms.ToTensor()
train_dataset = datasets.MNIST('./data', train=True, download=True,
transform=transform)
test_dataset = datasets.MNIST('./data', train=False, download=False,
transform=transform)
n_classes = 10
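    # Combined labels from DoubleMnist lie in [0, 99]; fold them down to n_classes classes via modulo.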
label_mapping = torch.LongTensor([x % n_classes for x in range(100)])
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=opts.batch_size, shuffle=True, **kwargs)
train_loader = DoubleMnist(train_loader, label_mapping)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=16 * 1024, shuffle=False, **kwargs)
test_loader = DoubleMnist(test_loader, label_mapping)
sender = Sender(vocab_size=opts.vocab_size, linear_channel=opts.linear_channel == 1,
softmax_channel=opts.softmax_non_linearity)
receiver = Receiver(vocab_size=opts.vocab_size, n_classes=n_classes)
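    # Unless the channel is a plain linear or softmax layer, wrap the sender for Gumbel-Softmax (GS) relaxed training.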
if opts.softmax_non_linearity == 0 and opts.linear_channel == 0:
sender = AlwaysRelaxedWrapper(sender, temperature=opts.temperature)
game = core.SymbolGameGS(sender, receiver, diff_loss_symbol)
optimizer = core.build_optimizer(game.parameters())
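    # Train with console JSON logging and stop early once validation accuracy reaches the threshold.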
trainer = core.Trainer(game=game, optimizer=optimizer, train_data=train_loader,
validation_data=test_loader,
callbacks=[core.ConsoleLogger(as_json=True, print_train_loss=True),
EarlyStopperAccuracy(opts.early_stopping_thr)])
trainer.train(n_epochs=opts.n_epochs)
core.close()
if __name__ == "__main__":
import sys
main(sys.argv[1:])
| [
"torch.nn.functional.nll_loss"
] | 1.1.0 | Slowika/GameBias-EmeCom2020 | 5b94c47559f8202bca99c26fc1bcb078dd0509a6 |