version (21 classes) | code (225 - 174k chars) | apis (sequence) | full_version (1 - 6 chars) | repo_name (10 - 107 chars) | hexsha (40 chars)
---|---|---|---|---|---|
1.10 | import torch
import matplotlib.image as img
import cv2
import dlib
from imutils.face_utils import *
import numpy as np
# image = img.imread("extra//test.jpg")
# image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR) # opencvImage
dlib_path = 'extra//shape_predictor_68_face_landmarks.dat'
def get_face(img):
global detector, landmark_predictor
# Declare the face detector and load the pre-trained facial landmark model
detector = dlib.get_frontal_face_detector()
landmark_predictor = dlib.shape_predictor(dlib_path)
# Run face detection
face_rects = detector(img, 1)
for i, d in enumerate(face_rects):
# Read the top-left and bottom-right coordinates of the bounding box
x1 = d.left()
y1 = d.top()
x2 = d.right()
y2 = d.bottom()
# Extract the facial landmarks within this coordinate range
shape = landmark_predictor(img, d)
# Convert the landmarks to a numpy array
shape = shape_to_np(shape) # (68,2)
# Use dlib to extract the face region, resize the face image (512x512 here) and return it
# Getting the face crop itself is simple: just slice the image with the bounding-box coordinates
crop_img = img[y1:y2, x1:x2, :]
try:
resize_img = cv2.resize(crop_img, (512, 512))
# cv2.imshow("OpenCV",resize_img)
# cv2.waitKey()
return resize_img
except:
return np.array([0])
return np.array([0])
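# --- Illustrative usage sketch (not part of the original file) ---
# A minimal example of how get_face() might be called on a single image loaded with
# OpenCV; the image path below is an assumption (it reuses the commented-out test path above).
def example_get_face_usage(image_path='extra//test.jpg'):
    image = cv2.imread(image_path)          # BGR uint8 array of shape (H, W, 3)
    face = get_face(image)                  # 512x512 face crop, or np.array([0]) if detection fails
    if face.size > 1:
        cv2.imwrite('face_crop.jpg', face)  # persist the cropped face for inspection
    return face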
def predict_image(logger, image, model):
try:
face = get_face(image) # predict target
face = torch.tensor(face, dtype=torch.float32)/255 # normalize
face = face.permute(2, 0, 1).unsqueeze(0).cuda()
# model = torch.load('run\SCUT\pre_googlenet\experiment_6\pre_googlenet.pkl')
# model.load_state_dict(torch.load('run\SCUT\pre_googlenet\experiment_6\checkpoint.pth.tar')['state_dict'])
outputs = model(face) # [bsz, c, h, w]
_, predicted = torch.max(outputs.data, 1)
score = int(predicted.item()) * 20
# logger.info("Predict Score : {}".format(score))
return score
except Exception as e:
# print(e)
return 0 | [
"torch.tensor",
"torch.max"
] | 1.10.1 | Leyan529/ImageClassificationPL | a4be75f4525828100d8d278e46ff5dccd829af1a |
1.9 | from kivymd.app import MDApp
from kivy.uix.widget import Widget
from kivy.uix.actionbar import ActionBar
from kivy.uix.scrollview import ScrollView
from kivy.uix.boxlayout import BoxLayout
from kivymd.theming import ThemableBehavior
from kivymd.uix.list import OneLineListItem, MDList, TwoLineListItem, ThreeLineListItem
from kivymd.uix.list import MDList
from kivymd.uix.textfield import MDTextField
from kivy.uix.button import Button
from kivy.lang import Builder
from kivymd.toast import toast
from kivy.uix.screenmanager import Screen, ScreenManager
import time
from kivy.core.window import Window
from kivymd.uix.label import MDLabel
from kivy.uix.modalview import ModalView
from kivymd.uix.filemanager import MDFileManager
from kivymd.theming import ThemeManager
import requests
from kivy.uix.popup import Popup
import os
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from lightgbm import LGBMClassifier
import torch
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from XBNet.training_utils import training,predict
from XBNet.models import XBNETClassifier
from XBNet.run import run_XBNET
from os import environ
import pickle
def suppress_qt_warnings():
environ["QT_DEVICE_PIXEL_RATIO"] = "0"
environ["QT_AUTO_SCREEN_SCALE_FACTOR"] = "1"
environ["QT_SCREEN_SCALE_FACTORS"] = "1"
environ["QT_SCALE_FACTOR"] = "1"
Login_Page = """
ScreenManager:
LoginPage
ModelDetails
FileManage
<LoginPage>:
name:"Login"
MDFloatLayout:
Image:
id: imageView
source: 'Untitled.png'
allow_stretch: True
halign: 'center'
pos_hint: {"center_x":0.23, "center_y":0.5}
MDRoundFlatIconButton:
id: filemanage
text: "Select Dataset"
icon: "folder"
pos_hint: {'center_x': .77, 'center_y': .85}
on_release: root.manager.current = "File"
MDTextField:
id: modelname
hint_text:"Enter the model name: "
pos_hint:{"center_x":0.77,"center_y":0.7}
current_hint_text_color:0,0,0,1
size_hint_x:0.4
required: True
MDTextField:
id: layers
hint_text:"Enter number of layers(For XBNet or NN): "
pos_hint:{"center_x":0.77,"center_y":0.55}
current_hint_text_color:0,0,0,1
size_hint_x:0.4
MDTextField:
id: target
hint_text:"Enter name of target feature: "
pos_hint:{"center_x":0.77,"center_y":0.40}
current_hint_text_color:0,0,0,1
size_hint_x:0.4
required: True
MDRaisedButton:
text:"Build model"
pos_hint:{"center_x":0.77,"center_y":0.25}
size_hint_x:0.3
on_release: root.manager.current = "Model"
on_press: app.get_model(modelname.text,target.text,layers.text)
theme_text_color:"Custom"
text_color:0,0,0,1
<ModelDetails>:
name:"Model"
MDFloatLayout:
Image:
id: imageView
source: 'Untitled.png'
allow_stretch: True
halign: 'center'
pos_hint: {"center_x":0.23, "center_y":0.5}
MDRaisedButton:
text:"Train"
pos_hint:{"center_x":0.63,"center_y":0.15}
size_hint_x:0.2
# on_release: root.manager.current = "Model"
on_press: app.get_layers()
theme_text_color:"Custom"
text_color:0,0,0,1
MDRaisedButton:
text:"Predict"
pos_hint:{"center_x":0.88,"center_y":0.15}
size_hint_x:0.2
# on_release: root.manager.current = "Model"
on_press: app.predict()
theme_text_color:"Custom"
text_color:0,0,0,1
<FileManage>:
name:"File"
BoxLayout:
FileChooserListView:
canvas.before:
Color:
rgb: 0.1, 0.2, 0.5
Rectangle:
pos: self.pos
size: self.size
on_selection: app.get_path(*args)
"""
class LoginPage(Screen):
pass
class ModelDetails(Screen):
pass
class CustomDropDown(BoxLayout):
pass
class FileManage(Screen):
pass
sm = ScreenManager()
sm.add_widget(LoginPage(name="Login"))
sm.add_widget(ModelDetails(name="Model"))
sm.add_widget(FileManage(name="File"))
class XBNetGUI(MDApp):
def __init__(self):
super(XBNetGUI, self).__init__()
self.predict_phase = False
class ContentNavigationDrawer(BoxLayout):
pass
class DrawerList(ThemableBehavior, MDList):
pass
def build(self):
self.theme_cls.primary_palette = "Blue"
login_page = Builder.load_string(Login_Page)
return login_page
def get_layers(self):
self.layers_dims = []
if self.model == "xbnet" or self.model == "neural network":
for i,j in self.fields.items():
self.layers_dims.append(int(j.text))
print(j.text)
elif (self.model == "xgboost" or self.model == "randomforest"
or self.model == "decision tree" or self.model == "lightgbm"):
for i,j in self.fields.items():
try:
self.layers_dims.append(int(j.text))
except:
self.layers_dims.append(float(j.text))
self.train()
def process_input(self):
suppress_qt_warnings()
column_to_predict = self.target
data = pd.read_csv(self.file_selected)
n_df = len(data)
label_encoded = {}
imputations = {}
for i in data.columns:
imputations[i] = data[i].mode()[0]
if data[i].isnull().sum() / n_df >= 0.15:
data.drop(i, axis=1, inplace=True)
elif data[i].isnull().sum() / n_df < 0.15 and data[i].isnull().sum() / n_df > 0:
data[i].fillna(data[i].mode()[0], inplace=True)
imputations[i] = data[i].mode()[0]
columns_object = list(data.dtypes[data.dtypes == object].index)
for i in columns_object:
if i != column_to_predict:
if data[i].nunique() / n_df < 0.4:
le = LabelEncoder()
data[i] = le.fit_transform(data[i])
label_encoded[i] = le
else:
data.drop(i, axis=1, inplace=True)
x_data = data.drop(column_to_predict, axis=1).to_numpy()
self.columns_finally_used = data.drop(column_to_predict, axis=1).columns
y_data = data[column_to_predict].to_numpy()
self.label_y = False
if y_data.dtype == object:
self.label_y = True
self.y_label_encoder = LabelEncoder()
y_data = self.y_label_encoder.fit_transform(y_data)
self.label_encoded = label_encoded
self.imputations = imputations
toast("Number of features are: " + str(x_data.shape[1]) +
" classes are: "+ str(len(np.unique(y_data))),duration=5)
self.x_data = x_data
self.y_data = y_data
def train(self):
X_train, X_test, y_train, y_test = train_test_split(self.x_data, self.y_data,
test_size=0.3, random_state=0)
if self.model == "xbnet" or self.model =="neural network":
print(self.layers_dims)
m = self.model
model = XBNETClassifier( X_train, y_train, self.layers,
input_through_cmd=True, inputs_for_gui=self.layers_dims,
num_layers_boosted=self.n_layers_boosted
)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
self.model, self.acc, self.lo, self.val_ac, self.val_lo = run_XBNET(X_train, X_test, y_train, y_test, model, criterion, optimizer, 32, 10)
model.save(m+"_testAccuracy_" +str(max(self.val_ac))[:4] +"_trainAccuracy_" +
str(max(self.acc))[:4]+ ".pt",)
toast("Test Accuracy is: " +str(max(self.val_ac))[:4] +" and Training Accuracy is: " +
str(max(self.acc))[:4] + " and model is saved.",duration= 10)
elif (self.model == "xgboost" or self.model == "randomforest"
or self.model == "decision tree" or self.model == "lightgbm"):
if self.model == "xgboost":
self.model_tree = XGBClassifier(n_estimators=self.layers_dims[0],
max_depth=self.layers_dims[1],
learning_rate= self.layers_dims[2],
subsample= self.layers_dims[3],
colsample_bylevel = self.layers_dims[4],
random_state=0,n_jobs=-1,
)
self.model_tree.fit(X_train, y_train,eval_metric="mlogloss")
training_acc = self.model_tree.score(X_train, y_train)
testing_acc = self.model_tree.score(X_test,y_test)
elif self.model == "randomforest":
self.model_tree = RandomForestClassifier(n_estimators=self.layers_dims[0],
max_depth=self.layers_dims[1],
random_state=0,n_jobs=-1)
self.model_tree.fit(X_train, y_train)
training_acc = self.model_tree.score(X_train, y_train)
testing_acc = self.model_tree.score(X_test,y_test)
elif self.model == "decision tree":
self.model_tree = DecisionTreeClassifier(max_depth=self.layers_dims[1],random_state=0)
self.model_tree.fit(X_train, y_train)
training_acc = self.model_tree.score(X_train, y_train)
testing_acc = self.model_tree.score(X_test,y_test)
elif self.model == "lightgbm":
self.model_tree = LGBMClassifier(n_estimators=self.layers_dims[0],
max_depth=self.layers_dims[1],
learning_rate= self.layers_dims[2],
subsample= self.layers_dims[3],
colsample_bylevel = self.layers_dims[4],
random_state=0,n_jobs=-1,)
self.model_tree.fit(X_train, y_train,eval_metric="mlogloss")
training_acc = self.model_tree.score(X_train, y_train)
testing_acc = self.model_tree.score(X_test,y_test)
toast(text="Training and Testing accuracies are "+str(training_acc*100)
+" "+str(testing_acc*100) + " respectively and model is stored",duration=7)
with open(self.model+"_testAccuracy_" +str(testing_acc)[:4] +"_trainAccuracy_" +
str(training_acc)[:4]+ ".pkl", 'wb') as outfile:
pickle.dump(self.model_tree,outfile)
def predict(self):
self.predict_phase = True
self.root.current = "File"
def predict_results(self):
df = pd.read_csv(self.file_selected)
data = df[self.columns_finally_used]
for i in data.columns:
if data[i].isnull().sum() > 0:
data[i].fillna(self.imputations[i], inplace=True)
if i in self.label_encoded.keys():
data[i] = self.label_encoded[i].transform(data[i])
if (self.model == "xgboost" or self.model == "randomforest"
or self.model == "decision tree" or self.model == "lightgbm"):
predictions = self.model_tree.predict(data.to_numpy())
else:
predictions = predict(self.model, data.to_numpy())
if self.label_y == True:
df[self.target] = self.y_label_encoder.inverse_transform(predictions)
else:
df[self.target] = predictions
df.to_csv("Predicted_Results.csv",index=False)
toast(text="Predicted_Results.csv in this directory has the results",
duration = 10)
def get_model(self,model,target,layers):
self.model = model.lower()
if len(layers) > 0:
self.layers = int(layers)
self.target = target
if self.model.lower() == "xbnet":
self.n_layers_boosted = 1
self.net_model()
elif (self.model == "xgboost" or self.model == "randomforest"
or self.model == "decision tree" or self.model == "lightgbm"):
self.tree_model()
elif self.model.lower() == "neural network":
self.n_layers_boosted = 0
self.net_model()
self.process_input()
def net_model(self):
layout = self.root.get_screen('Model')
gap = 1/(2*self.layers+2)
counter = 1
self.fields = {}
for i in range(self.layers):
lab1 = MDTextField(hint_text="Enter input dimensions of layer "+ str(i+1) +":",
pos_hint={"center_x":0.77,"center_y":1-gap*(counter)},
size_hint_x=.4, current_hint_text_color=[0,0,0,1] )
counter+=1
lab2 = MDTextField(hint_text="Enter output dimensions of layer "+ str(i+1) +":",
pos_hint={"center_x":0.77,"center_y":1-gap*(counter)},
size_hint_x=.4, current_hint_text_color=[0,0,0,1] )
counter +=1
layout.add_widget(lab1)
layout.add_widget(lab2)
self.fields["input_"+str(i+1)] = lab1
self.fields["output_" + str(i+1)] = lab2
def tree_model(self):
layout = self.root.get_screen('Model')
self.fields = {}
lab1 = MDTextField(hint_text="Enter number of estimators: ",
pos_hint={"center_x":0.77,"center_y":0.85},
size_hint_x=.4, current_hint_text_color=[0,0,0,1] )
lab2 = MDTextField(hint_text="Enter depth of trees[default:6](Typical 3-10): ",
pos_hint={"center_x":0.77,"center_y":0.7},
size_hint_x=.4, current_hint_text_color=[0,0,0,1] )
lab3 = MDTextField(hint_text="Enter learning rate for XGBoost(eta)[default:0.3]: ",
pos_hint={"center_x":0.77,"center_y":0.55},
size_hint_x=.4, current_hint_text_color=[0,0,0,1] )
lab4 = MDTextField(hint_text="Enter size of subsample[default:1](Typical 0.5-1): ",
pos_hint={"center_x":0.77,"center_y":0.4},
size_hint_x=.4, current_hint_text_color=[0,0,0,1] )
lab5 = MDTextField(hint_text="Enter size of colsample_bytree[default:1](Typical 0.5-1): ",
pos_hint={"center_x":0.77,"center_y":0.25},
size_hint_x=.4, current_hint_text_color=[0,0,0,1] )
layout.add_widget(lab1)
layout.add_widget(lab2)
layout.add_widget(lab3)
layout.add_widget(lab4)
layout.add_widget(lab5)
self.fields["no_trees"] = lab1
self.fields["depth"] = lab2
self.fields["learning_rate"] = lab3
self.fields["subsample"] = lab4
self.fields["colsample_bytree"] = lab5
def get_path(self,*args):
print(args)
self.file_selected = args[1][0]
print(self.file_selected)
if self.predict_phase:
self.root.current = "Model"
print("hellooo")
self.predict_results()
else:
self.root.current = "Login"
if __name__ == "__main__":
XBNetGUI().run() | [
"torch.nn.CrossEntropyLoss"
] | 1.9.0 | tusharsarkar3/XBNet | 01e385f1c0a446eb38f4dd59ee9c510170bf096b |
0.4 | import json
import os
import numpy as np
import torch
from zerogercrnn.experiments.ast_level.utils import read_non_terminals
from zerogercrnn.lib.constants import EMPTY_TOKEN_ID, UNKNOWN_TOKEN_ID, EOF_TOKEN
from zerogercrnn.lib.metrics import Metrics, BaseAccuracyMetrics, IndexedAccuracyMetrics, MaxPredictionAccuracyMetrics, TopKAccuracy
class NonTerminalsMetricsWrapper(Metrics):
"""Metrics that extract non-terminals from target and pass non-terminals tensor to base metrics."""
def __init__(self, base: Metrics):
super().__init__()
self.base = base
def drop_state(self):
self.base.drop_state()
def report(self, prediction_target):
prediction, target = prediction_target
self.base.report((prediction, target.non_terminals))
def get_current_value(self, should_print=False):
return self.base.get_current_value(should_print)
def decrease_hits(self, number):
self.base.decrease_hits(number)
class SingleNonTerminalAccuracyMetrics(Metrics):
"""Metrics that show accuracies per non-terminal. It should not be used for plotting, but to
print results on console during model evaluation."""
def __init__(self, non_terminals_file, results_dir=None, group=False, dim=2):
"""
:param non_terminals_file: file with json of non-terminals
:param results_dir: where to save json with accuracies per non-terminal
:param dim: dimension to run max function on for predicted values
"""
super().__init__()
print('SingleNonTerminalAccuracyMetrics created!')
self.non_terminals = read_non_terminals(non_terminals_file)
self.non_terminals_number = len(self.non_terminals)
self.results_dir = results_dir
self.group = group
self.dim = dim
self.accuracies = [IndexedAccuracyMetrics(label='ERROR') for _ in self.non_terminals]
def drop_state(self):
for accuracy in self.accuracies:
accuracy.drop_state()
def report(self, data):
prediction, target = data
if self.dim is None:
predicted = prediction
else:
_, predicted = torch.max(prediction, dim=self.dim)
predicted = predicted.view(-1)
target = target.non_terminals.view(-1)
for cur in range(len(self.non_terminals)):
indices = (target == cur).nonzero().squeeze()
self.accuracies[cur].report(predicted, target, indices)
def get_current_value(self, should_print=False):
result = []
for cur in range(len(self.non_terminals)):
cur_accuracy = self.accuracies[cur].get_current_value(should_print=False)
result.append(cur_accuracy)
# if should_print:
# print('Accuracy on {} is {}'.format(self.non_terminals[cur], cur_accuracy))
self.save_to_file(result)
return 0 # this metrics if only for printing
def save_to_file(self, result):
if self.results_dir is not None:
if self.group:
nt, res = self.get_grouped_result()
else:
nt, res = self.non_terminals, result
with open(os.path.join(self.results_dir, 'nt_acc.txt'), mode='w') as f:
f.write(json.dumps(nt))
f.write('\n')
f.write(json.dumps(res))
def get_grouped_result(self):
"""Calc accuracies ignoring last two bits of information."""
nt = set()
hits = {}
misses = {}
for i in range(len(self.non_terminals)):
base = self.non_terminals[i]
if self.non_terminals[i] != EOF_TOKEN:
base = base[:-2] # remove last two bits
nt.add(base)
if base not in hits:
hits[base] = 0
if base not in misses:
misses[base] = 0
hits[base] += self.accuracies[i].metrics.hits
misses[base] += self.accuracies[i].metrics.misses
nt = sorted(list(nt))
result = []
nt.remove('Program')
nt.remove('AssignmentPattern')
for cur in nt:
if hits[cur] + misses[cur] == 0:
result.append(0)
else:
result.append(float(hits[cur]) / (hits[cur] + misses[cur]))
return nt, result
class TerminalAccuracyMetrics(Metrics):
def __init__(self, dim=2):
super().__init__()
self.dim = dim
self.general_accuracy = BaseAccuracyMetrics()
self.empty_accuracy = IndexedAccuracyMetrics(
label='Accuracy on terminals that ground truth is <empty>'
)
self.non_empty_accuracy = IndexedAccuracyMetrics(
label='Accuracy on terminals that ground truth is not <empty>'
)
self.ground_not_unk_accuracy = IndexedAccuracyMetrics(
label='Accuracy on terminals that ground truth is not <unk> (and ground truth is not <empty>)'
)
self.model_not_unk_accuracy = IndexedAccuracyMetrics(
label='Accuracy on terminals that model predicted to non <unk> (and ground truth is not <empty>)'
)
def drop_state(self):
self.general_accuracy.drop_state()
self.empty_accuracy.drop_state()
self.non_empty_accuracy.drop_state()
self.ground_not_unk_accuracy.drop_state()
self.model_not_unk_accuracy.drop_state()
def report(self, prediction_target):
prediction, target = prediction_target
_, predicted = torch.max(prediction, dim=self.dim)
predicted = predicted.view(-1)
target = target.view(-1)
self.general_accuracy.report((predicted, target))
if not self.is_train:
empty_indexes = torch.nonzero(target == 0).squeeze()
self.empty_accuracy.report(predicted, target, empty_indexes)
non_empty_indexes = torch.nonzero(target - EMPTY_TOKEN_ID).squeeze()
self.non_empty_accuracy.report(predicted, target, non_empty_indexes)
predicted = torch.index_select(predicted, 0, non_empty_indexes)
target = torch.index_select(target, 0, non_empty_indexes)
ground_not_unk_indexes = torch.nonzero(target - UNKNOWN_TOKEN_ID).squeeze()
self.ground_not_unk_accuracy.report(predicted, target, ground_not_unk_indexes)
model_not_unk_indexes = torch.nonzero(predicted - UNKNOWN_TOKEN_ID).squeeze()
self.model_not_unk_accuracy.report(predicted, target, model_not_unk_indexes)
def get_current_value(self, should_print=False):
general_accuracy = self.general_accuracy.get_current_value(should_print=should_print)
if (not self.is_train) and should_print:
self.empty_accuracy.get_current_value(should_print=True)
self.non_empty_accuracy.get_current_value(should_print=True)
self.ground_not_unk_accuracy.get_current_value(should_print=True)
self.model_not_unk_accuracy.get_current_value(should_print=True)
return general_accuracy
class NonTerminalTerminalAccuracyMetrics(Metrics):
def __init__(self):
super().__init__()
self.nt_accuracy = MaxPredictionAccuracyMetrics()
self.t_accuracy = MaxPredictionAccuracyMetrics()
def drop_state(self):
self.nt_accuracy.drop_state()
self.t_accuracy.drop_state()
def report(self, data):
nt_prediction, t_prediction, nt_target, t_target = data
self.nt_accuracy.report((nt_prediction, nt_target))
self.t_accuracy.report((t_prediction, t_target))
def get_current_value(self, should_print=False):
nt_value = self.nt_accuracy.get_current_value(should_print=False)
t_value = self.t_accuracy.get_current_value(should_print=False)
if should_print:
print('Non terminals accuracy: {}'.format(nt_value))
print('Terminals accuracy: {}'.format(t_value))
return nt_value, t_value
class LayeredNodeDepthsAttentionMetrics(Metrics):
"""Metrics that is able to visualize attention coefficient per node depths"""
def __init__(self):
super().__init__()
self.per_depth_attention_sum = np.zeros((50, 50))
self.per_depth_reports = np.zeros((50))
def drop_state(self):
pass
def report(self, node_depths, attention_coefficients):
for i in range(50):
index = torch.nonzero((node_depths == i))
if index.size()[0] == 0:
continue
selected_attention = torch.index_select(attention_coefficients, dim=0, index=index.squeeze())
selected_attention = selected_attention.squeeze(2)
to_report = torch.sum(selected_attention, dim=0).cpu().numpy()
self.per_depth_attention_sum[i] += to_report
self.per_depth_reports[i] += index.size()[0]
def get_current_value(self, should_print=False):
for i in range(50):
if abs(self.per_depth_reports[i]) > 1e-6:
self.per_depth_attention_sum[i] /= self.per_depth_reports[i]
np.save('eval/temp/attention/per_depth_matrix', self.per_depth_attention_sum)
return 0 # this metrics is only for saving results to file.
class PerNtAttentionMetrics(Metrics):
def __init__(self):
super().__init__()
def report(self, current_input, attention_coefficients):
nt_ids = torch.argmax(current_input, dim=-1)
# for i in range(97): # TODO: check
# index = torch.nonzero((nt_ids == i))
# if index.size()[0] == 0:
# continue
# selected_attention = torch.index_select(attention_coefficients, dim=0, index=index.squeeze())
# selected_attention = selected_attention.squeeze(2)
# to_report = torch.sum(selected_attention, dim=0).cpu().numpy()
# self.per_depth_attention_sum[i] += to_report
# self.per_depth_reports[i] += index.size()[0]
def drop_state(self):
pass
def get_current_value(self, should_print=False):
pass
class EmptyNonEmptyWrapper(Metrics):
def __init__(self, non_emp_base: Metrics, with_emp_base:Metrics):
super().__init__()
self.non_emp_base = non_emp_base
self.with_emp_base = with_emp_base
def drop_state(self):
self.non_emp_base.drop_state()
self.with_emp_base.drop_state()
def report(self, prediction_target):
prediction, target = prediction_target
prediction = prediction.view(-1)
target = target.view(-1)
self.with_emp_base.report((prediction, target))
non_emp_indices = (target != EMPTY_TOKEN_ID).nonzero().squeeze()
prediction = torch.index_select(prediction, 0, non_emp_indices)
target = torch.index_select(target, 0, non_emp_indices)
self.non_emp_base.report((prediction, target))
def get_current_value(self, should_print=False):
print('Non Empty')
self.non_emp_base.get_current_value(should_print=should_print)
print('With Empty')
self.with_emp_base.get_current_value(should_print=should_print)
class EmptyNonEmptyTerminalTopKAccuracyWrapper(Metrics):
def __init__(self):
super().__init__()
self.non_emp_base = TopKAccuracy(k=5)
self.with_emp_base = TopKAccuracy(k=5)
def drop_state(self):
self.non_emp_base.drop_state()
self.with_emp_base.drop_state()
def report(self, prediction_target):
prediction, target = prediction_target
prediction = prediction.view(-1, prediction.size()[-1])
target = target.view(-1)
self.with_emp_base.report((prediction, target))
non_emp_indices = (target != EMPTY_TOKEN_ID).nonzero().squeeze()
prediction = torch.index_select(prediction, 0, non_emp_indices)
target = torch.index_select(target, 0, non_emp_indices)
self.non_emp_base.report((prediction, target))
def get_current_value(self, should_print=False):
print('Non Empty')
self.non_emp_base.get_current_value(should_print=should_print)
print('With Empty')
self.with_emp_base.get_current_value(should_print=should_print)
# class AggregatedTerminalTopKMetrics(Metrics):
#
# def __init__(self, k):
# super().__init__()
# self.k = k
# self.common = BaseAccuracyMetrics()
# self.target_non_unk = Top
# self.prediction_non_unk = IndexedAccuracyMetrics('Prediction not unk')
#
# def drop_state(self):
# self.common.drop_state()
# self.target_non_unk.drop_state()
# self.prediction_non_unk.drop_state()
#
# def report(self, prediction_target):
# prediction, target = prediction_target
# prediction = prediction.view(-1)
# target = target.view(-1)
#
# self.common.report((prediction, target))
#
# pred_non_unk_indices = (prediction != UNKNOWN_TOKEN_ID).nonzero().squeeze()
# target_non_unk_indices = (target != UNKNOWN_TOKEN_ID).nonzero().squeeze()
#
# self.prediction_non_unk.report(prediction, target, pred_non_unk_indices)
# self.target_non_unk.report(prediction, target, target_non_unk_indices)
#
# def get_current_value(self, should_print=False):
# print('P(hat(t) == t) = {}'.format(self.common.get_current_value(False)))
# print('P(hat(t) == t && hat(t) != unk) = {}'.format(self.prediction_non_unk.metrics.hits / (self.common.hits + self.common.misses)))
# print('P(hat(t) == t | t != unk) = {}'.format(self.target_non_unk.get_current_value(False)))
# print('P(hat(t) == t | hat(t) != unk) = {}'.format(self.prediction_non_unk.get_current_value(False)))
class AggregatedTerminalMetrics(Metrics):
def __init__(self):
super().__init__()
self.common = BaseAccuracyMetrics()
self.target_non_unk = IndexedAccuracyMetrics('Target not unk')
self.prediction_non_unk = IndexedAccuracyMetrics('Prediction not unk')
def drop_state(self):
self.common.drop_state()
self.target_non_unk.drop_state()
self.prediction_non_unk.drop_state()
def report(self, prediction_target):
prediction, target = prediction_target
prediction = prediction.view(-1)
target = target.view(-1)
self.common.report((prediction, target))
pred_non_unk_indices = (prediction != UNKNOWN_TOKEN_ID).nonzero().squeeze()
target_non_unk_indices = (target != UNKNOWN_TOKEN_ID).nonzero().squeeze()
self.prediction_non_unk.report(prediction, target, pred_non_unk_indices)
self.target_non_unk.report(prediction, target, target_non_unk_indices)
def get_current_value(self, should_print=False):
print('P(hat(t) == t) = {}'.format(self.common.get_current_value(False)))
print('P(hat(t) == t && hat(t) != unk) = {}'.format(self.prediction_non_unk.metrics.hits / (self.common.hits + self.common.misses)))
print('P(hat(t) == t | t != unk) = {}'.format(self.target_non_unk.get_current_value(False)))
print('P(hat(t) == t | hat(t) != unk) = {}'.format(self.prediction_non_unk.get_current_value(False)))
| [
"torch.nonzero",
"torch.max",
"torch.index_select",
"torch.argmax",
"torch.sum"
] | 0.4.0 | zerogerc/rnn-autocomplete | 39dc8dd7c431cb8ac9e15016388ec823771388e4 |
1.1 | from _warnings import warn
import matplotlib
from batchgenerators.utilities.file_and_folder_operations import *
from sklearn.model_selection import KFold
matplotlib.use("agg")
from time import time, sleep
import torch
import numpy as np
from torch.optim import lr_scheduler
import matplotlib.pyplot as plt
import sys
from collections import OrderedDict
from datetime import datetime
import torch.backends.cudnn as cudnn
from abc import abstractmethod
from datetime import datetime
try:
from apex import amp
except ImportError:
amp = None
class NetworkTrainer(object):
def __init__(self, deterministic=True, fp16=False):
"""
A generic class that can train almost any neural network (RNNs excluded). It provides basic functionality such
as the training loop, tracking of training and validation losses (and the target metric if you implement it).
Training can be terminated early if the validation loss (or the target metric, if implemented) does not improve
anymore. This is based on a moving average (MA) of the loss/metric instead of the raw values to get smoother
results.
What you need to override:
- __init__
- initialize
- run_online_evaluation (optional)
- finish_online_evaluation (optional)
- validate
- predict_test_case
"""
np.random.seed(12345)
torch.manual_seed(12345)
torch.cuda.manual_seed_all(12345)
self.fp16 = fp16
if deterministic:
cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
else:
cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
################# SET THESE IN self.initialize() ###################################
self.network = None
self.optimizer = None
self.lr_scheduler = None
self.tr_gen = self.val_gen = None
self.was_initialized = False
################# SET THESE IN INIT ################################################
self.output_folder = None
self.fold = None
self.loss = None
self.dataset_directory = None
################# SET THESE IN LOAD_DATASET OR DO_SPLIT ############################
self.dataset = None # these can be None for inference mode
self.dataset_tr = self.dataset_val = None # do not need to be used, they just appear if you are using the suggested load_dataset_and_do_split
################# THESE DO NOT NECESSARILY NEED TO BE MODIFIED #####################
self.patience = 50
self.val_eval_criterion_alpha = 0.9 # alpha * old + (1-alpha) * new
# if this is too low then the moving average will be too noisy and the training may terminate early. If it is
# too high the training will take forever
self.train_loss_MA_alpha = 0.93 # alpha * old + (1-alpha) * new
self.train_loss_MA_eps = 5e-4 # new MA must be at least this much better (smaller)
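# Worked example (illustrative): with train_loss_MA_alpha = 0.93, a current MA of 1.00 and a new
# epoch loss of 0.50 gives 0.93 * 1.00 + 0.07 * 0.50 = 0.965; the epoch only counts as an
# improvement if the MA undercuts the previous best by more than train_loss_MA_eps (5e-4).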
self.save_every = 50
self.save_latest_only = True
self.max_num_epochs = 1000
self.num_batches_per_epoch = 250
self.num_val_batches_per_epoch = 50
self.also_val_in_tr_mode = False
self.lr_threshold = 1e-6 # the network will not terminate training if the lr is still above this threshold
################# LEAVE THESE ALONE ################################################
self.val_eval_criterion_MA = None
self.train_loss_MA = None
self.best_val_eval_criterion_MA = None
self.best_MA_tr_loss_for_patience = None
self.best_epoch_based_on_MA_tr_loss = None
self.all_tr_losses = []
self.all_val_losses = []
self.all_val_losses_tr_mode = []
self.all_val_eval_metrics = [] # does not have to be used
self.epoch = 0
self.log_file = None
self.deterministic = deterministic
@abstractmethod
def initialize(self, training=True):
"""
create self.output_folder
modify self.output_folder if you are doing cross-validation (one folder per fold)
set self.tr_gen and self.val_gen
set self.network, self.optimizer and self.lr_scheduler
finally set self.was_initialized to True
:param training:
:return:
"""
@abstractmethod
def load_dataset(self):
pass
def do_split(self):
"""
This is a suggested split routine for when your dataset is a dictionary (my personal standard)
:return:
"""
splits_file = join(self.dataset_directory, "splits_final.pkl")
if not isfile(splits_file):
self.print_to_log_file("Creating new split...")
splits = []
all_keys_sorted = np.sort(list(self.dataset.keys()))
kfold = KFold(n_splits=5, shuffle=True, random_state=12345)
for i, (train_idx, test_idx) in enumerate(kfold.split(all_keys_sorted)):
train_keys = np.array(all_keys_sorted)[train_idx]
test_keys = np.array(all_keys_sorted)[test_idx]
splits.append(OrderedDict())
splits[-1]['train'] = train_keys
splits[-1]['val'] = test_keys
save_pickle(splits, splits_file)
splits = load_pickle(splits_file)
if self.fold == "all":
tr_keys = val_keys = list(self.dataset.keys())
else:
tr_keys = splits[self.fold]['train']
val_keys = splits[self.fold]['val']
tr_keys.sort()
val_keys.sort()
self.dataset_tr = OrderedDict()
for i in tr_keys:
self.dataset_tr[i] = self.dataset[i]
self.dataset_val = OrderedDict()
for i in val_keys:
self.dataset_val[i] = self.dataset[i]
def plot_progress(self):
"""
Should probably be improved
:return:
"""
try:
font = {'weight': 'normal',
'size': 18}
matplotlib.rc('font', **font)
fig = plt.figure(figsize=(30, 24))
ax = fig.add_subplot(111)
ax2 = ax.twinx()
x_values = list(range(self.epoch + 1))
ax.plot(x_values, self.all_tr_losses, color='b', ls='-', label="loss_tr")
ax.plot(x_values, self.all_val_losses, color='r', ls='-', label="loss_val, train=False")
if len(self.all_val_losses_tr_mode) > 0:
ax.plot(x_values, self.all_val_losses_tr_mode, color='g', ls='-', label="loss_val, train=True")
if len(self.all_val_eval_metrics) == len(self.all_val_losses):
ax2.plot(x_values, self.all_val_eval_metrics, color='g', ls='--', label="evaluation metric")
ax.set_xlabel("epoch")
ax.set_ylabel("loss")
ax2.set_ylabel("evaluation metric")
ax.legend()
ax2.legend(loc=9)
fig.savefig(join(self.output_folder, "progress.png"))
plt.close()
except IOError:
self.print_to_log_file("failed to plot: ", sys.exc_info())
def print_to_log_file(self, *args, also_print_to_console=True, add_timestamp=True):
timestamp = time()
dt_object = datetime.fromtimestamp(timestamp)
if add_timestamp:
args = ("%s:" % dt_object, *args)
if self.log_file is None:
maybe_mkdir_p(self.output_folder)
timestamp = datetime.now()
self.log_file = join(self.output_folder, "training_log_%d_%d_%d_%02.0d_%02.0d_%02.0d.txt" %
(timestamp.year, timestamp.month, timestamp.day, timestamp.hour, timestamp.minute, timestamp.second))
with open(self.log_file, 'w') as f:
f.write("Starting... \n")
successful = False
max_attempts = 5
ctr = 0
while not successful and ctr < max_attempts:
try:
with open(self.log_file, 'a+') as f:
for a in args:
f.write(str(a))
f.write(" ")
f.write("\n")
successful = True
except IOError:
print("%s: failed to log: " % datetime.fromtimestamp(timestamp), sys.exc_info())
sleep(0.5)
ctr += 1
if also_print_to_console:
print(*args)
def save_checkpoint(self, fname, save_optimizer=True):
start_time = time()
state_dict = self.network.state_dict()
for key in state_dict.keys():
state_dict[key] = state_dict[key].cpu()
lr_sched_state_dct = None
if self.lr_scheduler is not None and not isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
lr_sched_state_dct = self.lr_scheduler.state_dict()
for key in lr_sched_state_dct.keys():
lr_sched_state_dct[key] = lr_sched_state_dct[key]
if save_optimizer:
optimizer_state_dict = self.optimizer.state_dict()
else:
optimizer_state_dict = None
self.print_to_log_file("saving checkpoint...")
torch.save({
'epoch': self.epoch + 1,
'state_dict': state_dict,
'optimizer_state_dict': optimizer_state_dict,
'lr_scheduler_state_dict': lr_sched_state_dct,
'plot_stuff': (self.all_tr_losses, self.all_val_losses, self.all_val_losses_tr_mode,
self.all_val_eval_metrics)},
fname)
self.print_to_log_file("done, saving took %.2f seconds" % (time() - start_time))
def load_best_checkpoint(self, train=True):
if self.fold is None:
raise RuntimeError("Cannot load best checkpoint if self.fold is None")
self.load_checkpoint(join(self.output_folder, "model_best.model"), train=train)
def load_latest_checkpoint(self, train=True):
if isfile(join(self.output_folder, "model_final_checkpoint.model")):
return self.load_checkpoint(join(self.output_folder, "model_final_checkpoint.model"), train=train)
if isfile(join(self.output_folder, "model_latest.model")):
return self.load_checkpoint(join(self.output_folder, "model_latest.model"), train=train)
all_checkpoints = [i for i in os.listdir(self.output_folder) if i.endswith(".model") and i.find("_ep_") != -1]
if len(all_checkpoints) == 0:
return self.load_best_checkpoint(train=train)
corresponding_epochs = [int(i.split("_")[-1].split(".")[0]) for i in all_checkpoints]
checkpoint = all_checkpoints[np.argmax(corresponding_epochs)]
self.load_checkpoint(join(self.output_folder, checkpoint), train=train)
def load_checkpoint(self, fname, train=True):
self.print_to_log_file("loading checkpoint", fname, "train=", train)
if not self.was_initialized:
self.initialize(train)
saved_model = torch.load(fname, map_location=torch.device('cuda', torch.cuda.current_device()))
self.load_checkpoint_ram(saved_model, train)
def load_checkpoint_ram(self, saved_model, train=True):
"""
used for if the checkpoint is already in ram
:param saved_model:
:param train:
:return:
"""
if not self.was_initialized:
self.initialize(train)
new_state_dict = OrderedDict()
curr_state_dict_keys = list(self.network.state_dict().keys())
# if the state dict comes from nn.DataParallel but we use a non-parallel model here then the state dict keys do not
# match. Use a heuristic to make it match
for k, value in saved_model['state_dict'].items():
key = k
if key not in curr_state_dict_keys:
key = key[7:]
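# strips the 7-character "module." prefix that nn.DataParallel prepends to parameter names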
new_state_dict[key] = value
self.network.load_state_dict(new_state_dict)
self.epoch = saved_model['epoch']
if train:
optimizer_state_dict = saved_model['optimizer_state_dict']
if optimizer_state_dict is not None:
self.optimizer.load_state_dict(optimizer_state_dict)
if self.lr_scheduler is not None and not isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
self.lr_scheduler.load_state_dict(saved_model['lr_scheduler_state_dict'])
self.all_tr_losses, self.all_val_losses, self.all_val_losses_tr_mode, self.all_val_eval_metrics = saved_model['plot_stuff']
def _maybe_init_amp(self):
# we use fp16 for training only, not inference
if self.fp16:
if amp is not None:
self.network, self.optimizer = amp.initialize(self.network, self.optimizer, opt_level="O1")
else:
self.print_to_log_file("WARNING: FP16 training was requested but nvidia apex is not installed. "
"Install it from https://github.com/NVIDIA/apex")
def run_training(self):
torch.cuda.empty_cache()
self._maybe_init_amp()
if cudnn.benchmark and cudnn.deterministic:
warn("torch.backends.cudnn.deterministic is True indicating a deterministic training is desired. "
"But torch.backends.cudnn.benchmark is True as well and this will prevent deterministic training! "
"If you want deterministic then set benchmark=False")
maybe_mkdir_p(self.output_folder)
if not self.was_initialized:
self.initialize(True)
while self.epoch < self.max_num_epochs:
self.print_to_log_file("\nepoch: ", self.epoch)
epoch_start_time = time()
train_losses_epoch = []
# train one epoch
self.network.train()
for b in range(self.num_batches_per_epoch):
l = self.run_iteration(self.tr_gen, True)
train_losses_epoch.append(l)
self.all_tr_losses.append(np.mean(train_losses_epoch))
self.print_to_log_file("train loss : %.4f" % self.all_tr_losses[-1])
with torch.no_grad():
# validation with train=False
self.network.eval()
val_losses = []
for b in range(self.num_val_batches_per_epoch):
l = self.run_iteration(self.val_gen, False, True)
val_losses.append(l)
self.all_val_losses.append(np.mean(val_losses))
self.print_to_log_file("val loss (train=False): %.4f" % self.all_val_losses[-1])
if self.also_val_in_tr_mode:
self.network.train()
# validation with train=True
val_losses = []
for b in range(self.num_val_batches_per_epoch):
l = self.run_iteration(self.val_gen, False)
val_losses.append(l)
self.all_val_losses_tr_mode.append(np.mean(val_losses))
self.print_to_log_file("val loss (train=True): %.4f" % self.all_val_losses_tr_mode[-1])
epoch_end_time = time()
self.update_train_loss_MA() # needed for lr scheduler and stopping of training
continue_training = self.on_epoch_end()
if not continue_training:
# allows for early stopping
break
self.epoch += 1
self.print_to_log_file("This epoch took %f s\n" % (epoch_end_time-epoch_start_time))
self.save_checkpoint(join(self.output_folder, "model_final_checkpoint.model"))
# now we can delete latest as it will be identical with final
if isfile(join(self.output_folder, "model_latest.model")):
os.remove(join(self.output_folder, "model_latest.model"))
if isfile(join(self.output_folder, "model_latest.model.pkl")):
os.remove(join(self.output_folder, "model_latest.model.pkl"))
def maybe_update_lr(self):
# maybe update learning rate
if self.lr_scheduler is not None:
assert isinstance(self.lr_scheduler, (lr_scheduler.ReduceLROnPlateau, lr_scheduler._LRScheduler))
if isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
# lr scheduler is updated with moving average val loss. should be more robust
self.lr_scheduler.step(self.train_loss_MA)
else:
self.lr_scheduler.step(self.epoch + 1)
self.print_to_log_file("lr is now (scheduler) %s" % str(self.optimizer.param_groups[0]['lr']))
def maybe_save_checkpoint(self):
"""
Saves a checkpoint every save_every epochs.
:return:
"""
if self.epoch % self.save_every == (self.save_every - 1):
self.print_to_log_file("saving scheduled checkpoint file...")
if not self.save_latest_only:
self.save_checkpoint(join(self.output_folder, "model_ep_%03.0d.model" % (self.epoch + 1)))
self.save_checkpoint(join(self.output_folder, "model_latest.model"))
self.print_to_log_file("done")
def update_eval_criterion_MA(self):
"""
If self.all_val_eval_metrics is unused (len=0) then we fall back to using -self.all_val_losses for the MA to determine early stopping
(not a minimization, but a maximization of a metric and therefore the - in the latter case)
:return:
"""
if self.val_eval_criterion_MA is None:
if len(self.all_val_eval_metrics) == 0:
self.val_eval_criterion_MA = - self.all_val_losses[-1]
else:
self.val_eval_criterion_MA = self.all_val_eval_metrics[-1]
else:
if len(self.all_val_eval_metrics) == 0:
"""
Here we use alpha * old - (1 - alpha) * new because new in this case is the validation loss and lower
is better, so we need to negate it.
"""
self.val_eval_criterion_MA = self.val_eval_criterion_alpha * self.val_eval_criterion_MA - (
1 - self.val_eval_criterion_alpha) * \
self.all_val_losses[-1]
else:
self.val_eval_criterion_MA = self.val_eval_criterion_alpha * self.val_eval_criterion_MA + (
1 - self.val_eval_criterion_alpha) * \
self.all_val_eval_metrics[-1]
def manage_patience(self):
# update patience
continue_training = True
if self.patience is not None:
# if best_MA_tr_loss_for_patience and best_epoch_based_on_MA_tr_loss were not yet initialized,
# initialize them
if self.best_MA_tr_loss_for_patience is None:
self.best_MA_tr_loss_for_patience = self.train_loss_MA
if self.best_epoch_based_on_MA_tr_loss is None:
self.best_epoch_based_on_MA_tr_loss = self.epoch
if self.best_val_eval_criterion_MA is None:
self.best_val_eval_criterion_MA = self.val_eval_criterion_MA
# check if the current epoch is the best one according to moving average of validation criterion. If so
# then save 'best' model
# Do not use this for validation. This is intended for test set prediction only.
self.print_to_log_file("current best_val_eval_criterion_MA is %.4f0" % self.best_val_eval_criterion_MA)
self.print_to_log_file("current val_eval_criterion_MA is %.4f" % self.val_eval_criterion_MA)
if self.val_eval_criterion_MA > self.best_val_eval_criterion_MA:
self.best_val_eval_criterion_MA = self.val_eval_criterion_MA
self.print_to_log_file("saving best epoch checkpoint...")
self.save_checkpoint(join(self.output_folder, "model_best.model"))
# Now see if the moving average of the train loss has improved. If yes then reset patience, else
# increase patience
if self.train_loss_MA + self.train_loss_MA_eps < self.best_MA_tr_loss_for_patience:
self.best_MA_tr_loss_for_patience = self.train_loss_MA
self.best_epoch_based_on_MA_tr_loss = self.epoch
self.print_to_log_file("New best epoch (train loss MA): %03.4f" % self.best_MA_tr_loss_for_patience)
else:
self.print_to_log_file("No improvement: current train MA %03.4f, best: %03.4f, eps is %03.4f" %
(self.train_loss_MA, self.best_MA_tr_loss_for_patience, self.train_loss_MA_eps))
# if patience has reached its maximum then finish training (provided lr is low enough)
if self.epoch - self.best_epoch_based_on_MA_tr_loss > self.patience:
if self.optimizer.param_groups[0]['lr'] > self.lr_threshold:
self.print_to_log_file("My patience ended, but I believe I need more time (lr > 1e-6)")
self.best_epoch_based_on_MA_tr_loss = self.epoch - self.patience // 2
else:
self.print_to_log_file("My patience ended")
continue_training = False
else:
self.print_to_log_file(
"Patience: %d/%d" % (self.epoch - self.best_epoch_based_on_MA_tr_loss, self.patience))
return continue_training
def on_epoch_end(self):
self.finish_online_evaluation() # does not have to do anything, but can be used to update self.all_val_eval_
# metrics
self.plot_progress()
self.maybe_update_lr()
self.maybe_save_checkpoint()
self.update_eval_criterion_MA()
continue_training = self.manage_patience()
continue_training = True
return continue_training
def update_train_loss_MA(self):
if self.train_loss_MA is None:
self.train_loss_MA = self.all_tr_losses[-1]
else:
self.train_loss_MA = self.train_loss_MA_alpha * self.train_loss_MA + (1 - self.train_loss_MA_alpha) * \
self.all_tr_losses[-1]
def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
data_dict = next(data_generator)
data = data_dict['data']
target = data_dict['target']
if not isinstance(data, torch.Tensor):
data = torch.from_numpy(data).float()
if not isinstance(target, torch.Tensor):
target = torch.from_numpy(target).float()
data = data.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
self.optimizer.zero_grad()
output = self.network(data)
del data
l = self.loss(output, target)
if run_online_evaluation:
self.run_online_evaluation(output, target)
del target
if do_backprop:
if not self.fp16 or amp is None:
l.backward()
else:
with amp.scale_loss(l, self.optimizer) as scaled_loss:
scaled_loss.backward()
self.optimizer.step()
return l.detach().cpu().numpy()
def run_online_evaluation(self, *args, **kwargs):
"""
Can be implemented, does not have to
:param output_torch:
:param target_npy:
:return:
"""
pass
def finish_online_evaluation(self):
"""
Can be implemented, does not have to
:return:
"""
pass
@abstractmethod
def validate(self, *args, **kwargs):
pass
def find_lr(self, num_iters=1000, init_value=1e-6, final_value=10., beta=0.98):
"""
stolen and adapted from here: https://sgugger.github.io/how-do-you-find-a-good-learning-rate.html
:param num_iters:
:param init_value:
:param final_value:
:param beta:
:return:
"""
import math
self._maybe_init_amp()
mult = (final_value / init_value) ** (1/num_iters)
lr = init_value
self.optimizer.param_groups[0]['lr'] = lr
avg_loss = 0.
best_loss = 0.
losses = []
log_lrs = []
for batch_num in range(1, num_iters + 1):
# +1 because this one here is not designed to have negative loss...
loss = self.run_iteration(self.tr_gen, do_backprop=True, run_online_evaluation=False).data.item() + 1
# Compute the smoothed loss
avg_loss = beta * avg_loss + (1-beta) * loss
smoothed_loss = avg_loss / (1 - beta**batch_num)
# Stop if the loss is exploding
if batch_num > 1 and smoothed_loss > 4 * best_loss:
break
# Record the best loss
if smoothed_loss < best_loss or batch_num==1:
best_loss = smoothed_loss
# Store the values
losses.append(smoothed_loss)
log_lrs.append(math.log10(lr))
# Update the lr for the next step
lr *= mult
self.optimizer.param_groups[0]['lr'] = lr
import matplotlib.pyplot as plt
lrs = [10 ** i for i in log_lrs]
fig = plt.figure()
plt.xscale('log')
plt.plot(lrs[10:-5], losses[10:-5])
plt.savefig(join(self.output_folder, "lr_finder.png"))
plt.close()
return log_lrs, losses
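# --- Illustrative sketch (not part of the original file) ---
# A minimal subclass showing what the abstract initialize() docstring above asks for: set the
# output folder, data generators, network, loss, optimizer, lr_scheduler and the was_initialized
# flag. The generator and network objects passed in are placeholders (assumptions), and a CUDA
# device is assumed to be available; this is not how the real subclasses of this project do it.
class ExampleTrainer(NetworkTrainer):
    def __init__(self, output_folder, tr_gen, val_gen, network, deterministic=True, fp16=False):
        super().__init__(deterministic, fp16)
        self.output_folder = output_folder
        self._tr_gen, self._val_gen, self._network = tr_gen, val_gen, network

    def initialize(self, training=True):
        self.tr_gen, self.val_gen = self._tr_gen, self._val_gen
        self.network = self._network.cuda()
        self.loss = torch.nn.CrossEntropyLoss()
        self.optimizer = torch.optim.Adam(self.network.parameters(), lr=1e-3)
        self.lr_scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=10)
        self.was_initialized = True

    def load_dataset(self):
        pass

    def validate(self, *args, **kwargs):
        pass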
| [
"torch.cuda.manual_seed_all",
"torch.save",
"torch.no_grad",
"torch.manual_seed",
"torch.cuda.empty_cache",
"torch.cuda.current_device",
"torch.from_numpy"
] | 1.1.0 | mangoyuan/Unifed-Seg3d | 74c82464dbe901cf18e38afb0e1b74cc159a8850 |
1.1 | # Copyright 2019 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
from batchgenerators.augmentations.utils import resize_segmentation
from nnunet.experiment_planning.plan_and_preprocess_task import get_caseIDs_from_splitted_dataset_folder
from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax
from batchgenerators.utilities.file_and_folder_operations import *
from multiprocessing import Process, Queue
import torch
import SimpleITK as sitk
import shutil
from multiprocessing import Pool
from nnunet.training.model_restore import load_model_and_checkpoint_files
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.utilities.one_hot_encoding import to_one_hot
def predict_save_to_queue(preprocess_fn, q, list_of_lists, output_files, segs_from_prev_stage, classes):
errors_in = []
for i, l in enumerate(list_of_lists):
try:
output_file = output_files[i]
print("preprocessing", output_file)
d, _, dct = preprocess_fn(l)
print(output_file, dct)
if segs_from_prev_stage[i] is not None:
assert isfile(segs_from_prev_stage[i]) and segs_from_prev_stage[i].endswith(".nii.gz"), "segs_from_prev_stage" \
" must point to a " \
"segmentation file"
seg_prev = sitk.GetArrayFromImage(sitk.ReadImage(segs_from_prev_stage[i]))
# check to see if shapes match
img = sitk.GetArrayFromImage(sitk.ReadImage(l[0]))
assert all([i == j for i, j in zip(seg_prev.shape, img.shape)]), "image and segmentation from previous " \
"stage don't have the same pixel array " \
"shape! image: %s, seg_prev: %s" % \
(l[0], segs_from_prev_stage[i])
seg_reshaped = resize_segmentation(seg_prev, d.shape[1:], order=1, cval=0)
seg_reshaped = to_one_hot(seg_reshaped, classes)
d = np.vstack((d, seg_reshaped)).astype(np.float32)
"""There is a problem with python process communication that prevents us from communicating obejcts
larger than 2 GB between processes (basically when the length of the pickle string that will be sent is
communicated by the multiprocessing.Pipe object then the placeholder (\%i I think) does not allow for long
enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually
patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will
then be read (and finally deleted) by the Process. save_segmentation_nifti_from_softmax can take either
filename or np.ndarray and will handle this automatically"""
print(d.shape)
if np.prod(d.shape) > (2e9 / 4 * 0.9): # *0.9 just to be safe, 4 because float32 is 4 bytes
print(
"This output is too large for python process-process communication. "
"Saving output temporarily to disk")
np.save(output_file[:-7] + ".npy", d)
d = output_file[:-7] + ".npy"
q.put((output_file, (d, dct)))
except KeyboardInterrupt:
raise KeyboardInterrupt
except Exception as e:
print("error in", l)
print(e)
q.put("end")
if len(errors_in) > 0:
print("There were some errors in the following cases:", errors_in)
print("These cases were ignored.")
else:
print("This worker has ended successfully, no errors to report")
def preprocess_multithreaded(trainer, list_of_lists, output_files, num_processes=2, segs_from_prev_stage=None):
if segs_from_prev_stage is None:
segs_from_prev_stage = [None] * len(list_of_lists)
classes = list(range(1, trainer.num_classes))
# assert isinstance(trainer, nnUNetTrainer)
q = Queue(1)
processes = []
print(num_processes)
for i in range(num_processes):
pr = Process(target=predict_save_to_queue, args=(trainer.preprocess_patient, q,
list_of_lists[i::num_processes],
output_files[i::num_processes],
segs_from_prev_stage[i::num_processes],
classes))
pr.start()
processes.append(pr)
try:
end_ctr = 0
while end_ctr != num_processes:
item = q.get()
if item == "end":
end_ctr += 1
continue
else:
yield item
finally:
for p in processes:
if p.is_alive():
p.terminate() # this should not happen but better safe than sorry right
p.join()
q.close()
def predict_cases(model, list_of_lists, output_filenames, folds, save_npz, num_threads_preprocessing,
num_threads_nifti_save, segs_from_prev_stage=None, do_tta=True,
overwrite_existing=False):
assert len(list_of_lists) == len(output_filenames)
if segs_from_prev_stage is not None: assert len(segs_from_prev_stage) == len(output_filenames)
prman = Pool(num_threads_nifti_save)
results = []
cleaned_output_files = []
for o in output_filenames:
dr, f = os.path.split(o)
if len(dr) > 0:
maybe_mkdir_p(dr)
if not f.endswith(".nii.gz"):
f, _ = os.path.splitext(f)
f = f + ".nii.gz"
cleaned_output_files.append(join(dr, f))
if not overwrite_existing:
print("number of cases:", len(list_of_lists))
not_done_idx = [i for i, j in enumerate(cleaned_output_files) if not isfile(j)]
cleaned_output_files = [cleaned_output_files[i] for i in not_done_idx]
list_of_lists = [list_of_lists[i] for i in not_done_idx]
if segs_from_prev_stage is not None:
segs_from_prev_stage = [segs_from_prev_stage[i] for i in not_done_idx]
print("number of cases that still need to be predicted:", len(cleaned_output_files))
print("emptying cuda cache")
torch.cuda.empty_cache()
print("loading parameters for folds,", folds)
trainer, params = load_model_and_checkpoint_files(model, folds)
print("starting preprocessing generator")
preprocessing = preprocess_multithreaded(trainer, list_of_lists, cleaned_output_files, num_threads_preprocessing, segs_from_prev_stage)
print("starting prediction...")
for preprocessed in preprocessing:
output_filename, (d, dct) = preprocessed
if isinstance(d, str):
data = np.load(d)
os.remove(d)
d = data
print("predicting", output_filename)
softmax = []
for p in params:
trainer.load_checkpoint_ram(p, False)
softmax.append(trainer.predict_preprocessed_data_return_softmax(d, do_tta, 1, False, 1,
trainer.data_aug_params['mirror_axes'],
True, True, 2, trainer.patch_size, True)[None])
softmax = np.vstack(softmax)
softmax_mean = np.mean(softmax, 0)
transpose_forward = trainer.plans.get('transpose_forward')
if transpose_forward is not None:
transpose_backward = trainer.plans.get('transpose_backward')
softmax_mean = softmax_mean.transpose([0] + [i + 1 for i in transpose_backward])
if save_npz:
npz_file = output_filename[:-7] + ".npz"
else:
npz_file = None
"""There is a problem with python process communication that prevents us from communicating obejcts
larger than 2 GB between processes (basically when the length of the pickle string that will be sent is
communicated by the multiprocessing.Pipe object then the placeholder (\%i I think) does not allow for long
enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually
patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will
then be read (and finally deleted) by the Process. save_segmentation_nifti_from_softmax can take either
filename or np.ndarray and will handle this automatically"""
if np.prod(softmax_mean.shape) > (2e9 / 4 * 0.9): # *0.9 just to be safe
print("This output is too large for python process-process communication. Saving output temporarily to disk")
np.save(output_filename[:-7] + ".npy", softmax_mean)
softmax_mean = output_filename[:-7] + ".npy"
results.append(prman.starmap_async(save_segmentation_nifti_from_softmax,
((softmax_mean, output_filename, dct, 1, None, None, None, npz_file), )
))
_ = [i.get() for i in results]
def predict_from_folder(model, input_folder, output_folder, folds, save_npz, num_threads_preprocessing,
num_threads_nifti_save, lowres_segmentations, part_id, num_parts, tta,
overwrite_existing=True):
"""
here we use the standard naming scheme to generate list_of_lists and output_files needed by predict_cases
:param model:
:param input_folder:
:param output_folder:
:param folds:
:param save_npz:
:param num_threads_preprocessing:
:param num_threads_nifti_save:
:param lowres_segmentations:
:param part_id:
:param num_parts:
:param tta:
:return:
"""
maybe_mkdir_p(output_folder)
shutil.copy(join(model, 'plans.pkl'), output_folder)
case_ids = get_caseIDs_from_splitted_dataset_folder(input_folder)
output_files = [join(output_folder, i + ".nii.gz") for i in case_ids]
all_files = subfiles(input_folder, suffix=".nii.gz", join=False, sort=True)
list_of_lists = [[join(input_folder, i) for i in all_files if i[:len(j)].startswith(j) and
len(i) == (len(j) + 12)] for j in case_ids]
if lowres_segmentations is not None:
assert isdir(lowres_segmentations), "if lowres_segmentations is not None then it must point to a directory"
lowres_segmentations = [join(lowres_segmentations, i + ".nii.gz") for i in case_ids]
assert all([isfile(i) for i in lowres_segmentations]), "not all lowres_segmentations files are present. " \
"(I was searching for case_id.nii.gz in that folder)"
lowres_segmentations = lowres_segmentations[part_id::num_parts]
else:
lowres_segmentations = None
return predict_cases(model, list_of_lists[part_id::num_parts], output_files[part_id::num_parts], folds, save_npz,
num_threads_preprocessing, num_threads_nifti_save, lowres_segmentations,
tta, overwrite_existing=overwrite_existing)
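# Usage sketch (illustrative only; the file names below are hypothetical, not taken from any dataset):
# how the CASENAME_XXXX.nii.gz naming scheme described in the docstring maps input modalities to outputs.
def _example_naming_scheme():
    all_files = ["case01_0000.nii.gz", "case01_0001.nii.gz", "case02_0000.nii.gz"]
    case_ids = ["case01", "case02"]
    # one output segmentation per case id
    output_files = [c + ".nii.gz" for c in case_ids]
    # one list of modality files per case id; the length check mirrors predict_from_folder above
    list_of_lists = [[f for f in all_files if f.startswith(c) and len(f) == len(c) + 12]
                     for c in case_ids]
    return output_files, list_of_lists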
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-i", '--input_folder', help="Must contain all modalities for each patient in the correct"
" order (same as training). Files must be named "
"CASENAME_XXXX.nii.gz where XXXX is the modality "
"identifier (0000, 0001, etc)", required=True)
parser.add_argument('-o', "--output_folder", required=True, help="folder for saving predictions")
parser.add_argument('-m', '--model_output_folder', help='model output folder. Will automatically discover the folds '
'that were '
'run and use those as an ensemble', required=True)
parser.add_argument('-f', '--folds', nargs='+', default='None', help="folds to use for prediction. Default is None "
"which means that folds will be detected "
"automatically in the model output folder")
    parser.add_argument('-z', '--save_npz', required=False, action='store_true', help="use this if you want to ensemble"
                                                                                       " these predictions with those of"
                                                                                       " other models. Softmax "
                                                                                       "probabilities will be saved as "
                                                                                       "compressed numpy arrays in "
                                                                                       "output_folder and can be merged "
                                                                                       "between output_folders with "
                                                                                       "merge_predictions.py")
parser.add_argument('-l', '--lowres_segmentations', required=False, default='None', help="if model is the highres "
"stage of the cascade then you need to use -l to specify where the segmentations of the "
"corresponding lowres unet are. Here they are required to do a prediction")
parser.add_argument("--part_id", type=int, required=False, default=0, help="Used to parallelize the prediction of "
"the folder over several GPUs. If you "
"want to use n GPUs to predict this "
"folder you need to run this command "
"n times with --part_id=0, ... n-1 and "
"--num_parts=n (each with a different "
"GPU (for example via "
"CUDA_VISIBLE_DEVICES=X)")
parser.add_argument("--num_parts", type=int, required=False, default=1, help="Used to parallelize the prediction of "
"the folder over several GPUs. If you "
"want to use n GPUs to predict this "
"folder you need to run this command "
"n times with --part_id=0, ... n-1 and "
"--num_parts=n (each with a different "
"GPU (via "
"CUDA_VISIBLE_DEVICES=X)")
    parser.add_argument("--num_threads_preprocessing", required=False, default=6, type=int, help=
    "Determines how many background processes will be used for data preprocessing. Reduce this if you "
    "run into out of memory (RAM) problems. Default: 6")
    parser.add_argument("--num_threads_nifti_save", required=False, default=2, type=int, help=
    "Determines how many background processes will be used for segmentation export. Reduce this if you "
    "run into out of memory (RAM) problems. Default: 2")
parser.add_argument("--tta", required=False, type=int, default=1, help="Set to 0 to disable test time data "
"augmentation (speedup of factor "
"4(2D)/8(3D)), "
"lower quality segmentations")
parser.add_argument("--overwrite_existing", required=False, type=int, default=1, help="Set this to 0 if you need "
"to resume a previous "
"prediction. Default: 1 "
"(=existing segmentations "
"in output_folder will be "
"overwritten)")
args = parser.parse_args()
input_folder = args.input_folder
output_folder = args.output_folder
part_id = args.part_id
num_parts = args.num_parts
model = args.model_output_folder
folds = args.folds
save_npz = args.save_npz
lowres_segmentations = args.lowres_segmentations
num_threads_preprocessing = args.num_threads_preprocessing
num_threads_nifti_save = args.num_threads_nifti_save
tta = args.tta
overwrite = args.overwrite_existing
if lowres_segmentations == "None":
lowres_segmentations = None
if isinstance(folds, list):
if folds[0] == 'all' and len(folds) == 1:
pass
else:
folds = [int(i) for i in folds]
elif folds == "None":
folds = None
else:
raise ValueError("Unexpected value for argument folds")
if tta == 0:
tta = False
elif tta == 1:
tta = True
else:
raise ValueError("Unexpected value for tta, Use 1 or 0")
if overwrite == 0:
overwrite = False
elif overwrite == 1:
overwrite = True
else:
raise ValueError("Unexpected value for overwrite, Use 1 or 0")
predict_from_folder(model, input_folder, output_folder, folds, save_npz, num_threads_preprocessing,
num_threads_nifti_save, lowres_segmentations, part_id, num_parts, tta,
overwrite_existing=overwrite)
| [
"torch.cuda.empty_cache"
] | 1.1.0 | mangoyuan/Unifed-Seg3d | 74c82464dbe901cf18e38afb0e1b74cc159a8850 |
1.6 | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A collection of "vanilla" transforms for spatial operations
https://github.com/Project-MONAI/MONAI/wiki/MONAI_Design
"""
import warnings
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from monai.config import USE_COMPILED, DtypeLike
from monai.config.type_definitions import NdarrayOrTensor
from monai.data.utils import AFFINE_TOL, compute_shape_offset, reorient_spatial_axes, to_affine_nd, zoom_affine
from monai.networks.layers import AffineTransform, GaussianFilter, grid_pull
from monai.networks.utils import meshgrid_ij, normalize_transform
from monai.transforms.croppad.array import CenterSpatialCrop, Pad
from monai.transforms.transform import Randomizable, RandomizableTransform, ThreadUnsafe, Transform
from monai.transforms.utils import (
create_control_grid,
create_grid,
create_rotate,
create_scale,
create_shear,
create_translate,
map_spatial_axes,
)
from monai.transforms.utils_pytorch_numpy_unification import allclose, moveaxis
from monai.utils import (
GridSampleMode,
GridSamplePadMode,
InterpolateMode,
NumpyPadMode,
PytorchPadMode,
ensure_tuple,
ensure_tuple_rep,
ensure_tuple_size,
fall_back_tuple,
issequenceiterable,
optional_import,
pytorch_after,
)
from monai.utils.deprecate_utils import deprecated_arg
from monai.utils.enums import TransformBackends
from monai.utils.module import look_up_option
from monai.utils.type_conversion import convert_data_type, convert_to_dst_type
nib, has_nib = optional_import("nibabel")
__all__ = [
"SpatialResample",
"ResampleToMatch",
"Spacing",
"Orientation",
"Flip",
"GridDistortion",
"Resize",
"Rotate",
"Zoom",
"Rotate90",
"RandRotate90",
"RandRotate",
"RandFlip",
"RandGridDistortion",
"RandAxisFlip",
"RandZoom",
"AffineGrid",
"RandAffineGrid",
"RandDeformGrid",
"Resample",
"Affine",
"RandAffine",
"Rand2DElastic",
"Rand3DElastic",
]
RandRange = Optional[Union[Sequence[Union[Tuple[float, float], float]], float]]
class SpatialResample(Transform):
"""
Resample input image from the orientation/spacing defined by ``src_affine`` affine matrix into
the ones specified by ``dst_affine`` affine matrix.
Internally this transform computes the affine transform matrix from ``src_affine`` to ``dst_affine``,
by ``xform = linalg.solve(src_affine, dst_affine)``, and call ``monai.transforms.Affine`` with ``xform``.
"""
backend = [TransformBackends.TORCH]
def __init__(
self,
mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
align_corners: bool = False,
dtype: DtypeLike = np.float64,
):
"""
Args:
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `USE_COMPILED` is `True`, this argument uses
``"nearest"``, ``"bilinear"``, ``"bicubic"`` to indicate 0, 1, 3 order interpolations.
See also: https://docs.monai.io/en/stable/networks.html#grid-pull
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"border"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
align_corners: Geometrically, we consider the pixels of the input as squares rather than points.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision.
If ``None``, use the data type of input data. To be compatible with other modules,
the output data type is always ``np.float32``.
"""
self.mode = mode
self.padding_mode = padding_mode
self.align_corners = align_corners
self.dtype = dtype
def __call__(
self,
img: NdarrayOrTensor,
src_affine: Optional[NdarrayOrTensor] = None,
dst_affine: Optional[NdarrayOrTensor] = None,
spatial_size: Optional[Union[Sequence[int], np.ndarray, int]] = None,
mode: Union[GridSampleMode, str, None] = GridSampleMode.BILINEAR,
padding_mode: Union[GridSamplePadMode, str, None] = GridSamplePadMode.BORDER,
align_corners: Optional[bool] = False,
dtype: DtypeLike = None,
) -> Tuple[NdarrayOrTensor, NdarrayOrTensor]:
"""
Args:
img: input image to be resampled. It currently supports channel-first arrays with
at most three spatial dimensions.
src_affine: source affine matrix. Defaults to ``None``, which means the identity matrix.
the shape should be `(r+1, r+1)` where `r` is the spatial rank of ``img``.
dst_affine: destination affine matrix. Defaults to ``None``, which means the same as `src_affine`.
the shape should be `(r+1, r+1)` where `r` is the spatial rank of ``img``.
when `dst_affine` and `spatial_size` are None, the input will be returned without resampling,
but the data type will be `float32`.
spatial_size: output image spatial size.
if `spatial_size` and `self.spatial_size` are not defined,
the transform will compute a spatial size automatically containing the previous field of view.
                if `spatial_size` is ``-1``, the transform will use the corresponding input img size.
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `USE_COMPILED` is `True`, this argument uses
``"nearest"``, ``"bilinear"``, ``"bicubic"`` to indicate 0, 1, 3 order interpolations.
See also: https://docs.monai.io/en/stable/networks.html#grid-pull
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"border"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
align_corners: Geometrically, we consider the pixels of the input as squares rather than points.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
dtype: data type for resampling computation. Defaults to ``self.dtype`` or
``np.float64`` (for best precision). If ``None``, use the data type of input data.
To be compatible with other modules, the output data type is always `float32`.
The spatial rank is determined by the smallest among ``img.ndim -1``, ``len(src_affine) - 1``, and ``3``.
When both ``monai.config.USE_COMPILED`` and ``align_corners`` are set to ``True``,
MONAI's resampling implementation will be used.
Set `dst_affine` and `spatial_size` to `None` to turn off the resampling step.
"""
if src_affine is None:
src_affine = np.eye(4, dtype=np.float64)
spatial_rank = min(len(img.shape) - 1, src_affine.shape[0] - 1, 3)
if (not isinstance(spatial_size, int) or spatial_size != -1) and spatial_size is not None:
spatial_rank = min(len(ensure_tuple(spatial_size)), 3) # infer spatial rank based on spatial_size
src_affine = to_affine_nd(spatial_rank, src_affine)
dst_affine = to_affine_nd(spatial_rank, dst_affine) if dst_affine is not None else src_affine
dst_affine, *_ = convert_to_dst_type(dst_affine, dst_affine, dtype=torch.float32)
in_spatial_size = np.asarray(img.shape[1 : spatial_rank + 1])
if isinstance(spatial_size, int) and (spatial_size == -1): # using the input spatial size
spatial_size = in_spatial_size
elif spatial_size is None and spatial_rank > 1: # auto spatial size
spatial_size, _ = compute_shape_offset(in_spatial_size, src_affine, dst_affine) # type: ignore
spatial_size = np.asarray(fall_back_tuple(ensure_tuple(spatial_size)[:spatial_rank], in_spatial_size))
if (
allclose(src_affine, dst_affine, atol=AFFINE_TOL)
and allclose(spatial_size, in_spatial_size)
or spatial_rank == 1
):
# no significant change, return original image
output_data, *_ = convert_to_dst_type(img, img, dtype=torch.float32)
return output_data, dst_affine
if has_nib and isinstance(img, np.ndarray):
spatial_ornt, dst_r = reorient_spatial_axes(img.shape[1 : spatial_rank + 1], src_affine, dst_affine)
if allclose(dst_r, dst_affine, atol=AFFINE_TOL) and allclose(spatial_size, in_spatial_size):
# simple reorientation achieves the desired affine
spatial_ornt[:, 0] += 1
spatial_ornt = np.concatenate([np.array([[0, 1]]), spatial_ornt])
img_ = nib.orientations.apply_orientation(img, spatial_ornt)
output_data, *_ = convert_to_dst_type(img_, img, dtype=torch.float32)
return output_data, dst_affine
try:
src_affine, *_ = convert_to_dst_type(src_affine, dst_affine)
if isinstance(src_affine, np.ndarray):
xform = np.linalg.solve(src_affine, dst_affine)
else:
xform = (
torch.linalg.solve(src_affine, dst_affine)
if pytorch_after(1, 8, 0)
else torch.solve(dst_affine, src_affine).solution # type: ignore
)
except (np.linalg.LinAlgError, RuntimeError) as e:
raise ValueError(f"src affine is not invertible: {src_affine}") from e
xform = to_affine_nd(spatial_rank, xform)
# no resampling if it's identity transform
if allclose(xform, np.diag(np.ones(len(xform))), atol=AFFINE_TOL) and allclose(spatial_size, in_spatial_size):
output_data, *_ = convert_to_dst_type(img, img, dtype=torch.float32)
return output_data, dst_affine
_dtype = dtype or self.dtype or img.dtype
in_spatial_size = in_spatial_size.tolist()
chns, additional_dims = img.shape[0], img.shape[spatial_rank + 1 :] # beyond three spatial dims
# resample
img_ = convert_data_type(img, torch.Tensor, dtype=_dtype)[0]
xform = convert_to_dst_type(xform, img_)[0]
align_corners = self.align_corners if align_corners is None else align_corners
mode = mode or self.mode
padding_mode = padding_mode or self.padding_mode
if additional_dims:
xform_shape = [-1] + in_spatial_size
img_ = img_.reshape(xform_shape)
if align_corners:
_t_r = torch.diag(torch.ones(len(xform), dtype=xform.dtype, device=xform.device)) # type: ignore
for idx, d_dst in enumerate(spatial_size[:spatial_rank]):
_t_r[idx, -1] = (max(d_dst, 2) - 1.0) / 2.0
xform = xform @ _t_r
if not USE_COMPILED:
_t_l = normalize_transform(
in_spatial_size, xform.device, xform.dtype, align_corners=True # type: ignore
)
xform = _t_l @ xform # type: ignore
affine_xform = Affine(
affine=xform, spatial_size=spatial_size, norm_coords=False, image_only=True, dtype=_dtype
)
output_data = affine_xform(img_, mode=mode, padding_mode=padding_mode)
else:
affine_xform = AffineTransform(
normalized=False,
mode=mode,
padding_mode=padding_mode,
align_corners=align_corners,
reverse_indexing=True,
)
output_data = affine_xform(img_.unsqueeze(0), theta=xform, spatial_size=spatial_size).squeeze(0)
if additional_dims:
full_shape = (chns, *spatial_size, *additional_dims)
output_data = output_data.reshape(full_shape)
# output dtype float
output_data, *_ = convert_to_dst_type(output_data, img, dtype=torch.float32)
return output_data, dst_affine
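# Usage sketch (illustrative only; the array shape and affines below are assumptions, not values
# from this module): resample a channel-first volume from a source affine onto a coarser target affine.
def _example_spatial_resample():
    img = np.random.rand(1, 32, 32, 32).astype(np.float32)   # (C, H, W, D)
    src = np.eye(4)                                           # assumed 1mm isotropic source affine
    dst = np.diag([2.0, 2.0, 2.0, 1.0])                       # assumed 2mm isotropic target affine
    out, out_affine = SpatialResample()(img, src_affine=src, dst_affine=dst)
    # with spatial_size=None the output shape is computed to cover the original
    # field of view, so out.shape[1:] is roughly (16, 16, 16)
    return out, out_affine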
class ResampleToMatch(SpatialResample):
"""Resample an image to match given meta data. The affine matrix will be aligned,
and the size of the output image will match."""
def __call__( # type: ignore
self,
img: NdarrayOrTensor,
src_meta: Optional[Dict] = None,
dst_meta: Optional[Dict] = None,
mode: Union[GridSampleMode, str, None] = GridSampleMode.BILINEAR,
padding_mode: Union[GridSamplePadMode, str, None] = GridSamplePadMode.BORDER,
align_corners: Optional[bool] = False,
dtype: DtypeLike = None,
):
if src_meta is None:
            raise RuntimeError("`src_meta` is missing")
if dst_meta is None:
            raise RuntimeError("`dst_meta` is missing")
mode = mode or self.mode
padding_mode = padding_mode or self.padding_mode
align_corners = self.align_corners if align_corners is None else align_corners
dtype = dtype or self.dtype
src_affine = src_meta.get("affine")
dst_affine = dst_meta.get("affine")
img, updated_affine = super().__call__(
img=img,
src_affine=src_affine,
dst_affine=dst_affine,
spatial_size=dst_meta.get("spatial_shape"),
mode=mode,
padding_mode=padding_mode,
align_corners=align_corners,
dtype=dtype,
)
dst_meta = deepcopy(dst_meta)
dst_meta["affine"] = updated_affine
return img, dst_meta
class Spacing(Transform):
"""
Resample input image into the specified `pixdim`.
"""
backend = SpatialResample.backend
def __init__(
self,
pixdim: Union[Sequence[float], float, np.ndarray],
diagonal: bool = False,
mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
align_corners: bool = False,
dtype: DtypeLike = np.float64,
image_only: bool = False,
) -> None:
"""
Args:
pixdim: output voxel spacing. if providing a single number, will use it for the first dimension.
items of the pixdim sequence map to the spatial dimensions of input image, if length
of pixdim sequence is longer than image spatial dimensions, will ignore the longer part,
if shorter, will pad with `1.0`.
if the components of the `pixdim` are non-positive values, the transform will use the
corresponding components of the original pixdim, which is computed from the `affine`
matrix of input image.
diagonal: whether to resample the input to have a diagonal affine matrix.
If True, the input data is resampled to the following affine::
np.diag((pixdim_0, pixdim_1, ..., pixdim_n, 1))
This effectively resets the volume to the world coordinate system (RAS+ in nibabel).
The original orientation, rotation, shearing are not preserved.
If False, this transform preserves the axes orientation, orthogonal rotation and
translation components from the original affine. This option will not flip/swap axes
of the original data.
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `USE_COMPILED` is `True`, this argument uses
``"nearest"``, ``"bilinear"``, ``"bicubic"`` to indicate 0, 1, 3 order interpolations.
See also: https://docs.monai.io/en/stable/networks.html#grid-pull
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"border"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
align_corners: Geometrically, we consider the pixels of the input as squares rather than points.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision.
If None, use the data type of input data. To be compatible with other modules,
the output data type is always ``np.float32``.
image_only: return just the image or the image, the old affine and new affine. Default is `False`.
"""
self.pixdim = np.array(ensure_tuple(pixdim), dtype=np.float64)
self.diagonal = diagonal
self.image_only = image_only
self.sp_resample = SpatialResample(
mode=look_up_option(mode, GridSampleMode),
padding_mode=look_up_option(padding_mode, GridSamplePadMode),
align_corners=align_corners,
dtype=dtype,
)
def __call__(
self,
data_array: NdarrayOrTensor,
affine: Optional[NdarrayOrTensor] = None,
mode: Optional[Union[GridSampleMode, str]] = None,
padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
align_corners: Optional[bool] = None,
dtype: DtypeLike = None,
output_spatial_shape: Optional[Union[Sequence[int], np.ndarray, int]] = None,
) -> Union[NdarrayOrTensor, Tuple[NdarrayOrTensor, NdarrayOrTensor, NdarrayOrTensor]]:
"""
Args:
data_array: in shape (num_channels, H[, W, ...]).
affine (matrix): (N+1)x(N+1) original affine matrix for spatially ND `data_array`. Defaults to identity.
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``self.mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `USE_COMPILED` is `True`, this argument uses
``"nearest"``, ``"bilinear"``, ``"bicubic"`` to indicate 0, 1, 3 order interpolations.
See also: https://docs.monai.io/en/stable/networks.html#grid-pull
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``self.padding_mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
align_corners: Geometrically, we consider the pixels of the input as squares rather than points.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
dtype: data type for resampling computation. Defaults to ``self.dtype``.
If None, use the data type of input data. To be compatible with other modules,
the output data type is always ``np.float32``.
output_spatial_shape: specify the shape of the output data_array. This is typically useful for
the inverse of `Spacingd` where sometimes we could not compute the exact shape due to the quantization
error with the affine.
Raises:
ValueError: When ``data_array`` has no spatial dimensions.
ValueError: When ``pixdim`` is nonpositive.
Returns:
data_array (resampled into `self.pixdim`), original affine, current affine.
"""
sr = int(data_array.ndim - 1)
if sr <= 0:
raise ValueError("data_array must have at least one spatial dimension.")
if affine is None:
# default to identity
affine_np = affine = np.eye(sr + 1, dtype=np.float64)
affine_ = np.eye(sr + 1, dtype=np.float64)
else:
affine_np, *_ = convert_data_type(affine, np.ndarray)
affine_ = to_affine_nd(sr, affine_np)
out_d = self.pixdim[:sr]
if out_d.size < sr:
out_d = np.append(out_d, [1.0] * (sr - out_d.size))
# compute output affine, shape and offset
new_affine = zoom_affine(affine_, out_d, diagonal=self.diagonal)
output_shape, offset = compute_shape_offset(data_array.shape[1:], affine_, new_affine)
new_affine[:sr, -1] = offset[:sr]
output_data, new_affine = self.sp_resample(
data_array,
src_affine=affine,
dst_affine=new_affine,
spatial_size=list(output_shape) if output_spatial_shape is None else output_spatial_shape,
mode=mode,
padding_mode=padding_mode,
align_corners=align_corners,
dtype=dtype,
)
new_affine = to_affine_nd(affine_np, new_affine)
new_affine, *_ = convert_to_dst_type(src=new_affine, dst=affine, dtype=torch.float32)
if self.image_only:
return output_data
return output_data, affine, new_affine
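# Usage sketch (illustrative only; the shape and affine below are assumptions): resample a
# hypothetical 1mm-spaced volume to 2mm in-plane / 1mm through-plane spacing.
def _example_spacing():
    img = np.random.rand(1, 64, 64, 32).astype(np.float32)
    affine = np.eye(4)  # identity affine, i.e. assumed 1mm isotropic spacing
    data, original_affine, new_affine = Spacing(pixdim=(2.0, 2.0, 1.0))(img, affine)
    # data.shape[1:] is roughly (32, 32, 32); new_affine carries the new spacing
    return data, new_affine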
class Orientation(Transform):
"""
Change the input image's orientation into the specified based on `axcodes`.
"""
backend = [TransformBackends.NUMPY, TransformBackends.TORCH]
def __init__(
self,
axcodes: Optional[str] = None,
as_closest_canonical: bool = False,
labels: Optional[Sequence[Tuple[str, str]]] = tuple(zip("LPI", "RAS")),
image_only: bool = False,
) -> None:
"""
Args:
axcodes: N elements sequence for spatial ND input's orientation.
e.g. axcodes='RAS' represents 3D orientation:
(Left, Right), (Posterior, Anterior), (Inferior, Superior).
default orientation labels options are: 'L' and 'R' for the first dimension,
'P' and 'A' for the second, 'I' and 'S' for the third.
as_closest_canonical: if True, load the image as closest to canonical axis format.
labels: optional, None or sequence of (2,) sequences
(2,) sequences are labels for (beginning, end) of output axis.
Defaults to ``(('L', 'R'), ('P', 'A'), ('I', 'S'))``.
image_only: if True return only the image volume, otherwise return (image, affine, new_affine).
Raises:
            ValueError: When ``axcodes=None`` and ``as_closest_canonical=False``. Incompatible values.
See Also: `nibabel.orientations.ornt2axcodes`.
"""
if axcodes is None and not as_closest_canonical:
            raise ValueError("Incompatible values: axcodes=None and as_closest_canonical=False.")
if axcodes is not None and as_closest_canonical:
warnings.warn("using as_closest_canonical=True, axcodes ignored.")
self.axcodes = axcodes
self.as_closest_canonical = as_closest_canonical
self.labels = labels
self.image_only = image_only
def __call__(
self, data_array: NdarrayOrTensor, affine: Optional[NdarrayOrTensor] = None
) -> Union[NdarrayOrTensor, Tuple[NdarrayOrTensor, NdarrayOrTensor, NdarrayOrTensor]]:
"""
original orientation of `data_array` is defined by `affine`.
Args:
data_array: in shape (num_channels, H[, W, ...]).
affine (matrix): (N+1)x(N+1) original affine matrix for spatially ND `data_array`. Defaults to identity.
Raises:
ValueError: When ``data_array`` has no spatial dimensions.
ValueError: When ``axcodes`` spatiality differs from ``data_array``.
Returns:
data_array [reoriented in `self.axcodes`] if `self.image_only`, else
(data_array [reoriented in `self.axcodes`], original axcodes, current axcodes).
"""
spatial_shape = data_array.shape[1:]
sr = len(spatial_shape)
if sr <= 0:
raise ValueError("data_array must have at least one spatial dimension.")
affine_: np.ndarray
if affine is None:
# default to identity
affine_np = affine = np.eye(sr + 1, dtype=np.float64)
affine_ = np.eye(sr + 1, dtype=np.float64)
else:
affine_np, *_ = convert_data_type(affine, np.ndarray)
affine_ = to_affine_nd(sr, affine_np)
src = nib.io_orientation(affine_)
if self.as_closest_canonical:
spatial_ornt = src
else:
if self.axcodes is None:
                raise ValueError("Incompatible values: axcodes=None and as_closest_canonical=False.")
if sr < len(self.axcodes):
                warnings.warn(
                    f"axcodes ('{self.axcodes}') length is greater than the number of input spatial dimensions D={sr}.\n"
                    f"{self.__class__.__name__}: input spatial shape is {spatial_shape}, num. channels is {data_array.shape[0]},"
                    "please make sure the input is in the channel-first format."
                )
dst = nib.orientations.axcodes2ornt(self.axcodes[:sr], labels=self.labels)
if len(dst) < sr:
raise ValueError(
f"axcodes must match data_array spatially, got axcodes={len(self.axcodes)}D data_array={sr}D"
)
spatial_ornt = nib.orientations.ornt_transform(src, dst)
new_affine = affine_ @ nib.orientations.inv_ornt_aff(spatial_ornt, spatial_shape)
_is_tensor = isinstance(data_array, torch.Tensor)
spatial_ornt[:, 0] += 1 # skip channel dim
spatial_ornt = np.concatenate([np.array([[0, 1]]), spatial_ornt])
axes = [ax for ax, flip in enumerate(spatial_ornt[:, 1]) if flip == -1]
if axes:
data_array = (
torch.flip(data_array, dims=axes) if _is_tensor else np.flip(data_array, axis=axes) # type: ignore
)
full_transpose = np.arange(len(data_array.shape))
full_transpose[: len(spatial_ornt)] = np.argsort(spatial_ornt[:, 0])
if not np.all(full_transpose == np.arange(len(data_array.shape))):
if _is_tensor:
data_array = data_array.permute(full_transpose.tolist()) # type: ignore
else:
data_array = data_array.transpose(full_transpose) # type: ignore
out, *_ = convert_to_dst_type(src=data_array, dst=data_array)
new_affine = to_affine_nd(affine_np, new_affine)
new_affine, *_ = convert_to_dst_type(src=new_affine, dst=affine, dtype=torch.float32)
if self.image_only:
return out
return out, affine, new_affine
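# Usage sketch (illustrative only; the shape and affine below are assumptions): reorient a
# hypothetical LPS-oriented volume to RAS, which flips the first two spatial axes.
def _example_orientation():
    img = np.arange(24, dtype=np.float32).reshape(1, 2, 3, 4)
    affine = np.diag([-1.0, -1.0, 1.0, 1.0])  # assumed LPS-like affine
    out, original_affine, new_affine = Orientation(axcodes="RAS")(img, affine)
    # out keeps the same shape; new_affine now encodes an RAS orientation
    return out, new_affine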
class Flip(Transform):
"""
Reverses the order of elements along the given spatial axis. Preserves shape.
Uses ``np.flip`` in practice. See numpy.flip for additional details:
https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html.
Args:
spatial_axis: spatial axes along which to flip over. Default is None.
The default `axis=None` will flip over all of the axes of the input array.
If axis is negative it counts from the last to the first axis.
If axis is a tuple of ints, flipping is performed on all of the axes
specified in the tuple.
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
def __init__(self, spatial_axis: Optional[Union[Sequence[int], int]] = None) -> None:
self.spatial_axis = spatial_axis
def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
"""
Args:
img: channel first array, must have shape: (num_channels, H[, W, ..., ]),
"""
if isinstance(img, np.ndarray):
return np.ascontiguousarray(np.flip(img, map_spatial_axes(img.ndim, self.spatial_axis)))
return torch.flip(img, map_spatial_axes(img.ndim, self.spatial_axis))
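# Usage sketch (illustrative only; the tiny array below is an assumption): flip along the
# first spatial axis; the shape is preserved.
def _example_flip():
    img = np.asarray([[[1.0, 2.0], [3.0, 4.0]]])  # shape (1, 2, 2)
    out = Flip(spatial_axis=0)(img)
    # out == [[[3., 4.], [1., 2.]]]
    return out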
class Resize(Transform):
"""
Resize the input image to given spatial size (with scaling, not cropping/padding).
Implemented using :py:class:`torch.nn.functional.interpolate`.
Args:
spatial_size: expected shape of spatial dimensions after resize operation.
if some components of the `spatial_size` are non-positive values, the transform will use the
corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted
to `(32, 64)` if the second spatial dimension size of img is `64`.
size_mode: should be "all" or "longest", if "all", will use `spatial_size` for all the spatial dims,
if "longest", rescale the image so that only the longest side is equal to specified `spatial_size`,
which must be an int number in this case, keeping the aspect ratio of the initial image, refer to:
https://albumentations.ai/docs/api_reference/augmentations/geometric/resize/
#albumentations.augmentations.geometric.resize.LongestMaxSize.
mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
The interpolation mode. Defaults to ``"area"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
align_corners: This only has an effect when mode is
'linear', 'bilinear', 'bicubic' or 'trilinear'. Default: None.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
"""
backend = [TransformBackends.TORCH]
def __init__(
self,
spatial_size: Union[Sequence[int], int],
size_mode: str = "all",
mode: Union[InterpolateMode, str] = InterpolateMode.AREA,
align_corners: Optional[bool] = None,
) -> None:
self.size_mode = look_up_option(size_mode, ["all", "longest"])
self.spatial_size = spatial_size
self.mode: InterpolateMode = look_up_option(mode, InterpolateMode)
self.align_corners = align_corners
def __call__(
self,
img: NdarrayOrTensor,
mode: Optional[Union[InterpolateMode, str]] = None,
align_corners: Optional[bool] = None,
) -> NdarrayOrTensor:
"""
Args:
img: channel first array, must have shape: (num_channels, H[, W, ..., ]).
mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
The interpolation mode. Defaults to ``self.mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
align_corners: This only has an effect when mode is
'linear', 'bilinear', 'bicubic' or 'trilinear'. Defaults to ``self.align_corners``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
Raises:
ValueError: When ``self.spatial_size`` length is less than ``img`` spatial dimensions.
"""
img_, *_ = convert_data_type(img, torch.Tensor, dtype=torch.float)
if self.size_mode == "all":
input_ndim = img_.ndim - 1 # spatial ndim
output_ndim = len(ensure_tuple(self.spatial_size))
if output_ndim > input_ndim:
input_shape = ensure_tuple_size(img_.shape, output_ndim + 1, 1)
img_ = img_.reshape(input_shape)
elif output_ndim < input_ndim:
raise ValueError(
"len(spatial_size) must be greater or equal to img spatial dimensions, "
f"got spatial_size={output_ndim} img={input_ndim}."
)
spatial_size_ = fall_back_tuple(self.spatial_size, img_.shape[1:])
else: # for the "longest" mode
img_size = img_.shape[1:]
if not isinstance(self.spatial_size, int):
raise ValueError("spatial_size must be an int number if size_mode is 'longest'.")
scale = self.spatial_size / max(img_size)
spatial_size_ = tuple(int(round(s * scale)) for s in img_size)
resized = torch.nn.functional.interpolate(
input=img_.unsqueeze(0),
size=spatial_size_,
mode=look_up_option(self.mode if mode is None else mode, InterpolateMode).value,
align_corners=self.align_corners if align_corners is None else align_corners,
)
out, *_ = convert_to_dst_type(resized.squeeze(0), img)
return out
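# Usage sketch (illustrative only; the shape and size below are assumptions): "longest" mode
# rescales so that only the longest spatial side matches the requested size.
def _example_resize():
    img = np.random.rand(1, 64, 32).astype(np.float32)
    resized = Resize(spatial_size=32, size_mode="longest")(img)
    # scale = 32 / 64 = 0.5, so the output spatial shape is (32, 16)
    return resized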
class Rotate(Transform, ThreadUnsafe):
"""
Rotates an input image by given angle using :py:class:`monai.networks.layers.AffineTransform`.
Args:
        angle: Rotation angle(s) in radians. Should be a float for 2D, three floats for 3D.
keep_size: If it is True, the output shape is kept the same as the input.
If it is False, the output shape is adapted so that the
input array is contained completely in the output. Default is True.
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"border"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
align_corners: Defaults to False.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
dtype: data type for resampling computation. Defaults to ``np.float32``.
If None, use the data type of input data. To be compatible with other modules,
the output data type is always ``np.float32``.
"""
backend = [TransformBackends.TORCH]
def __init__(
self,
angle: Union[Sequence[float], float],
keep_size: bool = True,
mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
align_corners: bool = False,
dtype: Union[DtypeLike, torch.dtype] = np.float32,
) -> None:
self.angle = angle
self.keep_size = keep_size
self.mode: GridSampleMode = look_up_option(mode, GridSampleMode)
self.padding_mode: GridSamplePadMode = look_up_option(padding_mode, GridSamplePadMode)
self.align_corners = align_corners
self.dtype = dtype
self._rotation_matrix: Optional[NdarrayOrTensor] = None
def __call__(
self,
img: NdarrayOrTensor,
mode: Optional[Union[GridSampleMode, str]] = None,
padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
align_corners: Optional[bool] = None,
dtype: Union[DtypeLike, torch.dtype] = None,
) -> NdarrayOrTensor:
"""
Args:
img: channel first array, must have shape: [chns, H, W] or [chns, H, W, D].
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``self.mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``self.padding_mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
align_corners: Defaults to ``self.align_corners``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
dtype: data type for resampling computation. Defaults to ``self.dtype``.
If None, use the data type of input data. To be compatible with other modules,
the output data type is always ``np.float32``.
Raises:
ValueError: When ``img`` spatially is not one of [2D, 3D].
"""
_dtype = dtype or self.dtype or img.dtype
img_t, *_ = convert_data_type(img, torch.Tensor, dtype=_dtype)
im_shape = np.asarray(img_t.shape[1:]) # spatial dimensions
input_ndim = len(im_shape)
if input_ndim not in (2, 3):
raise ValueError(f"Unsupported img dimension: {input_ndim}, available options are [2, 3].")
_angle = ensure_tuple_rep(self.angle, 1 if input_ndim == 2 else 3)
transform = create_rotate(input_ndim, _angle)
shift = create_translate(input_ndim, ((im_shape - 1) / 2).tolist())
if self.keep_size:
output_shape = im_shape
else:
corners = np.asarray(np.meshgrid(*[(0, dim) for dim in im_shape], indexing="ij")).reshape(
(len(im_shape), -1)
)
corners = transform[:-1, :-1] @ corners # type: ignore
output_shape = np.asarray(corners.ptp(axis=1) + 0.5, dtype=int)
shift_1 = create_translate(input_ndim, (-(output_shape - 1) / 2).tolist())
transform = shift @ transform @ shift_1
transform_t, *_ = convert_to_dst_type(transform, img_t)
xform = AffineTransform(
normalized=False,
mode=look_up_option(mode or self.mode, GridSampleMode),
padding_mode=look_up_option(padding_mode or self.padding_mode, GridSamplePadMode),
align_corners=self.align_corners if align_corners is None else align_corners,
reverse_indexing=True,
)
output: torch.Tensor = xform(img_t.unsqueeze(0), transform_t, spatial_size=output_shape).float().squeeze(0)
self._rotation_matrix = transform
out: NdarrayOrTensor
out, *_ = convert_to_dst_type(output, dst=img, dtype=output.dtype)
return out
def get_rotation_matrix(self) -> Optional[NdarrayOrTensor]:
"""
Get the most recently applied rotation matrix
This is not thread-safe.
"""
return self._rotation_matrix
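# Usage sketch (illustrative only; the shape and angle below are assumptions): rotate a
# non-square 2D image by 90 degrees with keep_size=False, so height and width are swapped.
def _example_rotate():
    img = np.random.rand(1, 64, 32).astype(np.float32)
    rotated = Rotate(angle=np.pi / 2, keep_size=False)(img)
    # rotated.shape is approximately (1, 32, 64)
    return rotated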
class Zoom(Transform):
"""
Zooms an ND image using :py:class:`torch.nn.functional.interpolate`.
For details, please see https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html.
Different from :py:class:`monai.transforms.resize`, this transform takes scaling factors
as input, and provides an option of preserving the input spatial size.
Args:
zoom: The zoom factor along the spatial axes.
If a float, zoom is the same for each spatial axis.
If a sequence, zoom should contain one value for each spatial axis.
mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
The interpolation mode. Defaults to ``"area"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
padding_mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
            One of the listed string values or a user supplied function. Defaults to ``"edge"``.
The mode to pad data after zooming.
See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
align_corners: This only has an effect when mode is
'linear', 'bilinear', 'bicubic' or 'trilinear'. Default: None.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
keep_size: Should keep original size (padding/slicing if needed), default is True.
kwargs: other arguments for the `np.pad` or `torch.pad` function.
note that `np.pad` treats channel dimension as the first dimension.
"""
backend = [TransformBackends.TORCH]
def __init__(
self,
zoom: Union[Sequence[float], float],
mode: Union[InterpolateMode, str] = InterpolateMode.AREA,
padding_mode: Union[NumpyPadMode, PytorchPadMode, str] = NumpyPadMode.EDGE,
align_corners: Optional[bool] = None,
keep_size: bool = True,
**kwargs,
) -> None:
self.zoom = zoom
self.mode: InterpolateMode = InterpolateMode(mode)
self.padding_mode = padding_mode
self.align_corners = align_corners
self.keep_size = keep_size
self.kwargs = kwargs
def __call__(
self,
img: NdarrayOrTensor,
mode: Optional[Union[InterpolateMode, str]] = None,
padding_mode: Optional[Union[NumpyPadMode, PytorchPadMode, str]] = None,
align_corners: Optional[bool] = None,
) -> NdarrayOrTensor:
"""
Args:
img: channel first array, must have shape: (num_channels, H[, W, ..., ]).
mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
The interpolation mode. Defaults to ``self.mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
padding_mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
                One of the listed string values or a user supplied function. Defaults to ``self.padding_mode``.
The mode to pad data after zooming.
See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
align_corners: This only has an effect when mode is
'linear', 'bilinear', 'bicubic' or 'trilinear'. Defaults to ``self.align_corners``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
"""
img_t, *_ = convert_data_type(img, torch.Tensor, dtype=torch.float32)
_zoom = ensure_tuple_rep(self.zoom, img.ndim - 1) # match the spatial image dim
zoomed: NdarrayOrTensor = torch.nn.functional.interpolate( # type: ignore
recompute_scale_factor=True,
input=img_t.unsqueeze(0),
scale_factor=list(_zoom),
mode=look_up_option(self.mode if mode is None else mode, InterpolateMode).value,
align_corners=self.align_corners if align_corners is None else align_corners,
)
zoomed = zoomed.squeeze(0)
if self.keep_size and not np.allclose(img_t.shape, zoomed.shape):
pad_vec = [(0, 0)] * len(img_t.shape)
slice_vec = [slice(None)] * len(img_t.shape)
for idx, (od, zd) in enumerate(zip(img_t.shape, zoomed.shape)):
diff = od - zd
half = abs(diff) // 2
if diff > 0: # need padding
pad_vec[idx] = (half, diff - half)
elif diff < 0: # need slicing
slice_vec[idx] = slice(half, half + od)
padder = Pad(pad_vec, padding_mode or self.padding_mode)
zoomed = padder(zoomed)
zoomed = zoomed[tuple(slice_vec)]
out, *_ = convert_to_dst_type(zoomed, dst=img)
return out
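# Usage sketch (illustrative only; the shape and zoom factor below are assumptions): with
# keep_size=True the output is padded/cropped back to the input shape after interpolation.
def _example_zoom():
    img = np.random.rand(1, 32, 32).astype(np.float32)
    zoomed = Zoom(zoom=0.5, keep_size=True)(img)
    # zoomed.shape == (1, 32, 32); the zoomed content occupies the central 16x16
    # region, surrounded by padding (padding_mode defaults to "edge")
    return zoomed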
class Rotate90(Transform):
"""
Rotate an array by 90 degrees in the plane specified by `axes`.
See np.rot90 for additional details:
https://numpy.org/doc/stable/reference/generated/numpy.rot90.html.
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
def __init__(self, k: int = 1, spatial_axes: Tuple[int, int] = (0, 1)) -> None:
"""
Args:
k: number of times to rotate by 90 degrees.
spatial_axes: 2 int numbers, defines the plane to rotate with 2 spatial axes.
Default: (0, 1), this is the first two axis in spatial dimensions.
If axis is negative it counts from the last to the first axis.
"""
self.k = k
spatial_axes_: Tuple[int, int] = ensure_tuple(spatial_axes) # type: ignore
if len(spatial_axes_) != 2:
raise ValueError("spatial_axes must be 2 int numbers to indicate the axes to rotate 90 degrees.")
self.spatial_axes = spatial_axes_
def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
"""
Args:
img: channel first array, must have shape: (num_channels, H[, W, ..., ]),
"""
rot90: Callable = torch.rot90 if isinstance(img, torch.Tensor) else np.rot90 # type: ignore
out: NdarrayOrTensor = rot90(img, self.k, map_spatial_axes(img.ndim, self.spatial_axes))
out, *_ = convert_data_type(out, dtype=img.dtype)
return out
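# Usage sketch (illustrative only; the shape below is an assumption): one 90-degree rotation
# in the (0, 1) spatial plane swaps those two spatial dimensions.
def _example_rotate90():
    img = np.random.rand(1, 2, 3).astype(np.float32)
    out = Rotate90(k=1, spatial_axes=(0, 1))(img)
    # out.shape == (1, 3, 2)
    return out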
class RandRotate90(RandomizableTransform):
"""
With probability `prob`, input arrays are rotated by 90 degrees
in the plane specified by `spatial_axes`.
"""
backend = Rotate90.backend
def __init__(self, prob: float = 0.1, max_k: int = 3, spatial_axes: Tuple[int, int] = (0, 1)) -> None:
"""
Args:
prob: probability of rotating.
(Default 0.1, with 10% probability it returns a rotated array)
            max_k: the number of rotations is sampled from `np.random.randint(max_k) + 1`. (Default 3)
spatial_axes: 2 int numbers, defines the plane to rotate with 2 spatial axes.
Default: (0, 1), this is the first two axis in spatial dimensions.
"""
RandomizableTransform.__init__(self, prob)
self.max_k = max_k
self.spatial_axes = spatial_axes
self._rand_k = 0
def randomize(self, data: Optional[Any] = None) -> None:
super().randomize(None)
if not self._do_transform:
return None
self._rand_k = self.R.randint(self.max_k) + 1
def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:
"""
Args:
img: channel first array, must have shape: (num_channels, H[, W, ..., ]),
randomize: whether to execute `randomize()` function first, default to True.
"""
if randomize:
self.randomize()
if not self._do_transform:
return img
return Rotate90(self._rand_k, self.spatial_axes)(img)
class RandRotate(RandomizableTransform):
"""
Randomly rotate the input arrays.
Args:
range_x: Range of rotation angle in radians in the plane defined by the first and second axes.
If single number, angle is uniformly sampled from (-range_x, range_x).
range_y: Range of rotation angle in radians in the plane defined by the first and third axes.
If single number, angle is uniformly sampled from (-range_y, range_y).
range_z: Range of rotation angle in radians in the plane defined by the second and third axes.
If single number, angle is uniformly sampled from (-range_z, range_z).
prob: Probability of rotation.
keep_size: If it is False, the output shape is adapted so that the
input array is contained completely in the output.
If it is True, the output shape is the same as the input. Default is True.
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"border"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
align_corners: Defaults to False.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
dtype: data type for resampling computation. Defaults to ``np.float32``.
If None, use the data type of input data. To be compatible with other modules,
the output data type is always ``np.float32``.
"""
backend = Rotate.backend
def __init__(
self,
range_x: Union[Tuple[float, float], float] = 0.0,
range_y: Union[Tuple[float, float], float] = 0.0,
range_z: Union[Tuple[float, float], float] = 0.0,
prob: float = 0.1,
keep_size: bool = True,
mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
align_corners: bool = False,
dtype: Union[DtypeLike, torch.dtype] = np.float32,
) -> None:
RandomizableTransform.__init__(self, prob)
self.range_x = ensure_tuple(range_x)
if len(self.range_x) == 1:
self.range_x = tuple(sorted([-self.range_x[0], self.range_x[0]]))
self.range_y = ensure_tuple(range_y)
if len(self.range_y) == 1:
self.range_y = tuple(sorted([-self.range_y[0], self.range_y[0]]))
self.range_z = ensure_tuple(range_z)
if len(self.range_z) == 1:
self.range_z = tuple(sorted([-self.range_z[0], self.range_z[0]]))
self.keep_size = keep_size
self.mode: GridSampleMode = look_up_option(mode, GridSampleMode)
self.padding_mode: GridSamplePadMode = look_up_option(padding_mode, GridSamplePadMode)
self.align_corners = align_corners
self.dtype = dtype
self.x = 0.0
self.y = 0.0
self.z = 0.0
def randomize(self, data: Optional[Any] = None) -> None:
super().randomize(None)
if not self._do_transform:
return None
self.x = self.R.uniform(low=self.range_x[0], high=self.range_x[1])
self.y = self.R.uniform(low=self.range_y[0], high=self.range_y[1])
self.z = self.R.uniform(low=self.range_z[0], high=self.range_z[1])
def __call__(
self,
img: NdarrayOrTensor,
mode: Optional[Union[GridSampleMode, str]] = None,
padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
align_corners: Optional[bool] = None,
dtype: Union[DtypeLike, torch.dtype] = None,
randomize: bool = True,
get_matrix: bool = False,
):
"""
Args:
img: channel first array, must have shape 2D: (nchannels, H, W), or 3D: (nchannels, H, W, D).
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``self.mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``self.padding_mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
align_corners: Defaults to ``self.align_corners``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
dtype: data type for resampling computation. Defaults to ``self.dtype``.
If None, use the data type of input data. To be compatible with other modules,
the output data type is always ``np.float32``.
randomize: whether to execute `randomize()` function first, default to True.
get_matrix: whether to return the rotated image and rotate matrix together, default to False.
"""
if randomize:
self.randomize()
if not self._do_transform:
return img
rotator = Rotate(
angle=self.x if img.ndim == 3 else (self.x, self.y, self.z),
keep_size=self.keep_size,
mode=look_up_option(mode or self.mode, GridSampleMode),
padding_mode=look_up_option(padding_mode or self.padding_mode, GridSamplePadMode),
align_corners=self.align_corners if align_corners is None else align_corners,
dtype=dtype or self.dtype or img.dtype,
)
img = rotator(img)
return (img, rotator.get_rotation_matrix()) if get_matrix else img
class RandFlip(RandomizableTransform):
"""
Randomly flips the image along axes. Preserves shape.
See numpy.flip for additional details.
https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html
Args:
prob: Probability of flipping.
spatial_axis: Spatial axes along which to flip over. Default is None.
"""
backend = Flip.backend
def __init__(self, prob: float = 0.1, spatial_axis: Optional[Union[Sequence[int], int]] = None) -> None:
RandomizableTransform.__init__(self, prob)
self.flipper = Flip(spatial_axis=spatial_axis)
def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:
"""
Args:
img: channel first array, must have shape: (num_channels, H[, W, ..., ]),
randomize: whether to execute `randomize()` function first, default to True.
"""
if randomize:
self.randomize(None)
if not self._do_transform:
return img
return self.flipper(img)
class RandAxisFlip(RandomizableTransform):
"""
Randomly select a spatial axis and flip along it.
See numpy.flip for additional details.
https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html
Args:
prob: Probability of flipping.
"""
backend = Flip.backend
def __init__(self, prob: float = 0.1) -> None:
RandomizableTransform.__init__(self, prob)
self._axis: Optional[int] = None
def randomize(self, data: NdarrayOrTensor) -> None:
super().randomize(None)
if not self._do_transform:
return None
self._axis = self.R.randint(data.ndim - 1)
def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:
"""
Args:
img: channel first array, must have shape: (num_channels, H[, W, ..., ]),
randomize: whether to execute `randomize()` function first, default to True.
"""
if randomize:
self.randomize(data=img)
if not self._do_transform:
return img
return Flip(spatial_axis=self._axis)(img)
class RandZoom(RandomizableTransform):
"""
Randomly zooms input arrays with given probability within given zoom range.
Args:
prob: Probability of zooming.
min_zoom: Min zoom factor. Can be float or sequence same size as image.
If a float, select a random factor from `[min_zoom, max_zoom]` then apply to all spatial dims
to keep the original spatial shape ratio.
If a sequence, min_zoom should contain one value for each spatial axis.
If 2 values provided for 3D data, use the first value for both H & W dims to keep the same zoom ratio.
max_zoom: Max zoom factor. Can be float or sequence same size as image.
If a float, select a random factor from `[min_zoom, max_zoom]` then apply to all spatial dims
to keep the original spatial shape ratio.
If a sequence, max_zoom should contain one value for each spatial axis.
If 2 values provided for 3D data, use the first value for both H & W dims to keep the same zoom ratio.
mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
The interpolation mode. Defaults to ``"area"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
padding_mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
            One of the listed string values or a user supplied function. Defaults to ``"edge"``.
The mode to pad data after zooming.
See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
align_corners: This only has an effect when mode is
'linear', 'bilinear', 'bicubic' or 'trilinear'. Default: None.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
keep_size: Should keep original size (pad if needed), default is True.
kwargs: other arguments for the `np.pad` or `torch.pad` function.
note that `np.pad` treats channel dimension as the first dimension.
"""
backend = Zoom.backend
def __init__(
self,
prob: float = 0.1,
min_zoom: Union[Sequence[float], float] = 0.9,
max_zoom: Union[Sequence[float], float] = 1.1,
mode: Union[InterpolateMode, str] = InterpolateMode.AREA,
padding_mode: Union[NumpyPadMode, PytorchPadMode, str] = NumpyPadMode.EDGE,
align_corners: Optional[bool] = None,
keep_size: bool = True,
**kwargs,
) -> None:
RandomizableTransform.__init__(self, prob)
self.min_zoom = ensure_tuple(min_zoom)
self.max_zoom = ensure_tuple(max_zoom)
if len(self.min_zoom) != len(self.max_zoom):
raise AssertionError("min_zoom and max_zoom must have same length.")
self.mode: InterpolateMode = look_up_option(mode, InterpolateMode)
self.padding_mode = padding_mode
self.align_corners = align_corners
self.keep_size = keep_size
self.kwargs = kwargs
self._zoom: Sequence[float] = [1.0]
def randomize(self, img: NdarrayOrTensor) -> None:
super().randomize(None)
if not self._do_transform:
return None
self._zoom = [self.R.uniform(l, h) for l, h in zip(self.min_zoom, self.max_zoom)]
if len(self._zoom) == 1:
# to keep the spatial shape ratio, use same random zoom factor for all dims
self._zoom = ensure_tuple_rep(self._zoom[0], img.ndim - 1)
elif len(self._zoom) == 2 and img.ndim > 3:
# if 2 zoom factors provided for 3D data, use the first factor for H and W dims, second factor for D dim
self._zoom = ensure_tuple_rep(self._zoom[0], img.ndim - 2) + ensure_tuple(self._zoom[-1])
def __call__(
self,
img: NdarrayOrTensor,
mode: Optional[Union[InterpolateMode, str]] = None,
padding_mode: Optional[Union[NumpyPadMode, PytorchPadMode, str]] = None,
align_corners: Optional[bool] = None,
randomize: bool = True,
) -> NdarrayOrTensor:
"""
Args:
img: channel first array, must have shape 2D: (nchannels, H, W), or 3D: (nchannels, H, W, D).
mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
The interpolation mode. Defaults to ``self.mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
padding_mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
                One of the listed string values or a user supplied function. Defaults to ``self.padding_mode``.
The mode to pad data after zooming.
See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
align_corners: This only has an effect when mode is
'linear', 'bilinear', 'bicubic' or 'trilinear'. Defaults to ``self.align_corners``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
randomize: whether to execute `randomize()` function first, default to True.
"""
# match the spatial image dim
if randomize:
self.randomize(img=img)
if not self._do_transform:
return img
return Zoom(
self._zoom,
keep_size=self.keep_size,
mode=look_up_option(mode or self.mode, InterpolateMode),
padding_mode=padding_mode or self.padding_mode,
align_corners=align_corners or self.align_corners,
**self.kwargs,
)(img)
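# Usage sketch (illustrative only; the shape, zoom bounds and seed below are assumptions): with
# two zoom bounds on a 3D image, the first sampled factor is reused for H and W and the second
# for D, as described in the docstring above.
def _example_rand_zoom():
    img = np.random.rand(1, 32, 32, 16).astype(np.float32)
    rand_zoom = RandZoom(prob=1.0, min_zoom=(0.8, 0.9), max_zoom=(1.2, 1.1))
    rand_zoom.set_random_state(seed=0)
    out = rand_zoom(img)
    # keep_size=True (the default) keeps out.shape == img.shape
    return out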
class AffineGrid(Transform):
"""
Affine transforms on the coordinates.
Args:
rotate_params: a rotation angle in radians, a scalar for 2D image, a tuple of 3 floats for 3D.
Defaults to no rotation.
shear_params: shearing factors for affine matrix, take a 3D affine as example::
[
[1.0, params[0], params[1], 0.0],
[params[2], 1.0, params[3], 0.0],
[params[4], params[5], 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
a tuple of 2 floats for 2D, a tuple of 6 floats for 3D. Defaults to no shearing.
translate_params: a tuple of 2 floats for 2D, a tuple of 3 floats for 3D. Translation is in
pixel/voxel relative to the center of the input image. Defaults to no translation.
scale_params: scale factor for every spatial dims. a tuple of 2 floats for 2D,
a tuple of 3 floats for 3D. Defaults to `1.0`.
dtype: data type for the grid computation. Defaults to ``np.float32``.
If ``None``, use the data type of input data (if `grid` is provided).
device: device on which the tensor will be allocated, if a new grid is generated.
affine: If applied, ignore the params (`rotate_params`, etc.) and use the
supplied matrix. Should be square with each side = num of image spatial
dimensions + 1.
.. deprecated:: 0.6.0
``as_tensor_output`` is deprecated.
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
@deprecated_arg(name="as_tensor_output", since="0.6")
def __init__(
self,
rotate_params: Optional[Union[Sequence[float], float]] = None,
shear_params: Optional[Union[Sequence[float], float]] = None,
translate_params: Optional[Union[Sequence[float], float]] = None,
scale_params: Optional[Union[Sequence[float], float]] = None,
as_tensor_output: bool = True,
device: Optional[torch.device] = None,
dtype: DtypeLike = np.float32,
affine: Optional[NdarrayOrTensor] = None,
) -> None:
self.rotate_params = rotate_params
self.shear_params = shear_params
self.translate_params = translate_params
self.scale_params = scale_params
self.device = device
self.dtype = dtype
self.affine = affine
def __call__(
self, spatial_size: Optional[Sequence[int]] = None, grid: Optional[NdarrayOrTensor] = None
) -> Tuple[NdarrayOrTensor, NdarrayOrTensor]:
"""
The grid can be initialized with a `spatial_size` parameter, or provided directly as `grid`.
Therefore, either `spatial_size` or `grid` must be provided.
When initialising from `spatial_size`, the backend "torch" will be used.
Args:
spatial_size: output grid size.
grid: grid to be transformed. Shape must be (3, H, W) for 2D or (4, H, W, D) for 3D.
Raises:
ValueError: When ``grid=None`` and ``spatial_size=None``. Incompatible values.
"""
if grid is None: # create grid from spatial_size
if spatial_size is None:
raise ValueError("Incompatible values: grid=None and spatial_size=None.")
grid = create_grid(spatial_size, device=self.device, backend="torch", dtype=self.dtype)
_b = TransformBackends.TORCH if isinstance(grid, torch.Tensor) else TransformBackends.NUMPY
_device = grid.device if isinstance(grid, torch.Tensor) else self.device
affine: NdarrayOrTensor
if self.affine is None:
spatial_dims = len(grid.shape) - 1
affine = (
torch.eye(spatial_dims + 1, device=_device)
if _b == TransformBackends.TORCH
else np.eye(spatial_dims + 1)
)
if self.rotate_params:
affine = affine @ create_rotate(spatial_dims, self.rotate_params, device=_device, backend=_b)
if self.shear_params:
affine = affine @ create_shear(spatial_dims, self.shear_params, device=_device, backend=_b)
if self.translate_params:
affine = affine @ create_translate(spatial_dims, self.translate_params, device=_device, backend=_b)
if self.scale_params:
affine = affine @ create_scale(spatial_dims, self.scale_params, device=_device, backend=_b)
else:
affine = self.affine
grid, *_ = convert_data_type(grid, torch.Tensor, device=_device, dtype=self.dtype or grid.dtype)
affine, *_ = convert_to_dst_type(affine, grid)
grid = (affine @ grid.reshape((grid.shape[0], -1))).reshape([-1] + list(grid.shape[1:]))
return grid, affine
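# Illustrative usage sketch (not part of the upstream source): AffineGrid composes the given
# parameters into an affine matrix and applies it to a coordinate grid of the requested size.
# The rotation/scale values are assumptions for demonstration only.
def _example_affine_grid():
    import numpy as np
    affine_grid = AffineGrid(rotate_params=np.pi / 8, scale_params=(1.1, 0.9))
    grid, affine = affine_grid(spatial_size=(64, 64))  # grid: (3, 64, 64); affine: 3x3 matrix
    return grid, affine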
class RandAffineGrid(Randomizable, Transform):
"""
Generate randomised affine grid.
"""
backend = AffineGrid.backend
@deprecated_arg(name="as_tensor_output", since="0.6")
def __init__(
self,
rotate_range: RandRange = None,
shear_range: RandRange = None,
translate_range: RandRange = None,
scale_range: RandRange = None,
as_tensor_output: bool = True,
device: Optional[torch.device] = None,
) -> None:
"""
Args:
rotate_range: angle range in radians. If element `i` is a pair of (min, max) values, then
`uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter
for the `i`th spatial dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used.
This can be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be
in range `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]`
for dim0 and nothing for the remaining dimensions.
shear_range: shear range with format matching `rotate_range`, it defines the range to randomly select
shearing factors(a tuple of 2 floats for 2D, a tuple of 6 floats for 3D) for affine matrix,
take a 3D affine as example::
[
[1.0, params[0], params[1], 0.0],
[params[2], 1.0, params[3], 0.0],
[params[4], params[5], 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
translate_range: translate range with format matching `rotate_range`, it defines the range to randomly
select voxels to translate for every spatial dims.
scale_range: scaling range with format matching `rotate_range`. it defines the range to randomly select
the scale factor to translate for every spatial dims. A value of 1.0 is added to the result.
This allows 0 to correspond to no change (i.e., a scaling of 1.0).
device: device to store the output grid data.
See also:
- :py:meth:`monai.transforms.utils.create_rotate`
- :py:meth:`monai.transforms.utils.create_shear`
- :py:meth:`monai.transforms.utils.create_translate`
- :py:meth:`monai.transforms.utils.create_scale`
.. deprecated:: 0.6.0
``as_tensor_output`` is deprecated.
"""
self.rotate_range = ensure_tuple(rotate_range)
self.shear_range = ensure_tuple(shear_range)
self.translate_range = ensure_tuple(translate_range)
self.scale_range = ensure_tuple(scale_range)
self.rotate_params: Optional[List[float]] = None
self.shear_params: Optional[List[float]] = None
self.translate_params: Optional[List[float]] = None
self.scale_params: Optional[List[float]] = None
self.device = device
self.affine: Optional[NdarrayOrTensor] = None
def _get_rand_param(self, param_range, add_scalar: float = 0.0):
out_param = []
for f in param_range:
if issequenceiterable(f):
if len(f) != 2:
raise ValueError("If giving range as [min,max], should only have two elements per dim.")
out_param.append(self.R.uniform(f[0], f[1]) + add_scalar)
elif f is not None:
out_param.append(self.R.uniform(-f, f) + add_scalar)
return out_param
def randomize(self, data: Optional[Any] = None) -> None:
self.rotate_params = self._get_rand_param(self.rotate_range)
self.shear_params = self._get_rand_param(self.shear_range)
self.translate_params = self._get_rand_param(self.translate_range)
self.scale_params = self._get_rand_param(self.scale_range, 1.0)
def __call__(
self, spatial_size: Optional[Sequence[int]] = None, grid: Optional[NdarrayOrTensor] = None
) -> NdarrayOrTensor:
"""
Args:
spatial_size: output grid size.
grid: grid to be transformed. Shape must be (3, H, W) for 2D or (4, H, W, D) for 3D.
Returns:
a 2D (3xHxW) or 3D (4xHxWxD) grid.
"""
self.randomize()
affine_grid = AffineGrid(
rotate_params=self.rotate_params,
shear_params=self.shear_params,
translate_params=self.translate_params,
scale_params=self.scale_params,
device=self.device,
)
_grid: NdarrayOrTensor
_grid, self.affine = affine_grid(spatial_size, grid)
return _grid
def get_transformation_matrix(self) -> Optional[NdarrayOrTensor]:
"""Get the most recently applied transformation matrix"""
return self.affine
class RandDeformGrid(Randomizable, Transform):
"""
Generate random deformation grid.
"""
backend = [TransformBackends.TORCH]
def __init__(
self,
spacing: Union[Sequence[float], float],
magnitude_range: Tuple[float, float],
as_tensor_output: bool = True,
device: Optional[torch.device] = None,
) -> None:
"""
Args:
spacing: spacing of the grid in 2D or 3D.
e.g., spacing=(1, 1) indicates pixel-wise deformation in 2D,
spacing=(1, 1, 1) indicates voxel-wise deformation in 3D,
spacing=(2, 2) indicates deformation field defined on every other pixel in 2D.
magnitude_range: the random offsets will be generated from
`uniform[magnitude[0], magnitude[1])`.
as_tensor_output: whether to output tensor instead of numpy array.
defaults to True.
device: device to store the output grid data.
"""
self.spacing = spacing
self.magnitude = magnitude_range
self.rand_mag = 1.0
self.as_tensor_output = as_tensor_output
self.random_offset: np.ndarray
self.device = device
def randomize(self, grid_size: Sequence[int]) -> None:
self.random_offset = self.R.normal(size=([len(grid_size)] + list(grid_size))).astype(np.float32, copy=False)
self.rand_mag = self.R.uniform(self.magnitude[0], self.magnitude[1])
def __call__(self, spatial_size: Sequence[int]):
"""
Args:
spatial_size: spatial size of the grid.
"""
self.spacing = fall_back_tuple(self.spacing, (1.0,) * len(spatial_size))
control_grid = create_control_grid(spatial_size, self.spacing, device=self.device, backend="torch")
self.randomize(control_grid.shape[1:])
_offset, *_ = convert_to_dst_type(self.rand_mag * self.random_offset, control_grid)
control_grid[: len(spatial_size)] += _offset
if not self.as_tensor_output:
control_grid, *_ = convert_data_type(control_grid, output_type=np.ndarray, dtype=np.float32)
return control_grid
class Resample(Transform):
backend = [TransformBackends.TORCH]
@deprecated_arg(name="as_tensor_output", since="0.6")
def __init__(
self,
mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
as_tensor_output: bool = True,
norm_coords: bool = True,
device: Optional[torch.device] = None,
dtype: DtypeLike = np.float64,
) -> None:
"""
computes output image using values from `img`, locations from `grid` using pytorch.
supports spatially 2D or 3D (num_channels, H, W[, D]).
Args:
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"border"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `USE_COMPILED` is `True`, this argument uses
``"nearest"``, ``"bilinear"``, ``"bicubic"`` to indicate 0, 1, 3 order interpolations.
See also: https://docs.monai.io/en/stable/networks.html#grid-pull
norm_coords: whether to normalize the coordinates from `[-(size-1)/2, (size-1)/2]` to
`[0, size - 1]` (for ``monai/csrc`` implementation) or
`[-1, 1]` (for torch ``grid_sample`` implementation) to be compatible with the underlying
resampling API.
device: device on which the tensor will be allocated.
dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision.
If ``None``, use the data type of input data. To be compatible with other modules,
the output data type is always `float32`.
.. deprecated:: 0.6.0
``as_tensor_output`` is deprecated.
"""
self.mode: GridSampleMode = look_up_option(mode, GridSampleMode)
self.padding_mode: GridSamplePadMode = look_up_option(padding_mode, GridSamplePadMode)
self.norm_coords = norm_coords
self.device = device
self.dtype = dtype
def __call__(
self,
img: NdarrayOrTensor,
grid: Optional[NdarrayOrTensor] = None,
mode: Optional[Union[GridSampleMode, str]] = None,
padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
dtype: DtypeLike = None,
) -> NdarrayOrTensor:
"""
Args:
img: shape must be (num_channels, H, W[, D]).
grid: shape must be (3, H, W) for 2D or (4, H, W, D) for 3D.
if ``norm_coords`` is True, the grid values must be in `[-(size-1)/2, (size-1)/2]`.
if ``USE_COMPILED=True`` and ``norm_coords=False``, grid values must be in `[0, size-1]`.
if ``USE_COMPILED=False`` and ``norm_coords=False``, grid values must be in `[-1, 1]`.
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``self.mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `USE_COMPILED` is `True`, this argument uses
``"nearest"``, ``"bilinear"``, ``"bicubic"`` to indicate 0, 1, 3 order interpolations.
See also: https://docs.monai.io/en/stable/networks.html#grid-pull
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``self.padding_mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
dtype: data type for resampling computation. Defaults to ``self.dtype``.
To be compatible with other modules, the output data type is always `float32`.
See also:
:py:const:`monai.config.USE_COMPILED`
"""
if grid is None:
raise ValueError("Unknown grid.")
_device = img.device if isinstance(img, torch.Tensor) else self.device
_dtype = dtype or self.dtype or img.dtype
img_t, *_ = convert_data_type(img, torch.Tensor, device=_device, dtype=_dtype)
grid_t = convert_to_dst_type(grid, img_t)[0]
if grid_t is grid: # copy if needed (convert_data_type converts to contiguous)
grid_t = grid_t.clone(memory_format=torch.contiguous_format)
sr = min(len(img_t.shape[1:]), 3)
if USE_COMPILED:
if self.norm_coords:
for i, dim in enumerate(img_t.shape[1 : 1 + sr]):
grid_t[i] = (max(dim, 2) / 2.0 - 0.5 + grid_t[i]) / grid_t[-1:]
grid_t = moveaxis(grid_t[:sr], 0, -1) # type: ignore
_padding_mode = self.padding_mode if padding_mode is None else padding_mode
_padding_mode = _padding_mode.value if isinstance(_padding_mode, GridSamplePadMode) else _padding_mode
bound = 1 if _padding_mode == "reflection" else _padding_mode
_interp_mode = self.mode if mode is None else mode
_interp_mode = _interp_mode.value if isinstance(_interp_mode, GridSampleMode) else _interp_mode
if _interp_mode == "bicubic":
interp = 3
elif _interp_mode == "bilinear":
interp = 1
else:
interp = _interp_mode # type: ignore
out = grid_pull(
img_t.unsqueeze(0), grid_t.unsqueeze(0), bound=bound, extrapolate=True, interpolation=interp
)[0]
else:
if self.norm_coords:
for i, dim in enumerate(img_t.shape[1 : 1 + sr]):
grid_t[i] = 2.0 / (max(2, dim) - 1.0) * grid_t[i] / grid_t[-1:]
index_ordering: List[int] = list(range(sr - 1, -1, -1))
grid_t = moveaxis(grid_t[index_ordering], 0, -1) # type: ignore
out = torch.nn.functional.grid_sample(
img_t.unsqueeze(0),
grid_t.unsqueeze(0),
mode=self.mode.value if mode is None else GridSampleMode(mode).value,
padding_mode=self.padding_mode.value if padding_mode is None else GridSamplePadMode(padding_mode).value,
align_corners=True,
)[0]
out_val, *_ = convert_to_dst_type(out, dst=img, dtype=np.float32)
return out_val
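# Illustrative usage sketch (not part of the upstream source): a grid produced by AffineGrid is
# fed to Resample to warp an image; the Affine transform below wraps exactly this two-step
# pattern. Shapes and parameter values are assumptions.
def _example_resample():
    import numpy as np
    img = np.random.rand(1, 32, 32).astype(np.float32)  # (channels, H, W)
    grid, _ = AffineGrid(rotate_params=np.pi / 12)(spatial_size=(32, 32))
    resampler = Resample(mode=GridSampleMode.BILINEAR, padding_mode=GridSamplePadMode.BORDER)
    return resampler(img, grid=grid)  # float32 output with the same spatial shape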
class Affine(Transform):
"""
Transform ``img`` given the affine parameters.
A tutorial is available: https://github.com/Project-MONAI/tutorials/blob/0.6.0/modules/transforms_demo_2d.ipynb.
"""
backend = list(set(AffineGrid.backend) & set(Resample.backend))
@deprecated_arg(name="as_tensor_output", since="0.6")
def __init__(
self,
rotate_params: Optional[Union[Sequence[float], float]] = None,
shear_params: Optional[Union[Sequence[float], float]] = None,
translate_params: Optional[Union[Sequence[float], float]] = None,
scale_params: Optional[Union[Sequence[float], float]] = None,
affine: Optional[NdarrayOrTensor] = None,
spatial_size: Optional[Union[Sequence[int], int]] = None,
mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.REFLECTION,
norm_coords: bool = True,
as_tensor_output: bool = True,
device: Optional[torch.device] = None,
dtype: DtypeLike = np.float32,
image_only: bool = False,
) -> None:
"""
The affine transformations are applied in rotate, shear, translate, scale order.
Args:
rotate_params: a rotation angle in radians, a scalar for 2D image, a tuple of 3 floats for 3D.
Defaults to no rotation.
shear_params: shearing factors for affine matrix, take a 3D affine as example::
[
[1.0, params[0], params[1], 0.0],
[params[2], 1.0, params[3], 0.0],
[params[4], params[5], 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
a tuple of 2 floats for 2D, a tuple of 6 floats for 3D. Defaults to no shearing.
translate_params: a tuple of 2 floats for 2D, a tuple of 3 floats for 3D. Translation is in
pixel/voxel relative to the center of the input image. Defaults to no translation.
scale_params: scale factor for every spatial dims. a tuple of 2 floats for 2D,
a tuple of 3 floats for 3D. Defaults to `1.0`.
affine: If applied, ignore the params (`rotate_params`, etc.) and use the
supplied matrix. Should be square with each side = num of image spatial
dimensions + 1.
spatial_size: output image spatial size.
if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
the transform will use the spatial size of `img`.
if some components of the `spatial_size` are non-positive values, the transform will use the
corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted
to `(32, 64)` if the second spatial dimension size of img is `64`.
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `USE_COMPILED` is `True`, this argument uses
``"nearest"``, ``"bilinear"``, ``"bicubic"`` to indicate 0, 1, 3 order interpolations.
See also: https://docs.monai.io/en/stable/networks.html#grid-pull
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"reflection"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
norm_coords: whether to normalize the coordinates from `[-(size-1)/2, (size-1)/2]` to
`[0, size - 1]` or `[-1, 1]` to be compatible with the underlying resampling API.
If the coordinates are generated by ``monai.transforms.utils.create_grid``
and the ``affine`` doesn't include the normalization, this argument should be set to ``True``.
If the output `self.affine_grid` is already normalized, this argument should be set to ``False``.
device: device on which the tensor will be allocated.
dtype: data type for resampling computation. Defaults to ``np.float32``.
If ``None``, use the data type of input data. To be compatible with other modules,
the output data type is always `float32`.
image_only: if True return only the image volume, otherwise return (image, affine).
.. deprecated:: 0.6.0
``as_tensor_output`` is deprecated.
"""
self.affine_grid = AffineGrid(
rotate_params=rotate_params,
shear_params=shear_params,
translate_params=translate_params,
scale_params=scale_params,
affine=affine,
dtype=dtype,
device=device,
)
self.image_only = image_only
self.resampler = Resample(norm_coords=norm_coords, device=device, dtype=dtype)
self.spatial_size = spatial_size
self.mode: GridSampleMode = look_up_option(mode, GridSampleMode)
self.padding_mode: GridSamplePadMode = look_up_option(padding_mode, GridSamplePadMode)
def __call__(
self,
img: NdarrayOrTensor,
spatial_size: Optional[Union[Sequence[int], int]] = None,
mode: Optional[Union[GridSampleMode, str]] = None,
padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
) -> Union[NdarrayOrTensor, Tuple[NdarrayOrTensor, NdarrayOrTensor]]:
"""
Args:
img: shape must be (num_channels, H, W[, D]),
spatial_size: output image spatial size.
if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
the transform will use the spatial size of `img`.
if `img` has two spatial dimensions, `spatial_size` should have 2 elements [h, w].
if `img` has three spatial dimensions, `spatial_size` should have 3 elements [h, w, d].
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``self.mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `USE_COMPILED` is `True`, this argument uses
``"nearest"``, ``"bilinear"``, ``"bicubic"`` to indicate 0, 1, 3 order interpolations.
See also: https://docs.monai.io/en/stable/networks.html#grid-pull
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``self.padding_mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
"""
sp_size = fall_back_tuple(spatial_size or self.spatial_size, img.shape[1:])
grid, affine = self.affine_grid(spatial_size=sp_size)
ret = self.resampler(img, grid=grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode)
return ret if self.image_only else (ret, affine)
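# Illustrative usage sketch (not part of the upstream source): parameter values are assumptions;
# the transform rotates and shifts the image in a single resampling step.
def _example_affine():
    import numpy as np
    img = np.random.rand(1, 64, 64).astype(np.float32)
    affine = Affine(rotate_params=np.pi / 6, translate_params=(5, -5), image_only=True)
    return affine(img)  # with image_only=False this would return (image, affine_matrix)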
class RandAffine(RandomizableTransform):
"""
Random affine transform.
A tutorial is available: https://github.com/Project-MONAI/tutorials/blob/0.6.0/modules/transforms_demo_2d.ipynb.
"""
backend = Affine.backend
@deprecated_arg(name="as_tensor_output", since="0.6")
def __init__(
self,
prob: float = 0.1,
rotate_range: RandRange = None,
shear_range: RandRange = None,
translate_range: RandRange = None,
scale_range: RandRange = None,
spatial_size: Optional[Union[Sequence[int], int]] = None,
mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.REFLECTION,
cache_grid: bool = False,
as_tensor_output: bool = True,
device: Optional[torch.device] = None,
) -> None:
"""
Args:
prob: probability of returning a randomized affine grid.
defaults to 0.1, with 10% chance returns a randomized grid.
rotate_range: angle range in radians. If element `i` is a pair of (min, max) values, then
`uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter
for the `i`th spatial dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used.
This can be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be
in range `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]`
for dim0 and nothing for the remaining dimensions.
shear_range: shear range with format matching `rotate_range`, it defines the range to randomly select
shearing factors(a tuple of 2 floats for 2D, a tuple of 6 floats for 3D) for affine matrix,
take a 3D affine as example::
[
[1.0, params[0], params[1], 0.0],
[params[2], 1.0, params[3], 0.0],
[params[4], params[5], 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
translate_range: translate range with format matching `rotate_range`, it defines the range to randomly
select pixel/voxel to translate for every spatial dims.
scale_range: scaling range with format matching `rotate_range`. it defines the range to randomly select
the scale factor to translate for every spatial dims. A value of 1.0 is added to the result.
This allows 0 to correspond to no change (i.e., a scaling of 1.0).
spatial_size: output image spatial size.
if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
the transform will use the spatial size of `img`.
if some components of the `spatial_size` are non-positive values, the transform will use the
corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted
to `(32, 64)` if the second spatial dimension size of img is `64`.
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"reflection"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
cache_grid: whether to cache the identity sampling grid.
If the spatial size is not dynamically defined by input image, enabling this option could
accelerate the transform.
device: device on which the tensor will be allocated.
See also:
- :py:class:`RandAffineGrid` for the random affine parameters configurations.
- :py:class:`Affine` for the affine transformation parameters configurations.
.. deprecated:: 0.6.0
``as_tensor_output`` is deprecated.
"""
RandomizableTransform.__init__(self, prob)
self.rand_affine_grid = RandAffineGrid(
rotate_range=rotate_range,
shear_range=shear_range,
translate_range=translate_range,
scale_range=scale_range,
device=device,
)
self.resampler = Resample(device=device)
self.spatial_size = spatial_size
self.cache_grid = cache_grid
self._cached_grid = self._init_identity_cache()
self.mode: GridSampleMode = GridSampleMode(mode)
self.padding_mode: GridSamplePadMode = GridSamplePadMode(padding_mode)
def _init_identity_cache(self):
"""
Create cache of the identity grid if cache_grid=True and spatial_size is known.
"""
if self.spatial_size is None:
if self.cache_grid:
warnings.warn(
"cache_grid=True is not compatible with the dynamic spatial_size, please specify 'spatial_size'."
)
return None
_sp_size = ensure_tuple(self.spatial_size)
_ndim = len(_sp_size)
if _sp_size != fall_back_tuple(_sp_size, [1] * _ndim) or _sp_size != fall_back_tuple(_sp_size, [2] * _ndim):
# dynamic shape because it falls back to different outcomes
if self.cache_grid:
warnings.warn(
"cache_grid=True is not compatible with the dynamic spatial_size "
f"'spatial_size={self.spatial_size}', please specify 'spatial_size'."
)
return None
return create_grid(spatial_size=_sp_size, device=self.rand_affine_grid.device, backend="torch")
def get_identity_grid(self, spatial_size: Sequence[int]):
"""
Return a cached or new identity grid depends on the availability.
Args:
spatial_size: non-dynamic spatial size
"""
ndim = len(spatial_size)
if spatial_size != fall_back_tuple(spatial_size, [1] * ndim) or spatial_size != fall_back_tuple(
spatial_size, [2] * ndim
):
raise RuntimeError(f"spatial_size should not be dynamic, got {spatial_size}.")
return (
create_grid(spatial_size=spatial_size, device=self.rand_affine_grid.device, backend="torch")
if self._cached_grid is None
else self._cached_grid
)
def set_random_state(
self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None
) -> "RandAffine":
self.rand_affine_grid.set_random_state(seed, state)
super().set_random_state(seed, state)
return self
def randomize(self, data: Optional[Any] = None) -> None:
super().randomize(None)
if not self._do_transform:
return None
self.rand_affine_grid.randomize()
def __call__(
self,
img: NdarrayOrTensor,
spatial_size: Optional[Union[Sequence[int], int]] = None,
mode: Optional[Union[GridSampleMode, str]] = None,
padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
randomize: bool = True,
) -> NdarrayOrTensor:
"""
Args:
img: shape must be (num_channels, H, W[, D]),
spatial_size: output image spatial size.
if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
the transform will use the spatial size of `img`.
if `img` has two spatial dimensions, `spatial_size` should have 2 elements [h, w].
if `img` has three spatial dimensions, `spatial_size` should have 3 elements [h, w, d].
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``self.mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``self.padding_mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
randomize: whether to execute `randomize()` function first, default to True.
"""
if randomize:
self.randomize()
# if not doing transform and spatial size doesn't change, nothing to do
# except convert to float and device
sp_size = fall_back_tuple(spatial_size or self.spatial_size, img.shape[1:])
do_resampling = self._do_transform or (sp_size != ensure_tuple(img.shape[1:]))
if not do_resampling:
img, *_ = convert_data_type(img, dtype=torch.float32, device=self.resampler.device)
grid = self.get_identity_grid(sp_size)
if self._do_transform:
grid = self.rand_affine_grid(grid=grid)
out: NdarrayOrTensor = self.resampler(
img=img, grid=grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode
)
return out
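# Illustrative usage sketch (not part of the upstream source): the ranges are assumptions. Note
# that scale_range=(0.1, 0.1) yields scale factors in [0.9, 1.1], because 1.0 is added to the
# random draw as documented above.
def _example_rand_affine():
    import numpy as np
    rand_affine = RandAffine(
        prob=1.0,
        rotate_range=(np.pi / 18,),
        translate_range=(4, 4),
        scale_range=(0.1, 0.1),
        padding_mode=GridSamplePadMode.ZEROS,
    )
    img = np.random.rand(1, 64, 64).astype(np.float32)
    return rand_affine(img)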
class Rand2DElastic(RandomizableTransform):
"""
Random elastic deformation and affine in 2D.
A tutorial is available: https://github.com/Project-MONAI/tutorials/blob/0.6.0/modules/transforms_demo_2d.ipynb.
"""
backend = Resample.backend
@deprecated_arg(name="as_tensor_output", since="0.6")
def __init__(
self,
spacing: Union[Tuple[float, float], float],
magnitude_range: Tuple[float, float],
prob: float = 0.1,
rotate_range: RandRange = None,
shear_range: RandRange = None,
translate_range: RandRange = None,
scale_range: RandRange = None,
spatial_size: Optional[Union[Tuple[int, int], int]] = None,
mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.REFLECTION,
as_tensor_output: bool = False,
device: Optional[torch.device] = None,
) -> None:
"""
Args:
spacing : distance in between the control points.
magnitude_range: the random offsets will be generated from ``uniform[magnitude[0], magnitude[1])``.
prob: probability of returning a randomized elastic transform.
defaults to 0.1, with 10% chance returns a randomized elastic transform,
otherwise returns a ``spatial_size`` centered area extracted from the input image.
rotate_range: angle range in radians. If element `i` is a pair of (min, max) values, then
`uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter
for the `i`th spatial dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used.
This can be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be
in range `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]`
for dim0 and nothing for the remaining dimensions.
shear_range: shear range with format matching `rotate_range`, it defines the range to randomly select
shearing factors(a tuple of 2 floats for 2D) for affine matrix, take a 2D affine as example::
[
[1.0, params[0], 0.0],
[params[1], 1.0, 0.0],
[0.0, 0.0, 1.0],
]
translate_range: translate range with format matching `rotate_range`, it defines the range to randomly
select pixel to translate for every spatial dims.
scale_range: scaling range with format matching `rotate_range`. it defines the range to randomly select
the scale factor to translate for every spatial dims. A value of 1.0 is added to the result.
This allows 0 to correspond to no change (i.e., a scaling of 1.0).
spatial_size: specifying output image spatial size [h, w].
if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
the transform will use the spatial size of `img`.
if some components of the `spatial_size` are non-positive values, the transform will use the
corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted
to `(32, 64)` if the second spatial dimension size of img is `64`.
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"reflection"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
device: device on which the tensor will be allocated.
See also:
- :py:class:`RandAffineGrid` for the random affine parameters configurations.
- :py:class:`Affine` for the affine transformation parameters configurations.
.. deprecated:: 0.6.0
``as_tensor_output`` is deprecated.
"""
RandomizableTransform.__init__(self, prob)
self.deform_grid = RandDeformGrid(
spacing=spacing, magnitude_range=magnitude_range, as_tensor_output=True, device=device
)
self.rand_affine_grid = RandAffineGrid(
rotate_range=rotate_range,
shear_range=shear_range,
translate_range=translate_range,
scale_range=scale_range,
device=device,
)
self.resampler = Resample(device=device)
self.device = device
self.spatial_size = spatial_size
self.mode: GridSampleMode = look_up_option(mode, GridSampleMode)
self.padding_mode: GridSamplePadMode = look_up_option(padding_mode, GridSamplePadMode)
def set_random_state(
self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None
) -> "Rand2DElastic":
self.deform_grid.set_random_state(seed, state)
self.rand_affine_grid.set_random_state(seed, state)
super().set_random_state(seed, state)
return self
def randomize(self, spatial_size: Sequence[int]) -> None:
super().randomize(None)
if not self._do_transform:
return None
self.deform_grid.randomize(spatial_size)
self.rand_affine_grid.randomize()
def __call__(
self,
img: NdarrayOrTensor,
spatial_size: Optional[Union[Tuple[int, int], int]] = None,
mode: Optional[Union[GridSampleMode, str]] = None,
padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
randomize: bool = True,
) -> NdarrayOrTensor:
"""
Args:
img: shape must be (num_channels, H, W),
spatial_size: specifying output image spatial size [h, w].
if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
the transform will use the spatial size of `img`.
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``self.mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``self.padding_mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
randomize: whether to execute `randomize()` function first, default to True.
"""
sp_size = fall_back_tuple(spatial_size or self.spatial_size, img.shape[1:])
if randomize:
self.randomize(spatial_size=sp_size)
if self._do_transform:
grid = self.deform_grid(spatial_size=sp_size)
grid = self.rand_affine_grid(grid=grid)
grid = torch.nn.functional.interpolate( # type: ignore
recompute_scale_factor=True,
input=grid.unsqueeze(0),
scale_factor=list(ensure_tuple(self.deform_grid.spacing)),
mode=InterpolateMode.BICUBIC.value,
align_corners=False,
)
grid = CenterSpatialCrop(roi_size=sp_size)(grid[0])
else:
_device = img.device if isinstance(img, torch.Tensor) else self.device
grid = create_grid(spatial_size=sp_size, device=_device, backend="torch")
out: NdarrayOrTensor = self.resampler(
img, grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode
)
return out
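# Illustrative usage sketch (not part of the upstream source): spacing and magnitude_range are
# assumptions; control points every 20 pixels are jittered by offsets drawn from uniform[1, 2).
def _example_rand_2d_elastic():
    import numpy as np
    elastic = Rand2DElastic(spacing=(20, 20), magnitude_range=(1, 2), prob=1.0)
    img = np.random.rand(1, 64, 64).astype(np.float32)
    return elastic(img)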
class Rand3DElastic(RandomizableTransform):
"""
Random elastic deformation and affine in 3D.
A tutorial is available: https://github.com/Project-MONAI/tutorials/blob/0.6.0/modules/transforms_demo_2d.ipynb.
"""
backend = Resample.backend
@deprecated_arg(name="as_tensor_output", since="0.6")
def __init__(
self,
sigma_range: Tuple[float, float],
magnitude_range: Tuple[float, float],
prob: float = 0.1,
rotate_range: RandRange = None,
shear_range: RandRange = None,
translate_range: RandRange = None,
scale_range: RandRange = None,
spatial_size: Optional[Union[Tuple[int, int, int], int]] = None,
mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.REFLECTION,
as_tensor_output: bool = False,
device: Optional[torch.device] = None,
) -> None:
"""
Args:
sigma_range: a Gaussian kernel with standard deviation sampled from
``uniform[sigma_range[0], sigma_range[1])`` will be used to smooth the random offset grid.
magnitude_range: the random offsets on the grid will be generated from
``uniform[magnitude[0], magnitude[1])``.
prob: probability of returning a randomized elastic transform.
defaults to 0.1, with 10% chance returns a randomized elastic transform,
otherwise returns a ``spatial_size`` centered area extracted from the input image.
rotate_range: angle range in radians. If element `i` is a pair of (min, max) values, then
`uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter
for the `i`th spatial dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used.
This can be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be
in range `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]`
for dim0 and nothing for the remaining dimensions.
shear_range: shear range with format matching `rotate_range`, it defines the range to randomly select
shearing factors(a tuple of 6 floats for 3D) for affine matrix, take a 3D affine as example::
[
[1.0, params[0], params[1], 0.0],
[params[2], 1.0, params[3], 0.0],
[params[4], params[5], 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
translate_range: translate range with format matching `rotate_range`, it defines the range to randomly
select voxel to translate for every spatial dims.
scale_range: scaling range with format matching `rotate_range`. it defines the range to randomly select
the scale factor to translate for every spatial dims. A value of 1.0 is added to the result.
This allows 0 to correspond to no change (i.e., a scaling of 1.0).
spatial_size: specifying output image spatial size [h, w, d].
if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
the transform will use the spatial size of `img`.
if some components of the `spatial_size` are non-positive values, the transform will use the
corresponding components of img size. For example, `spatial_size=(32, 32, -1)` will be adapted
to `(32, 32, 64)` if the third spatial dimension size of img is `64`.
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"reflection"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
device: device on which the tensor will be allocated.
See also:
- :py:class:`RandAffineGrid` for the random affine parameters configurations.
- :py:class:`Affine` for the affine transformation parameters configurations.
.. deprecated:: 0.6.0
``as_tensor_output`` is deprecated.
"""
RandomizableTransform.__init__(self, prob)
self.rand_affine_grid = RandAffineGrid(
rotate_range=rotate_range,
shear_range=shear_range,
translate_range=translate_range,
scale_range=scale_range,
device=device,
)
self.resampler = Resample(device=device)
self.sigma_range = sigma_range
self.magnitude_range = magnitude_range
self.spatial_size = spatial_size
self.mode: GridSampleMode = look_up_option(mode, GridSampleMode)
self.padding_mode: GridSamplePadMode = look_up_option(padding_mode, GridSamplePadMode)
self.device = device
self.rand_offset: np.ndarray
self.magnitude = 1.0
self.sigma = 1.0
def set_random_state(
self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None
) -> "Rand3DElastic":
self.rand_affine_grid.set_random_state(seed, state)
super().set_random_state(seed, state)
return self
def randomize(self, grid_size: Sequence[int]) -> None:
super().randomize(None)
if not self._do_transform:
return None
self.rand_offset = self.R.uniform(-1.0, 1.0, [3] + list(grid_size)).astype(np.float32, copy=False)
self.magnitude = self.R.uniform(self.magnitude_range[0], self.magnitude_range[1])
self.sigma = self.R.uniform(self.sigma_range[0], self.sigma_range[1])
self.rand_affine_grid.randomize()
def __call__(
self,
img: NdarrayOrTensor,
spatial_size: Optional[Union[Tuple[int, int, int], int]] = None,
mode: Optional[Union[GridSampleMode, str]] = None,
padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
randomize: bool = True,
) -> NdarrayOrTensor:
"""
Args:
img: shape must be (num_channels, H, W, D),
spatial_size: specifying spatial 3D output image spatial size [h, w, d].
if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
the transform will use the spatial size of `img`.
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``self.mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``self.padding_mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
randomize: whether to execute `randomize()` function first, default to True.
"""
sp_size = fall_back_tuple(spatial_size or self.spatial_size, img.shape[1:])
if randomize:
self.randomize(grid_size=sp_size)
_device = img.device if isinstance(img, torch.Tensor) else self.device
grid = create_grid(spatial_size=sp_size, device=_device, backend="torch")
if self._do_transform:
if self.rand_offset is None:
raise RuntimeError("rand_offset is not initialized.")
gaussian = GaussianFilter(3, self.sigma, 3.0).to(device=_device)
offset = torch.as_tensor(self.rand_offset, device=_device).unsqueeze(0)
grid[:3] += gaussian(offset)[0] * self.magnitude
grid = self.rand_affine_grid(grid=grid)
out: NdarrayOrTensor = self.resampler(
img, grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode
)
return out
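# Illustrative usage sketch (not part of the upstream source): sigma_range controls how strongly
# the random offset grid is smoothed before resampling; all values below are assumptions chosen
# for a small 3D volume.
def _example_rand_3d_elastic():
    import numpy as np
    elastic = Rand3DElastic(sigma_range=(5, 7), magnitude_range=(10, 30), prob=1.0)
    vol = np.random.rand(1, 48, 48, 48).astype(np.float32)  # (channels, H, W, D)
    return elastic(vol)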
class GridDistortion(Transform):
backend = [TransformBackends.TORCH]
def __init__(
self,
num_cells: Union[Tuple[int], int],
distort_steps: Sequence[Sequence[float]],
mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
device: Optional[torch.device] = None,
) -> None:
"""
Grid distortion transform. Refer to:
https://github.com/albumentations-team/albumentations/blob/master/albumentations/augmentations/transforms.py
Args:
num_cells: number of grid cells on each dimension.
distort_steps: This argument is a list of tuples, where each tuple contains the distort steps of the
corresponding dimensions (in the order of H, W[, D]). The length of each tuple equals to `num_cells + 1`.
Each value in the tuple represents the distort step of the related cell.
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"border"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
device: device on which the tensor will be allocated.
"""
self.resampler = Resample(mode=mode, padding_mode=padding_mode, device=device)
self.num_cells = num_cells
self.distort_steps = distort_steps
self.device = device
def __call__(
self,
img: NdarrayOrTensor,
distort_steps: Optional[Sequence[Sequence]] = None,
mode: Optional[Union[GridSampleMode, str]] = None,
padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
) -> NdarrayOrTensor:
"""
Args:
img: shape must be (num_channels, H, W[, D]).
distort_steps: This argument is a list of tuples, where each tuple contains the distort steps of the
corresponding dimensions (in the order of H, W[, D]). The length of each tuple equals to `num_cells + 1`.
Each value in the tuple represents the distort step of the related cell.
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"border"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
"""
distort_steps = self.distort_steps if distort_steps is None else distort_steps
if len(img.shape) != len(distort_steps) + 1:
raise ValueError("the spatial size of `img` does not match with the length of `distort_steps`")
all_ranges = []
num_cells = ensure_tuple_rep(self.num_cells, len(img.shape) - 1)
for dim_idx, dim_size in enumerate(img.shape[1:]):
dim_distort_steps = distort_steps[dim_idx]
ranges = torch.zeros(dim_size, dtype=torch.float32)
cell_size = dim_size // num_cells[dim_idx]
prev = 0
for idx in range(num_cells[dim_idx] + 1):
start = int(idx * cell_size)
end = start + cell_size
if end > dim_size:
end = dim_size
cur = dim_size
else:
cur = prev + cell_size * dim_distort_steps[idx]
ranges[start:end] = torch.linspace(prev, cur, end - start)
prev = cur
ranges = ranges - (dim_size - 1.0) / 2.0
all_ranges.append(ranges)
coords = meshgrid_ij(*all_ranges)
grid = torch.stack([*coords, torch.ones_like(coords[0])])
return self.resampler(img, grid=grid, mode=mode, padding_mode=padding_mode) # type: ignore
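# Illustrative usage sketch (not part of the upstream source): with num_cells=2, each spatial
# dimension needs num_cells + 1 = 3 distort steps; a step of 1.0 leaves that cell undistorted.
# The step values below are assumptions for demonstration only.
def _example_grid_distortion():
    import numpy as np
    distort = GridDistortion(num_cells=2, distort_steps=((1.0, 1.2, 0.9), (1.0, 0.8, 1.1)))
    img = np.random.rand(1, 60, 60).astype(np.float32)
    return distort(img)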
class RandGridDistortion(RandomizableTransform):
backend = [TransformBackends.TORCH]
def __init__(
self,
num_cells: Union[Tuple[int], int] = 5,
prob: float = 0.1,
distort_limit: Union[Tuple[float, float], float] = (-0.03, 0.03),
mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
device: Optional[torch.device] = None,
) -> None:
"""
Random grid distortion transform. Refer to:
https://github.com/albumentations-team/albumentations/blob/master/albumentations/augmentations/transforms.py
Args:
num_cells: number of grid cells on each dimension.
prob: probability of returning a randomized grid distortion transform. Defaults to 0.1.
distort_limit: range to randomly distort.
If single number, distort_limit is picked from (-distort_limit, distort_limit).
Defaults to (-0.03, 0.03).
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"border"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
device: device on which the tensor will be allocated.
"""
RandomizableTransform.__init__(self, prob)
self.num_cells = num_cells
if isinstance(distort_limit, (int, float)):
self.distort_limit = (min(-distort_limit, distort_limit), max(-distort_limit, distort_limit))
else:
self.distort_limit = (min(distort_limit), max(distort_limit))
self.distort_steps: Sequence[Sequence[float]] = ((1.0,),)
self.grid_distortion = GridDistortion(
num_cells=num_cells, distort_steps=self.distort_steps, mode=mode, padding_mode=padding_mode, device=device
)
def randomize(self, spatial_shape: Sequence[int]) -> None:
super().randomize(None)
if not self._do_transform:
return
self.distort_steps = tuple(
tuple(1.0 + self.R.uniform(low=self.distort_limit[0], high=self.distort_limit[1], size=n_cells + 1))
for n_cells in ensure_tuple_rep(self.num_cells, len(spatial_shape))
)
def __call__(
self,
img: NdarrayOrTensor,
mode: Optional[Union[GridSampleMode, str]] = None,
padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
randomize: bool = True,
) -> NdarrayOrTensor:
"""
Args:
img: shape must be (num_channels, H, W[, D]).
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"border"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
randomize: whether to shuffle the random factors using `randomize()`, default to True.
"""
if randomize:
self.randomize(img.shape[1:])
if not self._do_transform:
return img
return self.grid_distortion(img, distort_steps=self.distort_steps, mode=mode, padding_mode=padding_mode)
| [
"torch.zeros",
"torch.linspace",
"torch.linalg.solve",
"torch.eye",
"torch.as_tensor",
"torch.ones_like",
"torch.solve",
"torch.flip"
] | 1.6 | Jianrong-Lu/MONAI | c319ca8ff31aa980a045f1b913fb2eb22aadb080 |
1.1 |
import time
import datetime
import random
import sys
import logging
import math  # used below for perplexity computation (math.exp)
import torch  # used directly below (torch.zeros, torch.no_grad, clip_grad_norm_, ...)
from pathlib import Path
from typing import Union
from torch import cuda
from torch.utils.data import Dataset, DataLoader
from torch.optim.sgd import SGD
try:
from apex import amp
except ImportError:
amp = None
import flair
from flair.data import Dictionary
from flair.models import LanguageModel
from flair.optim import *
from flair.training_utils import add_file_handler
log = logging.getLogger('flair')
class TextDataset(Dataset):
def __init__(self, path, dictionary, expand_vocab=False, forward=True, split_on_char=True, random_case_flip=True, shuffle_lines=True):
assert path.exists()
self.files = None
self.path = path
self.dictionary = dictionary
self.split_on_char = split_on_char
self.forward = forward
self.random_case_flip = random_case_flip
self.expand_vocab = expand_vocab
self.shuffle_lines = shuffle_lines
if path.is_dir():
self.files = sorted([f for f in path.iterdir() if f.exists()])
else:
self.files = [path]
def __len__(self):
return len(self.files)
def __getitem__(self, index=0):
return self.charsplit(self.files[index], self.expand_vocab, self.forward, self.split_on_char, self.random_case_flip)
def charsplit(self, path, expand_vocab=False, forward=True, split_on_char=True, random_case_flip=True):
'Tokenizes a text file on character basis.'
assert path.exists()
lines = open(path, 'r', encoding='utf-8').readlines()
        log.info(f'read text file with {len(lines)} lines')
if self.shuffle_lines:
random.shuffle(lines)
log.info('shuffled')
tokens = 0
for line in lines:
if split_on_char:
chars = list(line)
else:
chars = line.split()
tokens += len(chars)
if expand_vocab:
for char in chars:
self.dictionary.add_item(char)
ids = torch.zeros(tokens, dtype=torch.long)
if forward:
token = 0
for line in lines:
if random_case_flip:
line = self.random_casechange(line)
if split_on_char:
chars = list(line)
else:
chars = line.split()
for char in chars:
if (token >= tokens):
break
ids[token] = self.dictionary.get_idx_for_item(char)
token += 1
else:
token = (tokens - 1)
for line in lines:
if random_case_flip:
line = self.random_casechange(line)
if split_on_char:
chars = list(line)
else:
chars = line.split()
for char in chars:
if (token >= tokens):
break
ids[token] = self.dictionary.get_idx_for_item(char)
token -= 1
return ids
@staticmethod
def random_casechange(line):
no = random.randint(0, 99)
        if no == 0:
line = line.lower()
        if no == 1:
line = line.upper()
return line
def tokenize(self, path):
'Tokenizes a text file.'
assert path.exists()
with open(path, 'r') as f:
tokens = 0
for line in f:
words = (line.split() + ['<eos>'])
tokens += len(words)
for word in words:
self.dictionary.add_word(word)
with open(path, 'r') as f:
ids = torch.zeros(tokens, dtype=torch.long, device=flair.device)
token = 0
for line in f:
words = (line.split() + ['<eos>'])
for word in words:
ids[token] = self.dictionary.word2idx[word]
token += 1
return ids
class TextCorpus(object):
def __init__(self, path, dictionary, forward=True, character_level=True, random_case_flip=True, shuffle_lines=True):
self.dictionary = dictionary
self.forward = forward
self.split_on_char = character_level
self.random_case_flip = random_case_flip
self.shuffle_lines = shuffle_lines
if (type(path) == str):
path = Path(path)
self.train = TextDataset((path / 'train'), dictionary, False, self.forward,
self.split_on_char, self.random_case_flip, shuffle_lines=self.shuffle_lines)
self.valid = TextDataset((path / 'valid.txt'), dictionary, False, self.forward,
self.split_on_char, self.random_case_flip, shuffle_lines=False)[0]
self.test = TextDataset((path / 'test.txt'), dictionary, False, self.forward,
self.split_on_char, self.random_case_flip, shuffle_lines=False)[0]
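# Illustrative usage sketch (not part of the upstream source): the corpus folder is assumed to
# contain a 'train' directory plus 'valid.txt' and 'test.txt' files, matching what TextCorpus
# above expects; the path name is an assumption.
def _example_build_corpus():
    dictionary = Dictionary.load('chars')  # flair's built-in character dictionary
    return TextCorpus(Path('data/my_corpus'), dictionary, forward=True, character_level=True)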
class LanguageModelTrainer():
def __init__(self, model, corpus, optimizer=SGD, test_mode=False, epoch=0, split=0, loss=10000, optimizer_state=None):
self.model = model
self.optimizer = optimizer
self.corpus = corpus
self.test_mode = test_mode
self.loss_function = torch.nn.CrossEntropyLoss()
self.log_interval = 100
self.epoch = epoch
self.split = split
self.loss = loss
self.optimizer_state = optimizer_state
def train(self, base_path, sequence_length, learning_rate=20, mini_batch_size=100, anneal_factor=0.25, patience=10, clip=0.25, max_epochs=1000, checkpoint=False, grow_to_sequence_length=0, num_workers=2, use_amp=False, amp_opt_level='O1', **kwargs):
if use_amp:
if (sys.version_info < (3, 0)):
raise RuntimeError(
'Apex currently only supports Python 3. Aborting.')
if (amp is None):
raise RuntimeError(
'Failed to import apex. Please install apex from https://www.github.com/nvidia/apex to enable mixed-precision training.')
if (type(base_path) is str):
base_path = Path(base_path)
add_file_handler(log, (base_path / 'training.log'))
number_of_splits = len(self.corpus.train)
val_data = self._batchify(self.corpus.valid, mini_batch_size)
base_path.mkdir(parents=True, exist_ok=True)
loss_txt = (base_path / 'loss.txt')
savefile = (base_path / 'best-lm.pt')
try:
epoch = self.epoch
best_val_loss = self.loss
optimizer = self.optimizer(
self.model.parameters(), lr=learning_rate, **kwargs)
if (self.optimizer_state is not None):
optimizer.load_state_dict(self.optimizer_state)
if isinstance(optimizer, (AdamW, SGDW)):
scheduler = ReduceLRWDOnPlateau(
optimizer, verbose=True, factor=anneal_factor, patience=patience)
else:
scheduler = ReduceLROnPlateau(
optimizer, verbose=True, factor=anneal_factor, patience=patience)
if use_amp:
(self.model, optimizer) = amp.initialize(
self.model, optimizer, opt_level=amp_opt_level)
training_generator = DataLoader(
self.corpus.train, shuffle=False, num_workers=num_workers)
for epoch in range(self.epoch, max_epochs):
epoch_start_time = time.time()
if (epoch > 0):
training_generator = DataLoader(
self.corpus.train, shuffle=True, num_workers=num_workers)
                    self.model.save_checkpoint(
                        (base_path / f'epoch_{epoch}.pt'), optimizer, epoch, 0, best_val_loss)
for (curr_split, train_slice) in enumerate(training_generator, self.split):
if (sequence_length < grow_to_sequence_length):
sequence_length += 1
                        log.info(f'Sequence length is {sequence_length}')
split_start_time = time.time()
curr_split += 1
train_data = self._batchify(
train_slice.flatten(), mini_batch_size)
log.info((('Split %d' % curr_split) +
'\t - ({:%H:%M:%S})'.format(datetime.datetime.now())))
for group in optimizer.param_groups:
learning_rate = group['lr']
self.model.train()
hidden = self.model.init_hidden(mini_batch_size)
ntokens = len(self.corpus.dictionary)
total_loss = 0
start_time = time.time()
for (batch, i) in enumerate(range(0, (train_data.size(0) - 1), sequence_length)):
(data, targets) = self._get_batch(
train_data, i, sequence_length)
if ((not data.is_cuda) and cuda.is_available()):
log.info(
('Batch %d is not on CUDA, training will be very slow' % batch))
                            raise Exception("data isn't on CUDA")
self.model.zero_grad()
optimizer.zero_grad()
(output, rnn_output, hidden) = self.model.forward(
data, hidden)
loss = self.loss_function(
output.view((- 1), ntokens), targets)
if use_amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(
self.model.parameters(), clip)
optimizer.step()
total_loss += loss.data
hidden = self._repackage_hidden(hidden)
del loss, output, rnn_output
if (((batch % self.log_interval) == 0) and (batch > 0)):
cur_loss = (total_loss.item() / self.log_interval)
elapsed = (time.time() - start_time)
log.info('| split {:3d} /{:3d} | {:5d}/{:5d} batches | ms/batch {:5.2f} | loss {:5.2f} | ppl {:8.2f}'.format(
curr_split, number_of_splits, batch, (len(train_data) // sequence_length), ((elapsed * 1000) / self.log_interval), cur_loss, math.exp(cur_loss)))
total_loss = 0
start_time = time.time()
log.info(('%d seconds for train split %d' %
((time.time() - split_start_time), curr_split)))
self.model.eval()
val_loss = self.evaluate(
val_data, mini_batch_size, sequence_length)
scheduler.step(val_loss)
log.info('best loss so far {:5.2f}'.format(best_val_loss))
log.info(self.model.generate_text())
if checkpoint:
self.model.save_checkpoint(
(base_path / 'checkpoint.pt'), optimizer, epoch, curr_split, best_val_loss)
if (val_loss < best_val_loss):
self.model.best_score = best_val_loss
self.model.save(savefile)
best_val_loss = val_loss
log.info(('-' * 89))
summary = '| end of split {:3d} /{:3d} | epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | valid ppl {:8.2f} | learning rate {:3.4f}'.format(
curr_split, number_of_splits, (epoch + 1), (time.time() - split_start_time), val_loss, math.exp(val_loss), learning_rate)
with open(loss_txt, 'a') as myfile:
myfile.write(('%s\n' % summary))
log.info(summary)
log.info(('-' * 89))
log.info(('Epoch time: %.2f' %
(time.time() - epoch_start_time)))
except KeyboardInterrupt:
log.info(('-' * 89))
log.info('Exiting from training early')
test_data = self._batchify(self.corpus.test, mini_batch_size)
test_loss = self.evaluate(test_data, mini_batch_size, sequence_length)
summary = 'TEST: valid loss {:5.2f} | valid ppl {:8.2f}'.format(
test_loss, math.exp(test_loss))
with open(loss_txt, 'a') as myfile:
myfile.write(('%s\n' % summary))
log.info(summary)
log.info(('-' * 89))
def evaluate(self, data_source, eval_batch_size, sequence_length):
self.model.eval()
with torch.no_grad():
total_loss = 0
ntokens = len(self.corpus.dictionary)
hidden = self.model.init_hidden(eval_batch_size)
for i in range(0, (data_source.size(0) - 1), sequence_length):
(data, targets) = self._get_batch(
data_source, i, sequence_length)
(prediction, rnn_output, hidden) = self.model.forward(data, hidden)
output_flat = prediction.view((- 1), ntokens)
total_loss += (len(data) *
self.loss_function(output_flat, targets).data)
hidden = self._repackage_hidden(hidden)
return (total_loss.item() / len(data_source))
@staticmethod
def _batchify(data, batch_size):
nbatch = (data.size(0) // batch_size)
data = data.narrow(0, 0, (nbatch * batch_size))
data = data.view(batch_size, (- 1)).t().contiguous()
return data
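    # Illustrative note (added, not in the original): _batchify turns a flat token
    # stream into column-wise batches. E.g. ids [0..9] with batch_size=2 become a
    # (5, 2) tensor whose columns are the contiguous streams [0..4] and [5..9];
    # _get_batch then slices `sequence_length` rows of it per training step.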
@staticmethod
def _get_batch(source, i, sequence_length):
seq_len = min(sequence_length, ((len(source) - 1) - i))
data = source[i:(i + seq_len)].clone().detach()
target = source[(i + 1):((i + 1) + seq_len)
].view((- 1)).clone().detach()
data = data.to(flair.device)
target = target.to(flair.device)
return (data, target)
@staticmethod
def _repackage_hidden(h):
'Wraps hidden states in new tensors, to detach them from their history.'
return tuple((v.clone().detach() for v in h))
@staticmethod
def load_from_checkpoint(checkpoint_file, corpus, optimizer=SGD):
checkpoint = LanguageModel.load_checkpoint(checkpoint_file)
return LanguageModelTrainer(checkpoint['model'], corpus, optimizer, epoch=checkpoint['epoch'], split=checkpoint['split'], loss=checkpoint['loss'], optimizer_state=checkpoint['optimizer_state_dict'])
| [
"torch.cuda.is_available",
"torch.utils.data.DataLoader"
] | 1.1.0 | azawalich/flair | f0101ab25381aefa586ecb688d4f412d5fab5de3 |
1.8 | # -*- coding: utf-8 -*-
""" Model definition functions and weight loading.
"""
from __future__ import print_function, division, unicode_literals
from os.path import exists
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence, PackedSequence
from torchMoji.torchmoji.lstm import LSTMHardSigmoid
from torchMoji.torchmoji.attlayer import Attention
from torchMoji.torchmoji.global_variables import NB_TOKENS, NB_EMOJI_CLASSES
def torchmoji_feature_encoding(weight_path, return_attention=False):
""" Loads the pretrained torchMoji model for extracting features
from the penultimate feature layer. In this way, it transforms
the text into its emotional encoding.
# Arguments:
weight_path: Path to model weights to be loaded.
return_attention: If true, output will include weight of each input token
used for the prediction
# Returns:
Pretrained model for encoding text into feature vectors.
"""
model = TorchMoji(nb_classes=None,
nb_tokens=NB_TOKENS,
feature_output=True,
return_attention=return_attention)
load_specific_weights(model, weight_path, exclude_names=['output_layer'])
return model
def torchmoji_emojis(weight_path, return_attention=False):
""" Loads the pretrained torchMoji model for extracting features
from the penultimate feature layer. In this way, it transforms
the text into its emotional encoding.
# Arguments:
weight_path: Path to model weights to be loaded.
return_attention: If true, output will include weight of each input token
used for the prediction
# Returns:
        Pretrained model for predicting emoji class probabilities from text.
"""
model = TorchMoji(nb_classes=NB_EMOJI_CLASSES,
nb_tokens=NB_TOKENS,
return_attention=return_attention)
model.load_state_dict(torch.load(weight_path))
return model
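# --- Illustrative usage sketch (added, not part of the original torchMoji code) ---
# Intended use of the loader above. `weight_path` is a placeholder for the
# downloaded pretrained checkpoint, and `token_ids` is assumed to already be a
# zero-padded numpy int array produced by the project's sentence tokenizer.
def _example_emoji_probabilities(weight_path, token_ids):
    model = torchmoji_emojis(weight_path)
    probs = model(token_ids)           # numpy array of shape (batch, 64)
    return probs.argmax(axis=-1)       # most likely emoji class per sentence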
def torchmoji_transfer(nb_classes, weight_path=None, extend_embedding=0,
embed_dropout_rate=0.1, final_dropout_rate=0.5):
""" Loads the pretrained torchMoji model for finetuning/transfer learning.
Does not load weights for the softmax layer.
Note that if you are planning to use class average F1 for evaluation,
nb_classes should be set to 2 instead of the actual number of classes
in the dataset, since binary classification will be performed on each
class individually.
Note that for the 'new' method, weight_path should be left as None.
# Arguments:
nb_classes: Number of classes in the dataset.
weight_path: Path to model weights to be loaded.
extend_embedding: Number of tokens that have been added to the
vocabulary on top of NB_TOKENS. If this number is larger than 0,
the embedding layer's dimensions are adjusted accordingly, with the
additional weights being set to random values.
embed_dropout_rate: Dropout rate for the embedding layer.
final_dropout_rate: Dropout rate for the final Softmax layer.
# Returns:
Model with the given parameters.
"""
model = TorchMoji(nb_classes=nb_classes,
nb_tokens=NB_TOKENS + extend_embedding,
embed_dropout_rate=embed_dropout_rate,
final_dropout_rate=final_dropout_rate,
output_logits=True)
if weight_path is not None:
load_specific_weights(model, weight_path,
exclude_names=['output_layer'],
extend_embedding=extend_embedding)
return model
class TorchMoji(nn.Module):
def __init__(self, nb_classes, nb_tokens, feature_output=False, output_logits=False,
embed_dropout_rate=0, final_dropout_rate=0, return_attention=False):
"""
torchMoji model.
IMPORTANT: The model is loaded in evaluation mode by default (self.eval())
# Arguments:
nb_classes: Number of classes in the dataset.
nb_tokens: Number of tokens in the dataset (i.e. vocabulary size).
feature_output: If True the model returns the penultimate
feature vector rather than Softmax probabilities
(defaults to False).
output_logits: If True the model returns logits rather than probabilities
(defaults to False).
embed_dropout_rate: Dropout rate for the embedding layer.
final_dropout_rate: Dropout rate for the final Softmax layer.
return_attention: If True the model also returns attention weights over the sentence
(defaults to False).
"""
super(TorchMoji, self).__init__()
embedding_dim = 256
hidden_size = 512
attention_size = 4 * hidden_size + embedding_dim
self.feature_output = feature_output
self.embed_dropout_rate = embed_dropout_rate
self.final_dropout_rate = final_dropout_rate
self.return_attention = return_attention
self.hidden_size = hidden_size
self.output_logits = output_logits
self.nb_classes = nb_classes
self.add_module('embed', nn.Embedding(nb_tokens, embedding_dim))
# dropout2D: embedding channels are dropped out instead of words
        # many examples in the datasets contain only a few words, so dropping one or more whole words could alter the emotion completely
self.add_module('embed_dropout', nn.Dropout2d(embed_dropout_rate))
self.add_module('lstm_0', LSTMHardSigmoid(embedding_dim, hidden_size, batch_first=True, bidirectional=True))
self.add_module('lstm_1', LSTMHardSigmoid(hidden_size*2, hidden_size, batch_first=True, bidirectional=True))
self.add_module('attention_layer', Attention(attention_size=attention_size, return_attention=return_attention))
if not feature_output:
self.add_module('final_dropout', nn.Dropout(final_dropout_rate))
if output_logits:
self.add_module('output_layer', nn.Sequential(nn.Linear(attention_size, nb_classes if self.nb_classes > 2 else 1)))
else:
self.add_module('output_layer', nn.Sequential(nn.Linear(attention_size, nb_classes if self.nb_classes > 2 else 1),
nn.Softmax() if self.nb_classes > 2 else nn.Sigmoid()))
self.init_weights()
# Put model in evaluation mode by default
self.eval()
def init_weights(self):
"""
Here we reproduce Keras default initialization weights for consistency with Keras version
"""
ih = (param.data for name, param in self.named_parameters() if 'weight_ih' in name)
hh = (param.data for name, param in self.named_parameters() if 'weight_hh' in name)
b = (param.data for name, param in self.named_parameters() if 'bias' in name)
nn.init.uniform(self.embed.weight.data, a=-0.5, b=0.5)
for t in ih:
nn.init.xavier_uniform(t)
for t in hh:
nn.init.orthogonal(t)
for t in b:
nn.init.constant(t, 0)
if not self.feature_output:
nn.init.xavier_uniform(self.output_layer[0].weight.data)
def forward(self, input_seqs):
""" Forward pass.
# Arguments:
input_seqs: Can be one of Numpy array, Torch.LongTensor, Torch.Variable, Torch.PackedSequence.
# Return:
Same format as input format (except for PackedSequence returned as Variable).
"""
# Check if we have Torch.LongTensor inputs or not Torch.Variable (assume Numpy array in this case), take note to return same format
return_numpy = False
return_tensor = False
if isinstance(input_seqs, (torch.LongTensor, torch.cuda.LongTensor)):
input_seqs = Variable(input_seqs)
return_tensor = True
elif not isinstance(input_seqs, Variable):
input_seqs = Variable(torch.from_numpy(input_seqs.astype('int64')).long())
return_numpy = True
# If we don't have a packed inputs, let's pack it
reorder_output = False
if not isinstance(input_seqs, PackedSequence):
ho = self.lstm_0.weight_hh_l0.data.new(2, input_seqs.size()[0], self.hidden_size).zero_()
co = self.lstm_0.weight_hh_l0.data.new(2, input_seqs.size()[0], self.hidden_size).zero_()
# Reorder batch by sequence length
input_lengths = torch.LongTensor([torch.max(input_seqs[i, :].data.nonzero()) + 1 for i in range(input_seqs.size()[0])])
input_lengths, perm_idx = input_lengths.sort(0, descending=True)
input_seqs = input_seqs[perm_idx][:, :input_lengths.max()]
# Pack sequence and work on data tensor to reduce embeddings/dropout computations
packed_input = pack_padded_sequence(input_seqs, input_lengths.cpu().numpy(), batch_first=True)
reorder_output = True
else:
ho = self.lstm_0.weight_hh_l0.data.data.new(2, input_seqs.size()[0], self.hidden_size).zero_()
co = self.lstm_0.weight_hh_l0.data.data.new(2, input_seqs.size()[0], self.hidden_size).zero_()
input_lengths = input_seqs.batch_sizes
packed_input = input_seqs
hidden = (Variable(ho, requires_grad=False), Variable(co, requires_grad=False))
# Embed with an activation function to bound the values of the embeddings
x = self.embed(packed_input.data)
x = nn.Tanh()(x)
        # PyTorch's 2D dropout (Dropout2d) operates on axis 1, which is fine for us
x = self.embed_dropout(x)
# Update packed sequence data for RNN
packed_input = PackedSequence(x, packed_input.batch_sizes)
# skip-connection from embedding to output eases gradient-flow and allows access to lower-level features
# ordering of the way the merge is done is important for consistency with the pretrained model
lstm_0_output, _ = self.lstm_0(packed_input, hidden)
lstm_1_output, _ = self.lstm_1(lstm_0_output, hidden)
# Update packed sequence data for attention layer
packed_input = PackedSequence(torch.cat((lstm_1_output.data,
lstm_0_output.data,
packed_input.data), dim=1),
packed_input.batch_sizes)
input_seqs, _ = pad_packed_sequence(packed_input, batch_first=True)
x, att_weights = self.attention_layer(input_seqs, input_lengths)
# output class probabilities or penultimate feature vector
if not self.feature_output:
x = self.final_dropout(x)
outputs = self.output_layer(x)
else:
outputs = x
# Reorder output if needed
if reorder_output:
            reordered = Variable(outputs.data.new(outputs.size()))
            reordered[perm_idx] = outputs
            outputs = reordered
# Adapt return format if needed
if return_tensor:
outputs = outputs.data
if return_numpy:
outputs = outputs.data.numpy()
if self.return_attention:
return outputs, att_weights
else:
return outputs
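# --- Illustrative sketch (added, not part of the original torchMoji code) ---
# Demonstrates the input format accepted by TorchMoji.forward: a zero-padded
# array of vocabulary ids, one row per sentence. A randomly initialized model is
# used so the sketch runs without pretrained weights; the probabilities are
# therefore meaningless and only the shapes matter.
def _example_forward_shapes():
    import numpy as np
    token_ids = np.zeros((2, 10), dtype='int64')
    token_ids[0, :4] = [1, 5, 9, 2]        # made-up ids for a 4-token sentence
    token_ids[1, :2] = [7, 3]              # made-up ids for a 2-token sentence
    model = TorchMoji(nb_classes=NB_EMOJI_CLASSES, nb_tokens=NB_TOKENS)
    probs = model(token_ids)               # numpy array, one row of class probabilities per sentence
    assert probs.shape == (2, NB_EMOJI_CLASSES)
    return probs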
def load_specific_weights(model, weight_path, exclude_names=[], extend_embedding=0, verbose=True):
""" Loads model weights from the given file path, excluding any
given layers.
# Arguments:
model: Model whose weights should be loaded.
weight_path: Path to file containing model weights.
exclude_names: List of layer names whose weights should not be loaded.
extend_embedding: Number of new words being added to vocabulary.
verbose: Verbosity flag.
# Raises:
ValueError if the file at weight_path does not exist.
"""
if not exists(weight_path):
raise ValueError('ERROR (load_weights): The weights file at {} does '
'not exist. Refer to the README for instructions.'
.format(weight_path))
if extend_embedding and 'embed' in exclude_names:
raise ValueError('ERROR (load_weights): Cannot extend a vocabulary '
'without loading the embedding weights.')
# Copy only weights from the temporary model that are wanted
# for the specific task (e.g. the Softmax is often ignored)
weights = torch.load(weight_path)
for key, weight in weights.items():
if any(excluded in key for excluded in exclude_names):
if verbose:
print('Ignoring weights for {}'.format(key))
continue
try:
model_w = model.state_dict()[key]
except KeyError:
raise KeyError("Weights had parameters {},".format(key)
+ " but could not find this parameters in model.")
if verbose:
print('Loading weights for {}'.format(key))
# extend embedding layer to allow new randomly initialized words
# if requested. Otherwise, just load the weights for the layer.
if 'embed' in key and extend_embedding > 0:
weight = torch.cat((weight, model_w[NB_TOKENS:, :]), dim=0)
if verbose:
print('Extended vocabulary for embedding layer ' +
'from {} to {} tokens.'.format(
NB_TOKENS, NB_TOKENS + extend_embedding))
try:
model_w.copy_(weight)
except:
            print('While copying the weights named {}, whose dimensions in the model are'
' {} and whose dimensions in the saved file are {}, ...'.format(
key, model_w.size(), weight.size()))
raise
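# --- Illustrative sketch (added, not part of the original torchMoji code) ---
# Shows the partial-loading behaviour documented above: weights are copied layer
# by layer from a saved state dict, while layers listed in `exclude_names` (here
# the softmax head) keep their freshly initialized values. Two randomly
# initialized models and a temporary file are used so the sketch runs without
# a real checkpoint.
def _example_partial_loading():
    import os
    import tempfile
    source = TorchMoji(nb_classes=NB_EMOJI_CLASSES, nb_tokens=NB_TOKENS)
    target = TorchMoji(nb_classes=NB_EMOJI_CLASSES, nb_tokens=NB_TOKENS)
    fd, path = tempfile.mkstemp(suffix='.bin')
    os.close(fd)
    torch.save(source.state_dict(), path)
    load_specific_weights(target, path, exclude_names=['output_layer'], verbose=False)
    assert torch.equal(target.embed.weight, source.embed.weight)              # copied
    assert not torch.equal(target.output_layer[0].weight,
                           source.output_layer[0].weight)                     # excluded
    os.remove(path)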
| [
"torch.nn.utils.rnn.PackedSequence",
"torch.nn.Linear",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.init.orthogonal",
"torch.nn.init.constant",
"torch.nn.Softmax",
"torch.nn.init.xavier_uniform",
"torch.autograd.Variable",
"torch.nn.init.uniform",
"torch.nn.Tanh",
"torch.nn.Sigmoid",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.load",
"torch.nn.Dropout2d",
"torch.nn.Embedding"
] | 1.8.2 | UmaTaru/run | be29e4d41a4de3dee27cd6796801bfe51382d294 |
1.8 | import torch
from ..function import Function, InplaceFunction
# TODO: no need to save all args if the grad w.r.t. some of them is not needed
def _get_output(ctx, arg, inplace=False):
if inplace:
ctx.mark_dirty(arg)
return arg
else:
return arg.new().resize_as_(arg)
class Addmm(InplaceFunction):
@staticmethod
def forward(ctx, add_matrix, matrix1, matrix2, alpha=1, beta=1, inplace=False):
ctx.alpha = alpha
ctx.beta = beta
ctx.save_for_backward(matrix1, matrix2)
output = _get_output(ctx, add_matrix, inplace=inplace)
return torch.addmm(alpha, add_matrix, beta,
matrix1, matrix2, out=output)
@staticmethod
def backward(ctx, grad_output):
matrix1, matrix2 = ctx.saved_variables
grad_add_matrix = grad_matrix1 = grad_matrix2 = None
if ctx.needs_input_grad[0]:
grad_add_matrix = grad_output
if ctx.alpha != 1:
grad_add_matrix = grad_add_matrix.mul(ctx.alpha)
if ctx.needs_input_grad[1]:
grad_matrix1 = torch.mm(grad_output, matrix2.t())
if ctx.beta != 1:
grad_matrix1 *= ctx.beta
if ctx.needs_input_grad[2]:
grad_matrix2 = torch.mm(matrix1.t(), grad_output)
if ctx.beta != 1:
grad_matrix2 *= ctx.beta
return grad_add_matrix, grad_matrix1, grad_matrix2, None, None, None
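# --- Illustrative check (added, not part of the original module) ---
# A minimal sketch of the gradient formulas implemented in Addmm.backward,
# verified with autograd on the equivalent expression alpha * add + beta * (m1 @ m2):
# the add term receives grad_output scaled by alpha, m1 receives
# beta * grad_output @ m2^T, and m2 receives beta * m1^T @ grad_output.
def _check_addmm_grads():
    alpha, beta = 2.0, 3.0
    add = torch.randn(4, 5, requires_grad=True)
    m1 = torch.randn(4, 3, requires_grad=True)
    m2 = torch.randn(3, 5, requires_grad=True)
    grad_output = torch.randn(4, 5)
    (alpha * add + beta * (m1 @ m2)).backward(grad_output)
    assert torch.allclose(add.grad, alpha * grad_output)
    assert torch.allclose(m1.grad, beta * grad_output @ m2.t())
    assert torch.allclose(m2.grad, beta * m1.t() @ grad_output)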
class Addbmm(InplaceFunction):
@staticmethod
def forward(ctx, add_matrix, batch1, batch2, alpha=1, beta=1, inplace=False):
ctx.alpha = alpha
ctx.beta = beta
ctx.save_for_backward(batch1, batch2)
output = _get_output(ctx, add_matrix, inplace=inplace)
return torch.addbmm(alpha, add_matrix, beta,
batch1, batch2, out=output)
@staticmethod
def backward(ctx, grad_output):
batch1, batch2 = ctx.saved_variables
grad_add_matrix = grad_batch1 = grad_batch2 = None
if ctx.needs_input_grad[0]:
grad_add_matrix = grad_output
if ctx.alpha != 1:
grad_add_matrix = grad_add_matrix.mul(ctx.alpha)
if any(ctx.needs_input_grad[1:]):
batch_grad_output = (grad_output
.unsqueeze(0)
.expand(batch1.size(0), batch1.size(1), batch2.size(2)))
if ctx.needs_input_grad[1]:
grad_batch1 = torch.bmm(batch_grad_output, batch2.transpose(1, 2))
if ctx.beta != 1:
grad_batch1 *= ctx.beta
if ctx.needs_input_grad[2]:
grad_batch2 = torch.bmm(batch1.transpose(1, 2), batch_grad_output)
if ctx.beta != 1:
grad_batch2 *= ctx.beta
return grad_add_matrix, grad_batch1, grad_batch2, None, None, None
class Baddbmm(InplaceFunction):
@staticmethod
def forward(ctx, add_batch, batch1, batch2, alpha=1, beta=1, inplace=False):
ctx.alpha = alpha
ctx.beta = beta
ctx.save_for_backward(batch1, batch2)
output = _get_output(ctx, add_batch, inplace=inplace)
return torch.baddbmm(alpha, add_batch, beta,
batch1, batch2, out=output)
@staticmethod
def backward(ctx, grad_output):
batch1, batch2 = ctx.saved_variables
grad_add_batch = grad_batch1 = grad_batch2 = None
if ctx.needs_input_grad[0]:
grad_add_batch = grad_output
if ctx.alpha != 1:
grad_add_batch = grad_add_batch.mul(ctx.alpha)
if ctx.needs_input_grad[1]:
grad_batch1 = torch.bmm(grad_output, batch2.transpose(1, 2))
if ctx.beta != 1:
grad_batch1 *= ctx.beta
if ctx.needs_input_grad[2]:
grad_batch2 = torch.bmm(batch1.transpose(1, 2), grad_output)
if ctx.beta != 1:
grad_batch2 *= ctx.beta
return grad_add_batch, grad_batch1, grad_batch2, None, None, None
class Addmv(InplaceFunction):
@staticmethod
def forward(ctx, add_vector, matrix, vector, alpha=1, beta=1, inplace=False):
ctx.alpha = alpha
ctx.beta = beta
ctx.save_for_backward(matrix, vector)
output = _get_output(ctx, add_vector, inplace=inplace)
return torch.addmv(alpha, add_vector, beta,
matrix, vector, out=output)
@staticmethod
def backward(ctx, grad_output):
matrix, vector = ctx.saved_variables
grad_add_vector = grad_matrix = grad_vector = None
if ctx.needs_input_grad[0]:
grad_add_vector = grad_output
if ctx.alpha != 1:
grad_add_vector = grad_add_vector.mul(ctx.alpha)
if ctx.needs_input_grad[1]:
grad_matrix = torch.ger(grad_output, vector)
if ctx.beta != 1:
grad_matrix *= ctx.beta
if ctx.needs_input_grad[2]:
grad_vector = torch.mv(matrix.t(), grad_output)
if ctx.beta != 1:
grad_vector *= ctx.beta
return grad_add_vector, grad_matrix, grad_vector, None, None, None
class Addr(InplaceFunction):
@staticmethod
def forward(ctx, add_matrix, vector1, vector2, alpha=1, beta=1, inplace=False):
ctx.alpha = alpha
ctx.beta = beta
ctx.save_for_backward(vector1, vector2)
output = _get_output(ctx, add_matrix, inplace=inplace)
return torch.addr(alpha, add_matrix, beta,
vector1, vector2, out=output)
@staticmethod
def backward(ctx, grad_output):
vector1, vector2 = ctx.saved_variables
grad_add_matrix = grad_vector1 = grad_vector2 = None
if ctx.needs_input_grad[0]:
grad_add_matrix = grad_output
if ctx.alpha != 1:
grad_add_matrix = grad_add_matrix.mul(ctx.alpha)
if ctx.needs_input_grad[1]:
grad_vector1 = torch.mv(grad_output, vector2)
if ctx.beta != 1:
grad_vector1 *= ctx.beta
if ctx.needs_input_grad[2]:
# TODO: maybe it's better to do transpose + mv + transpose
grad_vector2 = torch.mm(vector1.unsqueeze(0), grad_output).squeeze(0)
if ctx.beta != 1:
grad_vector2 *= ctx.beta
return grad_add_matrix, grad_vector1, grad_vector2, None, None, None
class Dot(Function):
@staticmethod
def forward(ctx, vector1, vector2):
ctx.save_for_backward(vector1, vector2)
ctx.sizes = (vector1.size(), vector2.size())
return vector1.new((vector1.dot(vector2),))
@staticmethod
def backward(ctx, grad_output):
vector1, vector2 = ctx.saved_variables
grad_vector1 = grad_vector2 = None
if ctx.needs_input_grad[0]:
grad_vector1 = vector2.mul(grad_output.expand(ctx.sizes[1])).view(ctx.sizes[0])
if ctx.needs_input_grad[1]:
grad_vector2 = vector1.mul(grad_output.expand(ctx.sizes[0])).view(ctx.sizes[1])
return grad_vector1, grad_vector2
| [
"torch.mv",
"torch.addr",
"torch.addmv",
"torch.baddbmm",
"torch.addbmm",
"torch.addmm",
"torch.ger"
] | 1.8.2 | UmaTaru/run | be29e4d41a4de3dee27cd6796801bfe51382d294 |
1.8 | import torch
from .Criterion import Criterion
# TODO: use THNN
class BCECriterion(Criterion):
eps = 1e-12
def __init__(self, weights=None, sizeAverage=True):
if weights is not None and weights.dim() != 1:
raise ValueError("weights input should be 1D Tensor")
super(BCECriterion, self).__init__()
self.sizeAverage = sizeAverage
self.buffer = None
self.weights = weights
def updateOutput(self, input, target):
# - log(input) * target - log(1 - input) * (1 - target)
if input.nelement() != target.nelement():
raise RuntimeError("input and target size mismatch")
if self.buffer is None:
self.buffer = input.new()
buffer = self.buffer
weights = self.weights
buffer.resize_as_(input)
if weights is not None and target.dim() != 1:
weights = self.weights.view(1, target.size(1)).expand_as(target)
# log(input) * target
torch.add(input, self.eps, out=buffer).log_()
if weights is not None:
buffer.mul_(weights)
output = torch.dot(target, buffer)
# log(1 - input) * (1 - target)
torch.mul(input, -1, out=buffer).add_(1 + self.eps).log_()
if weights is not None:
buffer.mul_(weights)
output = output + torch.sum(buffer)
output = output - torch.dot(target, buffer)
if self.sizeAverage:
output = output / input.nelement()
self.output = - output
return self.output
def updateGradInput(self, input, target):
# - (target - input) / ( input (1 - input) )
# The gradient is slightly incorrect:
        # It should have been divided by (input + self.eps) (1 - input + self.eps)
        # but it is divided by input (1 - input + self.eps) + self.eps
        # This modification requires less memory to compute.
if input.nelement() != target.nelement():
raise RuntimeError("input and target size mismatch")
if self.buffer is None:
self.buffer = input.new()
buffer = self.buffer
weights = self.weights
gradInput = self.gradInput
if weights is not None and target.dim() != 1:
weights = self.weights.view(1, target.size(1)).expand_as(target)
buffer.resize_as_(input)
# - x ( 1 + self.eps -x ) + self.eps
torch.add(input, -1, out=buffer).add_(-self.eps).mul_(input).add_(-self.eps)
gradInput.resize_as_(input)
# y - x
torch.add(target, -1, input, out=gradInput)
# - (y - x) / ( x ( 1 + self.eps -x ) + self.eps )
gradInput.div_(buffer)
if weights is not None:
gradInput.mul_(weights)
if self.sizeAverage:
gradInput.div_(target.nelement())
return gradInput
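# --- Illustrative sketch (added, not part of the original module) ---
# A small 1D example checking that updateOutput matches the usual binary
# cross-entropy mean(-t*log(x) - (1-t)*log(1-x)) up to the eps stabilisation,
# assuming the legacy Criterion base class imported above is available.
def _bce_example():
    x = torch.tensor([0.9, 0.2, 0.7])
    t = torch.tensor([1.0, 0.0, 1.0])
    loss = BCECriterion().updateOutput(x, t)
    manual = (-(t * torch.log(x) + (1 - t) * torch.log(1 - x))).mean()
    assert abs(float(loss) - float(manual)) < 1e-4
    return loss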
| [
"torch.mul",
"torch.add",
"torch.dot",
"torch.sum"
] | 1.8.2 | UmaTaru/run | be29e4d41a4de3dee27cd6796801bfe51382d294 |
1.8 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
ParlAI has limited support for using models from
`Fairseq <https://github.com/pytorch/fairseq>`_. Fairseq often supports more
experimental seq2seq architectures with fast fp16 training.
Fairseq models can be used for many default tasks by supplying an
``--arch`` flag. For example:
`python -m parlai.scripts.train -t convai2 -m fairseq -a transformer`
"""
from parlai.core.dict import DictionaryAgent
from parlai.core.utils import argsort, padded_tensor
try:
from fairseq import models, optim, criterions
# this is a hack around versioning check because fairseq doesn't
# announce version numbers yet
# fairseq 0.5.0 has fp16_trainer, 0.6.0 does not
try:
from fairseq import fp16_trainer # noqa: F401
except ImportError:
pass
else:
raise ImportError
except ImportError:
raise ImportError(
"Please run \"pip install -U 'git+https://github.com/pytorch/"
"[email protected]#egg=fairseq'\""
)
from fairseq import trainer
from fairseq.sequence_generator import SequenceGenerator
from fairseq.sequence_scorer import SequenceScorer
from fairseq import options
from fairseq.tasks.fairseq_task import FairseqTask
from fairseq.utils import convert_padding_direction, load_model_state
from fairseq.meters import AverageMeter
from parlai.core.torch_agent import TorchAgent, Output
from parlai.core.build_data import modelzoo_path
from parlai.core.utils import round_sigfigs
import argparse
import torch
import os
import numpy as np
import json
from collections import defaultdict
# If a model file is loaded, these arguments may NOT be overridden in the
# command line:
NON_OVERRIDABLE_ARGS = {
'arch',
'encoder_embed_dim',
'encoder_layers',
'decoder_embed_dim',
'decoder_layers',
'decoder_out_embed_dim',
'decoder_attention',
}
def _fairseq_opt_wrapper(opt, skip_pretrained_embedding_loading=False):
"""
    Marshals a dict into an argparse.Namespace object for API compatibility.
Also does some necessary post-processing needed for fairseq. Optionally can
override pretrained embedding options, which is useful if we're just loading
a model from a checkpoint.
:param opt: dict. ParlAI options passed around from everywhere.
:param skip_pretrained_embedding_loading: bool. Don't preload word embeddings.
:return: an argparse.Namespace object for use in fairseq-py.
"""
args = argparse.Namespace()
# first set args according to ParlAI options
for key in opt:
if opt[key] is not None:
setattr(args, key, opt[key])
# at this point the user *must* have specified an arch
if not hasattr(args, "arch"):
raise ValueError("--arch/-a must be specified")
# fill in default options from the model
models.ARCH_CONFIG_REGISTRY[args.arch](args)
# post processing of args. See
# https://github.com/pytorch/fairseq/blob/v0.5.0/fairseq/options.py#L95
if hasattr(args, "lr"):
args.lr = options.eval_str_list(args.lr, type=float)
if hasattr(args, "update_freq"):
args.update_freq = options.eval_str_list(args.update_freq, int)
if hasattr(args, "max_sentences_valid"):
args.max_sentences_valid = args.max_sentences
if args.truncate == -1:
# some torch agents use positional embeddings, which must have a max length
args.truncate = 1024
if not hasattr(args, "max_source_positions"):
# fairseq uses a different name for this CLI parameter
# Sometimes it's set in model defaults, but not for all models
args.max_source_positions = args.truncate
# if we don't have source lengths, we don't have target lengths
args.max_target_positions = args.truncate
# handle modelzoo if possible
for k in ("encoder_embed_path", "decoder_embed_path"):
if getattr(args, k, None) is None:
# not an argument for this model, pretrained embeddings don't matter
continue
elif skip_pretrained_embedding_loading:
# if we want to skip pretrained, then hide the option from fairseq
setattr(args, k, None)
else:
# otherwise we may need to modelzoo adjust the path for fairseq
import warnings
warnings.warn("We recommend using --embedding-type instead")
setattr(args, k, modelzoo_path(opt.get("datapath"), getattr(args, k)))
# Here we hardcode a few options that we currently do not support
# turn off distributed training
args.distributed_world_size = 1
args.distributed_rank = 0
return args, vars(args)
class _FairseqDictionary(DictionaryAgent):
"""
Skeleton dictionary class needed for interaction with fairseq-py.
This class mostly just adds some basic API behavior that Fairseq internally
expects from dictionaries.
It also inserts a fake token at the 0th index of the dictionary, as
fairseq-py maintains backwards compatibility with fairseq-lua, which uses
1 indexing.
"""
# Name of our fake lua compatibility token
_LUA = '__LUACOMPAT__'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# insert the fairseq-lua compatibility token to emulate 1-indexing.
# This 1-indexing assumption is baked into a couple of places in fairseq-py,
# and is unavoidable at the moment.
#
# Because of the structure of DictionaryAgent, it's difficult to force
# a token in the 0th position without breaking load()ing. I've found
# this to be the best way.
# add the token to the dictionary
self.add_token(_FairseqDictionary._LUA)
# force it to be the "most frequent" token
self.freq[_FairseqDictionary._LUA] = self.freq[self.null_token] + 1
# sort the list to ensure the lua token is placed first. trim=False to
# ensure shuffle is non-destructive.
self.sort(trim=False)
def pad(self):
return self.pad_index
def eos(self):
return self[self.end_token]
def unk(self):
return self[self.unk_token]
@property
def pad_index(self):
return self[self.null_token]
@property
def eos_index(self):
return self[self.end_token]
@property
def bos_index(self):
return self[self.start_token]
@property
def unk_index(self):
return self[self.unk_token]
def add_symbol(self):
raise NotImplementedError("This is a fake class")
@property
def symbols(self):
return self.tok2ind.keys()
class _ParlaiTask(FairseqTask):
"""Skeleton task class needed for interaction with fairseq-py."""
def __init__(self, dictionary):
self.dict = dictionary
@property
def target_dictionary(self):
return self.dict
@property
def source_dictionary(self):
return self.dict
class FairseqAgent(TorchAgent):
"""Generic wrapper around fairseq for use in ParlAI"""
metrics = {}
@classmethod
def add_cmdline_args(cls, argparser):
"""Add command-line arguments specifically for this agent."""
# first we need to add the general torch agent operations
super(FairseqAgent, cls).add_cmdline_args(argparser)
# let's store any defaults that were overridden
old_defaults = argparser._defaults
if 'clip_norm' not in old_defaults:
# fairseq has a few awful defaults
old_defaults['clip_norm'] = 1.0
if 'optimizer' not in old_defaults:
old_defaults['optimizer'] = 'adam'
old_defaults['adam_betas'] = '(0.9,0.98)'
agent = argparser.add_argument_group('Fairseq Arguments')
agent.add_argument(
'--fp16', default=False, type='bool', help='Use fp16 training'
)
agent.add_argument(
'--fp16-init-scale',
default=2 ** 7,
type=int,
help='default FP16 loss scale',
)
agent.add_argument(
'--seed',
default=1,
type=int,
metavar='N',
help='pseudo random number generator seed',
)
agent.add_argument(
'--skip-generation',
default=False,
type='bool',
metavar='BOOL',
help='Skips test time beam search. Much faster if you only need PPL',
)
# Check subargs for generation, optimizers, criterions, archs, etc
options.add_generation_args(argparser)
options.add_optimization_args(argparser)
options.add_checkpoint_args(argparser)
# restore any user set defaults that fairseq possibly overrode
argparser.set_defaults(**old_defaults)
known_args = argparser.parse_known_args(nohelp=True)[0]
if hasattr(known_args, "optimizer"):
optimizer = known_args.optimizer
opt_group = argparser.add_argument_group(
'{} optimizer arguments'.format(optimizer)
)
optim.OPTIMIZER_REGISTRY[optimizer].add_args(opt_group)
if hasattr(known_args, "lr_scheduler"):
lr_scheduler = known_args.lr_scheduler
lr_group = argparser.add_argument_group(
'{} scheduler arguments'.format(lr_scheduler)
)
optim.lr_scheduler.LR_SCHEDULER_REGISTRY[lr_scheduler].add_args(lr_group)
# We need to find out the fairseq model-specific options, so grab the
# architecture stuff and look up its options
arch_group = options.add_model_args(argparser)
# Fairseq marks the arch flag as required, but it may be specified
# by a saved model cache, so we do some weird stuff to undo that
for a in arch_group._actions:
if a.dest == "arch":
a.required = False
a.default = None
break
# once again restore any user-set defaults
argparser.set_defaults(**old_defaults)
known_args = argparser.parse_known_args(nohelp=True)[0]
if hasattr(known_args, "arch") and known_args.arch is not None:
arch = known_args.arch
arch_group = argparser.add_argument_group(
"{} architecture arguments".format(arch)
)
models.ARCH_MODEL_REGISTRY[arch].add_args(arch_group)
if hasattr(known_args, "criterion"):
crit_group = argparser.add_argument_group(
'{} criterion arguments'.format(known_args.criterion)
)
criterions.CRITERION_REGISTRY[known_args.criterion].add_args(crit_group)
# one last time, restore any user set defaults
argparser.set_defaults(**old_defaults)
# default weight decay in fairseq is zero not None
argparser.set_defaults(weight_decay=0.0)
@staticmethod
def dictionary_class():
# Force use of the Fairseq Dictionary
return _FairseqDictionary
def __init__(self, opt, shared=None):
# In general use a basic TorchAgent wherever possible
super().__init__(opt, shared)
if not shared:
# this is not a shared instance of this class, so do full initialization
# check early if we're going to be loading the model from a checkpoint
model_file_exists = self.opt.get('model_file') and os.path.isfile(
self.opt['model_file']
)
# fairseq expects options to be in argparse format, instead of a dict
# We also need to do some argument postprocessing and whatnot
# We'll skip pretrained embeddings if we're going to override them with
# a model checkpoint anyway
self.args, self.opt = _fairseq_opt_wrapper(opt, model_file_exists)
# seed the RNG
torch.manual_seed(self.args.seed)
# Just some identifying info
self.id = "fairseq:{}".format(self.args.arch)
# We need a placeholder task for fairseq
self.task = _ParlaiTask(self.dict)
# meters for keeping track of loss, ppl, etc.
self.meters = defaultdict(AverageMeter)
# actually construct the criterion, model and generator
self.criterion = self.build_criterion()
self.model = self.build_model()
# Construct the generator and scorer
self.generator = SequenceGenerator(
[self.model],
tgt_dict=self.dict,
beam_size=self.args.beam,
stop_early=(not self.args.no_early_stop),
normalize_scores=(not self.args.unnormalized),
len_penalty=self.args.lenpen,
unk_penalty=self.args.unkpen,
sampling=self.args.sampling,
sampling_topk=self.args.sampling_topk,
sampling_temperature=self.args.sampling_temperature,
)
self.scorer = SequenceScorer([self.model], self.dict)
# TODO: we might choose to add a --no-fp16 opt in the future to
# explicitly disable fp16 instead
if not self.args.fp16 and torch.cuda.get_device_capability(0)[0] >= 7:
print("Heads up: using --fp16 could be a lot faster!")
if self.use_cuda:
self.trainer = trainer.Trainer(
self.args, self.task, self.model, self.criterion, None
)
self.trainer._build_optimizer()
else:
self.trainer = None
# if the model already existed, let's preload it and the trainer
if model_file_exists:
print('Loading existing model params from ' + self.opt['model_file'])
self.load(self.opt.get('model_file'))
# move things to the GPU if possible
if self.use_cuda:
self.model = self.model.cuda()
self.generator = self.generator.cuda()
else:
self.model = shared['model']
self.trainer = shared['trainer']
self.generator = shared['generator']
self.dict = shared['dict']
self.args = shared['args']
self.meters = shared['meters']
# Start things off clean
self.reset()
def _check_opts_unchanged(self, saved_opts, current_opts):
"""Verify that critical options do not differ in command line vs saved model"""
for k in NON_OVERRIDABLE_ARGS:
if k not in saved_opts or k not in current_opts:
# if it's not an option needed by this fairseq model, don't stress
continue
if saved_opts[k] != current_opts[k]:
raise ValueError(
'{} cannot be overridden when --model-file is specified'.format(k)
)
def build_model(self):
"""
Construct the actual Fairseq model. Default implementation is to use
Fairseq's arch builder, but this method may be overridden to build custom
models.
"""
model_class = models.ARCH_MODEL_REGISTRY[self.args.arch]
model = model_class.build_model(self.args, self.task)
if self.args.embedding_type != 'random':
self._copy_embeddings(
model.encoder.embed_tokens.weight, self.args.embedding_type
)
return model
def build_criterion(self):
"""Set up the grader."""
# TorchAgent will call this without ready=True before self.args is ready
return criterions.build_criterion(self.args, self.task)
def share(self):
shared = super().share()
shared['model'] = self.model
shared['trainer'] = self.trainer
shared['generator'] = self.generator
shared['dict'] = self.dict
shared['args'] = self.args
shared['meters'] = self.meters
return shared
def save(self, path):
"""Save using fairseq's checkpointing."""
if not path:
return
self.trainer.save_checkpoint(path, {'opt': self.opt, 'epoch': 0})
# Parlai expects options to also be saved
with open(path + '.opt', 'w') as handle:
# overridden options shouldn't be stored, only the main ones
if 'override' in self.opt:
del self.opt['override']
json.dump(self.opt, handle)
# force save the dict
self.dict.save(path + '.dict', sort=False)
def load(self, path):
"""Load using fairseq's checkpointing."""
if self.trainer:
old_options = self.trainer.load_checkpoint(path, self.args.reset_optimizer)
self._check_opts_unchanged(old_options, self.opt)
else:
load_model_state(path, self.model)
def shutdown(self):
if not hasattr(self, 'trainer'):
# looks like this is a "fake" model that isn't actually used for batch_act.
# we don't need to save this one.
return
super().shutdown()
def reset(self):
"""Reset observation and episode_done."""
super().reset()
self.reset_metrics()
def is_valid(self, obs):
"""Override from TorchAgent.
Check if an observation has no tokens in it."""
return len(obs.get('text_vec', [])) > 0
def batchify(self, obs_batch):
"""
Override parent batchify to set requirements for fairseq.
Fairseq depends on sorted batch inputs for a call to rnn.pad_packed_sequence.
Fairseq models cannot handle zero length sentences
"""
return super().batchify(obs_batch, sort=True)
def _update_metrics(self, metrics, sample):
if metrics is None:
# probably got an overflow in fp16 mode. don't count this sample
return
bsz = len(sample['target'])
ntok = sample['ntokens']
ssize = metrics['sample_size']
for k, v in metrics.items():
if k in {'ntokens', 'nsentences', 'sample_size'}:
# don't need these
continue
elif k == "nll_loss":
# nll loss is always normalized by ntokens
self.meters[k].update(v, ntok)
elif k == "loss":
# loss is explicitly normalized by passed up sample size
self.meters[k].update(v, ssize)
else:
# assume everything else it's averaged over bsz
self.meters[k].update(v, bsz)
def train_step(self, batch):
"""Process batch of inputs and targets and train on them.
:param batch: parlai.core.torch_agent.Batch, contains tensorized
version of observations.
"""
if batch.text_vec is None:
return
self.is_training = True
sample = self._make_sample(batch)
self.model.train()
metrics = self.trainer.train_step([sample])
self._update_metrics(metrics, sample)
def eval_step(self, batch):
"""Process batch of inputs.
If the batch includes labels, calculate validation metrics as well.
If --skip-generation is not set, return a prediction for each input.
:param batch: parlai.core.torch_agent.Batch, contains tensorized
version of observations.
"""
if batch.text_vec is None:
return
self.is_training = False
samples = self._make_sample(batch)
self.model.eval()
if batch.label_vec is not None and self.trainer is not None:
# Interactive mode won't have a gold label
metrics = self.trainer.valid_step(samples)
self._update_metrics(metrics, samples)
# Output placeholders
reranked_cands = None
generated_output = None
# Grade each of the candidate sequences
if batch.candidate_vecs is not None:
bsz = len(batch.text_vec)
reranked_cands = []
# score the candidates for each item in the batch separately, so that
# we can support variable number of candidates
for i in range(bsz):
cands = batch.candidate_vecs[i]
if not cands:
reranked_cands.append(None)
continue
ncand = len(cands)
# repeat the input many times
xs = batch.text_vec[i].unsqueeze(0).expand(ncand, -1)
# some models crash if there's leading padding on every example
xs = xs[:, : batch.text_lengths[i]]
# and appropriately pack the outputs
ys, _ = padded_tensor(cands, self.NULL_IDX, self.use_cuda)
s = self._make_sample(xs=xs, ys=ys)
# perform the actual grading, extract the scores
scored = list(self.scorer.score_batched_itr([s], cuda=self.use_cuda))
scores = [s[3][0]['score'].item() for s in scored]
# intentional hanging comma here; argsort returns a list
ranked, = argsort(scores, batch.candidates[i], descending=True)
reranked_cands.append(ranked)
# Next generate freely to create our response
if not self.args.skip_generation:
generated_output = self._generate(samples)
elif reranked_cands:
            # we're skipping generation, but we're also grading candidates
# so output the highest ranked candidate
# In the case of zero candidates, we don't have something to rank,
# so we may need to pass on that None
generated_output = [
ranked and ranked[0] or None for ranked in reranked_cands
]
else:
# no output at all
pass
return Output(generated_output, reranked_cands)
def _generate(self, samples):
no_prev_token = {
k: v for k, v in samples['net_input'].items() if k != 'prev_output_tokens'
}
gens = self.generator.generate(no_prev_token, maxlen=64)
bsz = samples['net_input']['src_tokens'].size(0)
responses = []
for i in range(bsz):
beams = gens[i]
selected = max(beams, key=lambda x: x["score"])
tokens = selected["tokens"]
start = 0
end = -1
            for position, t in enumerate(tokens):
                t = t.item()
                if t == self.dict.bos_index:
                    # don't include <s> token
                    start = position + 1
                    continue
                if t == self.dict.eos_index:
                    # stop (and don't include) </s> token
                    end = position
                    break
responses.append(self.dict.vec2txt(tokens[start:end]))
return responses
def report(self):
"""Return metrics calculated by the model."""
# if we haven't initialized yet, just return a dummy object
if not hasattr(self, "trainer"):
return {}
output = {k: v.avg for k, v in self.meters.items()}
if "nll_loss" in self.meters:
# special case, we used sentence averaging so ppl comes from nll_loss
output["ppl"] = np.exp2(self.meters["nll_loss"].avg)
else:
# normal case, just use loss
output["ppl"] = np.exp2(self.meters["loss"].avg)
# Fairseq trainer metrics we'll pass up the way
trainer_metrics = {"ups", "wps", "gnorm", "clip"}
if self.is_training:
for k in trainer_metrics:
output[k] = self.trainer.meters[k].avg
# for display purposes
output = {k: round_sigfigs(v, 4) for k, v in output.items()}
return output
def reset_metrics(self):
"""Reset metrics calculated by the model back to zero."""
if not hasattr(self, "trainer"):
# We haven't set up the trainer yet, so we don't have any metrics
return
# We need to reset everything
self.meters.clear()
if self.trainer:
for k in self.trainer.meters:
self.trainer.meters[k].reset()
def receive_metrics(self, metrics_dict):
"""Update lr scheduler with validation loss."""
# TODO: this should be smarter
self.trainer.lr_step(-1, metrics_dict["loss"])
# Helper functions
def _seq_length(self, xs):
"""Compute length of the sequence (non-padded size)."""
return xs.ne(self.dict.pad_index).long().sum(dim=-1)
def _right_shifted_ys(self, ys):
"""Replace first token with EOS and shift remaining tokens right 1."""
result = torch.LongTensor(ys.size())
result[:, 0] = self.dict.eos_index
result[:, 1:] = ys[:, :-1]
return result
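    # Illustrative note (added, not in the original): _right_shifted_ys builds the
    # decoder input from the labels, e.g. ys = [[5, 6, 7]] becomes [[eos, 5, 6]],
    # so the model predicts token t from the tokens before t (teacher forcing).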
def _make_sample(self, batch=None, xs=None, ys=None):
"""Generate a sample object that Fairseq expects."""
# add extra info to samples
if batch is None and xs is None:
raise ValueError("Must supply either batch or xs")
if batch is None and ys is None:
raise ValueError("Must supply either batch or ys")
if xs is None:
xs = batch.text_vec
if ys is None:
ys = batch.label_vec
repadded = convert_padding_direction(xs, self.dict.pad(), right_to_left=True)
sample = {}
sample["id"] = torch.arange(len(xs) - 1)
sample["net_input"] = {
"src_tokens": repadded,
"src_lengths": self._seq_length(xs),
}
if ys is not None:
sample["target"] = ys
sample["ntokens"] = sum(self._seq_length(ys)).item()
sample["net_input"]["prev_output_tokens"] = self._right_shifted_ys(ys)
return sample
| [
"torch.manual_seed",
"torch.cuda.get_device_capability"
] | 1.8.2 | UmaTaru/run | be29e4d41a4de3dee27cd6796801bfe51382d294 |
1.0 | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import fnmatch
import json
import logging
import os
import shutil
import sys
import tempfile
from functools import wraps
from hashlib import sha256
from io import open
import boto3
import numpy as np
import requests
from botocore.exceptions import ClientError
from dotmap import DotMap
from tqdm import tqdm
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv(
"TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")
)
)
default_cache_path = os.path.join(torch_cache_home, "farm")
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
FARM_CACHE = Path(os.getenv("FARM_CACHE", default_cache_path))
except (AttributeError, ImportError):
FARM_CACHE = os.getenv("FARM_CACHE", default_cache_path)
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode("utf-8")
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode("utf-8")
etag_hash = sha256(etag_bytes)
filename += "." + etag_hash.hexdigest()
return filename
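# --- Illustrative sketch (added, not part of the original module) ---
# Shows the cache-filename scheme produced above: the sha256 hex digest of the
# URL, optionally followed by '.' and the sha256 hex digest of the ETag. The URL
# and ETag values are made up for illustration.
def _example_url_to_filename():
    name = url_to_filename("https://example.com/model.bin", etag='"abc123"')
    url_hash, etag_hash = name.split(".")
    assert len(url_hash) == 64 and len(etag_hash) == 64
    return name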
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = FARM_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + ".json"
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata["url"]
etag = metadata["etag"]
return url, etag
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = FARM_CACHE
if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ("http", "https", "s3"):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError(
"unable to parse {} as a URL or as a local path".format(url_or_filename)
)
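# --- Illustrative sketch (added, not part of the original module) ---
# For a local path that already exists, cached_path simply returns it; for
# http(s)/s3 URLs it downloads into the FARM cache and returns the cached copy.
# A throwaway temporary file stands in for the local case here.
def _example_cached_path():
    with tempfile.NamedTemporaryFile(delete=False) as handle:
        local = handle.name
    try:
        assert cached_path(local) == local
    finally:
        os.remove(local)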
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url):
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file, proxies=None):
req = requests.get(url, stream=True, proxies=proxies)
content_length = req.headers.get("Content-Length")
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = FARM_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
try:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
etag = None
else:
etag = response.headers.get("ETag")
except EnvironmentError:
etag = None
if sys.version_info[0] == 2 and etag is not None:
etag = etag.decode("utf-8")
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# If we don't have a connection (etag is None) and can't identify the file
# try to get the last downloaded one
if not os.path.exists(cache_path) and etag is None:
matching_files = fnmatch.filter(os.listdir(cache_dir), filename + ".*")
matching_files = list(filter(lambda s: not s.endswith(".json"), matching_files))
if matching_files:
cache_path = os.path.join(cache_dir, matching_files[-1])
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, "wb") as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {"url": url, "etag": etag}
meta_path = cache_path + ".json"
with open(meta_path, "w") as meta_file:
output_string = json.dumps(meta)
if sys.version_info[0] == 2 and isinstance(output_string, str):
output_string = unicode(
output_string, "utf-8"
) # The beauty of python 2
meta_file.write(output_string)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename):
"""
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
"""
collection = set()
with open(filename, "r", encoding="utf-8") as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path, dot=True, lower=True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
def read_config(path, flattend=False):
if path:
with open(path) as json_data_file:
conf_args = json.load(json_data_file)
else:
raise ValueError("No config provided for classifier")
def getArgValue(arg):
if "value" not in arg:
logger.error(
"Only depth 2 config files supported. Failed to convert: %s" % str(arg)
)
return arg["value"] if (arg["value"] is not None) else arg["default"]
# flatten last part of config, take either value or default as value
for gk, gv in conf_args.items():
for k, v in gv.items():
if isinstance(getArgValue(v), dict):
logger.error("Config is too deeply nested, at %s" % str(v))
conf_args[gk][k] = getArgValue(v)
# DotMap for making nested dictionary accessible through dot notation
flat_args = dict(
conf_args["general"],
**conf_args["task"],
**conf_args["parameter"],
**conf_args["logging"],
)
if flattend:
args = DotMap(flat_args, _dynamic=False)
else:
args = DotMap(conf_args, _dynamic=False)
return args
def unnestConfig(config, flattened=False):
"""
    This function creates a list of configs for evaluating parameters with different values. Every config parameter
    whose value is a list is iterated over, and one config object without lists is returned per combination of values.
    It can handle lists inside any number of parameters.
    Works with shallow (flattened) configs as well as configs nested one level deep.
"""
nestedKeys = []
nestedVals = []
if flattened:
for k, v in config.items():
if isinstance(v, list):
if k != "layer_dims": # exclude layer dims, since it is already a list
nestedKeys.append(k)
nestedVals.append(v)
else:
for gk, gv in config.items():
if(gk != "task"):
for k, v in gv.items():
if isinstance(v, list):
if isinstance(v, list):
if (
k != "layer_dims"
): # exclude layer dims, since it is already a list
nestedKeys.append([gk, k])
nestedVals.append(v)
elif isinstance(v, dict):
logger.error("Config too deep!")
if len(nestedKeys) == 0:
unnestedConfig = [config]
else:
if flattened:
logger.info("Nested config at parameters: %s" % (", ".join(nestedKeys)))
else:
logger.info(
"Nested config at parameters: %s"
% (", ".join(".".join(x) for x in nestedKeys))
)
unnestedConfig = []
mesh = np.meshgrid(
*nestedVals
) # get all combinations, each dimension corresponds to one parameter type
        # flatten mesh into shape: [num_parameters, num_combinations] so we can iterate in 2d over any parameter combinations
mesh = [x.flatten() for x in mesh]
# loop over all combinations
for i in range(len(mesh[0])):
tempconfig = config.copy()
for j, k in enumerate(nestedKeys):
if isinstance(k, str):
tempconfig[k] = mesh[j][
i
] # get ith val of correct param value and overwrite original config
elif len(k) == 2:
tempconfig[k[0]][k[1]] = mesh[j][i] # set nested dictionary keys
else:
logger.error("Config too deep!")
unnestedConfig.append(tempconfig)
return unnestedConfig
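# --- Illustrative sketch (added, not part of the original module) ---
# A flattened config whose values are lists is expanded into one config per
# combination of those values (a cross product), e.g. two learning rates times
# two batch sizes yield four configs. The parameter names are made up here.
def _example_unnest_config():
    grid = {"learning_rate": [1e-5, 3e-5], "batch_size": [16, 32], "epochs": 2}
    configs = unnestConfig(grid, flattened=True)
    assert len(configs) == 4
    return configs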
| [
"torch.hub._get_torch_home"
] | 1.0.1 | cregouby/FARM | 552bc07acffbce4f1f84d926c040fdd17b4ddeb3 |
1.6 | from typing import Dict
from alnlp.modules.feedforward import FeedForward
from alnlp.modules.time_distributed import TimeDistributed
from .highway_variational_lstm import *
import torch
from alnlp.modules import util
from ...parsers.biaffine.biaffine import Biaffine
def initializer_1d(input_tensor, initializer):
assert len(input_tensor.size()) == 1
input_tensor = input_tensor.view(-1, 1)
input_tensor = initializer(input_tensor)
return input_tensor.view(-1)
class SpanRankingSRLDecoder(nn.Module):
def __init__(self, context_layer_output_dim, label_space_size, config) -> None:
super().__init__()
self.config = config
self.label_space_size = label_space_size
self.dropout = float(config.dropout)
self.use_gold_predicates = config.use_gold_predicates
# span width feature embedding
self.span_width_embedding = nn.Embedding(self.config.max_arg_width, self.config.span_width_feature_size)
# self.context_projective_layer = nn.Linear(2 * self.lstm_hidden_size, self.config.num_attention_heads)
# span scores
self.span_emb_size = 3 * context_layer_output_dim + self.config.span_width_feature_size
self.arg_unary_score_layers = nn.ModuleList([nn.Linear(self.span_emb_size, self.config.ffnn_size) if i == 0
else nn.Linear(self.config.ffnn_size, self.config.ffnn_size) for i
in range(self.config.ffnn_depth)]) # [,150]
self.arg_dropout_layers = nn.ModuleList([nn.Dropout(self.dropout) for _ in range(self.config.ffnn_depth)])
self.arg_unary_score_projection = nn.Linear(self.config.ffnn_size, 1)
# predicate scores
self.pred_unary_score_layers = nn.ModuleList(
[nn.Linear(context_layer_output_dim, self.config.ffnn_size) if i == 0
else nn.Linear(self.config.ffnn_size, self.config.ffnn_size) for i
in range(self.config.ffnn_depth)]) # [,150]
self.pred_dropout_layers = nn.ModuleList([nn.Dropout(self.dropout) for _ in range(self.config.ffnn_depth)])
self.pred_unary_score_projection = nn.Linear(self.config.ffnn_size, 1)
# srl scores
self.srl_unary_score_input_size = self.span_emb_size + context_layer_output_dim
self.srl_unary_score_layers = nn.ModuleList([nn.Linear(self.srl_unary_score_input_size, self.config.ffnn_size)
if i == 0 else nn.Linear(self.config.ffnn_size,
self.config.ffnn_size)
for i in range(self.config.ffnn_depth)])
self.srl_dropout_layers = nn.ModuleList([nn.Dropout(self.dropout) for _ in range(self.config.ffnn_depth)])
self.srl_unary_score_projection = nn.Linear(self.config.ffnn_size, self.label_space_size - 1)
if config.use_biaffine:
self.predicate_scale = TimeDistributed(FeedForward(context_layer_output_dim, 1, self.span_emb_size, 'ReLU'))
self.biaffine = Biaffine(self.span_emb_size, self.label_space_size - 1)
self.loss_reduction = config.loss_reduction
self.reset_parameters()
def reset_parameters(self):
init.xavier_uniform_(self.span_width_embedding.weight)
# init.xavier_uniform_(self.context_projective_layer.weight)
# initializer_1d(self.context_projective_layer.bias, init.xavier_uniform_)
for layer in self.arg_unary_score_layers:
init.xavier_uniform_(layer.weight)
initializer_1d(layer.bias, init.xavier_uniform_)
init.xavier_uniform_(self.arg_unary_score_projection.weight)
initializer_1d(self.arg_unary_score_projection.bias, init.xavier_uniform_)
for layer in self.pred_unary_score_layers:
init.xavier_uniform_(layer.weight)
initializer_1d(layer.bias, init.xavier_uniform_)
init.xavier_uniform_(self.pred_unary_score_projection.weight)
initializer_1d(self.pred_unary_score_projection.bias, init.xavier_uniform_)
for layer in self.srl_unary_score_layers:
init.xavier_uniform_(layer.weight)
initializer_1d(layer.bias, init.xavier_uniform_)
init.xavier_uniform_(self.srl_unary_score_projection.weight)
initializer_1d(self.srl_unary_score_projection.bias, init.xavier_uniform_)
return None
def forward(self, hidden_states, batch, mask=None):
gold_arg_ends, gold_arg_labels, gold_arg_starts, gold_predicates, masks, sent_lengths = SpanRankingSRLModel.unpack(
batch, mask=mask, training=self.training)
return self.decode(hidden_states, sent_lengths, masks, gold_arg_starts, gold_arg_ends, gold_arg_labels,
gold_predicates)
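    # Enumerate every candidate argument span up to max_arg_width and mask spans that run past the sentence end.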
@staticmethod
def get_candidate_spans(sent_lengths: torch.Tensor, max_sent_length, max_arg_width):
num_sentences = len(sent_lengths)
device = sent_lengths.device
candidate_starts = torch.arange(0, max_sent_length, device=device).expand(num_sentences, max_arg_width, -1)
candidate_width = torch.arange(0, max_arg_width, device=device).view(1, -1, 1)
candidate_ends = candidate_starts + candidate_width
candidate_starts = candidate_starts.contiguous().view(num_sentences, max_sent_length * max_arg_width)
candidate_ends = candidate_ends.contiguous().view(num_sentences, max_sent_length * max_arg_width)
actual_sent_lengths = sent_lengths.view(-1, 1).expand(-1, max_sent_length * max_arg_width)
candidate_mask = candidate_ends < actual_sent_lengths
candidate_starts = candidate_starts * candidate_mask
candidate_ends = candidate_ends * candidate_mask
return candidate_starts, candidate_ends, candidate_mask
@staticmethod
def exclusive_cumsum(input: torch.Tensor, exclusive=True):
"""
Args:
input: input is the sentence lengths tensor.
exclusive: exclude the last sentence length (Default value = True)
input(torch.Tensor :):
input: torch.Tensor:
Returns:
"""
assert exclusive is True
if exclusive is True:
exclusive_sent_lengths = input.new_zeros(1, dtype=torch.long)
result = torch.cumsum(torch.cat([exclusive_sent_lengths, input], 0)[:-1], 0).view(-1, 1)
else:
result = torch.cumsum(input, 0).view(-1, 1)
return result
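    # Flatten a [num_sentences, max_sentence_length, emb] tensor into [num_sentences * max_sentence_length, emb].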
def flatten_emb(self, emb):
num_sentences, max_sentence_length = emb.size()[0], emb.size()[1]
assert len(emb.size()) == 3
flatted_emb = emb.contiguous().view(num_sentences * max_sentence_length, -1)
return flatted_emb
def flatten_emb_in_sentence(self, emb, batch_sentences_mask):
num_sentences, max_sentence_length = emb.size()[0], emb.size()[1]
flatted_emb = self.flatten_emb(emb)
return flatted_emb[batch_sentences_mask.reshape(num_sentences * max_sentence_length)]
def get_span_emb(self, flatted_context_emb, flatted_candidate_starts, flatted_candidate_ends,
config, dropout=0.0):
batch_word_num = flatted_context_emb.size()[0]
# gather slices from embeddings according to indices
span_start_emb = flatted_context_emb[flatted_candidate_starts]
span_end_emb = flatted_context_emb[flatted_candidate_ends]
span_emb_feature_list = [span_start_emb, span_end_emb] # store the span vector representations for span rep.
span_width = 1 + flatted_candidate_ends - flatted_candidate_starts # [num_spans], generate the span width
max_arg_width = config.max_arg_width
# get the span width feature emb
span_width_index = span_width - 1
span_width_emb = self.span_width_embedding(span_width_index)
span_width_emb = F.dropout(span_width_emb, dropout, self.training)
span_emb_feature_list.append(span_width_emb)
"""head features"""
cpu_flatted_candidte_starts = flatted_candidate_starts
span_indices = torch.arange(0, max_arg_width, device=flatted_context_emb.device).view(1, -1) + \
cpu_flatted_candidte_starts.view(-1, 1) # For all the i, where i in [begin, ..i, end] for span
# reset the position index to the batch_word_num index with index - 1
span_indices = torch.clamp(span_indices, max=batch_word_num - 1)
num_spans, spans_width = span_indices.size()[0], span_indices.size()[1]
flatted_span_indices = span_indices.view(-1) # so Huge!!!, column is the span?
# if torch.cuda.is_available():
flatted_span_indices = flatted_span_indices
span_text_emb = flatted_context_emb.index_select(0, flatted_span_indices).view(num_spans, spans_width, -1)
span_indices_mask = util.lengths_to_mask(span_width, max_len=max_arg_width)
# project context output to num head
# head_scores = self.context_projective_layer.forward(flatted_context_emb)
# get span attention
# span_attention = head_scores.index_select(0, flatted_span_indices).view(num_spans, spans_width)
# span_attention = torch.add(span_attention, expanded_span_indices_log_mask).unsqueeze(2) # control the span len
# span_attention = F.softmax(span_attention, dim=1)
span_text_emb = span_text_emb * span_indices_mask.unsqueeze(2).expand(-1, -1, span_text_emb.size()[-1])
span_head_emb = torch.mean(span_text_emb, 1)
span_emb_feature_list.append(span_head_emb)
span_emb = torch.cat(span_emb_feature_list, 1)
return span_emb, None, span_text_emb, span_indices, span_indices_mask
def get_arg_unary_scores(self, span_emb):
"""Compute span score with FFNN(span embedding)
Args:
span_emb: tensor of [num_sentences, num_spans, emb_size]
config: param dropout:
num_labels: param name:
Returns:
"""
input = span_emb
for i, ffnn in enumerate(self.arg_unary_score_layers):
input = F.relu(ffnn.forward(input))
input = self.arg_dropout_layers[i].forward(input)
output = self.arg_unary_score_projection.forward(input)
return output
def get_pred_unary_scores(self, span_emb):
input = span_emb
for i, ffnn in enumerate(self.pred_unary_score_layers):
input = F.relu(ffnn.forward(input))
input = self.pred_dropout_layers[i].forward(input)
output = self.pred_unary_score_projection.forward(input)
return output
def extract_spans(self, candidate_scores, candidate_starts, candidate_ends, topk, max_sentence_length,
sort_spans, enforce_non_crossing):
"""extract the topk span indices
Args:
candidate_scores: param candidate_starts:
candidate_ends: param topk: [num_sentences]
max_sentence_length: param sort_spans:
enforce_non_crossing: return: indices [num_sentences, max_num_predictions]
candidate_starts:
topk:
sort_spans:
Returns:
"""
# num_sentences = candidate_scores.size()[0]
# num_input_spans = candidate_scores.size()[1]
max_num_output_spans = int(torch.max(topk))
indices = [score.topk(k)[1] for score, k in zip(candidate_scores, topk)]
output_span_indices_tensor = [F.pad(item, [0, max_num_output_spans - item.size()[0]], value=item[-1])
for item in indices]
output_span_indices_tensor = torch.stack(output_span_indices_tensor)
return output_span_indices_tensor
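    # Select rows from a batched embedding tensor using per-sentence indices (offset into the flattened batch).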
def batch_index_select(self, emb, indices):
num_sentences = emb.size()[0]
max_sent_length = emb.size()[1]
flatten_emb = self.flatten_emb(emb)
offset = (torch.arange(0, num_sentences, device=emb.device) * max_sent_length).unsqueeze(1)
return torch.index_select(flatten_emb, 0, (indices + offset).view(-1)) \
.view(indices.size()[0], indices.size()[1], -1)
def get_batch_topk(self, candidate_starts: torch.Tensor, candidate_ends, candidate_scores, topk_ratio, text_len,
max_sentence_length, sort_spans=False, enforce_non_crossing=True):
num_sentences = candidate_starts.size()[0]
max_sentence_length = candidate_starts.size()[1]
topk = torch.floor(text_len.to(torch.float) * topk_ratio).to(torch.long)
topk = torch.max(topk, torch.ones(num_sentences, device=candidate_starts.device, dtype=torch.long))
# this part should be implemented with C++
predicted_indices = self.extract_spans(candidate_scores, candidate_starts, candidate_ends, topk,
max_sentence_length, sort_spans, enforce_non_crossing)
predicted_starts = torch.gather(candidate_starts, 1, predicted_indices)
predicted_ends = torch.gather(candidate_ends, 1, predicted_indices)
predicted_scores = torch.gather(candidate_scores, 1, predicted_indices)
return predicted_starts, predicted_ends, predicted_scores, topk, predicted_indices
def get_dense_span_labels(self, span_starts, span_ends, span_labels, max_sentence_length,
span_parents=None):
num_sentences = span_starts.size()[0]
max_spans_num = span_starts.size()[1]
# span_starts = span_starts + 1 - (span_labels > 0).to(torch.long)
span_starts[(span_labels == 0) & (span_starts < max_sentence_length - 1)] += 1 # make start > end
sentence_indices = torch.arange(0, num_sentences, device=span_starts.device).unsqueeze(1).expand(-1,
max_spans_num)
sparse_indices = torch.cat([sentence_indices.unsqueeze(2), span_starts.unsqueeze(2), span_ends.unsqueeze(2)],
dim=2)
if span_parents is not None: # semantic span predicate offset
sparse_indices = torch.cat([sparse_indices, span_parents.unsqueeze(2)], 2)
rank = 3 if span_parents is None else 4
dense_labels = torch.sparse.LongTensor(sparse_indices.view(num_sentences * max_spans_num, rank).t(),
span_labels.view(-1),
torch.Size([num_sentences] + [max_sentence_length] * (rank - 1))) \
.to_dense()
return dense_labels
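    # Gather entries of a 4-D tensor at (sentence, arg_start, arg_end, predicate) index tuples.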
@staticmethod
def gather_4d(params, indices):
assert len(params.size()) == 4 and len(indices) == 4
indices_a, indices_b, indices_c, indices_d = indices
result = params[indices_a, indices_b, indices_c, indices_d]
return result
def get_srl_labels(self,
arg_starts,
arg_ends,
predicates,
gold_predicates,
gold_arg_starts,
gold_arg_ends,
gold_arg_labels,
max_sentence_length
):
num_sentences = arg_starts.size()[0]
max_arg_num = arg_starts.size()[1]
max_pred_num = predicates.size()[1]
sentence_indices_2d = torch.arange(0, num_sentences, device=arg_starts.device).unsqueeze(1).unsqueeze(2).expand(
-1, max_arg_num, max_pred_num)
expanded_arg_starts = arg_starts.unsqueeze(2).expand(-1, -1, max_pred_num)
expanded_arg_ends = arg_ends.unsqueeze(2).expand(-1, -1, max_pred_num)
expanded_predicates = predicates.unsqueeze(1).expand(-1, max_arg_num, -1)
dense_srl_labels = self.get_dense_span_labels(gold_arg_starts,
gold_arg_ends,
gold_arg_labels,
max_sentence_length, span_parents=gold_predicates) # ans
srl_labels = self.gather_4d(dense_srl_labels,
[sentence_indices_2d, expanded_arg_starts, expanded_arg_ends, expanded_predicates])
return srl_labels
def get_srl_unary_scores(self, span_emb):
input = span_emb
for i, ffnn in enumerate(self.srl_unary_score_layers):
input = F.relu(ffnn.forward(input))
input = self.srl_dropout_layers[i].forward(input)
output = self.srl_unary_score_projection.forward(input)
return output
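    # Score every (argument span, predicate) pair: pairwise FFNN scores plus the unary argument and predicate scores, with a dummy score prepended for the null label.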
def get_srl_scores(self, arg_emb, pred_emb, arg_scores, pred_scores, num_labels, config, dropout):
num_sentences = arg_emb.size()[0]
num_args = arg_emb.size()[1] # [batch_size, max_arg_num, arg_emb_size]
num_preds = pred_emb.size()[1] # [batch_size, max_pred_num, pred_emb_size]
unsqueezed_arg_emb = arg_emb.unsqueeze(2)
unsqueezed_pred_emb = pred_emb.unsqueeze(1)
expanded_arg_emb = unsqueezed_arg_emb.expand(-1, -1, num_preds, -1)
expanded_pred_emb = unsqueezed_pred_emb.expand(-1, num_args, -1, -1)
pair_emb_list = [expanded_arg_emb, expanded_pred_emb]
pair_emb = torch.cat(pair_emb_list, 3) # concatenate the argument emb and pre emb
pair_emb_size = pair_emb.size()[3]
flat_pair_emb = pair_emb.view(num_sentences * num_args * num_preds, pair_emb_size)
# get unary scores
flat_srl_scores = self.get_srl_unary_scores(flat_pair_emb)
srl_scores = flat_srl_scores.view(num_sentences, num_args, num_preds, -1)
if self.config.use_biaffine:
srl_scores += self.biaffine(arg_emb, self.predicate_scale(pred_emb)).permute([0, 2, 3, 1])
unsqueezed_arg_scores, unsqueezed_pred_scores = \
arg_scores.unsqueeze(2).unsqueeze(3), pred_scores.unsqueeze(1).unsqueeze(3)
srl_scores = srl_scores + unsqueezed_arg_scores + unsqueezed_pred_scores
dummy_scores = torch.zeros([num_sentences, num_args, num_preds, 1], device=arg_emb.device)
srl_scores = torch.cat([dummy_scores, srl_scores], 3)
return srl_scores
def get_srl_softmax_loss(self, srl_scores, srl_labels, num_predicted_args, num_predicted_preds):
srl_loss_mask = self.get_srl_loss_mask(srl_scores, num_predicted_args, num_predicted_preds)
loss = torch.nn.functional.cross_entropy(srl_scores[srl_loss_mask], srl_labels[srl_loss_mask],
reduction=self.loss_reduction)
return loss, srl_loss_mask
def get_srl_loss_mask(self, srl_scores, num_predicted_args, num_predicted_preds):
max_num_arg = srl_scores.size()[1]
max_num_pred = srl_scores.size()[2]
        # num_predicted_args is a 1D tensor; max_num_arg is an int giving the maximum number of predicted arguments in the batch
args_mask = util.lengths_to_mask(num_predicted_args, max_num_arg)
pred_mask = util.lengths_to_mask(num_predicted_preds, max_num_pred)
srl_loss_mask = args_mask.unsqueeze(2) & pred_mask.unsqueeze(1)
return srl_loss_mask
def decode(self, contextualized_embeddings, sent_lengths, masks, gold_arg_starts, gold_arg_ends, gold_arg_labels,
gold_predicates):
num_sentences, max_sent_length = masks.size()
device = sent_lengths.device
"""generate candidate spans with argument pruning"""
# candidate_starts [num_sentences, max_sent_length * max_arg_width]
candidate_starts, candidate_ends, candidate_mask = self.get_candidate_spans(
sent_lengths, max_sent_length, self.config.max_arg_width)
flatted_candidate_mask = candidate_mask.view(-1)
batch_word_offset = self.exclusive_cumsum(sent_lengths) # get the word offset in a batch
# choose the flatted_candidate_starts with the actual existing positions, i.e. exclude the illegal starts
flatted_candidate_starts = candidate_starts + batch_word_offset
flatted_candidate_starts = flatted_candidate_starts.view(-1)[flatted_candidate_mask].to(torch.long)
flatted_candidate_ends = candidate_ends + batch_word_offset
flatted_candidate_ends = flatted_candidate_ends.view(-1)[flatted_candidate_mask].to(torch.long)
# flatten the lstm output according to the sentence mask, i.e. exclude the illegal (padding) lstm output
flatted_context_output = self.flatten_emb_in_sentence(contextualized_embeddings, masks)
"""generate the span embedding"""
candidate_span_emb, head_scores, span_head_emb, head_indices, head_indices_log_mask = self.get_span_emb(
flatted_context_output, flatted_candidate_starts, flatted_candidate_ends,
self.config, dropout=self.dropout)
"""Get the span ids"""
candidate_span_number = candidate_span_emb.size()[0]
max_candidate_spans_num_per_sentence = candidate_mask.size()[1]
sparse_indices = candidate_mask.nonzero(as_tuple=False)
sparse_values = torch.arange(0, candidate_span_number, device=device)
candidate_span_ids = torch.sparse.FloatTensor(sparse_indices.t(), sparse_values,
torch.Size([num_sentences,
max_candidate_spans_num_per_sentence])).to_dense()
spans_log_mask = torch.log(candidate_mask.to(torch.float))
predict_dict = {"candidate_starts": candidate_starts, "candidate_ends": candidate_ends,
'candidate_arg_mask': candidate_mask, "head_scores": head_scores}
"""Get unary scores and topk of candidate argument spans."""
flatted_candidate_arg_scores = self.get_arg_unary_scores(candidate_span_emb)
candidate_arg_scores = flatted_candidate_arg_scores.index_select(0, candidate_span_ids.view(-1)) \
.view(candidate_span_ids.size()[0], candidate_span_ids.size()[1])
candidate_arg_scores = candidate_arg_scores + spans_log_mask
arg_starts, arg_ends, arg_scores, num_args, top_arg_indices = \
self.get_batch_topk(candidate_starts, candidate_ends, candidate_arg_scores,
self.config.argument_ratio, sent_lengths, max_sent_length,
sort_spans=False, enforce_non_crossing=False)
"""Get the candidate predicate"""
candidate_pred_ids = torch.arange(0, max_sent_length, device=device).unsqueeze(0).expand(num_sentences, -1)
candidate_pred_emb = contextualized_embeddings
candidate_pred_scores = self.get_pred_unary_scores(candidate_pred_emb)
candidate_pred_scores = candidate_pred_scores + torch.log(masks.to(torch.float).unsqueeze(2))
candidate_pred_scores = candidate_pred_scores.squeeze(2)
if self.use_gold_predicates is True:
predicates = gold_predicates[0]
num_preds = gold_predicates[1]
pred_scores = torch.zeros_like(predicates)
top_pred_indices = predicates
else:
predicates, _, pred_scores, num_preds, top_pred_indices = self.get_batch_topk(
candidate_pred_ids, candidate_pred_ids, candidate_pred_scores, self.config.predicate_ratio,
sent_lengths, max_sent_length,
sort_spans=False, enforce_non_crossing=False)
"""Get top arg embeddings"""
arg_span_indices = torch.gather(candidate_span_ids, 1, top_arg_indices) # [num_sentences, max_num_args]
arg_emb = candidate_span_emb.index_select(0, arg_span_indices.view(-1)).view(
arg_span_indices.size()[0], arg_span_indices.size()[1], -1
) # [num_sentences, max_num_args, emb]
"""Get top predicate embeddings"""
pred_emb = self.batch_index_select(candidate_pred_emb,
top_pred_indices) # [num_sentences, max_num_preds, emb]
"""Get the srl scores according to the arg emb and pre emb."""
srl_scores = self.get_srl_scores(arg_emb, pred_emb, arg_scores, pred_scores, self.label_space_size, self.config,
self.dropout) # [num_sentences, max_num_args, max_num_preds, num_labels]
if gold_arg_labels is not None:
"""Get the answers according to the labels"""
srl_labels = self.get_srl_labels(arg_starts, arg_ends, predicates, gold_predicates, gold_arg_starts,
gold_arg_ends, gold_arg_labels, max_sent_length)
"""Compute the srl loss"""
srl_loss, srl_mask = self.get_srl_softmax_loss(srl_scores, srl_labels, num_args, num_preds)
predict_dict.update({
'srl_mask': srl_mask,
'loss': srl_loss
})
else:
predict_dict['srl_mask'] = self.get_srl_loss_mask(srl_scores, num_args, num_preds)
predict_dict.update({
"candidate_arg_scores": candidate_arg_scores,
"candidate_pred_scores": candidate_pred_scores,
"predicates": predicates,
"arg_starts": arg_starts,
"arg_ends": arg_ends,
"arg_scores": arg_scores,
"pred_scores": pred_scores,
"num_args": num_args,
"num_preds": num_preds,
"arg_labels": torch.max(srl_scores, 1)[1], # [num_sentences, num_args, num_preds]
"srl_scores": srl_scores,
})
return predict_dict
class SpanRankingSRLModel(nn.Module):
def __init__(self, config, embed: torch.nn.Module, context_layer: torch.nn.Module, label_space_size):
super(SpanRankingSRLModel, self).__init__()
self.config = config
self.dropout = float(config.dropout)
self.lexical_dropout = float(self.config.lexical_dropout)
self.label_space_size = label_space_size
# Initialize layers and parameters
self.word_embedding_dim = embed.get_output_dim() # get the embedding dim
self.embed = embed
# Initialize context layer
self.context_layer = context_layer
context_layer_output_dim = context_layer.get_output_dim()
self.decoder = SpanRankingSRLDecoder(context_layer_output_dim, label_space_size, config)
def forward(self,
batch: Dict[str, torch.Tensor]
):
gold_arg_ends, gold_arg_labels, gold_arg_starts, gold_predicates, masks, sent_lengths = \
self.unpack(batch, training=self.training)
context_embeddings = self.embed(batch)
context_embeddings = F.dropout(context_embeddings, self.lexical_dropout, self.training)
contextualized_embeddings = self.context_layer(context_embeddings, masks)
return self.decoder.decode(contextualized_embeddings, sent_lengths, masks, gold_arg_starts, gold_arg_ends,
gold_arg_labels, gold_predicates)
@staticmethod
def unpack(batch, mask=None, training=False):
keys = 'token_length', 'predicate_offset', 'argument_begin_offset', 'argument_end_offset', 'srl_label_id'
sent_lengths, gold_predicates, gold_arg_starts, gold_arg_ends, gold_arg_labels = [batch.get(k, None) for k in
keys]
if mask is None:
mask = util.lengths_to_mask(sent_lengths)
# elif not training:
# sent_lengths = mask.sum(dim=1)
return gold_arg_ends, gold_arg_labels, gold_arg_starts, gold_predicates, mask, sent_lengths
| [
"torch.zeros",
"torch.Size",
"torch.cat",
"torch.stack",
"torch.arange",
"torch.gather",
"torch.max",
"torch.clamp",
"torch.ones",
"torch.nn.functional.cross_entropy",
"torch.zeros_like",
"torch.mean",
"torch.cumsum"
] | 1.6.0 | emorynlp/stem-cell-hypothesis | 48a628093d93d653865fbac6409d179cddd99293 |
1.8 | # Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Utilities for bounding box manipulation and GIoU.
"""
import torch
import numpy as np
from torchvision.ops.boxes import box_area
from typing import Tuple
#### Bounding box utilities imported from torchvision and converted to numpy
def np_box_area(boxes: np.array) -> np.array:
"""
Computes the area of a set of bounding boxes, which are specified by its
(x1, y1, x2, y2) coordinates.
Args:
boxes (Tensor[N, 4]): boxes for which the area will be computed. They
are expected to be in (x1, y1, x2, y2) format with
``0 <= x1 < x2`` and ``0 <= y1 < y2``.
Returns:
area (Tensor[N]): area for each box
"""
assert boxes.ndim == 2 and boxes.shape[-1] == 4
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py
# with slight modifications
def _box_inter_union(boxes1: np.array, boxes2: np.array) -> Tuple[np.array, np.array]:
area1 = np_box_area(boxes1)
area2 = np_box_area(boxes2)
lt = np.maximum(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
rb = np.minimum(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
wh = (rb - lt).clip(min=0) # [N,M,2]
inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
union = area1[:, None] + area2 - inter
return inter, union
def np_box_iou(boxes1: np.array, boxes2: np.array) -> np.array:
"""
Return intersection-over-union (Jaccard index) of boxes.
Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with
``0 <= x1 < x2`` and ``0 <= y1 < y2``.
Args:
boxes1 (Tensor[N, 4])
boxes2 (Tensor[M, 4])
Returns:
iou (Tensor[N, M]): the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2
"""
inter, union = _box_inter_union(boxes1, boxes2)
iou = inter / union
return iou
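# Convert boxes between (center_x, center_y, width, height) and (x0, y0, x1, y1) corner formats.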
def box_cxcywh_to_xyxy(x):
x_c, y_c, w, h = x.unbind(-1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=-1)
def box_xyxy_to_cxcywh(x):
x0, y0, x1, y1 = x.unbind(-1)
b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)]
return torch.stack(b, dim=-1)
# modified from torchvision to also return the union
def box_iou(boxes1, boxes2):
area1 = box_area(boxes1)
area2 = box_area(boxes2)
lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
wh = (rb - lt).clamp(min=0) # [N,M,2]
inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
union = area1[:, None] + area2 - inter
iou = inter / union
return iou, union
def generalized_box_iou(boxes1, boxes2):
"""
Generalized IoU from https://giou.stanford.edu/
The boxes should be in [x0, y0, x1, y1] format
Returns a [N, M] pairwise matrix, where N = len(boxes1)
and M = len(boxes2)
"""
# degenerate boxes gives inf / nan results
# so do an early check
assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
iou, union = box_iou(boxes1, boxes2)
lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
wh = (rb - lt).clamp(min=0) # [N,M,2]
area = wh[:, :, 0] * wh[:, :, 1]
return iou - (area - union) / area
def masks_to_boxes(masks):
"""Compute the bounding boxes around the provided masks
The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.
Returns a [N, 4] tensors, with the boxes in xyxy format
"""
if masks.numel() == 0:
return torch.zeros((0, 4), device=masks.device)
h, w = masks.shape[-2:]
y = torch.arange(0, h, dtype=torch.float)
x = torch.arange(0, w, dtype=torch.float)
y, x = torch.meshgrid(y, x)
x_mask = masks * x.unsqueeze(0)
x_max = x_mask.flatten(1).max(-1)[0]
x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
y_mask = masks * y.unsqueeze(0)
y_max = y_mask.flatten(1).max(-1)[0]
y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
return torch.stack([x_min, y_min, x_max, y_max], 1)
| [
"torch.zeros",
"torch.stack",
"torch.min",
"torch.arange",
"torch.max",
"torch.meshgrid"
] | 1.8.1 | antoyang/TubeDETR | 3c32cc92a0fdaa0c770d95a59d8764e0e212424c |
1.10 | # -*- coding: utf-8 -*-
"""
@date: 2020/11/21 4:16 PM
@file: test_resnest.py
@author: zj
@description:
"""
import torch
from zcls.config import cfg
from zcls.config.key_word import KEY_OUTPUT
from zcls.model.recognizers.resnet.resnet import ResNet
def test_data(model, input_shape, output_shape):
data = torch.randn(input_shape)
outputs = model(data)[KEY_OUTPUT]
print(outputs.shape)
assert outputs.shape == output_shape
def test_sknet():
config_file = 'configs/benchmarks/resnet-resnext/sknet50_zcls_imagenet_224.yaml'
cfg.merge_from_file(config_file)
model = ResNet(cfg)
print(model)
test_data(model, (3, 3, 224, 224), (3, 1000))
if __name__ == '__main__':
print('*' * 10 + ' sknet')
test_sknet()
| [
"torch.randn"
] | 1.10.0 | ZJCV/PyCls | 1ef59301646b6134f2ffcc009b4fd76550fa4089 |
1.1 | # Modified by Microsoft Corporation.
# Licensed under the MIT license.
import logging
import torch
import torch.utils.data as data
from torch.autograd import Variable
from utils.config import *
from utils.until_temp import entityList
def hasNumbers(inputString):
return any(char.isdigit() for char in inputString)
MEM_TOKEN_SIZE = 3
class Lang:
def __init__(self):
self.word2index = {}
self.word2count = {}
self.index2word = {UNK_token: 'UNK', PAD_token: "PAD", EOS_token: "EOS", SOS_token: "SOS"}
self.n_words = 4 # Count default tokens
def index_words(self, story, trg=False):
if trg:
for word in story.split(' '):
self.index_word(word)
else:
for word_triple in story:
for word in word_triple:
self.index_word(word)
def index_word(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
class Dataset(data.Dataset):
"""Custom data.Dataset compatible with data.DataLoader."""
def __init__(self, src_seq, trg_seq, index_seq, gate_seq,src_word2id, trg_word2id,max_len, conv_seq,ent,ID,kb_arr):
"""Reads source and target sequences from txt files."""
self.src_seqs = src_seq
self.trg_seqs = trg_seq
self.index_seqs = index_seq
self.gate_seq = gate_seq
self.num_total_seqs = len(self.src_seqs)
self.src_word2id = src_word2id
self.trg_word2id = trg_word2id
self.max_len = max_len
self.conv_seq = conv_seq
self.ent = ent
self.ID = ID
self.kb_arr = kb_arr
def __getitem__(self, index):
"""Returns one data pair (source and target)."""
src_seq = self.src_seqs[index]
trg_seq = self.trg_seqs[index]
index_s = self.index_seqs[index]
gete_s = self.gate_seq[index]
src_seq = self.preprocess(src_seq, self.src_word2id, trg=False)
trg_seq = self.preprocess(trg_seq, self.trg_word2id)
index_s = self.preprocess_inde(index_s,src_seq)
gete_s = self.preprocess_gate(gete_s)
conv_seq = self.conv_seq[index]
conv_seq = self.preprocess(conv_seq, self.src_word2id, trg=False)
ID = self.ID[index]
kb_arr = self.kb_arr[index]
return src_seq, trg_seq, index_s, gete_s,self.max_len,self.src_seqs[index],self.trg_seqs[index], conv_seq,self.ent[index], ID, kb_arr
def __len__(self):
return self.num_total_seqs
def preprocess(self, sequence, word2id, trg=True):
"""Converts words to ids."""
if trg:
story = [word2id[word] if word in word2id else UNK_token for word in sequence.split(' ')]+ [EOS_token]
else:
story = []
for i, word_triple in enumerate(sequence):
story.append([])
for ii, word in enumerate(word_triple):
temp = word2id[word] if word in word2id else UNK_token
story[i].append(temp)
try:
story = torch.Tensor(story)
except:
print(sequence)
print(story)
return story
def preprocess_inde(self, sequence, src_seq):
"""Converts words to ids."""
sequence = sequence + [len(src_seq)-1]
sequence = torch.Tensor(sequence)
return sequence
def preprocess_gate(self, sequence):
"""Converts words to ids."""
sequence = sequence + [0]
sequence = torch.Tensor(sequence)
return sequence
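# Pad and batch a list of samples (sorted by source length), returning time-major tensors, optionally moved to GPU.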
def collate_fn(data):
def merge(sequences,max_len):
lengths = [len(seq) for seq in sequences]
if (max_len):
padded_seqs = torch.ones(len(sequences), max(lengths), MEM_TOKEN_SIZE).long()
for i, seq in enumerate(sequences):
end = lengths[i]
padded_seqs[i,:end,:] = seq[:end]
else:
padded_seqs = torch.ones(len(sequences), max(lengths)).long()
for i, seq in enumerate(sequences):
end = lengths[i]
padded_seqs[i, :end] = seq[:end]
return padded_seqs, lengths
# sort a list by sequence length (descending order) to use pack_padded_sequence
data.sort(key=lambda x: len(x[0]), reverse=True)
    # separate source and target sequences
src_seqs, trg_seqs, ind_seqs, gete_s, max_len, src_plain,trg_plain, conv_seq, ent, ID, kb_arr = zip(*data)
# merge sequences (from tuple of 1D tensor to 2D tensor)
src_seqs, src_lengths = merge(src_seqs,max_len)
trg_seqs, trg_lengths = merge(trg_seqs,None)
ind_seqs, _ = merge(ind_seqs,None)
gete_s, _ = merge(gete_s,None)
conv_seqs, conv_lengths = merge(conv_seq, max_len)
src_seqs = Variable(src_seqs).transpose(0,1)
trg_seqs = Variable(trg_seqs).transpose(0,1)
ind_seqs = Variable(ind_seqs).transpose(0,1)
gete_s = Variable(gete_s).transpose(0,1)
conv_seqs = Variable(conv_seqs).transpose(0,1)
if USE_CUDA:
src_seqs = src_seqs.cuda()
trg_seqs = trg_seqs.cuda()
ind_seqs = ind_seqs.cuda()
gete_s = gete_s.cuda()
conv_seqs = conv_seqs.cuda()
return src_seqs, src_lengths, trg_seqs, trg_lengths, ind_seqs, gete_s, src_plain, trg_plain, conv_seqs, conv_lengths, ent, ID, kb_arr
def read_langs(file_name, entity, max_line = None):
logging.info(("Reading lines from {}".format(file_name)))
data=[]
contex_arr = []
conversation_arr = []
kb_arr = []
u=None
r=None
user_counter = 0
system_counter = 0
system_res_counter = 0
KB_counter = 0
dialog_counter = 0
with open(file_name) as fin:
cnt_ptr = 0
cnt_voc = 0
max_r_len = 0
cnt_lin = 1
time_counter = 1
for line in fin:
line=line.strip()
if line:
nid, line = line.split(' ', 1)
if '\t' in line:
u, r = line.split('\t')
if u!='<SILENCE>': user_counter += 1
system_counter += 1
gen_u = generate_memory(u, "$u", str(time_counter))
contex_arr += gen_u
conversation_arr += gen_u
r_index = []
gate = []
for key in r.split(' '):
if ENTPTR:
if (key in entity):
index = [loc for loc, val in enumerate(contex_arr) if (val[0] == key)]
if (index):
index = max(index)
gate.append(1)
cnt_ptr +=1
else:
index = len(contex_arr)
cnt_voc +=1
else:
index = len(contex_arr)
gate.append(0)
cnt_voc +=1
else:
index = [loc for loc, val in enumerate(contex_arr) if (val[0] == key)]
if (index):
index = max(index)
gate.append(1)
cnt_ptr +=1
else:
index = len(contex_arr)
gate.append(0)
cnt_voc +=1
r_index.append(index)
system_res_counter += 1
if len(r_index) > max_r_len:
max_r_len = len(r_index)
contex_arr_temp = contex_arr + [['$$$$']*MEM_TOKEN_SIZE]
ent = []
for key in r.split(' '):
if(key in entity):
ent.append(key)
data.append([contex_arr_temp,r,r_index,gate,list(conversation_arr),ent,dialog_counter, kb_arr])
gen_r = generate_memory(r, "$s", str(time_counter))
contex_arr += gen_r
conversation_arr += gen_r
time_counter += 1
else:
KB_counter += 1
r=line
if USEKB:
temp = generate_memory(r, "", "")
contex_arr += temp
kb_arr += temp
else:
cnt_lin+=1
if(max_line and cnt_lin>=max_line):
break
contex_arr=[]
conversation_arr = []
kb_arr = []
time_counter = 1
dialog_counter += 1
max_len = max([len(d[0]) for d in data])
logging.info("Pointer percentace= {} ".format(cnt_ptr/(cnt_ptr+cnt_voc)))
logging.info("Max responce Len: {}".format(max_r_len))
logging.info("Max Input Len: {}".format(max_len))
logging.info("Avg. User Utterances: {}".format(user_counter*1.0/dialog_counter))
logging.info("Avg. Bot Utterances: {}".format(system_counter*1.0/dialog_counter))
logging.info("Avg. KB results: {}".format(KB_counter*1.0/dialog_counter))
logging.info("Avg. responce Len: {}".format(system_res_counter*1.0/system_counter))
print('Sample: ',data[1][0],data[1][1],data[1][2],data[1][3])
return data, max_len, max_r_len
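# Turn an utterance or KB line into memory triples of size MEM_TOKEN_SIZE: (word, speaker, time) per word for dialogue turns, one padded (possibly reversed) triple per KB entry.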
def generate_memory(sent, speaker, time):
sent_new = []
sent_token = sent.split(' ')
if speaker=="$u" or speaker=="$s":
for word in sent_token:
temp = [word, speaker, 't'+str(time)] + ["PAD"]*(MEM_TOKEN_SIZE-3)
sent_new.append(temp)
else:
if sent_token[1]=="R_rating":
sent_token = sent_token + ["PAD"]*(MEM_TOKEN_SIZE-len(sent_token))
else:
sent_token = sent_token[::-1] + ["PAD"]*(MEM_TOKEN_SIZE-len(sent_token))
sent_new.append(sent_token)
return sent_new
def get_seq(pairs,lang,batch_size,type,max_len):
x_seq = []
y_seq = []
ptr_seq = []
gate_seq = []
conv_seq = []
ent = []
ID = []
kb_arr = []
for pair in pairs:
x_seq.append(pair[0])
y_seq.append(pair[1])
ptr_seq.append(pair[2])
gate_seq.append(pair[3])
conv_seq.append(pair[4])
ent.append(pair[5])
ID.append(pair[6])
kb_arr.append(pair[7])
if(type):
lang.index_words(pair[0])
lang.index_words(pair[1], trg=True)
dataset = Dataset(x_seq, y_seq,ptr_seq,gate_seq,lang.word2index, lang.word2index,max_len, conv_seq,ent,ID,kb_arr)
data_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=type,
collate_fn=collate_fn)
return data_loader
def prepare_data_seq(task,batch_size=100,shuffle=True):
file_train = 'data/dialog-bAbI-tasks/dialog-babi-task{}trn.txt'.format(task)
file_dev = 'data/dialog-bAbI-tasks/dialog-babi-task{}dev.txt'.format(task)
file_test = 'data/dialog-bAbI-tasks/dialog-babi-task{}tst.txt'.format(task)
if (int(task) != 6):
file_test_OOV = 'data/dialog-bAbI-tasks/dialog-babi-task{}tst-OOV.txt'.format(task)
if int(task)!=6:
ent = entityList('data/dialog-bAbI-tasks/dialog-babi-kb-all.txt',int(task))
else:
ent = entityList('data/dialog-bAbI-tasks/dialog-babi-task6-dstc2-kb.txt',int(task))
pair_train,max_len_train, max_r_train = read_langs(file_train, ent, max_line=None)
pair_dev,max_len_dev, max_r_dev = read_langs(file_dev, ent, max_line=None)
pair_test,max_len_test, max_r_test = read_langs(file_test, ent, max_line=None)
max_r_test_OOV = 0
max_len_test_OOV = 0
if (int(task) != 6):
pair_test_OOV,max_len_test_OOV, max_r_test_OOV = read_langs(file_test_OOV, ent, max_line=None)
max_len = max(max_len_train,max_len_dev,max_len_test,max_len_test_OOV) + 1
max_r = max(max_r_train,max_r_dev,max_r_test,max_r_test_OOV) +1
lang = Lang()
train = get_seq(pair_train,lang,batch_size,True,max_len)
dev = get_seq(pair_dev,lang,batch_size,False,max_len)
test = get_seq(pair_test,lang,batch_size,False,max_len)
if (int(task) != 6):
testOOV = get_seq(pair_test_OOV,lang,batch_size,False,max_len)
else:
testOOV = []
logging.info("Read %s sentence pairs train" % len(pair_train))
logging.info("Read %s sentence pairs dev" % len(pair_dev))
logging.info("Read %s sentence pairs test" % len(pair_test))
if (int(task) != 6):
logging.info("Read %s sentence pairs test" % len(pair_test_OOV))
logging.info("Max len Input %s " % max_len)
logging.info("Vocab_size %s " % lang.n_words)
logging.info("USE_CUDA={}".format(USE_CUDA))
return train, dev, test, testOOV, lang, max_len, max_r | [
"torch.autograd.Variable",
"torch.Tensor",
"torch.utils.data.DataLoader"
] | 1.1.0 | ngduyanhece/ConvLab | a04582a77537c1a706fbf64715baa9ad0be1301a |
1.1 | import torch as th
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from convlab.modules.word_policy.multiwoz.larl.latent_dialog.base_models import BaseModel
from convlab.modules.word_policy.multiwoz.larl.latent_dialog.corpora import SYS, EOS, PAD, BOS
from convlab.modules.word_policy.multiwoz.larl.latent_dialog.utils import INT, FLOAT, LONG, Pack, cast_type
from convlab.modules.word_policy.multiwoz.larl.latent_dialog.enc2dec.encoders import RnnUttEncoder
from convlab.modules.word_policy.multiwoz.larl.latent_dialog.enc2dec.decoders import DecoderRNN, GEN, TEACH_FORCE
from convlab.modules.word_policy.multiwoz.larl.latent_dialog.criterions import NLLEntropy, CatKLLoss, Entropy, NormKLLoss
from convlab.modules.word_policy.multiwoz.larl.latent_dialog import nn_lib
import numpy as np
class SysPerfectBD2Word(BaseModel):
def __init__(self, corpus, config):
super(SysPerfectBD2Word, self).__init__(config)
self.vocab = corpus.vocab
self.vocab_dict = corpus.vocab_dict
self.vocab_size = len(self.vocab)
self.bos_id = self.vocab_dict[BOS]
self.eos_id = self.vocab_dict[EOS]
self.pad_id = self.vocab_dict[PAD]
self.bs_size = corpus.bs_size
self.db_size = corpus.db_size
self.embedding = None
self.utt_encoder = RnnUttEncoder(vocab_size=self.vocab_size,
embedding_dim=config.embed_size,
feat_size=0,
goal_nhid=0,
rnn_cell=config.utt_rnn_cell,
utt_cell_size=config.utt_cell_size,
num_layers=config.num_layers,
input_dropout_p=config.dropout,
output_dropout_p=config.dropout,
bidirectional=config.bi_utt_cell,
variable_lengths=False,
use_attn=config.enc_use_attn,
embedding=self.embedding)
self.policy = nn.Sequential(nn.Linear(self.utt_encoder.output_size + self.db_size + self.bs_size,
config.dec_cell_size), nn.Tanh(), nn.Dropout(config.dropout))
self.decoder = DecoderRNN(input_dropout_p=config.dropout,
rnn_cell=config.dec_rnn_cell,
input_size=config.embed_size,
hidden_size=config.dec_cell_size,
num_layers=config.num_layers,
output_dropout_p=config.dropout,
bidirectional=False,
vocab_size=self.vocab_size,
use_attn=config.dec_use_attn,
ctx_cell_size=self.utt_encoder.output_size,
attn_mode=config.dec_attn_mode,
sys_id=self.bos_id,
eos_id=self.eos_id,
use_gpu=config.use_gpu,
max_dec_len=config.max_dec_len,
embedding=self.embedding)
self.nll = NLLEntropy(self.pad_id, config.avg_type)
def forward(self, data_feed, mode, clf=False, gen_type='greedy', return_latent=False):
ctx_lens = data_feed['context_lens'] # (batch_size, )
short_ctx_utts = self.np2var(self.extract_short_ctx(data_feed['contexts'], ctx_lens), LONG)
out_utts = self.np2var(data_feed['outputs'], LONG) # (batch_size, max_out_len)
bs_label = self.np2var(data_feed['bs'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)
db_label = self.np2var(data_feed['db'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)
batch_size = len(ctx_lens)
utt_summary, _, enc_outs = self.utt_encoder(short_ctx_utts.unsqueeze(1))
# get decoder inputs
dec_inputs = out_utts[:, :-1]
labels = out_utts[:, 1:].contiguous()
# pack attention context
if self.config.dec_use_attn:
attn_context = enc_outs
else:
attn_context = None
# create decoder initial states
dec_init_state = self.policy(th.cat([bs_label, db_label, utt_summary.squeeze(1)], dim=1)).unsqueeze(0)
# decode
if self.config.dec_rnn_cell == 'lstm':
# h_dec_init_state = utt_summary.squeeze(1).unsqueeze(0)
dec_init_state = tuple([dec_init_state, dec_init_state])
dec_outputs, dec_hidden_state, ret_dict = self.decoder(batch_size=batch_size,
dec_inputs=dec_inputs,
# (batch_size, response_size-1)
dec_init_state=dec_init_state, # tuple: (h, c)
attn_context=attn_context,
# (batch_size, max_ctx_len, ctx_cell_size)
mode=mode,
gen_type=gen_type,
beam_size=self.config.beam_size) # (batch_size, goal_nhid)
if mode == GEN:
return ret_dict, labels
if return_latent:
return Pack(nll=self.nll(dec_outputs, labels),
latent_action=dec_init_state)
else:
return Pack(nll=self.nll(dec_outputs, labels))
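    # Sample a response for reinforcement learning and return the per-step log-probabilities of the sampled tokens.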
def forward_rl(self, data_feed, max_words, temp=0.1):
ctx_lens = data_feed['context_lens'] # (batch_size, )
short_ctx_utts = self.np2var(self.extract_short_ctx(data_feed['contexts'], ctx_lens), LONG)
out_utts = self.np2var(data_feed['outputs'], LONG) # (batch_size, max_out_len)
bs_label = self.np2var(data_feed['bs'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)
db_label = self.np2var(data_feed['db'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)
batch_size = len(ctx_lens)
utt_summary, _, enc_outs = self.utt_encoder(short_ctx_utts.unsqueeze(1))
# pack attention context
if self.config.dec_use_attn:
attn_context = enc_outs
else:
attn_context = None
# create decoder initial states
dec_init_state = self.policy(th.cat([bs_label, db_label, utt_summary.squeeze(1)], dim=1)).unsqueeze(0)
if self.config.dec_rnn_cell == 'lstm':
dec_init_state = tuple([dec_init_state, dec_init_state])
# decode
logprobs, outs = self.decoder.forward_rl(batch_size=batch_size,
dec_init_state=dec_init_state,
attn_context=attn_context,
vocab=self.vocab,
max_words=max_words,
temp=temp)
return logprobs, outs
class SysPerfectBD2Cat(BaseModel):
def __init__(self, corpus, config):
super(SysPerfectBD2Cat, self).__init__(config)
self.vocab = corpus.vocab
self.vocab_dict = corpus.vocab_dict
self.vocab_size = len(self.vocab)
self.bos_id = self.vocab_dict[BOS]
self.eos_id = self.vocab_dict[EOS]
self.pad_id = self.vocab_dict[PAD]
self.bs_size = corpus.bs_size
self.db_size = corpus.db_size
self.k_size = config.k_size
self.y_size = config.y_size
self.simple_posterior = config.simple_posterior
self.contextual_posterior = config.contextual_posterior
self.embedding = None
self.utt_encoder = RnnUttEncoder(vocab_size=self.vocab_size,
embedding_dim=config.embed_size,
feat_size=0,
goal_nhid=0,
rnn_cell=config.utt_rnn_cell,
utt_cell_size=config.utt_cell_size,
num_layers=config.num_layers,
input_dropout_p=config.dropout,
output_dropout_p=config.dropout,
bidirectional=config.bi_utt_cell,
variable_lengths=False,
use_attn=config.enc_use_attn,
embedding=self.embedding)
self.c2z = nn_lib.Hidden2Discrete(self.utt_encoder.output_size + self.db_size + self.bs_size,
config.y_size, config.k_size, is_lstm=False)
self.z_embedding = nn.Linear(self.y_size * self.k_size, config.dec_cell_size, bias=False)
self.gumbel_connector = nn_lib.GumbelConnector(config.use_gpu)
if not self.simple_posterior:
if self.contextual_posterior:
self.xc2z = nn_lib.Hidden2Discrete(self.utt_encoder.output_size * 2 + self.db_size + self.bs_size,
config.y_size, config.k_size, is_lstm=False)
else:
self.xc2z = nn_lib.Hidden2Discrete(self.utt_encoder.output_size, config.y_size, config.k_size, is_lstm=False)
self.decoder = DecoderRNN(input_dropout_p=config.dropout,
rnn_cell=config.dec_rnn_cell,
input_size=config.embed_size,
hidden_size=config.dec_cell_size,
num_layers=config.num_layers,
output_dropout_p=config.dropout,
bidirectional=False,
vocab_size=self.vocab_size,
use_attn=config.dec_use_attn,
ctx_cell_size=config.dec_cell_size,
attn_mode=config.dec_attn_mode,
sys_id=self.bos_id,
eos_id=self.eos_id,
use_gpu=config.use_gpu,
max_dec_len=config.max_dec_len,
embedding=self.embedding)
self.nll = NLLEntropy(self.pad_id, config.avg_type)
self.cat_kl_loss = CatKLLoss()
self.entropy_loss = Entropy()
self.log_uniform_y = Variable(th.log(th.ones(1) / config.k_size))
self.eye = Variable(th.eye(self.config.y_size).unsqueeze(0))
self.beta = self.config.beta if hasattr(self.config, 'beta') else 0.0
if self.use_gpu:
self.log_uniform_y = self.log_uniform_y.cuda()
self.eye = self.eye.cuda()
def valid_loss(self, loss, batch_cnt=None):
if self.simple_posterior:
total_loss = loss.nll
if self.config.use_pr > 0.0:
total_loss += self.beta * loss.pi_kl
else:
total_loss = loss.nll + loss.pi_kl
if self.config.use_mi:
total_loss += (loss.b_pr * self.beta)
if self.config.use_diversity:
total_loss += loss.diversity
return total_loss
def forward(self, data_feed, mode, clf=False, gen_type='greedy', use_py=None, return_latent=False):
ctx_lens = data_feed['context_lens'] # (batch_size, )
short_ctx_utts = self.np2var(self.extract_short_ctx(data_feed['contexts'], ctx_lens), LONG)
out_utts = self.np2var(data_feed['outputs'], LONG) # (batch_size, max_out_len)
bs_label = self.np2var(data_feed['bs'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)
db_label = self.np2var(data_feed['db'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)
batch_size = len(ctx_lens)
utt_summary, _, enc_outs = self.utt_encoder(short_ctx_utts.unsqueeze(1))
# get decoder inputs
dec_inputs = out_utts[:, :-1]
labels = out_utts[:, 1:].contiguous()
# create decoder initial states
enc_last = th.cat([bs_label, db_label, utt_summary.squeeze(1)], dim=1)
# create decoder initial states
if self.simple_posterior:
logits_qy, log_qy = self.c2z(enc_last)
sample_y = self.gumbel_connector(logits_qy, hard=mode==GEN)
log_py = self.log_uniform_y
else:
logits_py, log_py = self.c2z(enc_last)
# encode response and use posterior to find q(z|x, c)
x_h, _, _ = self.utt_encoder(out_utts.unsqueeze(1))
if self.contextual_posterior:
logits_qy, log_qy = self.xc2z(th.cat([enc_last, x_h.squeeze(1)], dim=1))
else:
logits_qy, log_qy = self.xc2z(x_h.squeeze(1))
# use prior at inference time, otherwise use posterior
if mode == GEN or (use_py is not None and use_py is True):
sample_y = self.gumbel_connector(logits_py, hard=False)
else:
sample_y = self.gumbel_connector(logits_qy, hard=True)
# pack attention context
if self.config.dec_use_attn:
z_embeddings = th.t(self.z_embedding.weight).split(self.k_size, dim=0)
attn_context = []
temp_sample_y = sample_y.view(-1, self.config.y_size, self.config.k_size)
for z_id in range(self.y_size):
attn_context.append(th.mm(temp_sample_y[:, z_id], z_embeddings[z_id]).unsqueeze(1))
attn_context = th.cat(attn_context, dim=1)
dec_init_state = th.sum(attn_context, dim=1).unsqueeze(0)
else:
dec_init_state = self.z_embedding(sample_y.view(1, -1, self.config.y_size * self.config.k_size))
attn_context = None
# decode
if self.config.dec_rnn_cell == 'lstm':
dec_init_state = tuple([dec_init_state, dec_init_state])
dec_outputs, dec_hidden_state, ret_dict = self.decoder(batch_size=batch_size,
dec_inputs=dec_inputs,
# (batch_size, response_size-1)
dec_init_state=dec_init_state, # tuple: (h, c)
attn_context=attn_context,
# (batch_size, max_ctx_len, ctx_cell_size)
mode=mode,
gen_type=gen_type,
beam_size=self.config.beam_size) # (batch_size, goal_nhid)
if mode == GEN:
ret_dict['sample_z'] = sample_y
ret_dict['log_qy'] = log_qy
return ret_dict, labels
else:
result = Pack(nll=self.nll(dec_outputs, labels))
# regularization qy to be uniform
avg_log_qy = th.exp(log_qy.view(-1, self.config.y_size, self.config.k_size))
avg_log_qy = th.log(th.mean(avg_log_qy, dim=0) + 1e-15)
b_pr = self.cat_kl_loss(avg_log_qy, self.log_uniform_y, batch_size, unit_average=True)
mi = self.entropy_loss(avg_log_qy, unit_average=True) - self.entropy_loss(log_qy, unit_average=True)
pi_kl = self.cat_kl_loss(log_qy, log_py, batch_size, unit_average=True)
q_y = th.exp(log_qy).view(-1, self.config.y_size, self.config.k_size) # b
p = th.pow(th.bmm(q_y, th.transpose(q_y, 1, 2)) - self.eye, 2)
result['pi_kl'] = pi_kl
result['diversity'] = th.mean(p)
result['nll'] = self.nll(dec_outputs, labels)
result['b_pr'] = b_pr
result['mi'] = mi
return result
def forward_rl(self, data_feed, max_words, temp=0.1):
ctx_lens = data_feed['context_lens'] # (batch_size, )
short_ctx_utts = self.np2var(self.extract_short_ctx(data_feed['contexts'], ctx_lens), LONG)
bs_label = self.np2var(data_feed['bs'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)
db_label = self.np2var(data_feed['db'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)
batch_size = len(ctx_lens)
utt_summary, _, enc_outs = self.utt_encoder(short_ctx_utts.unsqueeze(1))
# create decoder initial states
enc_last = th.cat([bs_label, db_label, utt_summary.squeeze(1)], dim=1)
# create decoder initial states
if self.simple_posterior:
logits_py, log_qy = self.c2z(enc_last)
else:
logits_py, log_qy = self.c2z(enc_last)
qy = F.softmax(logits_py / temp, dim=1) # (batch_size, vocab_size, )
log_qy = F.log_softmax(logits_py, dim=1) # (batch_size, vocab_size, )
idx = th.multinomial(qy, 1).detach()
logprob_sample_z = log_qy.gather(1, idx).view(-1, self.y_size)
joint_logpz = th.sum(logprob_sample_z, dim=1)
sample_y = cast_type(Variable(th.zeros(log_qy.size())), FLOAT, self.use_gpu)
sample_y.scatter_(1, idx, 1.0)
# pack attention context
if self.config.dec_use_attn:
z_embeddings = th.t(self.z_embedding.weight).split(self.k_size, dim=0)
attn_context = []
temp_sample_y = sample_y.view(-1, self.config.y_size, self.config.k_size)
for z_id in range(self.y_size):
attn_context.append(th.mm(temp_sample_y[:, z_id], z_embeddings[z_id]).unsqueeze(1))
attn_context = th.cat(attn_context, dim=1)
dec_init_state = th.sum(attn_context, dim=1).unsqueeze(0)
else:
dec_init_state = self.z_embedding(sample_y.view(1, -1, self.config.y_size * self.config.k_size))
attn_context = None
# decode
if self.config.dec_rnn_cell == 'lstm':
dec_init_state = tuple([dec_init_state, dec_init_state])
# decode
logprobs, outs = self.decoder.forward_rl(batch_size=batch_size,
dec_init_state=dec_init_state,
attn_context=attn_context,
vocab=self.vocab,
max_words=max_words,
temp=0.1)
return logprobs, outs, joint_logpz, sample_y
class SysPerfectBD2Gauss(BaseModel):
def __init__(self, corpus, config):
super(SysPerfectBD2Gauss, self).__init__(config)
self.vocab = corpus.vocab
self.vocab_dict = corpus.vocab_dict
self.vocab_size = len(self.vocab)
self.bos_id = self.vocab_dict[BOS]
self.eos_id = self.vocab_dict[EOS]
self.pad_id = self.vocab_dict[PAD]
self.bs_size = corpus.bs_size
self.db_size = corpus.db_size
self.y_size = config.y_size
self.simple_posterior = config.simple_posterior
self.embedding = None
self.utt_encoder = RnnUttEncoder(vocab_size=self.vocab_size,
embedding_dim=config.embed_size,
feat_size=0,
goal_nhid=0,
rnn_cell=config.utt_rnn_cell,
utt_cell_size=config.utt_cell_size,
num_layers=config.num_layers,
input_dropout_p=config.dropout,
output_dropout_p=config.dropout,
bidirectional=config.bi_utt_cell,
variable_lengths=False,
use_attn=config.enc_use_attn,
embedding=self.embedding)
self.c2z = nn_lib.Hidden2Gaussian(self.utt_encoder.output_size + self.db_size + self.bs_size,
config.y_size, is_lstm=False)
self.gauss_connector = nn_lib.GaussianConnector(self.use_gpu)
self.z_embedding = nn.Linear(self.y_size, config.dec_cell_size)
if not self.simple_posterior:
self.xc2z = nn_lib.Hidden2Gaussian(self.utt_encoder.output_size * 2 + self.db_size + self.bs_size,
config.y_size, is_lstm=False)
self.decoder = DecoderRNN(input_dropout_p=config.dropout,
rnn_cell=config.dec_rnn_cell,
input_size=config.embed_size,
hidden_size=config.dec_cell_size,
num_layers=config.num_layers,
output_dropout_p=config.dropout,
bidirectional=False,
vocab_size=self.vocab_size,
use_attn=config.dec_use_attn,
ctx_cell_size=config.dec_cell_size,
attn_mode=config.dec_attn_mode,
sys_id=self.bos_id,
eos_id=self.eos_id,
use_gpu=config.use_gpu,
max_dec_len=config.max_dec_len,
embedding=self.embedding)
self.nll = NLLEntropy(self.pad_id, config.avg_type)
self.gauss_kl = NormKLLoss(unit_average=True)
self.zero = cast_type(th.zeros(1), FLOAT, self.use_gpu)
def valid_loss(self, loss, batch_cnt=None):
if self.simple_posterior:
total_loss = loss.nll
if self.config.use_pr > 0.0:
total_loss += self.config.beta * loss.pi_kl
else:
total_loss = loss.nll + loss.pi_kl
return total_loss
def forward(self, data_feed, mode, clf=False, gen_type='greedy', use_py=None, return_latent=False):
ctx_lens = data_feed['context_lens'] # (batch_size, )
short_ctx_utts = self.np2var(self.extract_short_ctx(data_feed['contexts'], ctx_lens), LONG)
out_utts = self.np2var(data_feed['outputs'], LONG) # (batch_size, max_out_len)
bs_label = self.np2var(data_feed['bs'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)
db_label = self.np2var(data_feed['db'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)
batch_size = len(ctx_lens)
utt_summary, _, enc_outs = self.utt_encoder(short_ctx_utts.unsqueeze(1))
# get decoder inputs
dec_inputs = out_utts[:, :-1]
labels = out_utts[:, 1:].contiguous()
# create decoder initial states
enc_last = th.cat([bs_label, db_label, utt_summary.squeeze(1)], dim=1)
# create decoder initial states
if self.simple_posterior:
q_mu, q_logvar = self.c2z(enc_last)
sample_z = self.gauss_connector(q_mu, q_logvar)
p_mu, p_logvar = self.zero, self.zero
else:
p_mu, p_logvar = self.c2z(enc_last)
# encode response and use posterior to find q(z|x, c)
x_h, _, _ = self.utt_encoder(out_utts.unsqueeze(1))
q_mu, q_logvar = self.xc2z(th.cat([enc_last, x_h.squeeze(1)], dim=1))
# use prior at inference time, otherwise use posterior
if mode == GEN or use_py:
sample_z = self.gauss_connector(p_mu, p_logvar)
else:
sample_z = self.gauss_connector(q_mu, q_logvar)
# pack attention context
dec_init_state = self.z_embedding(sample_z.unsqueeze(0))
attn_context = None
# decode
if self.config.dec_rnn_cell == 'lstm':
dec_init_state = tuple([dec_init_state, dec_init_state])
dec_outputs, dec_hidden_state, ret_dict = self.decoder(batch_size=batch_size,
dec_inputs=dec_inputs,
dec_init_state=dec_init_state, # tuple: (h, c)
attn_context=attn_context,
mode=mode,
gen_type=gen_type,
beam_size=self.config.beam_size) # (batch_size, goal_nhid)
if mode == GEN:
ret_dict['sample_z'] = sample_z
return ret_dict, labels
else:
result = Pack(nll=self.nll(dec_outputs, labels))
pi_kl = self.gauss_kl(q_mu, q_logvar, p_mu, p_logvar)
result['pi_kl'] = pi_kl
result['nll'] = self.nll(dec_outputs, labels)
return result
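    # Element-wise log-density of a diagonal Gaussian with mean mu and log-variance logvar, evaluated at sample_z.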
def gaussian_logprob(self, mu, logvar, sample_z):
var = th.exp(logvar)
constant = float(-0.5 * np.log(2*np.pi))
logprob = constant - 0.5 * logvar - th.pow((mu-sample_z), 2) / (2.0*var)
return logprob
def forward_rl(self, data_feed, max_words, temp=0.1):
ctx_lens = data_feed['context_lens'] # (batch_size, )
short_ctx_utts = self.np2var(self.extract_short_ctx(data_feed['contexts'], ctx_lens), LONG)
bs_label = self.np2var(data_feed['bs'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)
db_label = self.np2var(data_feed['db'], FLOAT) # (batch_size, max_ctx_len, max_utt_len)
batch_size = len(ctx_lens)
utt_summary, _, enc_outs = self.utt_encoder(short_ctx_utts.unsqueeze(1))
# create decoder initial states
enc_last = th.cat([bs_label, db_label, utt_summary.squeeze(1)], dim=1)
# create decoder initial states
p_mu, p_logvar = self.c2z(enc_last)
sample_z = th.normal(p_mu, th.sqrt(th.exp(p_logvar))).detach()
logprob_sample_z = self.gaussian_logprob(p_mu, self.zero, sample_z)
joint_logpz = th.sum(logprob_sample_z, dim=1)
# pack attention context
dec_init_state = self.z_embedding(sample_z.unsqueeze(0))
attn_context = None
# decode
if self.config.dec_rnn_cell == 'lstm':
dec_init_state = tuple([dec_init_state, dec_init_state])
# decode
logprobs, outs = self.decoder.forward_rl(batch_size=batch_size,
dec_init_state=dec_init_state,
attn_context=attn_context,
vocab=self.vocab,
max_words=max_words,
temp=0.1)
return logprobs, outs, joint_logpz, sample_z
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.cat",
"torch.nn.Tanh",
"torch.pow",
"torch.nn.functional.log_softmax",
"torch.ones",
"torch.multinomial",
"torch.mm",
"torch.eye",
"torch.t",
"torch.nn.functional.softmax",
"torch.transpose",
"torch.exp",
"torch.mean",
"torch.sum"
] | 1.1.0 | ngduyanhece/ConvLab | a04582a77537c1a706fbf64715baa9ad0be1301a |
1.3 | import torch
import torch.nn as nn
from torch.distributions.categorical import Categorical
from torch.optim import Adam
import numpy as np
import gym
from gym.spaces import Discrete, Box
from spinup.examples.pytorch.broil_rtg_pg_v2.cvar_utils import cvar_enumerate_pg
from spinup.examples.pytorch.broil_rtg_pg_v2.cartpole_reward_utils import CartPoleReward
def mlp(sizes, activation=nn.Tanh, output_activation=nn.Identity):
# Build a feedforward neural network.
layers = []
for j in range(len(sizes)-1):
act = activation if j < len(sizes)-2 else output_activation
layers += [nn.Linear(sizes[j], sizes[j+1]), act()]
return nn.Sequential(*layers)
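# Compute reward-to-go: for each timestep, the sum of (possibly vector-valued) rewards from that step to the end of the episode.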
def reward_to_go(rews):
n = len(rews)
rtgs = np.zeros_like(rews)
for i in reversed(range(n)):
rtgs[i] = rews[i] + (rtgs[i+1] if i+1 < n else 0)
return rtgs
def train(reward_dist, lamda, alpha=0.95, env_name='CartPole-v0', hidden_sizes=[32], lr=1e-2,
epochs=50, batch_size=5000, render=False):
# make environment, check spaces, get obs / act dims
env = gym.make(env_name)
assert isinstance(env.observation_space, Box), \
"This example only works for envs with continuous state spaces."
assert isinstance(env.action_space, Discrete), \
"This example only works for envs with discrete action spaces."
obs_dim = env.observation_space.shape[0]
n_acts = env.action_space.n
# make core of policy network
logits_net = mlp(sizes=[obs_dim]+hidden_sizes+[n_acts])
# make function to compute action distribution
def get_policy(obs):
logits = logits_net(obs)
return Categorical(logits=logits)
# make action selection function (outputs int actions, sampled from policy)
def get_action(obs):
return get_policy(obs).sample().item()
# make loss function whose gradient, for the right data, is policy gradient
def compute_loss(obs, act, weights):
logp = get_policy(obs).log_prob(act)
return -(logp * weights).mean()
#### compute BROIL policy gradient loss (robust version)
def compute_broil_weights(batch_rets, batch_rewards_to_go):
'''batch_returns: list of numpy arrays of size num_rollouts x num_reward_fns
batch_rewards_to_go: list of rewards to go by reward function over all rollouts,
size is num_rollouts*ave_rollout_length x num_reward_fns
'''
#inputs are lists of numpy arrays
#need to compute BROIL weights for policy gradient and convert to pytorch
#first find the expected on-policy return for current policy under each reward function in the posterior
exp_batch_rets = np.mean(batch_rets, axis=0)
print(exp_batch_rets)
posterior_reward_weights = reward_dist.posterior
#calculate sigma and find the conditional value at risk given the current policy
sigma, cvar = cvar_enumerate_pg(exp_batch_rets, posterior_reward_weights, alpha)
print("sigma = {}, cvar = {}".format(sigma, cvar))
#compute BROIL policy gradient weights
total_rollout_steps = len(batch_rewards_to_go)
broil_weights = np.zeros(total_rollout_steps)
for i,prob_r in enumerate(posterior_reward_weights):
if sigma > exp_batch_rets[i]:
w_r_i = lamda + (1 - lamda) / (1 - alpha)
else:
w_r_i = lamda
broil_weights += prob_r * w_r_i * np.array(batch_rewards_to_go)[:,i]
return broil_weights,cvar
# make optimizer
optimizer = Adam(logits_net.parameters(), lr=lr)
# for training policy
def train_one_epoch():
# make some empty lists for logging.
batch_obs = [] # for observations
batch_acts = [] # for actions
batch_rewards_to_go = [] # for reward-to-go weighting in policy gradient
batch_rets = [] # for measuring episode returns
batch_lens = [] # for measuring episode lengths
# reset episode-specific variables
obs = env.reset() # first obs comes from starting distribution
done = False # signal from environment that episode is over
ep_rews = [] # list for rewards accrued throughout ep
# render first episode of each epoch
finished_rendering_this_epoch = False
# collect experience by acting in the environment with current policy
while True:
# rendering
if (not finished_rendering_this_epoch) and render:
env.render()
#print(obs[0])
# save obs
batch_obs.append(obs.copy())
# act in the environment
act = get_action(torch.as_tensor(obs, dtype=torch.float32))
obs, rew, done, _ = env.step(act)
# save action, posterior over reward
batch_acts.append(act)
## old code from normal policy gradient:
## ep_rews.append(rew)
#### New code for BROIL
rew_dist = reward_dist.get_reward_distribution(obs) #S create reward
ep_rews.append(rew_dist)
####
if done:
# if episode is over, record info about episode
## Old code
## ep_ret, ep_len = sum(ep_rews), len(ep_rews)
#### New code
ep_ret_dist, ep_len = np.sum(ep_rews, axis=0), len(ep_rews)
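                # ep_ret_dist is a vector of episode returns, one entry per reward function in the posterior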
####
batch_rets.append(ep_ret_dist)
batch_lens.append(ep_len)
# the weight for each logprob(a_t|s_t) is reward-to-go from t
#### we are now computing this for every element in the reward function posterior but we can use the same function
batch_rewards_to_go.extend(reward_to_go(ep_rews))
# reset episode-specific variables
obs, done, ep_rews = env.reset(), False, []
# won't render again this epoch
finished_rendering_this_epoch = True
# end experience loop if we have enough of it
if len(batch_obs) > batch_size:
break
#### take a single BROIL policy gradient update step
broil_weights, cvar = compute_broil_weights(batch_rets, batch_rewards_to_go)
####
optimizer.zero_grad()
batch_loss = compute_loss(obs=torch.as_tensor(batch_obs, dtype=torch.float32),
act=torch.as_tensor(batch_acts, dtype=torch.int32),
weights=torch.as_tensor(broil_weights, dtype=torch.float32)
)
batch_loss.backward()
optimizer.step()
return batch_loss, batch_rets, batch_lens, cvar
# training loop
cvar_list = []
exp_ret_list = []
wc_ret_list = []
for i in range(epochs):
batch_loss, batch_rets, batch_lens, cvar = train_one_epoch()
exp_ret = np.dot(np.mean(batch_rets,axis=0),reward_dist.posterior)
worst_case_return = np.min(np.mean(batch_rets, axis=0))
cvar_list.append(cvar)
exp_ret_list.append(exp_ret)
wc_ret_list.append(worst_case_return)
print('epoch: %3d \t loss: %.3f \t exp return: %.3f \t cvar: %.3f \t wc return: %.3f \t ep_len: %.3f'%
(i, batch_loss, exp_ret, cvar, worst_case_return, np.mean(batch_lens)))
import matplotlib.pyplot as plt
plt.figure()
plt.plot(cvar_list)
plt.title("conditional value at risk")
plt.figure()
plt.plot(exp_ret_list)
plt.title("expected return")
plt.figure()
plt.plot(wc_ret_list)
plt.title("worst case return")
plt.show()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--env_name', '--env', type=str, default='CartPole-v0')
parser.add_argument('--render', action='store_true')
parser.add_argument('--alpha', default=0.95, type=float, help="alpha for alpha CVaR")
parser.add_argument('--lamda', default = 0.0, type=float, help='blending between exp return (lamda=1) and cvar maximization (lamda=0)')
parser.add_argument('--lr', type=float, default=1e-2)
parser.add_argument('--epochs', type=int, default=100)
args = parser.parse_args()
print('\nUsing reward-to-go formulation of BROIL policy gradient.\n')
#print('\nUsing only two reward functions in posterior')
#print("R1(s) = +1 (if s <= 0) +2 (if s > 0)")
#print("R2(s) = +1 (if s <= 0) -10 (if s > 0)")
#print("Pr(R1) = 0.95")
#print("Pr(R2) = 0.05")
#print("Expected reward R(s) = +1 (if s <= 0) +1.4 (if s > 0)")
#create reward function distribution
reward_dist = CartPoleReward()
train(reward_dist, args.lamda, args.alpha, env_name=args.env_name, epochs=args.epochs, render=args.render, lr=args.lr)
| [
"torch.nn.Linear",
"torch.nn.Sequential",
"torch.distributions.categorical.Categorical",
"torch.as_tensor"
] | 1.3.1 | jerryzhucs21/spinningup | 2992e6a8163d78c3f82a3d92c5235fda0527c398 |
1.6 | import numpy as np
import torch
from torch.utils import data
import sys
from utils.generate_dataset import *
from HypHC.datasets.triples import samples_triples
class HyperLoader(data.Dataset):
def __init__(self, data_dir, split_indices, restrict_labels=[0,1,2,3,4,5,6], chromosome="all"):
'''
Takes in all the relevant arguments to produce the dataset.
Arguments:
`data_dir`: directory in which data (either text files or numpy arrays) are located
        `split_indices`: indices for the data split (train/test/valid)
        `restrict_labels`: list of super-populations to include in analysis. Indices correspond to 'EUR', 'EAS', 'AMR', 'SAS', 'AFR', 'OCE', 'WAS'
        `chromosome`: chromosome to load ("all" uses the pre-built whole-genome arrays; otherwise a single chromosome id)
'''
self.data_dir = data_dir
self.restrict_labels = restrict_labels
self.chromosome = chromosome
self.split_indices = split_indices
self.snps, self.pop_labels, self.suppop_labels, self.pop_label_index, self.suppop_label_index = self.load_data()
def load_data(self):
'''
Loads SNP and label data from the necessary file locations
'''
#If we want all chromosomes, then we have the arrays already pre-created
if self.chromosome =="all":
file_order = ["all_snps.npy", "labels_suppop.npy", "labels_pop.npy",
"coords.npy", "pop_index.npy", "pop_code_index.npy", "suppop_code_index.npy"]
test_data = tuple([np.load(self.data_dir + x) for x in file_order])
ind_data = test_data[0]
else:
#The data for individual chromosomes is in a slightly different format
test_data = load_dataset(self.data_dir + "ref_final_beagle_phased_1kg_hgdp_sgdp_chr%s_hg19.vcf.gz"%(self.chromosome),
self.data_dir + "reference_panel_metadata.tsv", "./", chromosome=self.chromosome,
verbose=True, filter_admixed=True, filter_missing_coord=True)
ind_data = test_data[0].reshape([test_data[0].shape[0], test_data[0].shape[1] * test_data[0].shape[2]]).T
#We've unfolded each set of 23 chromosomes as a "different" individual
#So we must do the same for the labels by doubling them
ind_pop_labels = np.repeat(test_data[2], 2).astype(int)
ind_suppop_labels = np.repeat(test_data[1], 2).astype(int)
#Restrict to only the super-populations we've specified
pop_indices = np.argwhere(np.isin(ind_suppop_labels, self.restrict_labels)).T[0]
indices = np.intersect1d(pop_indices, self.split_indices)
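        # keep only samples that are both in the requested super-populations and in this split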
#Return everything
return ind_data[indices], ind_pop_labels[indices], ind_suppop_labels[indices], test_data[4], test_data[6]
def __len__(self):
return len(self.snps)
def __getitem__(self, index):
'''
Returns data and labels for the current index
'''
return torch.tensor(self.snps[index]), torch.tensor(self.suppop_labels[index]), torch.tensor(self.pop_labels[index])
| [
"torch.tensor"
] | 1.6.0 | AI-sandbox/hyperLAI | 49f1a9d3c645ee0e5b0c2ed16d54ee8df0626689 |
0.4 | import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import math
from sequicity.config import global_config as cfg
import copy, random, time, logging
from torch.distributions import Categorical
from sequicity.reader import pad_sequences
import pdb
import simulator.dialog_config as dialog_config
import pdb
def cuda_(var):
return var.cuda() if cfg.cuda else var
def toss_(p):
return random.randint(0, 99) <= p
def nan(v):
if type(v) is float:
return v == float('nan')
return np.isnan(np.sum(v.data.cpu().numpy()))
def get_sparse_input_aug(x_input_np):
"""
    Build the sparse copy-augmented input representation.
    :param x_input_np: numpy array of token ids, [T, B]
    :return: FloatTensor of shape [B, T, vocab_size + T] (vocabulary slots plus one copy slot per source position)
"""
ignore_index = [0]
unk = 2
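    # index 0 is padding (ignored); unknown tokens (index 2) are routed to a
    # position-specific copy slot appended after the vocabulary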
result = np.zeros((x_input_np.shape[0], x_input_np.shape[1], cfg.vocab_size + x_input_np.shape[0]),
dtype=np.float32)
result.fill(1e-10)
for t in range(x_input_np.shape[0]):
for b in range(x_input_np.shape[1]):
w = x_input_np[t][b]
if w not in ignore_index:
if w != unk:
result[t][b][x_input_np[t][b]] = 1.0
else:
result[t][b][cfg.vocab_size + t] = 1.0
result_np = result.transpose((1, 0, 2))
result = torch.from_numpy(result_np).float()
return result
def init_gru(gru):
gru.reset_parameters()
for _, hh, _, _ in gru.all_weights:
for i in range(0, hh.size(0), gru.hidden_size):
torch.nn.init.orthogonal_(hh[i:i + gru.hidden_size], gain=1)
class Attn(nn.Module):
def __init__(self, hidden_size):
super(Attn, self).__init__()
self.hidden_size = hidden_size
self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
self.v = nn.Parameter(torch.zeros(hidden_size))
stdv = 1. / math.sqrt(self.v.size(0))
self.v.data.normal_(mean=0, std=stdv)
def forward(self, hidden, encoder_outputs, mask=False, inp_seqs=None, stop_tok=None, normalize=True):
encoder_outputs = encoder_outputs.transpose(0, 1) # [B,T,H]
attn_energies = self.score(hidden, encoder_outputs)
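        # NOTE: the `if True or not mask` below always takes the unmasked branch,
        # so the masking logic further down is effectively disabled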
if True or not mask:
normalized_energy = F.softmax(attn_energies, dim=2) # [B,1,T]
else:
mask_idx = []
# inp_seqs: ndarray of [T,B]
# inp_seqs = inp_seqs.cpu().numpy()
for b in range(inp_seqs.shape[1]):
for t in range(inp_seqs.shape[0] + 1):
if t == inp_seqs.shape[0] or inp_seqs[t, b] in stop_tok:
mask_idx.append(t)
break
mask = []
for mask_len in mask_idx:
mask.append([1.] * mask_len + [0.] * (inp_seqs.shape[0] - mask_len))
mask = cuda_(Variable(torch.FloatTensor(mask))) # [B,T]
attn_energies = attn_energies * mask.unsqueeze(1)
normalized_energy = F.softmax(attn_energies, dim=2) # [B,1,T]
context = torch.bmm(normalized_energy, encoder_outputs) # [B,1,H]
return context.transpose(0, 1) # [1,B,H]
def score(self, hidden, encoder_outputs):
max_len = encoder_outputs.size(1)
H = hidden.repeat(max_len, 1, 1).transpose(0, 1)
# pdb.set_trace()
energy = torch.tanh(self.attn(torch.cat([H, encoder_outputs], 2))) # [B,T,2H]->[B,T,H]
energy = energy.transpose(2, 1) # [B,H,T]
v = self.v.repeat(encoder_outputs.size(0), 1).unsqueeze(1) # [B,1,H]
energy = torch.bmm(v, energy) # [B,1,T]
return energy
class SimpleDynamicEncoder(nn.Module):
def __init__(self, input_size, embed_size, hidden_size, n_layers, dropout):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.embed_size = embed_size
self.n_layers = n_layers
self.dropout = dropout
self.embedding = nn.Embedding(input_size, embed_size)
self.gru = nn.GRU(embed_size, hidden_size, n_layers, dropout=self.dropout, bidirectional=True)
init_gru(self.gru)
def forward(self, input_seqs, input_lens, hidden=None):
"""
forward procedure. No need for inputs to be sorted
:param input_seqs: Variable of [T,B]
:param hidden:
:param input_lens: *numpy array* of len for each input sequence
:return:
"""
# print("in encoder")
# print("input_seqs", input_seqs)
# print("hidden", hidden)
# print("input_lens", input_lens)
batch_size = input_seqs.size(1)
embedded = self.embedding(input_seqs)
import pdb
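        # debugging guard: break into pdb if the embedding lookup produced NaNs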
if torch.isnan(embedded).sum() > 0:
pdb.set_trace()
# pass
# print("embedded", embedded)
embedded = embedded.transpose(0, 1) # [B,T,E]
sort_idx = np.argsort(-input_lens)
unsort_idx = cuda_(torch.LongTensor(np.argsort(sort_idx)))
input_lens = input_lens[sort_idx]
sort_idx = cuda_(torch.LongTensor(sort_idx))
embedded = embedded[sort_idx].transpose(0, 1) # [T,B,E]
# print("embedded", embedded)
packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, input_lens)
outputs, hidden = self.gru(packed, hidden)
# print('outputs', outputs)
outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(outputs)
outputs = outputs[:, :, :self.hidden_size] + outputs[:, :, self.hidden_size:]
outputs = outputs.transpose(0, 1)[unsort_idx].transpose(0, 1).contiguous()
hidden = hidden.transpose(0, 1)[unsort_idx].transpose(0, 1).contiguous()
return outputs, hidden, embedded
class BSpanDecoder(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, dropout_rate, vocab):
super().__init__()
self.emb = nn.Embedding(vocab_size, embed_size)
if cfg.use_positional_embedding:
self.positional_embedding = nn.Embedding(cfg.max_ts + 1, embed_size)
init_pos_emb = self.position_encoding_init(cfg.max_ts + 1, embed_size)
self.positional_embedding.weight.data = init_pos_emb
self.gru = nn.GRU(hidden_size + embed_size, hidden_size, dropout=dropout_rate)
self.proj = nn.Linear(hidden_size * 2, vocab_size)
self.attn_u = Attn(hidden_size)
self.proj_copy1 = nn.Linear(hidden_size, hidden_size)
self.proj_copy2 = nn.Linear(hidden_size, hidden_size)
self.dropout_rate = dropout_rate
self.inp_dropout = nn.Dropout(self.dropout_rate)
init_gru(self.gru)
self.vocab = vocab
def position_encoding_init(self, n_position, d_pos_vec):
position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / d_pos_vec) for j in range(d_pos_vec)]
if pos != 0 else np.zeros(d_pos_vec) for pos in range(n_position)])
position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2]) # dim 2i
position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2]) # dim 2i+1
return torch.from_numpy(position_enc).type(torch.FloatTensor)
def forward(self, u_enc_out, z_tm1, last_hidden, u_input_np, pv_z_enc_out, prev_z_input_np, u_emb, pv_z_emb,
position):
# print("in bSpanDecoder")
# print(u_input_np)
# print(u_enc_out, z_tm1, last_hidden, u_input_np, pv_z_enc_out, prev_z_input_np, u_emb, pv_z_emb,
# position)
# print("prev_z_input_np", prev_z_input_np)
sparse_u_input = Variable(get_sparse_input_aug(u_input_np), requires_grad=False)
if pv_z_enc_out is not None:
context = self.attn_u(last_hidden, torch.cat([pv_z_enc_out, u_enc_out], dim=0), mask=True,
inp_seqs=np.concatenate([prev_z_input_np, u_input_np], 0),
stop_tok=[self.vocab.encode('EOS_M')])
else:
context = self.attn_u(last_hidden, u_enc_out, mask=True, inp_seqs=u_input_np,
stop_tok=[self.vocab.encode('EOS_M')])
embed_z = self.emb(z_tm1)
# embed_z = self.inp_dropout(embed_z)
        if cfg.use_positional_embedding:  # not used by default
position_label = [position] * u_enc_out.size(1) # [B]
position_label = cuda_(Variable(torch.LongTensor(position_label))).view(1, -1) # [1,B]
pos_emb = self.positional_embedding(position_label)
embed_z = embed_z + pos_emb
gru_in = torch.cat([embed_z, context], 2)
gru_out, last_hidden = self.gru(gru_in, last_hidden)
# gru_out = self.inp_dropout(gru_out)
gen_score = self.proj(torch.cat([gru_out, context], 2)).squeeze(0)
# gen_score = self.inp_dropout(gen_score)
u_copy_score = torch.tanh(self.proj_copy1(u_enc_out.transpose(0, 1))) # [B,T,H]
# stable version of copynet
u_copy_score = torch.matmul(u_copy_score, gru_out.squeeze(0).unsqueeze(2)).squeeze(2)
u_copy_score = u_copy_score.cpu()
u_copy_score_max = torch.max(u_copy_score, dim=1, keepdim=True)[0]
u_copy_score = torch.exp(u_copy_score - u_copy_score_max) # [B,T]
u_copy_score = torch.log(torch.bmm(u_copy_score.unsqueeze(1), sparse_u_input)).squeeze(
1) + u_copy_score_max # [B,V]
u_copy_score = cuda_(u_copy_score)
if pv_z_enc_out is None:
# u_copy_score = self.inp_dropout(u_copy_score)
scores = F.softmax(torch.cat([gen_score, u_copy_score], dim=1), dim=1)
gen_score, u_copy_score = scores[:, :cfg.vocab_size], \
scores[:, cfg.vocab_size:]
proba = gen_score + u_copy_score[:, :cfg.vocab_size] # [B,V]
proba = torch.cat([proba, u_copy_score[:, cfg.vocab_size:]], 1)
else:
sparse_pv_z_input = Variable(get_sparse_input_aug(prev_z_input_np), requires_grad=False)
pv_z_copy_score = torch.tanh(self.proj_copy2(pv_z_enc_out.transpose(0, 1))) # [B,T,H]
pv_z_copy_score = torch.matmul(pv_z_copy_score, gru_out.squeeze(0).unsqueeze(2)).squeeze(2)
pv_z_copy_score = pv_z_copy_score.cpu()
pv_z_copy_score_max = torch.max(pv_z_copy_score, dim=1, keepdim=True)[0]
pv_z_copy_score = torch.exp(pv_z_copy_score - pv_z_copy_score_max) # [B,T]
pv_z_copy_score = torch.log(torch.bmm(pv_z_copy_score.unsqueeze(1), sparse_pv_z_input)).squeeze(
1) + pv_z_copy_score_max # [B,V]
pv_z_copy_score = cuda_(pv_z_copy_score)
scores = F.softmax(torch.cat([gen_score, u_copy_score, pv_z_copy_score], dim=1), dim=1)
            gen_score = scores[:, :cfg.vocab_size]
            u_copy_score = scores[:, cfg.vocab_size:2 * cfg.vocab_size + u_input_np.shape[0]]
            pv_z_copy_score = scores[:, 2 * cfg.vocab_size + u_input_np.shape[0]:]
proba = gen_score + u_copy_score[:, :cfg.vocab_size] + pv_z_copy_score[:, :cfg.vocab_size] # [B,V]
proba = torch.cat([proba, pv_z_copy_score[:, cfg.vocab_size:], u_copy_score[:, cfg.vocab_size:]], 1)
return gru_out, last_hidden, proba
class ResponseDecoder(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, degree_size, dropout_rate, gru, proj, emb, vocab):
super().__init__()
self.emb = emb
self.attn_z = Attn(hidden_size)
self.attn_u = Attn(hidden_size)
self.gru = gru
init_gru(self.gru)
self.proj = proj
self.proj_copy1 = nn.Linear(hidden_size, hidden_size)
self.proj_copy2 = nn.Linear(hidden_size, hidden_size)
self.dropout_rate = dropout_rate
self.vocab = vocab
def get_sparse_selective_input(self, x_input_np):
result = np.zeros((x_input_np.shape[0], x_input_np.shape[1], cfg.vocab_size + x_input_np.shape[0]),
dtype=np.float32)
result.fill(1e-10)
reqs = ['address', 'phone', 'postcode', 'pricerange', 'area']
for t in range(x_input_np.shape[0] - 1):
for b in range(x_input_np.shape[1]):
w = x_input_np[t][b]
word = self.vocab.decode(w)
if word in reqs:
slot = self.vocab.encode(word + '_SLOT')
result[t + 1][b][slot] = 1.0
else:
if w == 2 or w >= cfg.vocab_size:
result[t + 1][b][cfg.vocab_size + t] = 5.0
else:
result[t + 1][b][w] = 1.0
result_np = result.transpose((1, 0, 2))
result = torch.from_numpy(result_np).float()
return result
def forward(self, z_enc_out, u_enc_out, u_input_np, m_t_input, degree_input, last_hidden, z_input_np):
sparse_z_input = Variable(self.get_sparse_selective_input(z_input_np), requires_grad=False)
m_embed = self.emb(m_t_input)
z_context = self.attn_z(last_hidden, z_enc_out, mask=True, stop_tok=[self.vocab.encode('EOS_Z2')],
inp_seqs=z_input_np)
u_context = self.attn_u(last_hidden, u_enc_out, mask=True, stop_tok=[self.vocab.encode('EOS_M')],
inp_seqs=u_input_np)
gru_in = torch.cat([m_embed, u_context, z_context, degree_input.unsqueeze(0)], dim=2)
gru_out, last_hidden = self.gru(gru_in, last_hidden)
gen_score = self.proj(torch.cat([z_context, u_context, gru_out], 2)).squeeze(0)
z_copy_score = torch.tanh(self.proj_copy2(z_enc_out.transpose(0, 1)))
z_copy_score = torch.matmul(z_copy_score, gru_out.squeeze(0).unsqueeze(2)).squeeze(2)
z_copy_score = z_copy_score.cpu()
z_copy_score_max = torch.max(z_copy_score, dim=1, keepdim=True)[0]
z_copy_score = torch.exp(z_copy_score - z_copy_score_max) # [B,T]
z_copy_score = torch.log(torch.bmm(z_copy_score.unsqueeze(1), sparse_z_input)).squeeze(
1) + z_copy_score_max # [B,V]
z_copy_score = cuda_(z_copy_score)
scores = F.softmax(torch.cat([gen_score, z_copy_score], dim=1), dim=1)
gen_score, z_copy_score = scores[:, :cfg.vocab_size], \
scores[:, cfg.vocab_size:]
proba = gen_score + z_copy_score[:, :cfg.vocab_size] # [B,V]
proba = torch.cat([proba, z_copy_score[:, cfg.vocab_size:]], 1)
return proba, last_hidden, gru_out
class ResponseDecoder_discrete(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, degree_size, dropout_rate, gru, proj, emb, vocab):
super().__init__()
self.emb = emb
self.attn_z = Attn(hidden_size)
self.attn_u = Attn(hidden_size)
self.gru = gru
init_gru(self.gru)
self.proj_0 = nn.Linear(hidden_size+dialog_config.STATE_DIM, hidden_size+dialog_config.STATE_DIM)
self.proj_1 = nn.Linear(hidden_size+dialog_config.STATE_DIM, hidden_size+dialog_config.STATE_DIM)
self.proj_2 = nn.Linear(hidden_size+dialog_config.STATE_DIM, hidden_size+dialog_config.STATE_DIM)
self.proj = proj
self.proj_copy1 = nn.Linear(hidden_size, hidden_size)
self.proj_copy2 = nn.Linear(hidden_size, hidden_size)
self.dropout_rate = dropout_rate
self.vocab = vocab
def get_sparse_selective_input(self, x_input_np):
result = np.zeros((x_input_np.shape[0], x_input_np.shape[1], cfg.vocab_size + x_input_np.shape[0]),
dtype=np.float32)
result.fill(1e-10)
reqs = ['address', 'phone', 'postcode', 'pricerange', 'area']
for t in range(x_input_np.shape[0] - 1):
for b in range(x_input_np.shape[1]):
w = x_input_np[t][b]
word = self.vocab.decode(w)
if word in reqs:
slot = self.vocab.encode(word + '_SLOT')
result[t + 1][b][slot] = 1.0
else:
if w == 2 or w >= cfg.vocab_size:
result[t + 1][b][cfg.vocab_size + t] = 5.0
else:
result[t + 1][b][w] = 1.0
result_np = result.transpose((1, 0, 2))
result = torch.from_numpy(result_np).float()
return result
def forward(self, z_enc_out, u_enc_out, np_state):
# sparse_z_input = Variable(self.get_sparse_selective_input(z_input_np), requires_grad=False)
# m_embed = self.emb(m_t_input)
# z_context = torch.mean(z_enc_out, 0)#= self.attn_z(last_hidden, z_enc_out, mask=True, stop_tok=[self.vocab.encode('EOS_Z2')],
# inp_seqs=z_input_np)
# pdb.set_trace()
u_context = u_enc_out[-1, :, :]#= self.attn_u(last_hidden, u_enc_out, mask=True, stop_tok=[self.vocab.encode('EOS_M')],
# inp_seqs=u_input_np)
state_from_np = torch.from_numpy(np_state).float().unsqueeze(0)
output0 = F.tanh(self.proj_0(torch.cat([u_context, state_from_np], 1)))
output1 = F.sigmoid(self.proj_1(output0))
output2 = F.sigmoid(self.proj_2(output1))
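        # the discrete decoder concatenates the last user-utterance encoding with the external
        # dialog state vector and maps it through a small MLP to produce action logits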
# gru_in = torch.cat([u_context, z_context], dim=2)
# gru_out, last_hidden = self.gru(gru_in)
# print(z_context)
# print(z_context.shape)
# print(u_context)
# print(u_context.shape)
gen_score = self.proj(output2)#.squeeze(0)# self.proj(torch.cat([z_context, u_context, gru_out], 2)).squeeze(0)
return gen_score
"""
z_copy_score = torch.tanh(self.proj_copy2(z_enc_out.transpose(0, 1)))
z_copy_score = torch.matmul(z_copy_score, gru_out.squeeze(0).unsqueeze(2)).squeeze(2)
z_copy_score = z_copy_score.cpu()
z_copy_score_max = torch.max(z_copy_score, dim=1, keepdim=True)[0]
z_copy_score = torch.exp(z_copy_score - z_copy_score_max) # [B,T]
z_copy_score = torch.log(torch.bmm(z_copy_score.unsqueeze(1), sparse_z_input)).squeeze(
1) + z_copy_score_max # [B,V]
z_copy_score = cuda_(z_copy_score)
scores = F.softmax(torch.cat([gen_score, z_copy_score], dim=1), dim=1)
gen_score, z_copy_score = scores[:, :cfg.vocab_size], \
scores[:, cfg.vocab_size:]
proba = gen_score + z_copy_score[:, :cfg.vocab_size] # [B,V]
proba = torch.cat([proba, z_copy_score[:, cfg.vocab_size:]], 1)
"""
        # return proba, last_hidden, gru_out  # unreachable leftover from the copy-mechanism variant kept (commented out) above
class TSD(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, degree_size, layer_num, dropout_rate, z_length,
max_ts, action_size=dialog_config.SYS_ACTION_CARDINALITY, discrete_act=False, beam_search=False, teacher_force=100, **kwargs):
super().__init__()
self.vocab = kwargs['vocab']
self.reader = kwargs['reader']
self.emb = nn.Embedding(vocab_size, embed_size)
self.dec_gru = nn.GRU(degree_size + embed_size + hidden_size * 2, hidden_size, dropout=dropout_rate)
self.proj = nn.Linear(hidden_size * 3, vocab_size)
self.proj_discrete = nn.Linear(hidden_size + dialog_config.STATE_DIM, action_size)
self.u_encoder = SimpleDynamicEncoder(vocab_size, embed_size, hidden_size, layer_num, dropout_rate)
self.z_decoder = BSpanDecoder(embed_size, hidden_size, vocab_size, dropout_rate, self.vocab)
self.m_decoder = ResponseDecoder(embed_size, hidden_size, vocab_size, degree_size, dropout_rate,
self.dec_gru, self.proj, self.emb, self.vocab)
self.m_decoder_discrete = ResponseDecoder_discrete(embed_size, hidden_size, vocab_size, degree_size, dropout_rate,
self.dec_gru, self.proj_discrete, self.emb, self.vocab)
self.embed_size = embed_size
self.z_length = z_length
self.max_ts = max_ts
self.discrete_act = discrete_act
self.beam_search = beam_search
self.teacher_force = teacher_force
self.pr_loss = nn.NLLLoss(ignore_index=0)
self.dec_loss = nn.NLLLoss(ignore_index=0)
self.saved_log_policy = []
if self.beam_search:
self.beam_size = kwargs['beam_size']
self.eos_token_idx = kwargs['eos_token_idx']
def forward(self, u_input, u_input_np, m_input, m_input_np, z_input, u_len, m_len, turn_states,
degree_input, mode, np_state, **kwargs):
if mode == 'train' or mode == 'valid':
pz_proba, pm_dec_proba, turn_states = \
self.forward_turn(u_input, u_len, m_input=m_input, m_len=m_len, z_input=z_input, mode='train',
turn_states=turn_states, degree_input=degree_input, u_input_np=u_input_np,
m_input_np=m_input_np, **kwargs)
loss, pr_loss, m_loss = self.supervised_loss(torch.log(pz_proba), torch.log(pm_dec_proba),
z_input, m_input)
return loss, pr_loss, m_loss, turn_states
elif mode == 'test':
if self.discrete_act:
m_output_index, pz_index, turn_states, pz_proba = self.forward_turn(u_input, u_len=u_len, z_input=z_input,
mode='test',
turn_states=turn_states,
degree_input=degree_input,
u_input_np=u_input_np,
m_input_np=m_input_np,
np_state=np_state,
**kwargs
)
return m_output_index, pz_index, turn_states, pz_proba
else:
m_output_index, pz_index, turn_states, pz_proba, mt_proba = self.forward_turn(u_input, u_len=u_len, z_input=z_input,
mode='test',
turn_states=turn_states,
degree_input=degree_input,
u_input_np=u_input_np, m_input_np=m_input_np,
**kwargs
)
return m_output_index, pz_index, turn_states, pz_proba, mt_proba
elif mode == 'rl':
loss = self.forward_turn(u_input, u_len=u_len, is_train=False, mode='rl',
turn_states=turn_states,
degree_input=degree_input,
u_input_np=u_input_np, m_input_np=m_input_np,
**kwargs
)
return loss
def forward_turn(self, u_input, u_len, turn_states, mode, degree_input, u_input_np, m_input_np=None,
m_input=None, np_state=None, m_len=None, z_input=None, **kwargs):
"""
compute required outputs for a single dialogue turn. Turn state{Dict} will be updated in each call.
:param u_input_np:
:param m_input_np:
:param u_len:
:param turn_states:
:param is_train:
:param u_input: [T,B]
:param m_input: [T,B]
:param z_input: [T,B]
:return:
"""
prev_z_input = kwargs.get('prev_z_input', None)
prev_z_input_np = kwargs.get('prev_z_input_np', None)
prev_z_len = kwargs.get('prev_z_len', None)
pv_z_emb = None
batch_size = u_input.size(1)
pv_z_enc_out = None
if prev_z_input is not None:
pv_z_enc_out, _, pv_z_emb = self.u_encoder(prev_z_input, prev_z_len)
u_enc_out, u_enc_hidden, u_emb = self.u_encoder(u_input, u_len)
last_hidden = u_enc_hidden[:-1]
z_tm1 = cuda_(Variable(torch.ones(1, batch_size).long() * 3)) # GO_2 token
m_tm1 = cuda_(Variable(torch.ones(1, batch_size).long())) # GO token
if mode == 'train':
pz_dec_outs = []
pz_proba = []
z_length = z_input.size(0) if z_input is not None else self.z_length # GO token
hiddens = [None] * batch_size
for t in range(z_length):
pz_dec_out, last_hidden, proba = \
self.z_decoder(u_enc_out=u_enc_out, u_input_np=u_input_np,
z_tm1=z_tm1, last_hidden=last_hidden,
pv_z_enc_out=pv_z_enc_out, prev_z_input_np=prev_z_input_np,
u_emb=u_emb, pv_z_emb=pv_z_emb, position=t)
pz_proba.append(proba)
pz_dec_outs.append(pz_dec_out)
z_np = z_tm1.view(-1).cpu().data.numpy()
for i in range(batch_size):
if z_np[i] == self.vocab.encode('EOS_Z2'):
hiddens[i] = last_hidden[:, i, :]
z_tm1 = z_input[t].view(1, -1)
for i in range(batch_size):
if hiddens[i] is None:
hiddens[i] = last_hidden[:, i, :]
last_hidden = torch.stack(hiddens, dim=1)
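            # each example's hidden state was frozen at the step where EOS_Z2 was generated
            # (or kept at the final step), so response decoding starts from the end of the belief span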
z_input_np = z_input.cpu().data.numpy()
pz_dec_outs = torch.cat(pz_dec_outs, dim=0) # [Tz,B,H]
pz_proba = torch.stack(pz_proba, dim=0)
# P(m|z,u)
pm_dec_proba, m_dec_outs = [], []
m_length = m_input.size(0) # Tm
# last_hidden = u_enc_hidden[:-1]
for t in range(m_length):
teacher_forcing = toss_(self.teacher_force)
proba, last_hidden, dec_out = self.m_decoder(pz_dec_outs, u_enc_out, u_input_np, m_tm1,
degree_input, last_hidden, z_input_np)
if teacher_forcing:
m_tm1 = m_input[t].view(1, -1)
else:
_, m_tm1 = torch.topk(proba, 1)
m_tm1 = m_tm1.view(1, -1)
pm_dec_proba.append(proba)
m_dec_outs.append(dec_out)
pm_dec_proba = torch.stack(pm_dec_proba, dim=0) # [T,B,V]
return pz_proba, pm_dec_proba, None
else:
# assert z_input is not None
            z_length = z_input.size(0) if z_input is not None else None
# print("z_input", z_input)
if z_input is None:
use_predicted_zt = True
else:
use_predicted_zt = False
pz_dec_outs, bspan_index, last_hidden, pz_proba = self.bspan_decoder(u_enc_out, z_tm1, last_hidden, u_input_np,
pv_z_enc_out=pv_z_enc_out,
prev_z_input_np=prev_z_input_np,
u_emb=u_emb, pv_z_emb=pv_z_emb,
z_length=z_length,
use_predicted_zt=use_predicted_zt,
z_input=z_input)
pz_proba = torch.stack(pz_proba, dim=0)
pz_dec_outs = torch.cat(pz_dec_outs, dim=0)
degree_input = self.reader.db_degree_handler(bspan_index, kwargs['dial_id'])
degree_input = cuda_(Variable(torch.from_numpy(degree_input).float()))
if mode == 'test':
if not self.discrete_act:
if not self.beam_search:
m_output_index, m_probas = self.greedy_decode(pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden,
degree_input, bspan_index)
# else:
# m_output_index = self.beam_search_decode(pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden,
# degree_input, bspan_index)
#
return m_output_index, bspan_index, None, pz_proba, m_probas
else:
act_logits = self.action_decode(pz_dec_outs, u_enc_out, np_state)
return act_logits, bspan_index, None, pz_proba
elif mode == 'rl':
return self.sampling_decode(pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden,
degree_input, bspan_index)
def action_decode(self, pz_dec_outs, u_enc_out, np_state):
logits = self.m_decoder_discrete(pz_dec_outs, u_enc_out, np_state)
return logits
def bspan_decoder(self, u_enc_out, z_tm1, last_hidden, u_input_np, pv_z_enc_out, prev_z_input_np, u_emb, pv_z_emb,
z_length=None, use_predicted_zt=True, z_input=None):
if not use_predicted_zt:
assert z_input is not None
assert z_length is not None
pz_dec_outs = []
pz_proba = []
decoded = []
batch_size = u_enc_out.size(1)
hiddens = [None] * batch_size
z_length = z_length if z_length is not None else cfg.z_length
# print(z_length)
# import pdb
# pdb.set_trace()
for t in range(z_length):
pz_dec_out, last_hidden, proba = \
self.z_decoder(u_enc_out=u_enc_out, u_input_np=u_input_np,
z_tm1=z_tm1, last_hidden=last_hidden, pv_z_enc_out=pv_z_enc_out,
prev_z_input_np=prev_z_input_np, u_emb=u_emb, pv_z_emb=pv_z_emb, position=t)
# print("--"*20)
# print("in bspan decoder")
# print("proba ", proba)
# print("z_tm1", z_tm1)
# print("t", t)
# print("--"*20)
pz_proba.append(proba)
pz_dec_outs.append(pz_dec_out)
# print("proba_size", proba.shape)
z_proba, z_index = torch.topk(proba, 1) # [B,1]
# print('z_index', z_index)
z_index = z_index.data.view(-1)
#####################################################
if prev_z_input_np is None:
tmp = u_input_np # [,B]
else:
# pdb.set_trace()
tmp = np.concatenate((u_input_np, prev_z_input_np), axis=0)
for i in range(z_index.size(0)):
if z_index[i] >= cfg.vocab_size:
# print(z_index)
z_index[i] = torch.tensor(int(tmp[z_index[i] - cfg.vocab_size, i]))
del tmp
decoded.append(z_index.clone())
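            # copy indices (>= vocab_size) were resolved above to the actual token ids taken
            # from the user input / previous belief span before being recorded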
# print(decoded)
#####################################################
for i in range(z_index.size(0)):
if z_index[i] >= cfg.vocab_size:
z_index[i] = 2 # unk
# print('z_index', z_index)
z_np = z_tm1.view(-1).cpu().data.numpy()
for i in range(batch_size):
if z_np[i] == self.vocab.encode('EOS_Z2'):
hiddens[i] = last_hidden[:, i, :]
if use_predicted_zt:
z_tm1 = cuda_(Variable(z_index).view(1, -1))
else:
z_tm1 = z_input[t].view(1, -1)
for i in range(batch_size):
if hiddens[i] is None:
hiddens[i] = last_hidden[:, i, :]
last_hidden = torch.stack(hiddens, dim=1)
if not use_predicted_zt:
z_input_np = z_input.cpu().data.numpy()
decoded = torch.stack(decoded, dim=0).transpose(0, 1)
decoded = list(decoded)
decoded = [list(_) for _ in decoded]
return pz_dec_outs, decoded, last_hidden, pz_proba
def greedy_decode(self, pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden, degree_input, bspan_index):
decoded = []
probas = []
bspan_index_np = pad_sequences(bspan_index).transpose((1, 0))
for t in range(self.max_ts):
proba, last_hidden, _ = self.m_decoder(pz_dec_outs, u_enc_out, u_input_np, m_tm1,
degree_input, last_hidden, bspan_index_np)
probas.append(proba)
mt_proba, mt_index = torch.topk(proba, 1) # [B,1]
mt_index = mt_index.data.view(-1)
decoded.append(mt_index.clone())
for i in range(mt_index.size(0)):
if mt_index[i] >= cfg.vocab_size:
mt_index[i] = 2 # unk
m_tm1 = cuda_(Variable(mt_index).view(1, -1))
decoded = torch.stack(decoded, dim=0).transpose(0, 1)
decoded = list(decoded)
return [list(_) for _ in decoded], probas
def beam_search_decode_single(self, pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden, degree_input,
bspan_index):
eos_token_id = self.vocab.encode(cfg.eos_m_token)
batch_size = pz_dec_outs.size(1)
if batch_size != 1:
raise ValueError('"Beam search single" requires batch size to be 1')
class BeamState:
def __init__(self, score, last_hidden, decoded, length):
"""
Beam state in beam decoding
:param score: sum of log-probabilities
:param last_hidden: last hidden
:param decoded: list of *Variable[1*1]* of all decoded words
:param length: current decoded sentence length
"""
self.score = score
self.last_hidden = last_hidden
self.decoded = decoded
self.length = length
def update_clone(self, score_incre, last_hidden, decoded_t):
decoded = copy.copy(self.decoded)
decoded.append(decoded_t)
clone = BeamState(self.score + score_incre, last_hidden, decoded, self.length + 1)
return clone
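        # a finished beam hypothesis is only accepted if its decoded sentence covers every
        # requestable slot found in the belief span (see beam_result_valid below)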
def beam_result_valid(decoded_t, bspan_index):
decoded_t = [_.view(-1).data[0] for _ in decoded_t]
req_slots = self.get_req_slots(bspan_index)
decoded_sentence = self.vocab.sentence_decode(decoded_t, cfg.eos_m_token)
for req in req_slots:
if req not in decoded_sentence:
return False
return True
def score_bonus(state, decoded, bspan_index):
bonus = cfg.beam_len_bonus
return bonus
def soft_score_incre(score, turn):
return score
finished, failed = [], []
states = [] # sorted by score decreasingly
dead_k = 0
states.append(BeamState(0, last_hidden, [m_tm1], 0))
bspan_index_np = np.array(bspan_index).reshape(-1, 1)
for t in range(self.max_ts):
new_states = []
k = 0
while k < len(states) and k < self.beam_size - dead_k:
state = states[k]
last_hidden, m_tm1 = state.last_hidden, state.decoded[-1]
proba, last_hidden, _ = self.m_decoder(pz_dec_outs, u_enc_out, u_input_np, m_tm1, degree_input,
last_hidden, bspan_index_np)
proba = torch.log(proba)
mt_proba, mt_index = torch.topk(proba, self.beam_size - dead_k) # [1,K]
for new_k in range(self.beam_size - dead_k):
                    score_incre = soft_score_incre(mt_proba[0][new_k].data[0], t) + \
                                  score_bonus(state, mt_index[0][new_k].data[0], bspan_index)
if len(new_states) >= self.beam_size - dead_k and state.score + score_incre < new_states[-1].score:
break
decoded_t = mt_index[0][new_k]
if decoded_t.data[0] >= cfg.vocab_size:
decoded_t.data[0] = 2 # unk
if self.vocab.decode(decoded_t.data[0]) == cfg.eos_m_token:
if beam_result_valid(state.decoded, bspan_index):
finished.append(state)
dead_k += 1
else:
failed.append(state)
else:
decoded_t = decoded_t.view(1, -1)
new_state = state.update_clone(score_incre, last_hidden, decoded_t)
new_states.append(new_state)
k += 1
if self.beam_size - dead_k < 0:
break
new_states = new_states[:self.beam_size - dead_k]
new_states.sort(key=lambda x: -x.score)
states = new_states
if t == self.max_ts - 1 and not finished:
finished = failed
print('FAIL')
if not finished:
finished.append(states[0])
finished.sort(key=lambda x: -x.score)
decoded_t = finished[0].decoded
decoded_t = [_.view(-1).data[0] for _ in decoded_t]
decoded_sentence = self.vocab.sentence_decode(decoded_t, cfg.eos_m_token)
# print(decoded_sentence)
generated = torch.cat(finished[0].decoded, dim=1).data # [B=1, T]
return generated
def beam_search_decode(self, pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden, degree_input, bspan_index):
vars = torch.split(pz_dec_outs, 1, dim=1), torch.split(u_enc_out, 1, dim=1), torch.split(
m_tm1, 1, dim=1), torch.split(last_hidden, 1, dim=1), torch.split(degree_input, 1, dim=0)
decoded = []
for i, (pz_dec_out_s, u_enc_out_s, m_tm1_s, last_hidden_s, degree_input_s) in enumerate(zip(*vars)):
decoded_s = self.beam_search_decode_single(pz_dec_out_s, u_enc_out_s, m_tm1_s,
u_input_np[:, i].reshape((-1, 1)),
last_hidden_s, degree_input_s, bspan_index[i])
decoded.append(decoded_s)
return [list(_.view(-1)) for _ in decoded]
def supervised_loss(self, pz_proba, pm_dec_proba, z_input, m_input):
pz_proba, pm_dec_proba = pz_proba[:, :, :cfg.vocab_size].contiguous(), pm_dec_proba[:, :,
:cfg.vocab_size].contiguous()
pr_loss = self.pr_loss(pz_proba.view(-1, pz_proba.size(2)), z_input.view(-1))
m_loss = self.dec_loss(pm_dec_proba.view(-1, pm_dec_proba.size(2)), m_input.view(-1))
loss = pr_loss + m_loss
return loss, pr_loss, m_loss
def self_adjust(self, epoch):
pass
# REINFORCEMENT fine-tuning with MC
def possible_reqs(self):
if cfg.dataset == 'camrest':
return ['address', 'phone', 'postcode', 'pricerange', 'area']
elif cfg.dataset == 'kvret':
req_by_intent = {
'weather': ['weather_attribute'],
'navigate': ['poi', 'traffic_info', 'address', 'distance'],
'schedule': ['event', 'date', 'time', 'party', 'agenda', 'room']
}
reqs = []
for value in req_by_intent.values():
reqs.extend(value)
return reqs
else:
raise ValueError('unknown dataset')
def get_req_slots(self, bspan_index):
reqs = self.possible_reqs()
reqs = set(self.vocab.sentence_decode(bspan_index).split()).intersection(reqs)
return [_ + '_SLOT' for _ in reqs]
def reward(self, m_tm1, decoded, bspan_index):
"""
The setting of the reward function is heuristic. It can be better optimized.
:param m_tm1:
:param decoded:
:param bspan_index:
:return:
"""
req_slots = self.get_req_slots(bspan_index)
m_tm1 = self.vocab.decode(m_tm1[0])
finished = m_tm1 == 'EOS_M'
decoded = [_.view(-1)[0] for _ in decoded]
decoded_sentence = self.vocab.sentence_decode(decoded, cfg.eos_m_token).split()
reward = -0.01 if cfg.dataset == 'camrest' else 0
'''
if not finished:
if m_tm1 in req_slots:
if decoded_sentence and m_tm1 not in decoded_sentence[:-1]:
reward = 1.0
'''
# some modification for reward function.
if m_tm1 in req_slots:
if decoded_sentence and m_tm1 not in decoded_sentence[:-1]:
reward += 1.0
else:
reward -= 1.0 if cfg.dataset == 'camrest' else 0 # repeat
return reward, finished
def sampling_decode(self, pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden, degree_input, bspan_index):
vars = torch.split(pz_dec_outs, 1, dim=1), torch.split(u_enc_out, 1, dim=1), torch.split(
m_tm1, 1, dim=1), torch.split(last_hidden, 1, dim=1), torch.split(degree_input, 1, dim=0)
batch_loss = []
sample_num = 1
for i, (pz_dec_out_s, u_enc_out_s, m_tm1_s, last_hidden_s, degree_input_s) in enumerate(zip(*vars)):
if not self.get_req_slots(bspan_index[i]):
continue
for j in range(sample_num):
loss = self.sampling_decode_single(pz_dec_out_s, u_enc_out_s, m_tm1_s,
u_input_np[:, i].reshape((-1, 1)),
last_hidden_s, degree_input_s, bspan_index[i])
batch_loss.append(loss)
if not batch_loss:
return None
else:
return sum(batch_loss) / len(batch_loss)
def sampling_decode_single(self, pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden, degree_input, bspan_index):
decoded = []
reward_sum = 0
log_probs = []
rewards = []
bspan_index_np = np.array(bspan_index).reshape(-1, 1)
for t in range(self.max_ts):
# reward
reward, finished = self.reward(m_tm1.data.view(-1), decoded, bspan_index)
reward_sum += reward
rewards.append(reward)
if t == self.max_ts - 1:
finished = True
if finished:
loss = self.finish_episode(log_probs, rewards)
return loss
# action
proba, last_hidden, _ = self.m_decoder(pz_dec_outs, u_enc_out, u_input_np, m_tm1,
degree_input, last_hidden, bspan_index_np)
proba = proba.squeeze(0) # [B,V]
dis = Categorical(proba)
action = dis.sample()
log_probs.append(dis.log_prob(action))
mt_index = action.data.view(-1)
decoded.append(mt_index.clone())
for i in range(mt_index.size(0)):
if mt_index[i] >= cfg.vocab_size:
mt_index[i] = 2 # unk
m_tm1 = cuda_(Variable(mt_index).view(1, -1))
def finish_episode(self, log_probas, saved_rewards):
R = 0
policy_loss = []
rewards = []
for r in saved_rewards:
R = r + 0.8 * R
rewards.insert(0, R)
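        # `rewards` now holds discounted returns-to-go (discount factor 0.8),
        # aligned with the saved log-probabilities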
rewards = torch.Tensor(rewards)
# rewards = (rewards - rewards.mean()) / (rewards.std() + np.finfo(np.float32).eps)
for log_prob, reward in zip(log_probas, rewards):
policy_loss.append(-log_prob * reward)
l = len(policy_loss)
policy_loss = torch.cat(policy_loss).sum()
return policy_loss / l
| [
"torch.nn.Linear",
"torch.cat",
"torch.distributions.Categorical",
"torch.stack",
"torch.nn.GRU",
"torch.isnan",
"torch.bmm",
"torch.ones",
"torch.LongTensor",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.exp",
"torch.topk",
"torch.autograd.Variable",
"torch.FloatTensor",
"torch.nn.init.orthogonal_",
"torch.Tensor",
"torch.zeros",
"torch.max",
"torch.nn.functional.softmax",
"torch.log",
"torch.nn.NLLLoss",
"torch.nn.Dropout",
"torch.split",
"torch.from_numpy",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.nn.Embedding"
] | 0.4.1 | qbetterk/user-simulator | 77caca30ff67b9112b1fe5e65e191c6b5e25532c |
1.7 | # -*- coding: utf-8 -*-
'''
Author: TJUZQC
Date: 2020-10-26 10:26:51
LastEditors: TJUZQC
LastEditTime: 2020-11-20 19:23:55
Description: None
'''
import argparse
import logging
import os
import sys
import numpy as np
import torch
import torch.nn as nn
import yaml
from torch import optim
from torch.utils.data import DataLoader, random_split
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from evaluation import eval_net
from models import ChooseModel, init_weights
from utils.dataset import BasicDataset
conf = yaml.load(open(os.path.join(
sys.path[0], 'config', 'config.yaml')), Loader=yaml.FullLoader)
dir_img = conf['DATASET']['IMGS_DIR']
dir_mask = conf['DATASET']['MASKS_DIR']
dir_checkpoint = conf['MODEL']['CHECKPOINT_DIR']
def train_net(net,
device,
epochs=5,
batch_size=16,
lr=0.001,
val_percent=0.1,
save_cp=True,
img_scale=0.5,
use_apex=False,
optimizer='adam',
classes=2,
lr_scheduler='steplr',
lr_scheduler_cfgs: dict = {'step_size': 10}):
dataset = BasicDataset(dir_img, dir_mask, img_scale,
train=True, classes=classes)
n_val = int(len(dataset) * val_percent)
n_train = len(dataset) - n_val
train, val = random_split(dataset, [n_train, n_val])
train_loader = DataLoader(
train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True)
val_loader = DataLoader(val, batch_size=batch_size,
shuffle=False, num_workers=8, pin_memory=True)
writer = SummaryWriter(
comment=f'LR_{lr}_BS_{batch_size}_SCALE_{img_scale}')
global_step = 0
logging.info(f'''Starting training:
Epochs: {epochs}
Batch size: {batch_size}
Learning rate: {lr}
Training size: {n_train}
Validation size: {n_val}
Checkpoints: {save_cp}
Device: {device.type}
Images scaling: {img_scale}
Use apex: {use_apex}
''')
optimizers = {
'adadelta': optim.Adadelta,
'adagrad': optim.Adagrad,
'adam': optim.Adam,
'adamw': optim.AdamW,
'sparseadam': optim.SparseAdam,
'adamax': optim.Adamax,
'asgd': optim.ASGD,
'lbfgs': optim.LBFGS,
'rmsprop': optim.RMSprop,
'rprop': optim.Rprop,
'sgd': optim.SGD,
}
optimizer = optimizers.get(optimizer, None)(
net.parameters(), lr=lr, weight_decay=1e-8)
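    # note: an unknown optimizer name makes the .get() lookup return None,
    # which raises a TypeError when it is called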
lr_scheduler_getter = {
'lambdalr': torch.optim.lr_scheduler.LambdaLR,
'multiplicativelr': torch.optim.lr_scheduler.MultiplicativeLR,
'steplr': torch.optim.lr_scheduler.StepLR,
'multisteplr': torch.optim.lr_scheduler.MultiStepLR,
'exponentiallr': torch.optim.lr_scheduler.ExponentialLR,
'cosineannealinglr': torch.optim.lr_scheduler.CosineAnnealingLR,
'reducelronplateau': torch.optim.lr_scheduler.ReduceLROnPlateau,
'cycliclr': torch.optim.lr_scheduler.CyclicLR,
'onecyclelr': torch.optim.lr_scheduler.OneCycleLR,
}
lr_scheduler = lr_scheduler_getter.get(
lr_scheduler.lower(), None)(optimizer, **lr_scheduler_cfgs)
if use_apex:
try:
from apex import amp
net, optimizer = amp.initialize(net, optimizer, opt_level="O1")
except ImportError as e:
print(e)
use_apex = False
if net.n_classes > 1:
criterion = nn.CrossEntropyLoss()
else:
criterion = nn.BCEWithLogitsLoss()
for epoch in range(epochs):
net.train()
epoch_loss = 0
with tqdm(total=n_train, desc=f'Epoch {epoch + 1}/{epochs}', unit='img') as pbar:
for batch in train_loader:
imgs = batch['image']
true_masks = batch['mask']
assert imgs.shape[1] == net.n_channels, \
f'Network has been defined with {net.n_channels} input channels, ' \
f'but loaded images have {imgs.shape[1]} channels. Please check that ' \
'the images are loaded correctly.'
imgs = imgs.to(device=device, dtype=torch.float32)
mask_type = torch.float32 if net.n_classes == 1 else torch.long
true_masks = true_masks.to(device=device, dtype=mask_type)
if net.n_classes > 1:
b, c, w, h = true_masks.shape
true_masks = true_masks.view(b, w, h)
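                    # CrossEntropyLoss expects class-index targets of shape [B, H, W],
                    # so the singleton channel dimension is dropped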
masks_pred = net(imgs)
loss = criterion(masks_pred, true_masks)
epoch_loss += loss.item()
writer.add_scalar('Loss/train', loss.item(), global_step)
pbar.set_postfix(**{'loss (batch)': loss.item()})
optimizer.zero_grad()
if not use_apex:
loss.backward()
else:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
pbar.update(imgs.shape[0])
global_step += 1
dataset_len = len(dataset)
a1 = dataset_len // 10
a2 = dataset_len / 10
b1 = global_step % a1
b2 = global_step % a2
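                # a1/a2/b1/b2 above are unused leftovers; the check below triggers validation
                # roughly 10 times per epoch (based on the full dataset size, not just the training split)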
if global_step % (len(dataset) // (10 * batch_size)) == 0:
dice_coeff, pA, oA, precision, recall, f1score = eval_net(
net, val_loader, device, n_val)
if net.n_classes > 1:
logging.info(
'Validation cross entropy: {}'.format(dice_coeff))
writer.add_scalar('Loss/test', dice_coeff, global_step)
else:
logging.info(
'Validation Dice Coeff: {}'.format(dice_coeff))
writer.add_scalar('Dice/test', dice_coeff, global_step)
logging.info(
'Validation Pixel Accuracy: {}'.format(pA))
writer.add_scalar('pA/test', pA, global_step)
logging.info(
'Validation Overall Accuracy: {}'.format(oA))
writer.add_scalar('oA/test', oA, global_step)
logging.info(
'Validation Precision: {}'.format(precision))
writer.add_scalar('precision/test',
precision, global_step)
logging.info(
'Validation Recall: {}'.format(recall))
writer.add_scalar('recall/test', recall, global_step)
logging.info(
'Validation F1-score: {}'.format(f1score))
writer.add_scalar(
'F1-score/test', f1score, global_step)
writer.add_images('images', imgs, global_step)
if net.n_classes == 1:
writer.add_images(
'masks/true', true_masks, global_step)
writer.add_images(
'masks/pred', torch.sigmoid(masks_pred) > 0.5, global_step)
lr_scheduler.step()
if save_cp:
try:
os.mkdir(dir_checkpoint)
logging.info('Created checkpoint directory')
except OSError:
pass
torch.save(net.state_dict(),
os.path.join(dir_checkpoint, f'CP_epoch{epoch + 1}_loss_{str(loss.item())}.pth'))
logging.info(
f'Checkpoint {epoch + 1} saved ! loss (batch) = ' + str(loss.item()))
writer.close()
def get_args():
parser = argparse.ArgumentParser(description='Train the UNet on images and target masks',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-n', '--network', metavar='NETWORK', type=str,
default=conf['MODEL']['MODEL_NAME'], help='network type', dest='network')
parser.add_argument('-e', '--epochs', metavar='E', type=int, default=conf['NUM_EPOCHS'],
help='Number of epochs', dest='epochs')
parser.add_argument('-b', '--batch-size', metavar='B', type=int, nargs='?', default=conf['BATCH_SIZE'],
help='Batch size', dest='batchsize')
parser.add_argument('-l', '--learning-rate', metavar='LR', type=float, nargs='?', default=conf['LR'],
help='Learning rate', dest='lr')
parser.add_argument('-f', '--load', dest='load', type=str, default=conf['MODEL']['PRETRAINED_MODEL_DIR'],
help='Load model from a .pth file')
parser.add_argument('-s', '--scale', dest='scale', type=float, default=conf['SCALE'],
help='Downscaling factor of the images')
parser.add_argument('-v', '--validation', dest='val', type=float, default=conf['VALIDATION'],
help='Percent of the data that is used as validation (0-100)')
parser.add_argument('-t', '--init-type', dest='init_type', type=str, default=conf['INIT_TYPE'],
help='Init weights type')
parser.add_argument('-a', '--use-apex', dest='use_apex', type=str, default=conf['APEX'],
help='Automatic Mixed Precision')
parser.add_argument('-o', '--optimizer', dest='optimizer',
type=str, default=conf['OPTIMIZER'], help='Optimizer type')
parser.add_argument('-ls', '--lr-scheduler', dest='lr_scheduler',
type=str, default=conf['LR_SCHEDULER'], help='lr scheduler type')
return parser.parse_args()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO,
format='%(levelname)s: %(message)s')
args = get_args()
    device = torch.device('cuda' if torch.cuda.is_available() and conf['DEVICE'].lower() == 'cuda' else 'cpu')
logging.info(f'Using device {device}')
network = args.network.lower()
# Change here to adapt to your data
# n_channels=3 for RGB images
# n_classes is the number of probabilities you want to get per pixel
# - For 1 class and background, use n_classes=1
# - For 2 classes, use n_classes=1
# - For N > 2 classes, use n_classes=N
net = ChooseModel(network)(
n_channels=3, n_classes=conf['DATASET']['NUM_CLASSES'])
assert net is not None, f'check your argument --network'
logging.info(f'Network:\n'
f'\t{net.n_channels} input channels\n'
f'\t{net.n_classes} output channels (classes)\n'
f'\t{"Bilinear" if net.bilinear else "Dilated conv"} upscaling\n'
f'\tApex is {"using" if args.use_apex == "True" else "not using"}')
init_weights(net, args.init_type)
if args.load:
net.load_state_dict(
torch.load(args.load, map_location=device)
)
logging.info(f'Model loaded from {args.load}')
net.to(device=device)
# faster convolutions, but more memory
# cudnn.benchmark = True
try:
train_net(net=net,
epochs=args.epochs,
batch_size=args.batchsize,
lr=args.lr,
device=device,
img_scale=args.scale,
val_percent=args.val / 100,
use_apex=(args.use_apex == "True"),
optimizer=args.optimizer.lower(),
classes=conf['DATASET']['NUM_CLASSES'],
lr_scheduler=args.lr_scheduler,
lr_scheduler_cfgs=conf['LR_SCHEDULER_CFGS'])
except KeyboardInterrupt:
torch.save(net.state_dict(), 'INTERRUPTED.pth')
logging.info('Saved interrupt')
try:
sys.exit(0)
except SystemExit:
os._exit(0)
| [
"torch.sigmoid",
"torch.utils.tensorboard.SummaryWriter",
"torch.utils.data.random_split",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.nn.BCEWithLogitsLoss",
"torch.load",
"torch.nn.CrossEntropyLoss"
] | 1.7.0 | QinchengZhang/PathologySegmentation | 7a2c21346739a79c33e7a7ccc081018821868eb7 |
1.5 | import torch
from torchvision import datasets, transforms
import os
transform = {
"train": transforms.Compose(
[
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(
[0.4914, 0.4821, 0.4465], [0.2470, 0.2435, 0.2616]
),
]
),
"val": transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
[0.4940, 0.4849, 0.4502], [0.2467, 0.2430, 0.2616]
),
]
),
}
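# the training split uses random-resized-crop + horizontal-flip augmentation, while the
# validation split uses a deterministic resize + center crop; each has its own normalization stats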
def get_loader(root, batch_size, num_workers):
dataset = {
x: datasets.ImageFolder(os.path.join(root, x), transform=transform[x])
for x in ["train", "val"]
}
data_loader = {
x: torch.utils.data.DataLoader(
dataset[x], batch_size=batch_size, shuffle=(x == "train"),
num_workers=num_workers,
)
for x in ["train", "val"]
}
dataset_size = {x: len(dataset[x]) for x in ["train", "val"]}
return data_loader, dataset_size
def CIFAR10(batch_size, root="data/"):
dataset = {
x: datasets.CIFAR10(
root, train=(x == "train"), download=True, transform=transform[x]
)
for x in ["train", "val"]
}
data_loader = {
x: torch.utils.data.DataLoader(
dataset[x], batch_size=batch_size, shuffle=(x == "train")
)
for x in ["train", "val"]
}
dataset_size = {x: len(dataset[x]) for x in ["train", "val"]}
return data_loader, dataset_size
| [
"torch.utils.data.DataLoader"
] | 1.5.1 | 1chimaruGin/Oject_classifier | d27ca8f47d2d0af107582c25a0756dda15361c2e |
1.0 | # Copyright (c) 2018, Curious AI Ltd. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Custom loss functions"""
import torch
from torch.nn import functional as F
from torch.autograd import Variable
import pdb
import numpy as np
def softmax_mse_loss(input_logits, target_logits):
"""Takes softmax on both sides and returns MSE loss
Note:
    - Returns the sum over all examples, divided by the number of classes.
      Divide by the batch size afterwards if you want the mean.
- Sends gradients to inputs but not the targets.
"""
assert input_logits.size() == target_logits.size()
input_softmax = F.softmax(input_logits, dim=1)
target_softmax = F.softmax(target_logits, dim=1)
num_classes = input_logits.size()[1]
return F.mse_loss(input_softmax, target_softmax, size_average=False) / num_classes
def softmax_kl_loss(input_logits, target_logits):
"""Takes softmax on both sides and returns KL divergence
Note:
- Returns the sum over all examples. Divide by the batch size afterwards
if you want the mean.
- Sends gradients to inputs but not the targets.
"""
assert input_logits.size() == target_logits.size()
input_log_softmax = F.log_softmax(input_logits, dim=1) # log(q)
target_softmax = F.softmax(target_logits, dim=1) # p
return F.kl_div(input_log_softmax, target_softmax, size_average=False)
def symmetric_mse_loss(input1, input2):
"""Like F.mse_loss but sends gradients to both directions.
    because input1/input2 are tensors that carry gradients, while the target in F.mse_loss does not.
Note:
- Returns the sum over all examples. Divide by the batch size afterwards
if you want the mean.
- Sends gradients to both input1 and input2.
"""
assert input1.size() == input2.size()
num_classes = input1.size()[1]
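    # gradients flow into both input1 and input2, unlike F.mse_loss where the target receives no gradient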
return torch.sum((input1 - input2)**2) / num_classes | [
"torch.nn.functional.log_softmax",
"torch.nn.functional.mse_loss",
"torch.nn.functional.kl_div",
"torch.nn.functional.softmax",
"torch.sum"
] | 1.0.0 | Shuai-Xie/LP-DeepSSL | 9389c6cb0b83c7ca509ce284c4d86b600ca44a9b |
1.0 | from typing import Union
import torch
from torch import nn
from ..composition import AdapterCompositionBlock, parse_composition
from ..model_mixin import InvertibleAdaptersMixin, ModelAdaptersMixin
from .bert import BertEncoderAdaptersMixin, BertModelHeadsMixin, BertOutputAdaptersMixin, BertSelfOutputAdaptersMixin
class DistilBertSelfAttentionAdaptersModule(BertSelfOutputAdaptersMixin, nn.Module):
"""Adds attention adapters to the Transformer module of DistilBert."""
def __init__(self, parent):
super().__init__()
# keep a reference to the parent module without registering as a submodule
object.__setattr__(self, "parent", parent)
self.config = parent.config
@property
def transformer_layer_norm(self):
return self.parent.sa_layer_norm
class DistilBertOutputAdaptersModule(BertOutputAdaptersMixin, nn.Module):
"""Adds output adapters to the Transformer module of DistilBert."""
def __init__(self, parent):
super().__init__()
# keep a reference to the parent module without registering as a submodule
object.__setattr__(self, "parent", parent)
self.config = parent.config
@property
def transformer_layer_norm(self):
return self.parent.output_layer_norm
class DistilBertTransfomerBlockAdaptersMixin:
"""Adds adapters to the TransformerBlock module of DistilBert."""
def _init_adapter_modules(self):
self.attention_adapters = DistilBertSelfAttentionAdaptersModule(self)
self.output_adapters = DistilBertOutputAdaptersModule(self)
self.attention_adapters._init_adapter_modules()
self.output_adapters._init_adapter_modules()
self.register_forward_pre_hook(self._adapter_block_pre_hook)
def add_fusion_layer(self, adapter_names):
self.attention_adapters.add_fusion_layer(adapter_names)
self.output_adapters.add_fusion_layer(adapter_names)
def add_adapter(self, adapter_name: str, layer_idx: int):
self.attention_adapters.add_adapter(adapter_name, layer_idx)
self.output_adapters.add_adapter(adapter_name, layer_idx)
def delete_adapter(self, adapter_name):
self.attention_adapters.delete_adapter(adapter_name)
self.output_adapters.delete_adapter(adapter_name)
def delete_fusion_layer(self, adapter_names):
self.attention_adapters.delete_fusion_layer(adapter_names)
self.output_adapters.delete_fusion_layer(adapter_names)
def enable_adapters(self, adapter_names: list, unfreeze_adapters: bool, unfreeze_attention: bool):
self.attention_adapters.enable_adapters(adapter_names, unfreeze_adapters, unfreeze_attention)
self.output_adapters.enable_adapters(adapter_names, unfreeze_adapters, unfreeze_attention)
# Makes sure the "parent" reference always points to the correct module.
# This is especially relevant when using torch data parallelism.
@staticmethod
def _adapter_block_pre_hook(module, input_tensors):
object.__setattr__(module.attention_adapters, "parent", module)
object.__setattr__(module.output_adapters, "parent", module)
class DistilBertTransformerAdaptersMixin(BertEncoderAdaptersMixin):
"""Adds adapters to the Transformer module of DistilBert."""
pass
class DistilBertModelAdaptersMixin(InvertibleAdaptersMixin, ModelAdaptersMixin):
"""Adds adapters to the DistilBert module."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def train_adapter(self, adapter_setup: Union[list, AdapterCompositionBlock]):
"""Sets the model into mode for training the given adapters."""
self.train()
self.freeze_model(True)
adapter_setup = parse_composition(adapter_setup)
self.transformer.enable_adapters(adapter_setup, True, False)
self.enable_invertible_adapters(adapter_setup.flatten())
# use the adapters to be trained by default in every forward pass
self.set_active_adapters(adapter_setup)
def train_adapter_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):
"""Sets the model into mode for training of adapter fusion determined by a list of adapter names."""
self.train()
self.freeze_model(True)
adapter_setup = parse_composition(adapter_setup)
self.transformer.enable_adapters(adapter_setup, unfreeze_adapters, True)
# use the adapters to be trained by default in every forward pass
self.set_active_adapters(adapter_setup)
def _add_adapter(self, adapter_name):
self.transformer.add_adapter(adapter_name)
self.add_invertible_adapter(adapter_name)
def _add_fusion_layer(self, adapter_names):
self.transformer.add_fusion_layer(adapter_names)
def _delete_adapter(self, adapter_name: str):
self.transformer.delete_adapter(adapter_name)
self.delete_invertible_adapter(adapter_name)
def _delete_fusion_layer(self, adapter_names):
self.transformer.delete_fusion_layer(adapter_names)
def get_fusion_regularization_loss(self):
reg_loss = 0.0
target = torch.zeros((self.config.hidden_size, self.config.hidden_size)).fill_diagonal_(1.0).to(self.device)
for _, v in self.transformer.layer._modules.items():
for _, layer_fusion in v.output_adapters.adapter_fusion_layer.items():
if hasattr(layer_fusion, "value"):
reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()
for _, layer_fusion in v.attention_adapters.adapter_fusion_layer.items():
if hasattr(layer_fusion, "value"):
reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()
return reg_loss
def get_adapter(self, name):
return_adapters = {}
for idx, layer in enumerate(self.transformer.layer):
adapters = {
"attention": layer.attention_adapters.adapters,
"output": layer.output_adapters.adapters,
}
for key, adapt in adapters.items():
if hasattr(adapt, name):
if idx not in return_adapters:
return_adapters[idx] = {}
return_adapters[idx][key] = getattr(adapt, name)
return return_adapters
class DistilBertModelHeadsMixin(BertModelHeadsMixin):
"""Adds heads to a DistilBert model."""
pass
| [
"torch.zeros"
] | 1.0 | uunal/adapter-transformers | 73a95a75f803e8fd243fc3d55ff3a9d557891377 |
1.4 | import torch
from .eval_reid import eval_func
def euclidean_dist(x, y):
    """Compute pairwise Euclidean distances between the rows of x [m, d] and y [n, d]."""
    m, n = x.size(0), y.size(0)
    # ||x - y||^2 = ||x||^2 + ||y||^2 - 2 * x . y
    xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
    yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
    dist = xx + yy
    # addmm_ with positional beta/alpha (old-style signature): dist = 1 * dist - 2 * x @ y.t()
    dist.addmm_(1, -2, x, y.t())
    # clamp guards against small negative values from floating-point error before the sqrt
    dist = dist.clamp(min=1e-12).sqrt()
    return dist
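# Editor's note: illustrative sketch, not part of the original repo. A quick sanity check of
# euclidean_dist against torch.cdist; call _check_euclidean_dist() manually if desired.
def _check_euclidean_dist():
    x, y = torch.randn(4, 128), torch.randn(6, 128)
    d_fast = euclidean_dist(x, y)
    d_ref = torch.cdist(x, y, p=2)
    assert torch.allclose(d_fast, d_ref, atol=1e-4), (d_fast - d_ref).abs().max()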
| [
"torch.pow"
] | 1.4.0 | beesk135/ReID-Survey | d1467c0ce5d3ca78640196360a05df9ff9f9f42a |
# Run MIL classification using pretrained CNN models
# Reference: 1.Campanella, G. et al. Clinical-grade computational pathology using weakly supervised
# deep learning on whole slide images. Nat Med 25, 1301–1309 (2019).
# doi:10.1038/s41591-019-0508-1. Available from http://www.nature.com/articles/s41591-019-0508-1
# The source codes of the referenced paper available at https://github.com/MSKCC-Computational-Pathology/MIL-nature-medicine-2019
# This code was modified by Shengjia Chen for our work.
import argparse
import os
import random
import sys
from pathlib import Path
from types import SimpleNamespace
from typing import Callable, Optional, Union
from urllib.error import HTTPError
import glob
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from PIL import Image
from pytorch_lightning.callbacks import (EarlyStopping, LearningRateMonitor,
ModelCheckpoint)
from pytorch_lightning.lite import LightningLite
from pytorch_lightning.loops import Loop
from skimage import io
from sklearn.preprocessing import LabelEncoder
from torch.utils.data import DataLoader, Dataset
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
from tqdm import tqdm
sys.path.append('/gpfs/scratch/sc9295/digPath/MSI_vs_MSS_Classification/Step1_Training_MSI_MSS')
from train_tile_level_classification import MSI_MSS_Module
from sklearn.metrics import (auc, confusion_matrix, f1_score, roc_auc_score,
roc_curve)
best_acc = 0
def inference(loader, model):
model.eval()
probs = torch.FloatTensor(len(loader.dataset))
with torch.no_grad():
for i, input in enumerate(loader):
# print(
# 'Inference\tEpoch: [{}/{}]\tBatch: [{}/{}]'.format(run+1, args.nepochs, i+1, len(loader)))
output = F.softmax(model(input), dim=1)
probs[i*args.batch_size:i*args.batch_size +
input.size(0)] = output.detach()[:, 1].clone()
return probs.cpu().numpy()
def train(run, loader, model, criterion, optimizer):
model.train()
running_loss = 0.
for i, (input, target) in enumerate(loader):
input = input.cuda()
target = target.cuda()
output = model(input)
loss = criterion(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()*input.size(0)
return running_loss/len(loader.dataset)
def calc_err(pred, real):
pred = np.array(pred)
real = np.array(real)
pos = np.equal(pred, real)
neq = np.not_equal(pred, real)
acc = float(pos.sum())/pred.shape[0]
err = float(neq.sum())/pred.shape[0]
fpr = float(np.logical_and(pred == 1, neq).sum())/(real == 0).sum()
fnr = float(np.logical_and(pred == 0, neq).sum())/(real == 1).sum()
return acc, err, fpr, fnr
def group_argtopk(groups, data, k=1):
    # groups: slide index of each tile; data: predicted probability of each tile
    k = min(k, len(data))
    order = np.lexsort((data, groups))
    groups = groups[order]
    data = data[order]
    index = np.empty(len(groups), 'bool')
    index[-k:] = True
    index[:-k] = groups[k:] != groups[:-k]
    return list(order[index])  # indices of the k highest-probability tiles within each slide
def group_max(groups, data, nmax):
    # slide-level score: keep the maximum tile probability of each slide
    out = np.empty(nmax)
    out[:] = np.nan
    order = np.lexsort((data, groups))
    groups = groups[order]
    data = data[order]
    index = np.empty(len(groups), 'bool')
    index[-1] = True
    index[:-1] = groups[1:] != groups[:-1]
    out[groups[index]] = data[index]
    return out
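# Editor's note: illustrative sketch, not part of the original repo. Tiny worked example of the
# two grouping helpers above: with groups = [0, 0, 0, 1, 1] and tile probabilities
# data = [0.1, 0.9, 0.3, 0.2, 0.8], group_argtopk(..., k=1) picks the highest-probability tile
# per slide (indices [1, 4]) and group_max collapses tiles to one score per slide ([0.9, 0.8]).
# Call _demo_grouping() manually to verify.
def _demo_grouping():
    groups = np.array([0, 0, 0, 1, 1])
    data = np.array([0.1, 0.9, 0.3, 0.2, 0.8])
    assert group_argtopk(groups, data, k=1) == [1, 4]
    assert np.allclose(group_max(groups, data, nmax=2), [0.9, 0.8])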
class MILdataset(Dataset):
def __init__(self, libraryfile_dir='', root_dir='', dataset_mode='Train', transform=None, subset_rate=None):
libraryfile_path = os.path.join(
libraryfile_dir, f'CRC_DX_{dataset_mode}_ALL.csv')
lib = pd.read_csv(libraryfile_path)
lib = lib if subset_rate is None else lib.sample(
frac=subset_rate, random_state=2022)
lib = lib.sort_values(['subject_id'], ignore_index=True)
lib.to_csv(os.path.join(libraryfile_dir,
f'{dataset_mode}_temporary.csv'))
slides = []
for i, name in enumerate(lib['subject_id'].unique()):
# sys.stdout.write(
# 'Slides: [{}/{}]\r'.format(i+1, len(lib['subject_id'].unique())))
# sys.stdout.flush()
slides.append(name)
# Flatten grid
grid = []
slideIDX = []
for i, g in enumerate(lib['subject_id'].unique()):
tiles = lib[lib['subject_id'] == g]['slice_id']
grid.extend(tiles)
slideIDX.extend([i]*len(tiles))
# print('Number of tiles: {}'.format(len(grid)))
self.dataframe = self.load_data_and_get_class(lib)
self.slidenames = list(lib['subject_id'].values)
self.slides = slides
self.targets = self.dataframe['Class']
self.grid = grid
self.slideIDX = slideIDX
self.transform = transform
self.root_dir = root_dir
self.dset = f"CRC_DX_{dataset_mode}"
def setmode(self, mode):
self.mode = mode
def maketraindata(self, idxs):
self.t_data = [(self.slideIDX[x], self.grid[x],
self.targets[x]) for x in idxs]
def shuffletraindata(self):
self.t_data = random.sample(self.t_data, len(self.t_data))
def load_data_and_get_class(self, df):
df.loc[df['label'] == 'MSI', 'Class'] = 1
df.loc[df['label'] == 'MSS', 'Class'] = 0
return df
def __getitem__(self, index):
if self.mode == 1:
slideIDX = self.slideIDX[index]
tile_id = self.grid[index]
slide_id = self.slides[slideIDX]
img_name = "blk-{}-{}.png".format(tile_id, slide_id)
target = self.targets[index]
label = 'CRC_DX_MSIMUT' if target == 1 else 'CRC_DX_MSS'
img_path = os.path.join(self.root_dir, self.dset, label, img_name)
img = io.imread(img_path)
if self.transform is not None:
img = self.transform(img)
return img
elif self.mode == 2:
slideIDX, tile_id, target = self.t_data[index]
slide_id = self.slides[slideIDX]
label = 'CRC_DX_MSIMUT' if target == 1 else 'CRC_DX_MSS'
img_name = "blk-{}-{}.png".format(tile_id, slide_id)
img_path = os.path.join(self.root_dir, self.dset, label, img_name)
img = io.imread(img_path)
if self.transform is not None:
img = self.transform(img)
return img, target
def __len__(self):
if self.mode == 1:
return len(self.grid)
elif self.mode == 2:
return len(self.t_data)
class Lite(LightningLite):
def run(self, args):
global best_acc
print(args)
self.seed_everything(2022)
model_name = args.model_name
sample_rate = args.sample_rate
ckpt_path = os.path.join(args.model_path, f'{args.model_name}_bs{args.batch_size}_lr{args.learning_rate}')
ckpt_file_path = glob.glob(os.path.join(ckpt_path,'*.ckpt'))[0]
model = MSI_MSS_Module.load_from_checkpoint(ckpt_file_path)
optimizer = torch.optim.AdamW(
model.parameters(), lr=args.learning_rate, weight_decay=1e-4)
if args.weights == 0.5:
criterion = nn.CrossEntropyLoss()
else:
w = torch.Tensor([1-args.weights, args.weights])
criterion = nn.CrossEntropyLoss(w)
# Scale model and optimizers
model, optimizer = self.setup(model, optimizer, move_to_device=True)
DATA_MEANS = [0.485, 0.456, 0.406]
DATA_STD = [0.229, 0.224, 0.225]
train_transform = transforms.Compose([
transforms.ToPILImage(),
transforms.ToTensor(),
transforms.RandomHorizontalFlip(),
transforms.Normalize(DATA_MEANS, DATA_STD)])
test_transform = transforms.Compose([
transforms.ToPILImage(),
transforms.ToTensor(),
transforms.Normalize(DATA_MEANS, DATA_STD)])
train_dataset = MILdataset(
args.lib_dir, args.root_dir, 'Train', transform=train_transform, subset_rate=sample_rate)
val_dataset = MILdataset(
args.lib_dir, args.root_dir, 'Val', transform=test_transform, subset_rate=sample_rate)
test_dataset = MILdataset(
args.lib_dir, args.root_dir, 'Test', transform=test_transform, subset_rate=sample_rate)
train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size,
shuffle=False, num_workers=args.num_workers, pin_memory=True)
val_dataloader = DataLoader(val_dataset, batch_size=args.batch_size,
shuffle=False, num_workers=args.num_workers, pin_memory=True)
test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size,
shuffle=False, num_workers=args.num_workers, pin_memory=True)
train_dataloader, val_dataloader, test_dataloader = self.setup_dataloaders(
train_dataloader, val_dataloader, test_dataloader, move_to_device=True)
# open output file
version_name = f'MIL_{model_name}_bs{args.batch_size}_lr{args.learning_rate}_w{args.weights}_k{args.k}_output'
# logger
output_path = os.path.join(args.output_path,version_name)
writer = SummaryWriter(output_path)
for epoch in tqdm(range(args.nepochs)):
train_dataset.setmode(1)
# print("train_set_len:", len(train_dataloader.dataset))
probs = inference(train_dataloader, model)
# return the indices of topk tile(s) in each slides
topk = group_argtopk(
np.array(train_dataset.slideIDX), probs, args.k)
train_dataset.maketraindata(topk)
train_dataset.shuffletraindata()
train_dataset.setmode(2)
model.train()
running_loss = 0.
for i, (input, target) in enumerate(train_dataloader):
output = model(input)
loss = criterion(output, target.long())
optimizer.zero_grad()
self.backward(loss)
optimizer.step()
running_loss += loss.item()*input.size(0)
train_loss = running_loss/len(train_dataloader.dataset)
print(
'Training\tEpoch: [{}/{}]\tLoss: {}'.format(epoch+1, args.nepochs, train_loss))
writer.add_scalar('train_loss', train_loss, epoch+1)
# Validation
if (epoch+1) % args.test_every == 0:
val_dataset.setmode(1)
probs = inference(val_dataloader, model)
maxs = group_max(np.array(val_dataset.slideIDX),
probs, len(val_dataset.targets))
pred = [1 if x >= 0.5 else 0 for x in probs]
val_acc, err, fpr, fnr = calc_err(pred, val_dataset.targets)
print('Validation\tEpoch: [{}/{}]\t ACC: {}\tError: {}\tFPR: {}\tFNR: {}'.format(
epoch+1, args.nepochs, val_acc, err, fpr, fnr))
writer.add_scalar('val_acc', val_acc, epoch+1)
writer.add_scalar('fpr', fpr, epoch+1)
writer.add_scalar('fnr', fnr, epoch+1)
# Save best model
err = (fpr+fnr)/2.
if 1-err >= best_acc:
best_acc = 1-err
obj = {
'epoch': epoch+1,
'state_dict': model.state_dict(),
'best_acc': best_acc,
'optimizer': optimizer.state_dict()
}
torch.save(obj, os.path.join(output_path, 'checkpoint_best.pth'))
# test
ch = torch.load(os.path.join(output_path,'checkpoint_best.pth'))
# load params
model.load_state_dict(ch['state_dict'])
model = model.cuda()
cudnn.benchmark = True
train_dataset.setmode(1)
val_dataset.setmode(1)
test_dataset.setmode(1)
# Train
probs = inference(train_dataloader, model)
maxs = group_max(np.array(train_dataset.slideIDX), probs, len(train_dataset.targets))
fp = open(os.path.join(output_path, f'Train_{version_name}.csv'), 'w')
fp.write('slides,tiles,target,prediction,probability\n')
for slides, tiles, target, prob in zip(train_dataset.slidenames, train_dataset.grid, train_dataset.targets, probs):
fp.write('{},{},{},{},{}\n'.format(slides, tiles, target, int(prob>=0.5), prob))
fp.close()
# Val
probs = inference(val_dataloader, model)
maxs = group_max(np.array(val_dataset.slideIDX), probs, len(val_dataset.targets))
fp = open(os.path.join(output_path, f'Val_{version_name}.csv'), 'w')
fp.write('slides,tiles,target,prediction,probability\n')
for slides, tiles, target, prob in zip(val_dataset.slidenames, val_dataset.grid, val_dataset.targets, probs):
fp.write('{},{},{},{},{}\n'.format(slides, tiles, target, int(prob>=0.5), prob))
fp.close()
# Test
probs = inference(test_dataloader, model)
maxs = group_max(np.array(test_dataset.slideIDX), probs, len(test_dataset.targets))
fp = open(os.path.join(output_path, f'Test_{version_name}.csv'), 'w')
fp.write('slides,tiles,target,prediction,probability\n')
for slides, tiles, target, prob in zip(test_dataset.slidenames, test_dataset.grid, test_dataset.targets, probs):
fp.write('{},{},{},{},{}\n'.format(slides, tiles, target, int(prob>=0.5), prob))
fp.close()
pred = [1 if x >= 0.5 else 0 for x in probs]
        test_acc, err, fpr, fnr = calc_err(pred, test_dataset.targets)
test_f1_score = f1_score(test_dataset.targets, pred, average='binary')
try:
test_auroc_score = roc_auc_score(test_dataset.targets, probs)
writer.add_scalar("test_auroc_score", test_auroc_score)
except ValueError:
writer.add_scalar('test_auroc_score', .0)
writer.add_scalar('test_f1_score', test_f1_score)
writer.add_scalar('test_acc', test_acc)
def main(args):
Lite(devices="auto", accelerator="auto").run(args)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--root_dir",
type=Path,
required=True,
help="root directory of dataset",
)
parser.add_argument(
"--lib_dir",
type=Path,
required=True,
help="root directory of libraryfile",
)
parser.add_argument(
"--model_path",
type=Path,
required=True,
help="root directory of pretrained models",
)
parser.add_argument(
"--output_path",
type=Path,
required=True,
help="output directory",
)
parser.add_argument(
"--model_name",
default='alexnet',
choices=('resnet18', 'resnet34', 'alexnet', 'vgg',
'squeezenet', 'densenet', 'inception'),
type=str,
help="model use for train",
)
parser.add_argument(
"--sample_rate",
default=1,
type=float,
help="undersample rate",
)
parser.add_argument(
"--batch_size",
default=128,
type=int,
help="batch size",
)
parser.add_argument(
"--learning_rate",
default=1e-3,
type=float,
help="learning rate",
)
parser.add_argument(
"--num_workers",
default=0,
type=int,
required=True,
help="number of workers",
)
parser.add_argument(
"--nepochs",
default=50,
type=int,
help="training epoch",
)
parser.add_argument(
'--test_every',
default=1,
type=int,
        help='test on val every (default: 1)')
parser.add_argument(
"--weights",
default=0.5,
type=float,
help="unbalanced positive class weight (default: 0.5, balanced classes)",
)
parser.add_argument(
"--k",
default=1,
type=int,
help="top k tiles are assumed to be of the same class as the slide (default: 1, standard MIL)",
)
args = parser.parse_args()
main(args)
| [
"torch.no_grad",
"torch.nn.CrossEntropyLoss",
"torch.utils.data.DataLoader",
"torch.Tensor",
"torch.utils.tensorboard.SummaryWriter"
] | 1.9.0 | Gaskell-1206/MSI_vs_MSS_Classification | be6fd8a6961624367b2bb0e1299219e940f6f418 |
1.4 | """ COCO transforms (quick and dirty)
Hacked together by Ross Wightman
"""
import torch
from PIL import Image
import numpy as np
import random
import math
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5)
IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5)
class ImageToNumpy:
def __call__(self, pil_img, annotations: dict):
np_img = np.array(pil_img, dtype=np.uint8)
if np_img.ndim < 3:
np_img = np.expand_dims(np_img, axis=-1)
np_img = np.moveaxis(np_img, 2, 0) # HWC to CHW
return np_img, annotations
class ImageToTensor:
def __init__(self, dtype=torch.float32):
self.dtype = dtype
def __call__(self, pil_img, annotations: dict):
np_img = np.array(pil_img, dtype=np.uint8)
if np_img.ndim < 3:
np_img = np.expand_dims(np_img, axis=-1)
np_img = np.moveaxis(np_img, 2, 0) # HWC to CHW
return torch.from_numpy(np_img).to(dtype=self.dtype), annotations
def _pil_interp(method):
if method == 'bicubic':
return Image.BICUBIC
elif method == 'lanczos':
return Image.LANCZOS
elif method == 'hamming':
return Image.HAMMING
else:
# default bilinear, do we want to allow nearest?
return Image.BILINEAR
_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC)
def clip_boxes_(boxes, img_size):
height, width = img_size
clip_upper = np.array([height, width] * 2, dtype=boxes.dtype)
np.clip(boxes, 0, clip_upper, out=boxes)
def clip_boxes(boxes, img_size):
clipped_boxes = boxes.copy()
clip_boxes_(clipped_boxes, img_size)
return clipped_boxes
def _size_tuple(size):
if isinstance(size, int):
return size, size
else:
assert len(size) == 2
return size
class ResizePad:
def __init__(self, target_size: int, interpolation: str = 'bilinear', fill_color: tuple = (0, 0, 0)):
self.target_size = _size_tuple(target_size)
self.interpolation = interpolation
self.fill_color = fill_color
def __call__(self, img, anno: dict):
width, height = img.size
img_scale_y = self.target_size[0] / height
img_scale_x = self.target_size[1] / width
img_scale = min(img_scale_y, img_scale_x)
scaled_h = int(height * img_scale)
scaled_w = int(width * img_scale)
new_img = Image.new("RGB", (self.target_size[1], self.target_size[0]), color=self.fill_color)
interp_method = _pil_interp(self.interpolation)
img = img.resize((scaled_w, scaled_h), interp_method)
new_img.paste(img)
if 'bbox' in anno:
# FIXME haven't tested this path since not currently using dataset annotations for train/eval
bbox = anno['bbox']
bbox[:, :4] *= img_scale
clip_boxes_(bbox, (scaled_h, scaled_w))
valid_indices = (bbox[:, :2] < bbox[:, 2:4]).all(axis=1)
anno['bbox'] = bbox[valid_indices, :]
anno['cls'] = anno['cls'][valid_indices]
anno['img_scale'] = 1. / img_scale # back to original
return new_img, anno
class RandomResizePad:
def __init__(self, target_size: int, scale: tuple = (0.1, 2.0), interpolation: str = 'random',
fill_color: tuple = (0, 0, 0)):
self.target_size = _size_tuple(target_size)
self.scale = scale
if interpolation == 'random':
self.interpolation = _RANDOM_INTERPOLATION
else:
self.interpolation = _pil_interp(interpolation)
self.fill_color = fill_color
def _get_params(self, img):
# Select a random scale factor.
scale_factor = random.uniform(*self.scale)
scaled_target_height = scale_factor * self.target_size[0]
scaled_target_width = scale_factor * self.target_size[1]
# Recompute the accurate scale_factor using rounded scaled image size.
width, height = img.size
img_scale_y = scaled_target_height / height
img_scale_x = scaled_target_width / width
img_scale = min(img_scale_y, img_scale_x)
# Select non-zero random offset (x, y) if scaled image is larger than target size
scaled_h = int(height * img_scale)
scaled_w = int(width * img_scale)
offset_y = scaled_h - self.target_size[0]
offset_x = scaled_w - self.target_size[1]
offset_y = int(max(0.0, float(offset_y)) * random.uniform(0, 1))
offset_x = int(max(0.0, float(offset_x)) * random.uniform(0, 1))
return scaled_h, scaled_w, offset_y, offset_x, img_scale
def __call__(self, img, anno: dict):
scaled_h, scaled_w, offset_y, offset_x, img_scale = self._get_params(img)
if isinstance(self.interpolation, (tuple, list)):
interpolation = random.choice(self.interpolation)
else:
interpolation = self.interpolation
img = img.resize((scaled_w, scaled_h), interpolation)
right, lower = min(scaled_w, offset_x + self.target_size[1]), min(scaled_h, offset_y + self.target_size[0])
img = img.crop((offset_x, offset_y, right, lower))
new_img = Image.new("RGB", (self.target_size[1], self.target_size[0]), color=self.fill_color)
new_img.paste(img)
if 'bbox' in anno:
# FIXME not fully tested
bbox = anno['bbox'].copy() # FIXME copy for debugger inspection, back to inplace
bbox[:, :4] *= img_scale
box_offset = np.stack([offset_y, offset_x] * 2)
bbox -= box_offset
clip_boxes_(bbox, (scaled_h, scaled_w))
valid_indices = (bbox[:, :2] < bbox[:, 2:4]).all(axis=1)
anno['bbox'] = bbox[valid_indices, :]
anno['cls'] = anno['cls'][valid_indices]
anno['img_scale'] = 1. / img_scale # back to original
return new_img, anno
class RandomFlip:
def __init__(self, horizontal=True, vertical=False, prob=0.5):
self.horizontal = horizontal
self.vertical = vertical
self.prob = prob
def _get_params(self):
do_horizontal = random.random() < self.prob if self.horizontal else False
do_vertical = random.random() < self.prob if self.vertical else False
return do_horizontal, do_vertical
def __call__(self, img, annotations: dict):
do_horizontal, do_vertical = self._get_params()
width, height = img.size
def _fliph(bbox):
x_max = width - bbox[:, 1]
x_min = width - bbox[:, 3]
bbox[:, 1] = x_min
bbox[:, 3] = x_max
def _flipv(bbox):
y_max = height - bbox[:, 0]
y_min = height - bbox[:, 2]
bbox[:, 0] = y_min
bbox[:, 2] = y_max
if do_horizontal and do_vertical:
img = img.transpose(Image.ROTATE_180)
if 'bbox' in annotations:
_fliph(annotations['bbox'])
_flipv(annotations['bbox'])
elif do_horizontal:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
if 'bbox' in annotations:
_fliph(annotations['bbox'])
elif do_vertical:
img = img.transpose(Image.FLIP_TOP_BOTTOM)
if 'bbox' in annotations:
_flipv(annotations['bbox'])
return img, annotations
def resolve_fill_color(fill_color, img_mean=IMAGENET_DEFAULT_MEAN):
if isinstance(fill_color, tuple):
assert len(fill_color) == 3
fill_color = fill_color
else:
try:
int_color = int(fill_color)
fill_color = (int_color,) * 3
except ValueError:
assert fill_color == 'mean'
fill_color = tuple([int(round(255 * x)) for x in img_mean])
return fill_color
class Compose:
def __init__(self, transforms: list):
self.transforms = transforms
def __call__(self, img, annotations: dict):
for t in self.transforms:
img, annotations = t(img, annotations)
return img, annotations
def transforms_coco_eval(
img_size=224,
interpolation='bilinear',
use_prefetcher=False,
fill_color='mean',
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD):
fill_color = resolve_fill_color(fill_color, mean)
image_tfl = [
ResizePad(
target_size=img_size, interpolation=interpolation, fill_color=fill_color),
ImageToNumpy(),
]
assert use_prefetcher, "Only supporting prefetcher usage right now"
image_tf = Compose(image_tfl)
return image_tf
def transforms_coco_train(
img_size=224,
interpolation='random',
use_prefetcher=False,
fill_color='mean',
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD):
fill_color = resolve_fill_color(fill_color, mean)
image_tfl = [
RandomFlip(horizontal=True, prob=0.5),
RandomResizePad(
target_size=img_size, interpolation=interpolation, fill_color=fill_color),
ImageToNumpy(),
]
assert use_prefetcher, "Only supporting prefetcher usage right now"
image_tf = Compose(image_tfl)
return image_tf
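# Editor's note: illustrative usage sketch, not part of the original repo. The transforms here
# operate on (PIL image, annotation dict) pairs rather than on images alone; call
# _demo_coco_eval_transform() manually to see the resulting shapes.
def _demo_coco_eval_transform():
    img = Image.new('RGB', (640, 480))
    anno = {'bbox': np.array([[10., 20., 100., 200.]]), 'cls': np.array([1])}
    tf = transforms_coco_eval(img_size=512, use_prefetcher=True)
    np_img, anno = tf(img, anno)
    print(np_img.shape, anno['img_scale'])   # (3, 512, 512) and the inverse of the resize factor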
| [
"torch.from_numpy"
] | 1.4.0 | saikrishna-pallerla/efficientdet-pytorch | dc7b790f537d28476a26af6f793acc4757becd0d |
1.8 | # Standard Library
import json
import os
import shutil
# Import from third library
import torch
# Import from local
from .log_helper import default_logger as logger
from .registry_factory import SAVER_REGISTRY
__all__ = ['Saver']
@SAVER_REGISTRY.register('base')
class Saver(object):
def __init__(self, save_cfg, yml_path=None, work_dir='./'):
# checkpoint dir
self.save_cfg = self.prepend_work_dir(save_cfg, work_dir)
self.work_dir = work_dir
self.save_dir = save_cfg['save_dir']
os.makedirs(self.save_dir, exist_ok=True)
if yml_path is not None and 's3://' not in yml_path: # TODO, save cpeh data
yml_name = os.path.basename(yml_path)
dst_path = os.path.join(self.save_dir, yml_name)
shutil.copy(yml_path, dst_path)
self.auto_resume = self.save_cfg.get('auto_resume', False)
self.running_config_file = os.path.join(self.save_dir, 'running_config.json')
def prepend_work_dir(self, save_cfg, work_dir):
def osp(path):
return os.path.join(work_dir, path)
save_cfg['save_dir'] = osp(save_cfg['save_dir'])
save_cfg['results_dir'] = osp(save_cfg['results_dir'])
return save_cfg
@staticmethod
def get_model_from_ckpt(ckpt_path):
return Saver.load_checkpoint(ckpt_path)['model']
def load_pretrain_or_resume(self):
if self.auto_resume:
last_checkpoint_path = self.find_last_checkpoint()
if last_checkpoint_path is not None:
logger.warning('Load checkpoint from {}'.format(last_checkpoint_path))
return self.load_checkpoint(last_checkpoint_path)
else:
logger.warning('Not found any valid checkpoint yet')
if 'resume_model' in self.save_cfg:
logger.warning('Load checkpoint from {}'.format(self.save_cfg['resume_model']))
state = self.load_checkpoint(self.save_cfg['resume_model'])
return state
elif 'pretrain_model' in self.save_cfg:
state = self.load_checkpoint(self.save_cfg['pretrain_model'])
logger.warning('Load checkpoint from {}'.format(self.save_cfg['pretrain_model']))
output = {}
if 'ema' in state:
if "ema_state_dict" in state['ema']:
logger.info("Load ema pretrain model")
st = state['ema']['ema_state_dict']
else:
st = state['model']
else:
st = state['model']
output['model'] = st
return output
else:
            logger.warning('Load nothing! No pretrained or resume weights provided')
return {'model': {}}
@staticmethod
def load_checkpoint(ckpt_path):
"""Load state_dict from checkpoint"""
def remove_prefix(state_dict, prefix):
"""Old style model is stored with all names of parameters share common prefix 'module.'"""
f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
return {f(key): value for key, value in state_dict.items()}
# assert os.path.exists(ckpt_path), f'No such file: {ckpt_path}'
device = torch.cuda.current_device()
ckpt_dict = torch.load(ckpt_path, map_location=lambda storage, loc: storage.cuda(device))
if 'model' in ckpt_dict:
state_dict = ckpt_dict['model']
elif 'state_dict' in ckpt_dict:
state_dict = ckpt_dict['state_dict']
else:
state_dict = ckpt_dict
state_dict = remove_prefix(state_dict, 'module.')
ckpt_dict['model'] = state_dict
return ckpt_dict
def lns_latest_ckpt(self, ckpt_path, new_path):
try:
pwd = os.getcwd()
absolute_ckpt_path = os.path.join(pwd, ckpt_path)
absolute_new_path = os.path.join(pwd, new_path)
if os.path.exists(absolute_new_path):
os.system(f'rm {absolute_new_path}')
os.system(f"ln -s {absolute_ckpt_path} {absolute_new_path}")
except Exception as e:
logger.warning(f'Failed to ln -s {ckpt_path} {new_path}')
logger.warning(e)
def save(self, epoch, iter, **kwargs):
"""Save model checkpoint for one epoch"""
os.makedirs(self.save_dir, exist_ok=True)
# Assume we warmup for a epochs and training a+b epochs in total,
# then our checkpoints are named of ckpt_e{-a+1}.pth ~ ckpt_e{b}.pth
# if best in kwargs, we save the best ckpt as ckpt_best.path.auto
if 'suffix' in kwargs:
suffix = kwargs['suffix']
ckpt_path = os.path.join(self.save_dir, 'ckpt_e{}-{}.pth'.format(epoch, suffix))
elif 'auto_save' in kwargs:
ckpt_path = os.path.join(self.save_dir, 'ckpt_{}.pth'.format(kwargs['auto_save']))
else:
ckpt_path = os.path.join(self.save_dir, 'ckpt_e{}.pth'.format(epoch))
# since epoch not in kwargs
kwargs['epoch'] = epoch
kwargs['iter'] = iter
kwargs['metric_val'] = kwargs.get('metric_val', -1)
lns_latest_ckpt = kwargs.pop('lns', True)
torch.save(kwargs, ckpt_path)
if lns_latest_ckpt:
latest_path = os.path.join(self.save_dir, 'ckpt_latest.pth')
self.lns_latest_ckpt(ckpt_path, latest_path)
return ckpt_path
def save_model_arch(self, model):
"""Save model structure"""
os.makedirs(self.save_dir, exist_ok=True)
meta_path = os.path.join(self.save_dir, 'model_arch.txt')
with open(meta_path, 'w') as fid:
fid.write(str(model))
def save_running_config(self, config):
with open(self.running_config_file, 'w') as rcf:
json.dump(config, rcf, indent=2)
def find_last_checkpoint(self):
last_ckpt_path = os.path.join(self.save_dir, "ckpt_latest.pth")
if os.path.exists(last_ckpt_path):
return last_ckpt_path
else:
return None
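# Editor's note: illustrative sketch, not part of the original repo. Minimal demonstration of
# the 'module.' prefix stripping done inside Saver.load_checkpoint, which makes checkpoints
# saved from DataParallel / DistributedDataParallel wrappers loadable into a plain model.
def _demo_remove_prefix():
    state_dict = {'module.backbone.conv1.weight': 0, 'head.fc.bias': 1}
    f = lambda x: x.split('module.', 1)[-1] if x.startswith('module.') else x
    stripped = {f(k): v for k, v in state_dict.items()}
    assert set(stripped) == {'backbone.conv1.weight', 'head.fc.bias'}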
| [
"torch.save",
"torch.cuda.current_device"
] | 1.8.1 | scott-mao/EOD | f10e64de86c0f356ebf5c7e923f4042eec4207b1 |
1.8 | # Standard Library
import math
from collections import defaultdict
# Import from third library
import numpy as np
import torch
from torch.utils.data.sampler import Sampler
from eod.utils.env.dist_helper import env, get_rank, get_world_size
from eod.utils.general.log_helper import default_logger as logger
from eod.utils.general.registry_factory import SAMPLER_REGISTRY
__all__ = ['DistributedSampler', 'LocalSampler', 'TestDistributedSampler']
@SAMPLER_REGISTRY.register('dist')
class DistributedSampler(Sampler):
"""
Sampler that restricts data loading to a subset of the dataset.
.. note:
Dataset is assumed to be of constant size.
Arguments:
dataset (Dataset): dataset used for sampling.
num_replicas (int): number of processes participating in distributed training, optional.
rank (int): rank of the current process within num_replicas, optional.
"""
def __init__(self, dataset, num_replicas=None, rank=None, fix_seed=False):
"""
Arguments:
- dataset (:obj:`dataset`): instance of dataset object
"""
if num_replicas is None:
num_replicas = env.world_size
if rank is None:
rank = env.rank
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
self.fix_seed = fix_seed
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch * (not self.fix_seed))
indices = list(torch.randperm(len(self.dataset), generator=g))
# add extra samples to make it evenly divisible
# indices += indices[:(self.total_size - len(indices))]
padding_size = self.total_size - len(indices)
if padding_size <= len(indices):
indices += indices[:padding_size]
else:
indices += (indices * math.ceil(padding_size / len(indices)))[:padding_size]
assert len(indices) == self.total_size
# subsample
offset = self.num_samples * self.rank
indices = indices[offset:offset + self.num_samples]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
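# Editor's note: illustrative usage sketch, not part of the original repo. The sampler is meant
# to be paired with a DataLoader, and set_epoch() should be called before every epoch so each
# rank draws a different, deterministic shuffle. Call _demo_distributed_sampler() manually.
def _demo_distributed_sampler():
    import torch.utils.data as tud
    toy = tud.TensorDataset(torch.arange(10))
    sampler = DistributedSampler(toy, num_replicas=2, rank=0)
    loader = tud.DataLoader(toy, batch_size=2, sampler=sampler)
    for epoch in range(2):
        sampler.set_epoch(epoch)  # reshuffle deterministically per epoch
        for (batch,) in loader:
            pass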
@SAMPLER_REGISTRY.register('local')
class LocalSampler(Sampler):
def __init__(self, dataset, rank=None):
if rank is None:
rank = env.rank
self.dataset = dataset
self.rank = rank
self.epoch = 0
self.num_samples = len(self.dataset)
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch + self.rank)
indices = list(torch.randperm(self.num_samples, generator=g))
return iter(indices)
def set_epoch(self, epoch):
self.epoch = epoch
def __len__(self):
return self.num_samples
@SAMPLER_REGISTRY.register('dist_test')
class TestDistributedSampler(Sampler):
"""
Sampler that restricts data loading to a subset of the dataset, but won't align the total data
    size to be divisible by world_size because this will lead to duplicate detection results
"""
def __init__(self, dataset, num_replicas=None, rank=None):
"""
Arguments:
- dataset (:obj:`dataset`): instance of dataset object
"""
if num_replicas is None:
num_replicas = env.world_size
if rank is None:
rank = env.rank
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = len(range(rank, len(self.dataset), num_replicas))
self.total_size = len(self.dataset)
def __iter__(self):
indices = torch.arange(len(self.dataset))
indices = indices[self.rank::self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
@SAMPLER_REGISTRY.register('repeat_factor')
class DistributedRepeatFactorReSampler(Sampler):
""" Suitable for long-tail distribution datasets.
Refer to `LVIS <https://arxiv.org/abs/1908.03195>`_ paper
"""
def __init__(self, dataset, t=0.001, ri_mode='random_round', pn=0.5,
ri_if_empty=1, num_replicas=None, static_size=True, rank=None):
"""
Arguments:
- dataset (:obj:`Dataset`): dataset used for sampling.
- t (:obj:`float`): thresh- old that intuitively controls the point at which oversampling kicks in
- ri_mode (:obj:`str`): choices={floor, round, random_round, ceil, c_ceil_r_f_floor}, method to compute
repeat factor for one image
- pn (:obj:`float`): power number
- num_replicas (int): number of processes participating in distributed training, optional.
- rank (int): rank of the current process within num_replicas, optional.
"""
if num_replicas is None:
num_replicas = get_world_size()
if rank is None:
rank = get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
self.original_num_samples = self.num_samples
self.t = t
self.ri_mode = ri_mode
self.ri_if_empty = int(ri_if_empty)
self.pn = pn
self.static_size = static_size
self._prepare()
logger.info('init re-sampler, ri mode: {}'.format(self.ri_mode))
def _prepare(self):
# prepare re-sampling factor for category
rc = defaultdict(int)
img_num_per_class = defaultdict(int)
for cls, img_num in sorted(self.dataset.num_images_per_class.items()):
f = img_num / len(self.dataset)
img_num_per_class[cls] = img_num
rc[cls] = max(1, math.pow(self.t / f, self.pn))
logger.info('class id {}, image count {}, rc {}'.format(cls, img_num, rc[cls]))
self.rc = rc
def _compute_ri(self, img_index):
classes = self.dataset.get_image_classes(img_index)
ris = [self.rc[cls] for cls in classes]
if len(ris) == 0:
return self.ri_if_empty
if self.ri_mode == 'floor':
ri = int(max(ris))
elif self.ri_mode == 'round':
ri = round(max(ris))
elif self.ri_mode == 'random_round':
ri_max = max(ris)
p = ri_max - int(ri_max)
if np.random.rand() < p:
ri = math.ceil(ri_max)
else:
ri = int(ri_max)
elif self.ri_mode == 'ceil':
ri = math.ceil(max(ris))
elif self.ri_mode == 'c_ceil_r_f_floor':
max_ind = np.argmax(ris)
assert hasattr(self.dataset, 'lvis'), 'Only lvis dataset supportted for c_ceil_r_f_floor mode'
img_id = self.dataset.img_ids[img_index]
meta_annos = self.dataset.lvis.img_ann_map[img_id]
f = self.dataset.lvis.cats[meta_annos[max_ind]['category_id']]['frequency']
assert f in ['f', 'c', 'r']
if f in ['r', 'f']:
ri = int(max(ris))
else:
ri = math.ceil(max(ris))
else:
raise NotImplementedError
return ri
def _get_new_indices(self):
indices = []
for idx in range(len(self.dataset)):
ri = self._compute_ri(idx)
indices += [idx] * ri
logger.info('dataset size {}, indexes size {}'.format(len(self.dataset), len(indices)))
return indices
def __iter__(self):
# deterministically shuffle based on epoch
# generate a perm based using class-aware balance for this epoch
indices = self._get_new_indices()
# override num_sample total size
self.num_samples = int(math.ceil(len(indices) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
indices = np.random.RandomState(seed=self.epoch).permutation(np.array(indices))
indices = list(indices)
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
offset = self.num_samples * self.rank
indices = indices[offset:offset + self.num_samples]
assert len(indices) == self.num_samples
# convert to int because this array will be converted to torch.tensor,
        # but torch.as_tensor doesn't support numpy.int64
# a = torch.tensor(np.float64(1)) # works
# b = torch.tensor(np.int64(1)) # fails
indices = list(map(lambda x: int(x), indices))
return iter(indices)
def __len__(self):
return self.original_num_samples
def set_epoch(self, epoch):
self.epoch = epoch
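# Editor's note: illustrative sketch, not part of the original repo. Worked example of the repeat
# factor computed in _prepare(): with the defaults t=0.001 and pn=0.5, a class seen in a fraction
# f of all images gets rc = max(1, (t / f) ** 0.5), so a common class (f=0.01) keeps rc=1 while a
# rare class (f=0.00001) is oversampled with rc = sqrt(100) = 10. Call _demo_repeat_factor().
def _demo_repeat_factor(t=0.001, pn=0.5):
    for f in (0.01, 0.001, 0.00001):
        rc = max(1.0, math.pow(t / f, pn))
        print('f={:g} -> rc={:.2f}'.format(f, rc))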
| [
"torch.Generator",
"torch.randperm"
] | 1.8.1 | scott-mao/EOD | f10e64de86c0f356ebf5c7e923f4042eec4207b1 |
0.0 | from typing import Dict
import dgl
import dgl.function as fn # for graphs
import numpy as np
import torch
import torch.nn as nn
from dgl.nn.pytorch.glob import AvgPooling, MaxPooling
from dgl.nn.pytorch.softmax import edge_softmax
from einops import rearrange
from packaging import version
from torch import Tensor, einsum, broadcast_tensors, relu, sigmoid
from torch.nn import GELU
from torch.nn.functional import normalize
from torch.nn.parameter import Parameter
from project.utils.fibers import Fiber, fiber2head
from project.utils.from_se3cnn.utils_steerable import _basis_transformation_Q_J, get_spherical_from_cartesian_torch, \
precompute_sh
from project.utils.utils import fourier_encode_dist, batched_index_select
from project.utils.utils_profiling import profile # load before other local modules
# -------------------------------------------------------------------------------------------------------------------------------------
# Following code derived from SE(3)-Transformer (https://github.com/FabianFuchsML/se3-transformer-public/):
# -------------------------------------------------------------------------------------------------------------------------------------
@profile
def get_basis(Y, max_degree):
"""Precompute the SE(3)-equivariant weight basis.
This is called by get_basis_and_r().
Args:
Y: spherical harmonic dict, returned by utils_steerable.precompute_sh()
max_degree: non-negative int for degree of highest feature type
Returns:
dict of equivariant bases, keys are in form '<d_in><d_out>'
"""
device = Y[0].device
# No need to backprop through the basis construction
with torch.no_grad():
basis = {}
for d_in in range(max_degree + 1):
for d_out in range(max_degree + 1):
K_Js = []
for J in range(abs(d_in - d_out), d_in + d_out + 1):
# Get spherical harmonic projection matrices
Q_J = _basis_transformation_Q_J(J, d_in, d_out)
Q_J = Q_J.float().T.to(device)
# Create kernel from spherical harmonics
K_J = torch.matmul(Y[J], Q_J)
K_Js.append(K_J)
# Reshape so can take linear combinations with a dot product
size = (-1, 1, 2 * d_out + 1, 1, 2 * d_in + 1, 2 * min(d_in, d_out) + 1)
basis[f'{d_in},{d_out}'] = torch.stack(K_Js, -1).view(*size)
return basis
def get_basis_and_r(G, max_degree):
"""Return equivariant weight basis (basis) and internodal distances (r).
Call this function *once* at the start of each forward pass of the model.
It computes the equivariant weight basis, W_J^lk(x), and internodal
distances, needed to compute varphi_J^lk(x), of eqn 8 of
https://arxiv.org/pdf/2006.10503.pdf. The return values of this function
can be shared as input across all SE(3)-Transformer layers in a model.
Args:
G: DGL graph instance of type dgl.DGLGraph()
max_degree: non-negative int for degree of highest feature-type
Returns:
dict of equivariant bases, keys are in form '<d_in><d_out>'
vector of relative distances, ordered according to edge ordering of G
"""
# Relative positional encodings (vector)
r_ij = get_spherical_from_cartesian_torch(G.edata['d'])
# Spherical harmonic basis
Y = precompute_sh(r_ij, 2 * max_degree)
# Equivariant basis (dict['d_in><d_out>'])
basis = get_basis(Y, max_degree)
# Relative distances (scalar)
r = torch.sqrt(torch.sum(G.edata['d'] ** 2, -1, keepdim=True))
return basis, r
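# Editor's note: illustrative usage sketch, not part of the original repo. The basis and radial
# distances depend only on the graph geometry, so they are computed once per forward pass and
# shared by every equivariant layer; assuming `self.layers` holds the SE(3) blocks defined below
# and G.edata['d'] stores relative positions, a model forward would look roughly like:
#
#   basis, r = get_basis_and_r(G, max_degree=self.num_degrees - 1)
#   h = {'0': node_features}
#   for layer in self.layers:
#       h = layer(h, G=G, r=r, basis=basis)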
### SE(3) equivariant operations on graphs in DGL
class GConvSE3(nn.Module):
"""A tensor field network layer as a DGL module.
GConvSE3 stands for a Graph Convolution SE(3)-equivariant layer. It is the
equivalent of a linear layer in an MLP, a conv layer in a CNN, or a graph
conv layer in a GCN.
At each node, the activations are split into different "feature types",
indexed by the SE(3) representation type: non-negative integers 0, 1, 2, ..
"""
def __init__(self, f_in, f_out, self_interaction: bool = False, edge_dim: int = 0):
"""SE(3)-equivariant Graph Conv Layer
Args:
f_in: list of tuples [(multiplicities, type),...]
f_out: list of tuples [(multiplicities, type),...]
self_interaction: include self-interaction in convolution
edge_dim: number of dimensions for edge embedding
"""
super().__init__()
self.f_in = f_in
self.f_out = f_out
self.edge_dim = edge_dim
self.self_interaction = self_interaction
# Neighbor -> center weights
self.kernel_unary = nn.ModuleDict()
for (mi, di) in self.f_in.structure:
for (mo, do) in self.f_out.structure:
self.kernel_unary[f'({di},{do})'] = PairwiseConv(di, mi, do, mo, edge_dim=edge_dim)
# Center -> center weights
self.kernel_self = nn.ParameterDict()
if self_interaction:
for m_in, d_in in self.f_in.structure:
if d_in in self.f_out.degrees:
m_out = self.f_out.structure_dict[d_in]
W = nn.Parameter(torch.randn(1, m_out, m_in) / np.sqrt(m_in))
self.kernel_self[f'{d_in}'] = W
def __repr__(self):
return f'GConvSE3(structure={self.f_out}, self_interaction={self.self_interaction})'
def udf_u_mul_e(self, d_out):
"""Compute the convolution for a single output feature type.
This function is set up as a User Defined Function in DGL.
Args:
d_out: output feature type
Returns:
edge -> node function handle
"""
def fnc(edges):
# Neighbor -> center messages
msg = 0
for m_in, d_in in self.f_in.structure:
src = edges.src[f'{d_in}'].view(-1, m_in * (2 * d_in + 1), 1)
edge = edges.data[f'({d_in},{d_out})']
msg = msg + torch.matmul(edge, src)
msg = msg.view(msg.shape[0], -1, 2 * d_out + 1)
# Center -> center messages
if self.self_interaction:
if f'{d_out}' in self.kernel_self.keys():
dst = edges.dst[f'{d_out}']
W = self.kernel_self[f'{d_out}']
msg = msg + torch.matmul(W, dst)
return {'msg': msg.view(msg.shape[0], -1, 2 * d_out + 1)}
return fnc
@profile
def forward(self, h, G=None, r=None, basis=None, **kwargs):
"""Forward pass of the linear layer
Args:
G: minibatch of (homo)graphs
h: dict of features
r: inter-atomic distances
basis: pre-computed Q * Y
Returns:
tensor with new features [B, n_points, n_features_out]
"""
with G.local_scope():
# Add node features to local graph scope
for k, v in h.items():
G.ndata[k] = v
# Add edge features
if 'w' in G.edata.keys():
w = G.edata['w']
feat = torch.cat([w, r], -1)
else:
feat = torch.cat([r, ], -1)
for (mi, di) in self.f_in.structure:
for (mo, do) in self.f_out.structure:
etype = f'({di},{do})'
G.edata[etype] = self.kernel_unary[etype](feat, basis)
# Perform message-passing for each output feature type
for d in self.f_out.degrees:
G.update_all(self.udf_u_mul_e(d), fn.mean('msg', f'out{d}'))
return {f'{d}': G.ndata[f'out{d}'] for d in self.f_out.degrees}
class RadialFunc(nn.Module):
"""NN parameterized radial profile function."""
def __init__(self, num_freq, in_dim, out_dim, edge_dim: int = 0):
"""NN parameterized radial profile function.
Args:
num_freq: number of output frequencies
in_dim: multiplicity of input (num input channels)
out_dim: multiplicity of output (num output channels)
edge_dim: number of dimensions for edge embedding
"""
super().__init__()
self.num_freq = num_freq
self.in_dim = in_dim
self.mid_dim = 32
self.out_dim = out_dim
self.edge_dim = edge_dim
self.net = nn.Sequential(nn.Linear(self.edge_dim + 1, self.mid_dim),
BN(self.mid_dim),
nn.ReLU(),
nn.Linear(self.mid_dim, self.mid_dim),
BN(self.mid_dim),
nn.ReLU(),
nn.Linear(self.mid_dim, self.num_freq * in_dim * out_dim))
nn.init.kaiming_uniform_(self.net[0].weight)
nn.init.kaiming_uniform_(self.net[3].weight)
nn.init.kaiming_uniform_(self.net[6].weight)
def __repr__(self):
return f"RadialFunc(edge_dim={self.edge_dim}, in_dim={self.in_dim}, out_dim={self.out_dim})"
def forward(self, x):
y = self.net(x)
return y.view(-1, self.out_dim, 1, self.in_dim, 1, self.num_freq)
class PairwiseConv(nn.Module):
"""SE(3)-equivariant convolution between two single-type features"""
def __init__(self, degree_in: int, nc_in: int, degree_out: int,
nc_out: int, edge_dim: int = 0):
"""SE(3)-equivariant convolution between a pair of feature types.
This layer performs a convolution from nc_in features of type degree_in
to nc_out features of type degree_out.
Args:
degree_in: degree of input fiber
nc_in: number of channels on input
degree_out: degree of out order
nc_out: number of channels on output
edge_dim: number of dimensions for edge embedding
"""
super().__init__()
# Log settings
self.degree_in = degree_in
self.degree_out = degree_out
self.nc_in = nc_in
self.nc_out = nc_out
# Functions of the degree
self.num_freq = 2 * min(degree_in, degree_out) + 1
self.d_out = 2 * degree_out + 1
self.edge_dim = edge_dim
# Radial profile function
self.rp = RadialFunc(self.num_freq, nc_in, nc_out, self.edge_dim)
@profile
def forward(self, feat, basis):
# Get radial weights
R = self.rp(feat)
kernel = torch.sum(R * basis[f'{self.degree_in},{self.degree_out}'], -1)
return kernel.view(kernel.shape[0], self.d_out * self.nc_out, -1)
class G1x1SE3(nn.Module):
"""Graph Linear SE(3)-equivariant layer, equivalent to a 1x1 convolution.
This is equivalent to a self-interaction layer in TensorField Networks.
"""
def __init__(self, f_in, f_out, learnable=True):
"""SE(3)-equivariant 1x1 convolution.
Args:
f_in: input Fiber() of feature multiplicities and types
f_out: output Fiber() of feature multiplicities and types
"""
super().__init__()
self.f_in = f_in
self.f_out = f_out
# Linear mappings: 1 per output feature type
self.transform = nn.ParameterDict()
for m_out, d_out in self.f_out.structure:
m_in = self.f_in.structure_dict[d_out]
self.transform[str(d_out)] = nn.Parameter(torch.randn(m_out, m_in) / np.sqrt(m_in), requires_grad=learnable)
def __repr__(self):
return f"G1x1SE3(structure={self.f_out})"
def forward(self, features, **kwargs):
output = {}
for k, v in features.items():
if str(k) in self.transform.keys():
output[k] = torch.matmul(self.transform[str(k)], v)
return output
class GNormSE3(nn.Module):
"""Graph Norm-based SE(3)-equivariant nonlinearity.
Nonlinearities are important in SE(3) equivariant GCNs. They are also quite
expensive to compute, so it is convenient for them to share resources with
other layers, such as normalization. The general workflow is as follows:
> for feature type in features:
> norm, phase <- feature
> output = fnc(norm) * phase
where fnc: {R+}^m -> R^m is a learnable map from m norms to m scalars.
"""
def __init__(self, fiber, nonlin=nn.ReLU(inplace=True), num_layers: int = 0):
"""Initializer.
Args:
fiber: Fiber() of feature multiplicities and types
nonlin: nonlinearity to use everywhere
num_layers: non-negative number of linear layers in fnc
"""
super().__init__()
self.fiber = fiber
self.nonlin = nonlin
self.num_layers = num_layers
# Regularization for computing phase: gradients explode otherwise
self.eps = 1e-12
# Norm mappings: 1 per feature type
self.transform = nn.ModuleDict()
for m, d in self.fiber.structure:
self.transform[str(d)] = self._build_net(int(m))
def __repr__(self):
return f"GNormSE3(num_layers={self.num_layers}, nonlin={self.nonlin})"
def _build_net(self, m: int):
net = []
for i in range(self.num_layers):
net.append(BN(int(m)))
net.append(self.nonlin)
# TODO: implement cleaner init
net.append(nn.Linear(m, m, bias=(i == self.num_layers - 1)))
nn.init.kaiming_uniform_(net[-1].weight)
if self.num_layers == 0:
net.append(BN(int(m)))
net.append(self.nonlin)
return nn.Sequential(*net)
@profile
def forward(self, features, **kwargs):
output = {}
for k, v in features.items():
# Compute the norms and normalized features
# v shape: [...,m , 2*k+1]
norm = v.norm(2, -1, keepdim=True).clamp_min(self.eps).expand_as(v)
phase = v / norm
# Transform on norms
transformed = self.transform[str(k)](norm[..., 0]).unsqueeze(-1)
# Nonlinearity on norm
output[k] = (transformed * phase).view(*v.shape)
return output
class BN(nn.Module):
"""SE(3)-equvariant batch/layer normalization"""
def __init__(self, m):
"""SE(3)-equvariant batch/layer normalization
Args:
m: int for number of output channels
"""
super().__init__()
self.bn = nn.LayerNorm(m)
def forward(self, x):
return self.bn(x)
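# Editor's note: illustrative sketch, not part of the original repo. GNormSE3 only rescales each
# feature by a function of its norm, so rotating the input type-1 vectors by any orthogonal
# matrix rotates the output identically. Call _demo_gnorm_equivariance() manually to verify.
def _demo_gnorm_equivariance():
    fiber = Fiber(dictionary={1: 2})          # two type-1 (vector) channels
    layer = GNormSE3(fiber)
    feats = {'1': torch.randn(5, 2, 3)}       # [num_nodes, channels, 2 * 1 + 1]
    c, s = np.cos(0.3), np.sin(0.3)
    R = torch.tensor([[c, -s, 0.], [s, c, 0.], [0., 0., 1.]], dtype=torch.float32)
    out = layer(feats)['1']
    out_rot = layer({'1': feats['1'] @ R.T})['1']
    assert torch.allclose(out @ R.T, out_rot, atol=1e-4)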
class GConvSE3Partial(nn.Module):
"""Graph SE(3)-equivariant node -> edge layer"""
def __init__(self, f_in, f_out, edge_dim: int = 0):
"""SE(3)-equivariant partial convolution.
A partial convolution computes the inner product between a kernel and
each input channel, without summing over the result from each input
channel. This unfolded structure makes it amenable to be used for
computing the value-embeddings of the attention mechanism.
Args:
f_in: list of tuples [(multiplicities, type),...]
f_out: list of tuples [(multiplicities, type),...]
"""
super().__init__()
self.f_in = f_in
self.f_out = f_out
self.edge_dim = edge_dim
# Node -> edge weights
self.kernel_unary = nn.ModuleDict()
for (mi, di) in self.f_in.structure:
for (mo, do) in self.f_out.structure:
self.kernel_unary[f'({di},{do})'] = PairwiseConv(di, mi, do, mo, edge_dim=edge_dim)
def __repr__(self):
return f'GConvSE3Partial(structure={self.f_out})'
def udf_u_mul_e(self, d_out):
"""Compute the partial convolution for a single output feature type.
This function is set up as a User Defined Function in DGL.
Args:
d_out: output feature type
Returns:
node -> edge function handle
"""
def fnc(edges):
# Neighbor -> center messages
msg = 0
for m_in, d_in in self.f_in.structure:
src = edges.src[f'{d_in}'].view(-1, m_in * (2 * d_in + 1), 1)
edge = edges.data[f'({d_in},{d_out})']
msg = msg + torch.matmul(edge, src)
msg = msg.view(msg.shape[0], -1, 2 * d_out + 1)
return {f'out{d_out}': msg.view(msg.shape[0], -1, 2 * d_out + 1)}
return fnc
@profile
def forward(self, h, G=None, r=None, basis=None, **kwargs):
"""Forward pass of the linear layer
Args:
h: dict of node-features
G: minibatch of (homo)graphs
r: inter-atomic distances
basis: pre-computed Q * Y
Returns:
tensor with new features [B, n_points, n_features_out]
"""
with G.local_scope():
# Add node features to local graph scope
for k, v in h.items():
G.ndata[k] = v
# Add edge features
if 'w' in G.edata.keys():
w = G.edata['w'] # shape: [#edges_in_batch, #bond_types]
feat = torch.cat([w, r], -1)
else:
feat = torch.cat([r, ], -1)
for (mi, di) in self.f_in.structure:
for (mo, do) in self.f_out.structure:
etype = f'({di},{do})'
G.edata[etype] = self.kernel_unary[etype](feat, basis)
# Perform message-passing for each output feature type
for d in self.f_out.degrees:
G.apply_edges(self.udf_u_mul_e(d))
return {f'{d}': G.edata[f'out{d}'] for d in self.f_out.degrees}
class GMABSE3(nn.Module):
"""An SE(3)-equivariant multi-headed self-attention module for DGL graphs."""
def __init__(self, f_value: Fiber, f_key: Fiber, n_heads: int):
"""SE(3)-equivariant MAB (multi-headed attention block) layer.
Args:
f_value: Fiber() object for value-embeddings
f_key: Fiber() object for key-embeddings
n_heads: number of heads
"""
super().__init__()
self.f_value = f_value
self.f_key = f_key
self.n_heads = n_heads
self.new_dgl = version.parse(dgl.__version__) > version.parse('0.4.4')
def __repr__(self):
return f'GMABSE3(n_heads={self.n_heads}, structure={self.f_value})'
def udf_u_mul_e(self, d_out):
"""Compute the weighted sum for a single output feature type.
This function is set up as a User Defined Function in DGL.
Args:
d_out: output feature type
Returns:
edge -> node function handle
"""
def fnc(edges):
# Neighbor -> center messages
attn = edges.data['a']
value = edges.data[f'v{d_out}']
# Apply attention weights
msg = attn.unsqueeze(-1).unsqueeze(-1) * value
return {'m': msg}
return fnc
@profile
def forward(self, v, k: Dict = None, q: Dict = None, G=None, **kwargs):
"""Forward pass of the linear layer
Args:
G: minibatch of (homo)graphs
v: dict of value edge-features
k: dict of key edge-features
q: dict of query node-features
Returns:
tensor with new features [B, n_points, n_features_out]
"""
with G.local_scope():
# Add node features to local graph scope
## We use the stacked tensor representation for attention
for m, d in self.f_value.structure:
G.edata[f'v{d}'] = v[f'{d}'].view(-1, self.n_heads, m // self.n_heads, 2 * d + 1)
G.edata['k'] = fiber2head(k, self.n_heads, self.f_key, squeeze=True)
G.ndata['q'] = fiber2head(q, self.n_heads, self.f_key, squeeze=True)
# Compute attention weights
## Inner product between (key) neighborhood and (query) center
G.apply_edges(fn.e_dot_v('k', 'q', 'e'))
## Apply softmax
e = G.edata.pop('e')
if self.new_dgl:
                # in dgl >= 0.5, e has an extra trailing dimension compared to dgl 0.4.x;
                # the following reshape gets rid of it
n_edges = G.edata['k'].shape[0]
e = e.view([n_edges, self.n_heads])
e = e / np.sqrt(self.f_key.n_features)
G.edata['a'] = edge_softmax(G, e)
# Perform attention-weighted message-passing
for d in self.f_value.degrees:
G.update_all(self.udf_u_mul_e(d), fn.sum('m', f'out{d}'))
output = {}
for m, d in self.f_value.structure:
output[f'{d}'] = G.ndata[f'out{d}'].view(-1, m, 2 * d + 1)
return output
class GSE3Res(nn.Module):
"""Graph attention block with SE(3)-equivariance and skip connection"""
def __init__(self, f_in: Fiber, f_out: Fiber, edge_dim: int = 0, div: float = 4,
n_heads: int = 1, learnable_skip=True):
super().__init__()
self.f_in = f_in
self.f_out = f_out
self.div = div
self.n_heads = n_heads
# f_mid_out has same structure as 'f_out' but #channels divided by 'div'
# this will be used for the values
f_mid_out = {k: int(v // div) for k, v in self.f_out.structure_dict.items()}
self.f_mid_out = Fiber(dictionary=f_mid_out)
# f_mid_in has same structure as f_mid_out, but only degrees which are in f_in
# this will be used for keys and queries
# (queries are merely projected, hence degrees have to match input)
f_mid_in = {d: m for d, m in f_mid_out.items() if d in self.f_in.degrees}
self.f_mid_in = Fiber(dictionary=f_mid_in)
self.edge_dim = edge_dim
self.GMAB = nn.ModuleDict()
# Projections
self.GMAB['v'] = GConvSE3Partial(f_in, self.f_mid_out, edge_dim=edge_dim)
self.GMAB['k'] = GConvSE3Partial(f_in, self.f_mid_in, edge_dim=edge_dim)
self.GMAB['q'] = G1x1SE3(f_in, self.f_mid_in)
# Attention
self.GMAB['attn'] = GMABSE3(self.f_mid_out, self.f_mid_in, n_heads=n_heads)
# Skip connections
self.project = G1x1SE3(self.f_mid_out, f_out, learnable=learnable_skip)
self.add = GSum(f_out, f_in)
# the following checks whether the skip connection would change
# the output fibre structure; the reason can be that the input has
# more channels than the output (for at least one degree); this would
# then cause a (hard to debug) error in the next layer
assert self.add.f_out.structure_dict == f_out.structure_dict, \
'skip connection would change output structure'
@profile
def forward(self, features, G, **kwargs):
# Embeddings
v = self.GMAB['v'](features, G=G, **kwargs)
k = self.GMAB['k'](features, G=G, **kwargs)
q = self.GMAB['q'](features, G=G)
# Attention
z = self.GMAB['attn'](v, k=k, q=q, G=G)
# Skip + residual
z = self.project(z)
z = self.add(z, features)
return z
### Helper and wrapper functions
class GSum(nn.Module):
"""SE(3)-equivariant graph residual sum function."""
def __init__(self, f_x: Fiber, f_y: Fiber):
"""SE(3)-equivariant graph residual sum function.
Args:
f_x: Fiber() object for fiber of summands
f_y: Fiber() object for fiber of summands
"""
super().__init__()
self.f_x = f_x
self.f_y = f_y
self.f_out = Fiber.combine_max(f_x, f_y)
def __repr__(self):
return f"GSum(structure={self.f_out})"
def forward(self, x, y):
out = {}
for k in self.f_out.degrees:
k = str(k)
if (k in x) and (k in y):
if x[k].shape[1] > y[k].shape[1]:
diff = x[k].shape[1] - y[k].shape[1]
zeros = torch.zeros(x[k].shape[0], diff, x[k].shape[2]).to(y[k].device)
y[k] = torch.cat([y[k], zeros], 1)
elif x[k].shape[1] < y[k].shape[1]:
diff = y[k].shape[1] - x[k].shape[1]
zeros = torch.zeros(x[k].shape[0], diff, x[k].shape[2]).to(y[k].device)
x[k] = torch.cat([x[k], zeros], 1)
out[k] = x[k] + y[k]
elif k in x:
out[k] = x[k]
elif k in y:
out[k] = y[k]
return out
class GAvgPooling(nn.Module):
"""Graph Average Pooling module."""
def __init__(self, type='0'):
super().__init__()
self.pool = AvgPooling()
self.type = type
@profile
def forward(self, features, G, **kwargs):
if self.type == '0':
h = features['0'][..., -1]
pooled = self.pool(G, h)
elif self.type == '1':
pooled = []
for i in range(3):
h_i = features['1'][..., i]
pooled.append(self.pool(G, h_i).unsqueeze(-1))
pooled = torch.cat(pooled, axis=-1)
pooled = {'1': pooled}
else:
print('GAvgPooling for type > 1 not implemented')
exit()
return pooled
class GMaxPooling(nn.Module):
"""Graph Max Pooling module."""
def __init__(self):
super().__init__()
self.pool = MaxPooling()
@profile
def forward(self, features, G, **kwargs):
h = features['0'][..., -1]
return self.pool(G, h)
# -------------------------------------------------------------------------------------------------------------------------------------
# Following code derived from egnn-pytorch (https://github.com/lucidrains/egnn-pytorch/blob/main/egnn_pytorch/egnn_pytorch.py):
# -------------------------------------------------------------------------------------------------------------------------------------
class EnInvGraphConv(nn.Module):
"""A graph neural network layer as a DGL module.
EnInvGraphConv stands for a Graph Convolution E(n)-invariant layer. It is the
equivalent of a linear layer in an MLP, a conv layer in a CNN, or a graph
conv layer in a GCN.
"""
def __init__(
self,
node_feat,
edge_feat=0,
coord_feat=16,
fourier_feat=0,
norm_rel_coords=False,
norm_coord_weights=False,
num_nearest_neighbors=0,
dropout=0.0,
init_eps=1e-3
):
"""E(n)-invariant Graph Conv Layer
Parameters
----------
node_feat : int
Node feature size.
edge_feat : int
Edge feature size.
coord_feat : int
Coordinates feature size.
fourier_feat : int
Fourier feature size.
norm_rel_coords : boolean
Whether to normalize the relative coordinates.
norm_coord_weights : boolean
Whether to normalize the coordinate update weights.
num_nearest_neighbors : int
Number of nearest neighbors to restrict message passing to (0 uses all nodes).
dropout : float
Dropout rate applied inside the edge and node MLPs.
init_eps : float
Standard deviation used to initialize the linear layer weights.
"""
super().__init__()
self.fourier_feat = fourier_feat
# Store the remaining feature sizes so that __repr__ below can report them
self.node_feat = node_feat
self.edge_feat = edge_feat
self.coord_feat = coord_feat
edge_input_dim = (fourier_feat * 2) + (node_feat * 2) + edge_feat + 1
dropout = nn.Dropout(dropout) if dropout > 0 else nn.Identity()
self.edge_mlp = nn.Sequential(
nn.Linear(edge_input_dim, edge_input_dim * 2),
dropout,
GELU(),
nn.Linear(edge_input_dim * 2, coord_feat),
GELU()
)
self.node_mlp = nn.Sequential(
nn.Linear(node_feat + coord_feat, node_feat * 2),
dropout,
GELU(),
nn.Linear(node_feat * 2, node_feat),
)
self.norm_coord_weights = norm_coord_weights
self.norm_rel_coords = norm_rel_coords
if norm_rel_coords:
self.rel_coords_scale = nn.Parameter(torch.ones(1))
self.coords_mlp = nn.Sequential(
nn.Linear(coord_feat, coord_feat * 4),
dropout,
GELU(),
nn.Linear(coord_feat * 4, 1)
)
self.num_nearest_neighbors = num_nearest_neighbors
self.init_eps = init_eps
self.apply(self.init_)
def init_(self, module):
if type(module) in {nn.Linear}:
# Seems to be needed to keep the network from exploding to NaN with greater depths
nn.init.normal_(module.weight, std=self.init_eps)
def forward(self, h, x, e=None, mask=None):
"""Forward pass of the linear layer
Parameters
----------
h : Tensor
The input node embedding.
x : Tensor
The input coordinates embedding.
e : Tensor
The input edge embedding.
mask : Tensor
The coordinate mask to apply.
"""
b, n, d, fourier_features, num_nearest = *h.shape, self.fourier_feat, self.num_nearest_neighbors
use_nearest = num_nearest > 0
nbhd_indices = None
rel_coords = rearrange(x, 'b i d -> b i () d') - rearrange(x, 'b j d -> b () j d')
rel_dist = (rel_coords ** 2).sum(dim=-1, keepdim=True)
if use_nearest:
nbhd_indices = rel_dist[..., 0].topk(num_nearest, dim=-1, largest=False).indices
rel_coords = batched_index_select(rel_coords, nbhd_indices, dim=2)
rel_dist = batched_index_select(rel_dist, nbhd_indices, dim=2)
if fourier_features > 0:
rel_dist = fourier_encode_dist(rel_dist, num_encodings=fourier_features)
rel_dist = rearrange(rel_dist, 'b i j () d -> b i j d')
if use_nearest:
feats_j = batched_index_select(h, nbhd_indices, dim=1)
else:
feats_j = rearrange(h, 'b j d -> b () j d')
feats_i = rearrange(h, 'b i d -> b i () d')
feats_i, feats_j = broadcast_tensors(feats_i, feats_j)
edge_input = torch.cat((feats_i, feats_j, rel_dist), dim=-1)
if e is not None:
edge_input = torch.cat((edge_input, e), dim=-1)
m_ij = self.edge_mlp(edge_input)
m_i = m_ij.sum(dim=-2)
node_mlp_input = torch.cat((h, m_i), dim=-1)
node_out = self.node_mlp(node_mlp_input) + h
# Free GPU memory
rel_coords.detach()
rel_dist.detach()
feats_i.detach()
feats_j.detach()
edge_input.detach()
m_i.detach()
m_ij.detach()
node_mlp_input.detach()
if nbhd_indices is not None:
nbhd_indices.detach()
if mask is not None:
mask.detach()
return node_out
def __repr__(self):
return f'EnInvGraphConv(structure=h{self.node_feat}-x{self.coord_feat}-e{self.edge_feat})'
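# --- Hedged usage sketch (illustration only; not part of the original repository) -----------
# A minimal example of calling the invariant layer above; the feature sizes and tensor shapes
# are assumptions chosen for the sketch. The function is defined but never called on import.
def _example_en_inv_graph_conv():
    layer = EnInvGraphConv(node_feat=32, coord_feat=16)
    h = torch.randn(2, 8, 32)  # batch of 2 graphs, 8 nodes, 32-dim node features
    x = torch.randn(2, 8, 3)   # 3D node coordinates
    h_out = layer(h, x)        # invariant update: only node features are returned
    return h_out.shape         # expected: torch.Size([2, 8, 32])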
class EnGraphConv(nn.Module):
"""A graph neural network layer.
EnGraphConv stands for a Graph Convolution E(n)-equivariant layer. It is the
equivalent of a linear layer in an MLP, a conv layer in a CNN, or a graph
conv layer in a GCN.
"""
def __init__(
self,
node_feat,
edge_feat=0,
coord_feat=16,
fourier_feat=0,
norm_rel_coords=False,
norm_coord_weights=False,
num_nearest_neighbors=0,
dropout=0.0,
init_eps=1e-3
):
"""E(n)-equivariant Graph Conv Layer
Parameters
----------
node_feat : int
Node feature size.
edge_feat : int
Edge feature size.
coord_feat : int
Coordinates feature size.
fourier_feat : int
Fourier feature size.
norm_rel_coords : boolean
Whether to normalize the relative coordinates.
norm_coord_weights : boolean
Whether to normalize the coordinate update weights.
num_nearest_neighbors : int
Number of nearest neighbors to restrict message passing to (0 uses all nodes).
dropout : float
Dropout rate applied inside the edge and node MLPs.
init_eps : float
Standard deviation used to initialize the linear layer weights.
"""
super().__init__()
self.fourier_feat = fourier_feat
# Store the remaining feature sizes so that __repr__ below can report them
self.node_feat = node_feat
self.edge_feat = edge_feat
self.coord_feat = coord_feat
edge_input_dim = (fourier_feat * 2) + (node_feat * 2) + edge_feat + 1
dropout = nn.Dropout(dropout) if dropout > 0 else nn.Identity()
self.edge_mlp = nn.Sequential(
nn.Linear(edge_input_dim, edge_input_dim * 2),
dropout,
GELU(),
nn.Linear(edge_input_dim * 2, coord_feat),
GELU()
)
self.node_mlp = nn.Sequential(
nn.Linear(node_feat + coord_feat, node_feat * 2),
dropout,
GELU(),
nn.Linear(node_feat * 2, node_feat),
)
self.norm_coord_weights = norm_coord_weights
self.norm_rel_coords = norm_rel_coords
if norm_rel_coords:
self.rel_coords_scale = nn.Parameter(torch.ones(1))
self.coords_mlp = nn.Sequential(
nn.Linear(coord_feat, coord_feat * 4),
dropout,
GELU(),
nn.Linear(coord_feat * 4, 1)
)
self.num_nearest_neighbors = num_nearest_neighbors
self.init_eps = init_eps
self.apply(self.init_)
def init_(self, module):
if type(module) in {nn.Linear}:
# Seems to be needed to keep the network from exploding to NaN with greater depths
nn.init.normal_(module.weight, std=self.init_eps)
def forward(self, h, x, e=None, mask=None):
"""Forward pass of the linear layer
Parameters
----------
h : Tensor
The input node embedding.
x : Tensor
The input coordinates embedding.
e : Tensor
The input edge embedding.
mask : Tensor
The coordinate mask to apply.
"""
nbhd_indices = None
b, n, d, fourier_features, num_nearest = *h.shape, self.fourier_feat, self.num_nearest_neighbors
use_nearest = num_nearest > 0
rel_coords = rearrange(x, 'b i d -> b i () d') - rearrange(x, 'b j d -> b () j d')
rel_dist = (rel_coords ** 2).sum(dim=-1, keepdim=True)
if use_nearest:
nbhd_indices = rel_dist[..., 0].topk(num_nearest, dim=-1, largest=False).indices
rel_coords = batched_index_select(rel_coords, nbhd_indices, dim=2)
rel_dist = batched_index_select(rel_dist, nbhd_indices, dim=2)
if fourier_features > 0:
rel_dist = fourier_encode_dist(rel_dist, num_encodings=fourier_features)
rel_dist = rearrange(rel_dist, 'b i j () d -> b i j d')
if use_nearest:
feats_j = batched_index_select(h, nbhd_indices, dim=1)
else:
feats_j = rearrange(h, 'b j d -> b () j d')
feats_i = rearrange(h, 'b i d -> b i () d')
feats_i, feats_j = broadcast_tensors(feats_i, feats_j)
edge_input = torch.cat((feats_i, feats_j, rel_dist), dim=-1)
if e is not None:
edge_input = torch.cat((edge_input, e), dim=-1)
m_ij = self.edge_mlp(edge_input)
coord_weights = self.coords_mlp(m_ij)
coord_weights = rearrange(coord_weights, 'b i j () -> b i j')
if self.norm_coord_weights:
coord_weights = coord_weights.tanh()
if self.norm_rel_coords:
rel_coords = normalize(rel_coords, dim=-1) * self.rel_coords_scale
if mask is not None:
mask_i = rearrange(mask, 'b i -> b i ()')
if use_nearest:
mask_j = batched_index_select(mask, nbhd_indices, dim=1)
else:
mask_j = rearrange(mask, 'b j -> b () j')
mask = mask_i * mask_j
coord_weights.masked_fill_(~mask, 0.)
# Free GPU memory
mask_i.detach()
mask_j.detach()
coords_out = einsum('b i j, b i j c -> b i c', coord_weights, rel_coords) + x
m_i = m_ij.sum(dim=-2)
node_mlp_input = torch.cat((h, m_i), dim=-1)
node_out = self.node_mlp(node_mlp_input) + h
# Free GPU memory
rel_coords.detach()
rel_dist.detach()
feats_i.detach()
feats_j.detach()
edge_input.detach()
m_i.detach()
m_ij.detach()
coord_weights.detach()
node_mlp_input.detach()
if nbhd_indices is not None:
nbhd_indices.detach()
if mask is not None:
mask.detach()
return node_out, coords_out
def __repr__(self):
return f'GConvEn(structure=h{self.node_feat}-x{self.coord_feat}-e{self.edge_feat})'
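# --- Hedged usage sketch (illustration only; not part of the original repository) -----------
# Same assumed shapes as the sketch above, but for the equivariant layer, which additionally
# returns updated coordinates.
def _example_en_graph_conv():
    layer = EnGraphConv(node_feat=32, coord_feat=16)
    h = torch.randn(2, 8, 32)
    x = torch.randn(2, 8, 3)
    h_out, x_out = layer(h, x)       # node features and coordinates are both updated
    return h_out.shape, x_out.shape  # expected: (2, 8, 32) and (2, 8, 3)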
# -------------------------------------------------------------------------------------------------------------------------------------
# Following code derived from DMLC (https://github.com/dmlc/dgl/blob/master/examples/pytorch/dagnn/main.py):
# -------------------------------------------------------------------------------------------------------------------------------------
class DAGNNConv(nn.Module):
def __init__(self,
in_dim,
k):
super(DAGNNConv, self).__init__()
self.s = Parameter(torch.FloatTensor(in_dim, 1))
self.k = k
self.reset_parameters()
def reset_parameters(self):
gain = nn.init.calculate_gain('sigmoid')
nn.init.xavier_uniform_(self.s, gain=gain)
def forward(self, graph, feats):
with graph.local_scope():
results = [feats]
degs = graph.in_degrees().float()
norm = torch.pow(degs, -0.5)
norm = norm.to(feats.device).unsqueeze(1)
for _ in range(self.k):
feats = feats * norm
graph.ndata['h'] = feats
graph.update_all(fn.copy_u('h', 'm'),
fn.sum('m', 'h'))
feats = graph.ndata['h']
feats = feats * norm
results.append(feats)
H = torch.stack(results, dim=1)
S = sigmoid(torch.matmul(H, self.s))
S = S.permute(0, 2, 1)
H = torch.matmul(S, H).squeeze()
return H
class MLPLayer(nn.Module):
def __init__(self,
in_dim,
out_dim,
bias=True,
activation=None,
dropout=0):
super(MLPLayer, self).__init__()
self.linear = nn.Linear(in_dim, out_dim, bias=bias)
self.activation = activation
self.dropout = nn.Dropout(dropout)
self.reset_parameters()
def reset_parameters(self):
gain = 1.
if self.activation is relu:
gain = nn.init.calculate_gain('relu')
nn.init.xavier_uniform_(self.linear.weight, gain=gain)
if self.linear.bias is not None:
nn.init.zeros_(self.linear.bias)
def forward(self, feats):
feats = self.dropout(feats)
feats = self.linear(feats)
if self.activation:
feats = self.activation(feats)
return feats
class DAGNN(nn.Module):
def __init__(self,
k,
in_dim,
hid_dim,
out_dim,
bias=True,
activation=relu,
dropout=0, ):
super(DAGNN, self).__init__()
self.mlp = nn.ModuleList()
self.mlp.append(MLPLayer(in_dim=in_dim, out_dim=hid_dim, bias=bias,
activation=activation, dropout=dropout))
self.mlp.append(MLPLayer(in_dim=hid_dim, out_dim=out_dim, bias=bias,
activation=None, dropout=dropout))
self.dagnn = DAGNNConv(in_dim=out_dim, k=k)
def forward(self, graph, feats):
for layer in self.mlp:
feats = layer(feats)
feats = self.dagnn(graph, feats)
return feats
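# --- Hedged usage sketch (illustration only; not part of the original repository) -----------
# The toy graph and feature sizes below are assumptions for the sketch; self-loops are added
# so no node has zero in-degree before the symmetric normalization inside DAGNNConv.
def _example_dagnn():
    import dgl
    g = dgl.add_self_loop(dgl.graph(([0, 1, 2], [1, 2, 0])))  # 3-node directed cycle
    feats = torch.randn(3, 8)
    model = DAGNN(k=4, in_dim=8, hid_dim=16, out_dim=4)
    return model(g, feats).shape  # expected: torch.Size([3, 4])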
# -------------------------------------------------------------------------------------------------------------------------------------
# Following code curated for DeepInteract (https://github.com/jianlin-cheng/DeepInteract):
# -------------------------------------------------------------------------------------------------------------------------------------
class SAGEConv(nn.Module):
"""GraphSAGE convolution module used by the GraphSAGE model.
This variant of the SAGEConv layer is able to infer edges via a soft estimation on messages.
Parameters
----------
in_feat : int
Input feature size.
out_feat : int
Output feature size.
"""
def __init__(self, in_feat, out_feat):
super(SAGEConv, self).__init__()
# A linear submodule for projecting the input and neighbor feature to the output.
self.linear = nn.Linear(in_feat * 2, out_feat)
def forward(self, g, h):
"""Forward computation
Parameters
----------
g : Graph
The input graph.
h : Tensor
The input node feature.
"""
with g.local_scope():
g.ndata['h'] = h
# update_all is a message passing API.
g.update_all(message_func=fn.copy_u('h', 'm'), reduce_func=fn.mean('m', 'h_N'))
h_N = g.ndata['h_N']
h_total = torch.cat([h, h_N], dim=1)
return self.linear(h_total)
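# --- Hedged usage sketch (illustration only; not part of the original repository) -----------
# The toy graph and feature sizes below are assumptions chosen for the sketch.
def _example_sage_conv():
    import dgl
    g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 0]))  # 4-node directed cycle
    h = torch.randn(4, 10)
    conv = SAGEConv(in_feat=10, out_feat=5)
    return conv(g, h).shape  # expected: torch.Size([4, 5])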
| [
"torch.nn.Linear",
"torch.cat",
"torch.stack",
"torch.nn.ModuleList",
"torch.einsum",
"torch.nn.ModuleDict",
"torch.ones",
"torch.sum",
"torch.nn.LayerNorm",
"torch.FloatTensor",
"torch.nn.init.normal_",
"torch.nn.init.calculate_gain",
"torch.nn.init.zeros_",
"torch.nn.ParameterDict",
"torch.zeros",
"torch.nn.init.kaiming_uniform_",
"torch.nn.Identity",
"torch.nn.Sequential",
"torch.nn.ReLU",
"torch.nn.GELU",
"torch.matmul",
"torch.pow",
"torch.nn.functional.normalize",
"torch.nn.Dropout",
"torch.no_grad",
"torch.broadcast_tensors",
"torch.nn.init.xavier_uniform_",
"torch.randn"
] | 0.0.24 | amorehead/Equivariant-GNNs | 4e81136242a4c8905b0e5fc39be5f704a42cc5e1 |
1.5 | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import os
import tempfile
import unittest
import torch
from monai.handlers.utils import write_metrics_reports
class TestWriteMetricsReports(unittest.TestCase):
def test_content(self):
with tempfile.TemporaryDirectory() as tempdir:
write_metrics_reports(
save_dir=tempdir,
images=["filepath1", "filepath2"],
metrics={"metric1": 1, "metric2": 2},
metric_details={"metric3": torch.tensor([[1, 2], [2, 3]]), "metric4": torch.tensor([[5, 6], [7, 8]])},
summary_ops=["mean", "median", "max", "90percentile"],
deli="\t",
output_type="csv",
)
# check the metrics.csv and content
self.assertTrue(os.path.exists(os.path.join(tempdir, "metrics.csv")))
with open(os.path.join(tempdir, "metrics.csv")) as f:
f_csv = csv.reader(f)
for i, row in enumerate(f_csv):
self.assertEqual(row, [f"metric{i + 1}\t{i + 1}"])
self.assertTrue(os.path.exists(os.path.join(tempdir, "metric3_raw.csv")))
# check the metric_raw.csv and content
with open(os.path.join(tempdir, "metric3_raw.csv")) as f:
f_csv = csv.reader(f)
for i, row in enumerate(f_csv):
if i > 0:
self.assertEqual(row, [f"filepath{i}\t{float(i)}\t{float(i + 1)}\t{i + 0.5}"])
self.assertTrue(os.path.exists(os.path.join(tempdir, "metric3_summary.csv")))
# check the metric_summary.csv and content
with open(os.path.join(tempdir, "metric3_summary.csv")) as f:
f_csv = csv.reader(f)
for i, row in enumerate(f_csv):
if i == 1:
self.assertEqual(row, ["class0\t1.5000\t1.5000\t2.0000\t1.9000"])
elif i == 2:
self.assertEqual(row, ["class1\t2.5000\t2.5000\t3.0000\t2.9000"])
elif i == 3:
self.assertEqual(row, ["mean\t2.0000\t2.0000\t2.5000\t2.4000"])
self.assertTrue(os.path.exists(os.path.join(tempdir, "metric4_raw.csv")))
self.assertTrue(os.path.exists(os.path.join(tempdir, "metric4_summary.csv")))
if __name__ == "__main__":
unittest.main()
| [
"torch.tensor"
] | 1.5 | dylanbuchi/MONAI | 1651f1b003b0ffae8b615d191952ad65ad091277 |
1.5 | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
import torch
from parameterized import parameterized
from monai.transforms import SaveImage
TEST_CASE_1 = [
torch.randint(0, 255, (1, 2, 3, 4)),
{"filename_or_obj": "testfile0.nii.gz"},
".nii.gz",
False,
]
TEST_CASE_2 = [
torch.randint(0, 255, (1, 2, 3, 4)),
None,
".nii.gz",
False,
]
class TestSaveImage(unittest.TestCase):
@parameterized.expand([TEST_CASE_1, TEST_CASE_2])
def test_saved_content(self, test_data, meta_data, output_ext, resample):
with tempfile.TemporaryDirectory() as tempdir:
trans = SaveImage(
output_dir=tempdir,
output_ext=output_ext,
resample=resample,
# test saving into the same folder
separate_folder=False,
)
trans(test_data, meta_data)
filepath = "testfile0" if meta_data is not None else "0"
self.assertTrue(os.path.exists(os.path.join(tempdir, filepath + "_trans" + output_ext)))
if __name__ == "__main__":
unittest.main()
| [
"torch.randint"
] | 1.5 | dylanbuchi/MONAI | 1651f1b003b0ffae8b615d191952ad65ad091277 |
1.5 | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from parameterized import parameterized
from torch.autograd import gradcheck
from monai.networks.layers.filtering import BilateralFilter
from tests.utils import skip_if_no_cpp_extension
TEST_CASES = [
[
# Case Description
"1 dimension, 1 channel, low spatial sigma, low color sigma",
# Spatial and Color Sigmas
(1, 0.2),
# Input
[
# Batch 0
[
# Channel 0
[1, 0, 0, 0, 1]
],
# Batch 1
[
# Channel 0
[0, 0, 1, 0, 0]
],
],
# Expected
[
# Batch 0
[
# Channel 0
[1.000000, 0.000000, 0.000000, 0.000000, 1.000000]
],
# Batch 1
[
# Channel 0
[0.000000, 0.000000, 1.000000, 0.000000, 0.000000]
],
],
],
[
# Case Description
"1 dimension, 1 channel, low spatial sigma, high color sigma",
# Spatial and Color Sigmas
(1, 0.9),
# Input
[
# Batch 0
[
# Channel 0
[1, 0, 0, 0, 1]
],
# Batch 1
[
# Channel 0
[0, 0, 1, 0, 0]
],
],
# Expected
[
# Batch 0
[
# Channel 0
[0.631360, 0.099349, 0.070177, 0.164534, 0.649869]
],
# Batch 1
[
# Channel 0
[0.052271, 0.173599, 0.481337, 0.183721, 0.045619]
],
],
],
[
# Case Description
"1 dimension, 1 channel, high spatial sigma, low color sigma",
# Spatial and Color Sigmas
(4, 0.2),
# Input
[
# Batch 0
[
# Channel 0
[1, 0, 0, 0, 1]
],
# Batch 1
[
# Channel 0
[0, 0, 1, 0, 0]
],
],
# Expected
[
# Batch 0
[
# Channel 0
[1.000000, 0.000000, 0.000000, 0.000000, 1.000000]
],
# Batch 1
[
# Channel 0
[0.000000, 0.000000, 1.000000, 0.000000, 0.000000]
],
],
],
[
# Case Description
"1 dimension, 1 channel, high spatial sigma, high color sigma",
# Sigmas
(4, 0.9),
# Input
[
# Batch 0
[
# Channel 0
[1, 0, 0, 0, 1]
],
# Batch 1
[
# Channel 0
[0, 0, 1, 0, 0]
],
],
# Expected
[
# Batch 0
[
# Channel 0
[0.497667, 0.268683, 0.265026, 0.261467, 0.495981]
],
# Batch 1
[
# Channel 0
[0.145959, 0.142282, 0.315710, 0.135609, 0.132572]
],
],
],
[
# Case Description
"1 dimension, 4 channel, low spatial sigma, high color sigma",
# Spatial and Color Sigmas
(1, 0.9),
# Input
[
# Batch 0
[
# Channel 0
[1, 0, 0, 0, 0],
# Channel 1
[1, 0, 1, 0, 0],
# Channel 2
[0, 0, 1, 0, 1],
# Channel 3
[0, 0, 0, 0, 1],
]
],
# Expected
[
# Batch 0
[
# Channel 0
[0.960843, 0.073540, 0.027689, 0.002676, 0.000000],
# Channel 1
[0.960843, 0.073540, 0.951248, 0.003033, 0.000750],
# Channel 2
[0.000000, 0.000000, 0.923559, 0.000357, 0.981324],
# Channel 3
[0.000000, 0.000000, 0.000000, 0.000000, 0.980574],
]
],
],
[
# Case Description
"2 dimension, 1 channel, high spatial sigma, high color sigma",
# Sigmas
(4, 0.9),
# Input
[
# Batch 0
[
# Channel 0
[[1, 0, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 1]]
],
# Batch 1
[
# Channel 0
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]
],
],
# Expected
[
# Batch 0
[
# Channel 0
[
[0.213684, 0.094356, 0.092973, 0.091650, 0.216281],
[0.094085, 0.092654, 0.091395, 0.090186, 0.089302],
[0.092436, 0.091150, 0.090008, 0.088896, 0.088897],
[0.090849, 0.089717, 0.088759, 0.087751, 0.088501],
[0.211458, 0.088334, 0.087495, 0.087049, 0.212173],
]
],
# Batch 1
[
# Channel 0
[
[0.033341, 0.031314, 0.029367, 0.027494, 0.025692],
[0.031869, 0.030632, 0.028820, 0.027074, 0.025454],
[0.030455, 0.029628, 0.084257, 0.026704, 0.025372],
[0.029095, 0.028391, 0.027790, 0.026375, 0.025292],
[0.027786, 0.027197, 0.026692, 0.026181, 0.025213],
]
],
],
],
[
# Case Description
"2 dimension, 4 channel, high spatial sigma, high color sigma",
# Spatial and Color Sigmas
(4, 0.9),
# Input
[
# Batch 0
[
# Channel 0
[[1, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 1]],
# Channel 1
[[1, 0, 1, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 1, 0, 1]],
# Channel 2
[[0, 0, 1, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 1, 0, 0]],
# Channel 3
[[0, 0, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 0]],
]
],
# Expected
[
# Batch 0
[
# Channel 0
[
[0.244373, 0.014488, 0.036589, 0.014226, 0.024329],
[0.014108, 0.014228, 0.014096, 0.013961, 0.013823],
[0.013574, 0.013757, 0.013836, 0.013699, 0.013558],
[0.013008, 0.013211, 0.013404, 0.013438, 0.013295],
[0.025179, 0.012634, 0.034555, 0.013050, 0.237582],
],
# Channel 1
[
[0.271496, 0.015547, 0.439432, 0.015700, 0.089579],
[0.015252, 0.015702, 0.015779, 0.015859, 0.015940],
[0.015020, 0.015556, 0.015935, 0.016015, 0.016098],
[0.014774, 0.015331, 0.015860, 0.016171, 0.016255],
[0.107384, 0.015094, 0.462471, 0.016166, 0.263480],
],
# Channel 2
[
[0.027123, 0.003527, 0.467273, 0.004912, 0.645776],
[0.003810, 0.004908, 0.005605, 0.006319, 0.007050],
[0.004816, 0.005991, 0.006989, 0.007716, 0.008459],
[0.005880, 0.007060, 0.008179, 0.009101, 0.009858],
[0.633398, 0.008191, 0.496893, 0.010376, 0.025898],
],
# Channel 3
[
[0.000000, 0.002468, 0.064430, 0.003437, 0.580526],
[0.002666, 0.003434, 0.003922, 0.004422, 0.004933],
[0.003370, 0.004192, 0.004890, 0.005399, 0.005919],
[0.004115, 0.004940, 0.005723, 0.006368, 0.006898],
[0.551194, 0.005731, 0.068977, 0.007260, 0.000000],
],
]
],
],
[
# Case Description
"3 dimension, 1 channel, high spatial sigma, high color sigma",
# Sigmas
(4, 0.9),
# Input
[
# Batch 0
[
# Channel 0
[
# Frame 0
[[1, 0, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 1]],
# Frame 1
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
# Frame 2
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
# Frame 3
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
# Frame 4
[[1, 0, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 1]],
]
],
],
# Expected
[
# Batch 0
[
# Channel 0
[
# Frame 0
[
[0.086801, 0.036670, 0.035971, 0.035304, 0.088456],
[0.036639, 0.035652, 0.035009, 0.034394, 0.033803],
[0.035899, 0.034897, 0.034136, 0.033566, 0.033129],
[0.035180, 0.034238, 0.033413, 0.032811, 0.032577],
[0.088290, 0.033597, 0.032821, 0.032134, 0.088786],
],
# Frame 1
[
[0.036286, 0.035269, 0.034632, 0.034021, 0.033435],
[0.035398, 0.034485, 0.033922, 0.033381, 0.033177],
[0.034688, 0.033822, 0.033169, 0.032664, 0.032780],
[0.034024, 0.033234, 0.032533, 0.032005, 0.032388],
[0.033564, 0.032797, 0.032118, 0.031525, 0.032105],
],
# Frame 2
[
[0.035225, 0.034169, 0.033404, 0.032843, 0.032766],
[0.034383, 0.033487, 0.032908, 0.032415, 0.032650],
[0.033691, 0.032921, 0.032353, 0.031900, 0.032384],
[0.033080, 0.032390, 0.031786, 0.031432, 0.032008],
[0.033099, 0.032373, 0.031737, 0.031479, 0.032054],
],
# Frame 3
[
[0.034216, 0.033231, 0.032337, 0.031758, 0.032101],
[0.033456, 0.032669, 0.031913, 0.031455, 0.032034],
[0.032788, 0.032140, 0.031618, 0.031413, 0.031977],
[0.032221, 0.031650, 0.031145, 0.031130, 0.031652],
[0.032642, 0.031968, 0.031378, 0.031433, 0.032003],
],
# Frame 4
[
[0.086207, 0.032335, 0.031499, 0.030832, 0.087498],
[0.032570, 0.031884, 0.031155, 0.030858, 0.031401],
[0.031967, 0.031417, 0.030876, 0.030881, 0.031388],
[0.031602, 0.031103, 0.030696, 0.030960, 0.031455],
[0.090599, 0.031546, 0.031127, 0.031386, 0.083483],
],
]
]
],
],
]
@skip_if_no_cpp_extension
class BilateralFilterTestCaseCpuApprox(unittest.TestCase):
@parameterized.expand(TEST_CASES)
def test_cpu_approx(self, test_case_description, sigmas, input, expected):
# Params to determine the implementation to test
device = torch.device("cpu")
fast_approx = True
# Create input tensor and apply filter
input_tensor = torch.from_numpy(np.array(input)).to(dtype=torch.float, device=device)
output = BilateralFilter.apply(input_tensor, *sigmas, fast_approx).cpu().numpy()
# Ensure results are as expected
np.testing.assert_allclose(output, expected, atol=1e-5)
@parameterized.expand(TEST_CASES)
def test_cpu_approx_backwards(self, test_case_description, sigmas, input, expected):
# Params to determine the implementation to test
device = torch.device("cpu")
fast_approx = True
# Prepare input tensor
input_tensor = torch.from_numpy(np.array(input)).to(dtype=torch.float, device=device)
input_tensor.requires_grad = True
# Prepare args
args = (input_tensor, *sigmas, fast_approx)
# Run grad check
gradcheck(BilateralFilter.apply, args, raise_exception=False)
if __name__ == "__main__":
unittest.main()
| [
"torch.device",
"torch.autograd.gradcheck"
] | 1.5 | dylanbuchi/MONAI | 1651f1b003b0ffae8b615d191952ad65ad091277 |
1.7 | import copy
from pathlib import Path
from typing import Dict, List, Optional, Union
import torch
from pytorch_lightning.metrics import Accuracy
from torch import Tensor, optim
from torch.utils import data
import pytorch_lightning as pl
from pytorch_lightning.loggers import LightningLoggerBase
from pytorch_lightning.callbacks.base import Callback
from mlmi.structs import OptimizerArgs, TrainArgs, ModelArgs
from mlmi.log import getLogger
from mlmi.settings import CHECKPOINT_DIR
logger = getLogger(__name__)
def optimizer_state_dict_to_cpu(optimizer_state_dict):
c = copy.deepcopy(optimizer_state_dict)
o = {}
state_dict = c.get('state')
r = {}
for key, state in state_dict.items():
s = {}
for k, v in state.items():
if torch.is_tensor(v):
s[k] = v.cpu()
else:
s[k] = v
r[key] = s
o['state'] = r
o['param_groups'] = c.get('param_groups')
return o
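# --- Hedged usage sketch (illustration only; not part of the original repository) -----------
# The model, optimizer and tensor sizes below are assumptions; one optimizer step is taken so
# that momentum buffers exist before moving the state dict to CPU.
def _example_optimizer_state_dict_to_cpu():
    model = torch.nn.Linear(4, 2)
    opt = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
    model(torch.randn(8, 4)).sum().backward()
    opt.step()
    cpu_state = optimizer_state_dict_to_cpu(opt.state_dict())
    return all(v.device.type == 'cpu'
               for s in cpu_state['state'].values()
               for v in s.values() if torch.is_tensor(v))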
class BaseParticipant(object):
def __init__(self, participant_name: str, model_args: ModelArgs, context):
assert participant_name is not None, 'A participant name is required to load and save logs'
assert model_args is not None, 'Model args are required to initialize a model for the participant'
assert context is not None, 'Experiment context is required for participant'
self._name = participant_name
self._cluster_id = None
self._experiment_context = context
participant_model_kwargs = self.get_model_kwargs()
if participant_model_kwargs is not None:
self._model = model_args(participant_name=participant_name, **participant_model_kwargs)
else:
self._model = model_args(participant_name=participant_name)
self._model_args = model_args
def get_model_kwargs(self) -> Optional[Dict]:
return None
@property
def model(self) -> Union[pl.LightningModule, 'BaseParticipantModel']:
"""
The model to train
:return: The model
"""
return self._model
@property
def cluster_id(self) -> str:
return self._cluster_id
@cluster_id.setter
def cluster_id(self, value: str):
self._cluster_id = value
def overwrite_model_state(self, model_state: Dict[str, Tensor]):
"""
Loads the model state into the current model instance
:param model_state: The model state to load
"""
self._model.load_state_dict(model_state, strict=False)
def load_model_state_from_checkpoint(self):
"""
Load the model state from an existing saved checkpoint
"""
self._model = self._model_args.model_class.load_from_checkpoint(
checkpoint_path=str(self.get_checkpoint_path().absolute()))
def get_checkpoint_path(self, suffix: Union[str, None] = None) -> Path:
"""
Constructs a checkpoint path from the participant name, an optional suffix, and the experiment context.
:param suffix: Optional suffix appended to the participant name
:return: The path to the checkpoint file
"""
str_suffix = '' if suffix is None else '_' + suffix
filename = (self._name + str_suffix + '.ckpt')
return CHECKPOINT_DIR / self._experiment_context.name / filename
def save_model_state(self):
"""
Saves the model state to this participant's checkpoint path
:return:
"""
path = self.get_checkpoint_path()
path.parent.mkdir(parents=True, exist_ok=True)
torch.save(self._model.state_dict(), path)
class BaseTrainingParticipant(BaseParticipant):
def __init__(self, client_id: str, model_args: ModelArgs, context,
train_dataloader: data.DataLoader, num_train_samples: int,
test_dataloader: data.DataLoader, num_test_samples: int,
lightning_logger: LightningLoggerBase, *args, **kwargs):
self._train_dataloader = train_dataloader
self._test_dataloader = test_dataloader
self._num_train_samples = sum([len(y) for x, y in train_dataloader])
self._num_test_samples = num_test_samples
self._lightning_logger = lightning_logger
self._callbacks = None
self._model_state = None
self._trainer = None
super().__init__(client_id, model_args, context)
def create_trainer(self, enable_logging=True, **kwargs) -> pl.Trainer:
"""
Creates a new trainer instance for each training round.
:param kwargs: additional keyword arguments to send to the trainer for configuration
:return: a pytorch lightning trainer instance
"""
_kwargs = kwargs.copy()
_kwargs['logger'] = self.logger
_kwargs['checkpoint_callback'] = False
if torch.cuda.is_available():
_kwargs['gpus'] = 1
return pl.Trainer(callbacks=self._callbacks, limit_val_batches=0.0, **_kwargs)
def set_trainer_callbacks(self, callbacks: List[Callback]):
self._callbacks = callbacks
@property
def logger(self) -> LightningLoggerBase:
"""
Gets the logger to use for the training in later stage.
:return: The lightning logger to use
"""
return self._lightning_logger
@property
def train_data_loader(self) -> data.DataLoader:
return self._train_dataloader
@property
def test_data_loader(self) -> data.DataLoader:
return self._test_dataloader
@property
def num_train_samples(self) -> int:
return self._num_train_samples
@property
def num_test_samples(self) -> int:
return self._num_test_samples
def train(self, training_args: TrainArgs, *args, **kwargs):
"""
Implement the training routine.
:param training_args:
:param args:
:param kwargs:
:return:
"""
trainer = self.create_trainer(enable_logging=False, **training_args.kwargs)
train_dataloader = self.train_data_loader
trainer.fit(self.model, train_dataloader)
del self.model.trainer
def test(self, model: Optional[torch.nn.Module] = None, use_local_model: bool = False):
"""
Test the model state on this clients data.
:param
:param model_state: The model state to evaluate
:return: The output loss
"""
assert use_local_model or model is not None
trainer = self.create_trainer(enable_logging=False, progress_bar_refresh_rate=0)
if use_local_model:
result = trainer.test(model=self.model, test_dataloaders=self.test_data_loader, verbose=False)
self._model = self._model.cpu()
del self._model.trainer
else:
result = trainer.test(model=model, test_dataloaders=self.test_data_loader, verbose=False)
return result
class BaseAggregatorParticipant(BaseParticipant):
def __init__(self, participant_name: str, model_args: ModelArgs, context):
super().__init__(participant_name, model_args, context)
def aggregate(self, participants: List['BaseTrainingParticipant'], *args, **kwargs):
"""
Aggregate the models of the given training participants into this aggregator's model.
:param participants: Participants to apply the model changes from
:return:
"""
raise NotImplementedError()
class BaseParticipantModel(object):
def __init__(self, *args, participant_name=None, optimizer_args: Optional[OptimizerArgs]=None,
model=None, **kwargs):
assert participant_name is not None, 'Please provide a participant name parameter in model args to identify ' \
'your model in logging'
assert optimizer_args is not None, 'Optimizer args not set!'
assert model is not None, 'Model not passed!'
self.participant_name = participant_name
self.optimizer_args = optimizer_args
super().__init__(*args, **kwargs)
self.model = model
self._optimizer_state = None
@property
def optimizer_state(self):
return self._optimizer_state
@optimizer_state.setter
def optimizer_state(self, value):
self._optimizer_state = value
def configure_optimizers(self):
return self.optimizer_args(self.model.parameters())
"""
Do not restore state
if self.optimizer_state is not None:
optimizer.load_state_dict(self.optimizer_state)
return optimizer
"""
| [
"torch.is_tensor",
"torch.cuda.is_available"
] | 1.7.1 | iwan933/mlmi-federated-learning | e148664304dd7fbbc2cc2a6a34567533748c1720 |
1.5 | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2021 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
""" Fine-tuning pre-trained models for token classification tasks.
Heavily adapted from: https://github.com/huggingface/transformers/blob/
v3.0.1/examples/token-classification/run_ner.py"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import (
accuracy_score as seq_accuracy_score,
f1_score as seq_f1_score,
precision_score as seq_precision_score,
recall_score as seq_recall_score
)
from sklearn.metrics import (
accuracy_score,
f1_score,
precision_score,
recall_score
)
from torch import nn
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from utils import TokenClassificationDataSet, Split, get_labels
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are
going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from "
"huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if "
"not the same as model_name"}
)
# If you want to tweak more attributes on your tokenizer, you should do it
# in a distinct script, or just modify its tokenizer_config.json.
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if "
"not the same as model_name"}
)
use_fast: bool = field(default=False, metadata={"help": "Set this flag to "
"use fast "
"tokenization."})
task_type: Optional[str] = field(
default="ner", metadata={"help": "the name of the task (ner or pos)"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the "
"pretrained models downloaded from s3"}
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for
training and eval.
"""
data_dir: str = field(
metadata={"help": "The input data dir. Should contain the .txt files "
"for a CoNLL-2003-formatted task."}
)
labels: Optional[str] = field(
default=None,
metadata={"help": "Path to a file containing all labels."},
)
max_seq_length: int = field(
default=128,
metadata={
"help": "The maximum total input sequence length after "
"tokenization. Sequences longer than this will be truncated, "
"sequences shorter will be padded."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and "
"evaluation sets"}
)
blind_test: bool = field(
default=False, metadata={"help": "Use blind test set"}
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments,
DataTrainingArguments,
TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a
# json file, let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(
json_file=os.path.abspath(
sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists "
"and is not empty. Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=(logging.INFO if training_args.local_rank in [-1, 0]
else logging.WARN),
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, "
"16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed)
# Prepare task
labels = get_labels(data_args.labels)
label_map: Dict[int, str] = {i: label for i, label in enumerate(labels)}
num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can
# concurrently download model & vocab.
config = AutoConfig.from_pretrained(
(model_args.config_name if model_args.config_name
else model_args.model_name_or_path),
num_labels=num_labels,
id2label=label_map,
label2id={label: i for i, label in enumerate(labels)},
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
(model_args.tokenizer_name if model_args.tokenizer_name
else model_args.model_name_or_path),
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast,
)
model = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
# Get datasets
train_dataset = (
TokenClassificationDataSet(
data_dir=data_args.data_dir,
tokenizer=tokenizer,
labels=labels,
model_type=config.model_type,
max_seq_length=data_args.max_seq_length,
overwrite_cache=data_args.overwrite_cache,
mode=Split.train,
)
if training_args.do_train
else None
)
eval_dataset = (
TokenClassificationDataSet(
data_dir=data_args.data_dir,
tokenizer=tokenizer,
labels=labels,
model_type=config.model_type,
max_seq_length=data_args.max_seq_length,
overwrite_cache=data_args.overwrite_cache,
mode=Split.dev,
)
if training_args.do_eval
else None
)
def align_predictions(predictions: np.ndarray,
label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
preds = np.argmax(predictions, axis=2)
batch_size, seq_len = preds.shape
out_label_list = [[] for _ in range(batch_size)]
preds_list = [[] for _ in range(batch_size)]
for i in range(batch_size):
for j in range(seq_len):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
return preds_list, out_label_list
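# e.g. (illustrative values only): with label_map = {0: "B-LOC", 1: "O"} and
# label_ids = [[-100, 0, 1, -100]], the positions holding -100 (the default
# CrossEntropyLoss ignore_index) are skipped, and the remaining predictions and
# labels are mapped back to their string tags.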
def compute_metrics(p: EvalPrediction) -> Dict:
preds_list, out_label_list = align_predictions(p.predictions,
p.label_ids)
# If task type is NER, use seqeval metrics.
# Otherwise, use scikit learn
if model_args.task_type == "ner":
return {
"accuracy": seq_accuracy_score(out_label_list, preds_list),
"precision": seq_precision_score(out_label_list, preds_list),
"recall": seq_recall_score(out_label_list, preds_list),
"f1": seq_f1_score(out_label_list, preds_list),
}
else:
# Flatten the preds_list and out_label_list
preds_list = [p for sublist in preds_list for p in sublist]
out_label_list = [p for sublist in out_label_list for p in sublist]
return {
"accuracy": accuracy_score(out_label_list, preds_list),
"precision_micro": precision_score(out_label_list, preds_list,
average="micro"),
"recall_micro": recall_score(out_label_list, preds_list,
average="micro"),
"f1_micro": f1_score(out_label_list, preds_list,
average="micro"),
"precision_macro": precision_score(out_label_list, preds_list,
average="macro"),
"recall_macro": recall_score(out_label_list, preds_list,
average="macro"),
"f1_macro": f1_score(out_label_list, preds_list,
average="macro"),
}
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
compute_metrics=compute_metrics,
)
# Training
if training_args.do_train:
trainer.train(
model_path=(model_args.model_name_or_path
if os.path.isdir(model_args.model_name_or_path)
else None)
)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
result = trainer.evaluate()
output_eval_file = os.path.join(training_args.output_dir,
"eval_results.txt")
if trainer.is_world_master():
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key, value in result.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
results.update(result)
# Predict
if training_args.do_predict:
data_split = Split.test
if data_args.blind_test:
data_split = Split.blind_test
test_dataset = TokenClassificationDataSet(
data_dir=data_args.data_dir,
tokenizer=tokenizer,
labels=labels,
model_type=config.model_type,
max_seq_length=data_args.max_seq_length,
overwrite_cache=data_args.overwrite_cache,
mode=data_split,
)
predictions, label_ids, metrics = trainer.predict(test_dataset)
preds_list, _ = align_predictions(predictions, label_ids)
output_test_results_file = os.path.join(training_args.output_dir,
f"{data_split.value}_results.txt")
if trainer.is_world_master():
with open(output_test_results_file, "w") as writer:
for key, value in metrics.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
# Save predictions
output_test_predictions_file = os.path.join(training_args.output_dir,
f"{data_split.value}_predictions.txt")
if trainer.is_world_master():
with open(output_test_predictions_file, "w") as writer:
with open(os.path.join(data_args.data_dir, f"{data_split.value}.txt"), "r") as f:
example_id = 0
for line in f:
if (line.startswith("-DOCSTART-") or line == ""
or line == "\n"):
writer.write(line)
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
output_line = (line.split()[0] + " " +
preds_list[example_id].pop(0) + "\n")
writer.write(output_line)
else:
logger.warning(
"Maximum sequence length exceeded: "
"No prediction for '%s'.", line.split()[0])
return results
if __name__ == "__main__":
main()
| [
"torch.nn.CrossEntropyLoss"
] | 1.5.1 | CAMeL-Lab/CAMeLBERT_morphosyntactic_tagger | 5bea542c2e731d263281d0ab16ba9c065f602f94 |
1.1 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Nagoya University (Wen-Chin Huang)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Voice Transformer Network (Transformer-VC) related modules."""
import logging
import torch
import torch.nn.functional as F
from espnet.nets.pytorch_backend.e2e_asr_transformer import subsequent_mask
from espnet.nets.pytorch_backend.e2e_tts_tacotron2 import (
Tacotron2Loss as TransformerLoss, # noqa: H301
)
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
from espnet.nets.pytorch_backend.tacotron2.decoder import Postnet
from espnet.nets.pytorch_backend.tacotron2.decoder import Prenet as DecoderPrenet
from espnet.nets.pytorch_backend.tacotron2.encoder import Encoder as EncoderPrenet
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.decoder import Decoder
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
from espnet.nets.pytorch_backend.transformer.embedding import ScaledPositionalEncoding
from espnet.nets.pytorch_backend.transformer.encoder import Encoder
from espnet.nets.pytorch_backend.transformer.initializer import initialize
from espnet.nets.tts_interface import TTSInterface
from espnet.utils.cli_utils import strtobool
from espnet.utils.fill_missing_args import fill_missing_args
from espnet.nets.pytorch_backend.e2e_tts_transformer import (
GuidedMultiHeadAttentionLoss, # noqa: H301
TTSPlot, # noqa: H301
)
class Transformer(TTSInterface, torch.nn.Module):
"""VC Transformer module.
This is a module of the Voice Transformer Network
(a.k.a. VTN or Transformer-VC) described in
`Voice Transformer Network: Sequence-to-Sequence
Voice Conversion Using Transformer with
Text-to-Speech Pretraining`_,
which converts a sequence of acoustic features
into another sequence of acoustic features.
.. _`Voice Transformer Network: Sequence-to-Sequence
Voice Conversion Using Transformer with
Text-to-Speech Pretraining`:
https://arxiv.org/pdf/1912.06813.pdf
"""
@staticmethod
def add_arguments(parser):
"""Add model-specific arguments to the parser."""
group = parser.add_argument_group("transformer model setting")
# network structure related
group.add_argument(
"--eprenet-conv-layers",
default=0,
type=int,
help="Number of encoder prenet convolution layers",
)
group.add_argument(
"--eprenet-conv-chans",
default=0,
type=int,
help="Number of encoder prenet convolution channels",
)
group.add_argument(
"--eprenet-conv-filts",
default=0,
type=int,
help="Filter size of encoder prenet convolution",
)
group.add_argument(
"--transformer-input-layer",
default="linear",
type=str,
help="Type of input layer (linear or conv2d)",
)
group.add_argument(
"--dprenet-layers",
default=2,
type=int,
help="Number of decoder prenet layers",
)
group.add_argument(
"--dprenet-units",
default=256,
type=int,
help="Number of decoder prenet hidden units",
)
group.add_argument(
"--elayers", default=3, type=int, help="Number of encoder layers"
)
group.add_argument(
"--eunits", default=1536, type=int, help="Number of encoder hidden units"
)
group.add_argument(
"--adim",
default=384,
type=int,
help="Number of attention transformation dimensions",
)
group.add_argument(
"--aheads",
default=4,
type=int,
help="Number of heads for multi head attention",
)
group.add_argument(
"--dlayers", default=3, type=int, help="Number of decoder layers"
)
group.add_argument(
"--dunits", default=1536, type=int, help="Number of decoder hidden units"
)
group.add_argument(
"--positionwise-layer-type",
default="linear",
type=str,
choices=["linear", "conv1d", "conv1d-linear"],
help="Positionwise layer type.",
)
group.add_argument(
"--positionwise-conv-kernel-size",
default=1,
type=int,
help="Kernel size of positionwise conv1d layer",
)
group.add_argument(
"--postnet-layers", default=5, type=int, help="Number of postnet layers"
)
group.add_argument(
"--postnet-chans", default=256, type=int, help="Number of postnet channels"
)
group.add_argument(
"--postnet-filts", default=5, type=int, help="Filter size of postnet"
)
group.add_argument(
"--use-scaled-pos-enc",
default=True,
type=strtobool,
help="Use trainable scaled positional encoding"
"instead of the fixed scale one.",
)
group.add_argument(
"--use-batch-norm",
default=True,
type=strtobool,
help="Whether to use batch normalization",
)
group.add_argument(
"--encoder-normalize-before",
default=False,
type=strtobool,
help="Whether to apply layer norm before encoder block",
)
group.add_argument(
"--decoder-normalize-before",
default=False,
type=strtobool,
help="Whether to apply layer norm before decoder block",
)
group.add_argument(
"--encoder-concat-after",
default=False,
type=strtobool,
help="Whether to concatenate attention layer's input and output in encoder",
)
group.add_argument(
"--decoder-concat-after",
default=False,
type=strtobool,
help="Whether to concatenate attention layer's input and output in decoder",
)
group.add_argument(
"--reduction-factor",
default=1,
type=int,
help="Reduction factor (for decoder)",
)
group.add_argument(
"--encoder-reduction-factor",
default=1,
type=int,
help="Reduction factor (for encoder)",
)
group.add_argument(
"--spk-embed-dim",
default=None,
type=int,
help="Number of speaker embedding dimensions",
)
group.add_argument(
"--spk-embed-integration-type",
type=str,
default="add",
choices=["add", "concat"],
help="How to integrate speaker embedding",
)
# training related
group.add_argument(
"--transformer-init",
type=str,
default="pytorch",
choices=[
"pytorch",
"xavier_uniform",
"xavier_normal",
"kaiming_uniform",
"kaiming_normal",
],
help="How to initialize transformer parameters",
)
group.add_argument(
"--initial-encoder-alpha",
type=float,
default=1.0,
help="Initial alpha value in encoder's ScaledPositionalEncoding",
)
group.add_argument(
"--initial-decoder-alpha",
type=float,
default=1.0,
help="Initial alpha value in decoder's ScaledPositionalEncoding",
)
group.add_argument(
"--transformer-lr",
default=1.0,
type=float,
help="Initial value of learning rate",
)
group.add_argument(
"--transformer-warmup-steps",
default=4000,
type=int,
help="Optimizer warmup steps",
)
group.add_argument(
"--transformer-enc-dropout-rate",
default=0.1,
type=float,
help="Dropout rate for transformer encoder except for attention",
)
group.add_argument(
"--transformer-enc-positional-dropout-rate",
default=0.1,
type=float,
help="Dropout rate for transformer encoder positional encoding",
)
group.add_argument(
"--transformer-enc-attn-dropout-rate",
default=0.1,
type=float,
help="Dropout rate for transformer encoder self-attention",
)
group.add_argument(
"--transformer-dec-dropout-rate",
default=0.1,
type=float,
help="Dropout rate for transformer decoder "
"except for attention and pos encoding",
)
group.add_argument(
"--transformer-dec-positional-dropout-rate",
default=0.1,
type=float,
help="Dropout rate for transformer decoder positional encoding",
)
group.add_argument(
"--transformer-dec-attn-dropout-rate",
default=0.1,
type=float,
help="Dropout rate for transformer decoder self-attention",
)
group.add_argument(
"--transformer-enc-dec-attn-dropout-rate",
default=0.1,
type=float,
help="Dropout rate for transformer encoder-decoder attention",
)
group.add_argument(
"--eprenet-dropout-rate",
default=0.5,
type=float,
help="Dropout rate in encoder prenet",
)
group.add_argument(
"--dprenet-dropout-rate",
default=0.5,
type=float,
help="Dropout rate in decoder prenet",
)
group.add_argument(
"--postnet-dropout-rate",
default=0.5,
type=float,
help="Dropout rate in postnet",
)
group.add_argument(
"--pretrained-model", default=None, type=str, help="Pretrained model path"
)
# loss related
group.add_argument(
"--use-masking",
default=True,
type=strtobool,
help="Whether to use masking in calculation of loss",
)
group.add_argument(
"--use-weighted-masking",
default=False,
type=strtobool,
help="Whether to use weighted masking in calculation of loss",
)
group.add_argument(
"--loss-type",
default="L1",
choices=["L1", "L2", "L1+L2"],
help="How to calc loss",
)
group.add_argument(
"--bce-pos-weight",
default=5.0,
type=float,
help="Positive sample weight in BCE calculation "
"(only for use-masking=True)",
)
group.add_argument(
"--use-guided-attn-loss",
default=False,
type=strtobool,
help="Whether to use guided attention loss",
)
group.add_argument(
"--guided-attn-loss-sigma",
default=0.4,
type=float,
help="Sigma in guided attention loss",
)
group.add_argument(
"--guided-attn-loss-lambda",
default=1.0,
type=float,
help="Lambda in guided attention loss",
)
group.add_argument(
"--num-heads-applied-guided-attn",
default=2,
type=int,
help="Number of heads in each layer to be applied guided attention loss"
"if set -1, all of the heads will be applied.",
)
group.add_argument(
"--num-layers-applied-guided-attn",
default=2,
type=int,
help="Number of layers to be applied guided attention loss"
"if set -1, all of the layers will be applied.",
)
group.add_argument(
"--modules-applied-guided-attn",
type=str,
nargs="+",
default=["encoder-decoder"],
help="Module name list to be applied guided attention loss",
)
return parser
@property
def attention_plot_class(self):
"""Return plot class for attention weight plot."""
return TTSPlot
def __init__(self, idim, odim, args=None):
"""Initialize Transformer-VC module.
Args:
idim (int): Dimension of the inputs.
odim (int): Dimension of the outputs.
args (Namespace, optional):
- eprenet_conv_layers (int):
Number of encoder prenet convolution layers.
- eprenet_conv_chans (int):
Number of encoder prenet convolution channels.
- eprenet_conv_filts (int):
Filter size of encoder prenet convolution.
- transformer_input_layer (str): Input layer before the encoder.
- dprenet_layers (int): Number of decoder prenet layers.
- dprenet_units (int): Number of decoder prenet hidden units.
- elayers (int): Number of encoder layers.
- eunits (int): Number of encoder hidden units.
- adim (int): Number of attention transformation dimensions.
- aheads (int): Number of heads for multi head attention.
- dlayers (int): Number of decoder layers.
- dunits (int): Number of decoder hidden units.
- postnet_layers (int): Number of postnet layers.
- postnet_chans (int): Number of postnet channels.
- postnet_filts (int): Filter size of postnet.
- use_scaled_pos_enc (bool):
Whether to use trainable scaled positional encoding.
- use_batch_norm (bool):
Whether to use batch normalization in encoder prenet.
- encoder_normalize_before (bool):
Whether to perform layer normalization before encoder block.
- decoder_normalize_before (bool):
Whether to perform layer normalization before decoder block.
- encoder_concat_after (bool): Whether to concatenate
attention layer's input and output in encoder.
- decoder_concat_after (bool): Whether to concatenate
attention layer's input and output in decoder.
- reduction_factor (int): Reduction factor (for decoder).
- encoder_reduction_factor (int): Reduction factor (for encoder).
- spk_embed_dim (int): Number of speaker embedding dimensions.
- spk_embed_integration_type: How to integrate speaker embedding.
- transformer_init (float): How to initialize transformer parameters.
- transformer_lr (float): Initial value of learning rate.
- transformer_warmup_steps (int): Optimizer warmup steps.
- transformer_enc_dropout_rate (float):
Dropout rate in encoder except attention & positional encoding.
- transformer_enc_positional_dropout_rate (float):
Dropout rate after encoder positional encoding.
- transformer_enc_attn_dropout_rate (float):
Dropout rate in encoder self-attention module.
- transformer_dec_dropout_rate (float):
Dropout rate in decoder except attention & positional encoding.
- transformer_dec_positional_dropout_rate (float):
Dropout rate after decoder positional encoding.
- transformer_dec_attn_dropout_rate (float):
Dropout rate in decoder self-attention module.
- transformer_enc_dec_attn_dropout_rate (float):
Dropout rate in encoder-decoder attention module.
- eprenet_dropout_rate (float): Dropout rate in encoder prenet.
- dprenet_dropout_rate (float): Dropout rate in decoder prenet.
- postnet_dropout_rate (float): Dropout rate in postnet.
- use_masking (bool):
Whether to apply masking for padded part in loss calculation.
- use_weighted_masking (bool):
Whether to apply weighted masking in loss calculation.
- bce_pos_weight (float): Positive sample weight in bce calculation
(only for use_masking=true).
- loss_type (str): How to calculate loss.
- use_guided_attn_loss (bool): Whether to use guided attention loss.
- num_heads_applied_guided_attn (int):
Number of heads in each layer to apply guided attention loss.
- num_layers_applied_guided_attn (int):
Number of layers to apply guided attention loss.
- modules_applied_guided_attn (list):
List of module names to apply guided attention loss.
                - guided-attn-loss-sigma (float): Sigma in guided attention loss.
- guided-attn-loss-lambda (float): Lambda in guided attention loss.
"""
# initialize base classes
TTSInterface.__init__(self)
torch.nn.Module.__init__(self)
# fill missing arguments
args = fill_missing_args(args, self.add_arguments)
# store hyperparameters
self.idim = idim
self.odim = odim
self.spk_embed_dim = args.spk_embed_dim
if self.spk_embed_dim is not None:
self.spk_embed_integration_type = args.spk_embed_integration_type
self.use_scaled_pos_enc = args.use_scaled_pos_enc
self.reduction_factor = args.reduction_factor
self.encoder_reduction_factor = args.encoder_reduction_factor
self.transformer_input_layer = args.transformer_input_layer
self.loss_type = args.loss_type
self.use_guided_attn_loss = args.use_guided_attn_loss
if self.use_guided_attn_loss:
if args.num_layers_applied_guided_attn == -1:
self.num_layers_applied_guided_attn = args.elayers
else:
self.num_layers_applied_guided_attn = (
args.num_layers_applied_guided_attn
)
if args.num_heads_applied_guided_attn == -1:
self.num_heads_applied_guided_attn = args.aheads
else:
self.num_heads_applied_guided_attn = args.num_heads_applied_guided_attn
self.modules_applied_guided_attn = args.modules_applied_guided_attn
# use idx 0 as padding idx
padding_idx = 0
# get positional encoding class
pos_enc_class = (
ScaledPositionalEncoding if self.use_scaled_pos_enc else PositionalEncoding
)
# define transformer encoder
if args.eprenet_conv_layers != 0:
# encoder prenet
encoder_input_layer = torch.nn.Sequential(
EncoderPrenet(
idim=idim,
elayers=0,
econv_layers=args.eprenet_conv_layers,
econv_chans=args.eprenet_conv_chans,
econv_filts=args.eprenet_conv_filts,
use_batch_norm=args.use_batch_norm,
dropout_rate=args.eprenet_dropout_rate,
padding_idx=padding_idx,
input_layer=torch.nn.Linear(
idim * args.encoder_reduction_factor, idim
),
),
torch.nn.Linear(args.eprenet_conv_chans, args.adim),
)
elif args.transformer_input_layer == "linear":
encoder_input_layer = torch.nn.Linear(
idim * args.encoder_reduction_factor, args.adim
)
else:
encoder_input_layer = args.transformer_input_layer
self.encoder = Encoder(
idim=idim,
attention_dim=args.adim,
attention_heads=args.aheads,
linear_units=args.eunits,
num_blocks=args.elayers,
input_layer=encoder_input_layer,
dropout_rate=args.transformer_enc_dropout_rate,
positional_dropout_rate=args.transformer_enc_positional_dropout_rate,
attention_dropout_rate=args.transformer_enc_attn_dropout_rate,
pos_enc_class=pos_enc_class,
normalize_before=args.encoder_normalize_before,
concat_after=args.encoder_concat_after,
positionwise_layer_type=args.positionwise_layer_type,
positionwise_conv_kernel_size=args.positionwise_conv_kernel_size,
)
# define projection layer
if self.spk_embed_dim is not None:
if self.spk_embed_integration_type == "add":
self.projection = torch.nn.Linear(self.spk_embed_dim, args.adim)
else:
self.projection = torch.nn.Linear(
args.adim + self.spk_embed_dim, args.adim
)
# define transformer decoder
if args.dprenet_layers != 0:
# decoder prenet
decoder_input_layer = torch.nn.Sequential(
DecoderPrenet(
idim=odim,
n_layers=args.dprenet_layers,
n_units=args.dprenet_units,
dropout_rate=args.dprenet_dropout_rate,
),
torch.nn.Linear(args.dprenet_units, args.adim),
)
else:
decoder_input_layer = "linear"
self.decoder = Decoder(
odim=-1,
attention_dim=args.adim,
attention_heads=args.aheads,
linear_units=args.dunits,
num_blocks=args.dlayers,
dropout_rate=args.transformer_dec_dropout_rate,
positional_dropout_rate=args.transformer_dec_positional_dropout_rate,
self_attention_dropout_rate=args.transformer_dec_attn_dropout_rate,
src_attention_dropout_rate=args.transformer_enc_dec_attn_dropout_rate,
input_layer=decoder_input_layer,
use_output_layer=False,
pos_enc_class=pos_enc_class,
normalize_before=args.decoder_normalize_before,
concat_after=args.decoder_concat_after,
)
# define final projection
self.feat_out = torch.nn.Linear(args.adim, odim * args.reduction_factor)
self.prob_out = torch.nn.Linear(args.adim, args.reduction_factor)
# define postnet
self.postnet = (
None
if args.postnet_layers == 0
else Postnet(
idim=idim,
odim=odim,
n_layers=args.postnet_layers,
n_chans=args.postnet_chans,
n_filts=args.postnet_filts,
use_batch_norm=args.use_batch_norm,
dropout_rate=args.postnet_dropout_rate,
)
)
# define loss function
self.criterion = TransformerLoss(
use_masking=args.use_masking,
use_weighted_masking=args.use_weighted_masking,
bce_pos_weight=args.bce_pos_weight,
)
if self.use_guided_attn_loss:
self.attn_criterion = GuidedMultiHeadAttentionLoss(
sigma=args.guided_attn_loss_sigma, alpha=args.guided_attn_loss_lambda,
)
# initialize parameters
self._reset_parameters(
init_type=args.transformer_init,
init_enc_alpha=args.initial_encoder_alpha,
init_dec_alpha=args.initial_decoder_alpha,
)
# load pretrained model
if args.pretrained_model is not None:
self.load_pretrained_model(args.pretrained_model)
def _reset_parameters(self, init_type, init_enc_alpha=1.0, init_dec_alpha=1.0):
# initialize parameters
initialize(self, init_type)
# initialize alpha in scaled positional encoding
if self.use_scaled_pos_enc:
self.encoder.embed[-1].alpha.data = torch.tensor(init_enc_alpha)
self.decoder.embed[-1].alpha.data = torch.tensor(init_dec_alpha)
def _add_first_frame_and_remove_last_frame(self, ys):
ys_in = torch.cat(
[ys.new_zeros((ys.shape[0], 1, ys.shape[2])), ys[:, :-1]], dim=1
)
return ys_in
def forward(self, xs, ilens, ys, labels, olens, spembs=None, *args, **kwargs):
"""Calculate forward propagation.
Args:
xs (Tensor): Batch of padded acoustic features (B, Tmax, idim).
ilens (LongTensor): Batch of lengths of each input batch (B,).
ys (Tensor): Batch of padded target features (B, Lmax, odim).
olens (LongTensor): Batch of the lengths of each target (B,).
spembs (Tensor, optional): Batch of speaker embedding vectors
(B, spk_embed_dim).
Returns:
Tensor: Loss value.
"""
# remove unnecessary padded part (for multi-gpus)
max_ilen = max(ilens)
max_olen = max(olens)
if max_ilen != xs.shape[1]:
xs = xs[:, :max_ilen]
if max_olen != ys.shape[1]:
ys = ys[:, :max_olen]
labels = labels[:, :max_olen]
# thin out input frames for reduction factor
# (B, Lmax, idim) -> (B, Lmax // r, idim * r)
if self.encoder_reduction_factor > 1:
B, Lmax, idim = xs.shape
if Lmax % self.encoder_reduction_factor != 0:
xs = xs[:, : -(Lmax % self.encoder_reduction_factor), :]
xs_ds = xs.contiguous().view(
B,
int(Lmax / self.encoder_reduction_factor),
idim * self.encoder_reduction_factor,
)
ilens_ds = ilens.new(
[ilen // self.encoder_reduction_factor for ilen in ilens]
)
else:
xs_ds, ilens_ds = xs, ilens
# forward encoder
x_masks = self._source_mask(ilens_ds)
hs, hs_masks = self.encoder(xs_ds, x_masks)
# integrate speaker embedding
if self.spk_embed_dim is not None:
hs_int = self._integrate_with_spk_embed(hs, spembs)
else:
hs_int = hs
# thin out frames for reduction factor (B, Lmax, odim) -> (B, Lmax//r, odim)
if self.reduction_factor > 1:
ys_in = ys[:, self.reduction_factor - 1 :: self.reduction_factor]
olens_in = olens.new([olen // self.reduction_factor for olen in olens])
else:
ys_in, olens_in = ys, olens
# add first zero frame and remove last frame for auto-regressive
ys_in = self._add_first_frame_and_remove_last_frame(ys_in)
# if conv2d, modify mask. Use ceiling division here
if "conv2d" in self.transformer_input_layer:
ilens_ds_st = ilens_ds.new(
[((ilen - 2 + 1) // 2 - 2 + 1) // 2 for ilen in ilens_ds]
)
else:
ilens_ds_st = ilens_ds
# forward decoder
y_masks = self._target_mask(olens_in)
zs, _ = self.decoder(ys_in, y_masks, hs_int, hs_masks)
# (B, Lmax//r, odim * r) -> (B, Lmax//r * r, odim)
before_outs = self.feat_out(zs).view(zs.size(0), -1, self.odim)
# (B, Lmax//r, r) -> (B, Lmax//r * r)
logits = self.prob_out(zs).view(zs.size(0), -1)
# postnet -> (B, Lmax//r * r, odim)
if self.postnet is None:
after_outs = before_outs
else:
after_outs = before_outs + self.postnet(
before_outs.transpose(1, 2)
).transpose(1, 2)
        # modify the mod part of the ground truth
if self.reduction_factor > 1:
olens = olens.new([olen - olen % self.reduction_factor for olen in olens])
max_olen = max(olens)
ys = ys[:, :max_olen]
labels = labels[:, :max_olen]
labels[:, -1] = 1.0 # make sure at least one frame has 1
        # calculate loss values
l1_loss, l2_loss, bce_loss = self.criterion(
after_outs, before_outs, logits, ys, labels, olens
)
if self.loss_type == "L1":
loss = l1_loss + bce_loss
elif self.loss_type == "L2":
loss = l2_loss + bce_loss
elif self.loss_type == "L1+L2":
loss = l1_loss + l2_loss + bce_loss
else:
raise ValueError("unknown --loss-type " + self.loss_type)
report_keys = [
{"l1_loss": l1_loss.item()},
{"l2_loss": l2_loss.item()},
{"bce_loss": bce_loss.item()},
{"loss": loss.item()},
]
# calculate guided attention loss
if self.use_guided_attn_loss:
# calculate for encoder
if "encoder" in self.modules_applied_guided_attn:
att_ws = []
for idx, layer_idx in enumerate(
reversed(range(len(self.encoder.encoders)))
):
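                    # walk the encoder layers from the last one backwards, keeping only
                    # the first num_heads_applied_guided_attn heads of each layer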
att_ws += [
self.encoder.encoders[layer_idx].self_attn.attn[
:, : self.num_heads_applied_guided_attn
]
]
if idx + 1 == self.num_layers_applied_guided_attn:
break
att_ws = torch.cat(att_ws, dim=1) # (B, H*L, T_in, T_in)
enc_attn_loss = self.attn_criterion(
att_ws, ilens_ds_st, ilens_ds_st
) # TODO(unilight): is changing to ilens_ds_st right?
loss = loss + enc_attn_loss
report_keys += [{"enc_attn_loss": enc_attn_loss.item()}]
# calculate for decoder
if "decoder" in self.modules_applied_guided_attn:
att_ws = []
for idx, layer_idx in enumerate(
reversed(range(len(self.decoder.decoders)))
):
att_ws += [
self.decoder.decoders[layer_idx].self_attn.attn[
:, : self.num_heads_applied_guided_attn
]
]
if idx + 1 == self.num_layers_applied_guided_attn:
break
att_ws = torch.cat(att_ws, dim=1) # (B, H*L, T_out, T_out)
dec_attn_loss = self.attn_criterion(att_ws, olens_in, olens_in)
loss = loss + dec_attn_loss
report_keys += [{"dec_attn_loss": dec_attn_loss.item()}]
# calculate for encoder-decoder
if "encoder-decoder" in self.modules_applied_guided_attn:
att_ws = []
for idx, layer_idx in enumerate(
reversed(range(len(self.decoder.decoders)))
):
att_ws += [
self.decoder.decoders[layer_idx].src_attn.attn[
:, : self.num_heads_applied_guided_attn
]
]
if idx + 1 == self.num_layers_applied_guided_attn:
break
att_ws = torch.cat(att_ws, dim=1) # (B, H*L, T_out, T_in)
enc_dec_attn_loss = self.attn_criterion(
att_ws, ilens_ds_st, olens_in
) # TODO(unilight): is changing to ilens_ds_st right?
loss = loss + enc_dec_attn_loss
report_keys += [{"enc_dec_attn_loss": enc_dec_attn_loss.item()}]
# report extra information
if self.use_scaled_pos_enc:
report_keys += [
{"encoder_alpha": self.encoder.embed[-1].alpha.data.item()},
{"decoder_alpha": self.decoder.embed[-1].alpha.data.item()},
]
self.reporter.report(report_keys)
return loss
def inference(self, x, inference_args, spemb=None, *args, **kwargs):
"""Generate the sequence of features given the sequences of acoustic features.
Args:
x (Tensor): Input sequence of acoustic features (T, idim).
inference_args (Namespace):
- threshold (float): Threshold in inference.
- minlenratio (float): Minimum length ratio in inference.
- maxlenratio (float): Maximum length ratio in inference.
spemb (Tensor, optional): Speaker embedding vector (spk_embed_dim).
Returns:
Tensor: Output sequence of features (L, odim).
Tensor: Output sequence of stop probabilities (L,).
Tensor: Encoder-decoder (source) attention weights (#layers, #heads, L, T).
"""
# get options
threshold = inference_args.threshold
minlenratio = inference_args.minlenratio
maxlenratio = inference_args.maxlenratio
use_att_constraint = getattr(
inference_args, "use_att_constraint", False
) # keep compatibility
if use_att_constraint:
logging.warning(
"Attention constraint is not yet supported in Transformer. Not enabled."
)
# thin out input frames for reduction factor
# (B, Lmax, idim) -> (B, Lmax // r, idim * r)
if self.encoder_reduction_factor > 1:
Lmax, idim = x.shape
if Lmax % self.encoder_reduction_factor != 0:
x = x[: -(Lmax % self.encoder_reduction_factor), :]
x_ds = x.contiguous().view(
int(Lmax / self.encoder_reduction_factor),
idim * self.encoder_reduction_factor,
)
else:
x_ds = x
# forward encoder
x_ds = x_ds.unsqueeze(0)
hs, _ = self.encoder(x_ds, None)
# integrate speaker embedding
if self.spk_embed_dim is not None:
spembs = spemb.unsqueeze(0)
hs = self._integrate_with_spk_embed(hs, spembs)
# set limits of length
maxlen = int(hs.size(1) * maxlenratio / self.reduction_factor)
minlen = int(hs.size(1) * minlenratio / self.reduction_factor)
# initialize
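        # decoding starts from a single all-zero frame, matching the zero "go" frame
        # prepended to the targets during training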
idx = 0
ys = hs.new_zeros(1, 1, self.odim)
outs, probs = [], []
# forward decoder step-by-step
z_cache = self.decoder.init_state(x)
while True:
# update index
idx += 1
# calculate output and stop prob at idx-th step
y_masks = subsequent_mask(idx).unsqueeze(0).to(x.device)
z, z_cache = self.decoder.forward_one_step(
ys, y_masks, hs, cache=z_cache
) # (B, adim)
outs += [
self.feat_out(z).view(self.reduction_factor, self.odim)
] # [(r, odim), ...]
probs += [torch.sigmoid(self.prob_out(z))[0]] # [(r), ...]
# update next inputs
ys = torch.cat(
(ys, outs[-1][-1].view(1, 1, self.odim)), dim=1
) # (1, idx + 1, odim)
# get attention weights
att_ws_ = []
for name, m in self.named_modules():
if isinstance(m, MultiHeadedAttention) and "src" in name:
att_ws_ += [m.attn[0, :, -1].unsqueeze(1)] # [(#heads, 1, T),...]
if idx == 1:
att_ws = att_ws_
else:
# [(#heads, l, T), ...]
att_ws = [
torch.cat([att_w, att_w_], dim=1)
for att_w, att_w_ in zip(att_ws, att_ws_)
]
# check whether to finish generation
if int(sum(probs[-1] >= threshold)) > 0 or idx >= maxlen:
                # check minimum length
if idx < minlen:
continue
outs = (
torch.cat(outs, dim=0).unsqueeze(0).transpose(1, 2)
) # (L, odim) -> (1, L, odim) -> (1, odim, L)
if self.postnet is not None:
outs = outs + self.postnet(outs) # (1, odim, L)
outs = outs.transpose(2, 1).squeeze(0) # (L, odim)
probs = torch.cat(probs, dim=0)
break
# concatenate attention weights -> (#layers, #heads, L, T)
att_ws = torch.stack(att_ws, dim=0)
return outs, probs, att_ws
def calculate_all_attentions(
self,
xs,
ilens,
ys,
olens,
spembs=None,
skip_output=False,
keep_tensor=False,
*args,
**kwargs
):
"""Calculate all of the attention weights.
Args:
xs (Tensor): Batch of padded acoustic features (B, Tmax, idim).
ilens (LongTensor): Batch of lengths of each input batch (B,).
ys (Tensor): Batch of padded target features (B, Lmax, odim).
olens (LongTensor): Batch of the lengths of each target (B,).
spembs (Tensor, optional): Batch of speaker embedding vectors
(B, spk_embed_dim).
skip_output (bool, optional): Whether to skip calculate the final output.
keep_tensor (bool, optional): Whether to keep original tensor.
Returns:
dict: Dict of attention weights and outputs.
"""
with torch.no_grad():
# thin out input frames for reduction factor
# (B, Lmax, idim) -> (B, Lmax // r, idim * r)
if self.encoder_reduction_factor > 1:
B, Lmax, idim = xs.shape
if Lmax % self.encoder_reduction_factor != 0:
xs = xs[:, : -(Lmax % self.encoder_reduction_factor), :]
xs_ds = xs.contiguous().view(
B,
int(Lmax / self.encoder_reduction_factor),
idim * self.encoder_reduction_factor,
)
ilens_ds = ilens.new(
[ilen // self.encoder_reduction_factor for ilen in ilens]
)
else:
xs_ds, ilens_ds = xs, ilens
# forward encoder
x_masks = self._source_mask(ilens_ds)
hs, hs_masks = self.encoder(xs_ds, x_masks)
# integrate speaker embedding
if self.spk_embed_dim is not None:
hs = self._integrate_with_spk_embed(hs, spembs)
# thin out frames for reduction factor
# (B, Lmax, odim) -> (B, Lmax//r, odim)
if self.reduction_factor > 1:
ys_in = ys[:, self.reduction_factor - 1 :: self.reduction_factor]
olens_in = olens.new([olen // self.reduction_factor for olen in olens])
else:
ys_in, olens_in = ys, olens
# add first zero frame and remove last frame for auto-regressive
ys_in = self._add_first_frame_and_remove_last_frame(ys_in)
# forward decoder
y_masks = self._target_mask(olens_in)
zs, _ = self.decoder(ys_in, y_masks, hs, hs_masks)
# calculate final outputs
if not skip_output:
before_outs = self.feat_out(zs).view(zs.size(0), -1, self.odim)
if self.postnet is None:
after_outs = before_outs
else:
after_outs = before_outs + self.postnet(
before_outs.transpose(1, 2)
).transpose(1, 2)
        # modify the mod part of output lengths due to reduction factor > 1
if self.reduction_factor > 1:
olens = olens.new([olen - olen % self.reduction_factor for olen in olens])
# store into dict
att_ws_dict = dict()
if keep_tensor:
for name, m in self.named_modules():
if isinstance(m, MultiHeadedAttention):
att_ws_dict[name] = m.attn
if not skip_output:
att_ws_dict["before_postnet_fbank"] = before_outs
att_ws_dict["after_postnet_fbank"] = after_outs
else:
for name, m in self.named_modules():
if isinstance(m, MultiHeadedAttention):
attn = m.attn.cpu().numpy()
if "encoder" in name:
attn = [a[:, :l, :l] for a, l in zip(attn, ilens.tolist())]
elif "decoder" in name:
if "src" in name:
attn = [
a[:, :ol, :il]
for a, il, ol in zip(
attn, ilens.tolist(), olens_in.tolist()
)
]
elif "self" in name:
attn = [
a[:, :l, :l] for a, l in zip(attn, olens_in.tolist())
]
else:
logging.warning("unknown attention module: " + name)
else:
logging.warning("unknown attention module: " + name)
att_ws_dict[name] = attn
if not skip_output:
before_outs = before_outs.cpu().numpy()
after_outs = after_outs.cpu().numpy()
att_ws_dict["before_postnet_fbank"] = [
m[:l].T for m, l in zip(before_outs, olens.tolist())
]
att_ws_dict["after_postnet_fbank"] = [
m[:l].T for m, l in zip(after_outs, olens.tolist())
]
return att_ws_dict
def _integrate_with_spk_embed(self, hs, spembs):
"""Integrate speaker embedding with hidden states.
Args:
hs (Tensor): Batch of hidden state sequences (B, Tmax, adim).
spembs (Tensor): Batch of speaker embeddings (B, spk_embed_dim).
Returns:
Tensor: Batch of integrated hidden state sequences (B, Tmax, adim)
"""
if self.spk_embed_integration_type == "add":
# apply projection and then add to hidden states
spembs = self.projection(F.normalize(spembs))
hs = hs + spembs.unsqueeze(1)
elif self.spk_embed_integration_type == "concat":
# concat hidden states with spk embeds and then apply projection
spembs = F.normalize(spembs).unsqueeze(1).expand(-1, hs.size(1), -1)
hs = self.projection(torch.cat([hs, spembs], dim=-1))
else:
raise NotImplementedError("support only add or concat.")
return hs
def _source_mask(self, ilens):
"""Make masks for self-attention.
Args:
ilens (LongTensor or List): Batch of lengths (B,).
Returns:
Tensor: Mask tensor for self-attention.
                dtype=torch.uint8 in PyTorch < 1.2
                dtype=torch.bool in PyTorch >= 1.2
Examples:
>>> ilens = [5, 3]
>>> self._source_mask(ilens)
            tensor([[[1, 1, 1, 1, 1]],
                    [[1, 1, 1, 0, 0]]], dtype=torch.uint8)
"""
x_masks = make_non_pad_mask(ilens).to(next(self.parameters()).device)
return x_masks.unsqueeze(-2)
def _target_mask(self, olens):
"""Make masks for masked self-attention.
Args:
olens (LongTensor or List): Batch of lengths (B,).
Returns:
Tensor: Mask tensor for masked self-attention.
                dtype=torch.uint8 in PyTorch < 1.2
                dtype=torch.bool in PyTorch >= 1.2
Examples:
>>> olens = [5, 3]
>>> self._target_mask(olens)
tensor([[[1, 0, 0, 0, 0],
[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]],
[[1, 0, 0, 0, 0],
[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[1, 1, 1, 0, 0],
[1, 1, 1, 0, 0]]], dtype=torch.uint8)
"""
y_masks = make_non_pad_mask(olens).to(next(self.parameters()).device)
s_masks = subsequent_mask(y_masks.size(-1), device=y_masks.device).unsqueeze(0)
return y_masks.unsqueeze(-2) & s_masks
@property
def base_plot_keys(self):
"""Return base key names to plot during training.
keys should match what `chainer.reporter` reports.
If you add the key `loss`, the reporter will report `main/loss`
and `validation/main/loss` values.
        also `loss.png` will be created as a figure visualizing `main/loss`
and `validation/main/loss` values.
Returns:
list: List of strings which are base keys to plot during training.
"""
plot_keys = ["loss", "l1_loss", "l2_loss", "bce_loss"]
if self.use_scaled_pos_enc:
plot_keys += ["encoder_alpha", "decoder_alpha"]
if self.use_guided_attn_loss:
if "encoder" in self.modules_applied_guided_attn:
plot_keys += ["enc_attn_loss"]
if "decoder" in self.modules_applied_guided_attn:
plot_keys += ["dec_attn_loss"]
if "encoder-decoder" in self.modules_applied_guided_attn:
plot_keys += ["enc_dec_attn_loss"]
return plot_keys
| [
"torch.nn.Linear",
"torch.nn.functional.normalize",
"torch.cat",
"torch.stack",
"torch.no_grad",
"torch.nn.Module.__init__",
"torch.tensor"
] | 1.1.0 | undeadyequ/espnet | 8c3f85ce695153abcb9cf365180b1d7554ad565e |
1.5 | import os
import glob
import math
import hydra
import cv2
import numpy as np
from shapely.geometry import Polygon
import torch
from torch.utils.data import Dataset, DataLoader
import imgaug.augmenters as iaa
import pyclipper
import db_transforms
from utils import dict_to_device, minmax_scaler_img
class BaseDatasetIter(Dataset):
def __init__(self,
train_dir,
train_gt_dir,
ignore_tags,
is_training=True,
image_size=640,
min_text_size=8,
shrink_ratio=0.4,
thresh_min=0.3,
thresh_max=0.7,
augment=None,
mean=[103.939, 116.779, 123.68],
debug=False):
self.train_dir = train_dir
self.train_gt_dir = train_gt_dir
self.ignore_tags = ignore_tags
self.is_training = is_training
self.image_size = image_size
self.min_text_size = min_text_size
self.shrink_ratio = shrink_ratio
self.thresh_min = thresh_min
self.thresh_max = thresh_max
self.augment = augment
if self.augment is None:
self.augment = self._get_default_augment()
self.mean = mean
self.debug = debug
# load metadata
self.image_paths, self.gt_paths = self.load_metadata(
train_dir, train_gt_dir)
# load annotation
self.all_anns = self.load_all_anns(self.gt_paths)
assert len(self.image_paths) == len(self.all_anns)
def _get_default_augment(self):
augment_seq = iaa.Sequential([
iaa.Fliplr(0.5),
iaa.Affine(rotate=(-10, 10)),
iaa.Resize((0.5, 3.0))
])
return augment_seq
def __len__(self):
return len(self.image_paths)
def __getitem__(self, index):
image_path = self.image_paths[index]
anns = self.all_anns[index]
if self.debug:
print(image_path)
print(len(anns))
img = cv2.imread(image_path)[:, :, ::-1]
if self.is_training and self.augment is not None:
augment_seq = self.augment.to_deterministic()
img, anns = db_transforms.transform(augment_seq, img, anns)
img, anns = db_transforms.crop(img, anns)
img, anns = db_transforms.resize(self.image_size, img, anns)
anns = [ann for ann in anns if Polygon(ann['poly']).buffer(0).is_valid]
gt = np.zeros((self.image_size, self.image_size),
dtype=np.float32) # batch_gts
mask = np.ones((self.image_size, self.image_size), dtype=np.float32)
thresh_map = np.zeros((self.image_size, self.image_size),
dtype=np.float32) # batch_thresh_maps
# batch_thresh_masks
thresh_mask = np.zeros((self.image_size, self.image_size),
dtype=np.float32)
if self.debug:
print(type(anns), len(anns))
ignore_tags = []
for ann in anns:
            # i.e. shape = (4, 2) / (6, 2) / ...
poly = np.array(ann['poly'])
height = max(poly[:, 1]) - min(poly[:, 1])
width = max(poly[:, 0]) - min(poly[:, 0])
polygon = Polygon(poly)
# generate gt and mask
if polygon.area < 1 or \
min(height, width) < self.min_text_size or \
ann['text'] in self.ignore_tags:
ignore_tags.append(True)
cv2.fillPoly(mask, poly.astype(np.int32)[np.newaxis, :, :], 0)
continue
else:
# 6th equation
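                # shrink offset D = Area * (1 - shrink_ratio^2) / Perimeter (Eq. 6 of the DB paper)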
distance = polygon.area * \
(1 - np.power(self.shrink_ratio, 2)) / polygon.length
subject = [tuple(_l) for _l in ann['poly']]
padding = pyclipper.PyclipperOffset()
padding.AddPath(subject, pyclipper.JT_ROUND,
pyclipper.ET_CLOSEDPOLYGON)
shrinked = padding.Execute(-distance)
if len(shrinked) == 0:
ignore_tags.append(True)
cv2.fillPoly(mask,
poly.astype(np.int32)[np.newaxis, :, :], 0)
continue
else:
shrinked = np.array(shrinked[0]).reshape(-1, 2)
if shrinked.shape[0] > 2 and \
Polygon(shrinked).buffer(0).is_valid:
ignore_tags.append(False)
cv2.fillPoly(gt, [shrinked.astype(np.int32)], 1)
else:
ignore_tags.append(True)
cv2.fillPoly(mask,
poly.astype(np.int32)[np.newaxis, :, :],
0)
continue
# generate thresh map and thresh mask
db_transforms.draw_thresh_map(ann['poly'],
thresh_map,
thresh_mask,
shrink_ratio=self.shrink_ratio)
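        # rescale the accumulated threshold map from [0, 1] to [thresh_min, thresh_max]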
thresh_map = thresh_map * \
(self.thresh_max - self.thresh_min) + self.thresh_min
img = img.astype(np.float32)
img[..., 0] -= self.mean[0]
img[..., 1] -= self.mean[1]
img[..., 2] -= self.mean[2]
img = np.transpose(img, (2, 0, 1))
data_return = {
"image_path": image_path,
"img": img,
"prob_map": gt,
"supervision_mask": mask,
"thresh_map": thresh_map,
"text_area_map": thresh_mask,
}
# for batch_size = 1
if not self.is_training:
data_return["anns"] = [ann['poly'] for ann in anns]
data_return["ignore_tags"] = ignore_tags
# return image_path, img, gt, mask, thresh_map, thresh_mask
return data_return
class TotalTextDatasetIter(BaseDatasetIter):
def __init__(self, train_dir, train_gt_dir, ignore_tags, **kwargs):
super().__init__(train_dir, train_gt_dir, ignore_tags, **kwargs)
def load_metadata(self, img_dir, gt_dir):
img_fps = sorted(glob.glob(os.path.join(img_dir, "*")))
gt_fps = []
for img_fp in img_fps:
img_id = img_fp.split("/")[-1].replace("img", "").split(".")[0]
gt_fn = "gt_img{}.txt".format(img_id)
gt_fp = os.path.join(gt_dir, gt_fn)
assert os.path.exists(img_fp)
gt_fps.append(gt_fp)
assert len(img_fps) == len(gt_fps)
return img_fps, gt_fps
def load_all_anns(self, gt_paths):
res = []
for gt in gt_paths:
lines = []
reader = open(gt, 'r').readlines()
for line in reader:
item = {}
parts = line.strip().split(',')
label = parts[-1]
line = [i.strip('\ufeff').strip('\xef\xbb\xbf') for i in parts]
num_points = math.floor((len(line) - 1) / 2) * 2
poly = np.array(list(map(float, line[:num_points]))).reshape(
(-1, 2)).tolist()
if len(poly) < 3:
continue
item['poly'] = poly
item['text'] = label
lines.append(item)
res.append(lines)
return res
class CTW1500DatasetIter(BaseDatasetIter):
def __init__(self, train_dir, train_gt_dir, ignore_tags, **kwargs):
super().__init__(train_dir, train_gt_dir, ignore_tags, **kwargs)
def load_metadata(self, img_dir, gt_dir):
img_fps = sorted(glob.glob(os.path.join(img_dir, "*")))
gt_fps = []
for img_fp in img_fps:
img_id = img_fp.split("/")[-1][:-4]
gt_fn = "{}.txt".format(img_id)
gt_fp = os.path.join(gt_dir, gt_fn)
assert os.path.exists(img_fp)
gt_fps.append(gt_fp)
assert len(img_fps) == len(gt_fps)
return img_fps, gt_fps
def load_all_anns(self, gt_fps):
"""
Reference: https://github.com/whai362/PSENet/blob/master/dataset/ctw1500_loader.py
"""
res = []
for gt_fp in gt_fps:
lines = []
with open(gt_fp, 'r') as f:
for line in f:
item = {}
gt = line.strip().strip('\ufeff').strip('\xef\xbb\xbf')
gt = list(map(int, gt.split(',')))
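                    # gt[0:2] is the top-left (x1, y1) of the text box; gt[4:32] holds
                    # 14 (dx, dy) polygon offsets expressed relative to that corner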
x1 = np.int(gt[0])
y1 = np.int(gt[1])
bbox = [np.int(gt[i]) for i in range(4, 32)]
bbox = np.asarray(bbox) + ([x1, y1] * 14)
bbox = bbox.reshape(-1, 2).tolist()
item['poly'] = bbox
item['text'] = 'True'
lines.append(item)
res.append(lines)
return res
class ICDAR2015DatasetIter(BaseDatasetIter):
def __init__(self, train_dir, train_gt_dir, ignore_tags, **kwargs):
super().__init__(train_dir, train_gt_dir, ignore_tags, **kwargs)
def load_metadata(self, img_dir, gt_dir):
img_fps = glob.glob(os.path.join(img_dir, "*"))
gt_fps = []
for img_fp in img_fps:
img_id = img_fp.split("/")[-1].split(".")[0]
gt_fn = "gt_{}.txt".format(img_id)
gt_fp = os.path.join(gt_dir, gt_fn)
assert os.path.exists(img_fp)
gt_fps.append(gt_fp)
assert len(img_fps) == len(gt_fps)
return img_fps, gt_fps
def load_all_anns(self, gt_fps):
res = []
for gt_fp in gt_fps:
lines = []
with open(gt_fp, 'r') as f:
for line in f:
item = {}
gt = line.strip().strip('\ufeff').strip(
'\xef\xbb\xbf').split(",")
label = ",".join(gt[8:])
poly = list(map(int, gt[:8]))
poly = np.asarray(poly).reshape(-1, 2).tolist()
item['poly'] = poly
item['text'] = label
lines.append(item)
res.append(lines)
return res
class MSRATD500DatasetIter(BaseDatasetIter):
def __init__(self, train_dir, train_gt_dir, ignore_tags, **kwargs):
super().__init__(train_dir, train_gt_dir, ignore_tags, **kwargs)
def transform_four_points(self, points, center_point, theta):
"""Reference: https://stackoverflow.com/questions/622140
"""
theta = -theta
new_coords = []
x_center, y_center = center_point
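        # rotate each corner of the axis-aligned box around the box centre by the annotated angle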
for point in points:
x, y = point
x_new = x_center + (x - x_center) * np.cos(theta) + \
(y - y_center) * np.sin(theta)
y_new = y_center - (x - x_center) * np.sin(theta) + \
(y - y_center) * np.cos(theta)
x_new = int(x_new)
y_new = int(y_new)
new_coords.append((x_new, y_new))
return new_coords
def load_metadata(self, img_dir, gt_dir=None):
# ignore gt_dir
img_fps = sorted(glob.glob(os.path.join(img_dir, "*.JPG")))
gt_fps = sorted(glob.glob(os.path.join(img_dir, "*.gt")))
assert len(img_fps) == len(gt_fps)
return img_fps, gt_fps
def load_all_anns(self, gt_fps):
res = []
for gt_fp in gt_fps:
lines = []
with open(gt_fp, 'r') as f:
for line in f:
item = {}
line = list(map(float, line.strip().split()))
index, dif, x_min, y_min, w, h, theta = line
if int(dif) == 1: # difficult label
continue
c1 = (x_min, y_min)
c2 = (x_min + w, y_min)
c3 = (x_min + w, y_min + h)
c4 = (x_min, y_min + h)
center = (x_min + w / 2, y_min + h / 2)
rot_box = self.transform_four_points([c1, c2, c3, c4],
center, theta)
rot_box = np.array(rot_box).tolist()
item['poly'] = rot_box
item['text'] = 'True'
lines.append(item)
res.append(lines)
return res
@hydra.main(config_path="../config.yaml", strict=False)
def run(cfg):
dataset_name = cfg.dataset.name
ignore_tags = cfg.data[dataset_name].ignore_tags
train_dir = cfg.data[dataset_name].train_dir
train_gt_dir = cfg.data[dataset_name].train_gt_dir
if dataset_name == 'totaltext':
TextDatasetIter = TotalTextDatasetIter
elif dataset_name == 'ctw1500':
TextDatasetIter = CTW1500DatasetIter
elif dataset_name == 'icdar2015':
TextDatasetIter = ICDAR2015DatasetIter
elif dataset_name == 'msra_td500':
TextDatasetIter = MSRATD500DatasetIter
else:
raise NotImplementedError("Pls provide valid dataset name!")
train_iter = TextDatasetIter(train_dir,
train_gt_dir,
ignore_tags,
is_training=True,
debug=False)
train_loader = DataLoader(dataset=train_iter,
batch_size=1,
shuffle=True,
num_workers=1)
samples = next(iter(train_loader))
samples = dict_to_device(samples, device='cpu')
for k, v in samples.items():
if isinstance(v, torch.Tensor):
print(samples[k].device)
import matplotlib.pyplot as plt
plt.figure()
plt.imshow(minmax_scaler_img(samples['img'][0].numpy().transpose(1, 2, 0)))
plt.imshow(samples['prob_map'][0], cmap='jet', alpha=0.35)
plt.imshow(samples['thresh_map'][0], cmap='jet', alpha=0.5)
# plt.imshow(samples['text_area_map'][0], cmap='jet', alpha=0.5)
# plt.imshow(samples['supervision_mask'][0], cmap='jet', alpha=0.5)
plt.savefig(os.path.join(cfg.meta.root_dir, 'tmp/foo.jpg'),
bbox_inches='tight')
if __name__ == '__main__':
run()
| [
"torch.utils.data.DataLoader"
] | 1.5.0 | huyhoang17/DB_text_minimal | 0d1466889b21cb74a0571a0fb3856902739ea523 |
1.4 | import numpy as np
import torch
import torch.distributed as dist
def tensor(x, device):
if isinstance(x, torch.Tensor):
return x.to(device)
x = np.asarray(x, dtype=np.float)
x = torch.tensor(x, device=device, dtype=torch.float32)
return x
def input_preprocessing(x, device):
x = tensor(x, device)
x = x.float()
x /= 255.0
return x
def to_np(t):
return t.cpu().detach().numpy()
def random_seed(seed=None):
np.random.seed(seed)
torch.manual_seed(np.random.randint(int(1e6)))
def restore_model(model, save_path):
checkpoint = torch.load(save_path)
model.network.load_state_dict(checkpoint["model_state_dict"])
model.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
update = checkpoint["update"]
return update
def sync_initial_weights(model):
for param in model.parameters():
dist.broadcast(param.data, src=0)
def sync_gradients(model):
for param in model.parameters():
dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
def cleanup():
dist.destroy_process_group()
def sync_values(tensor_sum_values, tensor_nb_values):
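    # sum the per-worker values and counts onto rank 0, then return their ratio
    # (the result is the global mean on rank 0)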
dist.reduce(tensor_sum_values, dst=0)
dist.reduce(tensor_nb_values, dst=0)
return tensor_sum_values / tensor_nb_values
def range_tensor(t, device):
return torch.arange(t).long().to(device)
def zeros(shape, dtype):
"""Attempt to return torch tensor of zeros, or if numpy dtype provided,
return numpy array or zeros."""
try:
return torch.zeros(shape, dtype=dtype)
except TypeError:
return np.zeros(shape, dtype=dtype)
| [
"torch.zeros",
"torch.distributed.destroy_process_group",
"torch.arange",
"torch.tensor",
"torch.distributed.all_reduce",
"torch.load",
"torch.distributed.reduce",
"torch.distributed.broadcast"
] | 1.4.0 | Laurans/procgen_adventure | 5f88f3f647f7854c8fb2ae516f3490d89845eefa |
1.4 | import tensorflow as tf
from sklearn.metrics import confusion_matrix
import numpy as np
from tools.preprocessing import preprocess_images, preprocess_single_image
from tools.kfold import KFold_cross_validation_split
from tools.extraction_and_metrics import extract_features, compute_confusion_matrix
from .network import Net
import torchvision.models as models
import torch
import os
import cv2
# Feature composer training
def train_feature_composer(
composed_dataset_path: str,
epochs: int,
batch_size: int,
num_classes: int,
folds: int,
lr:float,
cuda: bool,
ckpt_dir: str
):
"""
    Feature composer training.
params:
<string> composed_dataset_path
<int> epochs
<int> batch_size
<int> num_classes
<int> folds: Number of folds for KFold cross validation
<float> lr: Learning rate
<bool> cuda: Whether to use GPU or not
<string> ckpt_dir: Model's location
"""
# Preprocess images, returning the classes, features and labels
class_names, x, y = preprocess_images(
dataset_path=composed_dataset_path,
width=224,
height=224,
num_classes=num_classes,
framework="torch",
imagenet=True
)
# Split data
X_train, X_test, Y_train, Y_test = KFold_cross_validation_split(
features=x,
labels=y,
n_splits=folds
)
# Normalize
X_train /= 255
X_test /= 255
# Instantiate model
net = Net(
models.vgg16(pretrained=True),
num_classes=num_classes,
lr=lr,
cuda=cuda,
mode="feature_composer",
ckpt_dir=ckpt_dir,
labels=class_names
)
# Train model
net.fit(
X_train,
Y_train,
X_test,
Y_test,
epochs,
batch_size,
resume=False
)
# Confusion matrix
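    # the composed dataset presumably holds two decomposed sub-classes per original
    # class, hence the evaluation over num_classes // 2 labels below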
compute_confusion_matrix(
y_true=Y_test,
y_pred=net.infer(X_test),
framework="torch",
mode="feature_composer",
num_classes = num_classes // 2
)
# Inference
def infer(
ckpt_dir: str,
ckpt_name: str,
input_image: str
) -> dict:
"""
Main inference method.
params:
<string> ckpt_dir: Saved model's directory
<string> ckpt_name: Saved model's name
<string> input_image: Image path
returns:
<dict> Dictionary containing the predictions with their levels of confidence.
E.g.: {
COVID19_1:0.10
COVID19_2:0.15
...
}
"""
ckpt_path = os.path.join(ckpt_dir, ckpt_name)
num_classes = torch.load(ckpt_path, map_location=lambda storage, loc: storage)["num_classes"]
# Instantiate model
net = Net(
models.vgg16(pretrained=True),
num_classes=num_classes,
mode="feature_composer",
ckpt_dir=ckpt_dir
)
# Load model
net.load_model_for_inference(os.path.join(ckpt_dir, ckpt_name))
# Check if inputed file is an image.
assert input_image.lower().endswith("png") or input_image.lower().endswith("jpg") or input_image.lower().endswith("jpeg")
# Preprocess
img = preprocess_single_image(
img=input_image,
width=224,
height=224,
imagenet=True,
framework="torch"
)
# Return prediction
return net.infer(img, ckpt_path = os.path.join(ckpt_dir, ckpt_name), use_labels=True)
| [
"torch.load"
] | 1.4.0 | abdelsamea/DeTraC | ab03719b49a1a048f74f08600a6670f6757bbe60 |
1.3 | from collections import OrderedDict
import pytest
import gym
from gym import spaces
import torch
from torch import nn
import torch.nn.functional as F
from torch import distributions
import pytorch_lightning as pl
from lightning_baselines3.on_policy_models.on_policy_model import OnPolicyModel
class DummyModel(OnPolicyModel):
def __init__(self, *args, **kwargs):
super(DummyModel, self).__init__(*args, **kwargs)
if isinstance(self.action_space, spaces.Discrete):
self.p = nn.Parameter(torch.ones(1, self.action_space.n) * 0.5)
elif isinstance(self.action_space, spaces.Box):
self.p = nn.Parameter(torch.ones(1, self.action_space.shape[0] * 2) * 0.5)
else:
raise Exception('Incompatible environment action space')
def forward(self, x, **kwargs):
p = self.p.expand(x.shape[0], self.p.shape[-1])
if isinstance(self.action_space, spaces.Discrete):
dist = distributions.Categorical(probs=F.softmax(p, dim=1))
elif isinstance(self.action_space, spaces.Box):
p = torch.chunk(p, 2, dim=1)
dist = distributions.Normal(loc=p[0], scale=1 + p[1] ** 2)
return dist, torch.ones_like(x)[:, :1]
def predict(self, x, deterministic=True):
p = self.p.expand(x.shape[0], self.p.shape[-1])
if deterministic:
if isinstance(self.action_space, spaces.Discrete):
out = torch.max(p, dim=1)[1]
elif isinstance(self.action_space, spaces.Box):
out = torch.chunk(p, 2, dim=1)[0]
else:
if isinstance(self.action_space, spaces.Discrete):
out = distributions.Categorical(probs=F.softmax(p, dim=1)).sample()
elif isinstance(self.action_space, spaces.Box):
p = torch.chunk(p, 2, dim=1)
out = distributions.Normal(loc=p[0], scale=1 + p[1] ** 2).sample()
return out.cpu().numpy()
def training_step(self, x, batch_idx):
loss = self(x.observations)[0].entropy().mean()
self.log('loss', loss)
return loss
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
return optimizer
@pytest.mark.parametrize("env_id", ["CartPole-v1", "MountainCar-v0", "MountainCarContinuous-v0"])
def test_on_policy_model(env_id):
"""
    Check that environments integrated in Gym pass the test.
:param env_id: (str)
"""
model = DummyModel(
env_id,
eval_env=env_id,
buffer_length=512,
num_rollouts=1,
batch_size=32,
epochs_per_rollout=10,
num_eval_episodes=10,
gamma=0.9,
gae_lambda=0.95,
use_sde=False,
sde_sample_freq=-1,
verbose=1,
seed=1234)
trainer = pl.Trainer(max_epochs=2, terminate_on_nan=True)
trainer.fit(model)
| [
"torch.max",
"torch.distributions.Normal",
"torch.ones",
"torch.ones_like",
"torch.nn.functional.softmax",
"torch.chunk"
] | 1.3 | HenryJia/lightning-baselines3 | 10d1a0eed6136978204323250e37d49915a12e14 |
1.7 | """
Lightning modules for adversarial IRL (AIRL) on the sepsis simulator.
`SepsisMixin` wires the `SepsisExpertDataset` into train/val/test dataloaders and
declares the dataset- and environment-specific hyperparameters; the `AIRL*Lightning`
classes combine it with the base AIRL and NODE-GAM discriminator models.
"""
import argparse
from torch.utils.data import DataLoader
from ...sepsis_simulator.dataset import SepsisExpertDataset
from .base import Base_AIRLLightning, Base_AIRL_NODEGAM_Lightning
from .disc import FCNN_Disc
class SepsisMixin(object):
monitor_metric = 'val_a'
monitor_mode = 'max'
def _dataloader(self, split='train') -> DataLoader:
"""Initialize the Replay Buffer dataset used for retrieving experiences"""
dataset = SepsisExpertDataset(
mdp=self.hparams.mdp,
N=self.hparams.N,
gamma=self.hparams.gamma,
split=split,
val_ratio=0.2,
expert_pol=self.hparams.expert_pol,
)
dataloader = DataLoader(dataset=dataset,
batch_size=self.hparams.batch_size,
)
return dataloader
def train_dataloader(self) -> DataLoader:
return self._dataloader('train')
def val_dataloader(self) -> DataLoader:
return self._dataloader('val')
def test_dataloader(self) -> DataLoader:
return self._dataloader('test')
@classmethod
def get_rs_loader(cls, args, rs=None):
rs = super().get_rs_loader(args, rs=rs)
# rs.add_rs_hparams('seed', short_name='s', chose_from=[321])
rs.add_rs_hparams('seed', short_name='s', gen=lambda hparams: rs.np_gen.randint(200))
rs.add_rs_hparams('batch_size', short_name='bs', chose_from=[512])
rs.add_rs_hparams('noise', short_name='dn', chose_from=[0., 0.1])
rs.add_rs_hparams('noise_epochs', short_name='dns',
gen=lambda hparams: rs.np_gen.choice([0.1, 0.2]) if hparams.noise > 0 else 0)
return rs
@classmethod
def add_model_specific_args(cls, parser) -> argparse.ArgumentParser:
"""
        Add model- and dataset-specific arguments to the parser.
        Args:
            parser: argparse.ArgumentParser to extend.
"""
# Model
parser.add_argument('--batch_size', type=int, default=512)
parser.add_argument('--gamma', type=float, default=0.9,
help='Decay rate in RL. Set it to 0.9 to encourage treating patients '
'earlier to leave the hospitals.')
parser.add_argument('--noise', type=float, default=0.1)
parser.add_argument('--noise_epochs', type=float, default=0.4)
# Environment
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--patience', type=int, default=30)
parser.add_argument('--mdp', type=str, choices=['original', 'gam', 'linear', 'cgam', 'clinear',
'cogam', 'colinear'],
default='gam', help='How to generate reward.')
parser.add_argument('--fold', type=int, default=0)
parser.add_argument('--model_gamma', type=float, default=None,
help='The gamma of the model. If None, same as gamma')
parser.add_argument('--N', type=int, default=5000,
help='Number of samples generated')
parser.add_argument('--expert_pol', type=str, default='optimal',
choices=['optimal', 'eps0.07', 'eps0.14'])
parser = super().add_model_specific_args(parser)
return parser
def trainer_args(self):
return dict()
class AIRLLightning(SepsisMixin, Base_AIRLLightning):
pass
class AIRL_NODEGAM_Lightning(SepsisMixin, Base_AIRL_NODEGAM_Lightning):
pass
class AIRL_FCNN_Lightning(SepsisMixin, Base_AIRLLightning):
disc_model_cls = FCNN_Disc
| [
"torch.utils.data.DataLoader"
] | 1.7 | zzzace2000/cairl_nodegam | 90d0d56a0e7be3d1cbba6179cbfc36d626456770 |
0.4 | import os
import torch
class Dictionary(object):
"""Build word2idx and idx2word from Corpus(train/val/test)"""
def __init__(self):
self.word2idx = {} # word: index
self.idx2word = [] # position(index): word
def add_word(self, word):
"""Create/Update word2idx and idx2word"""
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
return self.word2idx[word]
def __len__(self):
return len(self.idx2word)
class Corpus(object):
"""Corpus Tokenizer"""
def __init__(self, path):
self.dictionary = Dictionary()
self.train = self.tokenize(os.path.join(path, 'train.txt'))
self.valid = self.tokenize(os.path.join(path, 'valid.txt'))
self.test = self.tokenize(os.path.join(path, 'test.txt'))
def tokenize(self, path):
"""Tokenizes a text file."""
assert os.path.exists(path)
# Add words to the dictionary
with open(path, 'r') as f:
tokens = 0
for line in f:
# line to list of token + eos
words = line.split() + ['<eos>']
tokens += len(words)
for word in words:
self.dictionary.add_word(word)
# Tokenize file content
with open(path, 'r') as f:
ids = torch.LongTensor(tokens)
token = 0
for line in f:
words = line.split() + ['<eos>']
for word in words:
ids[token] = self.dictionary.word2idx[word]
token += 1
return ids | [
"torch.LongTensor"
] | 0.4.1 | stanford-oval/word-language-model | 3be3f65a198b518b66e22a910f28f83324db3825 |
0.4 | """
Code based loosely on implementation:
https://github.com/openai/baselines/blob/master/baselines/ppo2/policies.py
Under MIT license.
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import vel.util.network as net_util
from vel.rl.models.backbone.CoordConv import CoordConv
from vel.api.base import LinearBackboneModel, ModelFactory
class NatureCnnTwoTower(LinearBackboneModel):
""" Neural network as defined in the paper 'Human-level control through deep reinforcement learning' """
def __init__(self, input_width, input_height, input_channels, output_dim=512):
super().__init__()
self._output_dim = output_dim
# self.conv1 = nn.Conv2d(
# in_channels=input_channels,
# out_channels=32,
# kernel_size=(8, 8),
# stride=2
# )
self.conv1 = CoordConv(x_dim=133, y_dim=133, with_r=False,
in_channels=input_channels+2,
out_channels=32,
kernel_size=(8, 8),
stride=2
)
self.conv2 = nn.Conv2d(
in_channels=32,
out_channels=64,
kernel_size=(4, 4),
stride=2
)
self.conv3 = nn.Conv2d(
in_channels=64,
out_channels=64,
kernel_size=(3, 3),
stride=1
)
self.linear1 = nn.Linear(5, 1024)
self.linear2 = nn.Linear(1024, 512)
self.final_width = net_util.convolutional_layer_series(input_width, [
(8, 0, 2),
(4, 0, 2),
(3, 0, 1)
])
self.final_height = net_util.convolutional_layer_series(input_height, [
(8, 0, 2),
(4, 0, 2),
(3, 0, 1)
])
self.linear_layer1 = nn.Linear(
self.final_width * self.final_height * 64*1 + 512, # 64 is the number of channels of the last conv layer
1024
)
self.linear_layer2 = nn.Linear(1024, self.output_dim)
@property
def output_dim(self) -> int:
""" Final dimension of model output """
return self._output_dim
def reset_weights(self):
""" Call proper initializers for the weights """
for m in self.modules():
if isinstance(m, nn.Conv2d):
# init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
init.orthogonal_(m.weight, gain=np.sqrt(2))
init.constant_(m.bias, 0.0)
elif isinstance(m, nn.Linear):
# init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
init.orthogonal_(m.weight, gain=np.sqrt(2))
init.constant_(m.bias, 0.0)
def forward(self, image):
input1 = image['environment']
input2 = image['goal'].float()
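        # environment frames arrive as NHWC tensors scaled 0-255; convert to NCHW floats in [0, 1]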
result1 = input1.permute(0, 3, 1, 2).contiguous().type(torch.float) / 255.0
result1 = F.relu(self.conv1(result1))
result1 = F.relu(self.conv2(result1))
result1 = F.relu(self.conv3(result1))
result2 = input2.view(input2.size(0), -1)
result2 = F.leaky_relu(self.linear1(result2))
result2 = F.leaky_relu(self.linear2(result2))
flattened1 = result1.view(result1.size(0), -1)
flattened2 = result2.view(result2.size(0), -1)
flattened = torch.cat((flattened1, flattened2), 1)
result = F.leaky_relu(self.linear_layer1(flattened))
result = F.leaky_relu(self.linear_layer2(result))
return result
def create(input_width, input_height, input_channels=1, output_dim=512):
def instantiate(**_):
return NatureCnnTwoTower(
input_width=input_width, input_height=input_height, input_channels=input_channels,
output_dim=output_dim
)
return ModelFactory.generic(instantiate)
# Add this to make nicer scripting interface
NatureCnnTwoTowerFactory = create
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.init.constant_",
"torch.nn.Conv2d"
] | 0.4.1 | tigerwlin/vel | 00e4fbb7b612e888e2cbb5d8455146664638cd0b |
0.4 | import typing
import torch
import torch.nn.functional as F
import torch.nn as nn
from vel.api.base import SupervisedModel, ModelFactory, LinearBackboneModel
from vel.metrics.accuracy import Accuracy
from vel.metrics.loss_metric import Loss
class MultilayerSequenceClassificationGRU(SupervisedModel):
""" Multilayer GRU network for sequence modeling (n:1) """
def __init__(self, input_block: LinearBackboneModel, output_dim: int,
rnn_layers: typing.List[int], rnn_dropout: float=0.0, bidirectional: bool=False,
linear_layers: typing.List[int]=None, linear_dropout: float=0.0):
super().__init__()
self.output_dim = output_dim
self.rnn_layers_sizes = rnn_layers
self.rnn_dropout = rnn_dropout
self.linear_layers_sizes = linear_layers
self.linear_dropout = linear_dropout
self.bidirectional = bidirectional
self.input_block = input_block
current_dim = self.input_block.output_dim
self.rnn_layers = []
self.rnn_dropout_layers = []
bidirectional_multiplier = 1
for idx, current_layer in enumerate(rnn_layers, 1):
gru = nn.GRU(
input_size=current_dim * bidirectional_multiplier,
hidden_size=current_layer,
bidirectional=bidirectional,
batch_first=True,
)
self.add_module('gru{:02}'.format(idx), gru)
self.rnn_layers.append(gru)
if self.rnn_dropout > 0.0:
dropout_layer = nn.Dropout(p=self.rnn_dropout)
self.add_module('rnn_dropout{:02}'.format(idx), dropout_layer)
self.rnn_dropout_layers.append(dropout_layer)
current_dim = current_layer
if self.bidirectional:
bidirectional_multiplier = 2
else:
bidirectional_multiplier = 1
self.linear_layers = []
self.linear_dropout_layers = []
for idx, current_layer in enumerate(linear_layers, 1):
linear_layer = nn.Linear(current_dim * bidirectional_multiplier, current_layer)
self.add_module('linear{:02}'.format(idx), linear_layer)
self.linear_layers.append(linear_layer)
if self.linear_dropout > 0.0:
dropout_layer = nn.Dropout(p=self.linear_dropout)
self.add_module('linear_dropout{:02}'.format(idx), dropout_layer)
self.linear_dropout_layers.append(dropout_layer)
bidirectional_multiplier = 1
current_dim = current_layer
if self.bidirectional:
self.output_layer = nn.Linear(bidirectional_multiplier * current_dim, output_dim)
else:
self.output_layer = nn.Linear(current_dim, output_dim)
self.output_activation = nn.LogSoftmax(dim=1)
def reset_weights(self):
self.input_block.reset_weights()
for layer in self.linear_layers:
nn.init.kaiming_normal_(layer.weight, nonlinearity='relu')
nn.init.zeros_(layer.bias)
nn.init.kaiming_normal_(self.output_layer.weight, nonlinearity='relu')
nn.init.zeros_(self.output_layer.bias)
def forward(self, sequence):
""" Forward propagate batch of sequences through the network, without accounting for the state """
data = self.input_block(sequence)
for idx in range(len(self.rnn_layers)):
data, _ = self.rnn_layers[idx](data)
if self.rnn_dropout_layers:
data = self.rnn_dropout_layers[idx](data)
# We are interested only in the last element of the sequence
if self.bidirectional:
last_hidden_size = self.rnn_layers_sizes[-1]
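            # concatenate the forward direction's last timestep with the backward direction's first timestep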
data = torch.cat([data[:, -1, :last_hidden_size], data[:, 0, last_hidden_size:]], dim=1)
else:
data = data[:, -1]
for idx in range(len(self.linear_layers_sizes)):
data = F.relu(self.linear_layers[idx](data))
if self.linear_dropout_layers:
data = self.linear_dropout_layers[idx](data)
data = self.output_layer(data)
return self.output_activation(data)
# def forward_state(self, sequence, state=None):
# """ Forward propagate a sequence through the network accounting for the state """
# if state is None:
# state = self.initial_state(sequence.size(0))
#
# data = self.input_layer(sequence)
#
# state_outputs = []
#
# # for layer_length, layer in zip(self.hidden_layers, self.gru_layers):
# for idx in range(len(self.gru_layers)):
# layer_length = self.hidden_layers[idx]
#
# # Partition hidden state, for each layer we have layer_length of h state and layer_length of c state
# current_state = state[:, :, :layer_length]
# state = state[:, :, layer_length:]
#
# # Propagate through the GRU state
# data, new_h = self.gru_layers[idx](data, current_state)
#
# if self.dropout_layers:
# data = self.dropout_layers[idx](data)
#
# state_outputs.append(new_h)
#
# output_data = self.output_activation(self.output_layer(data))
#
# concatenated_hidden_output = torch.cat(state_outputs, dim=2)
#
# return output_data, concatenated_hidden_output
def get_layer_groups(self):
return [
self.input_block,
self.rnn_layers,
self.linear_layers,
self.output_layer
]
def initial_state(self, batch_size):
""" Initial state of the network """
return torch.zeros(batch_size, 1, sum(self.rnn_layers_sizes))
def loss_value(self, x_data, y_true, y_pred):
""" Calculate a value of loss function """
return F.nll_loss(y_pred, y_true)
def metrics(self) -> list:
""" Set of metrics for this model """
return [Loss(), Accuracy()]
def create(input_block: LinearBackboneModel, output_dim: int,
rnn_layers: typing.List[int], rnn_dropout: float=0.0, bidirectional: bool=False,
linear_layers: typing.List[int]=None, linear_dropout: float=0.0):
""" Vel creation function """
if linear_layers is None:
linear_layers = []
def instantiate(**_):
return MultilayerSequenceClassificationGRU(
input_block=input_block, output_dim=output_dim,
rnn_layers=rnn_layers, rnn_dropout=rnn_dropout, bidirectional=bidirectional,
linear_layers=linear_layers, linear_dropout=linear_dropout
)
return ModelFactory.generic(instantiate)
| [
"torch.nn.LogSoftmax",
"torch.nn.Linear",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.GRU",
"torch.nn.init.kaiming_normal_",
"torch.nn.functional.nll_loss",
"torch.nn.init.zeros_"
] | 0.4.1 | tigerwlin/vel | 00e4fbb7b612e888e2cbb5d8455146664638cd0b |
1.8 | from abc import abstractmethod
import PIL
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torch.hub import load_state_dict_from_url
from torchvision.models import DenseNet as _DenseNet
from torchvision.models import ResNet as _ResNet
from torchvision.models.densenet import _load_state_dict
from torchvision.models.densenet import model_urls as densenet_model_urls
from torchvision.models.resnet import BasicBlock, Bottleneck
from torchvision.models.resnet import model_urls as resnet_model_urls
class Model(pl.LightningModule):
DEFAULT_CONFIG = {}
def __init__(self, config: dict = None):
super().__init__()
self.config = self.DEFAULT_CONFIG.copy()
if config is not None:
self.config.update(config)
self._set_model()
@abstractmethod
def _set_model(self):
raise NotImplementedError()
class ResNet(_ResNet):
ACTIVATION_DIMS = [64, 128, 256, 512]
ACTIVATION_WIDTH_HEIGHT = [64, 32, 16, 8]
RESNET_TO_ARCH = {"resnet18": [2, 2, 2, 2], "resnet50": [3, 4, 6, 3]}
def __init__(
self,
num_classes: int,
arch: str = "resnet18",
dropout: float = 0.0,
pretrained: bool = True,
):
if arch not in self.RESNET_TO_ARCH:
raise ValueError(
f"config['classifier'] must be one of: {self.RESNET_TO_ARCH.keys()}"
)
block = BasicBlock if arch == "resnet18" else Bottleneck
super().__init__(block, self.RESNET_TO_ARCH[arch])
if pretrained:
state_dict = load_state_dict_from_url(
resnet_model_urls[arch], progress=True
)
self.load_state_dict(state_dict)
# self.fc = nn.Linear(512 * block.expansion, num_classes)
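        # swap the pretrained ImageNet classification head for a dropout + linear layer
        # sized for this task's classes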
self.fc = nn.Sequential(
nn.Dropout(dropout), nn.Linear(512 * block.expansion, num_classes)
)
def default_transform(img: PIL.Image.Image):
return transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)(img)
def default_train_transform(img: PIL.Image.Image):
return transforms.Compose(
[
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)(img)
class DenseNet(_DenseNet):
DENSENET_TO_ARCH = {
"densenet121": {
"growth_rate": 32,
"block_config": (6, 12, 24, 16),
"num_init_features": 64,
}
}
def __init__(
self, num_classes: int, arch: str = "densenet121", pretrained: bool = True
):
if arch not in self.DENSENET_TO_ARCH:
raise ValueError(
f"config['classifier'] must be one of: {self.DENSENET_TO_ARCH.keys()}"
)
super().__init__(**self.DENSENET_TO_ARCH[arch])
if pretrained:
_load_state_dict(self, densenet_model_urls[arch], progress=True)
self.classifier = nn.Linear(self.classifier.in_features, num_classes)
class VisionClassifier(Model):
DEFAULT_CONFIG = {
"lr": 1e-4,
"model_name": "resnet",
"arch": "resnet18",
"pretrained": True,
"num_classes": 2,
"transform": default_transform,
"train_transform": default_train_transform,
}
def _set_model(self):
if self.config["model_name"] == "resnet":
self.model = ResNet(
num_classes=self.config["num_classes"],
arch=self.config["arch"],
pretrained=self.config["pretrained"],
)
elif self.config["model_name"] == "densenet":
self.model = DenseNet(
num_classes=self.config["num_classes"], arch=self.config["arch"]
)
else:
raise ValueError(f"Model name {self.config['model_name']} not supported.")
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
inputs, targets, _ = batch["input"], batch["target"], batch["id"]
outs = self.forward(inputs)
loss = nn.functional.cross_entropy(outs, targets)
self.log("train_loss", loss, on_step=True, logger=True)
return loss
def validation_step(self, batch, batch_idx):
inputs, targets = batch["input"], batch["target"]
outs = self.forward(inputs)
loss = nn.functional.cross_entropy(outs, targets)
self.log("valid_loss", loss)
def validation_epoch_end(self, outputs) -> None:
for metric_name, metric in self.metrics.items():
self.log(f"valid_{metric_name}", metric.compute())
metric.reset()
def test_epoch_end(self, outputs) -> None:
return self.validation_epoch_end(outputs)
def test_step(self, batch, batch_idx):
return self.validation_step(batch, batch_idx)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.config["lr"])
return optimizer
| [
"torch.nn.Linear",
"torch.nn.functional.cross_entropy",
"torch.hub.load_state_dict_from_url",
"torch.nn.Dropout"
] | 1.8.0 | data-centric-ai/dcbench | 831ab2359d686739d0b0c7a589974ce08448e58d |
0.4 | # coding:utf8
import torch as t
import torchvision as tv
import torchnet as tnt
from torch.utils import data
from transformer_net import TransformerNet
import utils
from PackedVGG import Vgg16
from torch.nn import functional as F
import tqdm
import os
import ipdb
# from WCT2_train import WCT2
# import model
from LapSobGaus_train import Lap_Sob_Gaus
import net
import Ovodus_Laplace_model
import utils_
from WCT2_train import train_transform
from tensorboardX import SummaryWriter
from pathlib import Path
from torchvision.utils import save_image
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]
class Config(object):
# General Args
use_gpu = True
model_path = None # pretrained model path (for resuming training or for testing)
# Train Args
image_size = 448 # image crop_size for training
batch_size = 2
data_root = r'F:\DataSets\train2017' # dataset root; images are read as $data_root/coco/a.jpg
num_workers = 4 # dataloader num of workers
lr = 1e-4
epoches = 20 # total epoch to train
content_weight = 1e10 # weight of content_loss
style_weight = 1e2 # weight of style_loss
style_path = 'style_input' # style image path
env = 'onlyencodercontent_58_Laps_test_nores_noDynamic_10_2' # visdom env
plot_every = 1 # visualize in visdom every plot_every batches
debug_file = '/tmp/debugnn' # touch $debug_file to interrupt and enter ipdb
# Test Args
content_path = 'input.png' # input file to do style transfer [for test]
result_path = 'output.png' # style transfer result [for test]
option_unpool = 'sum'
cpu = False
transfer_at_encoder = True
transfer_at_decoder = True
transfer_at_skip = True
verbose = True
save_dir = './onlyencodercontent/nores_noDynamic/58_LapSobGaus_experiments_10_2'
log_dir = './onlyencodercontent/nores_noDynamic/58_LapSobGaus_logs_10_2'
lr_decay = 5e-5
def adjust_learning_rate(lr ,optimizer, iteration_count, lr_decay):
"""Imitating the original implementation"""
lr = lr / (1.0 + lr_decay * iteration_count)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
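# Worked example: with lr=1e-4 and lr_decay=5e-5, iteration 10000 gives 1e-4 / (1 + 0.5) ~= 6.7e-5.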
def train(**kwargs):
opt = Config()
for k_, v_ in kwargs.items():
setattr(opt, k_, v_)
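# Any Config attribute can be overridden through these kwargs; with fire.Fire (see __main__ below) a
# hypothetical invocation would be: python this_script.py train --style_weight=1e3 --batch_size=4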
device = 'cpu' if opt.cpu or not t.cuda.is_available() else 'cuda:0'
device = t.device(device)
# device=t.device('cuda') if opt.use_gpu else t.device('cpu')
vis = utils_.Visualizer(opt.env)
save_dir = Path(opt.save_dir)
save_dir.mkdir(exist_ok=True, parents=True)
log_dir = Path(opt.log_dir)
log_dir.mkdir(exist_ok=True, parents=True)
writer = SummaryWriter(log_dir=str(log_dir))
# Data loading
transfroms = tv.transforms.Compose([
tv.transforms.Resize(opt.image_size),
tv.transforms.CenterCrop(opt.image_size),
tv.transforms.ToTensor(),
#tv.transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
#tv.transforms.Lambda(lambda x: x*255)
])
dataset = tv.datasets.ImageFolder(opt.data_root, transfroms)
dataloader = data.DataLoader(dataset, opt.batch_size)
# style transformer network
# transformer = TransformerNet()
print('come!')
# visualizer = Visualizer(config) # create a visualizer that display/save images and plots
# device = 'cpu' if opt.cpu or not t.cuda.is_available() else 'cuda:0'
# device = t.device(device)
transfer_at = set()
if opt.transfer_at_encoder:
transfer_at.add('encoder')
if opt.transfer_at_decoder:
transfer_at.add('decoder')
if opt.transfer_at_skip:
transfer_at.add('skip')
# save_dir = Path(config.save_dir)
# save_dir.mkdir(exist_ok=True, parents=True)
# log_dir = Path(config.log_dir)
# log_dir.mkdir(exist_ok=True, parents=True)
# writer = SummaryWriter(log_dir=str(log_dir))
# vgg = net.vgg
wct2 = Lap_Sob_Gaus(transfer_at=transfer_at, option_unpool=opt.option_unpool, device=device,
verbose=False)
encoder = Ovodus_Laplace_model.Lap_Sob_GausEncoder(opt.option_unpool).to(device)
decoder = Ovodus_Laplace_model.Lap_Sob_GausDecoder(opt.option_unpool).to(device)
# vgg.load_state_dict(torch.load(config.vgg))
# vgg = nn.Sequential(*list(vgg.children())[:31])
laps = Lap_Sob_Gaus(transfer_at=transfer_at, option_unpool='sum', device=device)
network = net.Net(encoder, decoder)
network.train()
network.to(device)
transformer = network
if opt.model_path:
transformer.load_state_dict(t.load(opt.model_path, map_location=lambda _s, _: _s))
transformer.to(device)
# Vgg16 for Perceptual Loss
# vgg = Vgg16().eval()
# vgg.to(device)
# for param in vgg.parameters():
# param.requires_grad = False
# Optimizer
# optimizer = t.optim.Adam(transformer.parameters(), opt.lr)
enoptimizer = t.optim.Adam(network.encoder.parameters(), lr=opt.lr, betas=(0.9, 0.999))
deoptimizer = t.optim.Adam(network.decoder.parameters(), lr=opt.lr, betas=(0.9, 0.999))
# # Get style image
# style_dataloader = utils_.get_style_data(opt.style_path, opt.batch_size)
# #style_list = list(enumerate(style_dataloader))
# for ii, (style, _) in tqdm.tqdm(enumerate(style_dataloader)):
# #a = style
# style = style.expand(opt.batch_size, 3, 256, 256)
# vis.img('style', (style.data[0] * 0.225 + 0.45).clamp(min=0, max=1))
# #style_list.append(style)
#
# style = style.to(device)
# #
# # #
# # # # gram matrix for style image
# with t.no_grad():
# features_style = vgg(style)
# gram_style = [utils_.gram_matrix(y) for y in features_style]
# Loss meter
style_meter = tnt.meter.AverageValueMeter()
content_meter = tnt.meter.AverageValueMeter()
for epoch in range(opt.epoches):
# for jj, (style, _) in tqdm.tqdm(enumerate(style_dataloader)):
# a = style
# vis.img('style', (style.data[0] * 0.225 + 0.45).clamp(min=0, max=1))
# style = style.to(device)
#
content_meter.reset()
style_meter.reset()
for ii, (x, _) in tqdm.tqdm(enumerate(dataloader)):
if epoch == 0:
adjust_learning_rate(opt.lr, enoptimizer, iteration_count=ii, lr_decay=opt.lr_decay)
adjust_learning_rate(opt.lr, deoptimizer, iteration_count=ii, lr_decay=opt.lr_decay)
print(opt.lr)
# style = style_list[ii][1][0]
# # style = style_list[ii]
# style = style.to(device)
# # # gram matrix for style image
# with t.no_grad():
# features_style = vgg(style)
# gram_style = [utils_.gram_matrix(y) for y in features_style]
style_dataloader = utils_.get_style_data(opt.style_path, opt.batch_size)
# style_list = list(enumerate(style_dataloader))
for jj, (style, _) in tqdm.tqdm(enumerate(style_dataloader)):
# a = style
style = style.expand(opt.batch_size, 3, 256, 256)
#vis.img('style', (style.data[0] * 0.225 + 0.45).clamp(min=0, max=1))
vis.img('style', (style.data[0]).clamp(min=0, max=1))
# style_list.append(style)
style = style.to(device)
#
# #
# # # gram matrix for style image
# with t.no_grad():
# features_style = vgg(style)
# gram_style = [utils_.gram_matrix(y) for y in features_style]
# Train
enoptimizer.zero_grad()
deoptimizer.zero_grad()
x = x.to(device)
#y = network(x, style, Laps=laps)
# if (ii + 1) % 10 == 0:
# print(y)
# y = y.clamp_(0, 1) * 255
#y = utils_.normalize_batch(y)
#x = utils_.normalize_batch(x)
# features_y = vgg(y)
# features_x = vgg(x)
# # content loss
# content_loss = opt.content_weight * F.mse_loss(features_y.relu2_2, features_x.relu2_2)
#
# # style loss
# style_loss = 0
#
# for ft_y, gm_s in zip(features_y, gram_style):
# gram_y = utils_.gram_matrix(ft_y)
# style_loss += F.mse_loss(gram_y, gm_s.expand_as(gram_y))
y, content_feats, content_loss, style_loss = network(x, style, Laps=laps)
content_loss *= opt.content_weight
style_loss *= opt.style_weight
total_loss = content_loss + style_loss
total_loss.backward()
enoptimizer.step()
deoptimizer.step()
# Loss smooth for visualization
content_meter.add(content_loss.item())
style_meter.add(style_loss.item())
if ii % 50 == 1:
print('\n')
print('iters:', ii, 'total_loss:', total_loss, 'loss_c:', content_loss, 'loss_s: ', style_loss)
if (ii + 1) % opt.plot_every == 0:
if os.path.exists(opt.debug_file):
ipdb.set_trace()
# visualization
vis.plot('content_loss', content_meter.value()[0])
vis.plot('style_loss', style_meter.value()[0])
# denorm input/output, since we have applied (utils.normalize_batch)
vis.img('output1', (y.data.cpu()[0]).clamp(min=0, max=1))
vis.img('input1', (x.data.cpu()[0]).clamp(min=0, max=1))
vis.img('decoder_1', (content_feats['decoder'][0][0].data.cpu()[0]).clamp(min=0, max=1))
vis.img('decoder_2', (content_feats['decoder'][1][0].data.cpu()[0]).clamp(min=0, max=1))
vis.img('decoder_3', (content_feats['decoder'][2][0].data.cpu()[0]).clamp(min=0, max=1))
vis.img('decoder_4', (content_feats['decoder'][3][0].data.cpu()[0]).clamp(min=0, max=1))
#save_image(content_feat.clamp_(0, 1), fname_output + "decoder{:d}".format(level), padding=0)
if (ii) % 1000 == 0:
if not os.path.exists(save_dir /'epoch_{:d}'.format(epoch)):
os.makedirs(save_dir /'epoch_{:d}'.format(epoch))
de_state_dict = network.decoder.state_dict()
en_state_dict = network.encoder.state_dict()
for key in de_state_dict.keys():
de_state_dict[key] = de_state_dict[key].to(t.device('cpu'))
t.save(de_state_dict, save_dir /'epoch_{:d}'.format(epoch)/
'decoder_iter_{:d}.pth.tar'.format(ii + 1))
for key in en_state_dict.keys():
en_state_dict[key] = en_state_dict[key].to(t.device('cpu'))
t.save(en_state_dict, save_dir /'epoch_{:d}'.format(epoch)/
'encoder_iter_{:d}.pth.tar'.format(ii + 1))
de_state_dict = network.decoder.state_dict()
en_state_dict = network.encoder.state_dict()
for key in de_state_dict.keys():
de_state_dict[key] = de_state_dict[key].to(t.device('cpu'))
t.save(de_state_dict, save_dir /
'epoch_decoder_iter_{:d}.pth.tar'.format(epoch + 1))
for key in en_state_dict.keys():
en_state_dict[key] = en_state_dict[key].to(t.device('cpu'))
t.save(en_state_dict, save_dir /
'epoch_encoder_iter_{:d}.pth.tar'.format(epoch + 1))
# save checkpoints
vis.save([opt.env])
t.save(network.state_dict(), 'checkpoints/epoch_%s_style.pth' % epoch)
writer.close()
@t.no_grad()
def stylize(**kwargs):
"""
perform style transfer
"""
opt = Config()
for k_, v_ in kwargs.items():
setattr(opt, k_, v_)
device = t.device('cuda') if opt.use_gpu else t.device('cpu')
# input image preprocess
content_image = tv.datasets.folder.default_loader(opt.content_path)
content_transform = tv.transforms.Compose([
tv.transforms.ToTensor(),
tv.transforms.Lambda(lambda x: x.mul(255))
])
content_image = content_transform(content_image)
content_image = content_image.unsqueeze(0).to(device).detach()
# model setup
style_model = TransformerNet().eval()
style_model.load_state_dict(t.load(opt.model_path, map_location=lambda _s, _: _s))
style_model.to(device)
# style transfer and save output
output = style_model(content_image)
output_data = output.cpu().data[0]
tv.utils.save_image(((output_data / 255)).clamp(min=0, max=1), opt.result_path)
if __name__ == '__main__':
import fire
fire.Fire()
train() | [
"torch.device",
"torch.no_grad",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.load"
] | 0.4.1 | Raeyi/multipooling-AdaPECT | 9632b98ff1612344de798321298f6488f1c303b0 |
1.5 | import argparse
import os
import cv2
import librosa
import numpy as np
import soundfile as sf
import torch
from tqdm import tqdm
from lib import dataset
from lib import nets
from lib import spec_utils
class VocalRemover(object):
def __init__(self, model, device, window_size):
self.model = model
self.offset = model.offset
self.device = device
self.window_size = window_size
def _execute(self, X_mag_pad, roi_size, n_window):
self.model.eval()
with torch.no_grad():
preds = []
for i in tqdm(range(n_window)):
start = i * roi_size
X_mag_window = X_mag_pad[None, :, :, start:start + self.window_size]
X_mag_window = torch.from_numpy(X_mag_window).to(self.device)
pred = self.model.predict(X_mag_window)
pred = pred.detach().cpu().numpy()
preds.append(pred[0])
pred = np.concatenate(preds, axis=2)
return pred
def preprocess(self, X_spec):
X_mag = np.abs(X_spec)
X_phase = np.angle(X_spec)
return X_mag, X_phase
def inference(self, X_spec):
X_mag, X_phase = self.preprocess(X_spec)
coef = X_mag.max()
X_mag_pre = X_mag / coef
n_frame = X_mag_pre.shape[2]
pad_l, pad_r, roi_size = dataset.make_padding(n_frame, self.window_size, self.offset)
n_window = int(np.ceil(n_frame / roi_size))
X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode='constant')
pred = self._execute(X_mag_pad, roi_size, n_window)
pred = pred[:, :, :n_frame]
return pred * coef, X_mag, np.exp(1.j * X_phase)
def inference_tta(self, X_spec):
X_mag, X_phase = self.preprocess(X_spec)
coef = X_mag.max()
X_mag_pre = X_mag / coef
n_frame = X_mag_pre.shape[2]
pad_l, pad_r, roi_size = dataset.make_padding(n_frame, self.window_size, self.offset)
n_window = int(np.ceil(n_frame / roi_size))
X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode='constant')
pred = self._execute(X_mag_pad, roi_size, n_window)
pred = pred[:, :, :n_frame]
pad_l += roi_size // 2
pad_r += roi_size // 2
n_window += 1
X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode='constant')
pred_tta = self._execute(X_mag_pad, roi_size, n_window)
pred_tta = pred_tta[:, :, roi_size // 2:]
pred_tta = pred_tta[:, :, :n_frame]
return (pred + pred_tta) * 0.5 * coef, X_mag, np.exp(1.j * X_phase)
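# A hypothetical usage sketch mirroring main() below:
#   model = nets.CascadedASPPNet(2048)
#   model.load_state_dict(torch.load('models/baseline.pth', map_location='cpu'))
#   vr = VocalRemover(model, torch.device('cpu'), window_size=512)
#   pred, X_mag, X_phase = vr.inference(X_spec)  # X_spec: complex spectrogram of shape (2, bins, frames)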
def main():
p = argparse.ArgumentParser()
p.add_argument('--gpu', '-g', type=int, default=-1)
p.add_argument('--pretrained_model', '-P', type=str, default='models/baseline.pth')
p.add_argument('--input', '-i', required=True)
p.add_argument('--sr', '-r', type=int, default=44100)
p.add_argument('--n_fft', '-f', type=int, default=2048)
p.add_argument('--hop_length', '-l', type=int, default=1024)
p.add_argument('--window_size', '-w', type=int, default=512)
p.add_argument('--output_image', '-I', action='store_true')
p.add_argument('--postprocess', '-p', action='store_true')
p.add_argument('--tta', '-t', action='store_true')
args = p.parse_args()
print('loading model...', end=' ')
device = torch.device('cpu')
model = nets.CascadedASPPNet(args.n_fft)
model.load_state_dict(torch.load(args.pretrained_model, map_location=device))
if torch.cuda.is_available() and args.gpu >= 0:
device = torch.device('cuda:{}'.format(args.gpu))
model.to(device)
print('done')
print('loading wave source...', end=' ')
X, sr = librosa.load(
args.input, args.sr, False, dtype=np.float32, res_type='kaiser_fast')
basename = os.path.splitext(os.path.basename(args.input))[0]
print('done')
if X.ndim == 1:
X = np.asarray([X, X])
print('stft of wave source...', end=' ')
X = spec_utils.wave_to_spectrogram(X, args.hop_length, args.n_fft)
print('done')
vr = VocalRemover(model, device, args.window_size)
if args.tta:
pred, X_mag, X_phase = vr.inference_tta(X)
else:
pred, X_mag, X_phase = vr.inference(X)
if args.postprocess:
print('post processing...', end=' ')
pred_inv = np.clip(X_mag - pred, 0, np.inf)
pred = spec_utils.mask_silence(pred, pred_inv)
print('done')
print('inverse stft of instruments...', end=' ')
y_spec = pred * X_phase
wave = spec_utils.spectrogram_to_wave(y_spec, hop_length=args.hop_length)
print('done')
sf.write('{}_Instruments.wav'.format(basename), wave.T, sr)
print('inverse stft of vocals...', end=' ')
v_spec = np.clip(X_mag - pred, 0, np.inf) * X_phase
wave = spec_utils.spectrogram_to_wave(v_spec, hop_length=args.hop_length)
print('done')
sf.write('{}_Vocals.wav'.format(basename), wave.T, sr)
if args.output_image:
with open('{}_Instruments.jpg'.format(basename), mode='wb') as f:
image = spec_utils.spectrogram_to_image(y_spec)
_, bin_image = cv2.imencode('.jpg', image)
bin_image.tofile(f)
with open('{}_Vocals.jpg'.format(basename), mode='wb') as f:
image = spec_utils.spectrogram_to_image(v_spec)
_, bin_image = cv2.imencode('.jpg', image)
bin_image.tofile(f)
if __name__ == '__main__':
main()
| [
"torch.device",
"torch.no_grad",
"torch.from_numpy",
"torch.cuda.is_available",
"torch.load"
] | 1.5.1 | charzy/vocalremover | 9bf983ab5579c36c75447c74eec0400d78ab49f9 |
1.0 | # coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Trainer class, to easily train a 🤗 Transformers model from scratch or finetune it on a new task.
"""
import collections
import inspect
import math
import os
import re
import shutil
import time
import warnings
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
# Integrations must be imported before ML frameworks:
from .integrations import ( # isort: split
default_hp_search_backend,
get_reporting_integration_callbacks,
hp_params,
is_fairscale_available,
is_optuna_available,
is_ray_tune_available,
run_hp_search_optuna,
run_hp_search_ray,
init_deepspeed,
)
import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from .file_utils import (
WEIGHTS_NAME,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_sagemaker_distributed_available,
is_torch_tpu_available,
)
from .modeling_utils import PreTrainedModel
from .models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from .optimization import Adafactor, AdamW, get_scheduler
from .tokenization_utils_base import PreTrainedTokenizerBase
from .trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from .trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedTensorGatherer,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
distributed_broadcast_scalars,
distributed_concat,
nested_concat,
nested_detach,
nested_numpify,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from .trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalPrediction,
HPSearchBackend,
PredictionOutput,
TrainOutput,
default_compute_objective,
default_hp_space,
set_seed,
speed_metrics,
)
from .training_args import ParallelMode, TrainingArguments
from .utils import logging
_is_native_amp_available = False
DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback
if is_in_notebook():
from .utils.notebook import NotebookProgressCallback
DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback
if is_apex_available():
from apex import amp
if version.parse(torch.__version__) >= version.parse("1.6"):
_is_native_amp_available = True
from torch.cuda.amp import autocast
if is_datasets_available():
import datasets
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
if is_fairscale_available():
from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
from fairscale.optim import OSS
from fairscale.optim.grad_scaler import ShardedGradScaler
if is_sagemaker_distributed_available():
import smdistributed.dataparallel.torch.distributed as dist
from smdistributed.dataparallel.torch.parallel.distributed import DistributedDataParallel as DDP
else:
import torch.distributed as dist
if TYPE_CHECKING:
import optuna
logger = logging.get_logger(__name__)
def _model_unwrap(model: nn.Module) -> nn.Module:
# since there could be multiple levels of wrapping, unwrap recursively
if hasattr(model, "module"):
return _model_unwrap(model.module)
else:
return model
class Trainer:
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):
The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.
.. note::
:class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`
provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as
they work the same way as the 🤗 Transformers models.
args (:class:`~transformers.TrainingArguments`, `optional`):
The arguments to tweak for training. Will default to a basic instance of
:class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in
the current directory if not provided.
data_collator (:obj:`DataCollator`, `optional`):
The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`.
Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of
:func:`~transformers.DataCollatorWithPadding` otherwise.
train_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):
The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs to the
maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an
interrupted training or reuse the fine-tuned model.
model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):
A function that instantiates the model to be used. If provided, each call to
:meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.
The function may have zero argument, or a single one containing the optuna/Ray Tune trial object, to be
able to choose different architectures according to hyper parameters (such as layer count, sizes of inner
layers, dropout probabilities etc).
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary string to metric values.
callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):
A list of callbacks to customize the training loop. Will add those to the list of default callbacks
detailed in :doc:`here <callback>`.
If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.
optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR`, `optional`): A tuple
containing the optimizer and the scheduler to use. Will default to an instance of
:class:`~transformers.AdamW` on your model and a scheduler given by
:func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.
Important attributes:
- **model** -- Always points to the core model. If using a transformers model, it will be a
:class:`~transformers.PreTrainedModel` subclass.
- **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
original model. This is the model that should be used for the forward pass. For example, under ``DeepSpeed``,
the inner model is wrapped in ``DeepSpeed`` and then again in ``torch.nn.DistributedDataParallel``. If the
inner model hasn't been wrapped, then ``self.model_wrapped`` is the same as ``self.model``.
- **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
data parallelism, this means some of the model layers are split on different GPUs).
"""
def __init__(
self,
model: Union[PreTrainedModel, torch.nn.Module] = None,
args: TrainingArguments = None,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Dataset] = None,
tokenizer: Optional["PreTrainedTokenizerBase"] = None,
model_init: Callable[[], PreTrainedModel] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
callbacks: Optional[List[TrainerCallback]] = None,
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
):
if args is None:
output_dir = "tmp_trainer"
logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
args = TrainingArguments(output_dir=output_dir)
self.args = args
# Seed must be set before instantiating the model when using model_init.
set_seed(self.args.seed)
self.hp_name = None
self.deepspeed = None
if model is None:
if model_init is not None:
self.model_init = model_init
model = self.call_model_init()
else:
raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
else:
if model_init is not None:
warnings.warn(
"`Trainer` requires either a `model` or `model_init` argument, but not both. "
"`model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.",
FutureWarning,
)
self.model_init = model_init
if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
self.is_model_parallel = True
else:
self.is_model_parallel = False
default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
self.data_collator = data_collator if data_collator is not None else default_collator
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.tokenizer = tokenizer
# Model parallel
if not self.is_model_parallel:
model = model.to(args.device)
else:
# Force n_gpu to 1 to avoid DataParallel.
self.args._n_gpu = 1
# later use `self.model is self.model_wrapped` to check if it's wrapped or not
self.model_wrapped = model
self.model = model
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
raise RuntimeError(
"Passing a `model_init` is incompatible with providing the `optimizers` argument."
"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
)
default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
self.callback_handler = CallbackHandler(
callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
)
self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
# Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
self._loggers_initialized = False
# Create output directory if needed
if self.is_world_process_zero():
os.makedirs(self.args.output_dir, exist_ok=True)
if is_torch_tpu_available() and isinstance(self.model, PreTrainedModel):
# Set an xla_device flag on the model's config.
# We'll find a more elegant way and not need to do this in the future.
self.model.config.xla_device = True
if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")
if args.max_steps > 0:
logger.info("max_steps is given, it will override any value given in num_train_epochs")
# Enforce rules on using datasets with no __len__
if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:
raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
if is_datasets_available():
if isinstance(train_dataset, datasets.Dataset):
self._remove_unused_columns(self.train_dataset, description="training")
if isinstance(eval_dataset, datasets.Dataset):
self._remove_unused_columns(self.eval_dataset, description="evaluation")
# Setup Sharded DDP training
self.sharded_dpp = False
if args.sharded_ddp:
if args.deepspeed:
raise ValueError(
"Using --sharded_ddp together with --deepspeed is not possible, deactivate one of those flags."
)
if args.local_rank == -1:
raise ValueError("Using sharded DDP only works in distributed training.")
elif not is_fairscale_available():
raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
else:
self.sharded_dpp = True
# Mixed precision setup
self.use_apex = False
self.use_amp = False
self.fp16_backend = None
if args.fp16:
if args.fp16_backend == "auto":
self.fp16_backend = "amp" if _is_native_amp_available else "apex"
else:
self.fp16_backend = args.fp16_backend
logger.info(f"Using {self.fp16_backend} fp16 backend")
if args.fp16 and not args.deepspeed: # deepspeed manages its own fp16
if self.fp16_backend == "amp":
self.use_amp = True
self.scaler = ShardedGradScaler() if self.sharded_dpp else torch.cuda.amp.GradScaler()
else:
if not is_apex_available():
raise ImportError(
"Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex."
)
self.use_apex = True
# Label smoothing
if self.args.label_smoothing_factor != 0:
self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
else:
self.label_smoother = None
self.state = TrainerState()
self.control = TrainerControl()
# Internal variable for total_flos used to count as tensors (for distributed + TPU), will be sent in the
# state at each call to self.log.
self._total_flos = None
self.hp_search_backend = None
self.use_tune_checkpoints = False
default_label_names = (
["start_positions", "end_positions"]
if type(self.model) in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values()
else ["labels"]
)
self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
def add_callback(self, callback):
"""
Add a callback to the current list of :class:`~transformer.TrainerCallback`.
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will instantiate a member of that class.
"""
self.callback_handler.add_callback(callback)
def pop_callback(self, callback):
"""
Remove a callback from the current list of :class:`~transformer.TrainerCallback` and returns it.
If the callback is not found, returns :obj:`None` (and no error is raised).
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will pop the first member of that class found in the list of callbacks.
Returns:
:class:`~transformer.TrainerCallback`: The callback removed, if found.
"""
return self.callback_handler.pop_callback(callback)
def remove_callback(self, callback):
"""
Remove a callback from the current list of :class:`~transformer.TrainerCallback`.
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will remove the first member of that class found in the list of callbacks.
"""
self.callback_handler.remove_callback(callback)
def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
if not self.args.remove_unused_columns:
return
# Inspect model forward signature to keep only the arguments it accepts.
signature = inspect.signature(self.model.forward)
signature_columns = list(signature.parameters.keys())
# Labels may be named label or label_ids, the default data collator handles that.
signature_columns += ["label", "label_ids"]
columns = [k for k in signature_columns if k in dataset.column_names]
ignored_columns = list(set(dataset.column_names) - set(signature_columns))
dset_description = "" if description is None else f"in the {description} set "
logger.info(
f"The following columns {dset_description}don't have a corresponding argument in `{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
)
dataset.set_format(type=dataset.format["type"], columns=columns)
def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance(
self.train_dataset, collections.abc.Sized
):
return None
# Gather the number of processes and this process index.
if self.args.parallel_mode == ParallelMode.TPU:
num_processes = xm.xrt_world_size()
process_index = xm.get_ordinal()
elif (
self.args.parallel_mode == ParallelMode.DISTRIBUTED
or self.args.parallel_mode == ParallelMode.SAGEMAKER_DISTRIBUTED
):
num_processes = dist.get_world_size()
process_index = dist.get_rank()
else:
num_processes = 1
process_index = 0
# Build the sampler.
if self.args.group_by_length:
if num_processes <= 1:
return LengthGroupedSampler(self.train_dataset, self.args.train_batch_size)
else:
return DistributedLengthGroupedSampler(
self.train_dataset, self.args.train_batch_size, num_replicas=num_processes, rank=process_index
)
else:
if num_processes <= 1:
return RandomSampler(self.train_dataset)
else:
return DistributedSampler(self.train_dataset, num_replicas=num_processes, rank=process_index)
def get_train_dataloader(self) -> DataLoader:
"""
Returns the training :class:`~torch.utils.data.DataLoader`.
Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler (adapted
to distributed training if necessary) otherwise.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
train_sampler = self._get_train_sampler()
return DataLoader(
self.train_dataset,
batch_size=self.args.train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:
if is_torch_tpu_available():
return SequentialDistributedSampler(eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())
elif self.args.local_rank != -1:
return SequentialDistributedSampler(eval_dataset)
else:
return SequentialSampler(eval_dataset)
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
"""
Returns the evaluation :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not
accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
elif eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
elif is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
self._remove_unused_columns(eval_dataset, description="evaluation")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
eval_sampler = self._get_eval_sampler(eval_dataset)
return DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
"""
Returns the test :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
test_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
elif is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
self._remove_unused_columns(test_dataset, description="test")
test_sampler = self._get_eval_sampler(test_dataset)
# We use the same batch_size as for eval.
return DataLoader(
test_dataset,
sampler=test_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
pin_memory=self.args.dataloader_pin_memory,
)
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through :obj:`optimizers`, or subclass and override this method in a subclass.
"""
if self.optimizer is None:
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer_cls = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
optimizer_cls = Adafactor
optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
else:
optimizer_cls = AdamW
optimizer_kwargs = {
"betas": (self.args.adam_beta1, self.args.adam_beta2),
"eps": self.args.adam_epsilon,
}
optimizer_kwargs["lr"] = self.args.learning_rate
if self.sharded_dpp:
self.optimizer = OSS(
params=optimizer_grouped_parameters,
optim=optimizer_cls,
**optimizer_kwargs,
)
else:
self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if self.lr_scheduler is None:
self.lr_scheduler = get_scheduler(
self.args.lr_scheduler_type,
self.optimizer,
num_warmup_steps=self.args.warmup_steps,
num_training_steps=num_training_steps,
)
def num_examples(self, dataloader: DataLoader) -> int:
"""
Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset.
Will raise an exception if the underlying dataset does not implement method :obj:`__len__`
"""
return len(dataloader.dataset)
def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
""" HP search setup code """
self._trial = trial
if self.hp_search_backend is None or trial is None:
return
params = self.hp_space(trial) if self.hp_search_backend == HPSearchBackend.OPTUNA else trial
for key, value in params.items():
if not hasattr(self.args, key):
raise AttributeError(
f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`."
)
old_attr = getattr(self.args, key, None)
# Casting value to the proper type
if old_attr is not None:
value = type(old_attr)(value)
setattr(self.args, key, value)
if self.hp_search_backend == HPSearchBackend.OPTUNA:
logger.info("Trial:", trial.params)
def _report_to_hp_search(
self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float]
):
if self.hp_search_backend is None or trial is None:
return
self.objective = self.compute_objective(metrics.copy())
if self.hp_search_backend == HPSearchBackend.OPTUNA:
import optuna
trial.report(self.objective, epoch)
if trial.should_prune():
raise optuna.TrialPruned()
elif self.hp_search_backend == HPSearchBackend.RAY:
from ray import tune
if self.state.global_step % self.args.save_steps == 0:
self._tune_save_checkpoint()
tune.report(objective=self.objective, **metrics)
def _tune_save_checkpoint(self):
from ray import tune
if not self.use_tune_checkpoints:
return
with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
self.args.output_dir = checkpoint_dir
output_dir = os.path.join(self.args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
self.save_model(output_dir)
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
def call_model_init(self, trial=None):
model_init_argcount = len(inspect.signature(self.model_init).parameters)
if model_init_argcount == 0:
model = self.model_init()
elif model_init_argcount == 1:
model = self.model_init(trial)
else:
raise RuntimeError("model_init should have 0 or 1 argument.")
if model is None:
raise RuntimeError("model_init should not return None.")
return model
def train(
self,
resume_from_checkpoint: Optional[str] = None,
trial: Union["optuna.Trial", Dict[str, Any]] = None,
**kwargs,
):
"""
Main training entry point.
Args:
resume_from_checkpoint (:obj:`str`, `optional`):
Local path to a saved checkpoint as saved by a previous instance of :class:`~transformers.Trainer`. If
present, training will resume from the model/optimizer/scheduler states loaded here.
trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
The trial run or the hyperparameter dictionary for hyperparameter search.
kwargs:
Additional keyword arguments used to hide deprecated arguments
"""
if "model_path" in kwargs:
resume_from_checkpoint = kwargs.pop("model_path")
warnings.warn(
"`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
"instead.",
FutureWarning,
)
if len(kwargs) > 0:
raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
# This might change the seed so needs to run first.
self._hp_search_setup(trial)
# Model re-init
model_reloaded = False
if self.model_init is not None:
# Seed must be set before instantiating the model when using model_init.
set_seed(self.args.seed)
self.model = self.call_model_init(trial)
model_reloaded = True
# Reinitializes optimizer and scheduler
self.optimizer, self.lr_scheduler = None, None
# Load potential model checkpoint
if resume_from_checkpoint is not None and os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):
logger.info(f"Loading model from {resume_from_checkpoint}).")
if isinstance(self.model, PreTrainedModel):
self.model = self.model.from_pretrained(resume_from_checkpoint)
model_reloaded = True
else:
state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME))
self.model.load_state_dict(state_dict)
# If model was re-initialized, put it on the right device and update self.model_wrapped
if model_reloaded:
if not self.is_model_parallel:
self.model = self.model.to(self.args.device)
self.model_wrapped = self.model
# Keeping track of whether we can call len() on the dataset or not
train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)
# Data loader and number of training steps
train_dataloader = self.get_train_dataloader()
# Setting up training control variables:
# number of training epochs: num_train_epochs
# number of training steps per epoch: num_update_steps_per_epoch
# total number of training steps to execute: max_steps
if train_dataset_is_sized:
num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
if self.args.max_steps > 0:
max_steps = self.args.max_steps
num_train_epochs = self.args.max_steps // num_update_steps_per_epoch + int(
self.args.max_steps % num_update_steps_per_epoch > 0
)
else:
max_steps = math.ceil(self.args.num_train_epochs * num_update_steps_per_epoch)
num_train_epochs = math.ceil(self.args.num_train_epochs)
else:
# see __init__. max_steps is set when the dataset has no __len__
max_steps = self.args.max_steps
num_train_epochs = 1
num_update_steps_per_epoch = max_steps
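# Worked example: 1250 batches per epoch with gradient_accumulation_steps=4 -> 312 update steps per epoch;
# with num_train_epochs=3 this gives max_steps = ceil(3 * 312) = 936.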
if self.args.deepspeed:
model, optimizer, lr_scheduler = init_deepspeed(self, num_training_steps=max_steps)
self.model = model.module
self.model_wrapped = model # will get further wrapped in DDP
self.deepspeed = model # DeepSpeedEngine object
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
else:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
self.state = TrainerState()
self.state.is_hyper_param_search = trial is not None
# Check if saved optimizer or scheduler states exist
self._load_optimizer_and_scheduler(resume_from_checkpoint)
model = self.model_wrapped
# Mixed precision training with apex (torch < 1.6)
if self.use_apex:
model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
# Multi-gpu training (should be after apex fp16 initialization)
if self.args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if self.sharded_dpp:
model = ShardedDDP(model, self.optimizer)
elif is_sagemaker_distributed_available():
model = DDP(model, device_ids=[dist.get_local_rank()], broadcast_buffers=False)
elif self.args.local_rank != -1:
if self.args.ddp_find_unused_parameters is not None:
find_unused_parameters = self.args.ddp_find_unused_parameters
elif isinstance(model, PreTrainedModel):
# find_unused_parameters breaks checkpointing as per
# https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
find_unused_parameters = not getattr(model.config, "gradient_checkpointing", False)
else:
find_unused_parameters = True
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[self.args.local_rank],
output_device=self.args.local_rank,
find_unused_parameters=find_unused_parameters,
)
# for the rest of this function `model` is the outside model, whether it was wrapped or not
if model is not self.model:
self.model_wrapped = model
# important: at this point:
# self.model is the Transformers Model
# self.model_wrapped is DDP(Transformers Model), DDP(Deepspeed(Transformers Model)), etc.
# Train!
if is_torch_tpu_available():
world_size = xm.xrt_world_size()
elif self.args.local_rank != -1:
world_size = dist.get_world_size()
else:
world_size = 1
total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps * world_size
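# e.g. (assumed values) a per-process batch of 8 with gradient_accumulation_steps=4 and world_size=2
# yields an effective batch of 8 * 4 * 2 = 64 examples per optimizer step.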
num_examples = (
self.num_examples(train_dataloader)
if train_dataset_is_sized
else total_train_batch_size * self.args.max_steps
)
logger.info("***** Running training *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {self.args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
logger.info(f" Gradient Accumulation steps = {self.args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {max_steps}")
self.state.epoch = 0
start_time = time.time()
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if resume_from_checkpoint is not None and os.path.isfile(
os.path.join(resume_from_checkpoint, "trainer_state.json")
):
self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, "trainer_state.json"))
epochs_trained = self.state.global_step // num_update_steps_per_epoch
if not self.args.ignore_data_skip:
steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
steps_trained_in_current_epoch *= self.args.gradient_accumulation_steps
else:
steps_trained_in_current_epoch = 0
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(f" Continuing training from epoch {epochs_trained}")
logger.info(f" Continuing training from global step {self.state.global_step}")
if not self.args.ignore_data_skip:
logger.info(
f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
"batches in the first epoch."
)
# Update the references
self.callback_handler.model = self.model
self.callback_handler.optimizer = self.optimizer
self.callback_handler.lr_scheduler = self.lr_scheduler
self.callback_handler.train_dataloader = train_dataloader
self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
self.state.trial_params = hp_params(trial) if trial is not None else None
# This should be the same if the state has been saved but in case the training arguments changed, it's safer
# to set this after the load.
self.state.max_steps = max_steps
self.state.num_train_epochs = num_train_epochs
self.state.is_local_process_zero = self.is_local_process_zero()
self.state.is_world_process_zero = self.is_world_process_zero()
# tr_loss is a tensor to avoid synchronization of TPUs through .item()
tr_loss = torch.tensor(0.0).to(self.args.device)
# _total_loss_scalar is updated every time .item() has to be called on tr_loss and stores the sum of all losses
self._total_loss_scalar = 0.0
self._globalstep_last_logged = self.state.global_step
self._total_flos = self.state.total_flos
model.zero_grad()
self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control)
# Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
if not self.args.ignore_data_skip:
for epoch in range(epochs_trained):
# We just need to begin an iteration to create the randomization of the sampler.
for _ in train_dataloader:
break
for epoch in range(epochs_trained, num_train_epochs):
if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch)
if is_torch_tpu_available():
parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(
self.args.device
)
epoch_iterator = parallel_loader
else:
epoch_iterator = train_dataloader
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
steps_in_epoch = len(epoch_iterator) if train_dataset_is_sized else self.args.max_steps
self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control)
for step, inputs in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
if (step + 1) % self.args.gradient_accumulation_steps == 0:
self.control = self.callback_handler.on_step_begin(self.args, self.state, self.control)
if ((step + 1) % self.args.gradient_accumulation_steps != 0) and self.args.local_rank != -1:
# Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
with model.no_sync():
tr_loss += self.training_step(model, inputs)
else:
tr_loss += self.training_step(model, inputs)
self._total_flos += self.floating_point_ops(inputs)
if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
steps_in_epoch <= self.args.gradient_accumulation_steps
and (step + 1) == steps_in_epoch
):
# Gradient clipping
if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0 and not self.deepspeed:
# deepspeed does its own clipping
if self.use_amp:
# AMP: gradients need unscaling
self.scaler.unscale_(self.optimizer)
if hasattr(self.optimizer, "clip_grad_norm"):
# Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
self.optimizer.clip_grad_norm(self.args.max_grad_norm)
else:
# Revert to normal clipping otherwise, handling Apex or full precision
torch.nn.utils.clip_grad_norm_(
amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
self.args.max_grad_norm,
)
# Optimizer step
if self.deepspeed:
self.deepspeed.step()
elif is_torch_tpu_available():
xm.optimizer_step(self.optimizer)
elif self.use_amp:
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.optimizer.step()
self.lr_scheduler.step()
model.zero_grad()
self.state.global_step += 1
self.state.epoch = epoch + (step + 1) / steps_in_epoch
self.control = self.callback_handler.on_step_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.control.should_epoch_stop or self.control.should_training_stop:
break
self.control = self.callback_handler.on_epoch_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.args.tpu_metrics_debug or self.args.debug:
if is_torch_tpu_available():
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
else:
logger.warning(
"You enabled PyTorch/XLA debug metrics but you don't have a TPU "
"configured. Check your training configuration if this is unexpected."
)
if self.control.should_training_stop:
break
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
if self.args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
logger.info(
f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
)
if isinstance(self.model, PreTrainedModel):
self.model = self.model.from_pretrained(self.state.best_model_checkpoint)
if not self.is_model_parallel:
self.model = self.model.to(self.args.device)
else:
state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME))
self.model.load_state_dict(state_dict)
if self.deepspeed:
self.deepspeed.load_checkpoint(
self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False
)
metrics = speed_metrics("train", start_time, self.state.max_steps)
if self._total_flos is not None:
self.store_flos()
metrics["total_flos"] = self.state.total_flos
self.log(metrics)
self.control = self.callback_handler.on_train_end(self.args, self.state, self.control)
# add remaining tr_loss
self._total_loss_scalar += tr_loss.item()
return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics)
def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):
if self.control.should_log:
logs: Dict[str, float] = {}
tr_loss_scalar = tr_loss.item()
# reset tr_loss to zero
tr_loss -= tr_loss
logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
# backward compatibility for pytorch schedulers
logs["learning_rate"] = (
self.lr_scheduler.get_last_lr()[0]
if version.parse(torch.__version__) >= version.parse("1.4")
else self.lr_scheduler.get_lr()[0]
)
self._total_loss_scalar += tr_loss_scalar
self._globalstep_last_logged = self.state.global_step
self.log(logs)
metrics = None
if self.control.should_evaluate:
metrics = self.evaluate()
self._report_to_hp_search(trial, epoch, metrics)
if self.control.should_save:
self._save_checkpoint(model, trial, metrics=metrics)
self.control = self.callback_handler.on_save(self.args, self.state, self.control)
def _save_checkpoint(self, model, trial, metrics=None):
# In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
# want to save.
assert _model_unwrap(model) is self.model, "internal model should be a reference to self.model"
# Save model checkpoint
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
if self.hp_search_backend is not None and trial is not None:
if self.hp_search_backend == HPSearchBackend.OPTUNA:
run_id = trial.number
else:
from ray import tune
run_id = tune.get_trial_id()
run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
output_dir = os.path.join(self.args.output_dir, run_name, checkpoint_folder)
else:
output_dir = os.path.join(self.args.output_dir, checkpoint_folder)
self.store_flos()
self.save_model(output_dir)
if self.deepspeed:
self.deepspeed.save_checkpoint(output_dir)
# Save optimizer and scheduler
if self.sharded_dpp:
self.optimizer.consolidate_state_dict()
if is_torch_tpu_available():
xm.rendezvous("saving_optimizer_states")
xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
elif self.is_world_process_zero() and not self.deepspeed:
# deepspeed.save_checkpoint above saves model/optim/sched
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
# Determine the new best metric / best model checkpoint
if metrics is not None and self.args.metric_for_best_model is not None:
metric_to_check = self.args.metric_for_best_model
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
metric_value = metrics[metric_to_check]
operator = np.greater if self.args.greater_is_better else np.less
if (
self.state.best_metric is None
or self.state.best_model_checkpoint is None
or operator(metric_value, self.state.best_metric)
):
self.state.best_metric = metric_value
self.state.best_model_checkpoint = output_dir
# Save the Trainer state
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
# Maybe delete some older checkpoints.
if self.is_world_process_zero():
self._rotate_checkpoints(use_mtime=True)
def _load_optimizer_and_scheduler(self, checkpoint):
"""If optimizer and scheduler states exist, load them."""
if checkpoint is None:
return
if os.path.isfile(os.path.join(checkpoint, "optimizer.pt")) and os.path.isfile(
os.path.join(checkpoint, "scheduler.pt")
):
# Load in optimizer and scheduler states
if is_torch_tpu_available():
# On TPU we have to take some extra precautions to properly load the states on the right device.
optimizer_state = torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location="cpu")
with warnings.catch_warnings(record=True) as caught_warnings:
lr_scheduler_state = torch.load(os.path.join(checkpoint, "scheduler.pt"), map_location="cpu")
reissue_pt_warnings(caught_warnings)
xm.send_cpu_data_to_device(optimizer_state, self.args.device)
xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)
self.optimizer.load_state_dict(optimizer_state)
self.lr_scheduler.load_state_dict(lr_scheduler_state)
else:
self.optimizer.load_state_dict(
torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location=self.args.device)
)
with warnings.catch_warnings(record=True) as caught_warnings:
self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, "scheduler.pt")))
reissue_pt_warnings(caught_warnings)
if self.deepspeed:
# Not sure how to check whether a deepspeed checkpoint exists; since load_checkpoint just returns None when it fails to find one, this acts as a combined check-and-load.
self.deepspeed.load_checkpoint(checkpoint, load_optimizer_states=True, load_lr_scheduler_states=True)
def hyperparameter_search(
self,
hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
n_trials: int = 20,
direction: str = "minimize",
backend: Optional[Union["str", HPSearchBackend]] = None,
hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
**kwargs,
) -> BestRun:
"""
Launch a hyperparameter search using ``optuna`` or ``Ray Tune``. The optimized quantity is determined by
:obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is
provided, the sum of all metrics otherwise.
.. warning::
To use this method, you need to have provided a ``model_init`` when initializing your
:class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible
with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the
method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.
Args:
hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`):
A function that defines the hyperparameter search space. Will default to
:func:`~transformers.trainer_utils.default_hp_space_optuna` or
:func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.
compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):
A function computing the objective to minimize or maximize from the metrics returned by the
:obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.
n_trials (:obj:`int`, `optional`, defaults to 20):
The number of trial runs to test.
direction (:obj:`str`, `optional`, defaults to :obj:`"minimize"`):
Whether to optimize greater or lower objectives. Can be :obj:`"minimize"` or :obj:`"maximize"`; you should
pick :obj:`"minimize"` when optimizing the validation loss, :obj:`"maximize"` when optimizing one or
several metrics.
backend(:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):
The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which
one is installed. If both are installed, will default to optuna.
kwargs:
Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For
more information see:
- the documentation of `optuna.create_study
<https://optuna.readthedocs.io/en/stable/reference/alias_generated/optuna.create_study.html#optuna.create_study>`__
- the documentation of `tune.run
<https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__
Returns:
:class:`transformers.trainer_utils.BestRun`: All the information about the best run.
"""
if backend is None:
backend = default_hp_search_backend()
if backend is None:
raise RuntimeError(
"At least one of optuna or ray should be installed. "
"To install optuna run `pip install optuna`."
"To install ray run `pip install ray[tune]`."
)
backend = HPSearchBackend(backend)
if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
if backend == HPSearchBackend.RAY and not is_ray_tune_available():
raise RuntimeError(
"You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
)
self.hp_search_backend = backend
if self.model_init is None:
raise RuntimeError(
"To use hyperparameter search, you need to pass your model through a model_init function."
)
self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
self.hp_name = hp_name
self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray
best_run = run_hp_search(self, n_trials, direction, **kwargs)
self.hp_search_backend = None
return best_run
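# Illustrative usage sketch (an assumption-based example, not part of the original
# source): how hyperparameter_search is typically invoked with the optuna backend,
# assuming the surrounding class is the usual `Trainer`. `model_init_fn`,
# `training_args`, `train_ds` and `eval_ds` are hypothetical placeholders.
def _example_hyperparameter_search(model_init_fn, training_args, train_ds, eval_ds):
    def my_hp_space(trial):
        # search only the learning rate, on a log scale
        return {"learning_rate": trial.suggest_float("learning_rate", 1e-5, 5e-5, log=True)}
    trainer = Trainer(
        model_init=model_init_fn, args=training_args, train_dataset=train_ds, eval_dataset=eval_ds
    )
    best_run = trainer.hyperparameter_search(
        hp_space=my_hp_space, n_trials=10, direction="minimize", backend="optuna"
    )
    return best_run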
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if self.state.epoch is not None:
logs["epoch"] = round(self.state.epoch, 2)
output = {**logs, **{"step": self.state.global_step}}
self.state.log_history.append(output)
self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
"""
Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and
handling potential state.
"""
for k, v in inputs.items():
if isinstance(v, torch.Tensor):
inputs[k] = v.to(self.args.device)
if self.args.past_index >= 0 and self._past is not None:
inputs["mems"] = self._past
return inputs
def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
"""
Perform a training step on a batch of inputs.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to train.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
Return:
:obj:`torch.Tensor`: The tensor with training loss on this batch.
"""
model.train()
inputs = self._prepare_inputs(inputs)
if self.use_amp:
with autocast():
loss = self.compute_loss(model, inputs)
else:
loss = self.compute_loss(model, inputs)
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.args.gradient_accumulation_steps > 1:
loss = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(loss).backward()
elif self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(loss)
else:
loss.backward()
return loss.detach()
def compute_loss(self, model, inputs, return_outputs=False):
"""
How the loss is computed by Trainer. By default, all models return the loss in the first element.
Subclass and override for custom behavior.
"""
if self.label_smoother is not None and "labels" in inputs:
labels = inputs.pop("labels")
else:
labels = None
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if labels is not None:
loss = self.label_smoother(outputs, labels)
else:
# We don't use .loss here since the model may return tuples instead of ModelOutput.
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
return (loss, outputs) if return_outputs else loss
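# Illustrative sketch (assumption-based, not from the original source) of the
# "subclass and override" pattern mentioned above, written as a standalone helper:
# it recomputes a plain cross-entropy loss from the model's logits. `model` and
# `inputs` are hypothetical arguments following the contract documented above.
def _example_custom_compute_loss(model, inputs, return_outputs=False):
    labels = inputs.pop("labels")
    outputs = model(**inputs)
    logits = outputs["logits"] if isinstance(outputs, dict) else outputs[0]
    loss = torch.nn.functional.cross_entropy(logits.view(-1, logits.size(-1)), labels.view(-1))
    return (loss, outputs) if return_outputs else loss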
def is_local_process_zero(self) -> bool:
"""
Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several
machines) main process.
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=True)
else:
return self.args.local_rank in [-1, 0]
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on several
machines, this is only going to be :obj:`True` for one process).
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=False)
else:
return self.args.local_rank == -1 or dist.get_rank() == 0
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
Will only save from the world_master process (unless in TPUs).
"""
if is_torch_tpu_available():
self._save_tpu(output_dir)
elif self.is_world_process_zero():
self._save(output_dir)
# If on sagemaker and we are saving the main model (not a checkpoint so output_dir=None), save a copy to
# SM_MODEL_DIR for easy deployment.
if output_dir is None and os.getenv("SM_MODEL_DIR") is not None:
self.save_model(output_dir=os.getenv("SM_MODEL_DIR"))
def _save_tpu(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info("Saving model checkpoint to %s", output_dir)
if xm.is_master_ordinal():
os.makedirs(output_dir, exist_ok=True)
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
xm.rendezvous("saving_checkpoint")
if not isinstance(self.model, PreTrainedModel):
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
state_dict = self.model.state_dict()
xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir)
if self.tokenizer is not None and self.is_world_process_zero():
self.tokenizer.save_pretrained(output_dir)
def _save(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info("Saving model checkpoint to %s", output_dir)
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
if not isinstance(self.model, PreTrainedModel):
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
state_dict = self.model.state_dict()
torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir)
if self.tokenizer is not None and self.is_world_process_zero():
self.tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
def store_flos(self):
# Storing the number of floating-point operations that went into the model
if self._total_flos is not None:
if self.args.local_rank != -1:
self.state.total_flos = distributed_broadcast_scalars([self._total_flos]).sum().item()
else:
self.state.total_flos = self._total_flos
def _sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(self.args.output_dir).glob(f"{checkpoint_prefix}-*")]
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
# Make sure we don't delete the best model.
if self.state.best_model_checkpoint is not None:
best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
checkpoints_sorted[best_model_index], checkpoints_sorted[-1] = (
checkpoints_sorted[-1],
checkpoints_sorted[best_model_index],
)
return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
shutil.rmtree(checkpoint)
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> Dict[str, float]:
"""
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
(pass it to the init :obj:`compute_metrics` argument).
You can also subclass and override this method to inject custom behavior.
Args:
eval_dataset (:obj:`Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,
columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the
:obj:`__len__` method.
ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"eval_bleu" if the prefix is "eval" (default)
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
dictionary also contains the epoch number which comes from the training state.
"""
if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
eval_dataloader = self.get_eval_dataloader(eval_dataset)
start_time = time.time()
output = self.prediction_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if self.compute_metrics is None else None,
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
n_samples = len(eval_dataset if eval_dataset is not None else self.eval_dataset)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))
self.log(output.metrics)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
return output.metrics
def predict(
self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval"
) -> PredictionOutput:
"""
Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:obj:`Dataset`):
Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`
ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"eval_bleu" if the prefix is "eval" (default)
.. note::
If your predictions or labels have different sequence lengths (for instance because you're doing dynamic
padding in a token classification task) the predictions will be padded (on the right) to allow for
concatenation into one array. The padding index is -100.
Returns: `NamedTuple` A namedtuple with the following keys:
- predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.
- label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).
- metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset
contained labels).
"""
if test_dataset is not None and not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
test_dataloader = self.get_test_dataloader(test_dataset)
start_time = time.time()
output = self.prediction_loop(
test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, len(test_dataset)))
return output
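# Illustrative usage sketch (assumption-based): consuming the PredictionOutput returned
# by predict() for a simple classification head. `trainer` and `test_ds` are hypothetical.
def _example_predict_usage(trainer, test_ds):
    output = trainer.predict(test_ds, metric_key_prefix="test")
    predicted_class_ids = output.predictions.argmax(axis=-1)
    return predicted_class_ids, output.metrics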
def prediction_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
Works both with or without labels.
"""
if not isinstance(dataloader.dataset, collections.abc.Sized):
raise ValueError("dataset must implement __len__")
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
model = self.model
# multi-gpu eval
if self.args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Note: in torch.distributed mode, there's no point in wrapping the model
# inside a DistributedDataParallel as we'll be under `no_grad` anyways.
batch_size = dataloader.batch_size
num_examples = self.num_examples(dataloader)
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", num_examples)
logger.info(" Batch size = %d", batch_size)
losses_host: torch.Tensor = None
preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
world_size = 1
if is_torch_tpu_available():
world_size = xm.xrt_world_size()
elif self.args.local_rank != -1:
world_size = dist.get_world_size()
world_size = max(1, world_size)
eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
if not prediction_loss_only:
preds_gatherer = DistributedTensorGatherer(world_size, num_examples)
labels_gatherer = DistributedTensorGatherer(world_size, num_examples)
model.eval()
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
if self.args.past_index >= 0:
self._past = None
self.callback_handler.eval_dataloader = dataloader
for step, inputs in enumerate(dataloader):
loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
if loss is not None:
losses = loss.repeat(batch_size)
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if logits is not None:
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
if labels is not None:
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
# Set back to None to begin a new accumulation
losses_host, preds_host, labels_host = None, None, None
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
eval_loss = eval_losses_gatherer.finalize()
preds = preds_gatherer.finalize() if not prediction_loss_only else None
label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
if eval_loss is not None:
metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def _gather_and_numpify(self, tensors, name):
"""
Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before
concatenating them to `gathered`
"""
if tensors is None:
return
if is_torch_tpu_available():
tensors = nested_xla_mesh_reduce(tensors, name)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return nested_numpify(tensors)
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
Perform an evaluation step on :obj:`model` using obj:`inputs`.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to evaluate.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (:obj:`bool`):
Whether or not to return the loss only.
ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
Return:
Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and
labels (each being optional).
"""
has_labels = all(inputs.get(k) is not None for k in self.label_names)
inputs = self._prepare_inputs(inputs)
if ignore_keys is None:
if hasattr(self.model, "config"):
ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
with torch.no_grad():
if has_labels:
loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
loss = loss.mean().detach()
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
else:
logits = outputs[1:]
else:
loss = None
if self.use_amp:
with autocast():
outputs = model(**inputs)
else:
outputs = model(**inputs)
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
else:
logits = outputs
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index - 1]
if prediction_loss_only:
return (loss, None, None)
logits = nested_detach(logits)
if len(logits) == 1:
logits = logits[0]
if has_labels:
labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
if len(labels) == 1:
labels = labels[0]
else:
labels = None
return (loss, logits, labels)
def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
"""
For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of
floating point operations for every backward + forward pass. If using another model, either implement such a
method in the model or subclass and override this method.
Args:
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
Returns:
:obj:`int`: The number of floating-point operations.
"""
if hasattr(self.model, "floating_point_ops"):
return self.model.floating_point_ops(inputs)
else:
return 0
| [
"torch.distributed.get_world_size",
"torch.cat",
"torch.utils.data.dataloader.DataLoader",
"torch.utils.data.sampler.RandomSampler",
"torch.cuda.amp.autocast",
"torch.no_grad",
"torch.nn.parallel.DistributedDataParallel",
"torch.utils.data.sampler.SequentialSampler",
"torch.tensor",
"torch.utils.data.distributed.DistributedSampler",
"torch.cuda.amp.GradScaler",
"torch.distributed.get_rank",
"torch.distributed.get_local_rank",
"torch.nn.DataParallel"
] | 1.0 | marcoabrate/transformers | 3f77c26d74e1282955fefa8dfff2451e44f6d4a9 |
0.4 | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Donny You, RainbowSecret
## Microsoft Research
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pdb
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from lib.utils.tools.logger import Logger as Log
class WeightedFSOhemCELoss(nn.Module):
def __init__(self, configer):
super().__init__()
self.configer = configer
self.thresh = self.configer.get('loss', 'params')['ohem_thresh']
self.reduction = 'elementwise_mean'
if self.configer.exists('loss', 'params') and 'ce_reduction' in self.configer.get('loss', 'params'):
self.reduction = self.configer.get('loss', 'params')['ce_reduction']
def forward(self, predict, target, min_kept=1, weight=None, ignore_index=-1, **kwargs):
"""
Args:
predict:(n, c, h, w)
target:(n, h, w)
"""
prob_out = F.softmax(predict, dim=1)
tmp_target = target.clone()
tmp_target[tmp_target == ignore_index] = 0
prob = prob_out.gather(1, tmp_target.unsqueeze(1))
mask = target.contiguous().view(-1,) != ignore_index
sort_prob, sort_indices = prob.contiguous().view(-1,)[mask].contiguous().sort()
min_threshold = sort_prob[min(min_kept, sort_prob.numel() - 1)]
threshold = max(min_threshold, self.thresh)
loss_matrix = F.cross_entropy(predict, target, weight=weight, ignore_index=ignore_index, reduction='none').contiguous().view(-1,)
sort_loss_matrix = loss_matrix[mask][sort_indices]
select_loss_matrix = sort_loss_matrix[sort_prob < threshold]
if self.reduction == 'sum':
return select_loss_matrix.sum()
elif self.reduction == 'elementwise_mean':
return select_loss_matrix.mean()
else:
raise NotImplementedError('Reduction Error!')
# Cross-entropy Loss
class FSCELoss(nn.Module):
def __init__(self, configer=None):
super(FSCELoss, self).__init__()
self.configer = configer
weight = None
if self.configer.exists('loss', 'params') and 'ce_weight' in self.configer.get('loss', 'params'):
weight = self.configer.get('loss', 'params')['ce_weight']
weight = torch.FloatTensor(weight).cuda()
reduction = 'elementwise_mean'
if self.configer.exists('loss', 'params') and 'ce_reduction' in self.configer.get('loss', 'params'):
reduction = self.configer.get('loss', 'params')['ce_reduction']
ignore_index = -1
if self.configer.exists('loss', 'params') and 'ce_ignore_index' in self.configer.get('loss', 'params'):
ignore_index = self.configer.get('loss', 'params')['ce_ignore_index']
self.ce_loss = nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index, reduction=reduction)
def forward(self, inputs, *targets, weights=None, **kwargs):
loss = 0.0
if isinstance(inputs, tuple) or isinstance(inputs, list):
if weights is None:
weights = [1.0] * len(inputs)
for i in range(len(inputs)):
if len(targets) > 1:
target = self._scale_target(targets[i], (inputs[i].size(2), inputs[i].size(3)))
loss += weights[i] * self.ce_loss(inputs[i], target)
else:
target = self._scale_target(targets[0], (inputs[i].size(2), inputs[i].size(3)))
loss += weights[i] * self.ce_loss(inputs[i], target)
else:
target = self._scale_target(targets[0], (inputs.size(2), inputs.size(3)))
loss = self.ce_loss(inputs, target)
return loss
@staticmethod
def _scale_target(targets_, scaled_size):
targets = targets_.clone().unsqueeze(1).float()
targets = F.interpolate(targets, size=scaled_size, mode='nearest')
return targets.squeeze(1).long()
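# Minimal illustrative sketch (not part of the original loss module) of the
# target-rescaling step above: labels are resized with nearest-neighbour
# interpolation to match a prediction's spatial size before cross-entropy.
# The tensor shapes below are assumptions chosen only for the example.
def _example_scale_target():
    labels = torch.randint(0, 19, (2, 512, 512))   # (n, h, w) segmentation labels
    preds = torch.randn(2, 19, 64, 64)             # (n, c, h/8, w/8) logits
    scaled = F.interpolate(labels.unsqueeze(1).float(), size=preds.shape[2:], mode='nearest')
    scaled = scaled.squeeze(1).long()
    return F.cross_entropy(preds, scaled, ignore_index=-1)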
class FSOhemCELoss(nn.Module):
def __init__(self, configer):
super(FSOhemCELoss, self).__init__()
self.configer = configer
self.thresh = self.configer.get('loss', 'params')['ohem_thresh']
self.min_kept = max(1, self.configer.get('loss', 'params')['ohem_minkeep'])
weight = None
if self.configer.exists('loss', 'params') and 'ce_weight' in self.configer.get('loss', 'params'):
weight = self.configer.get('loss', 'params')['ce_weight']
weight = torch.FloatTensor(weight).cuda()
self.reduction = 'elementwise_mean'
if self.configer.exists('loss', 'params') and 'ce_reduction' in self.configer.get('loss', 'params'):
self.reduction = self.configer.get('loss', 'params')['ce_reduction']
ignore_index = -1
if self.configer.exists('loss', 'params') and 'ce_ignore_index' in self.configer.get('loss', 'params'):
ignore_index = self.configer.get('loss', 'params')['ce_ignore_index']
self.ignore_label = ignore_index
self.ce_loss = nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index, reduction='none')
def forward(self, predict, target, **kwargs):
"""
Args:
predict:(n, c, h, w)
target:(n, h, w)
weight (Tensor, optional): a manual rescaling weight given to each class.
If given, has to be a Tensor of size "nclasses"
"""
prob_out = F.softmax(predict, dim=1)
tmp_target = target.clone()
tmp_target[tmp_target == self.ignore_label] = 0
prob = prob_out.gather(1, tmp_target.unsqueeze(1))
mask = target.contiguous().view(-1,) != self.ignore_label
sort_prob, sort_indices = prob.contiguous().view(-1,)[mask].contiguous().sort()
min_threshold = sort_prob[min(self.min_kept, sort_prob.numel() - 1)]
threshold = max(min_threshold, self.thresh)
loss_matrix = self.ce_loss(predict, target).contiguous().view(-1,)
sort_loss_matrix = loss_matrix[mask][sort_indices]
select_loss_matrix = sort_loss_matrix[sort_prob < threshold]
if self.reduction == 'sum':
return select_loss_matrix.sum()
elif self.reduction == 'elementwise_mean':
return select_loss_matrix.mean()
else:
raise NotImplementedError('Reduction Error!')
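# Minimal illustrative sketch (not part of the original module) of the OHEM selection
# performed above: keep only pixels whose predicted probability for the ground-truth
# class falls below a threshold, while always keeping at least `min_kept` pixels.
# Shapes and threshold values are assumptions for the example only.
def _example_ohem_selection(min_kept=5, thresh=0.7):
    logits = torch.randn(1, 19, 32, 32)
    target = torch.randint(0, 19, (1, 32, 32))
    prob = F.softmax(logits, dim=1).gather(1, target.unsqueeze(1)).view(-1)
    pixel_loss = F.cross_entropy(logits, target, reduction='none').view(-1)
    sort_prob, sort_indices = prob.sort()
    threshold = max(sort_prob[min(min_kept, sort_prob.numel() - 1)].item(), thresh)
    hard_loss = pixel_loss[sort_indices][sort_prob < threshold]
    return hard_loss.mean()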
class FSAuxOhemCELoss(nn.Module):
def __init__(self, configer=None):
super(FSAuxOhemCELoss, self).__init__()
self.configer = configer
self.ce_loss = FSCELoss(self.configer)
if self.configer.get('loss', 'loss_type') == 'fs_auxohemce_loss':
self.ohem_ce_loss = FSOhemCELoss(self.configer)
else:
assert self.configer.get('loss', 'loss_type') == 'fs_auxslowohemce_loss'
self.ohem_ce_loss = FSSlowOhemCELoss(self.configer)
def forward(self, inputs, targets, **kwargs):
aux_out, seg_out = inputs
seg_loss = self.ohem_ce_loss(seg_out, targets)
aux_loss = self.ce_loss(aux_out, targets)
loss = self.configer.get('network', 'loss_weights')['seg_loss'] * seg_loss
loss = loss + self.configer.get('network', 'loss_weights')['aux_loss'] * aux_loss
return loss
class FSAuxCELoss(nn.Module):
def __init__(self, configer=None):
super(FSAuxCELoss, self).__init__()
self.configer = configer
self.ce_loss = FSCELoss(self.configer)
def forward(self, inputs, targets, **kwargs):
aux_out, seg_out = inputs
seg_loss = self.ce_loss(seg_out, targets)
aux_loss = self.ce_loss(aux_out, targets)
loss = self.configer.get('network', 'loss_weights')['seg_loss'] * seg_loss
loss = loss + self.configer.get('network', 'loss_weights')['aux_loss'] * aux_loss
return loss
class SegFixLoss(nn.Module):
"""
We predict a binary mask to categorize the boundary pixels as class 1 and otherwise as class 0.
Based on the pixels predicted as 1 within the binary mask, we further predict the direction for these
pixels.
"""
def __init__(self, configer=None):
super().__init__()
self.configer = configer
self.ce_loss = FSCELoss(self.configer)
def calc_weights(self, label_map, num_classes):
weights = []
for i in range(num_classes):
weights.append((label_map == i).sum().data)
weights = torch.FloatTensor(weights)
weights_sum = weights.sum()
return (1 - weights / weights_sum).cuda()
def forward(self, inputs, targets, **kwargs):
from lib.utils.helpers.offset_helper import DTOffsetHelper
pred_mask, pred_direction = inputs
seg_label_map, distance_map, angle_map = targets[0], targets[1], targets[2]
gt_mask = DTOffsetHelper.distance_to_mask_label(distance_map, seg_label_map, return_tensor=True)
gt_size = gt_mask.shape[1:]
mask_weights = self.calc_weights(gt_mask, 2)
pred_direction = F.interpolate(pred_direction, size=gt_size, mode="bilinear", align_corners=True)
pred_mask = F.interpolate(pred_mask, size=gt_size, mode="bilinear", align_corners=True)
mask_loss = F.cross_entropy(pred_mask, gt_mask, weight=mask_weights, ignore_index=-1)
mask_threshold = float(os.environ.get('mask_threshold', 0.5))
binary_pred_mask = torch.softmax(pred_mask, dim=1)[:, 1, :, :] > mask_threshold
gt_direction = DTOffsetHelper.angle_to_direction_label(
angle_map,
seg_label_map=seg_label_map,
extra_ignore_mask=(binary_pred_mask == 0),
return_tensor=True
)
direction_loss_mask = gt_direction != -1
direction_weights = self.calc_weights(gt_direction[direction_loss_mask], pred_direction.size(1))
direction_loss = F.cross_entropy(pred_direction, gt_direction, weight=direction_weights, ignore_index=-1)
if self.training \
and self.configer.get('iters') % self.configer.get('solver', 'display_iter') == 0 \
and torch.cuda.current_device() == 0:
Log.info('mask loss: {} direction loss: {}.'.format(mask_loss, direction_loss))
mask_weight = float(os.environ.get('mask_weight', 1))
direction_weight = float(os.environ.get('direction_weight', 1))
return mask_weight * mask_loss + direction_weight * direction_loss | [
"torch.nn.functional.interpolate",
"torch.FloatTensor",
"torch.softmax",
"torch.cuda.current_device",
"torch.nn.functional.cross_entropy",
"torch.nn.functional.softmax",
"torch.nn.CrossEntropyLoss"
] | 0.4.1 | Shuai-Xie/openseg.pytorch | 79116a58782ccd2150f9eb9054e70cfd42fc9773 |
1.1 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# n(net) o(oil) h(hang) r(rust) detection module
import os
import sys
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(current_dir)
from mmdet.models import build_detector
import mmcv
import torch
import cv2
import time
import json
from mmcv.runner import load_checkpoint
import PIL.Image as Image
import numpy as np
from torchvision.transforms import transforms
import pycocotools.mask as maskUtils
current_dir = os.path.dirname(os.path.abspath(__file__))
config_file = os.path.join(current_dir, 'configs','config_cascade_rcnn.py')
weight_file = '/home/kilox/weights/nohr_best.pth'
# weight_file = '/Weights/verified/oil_detection_v1/oil_best.pth'
class Object(object):
def __init__(self):
self.class_name = "Unknown"
self.trust = 0.0
self.rank = 0
def to_json(self):
return json.dumps(self.__dict__)
class Port:
def __init__(self):
self.cfg = mmcv.Config.fromfile(config_file)
# Build the model; test_cfg holds the NMS-related settings for the RPN/RCNN heads
self.detector = build_detector(self.cfg.model, train_cfg=None, test_cfg=self.cfg.test_cfg)
# Load the pretrained weights
load_checkpoint(self.detector, weight_file, map_location='cpu')
self.detector = self.detector.to('cuda')
self.detector.eval()
self.class_names = ('油污','鸟巢','锈蚀','飘挂物')  # oil stain, bird nest, rust, hanging debris
def process(self, image, save=None):
"""
:param image: PIL.Image input image
"""
np_image = np.asarray(image)
img, img_meta = self.prepare_single(np_image)
# forward
with torch.no_grad():
# Passing rescale=True means the returned masks are at the original image scale
result = self.detector.simple_test(img, [img_meta], proposals=None, rescale=True)
# Draw the masks and bounding boxes on the image
img = self.draw_image(np_image, img_meta, result)
real_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(time.time()))
output_file_name = os.path.join(real_time + '.jpg')
cv2.imwrite(output_file_name, img)
return False, None, output_file_name
# Helper that builds the meta info for an image
def prepare_single(self,img):
img_info = {'height': img.shape[0], 'width': img.shape[1]}
img_norm_cfg = self.cfg.img_norm_cfg
size_divisor = self.cfg.data.test.size_divisor
img, scale_factor = mmcv.imrescale(img, (4014,2400), return_scale=True)
img_shape = img.shape
img = mmcv.imnormalize(img, img_norm_cfg.mean, img_norm_cfg.std, img_norm_cfg.to_rgb)
img = mmcv.impad_to_multiple(img, size_divisor)
pad_shape = img.shape
_img = transforms.ToTensor()(img).float()
_img = _img.unsqueeze(0)
_img_meta = dict(
ori_shape=(img_info['height'], img_info['width'], 3),
img_shape=img_shape,
pad_shape=pad_shape,
scale_factor=scale_factor,
flip=False)
_img = _img.to('cuda')
return _img, _img_meta,
def draw_image(self,img, meta, result, score_thr=0.9):
def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
num_imgs = tensor.size(0)
mean = np.array(mean, dtype=np.float32)
std = np.array(std, dtype=np.float32)
imgs = []
for img_id in range(num_imgs):
img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0)
img = mmcv.imdenormalize(
img, mean, std, to_bgr=to_rgb).astype(np.uint8)
imgs.append(np.ascontiguousarray(img))
return imgs
if isinstance(result, tuple):
bbox_result, segm_result = result
else:
bbox_result, segm_result = result, None
h, w, _ = meta['ori_shape']
img_show = img[:h, :w, :].copy()
bboxes = np.vstack(bbox_result)
# Draw masks
# # draw segmentation masks
# if segm_result is not None:
# segms = mmcv.concat_list(segm_result)
# inds = np.where(bboxes[:, -1] > score_thr)[0]
# for i in inds:
# color_mask = np.random.randint(
# 0, 256, (1, 3), dtype=np.uint8)
# mask = maskUtils.decode(segms[i]).astype(np.bool)
# # todo fix dimension not equal
# img_check_shape = tuple(img_show.shape[0:2])
# if mask.shape != img_check_shape:
# width_diff = mask.shape[1] - img_check_shape[1]
# if mask.shape[1] < img_check_shape[1]:
# mask = np.pad(mask, (0, width_diff), mode='constant', constant_values=False)
# np.insert(mask, False, )
# else:
# mask = mask[:, :-width_diff]
# img_show[mask] = img_show[mask] * 0.5 + color_mask * 0.5
# Draw bounding boxes
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
assert bboxes.shape[1] == 5
scores = bboxes[:, -1]
inds = scores > score_thr
bboxes = bboxes[inds, :]
labels = labels[inds]
for bbox, label in zip(bboxes, labels):
bbox_int = bbox.astype(np.int32)
left_top = (bbox_int[0], bbox_int[1])
right_bottom = (bbox_int[2], bbox_int[3])
cv2.rectangle(
img_show, left_top, right_bottom, (0, 255, 0), thickness=2)
label_text = self.class_names[
label] if self.class_names is not None else 'cls {}'.format(label)
if len(bbox) > 4:
label_text += '|{:.02f}'.format(bbox[-1])
cv2.putText(img_show, label_text, (bbox_int[0], bbox_int[1] - 2),
cv2.FONT_HERSHEY_COMPLEX, 2, (0, 255, 0))
return img_show
def test():
pass
if __name__ == '__main__':
im = Image.open('/home/kilox/3.jpg')
port = Port()
print(port.process(im,True))
| [
"torch.no_grad"
] | 1.1 | yikir/mmdetection | dfceb61b0252f81b010f550f2acbe46c7dad6ef6 |
1.0 | # Copyright (c) Facebook, Inc. and its affiliates.
import collections
import gc
import os
from bisect import bisect
import requests
import torch
import tqdm
import yaml
from torch import nn
def lr_lambda_update(i_iter, cfg):
if (
cfg["training_parameters"]["use_warmup"] is True
and i_iter <= cfg["training_parameters"]["warmup_iterations"]
):
alpha = float(i_iter) / float(cfg["training_parameters"]["warmup_iterations"])
return cfg["training_parameters"]["warmup_factor"] * (1.0 - alpha) + alpha
else:
idx = bisect(cfg["training_parameters"]["lr_steps"], i_iter)
return pow(cfg["training_parameters"]["lr_ratio"], idx)
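# Hedged usage sketch (not part of the original utils): lr_lambda_update is typically
# wired into a LambdaLR scheduler. The optimizer and the config values below are
# hypothetical placeholders for illustration only.
def _example_lr_lambda_usage(model_parameters):
    cfg = {
        "training_parameters": {
            "use_warmup": True,
            "warmup_iterations": 1000,
            "warmup_factor": 0.2,
            "lr_steps": [14000, 19000],
            "lr_ratio": 0.1,
        }
    }
    optimizer = torch.optim.SGD(model_parameters, lr=0.01)
    scheduler = torch.optim.lr_scheduler.LambdaLR(
        optimizer, lr_lambda=lambda i_iter: lr_lambda_update(i_iter, cfg)
    )
    return optimizer, scheduler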
def clip_gradients(model, i_iter, writer, config):
# TODO: Fix question model retrieval
max_grad_l2_norm = config["training_parameters"]["max_grad_l2_norm"]
clip_norm_mode = config["training_parameters"]["clip_norm_mode"]
if max_grad_l2_norm is not None:
if clip_norm_mode == "all":
norm = nn.utils.clip_grad_norm_(model.parameters(), max_grad_l2_norm)
writer.add_scalars({"grad_norm": norm}, i_iter)
elif clip_norm_mode == "question":
question_embedding = model.module.question_embedding_module
norm = nn.utils.clip_grad_norm(
question_embedding.parameters(), max_grad_l2_norm
)
writer.add_scalars({"question_grad_norm": norm}, i_iter)
else:
raise NotImplementedError(
"Clip norm mode %s not implemented" % clip_norm_mode
)
def ckpt_name_from_core_args(config):
return "%s_%s_%s_%d" % (
config["tasks"],
config["datasets"],
config["model"],
config["training_parameters"]["seed"],
)
def foldername_from_config_override(args):
cfg_override = None
if hasattr(args, "config_override"):
cfg_override = args.config_override
elif "config_override" in args:
cfg_override = args["config_override"]
folder_name = ""
if cfg_override is not None and len(cfg_override) > 0:
folder_name = yaml.safe_dump(cfg_override, default_flow_style=True)
folder_name = folder_name.replace(":", ".").replace("\n", " ")
folder_name = folder_name.replace("/", "_")
folder_name = " ".join(folder_name.split())
folder_name = folder_name.replace(". ", ".").replace(" ", "_")
folder_name = "_" + folder_name
return folder_name
def get_pythia_root():
from pythia.common.registry import registry
pythia_root = registry.get("pythia_root", no_warning=True)
if pythia_root is None:
pythia_root = os.path.dirname(os.path.abspath(__file__))
pythia_root = os.path.abspath(os.path.join(pythia_root, ".."))
registry.register("pythia_root", pythia_root)
return pythia_root
def download_file(url, output_dir=".", filename=""):
if len(filename) == 0:
filename = os.path.join(".", url.split("/")[-1])
os.makedirs(output_dir, exist_ok=True)
filename = os.path.join(output_dir, filename)
r = requests.get(url, stream=True)
file_size = int(r.headers["Content-Length"])
chunk_size = 1024 * 1024
num_bars = int(file_size / chunk_size)
with open(filename, "wb") as fh:
for chunk in tqdm.tqdm(
r.iter_content(chunk_size=chunk_size),
total=num_bars,
unit="MB",
desc=filename,
leave=True,
):
fh.write(chunk)
def get_optimizer_parameters(model, config):
parameters = model.parameters()
has_custom = hasattr(model, "get_optimizer_parameters")
if has_custom:
parameters = model.get_optimizer_parameters(config)
is_parallel = isinstance(model, nn.DataParallel)
if is_parallel and hasattr(model.module, "get_optimizer_parameters"):
parameters = model.module.get_optimizer_parameters(config)
return parameters
def dict_to_string(dictionary):
logs = []
if dictionary is None:
return ""
for key, val in dictionary.items():
if hasattr(val, "item"):
val = val.item()
# if key.count('_') == 2:
# key = key[key.find('_') + 1:]
logs.append("%s: %.4f" % (key, val))
return ", ".join(logs)
def get_overlap_score(candidate, target):
"""Takes a candidate word and a target word and returns the overlap
score between the two.
Parameters
----------
candidate : str
Candidate word whose overlap has to be detected.
target : str
Target word against which the overlap will be detected
Returns
-------
float
Overlap score betwen candidate and the target.
"""
if len(candidate) < len(target):
temp = candidate
candidate = target
target = temp
overlap = 0.0
while len(target) >= 2:
if target in candidate:
overlap = len(target)
return overlap * 1.0 / len(candidate)
else:
target = target[:-1]
return 0.0
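# Worked example (illustrative values only) for the overlap score above:
# candidate="bread", target="read" -> "read" is contained in "bread", so the score
# is len("read") / len("bread") = 4 / 5 = 0.8; unrelated words score 0.0.
def _example_overlap_score():
    assert abs(get_overlap_score("bread", "read") - 0.8) < 1e-6
    assert get_overlap_score("cat", "dog") == 0.0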
def updir(d, n):
"""Given path d, go up n dirs from d and return that path"""
ret_val = d
for _ in range(n):
ret_val = os.path.dirname(ret_val)
return ret_val
def print_cuda_usage():
print("Memory Allocated:", torch.cuda.memory_allocated() / (1024 * 1024))
print("Max Memory Allocated:", torch.cuda.max_memory_allocated() / (1024 * 1024))
print("Memory Cached:", torch.cuda.memory_cached() / (1024 * 1024))
print("Max Memory Cached:", torch.cuda.max_memory_cached() / (1024 * 1024))
def get_current_tensors():
for obj in gc.get_objects():
try:
if torch.is_tensor(obj) or (
hasattr(obj, "data") and torch.is_tensor(obj.data)
):
print(type(obj), obj.size())
except:
pass
| [
"torch.cuda.max_memory_cached",
"torch.is_tensor",
"torch.cuda.max_memory_allocated",
"torch.cuda.memory_allocated",
"torch.cuda.memory_cached"
] | 1.0.1 | winnerineast/pythia | b6fe288405490f6e02a3e59dbf32a181aee35645 |
1.0 | # Copyright (c) Facebook, Inc. and its affiliates.
"""
The metrics module contains implementations of various metrics used commonly to
understand how well our models are performing. For e.g. accuracy, vqa_accuracy,
r@1 etc.
For implementing your own metric, you need to follow these steps:
1. Create your own metric class and inherit ``BaseMetric`` class.
2. In the ``__init__`` function of your class, make sure to call
``super().__init__('name')`` where 'name' is the name of your metric. If
you require any parameters in your ``__init__`` function, you can use
keyword arguments to represent them and metric constructor will take care of
providing them to your class from config.
3. Implement a ``calculate`` function which takes in ``SampleList`` and
`model_output` as input and return back a float tensor/number.
4. Register your metric with a key 'name' by using decorator,
``@registry.register_metric('name')``.
Example::
import torch
from pythia.common.registry import registry
from pythia.modules.metrics import BaseMetric
@registry.register_metric("some")
class SomeMetric(BaseMetric):
def __init__(self, some_param=None):
super().__init__("some")
....
def calculate(self, sample_list, model_output):
metric = torch.tensor(2, dtype=torch.float)
return metric
Example config for above metric::
model_attributes:
pythia:
metrics:
- type: some
params:
some_param: a
"""
import collections
import torch
from pythia.common.registry import registry
class Metrics:
"""Internally used by Pythia, Metrics acts as wrapper for handling
calculation of metrics over various metrics specified by the model in
the config. It initializes all of the metrics and when called it runs
calculate on each of them one by one and returns back a dict with proper
naming back. For e.g. an example dict returned by Metrics class:
``{'val/vqa_accuracy': 0.3, 'val/r@1': 0.8}``
Args:
metric_list (List[ConfigNode]): List of ConfigNodes where each ConfigNode
specifies name and parameters of the
metrics used.
"""
def __init__(self, metric_list):
if not isinstance(metric_list, list):
metric_list = [metric_list]  # wrap a single metric spec in a list
self.writer = registry.get("writer")
self.metrics = self._init_metrics(metric_list)
def _init_metrics(self, metric_list):
metrics = {}
for metric in metric_list:
params = {}
if isinstance(metric, collections.abc.Mapping):
if not hasattr(metric, "type"):
raise ValueError(
"Metric {} needs to have 'type' attribute".format(metric)
)
metric = metric.type
params = getattr(metric, "params", {})
else:
if not isinstance(metric, str):
raise TypeError(
"Metric {} has inappropriate type"
"'dict' or 'str' allowed".format(metric)
)
metric_cls = registry.get_metric_class(metric)
if metric_cls is None:
raise ValueError(
"No metric named {} registered to registry".format(metric)
)
metrics[metric] = metric_cls(**params)
return metrics
def __call__(self, sample_list, model_output, *args, **kwargs):
values = {}
if not hasattr(sample_list, "targets"):
return values
dataset_type = sample_list.dataset_type
with torch.no_grad():
for metric_name, metric_object in self.metrics.items():
key = "{}/{}".format(dataset_type, metric_name)
values[key] = metric_object._calculate_with_checks(
sample_list, model_output, *args, **kwargs
)
if not isinstance(values[key], torch.Tensor):
values[key] = torch.tensor(values[key], dtype=torch.float)
if values[key].dim() == 0:
values[key] = values[key].view(1)
registry.register(
"{}.{}.{}".format("metrics", sample_list.dataset_name, dataset_type), values
)
return values
class BaseMetric:
"""Base class to be inherited by all metrics registered to Pythia. See
the description on top of the file for more information. Child class must
implement ``calculate`` function.
Args:
name (str): Name of the metric.
"""
def __init__(self, name, *args, **kwargs):
self.name = name
def calculate(self, sample_list, model_output, *args, **kwargs):
"""Abstract method to be implemented by the child class. Takes
in a ``SampleList`` and a dict returned by model as output and
returns back a float tensor/number indicating value for this metric.
Args:
sample_list (SampleList): SampleList provided by the dataloader for the
current iteration.
model_output (Dict): Output dict from the model for the current
SampleList
Returns:
torch.Tensor|float: Value of the metric.
"""
# Override in your child class
raise NotImplementedError(
"'calculate' must be implemented in the child class"
)
def __call__(self, *args, **kwargs):
return self.calculate(*args, **kwargs)
def _calculate_with_checks(self, *args, **kwargs):
value = self.calculate(*args, **kwargs)
return value
@registry.register_metric("accuracy")
class Accuracy(BaseMetric):
"""Metric for calculating accuracy.
**Key:** ``accuracy``
"""
def __init__(self):
super().__init__("accuracy")
def calculate(self, sample_list, model_output, *args, **kwargs):
"""Calculate accuracy and return it back.
Args:
sample_list (SampleList): SampleList provided by DataLoader for
current iteration
model_output (Dict): Dict returned by model.
Returns:
torch.FloatTensor: accuracy.
"""
output = model_output["scores"]
expected = sample_list["targets"]
output = torch.max(output, 1)[1]
correct = (expected == output.squeeze()).sum()
correct = correct.float()  # avoid integer division below
total = len(expected)
value = correct / total
return value
@registry.register_metric("caption_bleu4")
class CaptionBleu4Metric(BaseMetric):
"""Metric for calculating caption accuracy using BLEU4 Score.
**Key:** ``caption_bleu4``
"""
import nltk.translate.bleu_score as bleu_score
def __init__(self):
super().__init__("caption_bleu4")
self.caption_processor = registry.get("coco_caption_processor")
def calculate(self, sample_list, model_output, *args, **kwargs):
"""Calculate accuracy and return it back.
Args:
sample_list (SampleList): SampleList provided by DataLoader for
current iteration
model_output (Dict): Dict returned by model.
Returns:
torch.FloatTensor: bleu4 score.
"""
# Create reference and hypotheses captions.
references = []
hypotheses = []
# References
targets = sample_list.answers
for j, p in enumerate(targets):
img_captions = [
self.caption_processor(c)["tokens"] for c in targets[j].tolist()
]
references.append(img_captions)
# Hypotheses
scores = torch.max(model_output["scores"], dim=-1)[1]
scores = scores.tolist()
predictions = []
for j, p in enumerate(scores):
caption = self.caption_processor(scores[j])["tokens"]
predictions.append(caption)
hypotheses.extend(predictions)
assert len(references) == len(hypotheses)
bleu4 = self.bleu_score.corpus_bleu(references, hypotheses)
return targets.new_tensor(bleu4, dtype=torch.float)
@registry.register_metric("vqa_accuracy")
class VQAAccuracy(BaseMetric):
"""
Calculate VQAAccuracy. Find more information here_
**Key**: ``vqa_accuracy``.
.. _here: https://visualqa.org/evaluation.html
"""
def __init__(self):
super().__init__("vqa_accuracy")
def _masked_unk_softmax(self, x, dim, mask_idx):
x1 = torch.nn.functional.softmax(x, dim=dim)
x1[:, mask_idx] = 0
x1_sum = torch.sum(x1, dim=1, keepdim=True)
y = x1 / x1_sum
return y
def calculate(self, sample_list, model_output, *args, **kwargs):
"""Calculate vqa accuracy and return it back.
Args:
sample_list (SampleList): SampleList provided by DataLoader for
current iteration
model_output (Dict): Dict returned by model.
Returns:
torch.FloatTensor: VQA Accuracy
"""
output = model_output["scores"]
expected = sample_list["targets"]
output = self._masked_unk_softmax(output, 1, 0)
output = output.argmax(dim=1) # argmax
one_hots = expected.new_zeros(*expected.size())
one_hots.scatter_(1, output.view(-1, 1), 1)
scores = one_hots * expected
accuracy = torch.sum(scores) / expected.size(0)
return accuracy
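# Minimal illustrative sketch (not part of the original metrics) of the soft-accuracy
# computation above on toy tensors: each row of `targets` holds per-answer VQA scores
# and the metric reads off the score of the argmax prediction. Values are made up.
def _example_vqa_accuracy():
    scores = torch.tensor([[0.1, 2.0, 0.3], [1.5, 0.2, 0.1]])   # model logits
    targets = torch.tensor([[0.0, 0.9, 0.3], [0.0, 0.6, 1.0]])  # soft ground-truth scores
    pred = scores.argmax(dim=1)
    one_hots = targets.new_zeros(*targets.size()).scatter_(1, pred.view(-1, 1), 1)
    return torch.sum(one_hots * targets) / targets.size(0)      # (0.9 + 0.0) / 2 = 0.45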
class RecallAtK(BaseMetric):
def __init__(self, name="recall@k"):
super().__init__(name)
def score_to_ranks(self, scores):
# sort in descending order - largest score gets highest rank
sorted_ranks, ranked_idx = scores.sort(1, descending=True)
# convert from ranked_idx to ranks
ranks = ranked_idx.clone().fill_(0)
for i in range(ranked_idx.size(0)):
for j in range(100):
ranks[i][ranked_idx[i][j]] = j
ranks += 1
return ranks
def get_gt_ranks(self, ranks, ans_ind):
_, ans_ind = ans_ind.max(dim=1)
ans_ind = ans_ind.view(-1)
gt_ranks = torch.LongTensor(ans_ind.size(0))
for i in range(ans_ind.size(0)):
gt_ranks[i] = int(ranks[i, ans_ind[i].long()])
return gt_ranks
def get_ranks(self, sample_list, model_output, *args, **kwargs):
output = model_output["scores"]
expected = sample_list["targets"]
ranks = self.score_to_ranks(output)
gt_ranks = self.get_gt_ranks(ranks, expected)
ranks = self.process_ranks(gt_ranks)
return ranks.float()
def calculate(self, sample_list, model_output, k, *args, **kwargs):
ranks = self.get_ranks(sample_list, model_output)
recall = float(torch.sum(torch.le(ranks, k))) / ranks.size(0)
return recall
@registry.register_metric("r@1")
class RecallAt1(RecallAtK):
"""
Calculate Recall@1 which specifies how many time the chosen candidate
was rank 1.
**Key**: ``r@1``.
"""
def __init__(self):
super().__init__("r@1")
def calculate(self, sample_list, model_output, *args, **kwargs):
"""Calculate Recall@1 and return it back.
Args:
sample_list (SampleList): SampleList provided by DataLoader for
current iteration
model_output (Dict): Dict returned by model.
Returns:
torch.FloatTensor: Recall@1
"""
return super().calculate(sample_list, model_output, k=1)
@registry.register_metric("r@5")
class RecallAt5(RecallAtK):
"""
Calculate Recall@5 which specifies how many time the chosen candidate
was among first 5 rank.
**Key**: ``r@5``.
"""
def __init__(self):
super().__init__("r@5")
def calculate(self, sample_list, model_output, *args, **kwargs):
"""Calculate Recall@5 and return it back.
Args:
sample_list (SampleList): SampleList provided by DataLoader for
current iteration
model_output (Dict): Dict returned by model.
Returns:
torch.FloatTensor: Recall@5
"""
return super().calculate(sample_list, model_output, k=5)
@registry.register_metric("r@10")
class RecallAt10(RecallAtK):
"""
Calculate Recall@10 which specifies how many time the chosen candidate
was among first 10 ranks.
**Key**: ``r@10``.
"""
def __init__(self):
super().__init__("r@10")
def calculate(self, sample_list, model_output, *args, **kwargs):
"""Calculate Recall@10 and return it back.
Args:
sample_list (SampleList): SampleList provided by DataLoader for
current iteration
model_output (Dict): Dict returned by model.
Returns:
torch.FloatTensor: Recall@10
"""
return super().calculate(sample_list, model_output, k=10)
@registry.register_metric("mean_r")
class MeanRank(RecallAtK):
"""
Calculate MeanRank which specifies what was the average rank of the chosen
candidate.
**Key**: ``mean_r``.
"""
def __init__(self):
super().__init__("mean_r")
def calculate(self, sample_list, model_output, *args, **kwargs):
"""Calculate Mean Rank and return it back.
Args:
sample_list (SampleList): SampleList provided by DataLoader for
current iteration
model_output (Dict): Dict returned by model.
Returns:
torch.FloatTensor: mean rank
"""
ranks = self.get_ranks(sample_list, model_output)
return torch.mean(ranks)
@registry.register_metric("mean_rr")
class MeanReciprocalRank(RecallAtK):
"""
Calculate reciprocal of mean rank..
**Key**: ``mean_rr``.
"""
def __init__(self):
super().__init__("mean_rr")
def calculate(self, sample_list, model_output, *args, **kwargs):
"""Calculate Mean Reciprocal Rank and return it back.
Args:
sample_list (SampleList): SampleList provided by DataLoader for
current iteration
model_output (Dict): Dict returned by model.
Returns:
torch.FloatTensor: Mean Reciprocal Rank
"""
ranks = self.get_ranks(sample_list, model_output)
return torch.mean(ranks.reciprocal())
| [
"torch.max",
"torch.no_grad",
"torch.le",
"torch.tensor",
"torch.nn.functional.softmax",
"torch.mean",
"torch.sum"
] | 1.0.1 | winnerineast/pythia | b6fe288405490f6e02a3e59dbf32a181aee35645 |
0.1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import sys
import beanmachine.ppl as bm
import pytest
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.base_proposer import (
BaseProposer,
)
from beanmachine.ppl.world import World, init_from_prior
class SampleModel:
@bm.random_variable
def foo(self):
return dist.Normal(0.0, 1.0)
@bm.random_variable
def bar(self):
return dist.Normal(self.foo(), 1.0)
@bm.functional
def baz(self):
return self.bar() * 2.0
class SampleDoubleModel:
@bm.random_variable
def foo(self):
return dist.Normal(torch.tensor(0.0).double(), torch.tensor(1.0).double())
@bm.random_variable
def bar(self):
return dist.Normal(self.foo(), torch.tensor(1.0).double())
@pytest.mark.parametrize("multiprocess", [False, True])
def test_inference(multiprocess):
if multiprocess and sys.platform.startswith("win"):
pytest.skip(
"Windows does not support fork-based multiprocessing (which is necessary "
"for running parallel inference within pytest."
)
model = SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
queries = [model.foo(), model.baz()]
observations = {model.bar(): torch.tensor(0.5)}
num_samples = 30
num_chains = 2
samples = mh.infer(
queries,
observations,
num_samples,
num_adaptive_samples=num_samples,
num_chains=num_chains,
run_in_parallel=multiprocess,
mp_context="fork",
)
assert model.foo() in samples
assert isinstance(samples[model.foo()], torch.Tensor)
assert samples[model.foo()].shape == (num_chains, num_samples)
assert samples.get_num_samples(include_adapt_steps=True) == num_samples * 2
# make sure that the RNG state for each chain is different
assert not torch.equal(
samples.get_chain(0)[model.foo()], samples.get_chain(1)[model.foo()]
)
def test_get_proposers():
world = World()
model = SampleModel()
world.call(model.bar())
nuts = bm.GlobalNoUTurnSampler()
proposers = nuts.get_proposers(world, world.latent_nodes, 10)
assert all(isinstance(proposer, BaseProposer) for proposer in proposers)
def test_initialize_world():
model = SampleModel()
nuts = bm.GlobalNoUTurnSampler()
world = nuts._initialize_world([model.bar()], {})
assert model.foo() in world
assert model.bar() in world
def test_initialize_from_prior():
mh = bm.SingleSiteAncestralMetropolisHastings()
model = SampleModel()
queries = [model.foo()]
samples_from_prior = []
for _ in range(10000):
world = mh._initialize_world(queries, {}, init_from_prior)
val = world.get(model.foo())
samples_from_prior.append(val.item())
assert samples_from_prior[0] != samples_from_prior[1]
assert math.isclose(sum(samples_from_prior) / 10000.0, 0.0, abs_tol=1e-2)
def test_initialization_resampling():
mh = bm.SingleSiteAncestralMetropolisHastings()
@bm.random_variable
def foo():
return dist.Uniform(3.0, 5.0)
    # verify that the method re-samples as expected
retries = 0
def init_after_three_tries(d: dist.Distribution):
nonlocal retries
retries += 1
return torch.tensor(float("nan")) if retries < 3 else d.sample()
sampler = mh.sampler(
[foo()], {}, num_samples=10, initialize_fn=init_after_three_tries
)
for world in sampler:
assert not torch.isinf(world.log_prob()) and not torch.isnan(world.log_prob())
# an extreme case where the init value is always out of the support
def init_to_zero(d: dist.Distribution):
return torch.zeros_like(d.sample())
with pytest.raises(ValueError, match="Cannot find a valid initialization"):
mh.infer([foo()], {}, num_samples=10, initialize_fn=init_to_zero)
@pytest.mark.parametrize(
"algorithm",
[
bm.GlobalNoUTurnSampler(),
bm.GlobalHamiltonianMonteCarlo(trajectory_length=1.0),
bm.SingleSiteAncestralMetropolisHastings(),
bm.SingleSiteNewtonianMonteCarlo(),
bm.SingleSiteUniformMetropolisHastings(),
],
)
def test_inference_with_double_dtype(algorithm):
model = SampleDoubleModel()
queries = [model.foo()]
bar_val = torch.tensor(0.5).double()
# make sure that the inference can run successfully
samples = algorithm.infer(
queries,
{model.bar(): bar_val},
num_samples=20,
num_chains=1,
)
assert samples[model.foo()].dtype == bar_val.dtype
| [
"torch.distributions.Normal",
"torch.distributions.Uniform",
"torch.tensor"
] | 0.1.0 | ToddSmall/beanmachine | 85768bd1785bf6a8b3760a04f37a8fca69b4e4ca |
1.8 | '''
Copyright (c) 2020, Martel Lab, Sunnybrook Research Institute
Code inspired by the Hugging Face Transformers example run_mlm.py:
https://github.com/huggingface/transformers/blob/main/examples/pytorch/
language-modeling/run_mlm.py
Description: Training code used to train a BERT embedding with Masked Language
Modeling for the BERTFineTuning code.
Input: train and test .txt files, each holding a list of sentences. These .txt
files can be created with the TextPReProcessingBERTModel.py file.
Output: A saved Transformer model based on the Hugging Face Transformers
package. This includes a config.json, eval_results.txt, pytorch_model.bin,
training_args.bin, and vocab.txt.
'''
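# A hypothetical invocation (file names, paths and sizes below are illustrative
# and not taken from the original project; note the script reads the vocabulary
# size from digits embedded in the training file name):
#
#   python MLM_Training_transformers.py \
#       --train_data_file data/train_30000.txt \
#       --eval_data_file data/test_30000.txt \
#       --output_dir runs/mlm_fromScratch \
#       --do_eval --num_train_epochs 3 --per_gpu_train_batch_size 16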
import sys
sys.path.append('.')
import argparse
import os
import torch
import logging
import random
import numpy as np
from transformers import BertConfig, BertForMaskedLM, AdamW, \
get_linear_schedule_with_warmup, BertTokenizer
from torch.utils.data import DataLoader, SequentialSampler, RandomSampler
from tqdm import tqdm, trange
from tokenizers.implementations import BertWordPieceTokenizer
from transformers.data.data_collator import DataCollatorForLanguageModeling
from transformers.data.datasets import TextDataset
from datetime import datetime as dt
tic = dt.now()
parser = argparse.ArgumentParser()
logger = logging.getLogger(__name__)
# Required parameters
parser.add_argument("--train_data_file", default=None, type=str,
required=True,
help="The input training data in a .txt file"
"files.")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions "
"and checkpoints will be written.")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument("--per_gpu_train_batch_size", default=16, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--eval_data_file", default=None, type=str,
required=False,
help="The input training data in a .txt file"
"files.")
parser.add_argument("--num_train_epochs", default=1.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_steps", default=2000, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument('--save_steps', type=int, default=10000,
help="Save checkpoint every X updates steps.")
parser.add_argument('--data_portion', type=float, default=1.0,
help="The portion of the training data you wish to load. "
"(1.0 for all data, >1.0 for a portion")
parser.add_argument('--logging_steps', type=int, default=10000,
help="Log every X updates steps.")
parser.add_argument('--block_size', type=int, default=32,
help="Max sequence length used in tokenizer and dataset.")
parser.add_argument("--start_from_checkpoint", action='store_true',
help="Start training from latest checkpoint.")
parser.add_argument("--preliminary_model", type=str, default='fromScratch',
help='Choice to start the model from a previously trained '
'model or start from scratch. Used with '
                         'model.from_pretrained(preliminary_model).')
args = parser.parse_args()
def set_seed(sd):
random.seed(sd)
np.random.seed(sd)
torch.manual_seed(sd)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(sd)
def evaluate(args, model, eval_dataset, tokenizer, step, prefix=""):
"""
Evaluation of model
:param args: input arguments from parser
:param model: pytorch model to be evaluated
:param eval_dataset: dataset used for evaluation
:param tokenizer: tokenizer used by the model
:param step: the current step in training
:param prefix: prescript to be added to the beginning of save file
:return: results of evaluation
"""
# Loop to handle MNLI double evaluation (matched, mis-matched)
print('')
eval_output_dir = args.output_dir
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
eval_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer,
mlm=True,
mlm_probability=0.15
)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler,
batch_size=eval_batch_size,
collate_fn=data_collator
)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.train_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
for batch in tqdm(eval_dataloader,
desc="Evaluating",
position=0,
leave=True):
with torch.no_grad():
outputs = model(input_ids=batch['input_ids'].to(args.device),
labels=batch['labels'].to(args.device))
loss = outputs['loss']
eval_loss += loss.mean().item()
nb_eval_steps += 1
eval_loss /= nb_eval_steps
perplexity = torch.exp(torch.tensor(eval_loss))
result = {
"perplexity": perplexity,
'loss': eval_loss,
"Iteration": str(step)
}
output_eval_file = os.path.join(eval_output_dir, prefix,
"eval_results.txt")
with open(output_eval_file, "a") as writer:
logger.info("***** Eval results {} *****".format(prefix))
writer.write('\n')
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s, " % (key, str(result[key])))
writer.close()
return result
def train(args, train_dataset, model, tokenizer, eval_dataset=None):
"""
Train the model
:param args: input arguments from parser
:param train_dataset: dataset used for training
:param model: pytorch model to be evaluated
:param tokenizer: tokenizer used by the model
:param eval_dataset: dataset used for evaluation
:return:
"""
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer,
mlm=True,
mlm_probability=0.15
)
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler,
batch_size=args.train_batch_size,
collate_fn=data_collator
)
init_total = len(
train_dataloader) * args.num_train_epochs
    # Load a model from a checkpoint if necessary
    if args.start_from_checkpoint:
        chk_pt_fdlr = [fldr for fldr in os.listdir(args.output_dir) if
                       fldr.startswith('checkpoint')]
        # Sort numerically by step so e.g. 'checkpoint-10000' comes after
        # 'checkpoint-9000' (a plain lexicographic sort would not).
        chk_pt_fdlr.sort(key=lambda name: int(''.join(c for c in name
                                                      if c.isdigit())))
        logger.info("***** Running training from checkpoint: " + str(
            chk_pt_fdlr[-1]) + " *****")
        global_step = int(''.join(c for c in chk_pt_fdlr[-1] if c.isdigit()))
it_total = init_total - global_step
args.num_train_epochs = np.round(it_total / len(train_dataloader))
# model = BertForMaskedLM(config=config)
model = BertForMaskedLM.from_pretrained(args.output_dir + '/' +
chk_pt_fdlr[-1])
model.to(args.device)
logger.info('Loaded checkpoint model. Beginning training.')
else:
global_step = 0
it_total = init_total
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if
not any(nd in n for nd in no_decay)],
'weight_decay': 0.01},
{'params': [p for n, p in model.named_parameters() if
any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=5e-5,
eps=1e-8)
if global_step > args.warmup_steps:
scheduler = \
get_linear_schedule_with_warmup(optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=init_total)
for _ in range(global_step):
scheduler.step()
logger.info('Initialized LR Scheduler and brought it to current step.')
else:
scheduler = \
get_linear_schedule_with_warmup(optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=it_total)
# multi-gpu training
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d",
args.per_gpu_train_batch_size)
logger.info(" Total optimization steps = %d", it_total)
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch")
set_seed(seed) # Added here for reproducibility (even between python 2
# and 3)
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader,
desc="Iteration",
position=0,
leave=True)
epoch_iterator.set_postfix({'loss': 'Initialized'})
for step, batch in enumerate(epoch_iterator):
model.train()
outputs = model(input_ids=batch['input_ids'].to(args.device),
labels=batch['labels'].to(args.device))
# model outputs are always tuple in transformers (see doc)
loss = outputs['loss']
if args.n_gpu > 1:
# mean() to average on multi-gpu parallel training
loss = loss.mean()
loss.backward()
tr_loss += loss.item()
epoch_iterator.set_postfix({'loss': loss.item()})
torch.nn.utils.clip_grad_norm_(model.parameters(),
1.0)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
results = evaluate(args, model, eval_dataset, tokenizer,
step=global_step)
if args.save_steps > 0 and global_step % args.save_steps == 0:
checkpoint_prefix = 'checkpoint'
# Save model checkpoint
output_dir = os.path.join(args.output_dir,
'{}-{}'.format(checkpoint_prefix,
global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module \
if hasattr(model, 'module') \
else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args,
os.path.join(output_dir, 'training_args.bin'))
logger.info("Saving model checkpoint to %s", output_dir)
return global_step, tr_loss / global_step, model
args.mlm = True
if os.path.exists(args.output_dir) and os.listdir(
args.output_dir) and not args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty. Use "
"--overwrite_output_dir to overcome.".format(
args.output_dir))
# Setup CUDA, GPU & distributed training
device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = torch.cuda.device_count()
args.device = device
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger.info(
"Device: %s, n_gpu: %s", device, args.n_gpu)
# Set seed
seed = 20210325
set_seed(seed)
logger.info("Beginning Tokenizer Training on data in " + args.train_data_file)
paths = args.train_data_file
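# The vocabulary size is parsed from the digits embedded in the training file
# name (e.g. 'train_30000.txt' -> 30000).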
args.vocab_size = int(''.join([char for char in args.train_data_file.split(
'/')[-1] if char.isnumeric()]))
if args.preliminary_model == 'fromScratch' and \
        not args.start_from_checkpoint:
# Building custom Tokenizer
tokenizer = BertWordPieceTokenizer(
clean_text=True,
strip_accents=True,
lowercase=True,
)
tokenizer.train(
paths,
vocab_size=args.vocab_size + 5,
min_frequency=2,
show_progress=True,
special_tokens=["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"],
limit_alphabet=1000,
wordpieces_prefix="##",
)
tokenizer.save_model(args.output_dir)
if args.preliminary_model != 'fromScratch':
tokenizer = BertTokenizer.from_pretrained(args.preliminary_model)
else:
tokenizer = BertTokenizer.from_pretrained(args.output_dir)
config = BertConfig.from_pretrained('bert-base-cased')
config.vocab_size = tokenizer.vocab_size
if args.preliminary_model != 'fromScratch':
model = BertForMaskedLM.from_pretrained(args.preliminary_model)
else:
model = BertForMaskedLM(config=config)
model.to(args.device)
train_dataset = TextDataset(
tokenizer=tokenizer,
file_path=args.train_data_file,
block_size=32,
overwrite_cache=args.overwrite_output_dir
)
eval_dataset = TextDataset(
tokenizer=tokenizer,
file_path=args.eval_data_file,
block_size=32,
overwrite_cache=args.overwrite_output_dir
)
if args.data_portion < 1.0:
train_dataset.examples = train_dataset.examples[:int(len(
train_dataset.examples)*args.data_portion)]
eval_dataset.examples = eval_dataset.examples[:int(len(
eval_dataset.examples)*args.data_portion)]
logger.info("Training and validation set limited to " + str(
args.data_portion) + " portion of original data.")
logger.info("Training/evaluation parameters %s", args)
global_step, tr_loss, model = train(args,
train_dataset,
model,
tokenizer,
eval_dataset=eval_dataset)
logger.info(" global_step = %s, average loss = %s", global_step,
tr_loss)
# Do the saving
# Create output directory if needed
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
# Take care of parallel training
model_to_save = model.module if hasattr(model,
'module') else model
model_to_save.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
# Load a trained model and vocabulary that you have fine-tuned
model = BertForMaskedLM.from_pretrained(args.output_dir)
if args.preliminary_model != 'fromScratch':
tokenizer = BertTokenizer.from_pretrained(args.preliminary_model)
else:
tokenizer = BertTokenizer.from_pretrained(args.output_dir)
model.to(args.device)
# Evaluation
results = {}
if args.do_eval:
checkpoints = [args.output_dir]
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split('-')[-1] if len(
checkpoints) > 1 else ""
prefix = checkpoint.split('/')[-1] if checkpoint.find(
'checkpoint') != -1 else ""
model = BertForMaskedLM.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, eval_dataset, tokenizer, step='TestSet')
result = dict(
(k + '_{}'.format(global_step), v) for k, v in result.items())
results.update(result)
toc = dt.now()
print("End of MLM_Training_transformers.py Script.")
print('Total Script Runtime: ' + str(toc-tic))
| [
"torch.utils.data.RandomSampler",
"torch.cuda.manual_seed_all",
"torch.no_grad",
"torch.utils.data.SequentialSampler",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.nn.DataParallel"
] | 1.8.1 | gkuling/BIRADS_BERT | f218d05283df90e536b210efbb4fab1d6dff082d |
1.10 | #
# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file contains functions for training a PyTorch MNIST Model
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
import numpy as np
import os
from random import randint
# Network
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 20, kernel_size=5)
self.conv2 = nn.Conv2d(20, 50, kernel_size=5)
self.fc1 = nn.Linear(800, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, x):
x = F.max_pool2d(self.conv1(x), kernel_size=2, stride=2)
x = F.max_pool2d(self.conv2(x), kernel_size=2, stride=2)
x = x.view(-1, 800)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
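# A quick shape check for the network above (illustrative only, not part of
# the original file): one 1x28x28 MNIST image maps to 10 log-probabilities.
#
#     net = Net()
#     out = net(torch.zeros(1, 1, 28, 28))
#     assert out.shape == (1, 10)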
class MnistModel(object):
def __init__(self):
self.batch_size = 64
self.test_batch_size = 100
self.learning_rate = 0.0025
self.sgd_momentum = 0.9
self.log_interval = 100
# Fetch MNIST data set.
self.train_loader = torch.utils.data.DataLoader(
datasets.MNIST('/tmp/mnist/data', train=True, download=True, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=self.batch_size,
shuffle=True,
num_workers=1,
timeout=600)
self.test_loader = torch.utils.data.DataLoader(
datasets.MNIST('/tmp/mnist/data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=self.test_batch_size,
shuffle=True,
num_workers=1,
timeout=600)
self.network = Net()
# Train the network for one or more epochs, validating after each epoch.
def learn(self, num_epochs=2):
# Train the network for a single epoch
def train(epoch):
self.network.train()
optimizer = optim.SGD(self.network.parameters(), lr=self.learning_rate, momentum=self.sgd_momentum)
for batch, (data, target) in enumerate(self.train_loader):
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = self.network(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch % self.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, batch * len(data), len(self.train_loader.dataset), 100. * batch / len(self.train_loader), loss.data.item()))
# Test the network
def test(epoch):
self.network.eval()
test_loss = 0
correct = 0
for data, target in self.test_loader:
with torch.no_grad():
data, target = Variable(data), Variable(target)
output = self.network(data)
test_loss += F.nll_loss(output, target).data.item()
pred = output.data.max(1)[1]
correct += pred.eq(target.data).cpu().sum()
test_loss /= len(self.test_loader)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(test_loss, correct, len(self.test_loader.dataset), 100. * correct / len(self.test_loader.dataset)))
for e in range(num_epochs):
train(e + 1)
test(e + 1)
def get_weights(self):
return self.network.state_dict()
def get_random_testcase(self):
data, target = next(iter(self.test_loader))
case_num = randint(0, len(data) - 1)
test_case = data.numpy()[case_num].ravel().astype(np.float32)
test_name = target.numpy()[case_num]
return test_case, test_name
| [
"torch.nn.Linear",
"torch.autograd.Variable",
"torch.no_grad",
"torch.nn.functional.log_softmax",
"torch.nn.Conv2d",
"torch.nn.functional.nll_loss"
] | 1.10.2 | L-Net-1992/TensorRT | 34b664d404001bd724cb56b52a6e0e05e1fd97f2 |
1.7 | import numpy as np
from torch.utils.data import DataLoader
from torch.utils.data.sampler import WeightedRandomSampler, SubsetRandomSampler
from wilds.common.utils import get_counts, split_into_groups
def get_train_loader(loader, dataset, batch_size,
uniform_over_groups=None, grouper=None, distinct_groups=True, n_groups_per_batch=None, **loader_kwargs):
"""
Constructs and returns the data loader for training.
Args:
- loader (str): Loader type. 'standard' for standard loaders and 'group' for group loaders,
which first samples groups and then samples a fixed number of examples belonging
to each group.
- dataset (WILDSDataset or WILDSSubset): Data
- batch_size (int): Batch size
- uniform_over_groups (None or bool): Whether to sample the groups uniformly or according
to the natural data distribution.
Setting to None applies the defaults for each type of loaders.
For standard loaders, the default is False. For group loaders,
the default is True.
- grouper (Grouper): Grouper used for group loaders or for uniform_over_groups=True
- distinct_groups (bool): Whether to sample distinct_groups within each minibatch for group loaders.
- n_groups_per_batch (int): Number of groups to sample in each minibatch for group loaders.
- loader_kwargs: kwargs passed into torch DataLoader initialization.
Output:
- data loader (DataLoader): Data loader.
"""
if loader == 'standard':
if uniform_over_groups is None or not uniform_over_groups:
return DataLoader(
dataset,
shuffle=True, # Shuffle training dataset
sampler=None,
collate_fn=dataset.collate,
batch_size=batch_size,
**loader_kwargs)
else:
assert grouper is not None
groups, group_counts = grouper.metadata_to_group(
dataset.metadata_array,
return_counts=True)
group_weights = 1 / group_counts
weights = group_weights[groups]
# Replacement needs to be set to True, otherwise we'll run out of minority samples
sampler = WeightedRandomSampler(weights, len(dataset), replacement=True)
return DataLoader(
dataset,
shuffle=False, # The WeightedRandomSampler already shuffles
sampler=sampler,
collate_fn=dataset.collate,
batch_size=batch_size,
**loader_kwargs)
elif loader == 'group':
if uniform_over_groups is None:
uniform_over_groups = True
assert grouper is not None
assert n_groups_per_batch is not None
if n_groups_per_batch > grouper.n_groups:
raise ValueError(f'n_groups_per_batch was set to {n_groups_per_batch} but there are only {grouper.n_groups} groups specified.')
group_ids = grouper.metadata_to_group(dataset.metadata_array)
batch_sampler = GroupSampler(
group_ids=group_ids,
batch_size=batch_size,
n_groups_per_batch=n_groups_per_batch,
uniform_over_groups=uniform_over_groups,
distinct_groups=distinct_groups)
return DataLoader(dataset,
shuffle=None,
sampler=None,
collate_fn=dataset.collate,
batch_sampler=batch_sampler,
drop_last=False,
**loader_kwargs)
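# Sketch of typical usage (assumes the public wilds API -- get_dataset,
# WILDSDataset.get_subset and CombinatorialGrouper; illustrative only):
#
#     from wilds import get_dataset
#     from wilds.common.grouper import CombinatorialGrouper
#     dataset = get_dataset(dataset="waterbirds", download=True)
#     train_data = dataset.get_subset("train")  # pass transform=... for real use
#     grouper = CombinatorialGrouper(dataset, ["background", "y"])
#     loader = get_train_loader("group", train_data, batch_size=16,
#                               grouper=grouper, n_groups_per_batch=2)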
def get_eval_loader(loader, dataset, batch_size, grouper=None, **loader_kwargs):
"""
Constructs and returns the data loader for evaluation.
Args:
- loader (str): Loader type. 'standard' for standard loaders.
- dataset (WILDSDataset or WILDSSubset): Data
- batch_size (int): Batch size
- loader_kwargs: kwargs passed into torch DataLoader initialization.
Output:
- data loader (DataLoader): Data loader.
"""
if loader == 'standard':
return DataLoader(
dataset,
shuffle=False, # Do not shuffle eval datasets
sampler=None,
collate_fn=dataset.collate,
batch_size=batch_size,
**loader_kwargs)
class GroupSampler:
"""
Constructs batches by first sampling groups,
then sampling data from those groups.
It drops the last batch if it's incomplete.
"""
def __init__(self, group_ids, batch_size, n_groups_per_batch,
uniform_over_groups, distinct_groups):
if batch_size % n_groups_per_batch != 0:
raise ValueError(f'batch_size ({batch_size}) must be evenly divisible by n_groups_per_batch ({n_groups_per_batch}).')
if len(group_ids) < batch_size:
raise ValueError(f'The dataset has only {len(group_ids)} examples but the batch size is {batch_size}. There must be enough examples to form at least one complete batch.')
self.group_ids = group_ids
self.unique_groups, self.group_indices, unique_counts = split_into_groups(group_ids)
self.distinct_groups = distinct_groups
self.n_groups_per_batch = n_groups_per_batch
self.n_points_per_group = batch_size // n_groups_per_batch
self.dataset_size = len(group_ids)
self.num_batches = self.dataset_size // batch_size
if uniform_over_groups: # Sample uniformly over groups
self.group_prob = None
else: # Sample a group proportionately to its size
self.group_prob = unique_counts.numpy() / unique_counts.numpy().sum()
def __iter__(self):
for batch_id in range(self.num_batches):
# Note that we are selecting group indices rather than groups
groups_for_batch = np.random.choice(
len(self.unique_groups),
size=self.n_groups_per_batch,
replace=(not self.distinct_groups),
p=self.group_prob)
sampled_ids = [
np.random.choice(
self.group_indices[group],
size=self.n_points_per_group,
replace=len(self.group_indices[group]) <= self.n_points_per_group, # False if the group is larger than the sample size
p=None)
for group in groups_for_batch]
# Flatten
sampled_ids = np.concatenate(sampled_ids)
yield sampled_ids
def __len__(self):
return self.num_batches
| [
"torch.utils.data.DataLoader"
] | 1.7.0 | caglasozen/wilds | db2ff095304891244962509459ee48e2fc5fd5e6 |
1.7 | import os
import torch
import pandas as pd
from PIL import Image
import numpy as np
from wilds.datasets.wilds_dataset import WILDSDataset
from wilds.common.grouper import CombinatorialGrouper
from wilds.common.metrics.all_metrics import Accuracy
class WaterbirdsDataset(WILDSDataset):
"""
The Waterbirds dataset.
This dataset is not part of the official WILDS benchmark.
We provide it for convenience and to facilitate comparisons to previous work.
Supported `split_scheme`:
'official'
Input (x):
Images of birds against various backgrounds that have already been cropped and centered.
Label (y):
y is binary. It is 1 if the bird is a waterbird (e.g., duck), and 0 if it is a landbird.
Metadata:
Each image is annotated with whether the background is a land or water background.
Original publication:
@inproceedings{sagawa2019distributionally,
title = {Distributionally robust neural networks for group shifts: On the importance of regularization for worst-case generalization},
author = {Sagawa, Shiori and Koh, Pang Wei and Hashimoto, Tatsunori B and Liang, Percy},
booktitle = {International Conference on Learning Representations},
year = {2019}
}
The dataset was constructed from the CUB-200-2011 dataset and the Places dataset:
@techreport{WahCUB_200_2011,
Title = {{The Caltech-UCSD Birds-200-2011 Dataset}},
Author = {Wah, C. and Branson, S. and Welinder, P. and Perona, P. and Belongie, S.},
        Year = {2011},
Institution = {California Institute of Technology},
Number = {CNS-TR-2011-001}
}
@article{zhou2017places,
title = {Places: A 10 million Image Database for Scene Recognition},
author = {Zhou, Bolei and Lapedriza, Agata and Khosla, Aditya and Oliva, Aude and Torralba, Antonio},
journal ={IEEE Transactions on Pattern Analysis and Machine Intelligence},
year = {2017},
publisher = {IEEE}
}
License:
The use of this dataset is restricted to non-commercial research and educational purposes.
"""
_dataset_name = 'waterbirds'
_versions_dict = {
'1.0': {
'download_url': 'https://worksheets.codalab.org/rest/bundles/0x505056d5cdea4e4eaa0e242cbfe2daa4/contents/blob/',
'compressed_size': None}}
def __init__(self, version=None, root_dir='data', download=False, split_scheme='official'):
self._version = version
self._data_dir = self.initialize_data_dir(root_dir, download)
if not os.path.exists(self.data_dir):
raise ValueError(
f'{self.data_dir} does not exist yet. Please generate the dataset first.')
# Read in metadata
# Note: metadata_df is one-indexed.
metadata_df = pd.read_csv(
os.path.join(self.data_dir, 'metadata.csv'))
# Get the y values
self._y_array = torch.LongTensor(metadata_df['y'].values)
self._y_size = 1
self._n_classes = 2
self._metadata_array = torch.stack(
(torch.LongTensor(metadata_df['place'].values), self._y_array),
dim=1
)
self._metadata_fields = ['background', 'y']
self._metadata_map = {
'background': [' land', 'water'], # Padding for str formatting
'y': [' landbird', 'waterbird']
}
# Extract filenames
self._input_array = metadata_df['img_filename'].values
self._original_resolution = (224, 224)
# Extract splits
self._split_scheme = split_scheme
if self._split_scheme != 'official':
raise ValueError(f'Split scheme {self._split_scheme} not recognized')
self._split_array = metadata_df['split'].values
self._eval_grouper = CombinatorialGrouper(
dataset=self,
groupby_fields=(['background', 'y']))
super().__init__(root_dir, download, split_scheme)
def get_input(self, idx):
"""
Returns x for a given idx.
"""
img_filename = os.path.join(
self.data_dir,
self._input_array[idx])
x = Image.open(img_filename).convert('RGB')
return x
def eval(self, y_pred, y_true, metadata, prediction_fn=None):
"""
Computes all evaluation metrics.
Args:
- y_pred (Tensor): Predictions from a model. By default, they are predicted labels (LongTensor).
But they can also be other model outputs such that prediction_fn(y_pred)
are predicted labels.
- y_true (LongTensor): Ground-truth labels
- metadata (Tensor): Metadata
- prediction_fn (function): A function that turns y_pred into predicted labels
Output:
- results (dictionary): Dictionary of evaluation metrics
- results_str (str): String summarizing the evaluation metrics
"""
metric = Accuracy(prediction_fn=prediction_fn)
results, results_str = self.standard_group_eval(
metric,
self._eval_grouper,
y_pred, y_true, metadata)
# For Waterbirds, the validation and test sets are constructed to be more balanced
# compared to the training set.
# To compute the actual average accuracy over the empirical (training) distribution,
# we therefore weight each groups according to their frequency in the training set.
results['adj_acc_avg'] = (
(results['acc_y:landbird_background:land'] * 3498
+ results['acc_y:landbird_background:water'] * 184
+ results['acc_y:waterbird_background:land'] * 56
+ results['acc_y:waterbird_background:water'] * 1057) /
(3498 + 184 + 56 + 1057))
del results['acc_avg']
results_str = f"Adjusted average acc: {results['adj_acc_avg']:.3f}\n" + '\n'.join(results_str.split('\n')[1:])
return results, results_str
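# Illustrative construction (paths are placeholders; assumes the dataset files
# already exist under root_dir):
#
#     dataset = WaterbirdsDataset(version="1.0", root_dir="data", download=False)
#     x = dataset.get_input(0)    # PIL RGB image
#     y = dataset.y_array[0]      # 0 = landbird, 1 = waterbird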
| [
"torch.LongTensor"
] | 1.7.0 | caglasozen/wilds | db2ff095304891244962509459ee48e2fc5fd5e6 |
1.1 | """Training mechanism for VAE-GAN"""
import os
import time
import logging
import numpy as np
import torch
import torch.nn.functional as F
from spml import (
image_util,
loss_utils,
)
from . import (
misc_utils,
saved_model_manager,
)
_LG = logging.getLogger(__name__)
def _save_images(images, src_path, step, output_dir):
src_name = os.path.splitext(os.path.basename(src_path))[0]
save_path = os.path.join(
output_dir, 'images', src_name, 'step_%d.png' % step)
misc_utils.ensure_dir(save_path)
images = [img.detach().cpu().numpy() for img in images]
images = np.concatenate(images, axis=1)
image_util.save_image(images, save_path)
def _log_header():
fields = ' '.join(['%10s'] * 9) % (
'KLD', 'BETA', 'F_RECON',
'G_RECON', 'G_FAKE', 'D_REAL', 'D_RECON', 'D_FAKE', '[PIXEL]',
)
_LG.info('%5s %5s: %s', '', 'PHASE', fields)
_LOGGED = {'last': 0}
def _log_loss(loss, phase, progress=None):
if _LOGGED['last'] % 30 == 0:
_log_header()
_LOGGED['last'] += 1
header = '' if progress is None else '%3d %%' % progress
fields = ' '.join(['%10.2e'] * 9) % (
loss['kld'], loss['beta'], loss['feats_recon'],
loss['gen_recon'], loss['gen_fake'],
loss['disc_orig'], loss['disc_recon'], loss['disc_fake'],
loss['pixel'],
)
_LG.info('%5s %5s: %s', header, phase, fields)
def _get_latent_stats(z, z_std):
z = z.detach().cpu().numpy()
z_std = z_std.detach().cpu().numpy()
return {
'z_mean': np.mean(z),
'z_min': np.min(z),
'z_max': np.max(z),
'z_var': np.var(z),
'z_std_mean': np.mean(z_std),
'z_std_min': np.min(z_std),
'z_std_max': np.max(z_std),
'z_std_var': np.var(z_std),
}
class Trainer:
def __init__(
self, model, optimizers,
train_loader, test_loader,
device, output_dir,
initial_beta=10.0,
beta_step=0.1,
target_kld=0.1,
beta_momentum=0.1,
samples=None,
):
self.model = model.float().to(device)
self.train_loader = train_loader
self.test_loader = test_loader
self.optimizers = optimizers
self.device = device
self.output_dir = output_dir
self.beta = initial_beta
self.beta_step = beta_step
self.target_kld = target_kld
self.beta_momentum = beta_momentum
self.samples = samples
self.saved_model_manager = saved_model_manager.SavedModelManager()
fields = [
'PHASE', 'TIME', 'STEP', 'EPOCH', 'KLD', 'BETA', 'F_RECON',
'G_RECON', 'G_FAKE', 'D_REAL', 'D_RECON', 'D_FAKE', 'PIXEL',
'Z_MEAN', 'Z_MIN', 'Z_MAX', 'Z_VAR',
'Z_STD_MEAN', 'Z_STD_MIN', 'Z_STD_MAX', 'Z_STD_VAR',
]
logfile = open(os.path.join(output_dir, 'result.csv'), 'w')
self.writer = misc_utils.CSVWriter(fields, logfile)
self.step = 0
self.epoch = 0
self.latent_stats = loss_utils.MovingStats(beta_momentum)
def _write(self, phase, loss, stats):
self.writer.write(
PHASE=phase, STEP=self.step, EPOCH=self.epoch, TIME=time.time(),
KLD=loss['kld'], BETA=loss['beta'],
F_RECON=loss['feats_recon'],
G_RECON=loss['gen_recon'], G_FAKE=loss['gen_fake'],
D_REAL=loss['disc_orig'],
D_RECON=loss['disc_recon'], D_FAKE=loss['disc_fake'],
PIXEL=loss['pixel'],
Z_MEAN=stats['z_mean'], Z_VAR=stats['z_var'],
Z_MIN=stats['z_min'], Z_MAX=stats['z_max'],
Z_STD_MEAN=stats['z_std_mean'], Z_STD_VAR=stats['z_std_var'],
Z_STD_MIN=stats['z_std_min'], Z_STD_MAX=stats['z_std_max'],
)
def save(self):
filename = 'epoch_%s_step_%s.pt' % (self.epoch, self.step)
output = os.path.join(self.output_dir, 'checkpoints', filename)
_LG.info('Saving checkpoint at %s', output)
misc_utils.ensure_dir(output)
torch.save({
'model': self.model.state_dict(),
'optimizers': {
key: opt.state_dict()
for key, opt in self.optimizers.items()
},
'epoch': self.epoch,
'step': self.step,
}, output)
return output
def manage_saved(self, path, loss):
path = self.saved_model_manager.update(path, loss)
if path:
os.remove(path)
def load(self, checkpoint):
_LG.info('Loading checkpoint from %s', checkpoint)
data = torch.load(checkpoint, map_location=self.device)
self.model.load_state_dict(data['model'])
for key, opt in data['optimizers'].items():
self.optimizers[key].load_state_dict(opt)
self.epoch = data['epoch']
self.step = data['step']
def _forward_gan(self, orig, update=False):
# Update discriminator with original image
preds_orig, _ = self.model.discriminator(orig)
disc_loss_orig = loss_utils.bce(preds_orig, 1)
if update:
self.model.zero_grad()
disc_loss_orig.backward()
self.optimizers['discriminator'].step()
# Update discriminator with reconstructed image
recon, latent = self.model.vae(orig)
preds_recon, _ = self.model.discriminator(recon.detach())
disc_loss_recon = loss_utils.bce(preds_recon, 0)
if update:
self.model.zero_grad()
disc_loss_recon.backward()
self.optimizers['discriminator'].step()
# Update generator with reconstructed image
preds_recon, _ = self.model.discriminator(recon)
gen_loss_recon = loss_utils.bce(preds_recon, 1)
if update:
self.model.zero_grad()
gen_loss_recon.backward()
self.optimizers['decoder'].step()
# Update discriminator with fake image
sample = torch.randn_like(latent[0], requires_grad=True)
fake = self.model.vae.decoder(sample)
preds_fake, _ = self.model.discriminator(fake.detach())
disc_loss_fake = loss_utils.bce(preds_fake, 0)
if update:
self.model.zero_grad()
disc_loss_fake.backward()
self.optimizers['discriminator'].step()
# Update generator with fake image
preds_fake, _ = self.model.discriminator(fake)
gen_loss_fake = loss_utils.bce(preds_fake, 1)
if update:
self.model.zero_grad()
gen_loss_fake.backward()
self.optimizers['decoder'].step()
return {
'disc_orig': disc_loss_orig.item(),
'disc_recon': disc_loss_recon.item(),
'disc_fake': disc_loss_fake.item(),
'gen_recon': gen_loss_recon.item(),
'gen_fake': gen_loss_fake.item(),
}
def _forward_vae(self, orig, update=False):
# Update feature
recon, _ = self.model.vae(orig)
_, feats_orig = self.model.discriminator(orig)
_, feats_recon = self.model.discriminator(recon)
feats_loss = F.mse_loss(input=feats_recon, target=feats_orig)
if update:
self.model.zero_grad()
feats_loss.backward()
self.optimizers['encoder'].step()
self.optimizers['decoder'].step()
# KLD
sample, latent = self.model.vae.encoder(orig)
latent_stats = self.latent_stats(sample, update)
kld = torch.mean(loss_utils.kld_loss(*latent_stats))
if update:
beta_latent_loss = self.beta * kld
self.model.zero_grad()
beta_latent_loss.backward()
self.optimizers['encoder'].step()
# Adjust beta
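        # (A simple proportional controller: beta is nudged up when the measured
        #  KLD exceeds target_kld and down when it falls below, so the KL term is
        #  weighted just enough to keep the latent close to the target.)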
if update:
kld_error = kld.item() - self.target_kld
self.beta += self.beta_step * kld_error
self.beta = max(1e-3, self.beta)
loss = {
'kld': kld.item(),
'beta': self.beta,
'feats_recon': feats_loss.item(),
}
stats = _get_latent_stats(*latent)
return recon, loss, stats
def _get_pixel_loss(self, orig):
recon, _ = self.model.vae(orig)
return F.mse_loss(orig, recon)
def _forward(self, orig, update=False):
loss_gan = self._forward_gan(orig, update=update)
recon, loss_vae, stats = self._forward_vae(orig, update=update)
with torch.no_grad():
pixel_loss = self._get_pixel_loss(orig)
loss = {'pixel': pixel_loss.item()}
loss.update(loss_vae)
loss.update(loss_gan)
return recon, loss, stats
def train_batch(self, batch):
self.model.train()
orig = batch['image'].float().to(self.device)
_, loss, stats = self._forward(orig, update=True)
self._write('train', loss, stats)
return loss
def test(self):
with torch.no_grad():
return self._test()
def _test(self):
self.model.eval()
loss_tracker = misc_utils.StatsTracker()
stats_tracker = misc_utils.StatsTracker()
for i, batch in enumerate(self.test_loader):
orig, path = batch['image'].float().to(self.device), batch['path']
recon, loss, stats = self._forward(orig, update=False)
loss_tracker.update(loss)
stats_tracker.update(stats)
if i % 10 == 0:
_save_images(
(orig[0], recon[0]), path[0],
self.step, self.output_dir)
self._write('test', loss_tracker, stats_tracker)
_log_loss(loss_tracker, phase='Test')
return loss_tracker
def generate(self, samples=None):
samples = self.samples if samples is None else samples
with torch.no_grad():
self._generate(samples)
def _generate(self, samples):
self.model.eval()
recons = self.model.vae.decoder(samples)
for i, recon in enumerate(recons):
path = 'sample_%d.png' % i
_save_images([recon], path, self.step, self.output_dir)
def train_one_epoch(self, report_every=180, test_interval=1000):
last_report = 0
for i, batch in enumerate(self.train_loader):
loss = self.train_batch(batch)
self.step += 1
if time.time() - last_report > report_every:
progress = 100. * i / len(self.train_loader)
_log_loss(loss, 'Train', progress)
last_report = time.time()
if self.step % test_interval == 0:
self.generate()
loss = self.test()
path = self.save()
self.manage_saved(path, loss['pixel'])
self.epoch += 1
def __repr__(self):
opt = '\n'.join([
'%s: %s' % (key, val) for key, val in self.optimizers.items()
])
beta = '\n'.join([
'Beta: %s' % self.beta,
'Beta Step: %s' % self.beta_step,
'Target KLD: %s' % self.target_kld,
'Beta Momuntum: %s' % self.beta_momentum,
])
return 'Epoch: %d\nStep: %d\nModel: %s\nOptimizers: %s\n%s\n' % (
self.epoch, self.step, self.model, opt, beta
)
| [
"torch.no_grad",
"torch.nn.functional.mse_loss",
"torch.randn_like",
"torch.load"
] | 1.1.0 | hellomoto-ai/splatoon2-ml | 4bd24eed527d6b56ce4369b70d24f20058962383 |
1.0 | import logging
import time
import os
import torch
from utils.lr_scheduler import WarmupMultiStepLR
from net import Network
def create_logger(cfg):
dataset = cfg.DATASET.DATASET
net_type = cfg.BACKBONE.TYPE
module_type = cfg.MODULE.TYPE
log_dir = os.path.join(cfg.OUTPUT_DIR, cfg.NAME, "logs")
if not os.path.exists(log_dir):
os.makedirs(log_dir)
time_str = time.strftime("%Y-%m-%d-%H-%M")
log_name = "{}_{}_{}_{}.log".format(dataset, net_type, module_type, time_str)
log_file = os.path.join(log_dir, log_name)
# set up logger
print("=> creating log {}".format(log_file))
head = "%(asctime)-15s %(message)s"
logging.basicConfig(filename=str(log_file), format=head)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
console = logging.StreamHandler()
logging.getLogger("").addHandler(console)
logger.info("---------------------Cfg is set as follow--------------------")
logger.info(cfg)
logger.info("-------------------------------------------------------------")
return logger, log_file
def get_optimizer(cfg, model):
base_lr = cfg.TRAIN.OPTIMIZER.BASE_LR
params = []
for name, p in model.named_parameters():
if p.requires_grad:
params.append({"params": p})
if cfg.TRAIN.OPTIMIZER.TYPE == "SGD":
optimizer = torch.optim.SGD(
params,
lr=base_lr,
momentum=cfg.TRAIN.OPTIMIZER.MOMENTUM,
weight_decay=cfg.TRAIN.OPTIMIZER.WEIGHT_DECAY,
nesterov=True,
)
elif cfg.TRAIN.OPTIMIZER.TYPE == "ADAM":
optimizer = torch.optim.Adam(
params,
lr=base_lr,
betas=(0.9, 0.999),
weight_decay=cfg.TRAIN.OPTIMIZER.WEIGHT_DECAY,
)
return optimizer
def get_scheduler(cfg, optimizer):
if cfg.TRAIN.LR_SCHEDULER.TYPE == "multistep":
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer,
cfg.TRAIN.LR_SCHEDULER.LR_STEP,
gamma=cfg.TRAIN.LR_SCHEDULER.LR_FACTOR,
)
elif cfg.TRAIN.LR_SCHEDULER.TYPE == "cosine":
if cfg.TRAIN.LR_SCHEDULER.COSINE_DECAY_END > 0:
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, T_max=cfg.TRAIN.LR_SCHEDULER.COSINE_DECAY_END, eta_min=1e-4
)
else:
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, T_max=cfg.TRAIN.MAX_EPOCH, eta_min=1e-4
)
elif cfg.TRAIN.LR_SCHEDULER.TYPE == "warmup":
scheduler = WarmupMultiStepLR(
optimizer,
cfg.TRAIN.LR_SCHEDULER.LR_STEP,
gamma=cfg.TRAIN.LR_SCHEDULER.LR_FACTOR,
warmup_epochs=cfg.TRAIN.LR_SCHEDULER.WARM_EPOCH,
)
else:
raise NotImplementedError("Unsupported LR Scheduler: {}".format(cfg.TRAIN.LR_SCHEDULER.TYPE))
return scheduler
def get_model(cfg, num_classes, device, logger):
model = Network(cfg, mode="train", num_classes=num_classes)
if cfg.BACKBONE.FREEZE == True:
model.freeze_backbone()
logger.info("Backbone has been freezed")
if cfg.CPU_MODE:
model = model.to(device)
else:
model = torch.nn.DataParallel(model).cuda()
return model
def get_category_list(annotations, num_classes, cfg):
num_list = [0] * num_classes
cat_list = []
print("Weight List has been produced")
for anno in annotations:
category_id = anno["category_id"]
num_list[category_id] += 1
cat_list.append(category_id)
return num_list, cat_list | [
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.optim.Adam",
"torch.optim.SGD",
"torch.optim.lr_scheduler.MultiStepLR",
"torch.nn.DataParallel"
] | 1.0.1 | tasx0823/BBN | 7992e908842f5934f0d1ee3f430d796621e81975 |
1.0 | import _init_paths
from net import Network
from config import cfg, update_config
from dataset import *
import numpy as np
import torch
import os
from torch.utils.data import DataLoader
from tqdm import tqdm
import argparse
from core.evaluate import FusionMatrix
def parse_args():
parser = argparse.ArgumentParser(description="BBN evaluation")
parser.add_argument(
"--cfg",
help="decide which cfg to use",
required=True,
default="configs/cifar10.yaml",
type=str,
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
return args
def valid_model(dataLoader, model, cfg, device, num_classes):
result_list = []
pbar = tqdm(total=len(dataLoader))
model.eval()
top1_count, top2_count, top3_count, index, fusion_matrix = (
[],
[],
[],
0,
FusionMatrix(num_classes),
)
func = torch.nn.Softmax(dim=1)
with torch.no_grad():
for i, (image, image_labels, meta) in enumerate(dataLoader):
image = image.to(device)
output = model(image)
result = func(output)
_, top_k = result.topk(5, 1, True, True)
score_result = result.cpu().numpy()
fusion_matrix.update(score_result.argmax(axis=1), image_labels.numpy())
topk_result = top_k.cpu().tolist()
if not "image_id" in meta:
meta["image_id"] = [0] * image.shape[0]
image_ids = meta["image_id"]
for i, image_id in enumerate(image_ids):
result_list.append(
{
"image_id": image_id,
"image_label": int(image_labels[i]),
"top_3": topk_result[i],
}
)
top1_count += [topk_result[i][0] == image_labels[i]]
top2_count += [image_labels[i] in topk_result[i][0:2]]
top3_count += [image_labels[i] in topk_result[i][0:3]]
index += 1
now_acc = np.sum(top1_count) / index
pbar.set_description("Now Top1:{:>5.2f}%".format(now_acc * 100))
pbar.update(1)
top1_acc = float(np.sum(top1_count) / len(top1_count))
top2_acc = float(np.sum(top2_count) / len(top1_count))
top3_acc = float(np.sum(top3_count) / len(top1_count))
print(
"Top1:{:>5.2f}% Top2:{:>5.2f}% Top3:{:>5.2f}%".format(
top1_acc * 100, top2_acc * 100, top3_acc * 100
)
)
pbar.close()
if __name__ == "__main__":
args = parse_args()
update_config(cfg, args)
test_set = eval(cfg.DATASET.DATASET)("valid", cfg)
num_classes = test_set.get_num_classes()
device = torch.device("cpu" if cfg.CPU_MODE else "cuda")
model = Network(cfg, mode="test", num_classes=num_classes)
model_dir = os.path.join(cfg.OUTPUT_DIR, cfg.NAME, "models")
model_file = cfg.TEST.MODEL_FILE
if "/" in model_file:
model_path = model_file
else:
model_path = os.path.join(model_dir, model_file)
model.load_model(model_path)
if cfg.CPU_MODE:
model = model.to(device)
else:
model = torch.nn.DataParallel(model).cuda()
testLoader = DataLoader(
test_set,
batch_size=cfg.TEST.BATCH_SIZE,
shuffle=False,
num_workers=cfg.TEST.NUM_WORKERS,
pin_memory=cfg.PIN_MEMORY,
)
valid_model(testLoader, model, cfg, device, num_classes)
| [
"torch.device",
"torch.nn.Softmax",
"torch.no_grad",
"torch.utils.data.DataLoader",
"torch.nn.DataParallel"
] | 1.0.1 | tasx0823/BBN | 7992e908842f5934f0d1ee3f430d796621e81975 |
0.3 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from logging import Logger
from typing import Any, Dict, List, Optional, Type
import torch
from ax.core.data import Data
from ax.core.experiment import Experiment
from ax.core.multi_type_experiment import MultiTypeExperiment
from ax.core.objective import MultiObjective
from ax.core.observation import ObservationFeatures
from ax.core.optimization_config import OptimizationConfig
from ax.core.search_space import SearchSpace
from ax.core.types import TConfig
from ax.modelbridge.discrete import DiscreteModelBridge
from ax.modelbridge.multi_objective_torch import MultiObjectiveTorchModelBridge
from ax.modelbridge.random import RandomModelBridge
from ax.modelbridge.registry import (
Cont_X_trans,
Models,
MT_MTGP_trans,
ST_MTGP_trans,
Y_trans,
)
from ax.modelbridge.torch import TorchModelBridge
from ax.modelbridge.transforms.base import Transform
from ax.modelbridge.transforms.convert_metric_names import tconfig_from_mt_experiment
from ax.models.torch.botorch import (
BotorchModel,
TAcqfConstructor,
TModelConstructor,
TModelPredictor,
TOptimizer,
)
from ax.models.torch.botorch_defaults import (
get_and_fit_model,
get_NEI,
predict_from_model,
scipy_optimizer,
)
from ax.utils.common.logger import get_logger
from ax.utils.common.typeutils import checked_cast
logger: Logger = get_logger(__name__)
DEFAULT_TORCH_DEVICE = torch.device("cpu")
DEFAULT_EHVI_BATCH_LIMIT = 5
"""
Module containing functions that generate standard models, such as Sobol,
GP+EI, etc.
Note: a special case here is a composite generator, which requires an
additional ``GenerationStrategy`` and is able to delegate work to multiple models
(for instance, to a random model to generate the first trial, and to an
optimization model for subsequent trials).
"""
def get_sobol(
search_space: SearchSpace,
seed: Optional[int] = None,
deduplicate: bool = False,
init_position: int = 0,
scramble: bool = True,
) -> RandomModelBridge:
"""Instantiates a Sobol sequence quasi-random generator.
Args:
search_space: Sobol generator search space.
kwargs: Custom args for sobol generator.
Returns:
RandomModelBridge, with SobolGenerator as model.
"""
return checked_cast(
RandomModelBridge,
Models.SOBOL(
search_space=search_space,
seed=seed,
deduplicate=deduplicate,
init_position=init_position,
scramble=scramble,
),
)
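# Example usage sketch (assumes Ax's standard SearchSpace/RangeParameter API;
# illustrative only, not part of the original module):
#
#     from ax import ParameterType, RangeParameter, SearchSpace
#     space = SearchSpace(parameters=[
#         RangeParameter("x", ParameterType.FLOAT, lower=0.0, upper=1.0),
#     ])
#     sobol = get_sobol(space, seed=0)
#     generator_run = sobol.gen(n=5)  # five quasi-random candidate arms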
def get_uniform(
search_space: SearchSpace, deduplicate: bool = False, seed: Optional[int] = None
) -> RandomModelBridge:
"""Instantiate uniform generator.
Args:
search_space: Uniform generator search space.
kwargs: Custom args for uniform generator.
Returns:
RandomModelBridge, with UniformGenerator as model.
"""
return checked_cast(
RandomModelBridge,
Models.UNIFORM(search_space=search_space, seed=seed, deduplicate=deduplicate),
)
def get_botorch(
experiment: Experiment,
data: Data,
search_space: Optional[SearchSpace] = None,
dtype: torch.dtype = torch.double,
device: torch.device = DEFAULT_TORCH_DEVICE,
transforms: List[Type[Transform]] = Cont_X_trans + Y_trans,
transform_configs: Optional[Dict[str, TConfig]] = None,
model_constructor: TModelConstructor = get_and_fit_model,
model_predictor: TModelPredictor = predict_from_model,
acqf_constructor: TAcqfConstructor = get_NEI, # pyre-ignore[9]
acqf_optimizer: TOptimizer = scipy_optimizer, # pyre-ignore[9]
refit_on_cv: bool = False,
refit_on_update: bool = True,
optimization_config: Optional[OptimizationConfig] = None,
) -> TorchModelBridge:
"""Instantiates a BotorchModel."""
if data.df.empty: # pragma: no cover
raise ValueError("`BotorchModel` requires non-empty data.")
return checked_cast(
TorchModelBridge,
Models.BOTORCH(
experiment=experiment,
data=data,
search_space=search_space or experiment.search_space,
torch_dtype=dtype,
torch_device=device,
transforms=transforms,
transform_configs=transform_configs,
model_constructor=model_constructor,
model_predictor=model_predictor,
acqf_constructor=acqf_constructor,
acqf_optimizer=acqf_optimizer,
refit_on_cv=refit_on_cv,
refit_on_update=refit_on_update,
optimization_config=optimization_config,
),
)
def get_GPEI(
experiment: Experiment,
data: Data,
search_space: Optional[SearchSpace] = None,
dtype: torch.dtype = torch.double,
device: torch.device = DEFAULT_TORCH_DEVICE,
) -> TorchModelBridge:
"""Instantiates a GP model that generates points with EI."""
if data.df.empty: # pragma: no cover
raise ValueError("GP+EI BotorchModel requires non-empty data.")
return checked_cast(
TorchModelBridge,
Models.BOTORCH(
experiment=experiment,
data=data,
search_space=search_space or experiment.search_space,
torch_dtype=dtype,
torch_device=device,
),
)
def get_GPKG(
experiment: Experiment,
data: Data,
search_space: Optional[SearchSpace] = None,
cost_intercept: float = 0.01,
dtype: torch.dtype = torch.double,
device: torch.device = DEFAULT_TORCH_DEVICE,
transforms: List[Type[Transform]] = Cont_X_trans + Y_trans,
transform_configs: Optional[Dict[str, TConfig]] = None,
**kwargs: Any,
) -> TorchModelBridge:
"""Instantiates a GP model that generates points with KG."""
if search_space is None:
search_space = experiment.search_space
if data.df.empty: # pragma: no cover
raise ValueError("GP+KG BotorchModel requires non-empty data.")
inputs = {
"search_space": search_space,
"experiment": experiment,
"data": data,
"cost_intercept": cost_intercept,
"torch_dtype": dtype,
"torch_device": device,
"transforms": transforms,
"transform_configs": transform_configs,
}
if any(p.is_fidelity for k, p in experiment.parameters.items()):
inputs["linear_truncated"] = kwargs.get("linear_truncated", True)
return checked_cast(TorchModelBridge, Models.GPKG(**inputs)) # pyre-ignore: [16]
# TODO[Lena]: how to instantiate MTGP through the enum? The Multi-type MTGP requires
# a MultiTypeExperiment, so we would need validation for that, but more importantly,
# we need to create `trial_index_to_type` as in the factory function below.
# Maybe `MultiTypeExperiment` could have that mapping as a property?
def get_MTGP(
experiment: Experiment,
data: Data,
search_space: Optional[SearchSpace] = None,
trial_index: Optional[int] = None,
) -> TorchModelBridge:
"""Instantiates a Multi-task Gaussian Process (MTGP) model that generates
points with EI.
If the input experiment is a MultiTypeExperiment then a
Multi-type Multi-task GP model will be instantiated.
Otherwise, the model will be a Single-type Multi-task GP.
"""
if isinstance(experiment, MultiTypeExperiment):
trial_index_to_type = {
t.index: t.trial_type for t in experiment.trials.values()
}
transforms = MT_MTGP_trans
transform_configs = {
"TrialAsTask": {"trial_level_map": {"trial_type": trial_index_to_type}},
"ConvertMetricNames": tconfig_from_mt_experiment(experiment),
}
else:
# Set transforms for a Single-type MTGP model.
transforms = ST_MTGP_trans
transform_configs = None
# Choose the status quo features for the experiment from the selected trial.
# If trial_index is None, we will look for a status quo from the last
# experiment trial to use as a status quo for the experiment.
if trial_index is None:
trial_index = len(experiment.trials) - 1
elif trial_index >= len(experiment.trials):
raise ValueError("trial_index is bigger than the number of experiment trials")
# pyre-fixme[16]: `ax.core.base_trial.BaseTrial` has no attribute `status_quo`.
status_quo = experiment.trials[trial_index].status_quo
if status_quo is None:
status_quo_features = None
else:
status_quo_features = ObservationFeatures(
parameters=status_quo.parameters, trial_index=trial_index
)
return TorchModelBridge(
experiment=experiment,
search_space=search_space or experiment.search_space,
data=data,
model=BotorchModel(),
transforms=transforms,
# pyre-fixme[6]: Expected `Optional[Dict[str, Dict[str,
# typing.Union[botorch.acquisition.acquisition.AcquisitionFunction, float,
# int, str]]]]` for 6th param but got `Optional[Dict[str,
# typing.Union[Dict[str, Dict[str, Dict[int, Optional[str]]]], Dict[str,
# typing.Union[botorch.acquisition.acquisition.AcquisitionFunction, float,
# int, str]]]]]`.
transform_configs=transform_configs,
torch_dtype=torch.double,
torch_device=DEFAULT_TORCH_DEVICE,
status_quo_features=status_quo_features,
)
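# A minimal usage sketch of get_MTGP (illustrative only; assumes `experiment` has at
# least one trial with a status quo arm set, per the trial_index logic above):
#
#   mtgp = get_MTGP(experiment=experiment, data=experiment.fetch_data())
#   generator_run = mtgp.gen(n=1)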
def get_factorial(search_space: SearchSpace) -> DiscreteModelBridge:
"""Instantiates a factorial generator."""
return checked_cast(
DiscreteModelBridge,
Models.FACTORIAL(search_space=search_space, fit_out_of_design=True),
)
def get_empirical_bayes_thompson(
experiment: Experiment,
data: Data,
search_space: Optional[SearchSpace] = None,
num_samples: int = 10000,
min_weight: Optional[float] = None,
uniform_weights: bool = False,
) -> DiscreteModelBridge:
"""Instantiates an empirical Bayes / Thompson sampling model."""
if data.df.empty: # pragma: no cover
raise ValueError("Empirical Bayes Thompson sampler requires non-empty data.")
return checked_cast(
DiscreteModelBridge,
Models.EMPIRICAL_BAYES_THOMPSON(
experiment=experiment,
data=data,
search_space=search_space or experiment.search_space,
num_samples=num_samples,
min_weight=min_weight,
uniform_weights=uniform_weights,
fit_out_of_design=True,
),
)
def get_thompson(
experiment: Experiment,
data: Data,
search_space: Optional[SearchSpace] = None,
num_samples: int = 10000,
min_weight: Optional[float] = None,
uniform_weights: bool = False,
) -> DiscreteModelBridge:
"""Instantiates a Thompson sampling model."""
if data.df.empty: # pragma: no cover
raise ValueError("Thompson sampler requires non-empty data.")
return checked_cast(
DiscreteModelBridge,
Models.THOMPSON(
experiment=experiment,
data=data,
search_space=search_space or experiment.search_space,
num_samples=num_samples,
min_weight=min_weight,
uniform_weights=uniform_weights,
fit_out_of_design=True,
),
)
def get_GPMES(
experiment: Experiment,
data: Data,
search_space: Optional[SearchSpace] = None,
cost_intercept: float = 0.01,
dtype: torch.dtype = torch.double,
device: torch.device = DEFAULT_TORCH_DEVICE,
transforms: List[Type[Transform]] = Cont_X_trans + Y_trans,
transform_configs: Optional[Dict[str, TConfig]] = None,
**kwargs: Any,
) -> TorchModelBridge:
"""Instantiates a GP model that generates points with MES."""
if search_space is None:
search_space = experiment.search_space
if data.df.empty: # pragma: no cover
raise ValueError("GP + MES BotorchModel requires non-empty data.")
inputs = {
"search_space": search_space,
"experiment": experiment,
"data": data,
"cost_intercept": cost_intercept,
"torch_dtype": dtype,
"torch_device": device,
"transforms": transforms,
"transform_configs": transform_configs,
}
if any(p.is_fidelity for k, p in experiment.parameters.items()):
inputs["linear_truncated"] = kwargs.get("linear_truncated", True)
return checked_cast(TorchModelBridge, Models.GPMES(**inputs)) # pyre-ignore: [16]
def get_MOO_EHVI(
experiment: Experiment,
data: Data,
ref_point: Dict[str, float],
search_space: Optional[SearchSpace] = None,
dtype: torch.dtype = torch.double,
device: torch.device = (
torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
),
) -> MultiObjectiveTorchModelBridge:
"""Instantiates a multi-objective model that generates points with EHVI.
Requires a `ref_point`, a dictionary of the metric name to the reference point value
for every objective being optimized. An arm only improves hypervolume if it is
strictly better than this point in all metrics.
"""
# pyre-ignore: [16] `Optional` has no attribute `objective`.
if not isinstance(experiment.optimization_config.objective, MultiObjective):
raise ValueError("Multi-objective optimization requires multiple objectives.")
if data.df.empty: # pragma: no cover
raise ValueError("MultiObjectiveOptimization requires non-empty data.")
return checked_cast(
MultiObjectiveTorchModelBridge,
Models.MOO(
experiment=experiment,
data=data,
ref_point=ref_point,
search_space=search_space or experiment.search_space,
torch_dtype=dtype,
torch_device=device,
default_model_gen_options={
"acquisition_function_kwargs": {"sequential": True},
"optimizer_kwargs": {
# having a batch limit is very important for avoiding
# memory issues in the initialization
"batch_limit": DEFAULT_EHVI_BATCH_LIMIT
},
},
),
)
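# A minimal usage sketch of get_MOO_EHVI (illustrative only; "metric_a"/"metric_b" are
# hypothetical metric names and assume the optimization config is a MultiObjective over them):
#
#   moo_ehvi = get_MOO_EHVI(
#       experiment=experiment,
#       data=experiment.fetch_data(),
#       ref_point={"metric_a": 0.0, "metric_b": 0.0},
#   )
#   generator_run = moo_ehvi.gen(n=1)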
def get_MOO_PAREGO(
experiment: Experiment,
data: Data,
ref_point: Optional[List[float]] = None,
search_space: Optional[SearchSpace] = None,
dtype: torch.dtype = torch.double,
device: torch.device = DEFAULT_TORCH_DEVICE,
) -> MultiObjectiveTorchModelBridge:
"""Instantiates a multi-objective model that generates points with ParEGO.
    qParEGO optimizes random augmented Chebyshev scalarizations of the multiple
    objectives. This allows it to explore non-convex Pareto frontiers.
"""
# pyre-ignore: [16] `Optional` has no attribute `objective`.
if not isinstance(experiment.optimization_config.objective, MultiObjective):
raise ValueError("Multi-Objective optimization requires multiple objectives")
if data.df.empty:
raise ValueError("MultiObjectiveOptimization requires non-empty data.")
return checked_cast(
MultiObjectiveTorchModelBridge,
Models.MOO(
experiment=experiment,
data=data,
ref_point=ref_point,
search_space=search_space or experiment.search_space,
torch_dtype=dtype,
torch_device=device,
acqf_constructor=get_NEI,
default_model_gen_options={
"acquisition_function_kwargs": {
"chebyshev_scalarization": True,
"sequential": True,
}
},
),
)
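# get_MOO_PAREGO is invoked like get_MOO_EHVI above, except that `ref_point` is an
# optional list here; a minimal, illustrative sketch:
#
#   parego = get_MOO_PAREGO(experiment=experiment, data=experiment.fetch_data())
#   generator_run = parego.gen(n=1)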
def get_MOO_RS(
experiment: Experiment,
data: Data,
ref_point: Optional[List[float]] = None,
search_space: Optional[SearchSpace] = None,
dtype: torch.dtype = torch.double,
device: torch.device = DEFAULT_TORCH_DEVICE,
) -> MultiObjectiveTorchModelBridge:
"""Instantiates a Random Scalarization multi-objective model.
Chooses a different random linear scalarization of the objectives
for generating each new candidate arm. This will only explore the
    convex hull of the Pareto frontier.
"""
# pyre-ignore: [16] `Optional` has no attribute `objective`.
if not isinstance(experiment.optimization_config.objective, MultiObjective):
raise ValueError("Multi-Objective optimization requires multiple objectives")
if data.df.empty:
raise ValueError("MultiObjectiveOptimization requires non-empty data.")
return checked_cast(
MultiObjectiveTorchModelBridge,
Models.MOO(
experiment=experiment,
data=data,
ref_point=ref_point,
search_space=search_space or experiment.search_space,
torch_dtype=dtype,
torch_device=device,
acqf_constructor=get_NEI,
default_model_gen_options={
"acquisition_function_kwargs": {
"random_scalarization": True,
"sequential": True,
}
},
),
)
| [
"torch.device",
"torch.cuda.is_available"
] | 0.3.1 | KireinaHoro/Ax | 16cb868911eecba323759e2e129df8833361e614 |
1.7 | """Contains a class which extracts the needed arguments of an arbitrary
method/function and wraps them for future usage, e.g. correctly choosing
the needed arguments and passing them on to the original function.
"""
import inspect
import copy
import torch
from ..problem.spaces.points import Points
class UserFunction:
"""Wraps a function, so that it can be called with arbitrary input arguments.
Parameters
----------
fun : callable
The original function that should be wrapped.
defaults : dict, optional
        Possible default arguments of the function. If none are specified, the wrapper
        will check by itself if there are any.
    args : dict, optional
        All arguments of the function. If none are specified, the wrapper will
        check by itself if there are any.
Notes
-----
Uses inspect.getfullargspec(fun) to get the possible input arguments.
When called just extracts the needed arguments and passes them to the
original function.
"""
def __init__(self, fun, defaults={}, args={}):
if isinstance(fun, (UserFunction, DomainUserFunction)):
self.fun = fun.fun
self.defaults = fun.defaults
self.args = fun.args
else:
self._transform_to_user_function(fun, defaults, args)
def _transform_to_user_function(self, fun, defaults, args):
self.fun = fun
self.defaults = defaults
self.args = args
if callable(self.fun) and self.defaults == {} and self.args == {}:
self._set_input_args_for_function()
def _set_input_args_for_function(self):
f_args = inspect.getfullargspec(self.fun).args
# we check that the function defines all needed parameters
if inspect.getfullargspec(self.fun).varargs is not None or \
inspect.getfullargspec(self.fun).varkw is not None:
raise ValueError("""
Variable arguments are not supported in
UserFunctions. Please use keyword arguments.
""")
f_defaults = inspect.getfullargspec(self.fun).defaults
f_kwonlyargs = inspect.getfullargspec(self.fun).kwonlyargs
#f_kwonlydefaults = inspect.getfullargspec(self.fun).kwonlydefaults
# NOTE: By above check, there should not be kwonlyargs. However, we still catch
# this case here.
self.args = f_args + f_kwonlyargs
# defaults always align at the end of the args
self.defaults = {}
if not f_defaults is None:
self.defaults = {self.args[-i]: f_defaults[-i]
for i in range(len(f_defaults), 0, -1)}
#if not f_kwonlydefaults is None:
# self.defaults.update(f_kwonlydefaults)
    def __call__(self, args={}, vectorize=False):
        """To evaluate the function. Will automatically extract the needed arguments
from the input data and will set the possible default values.
Parameters
----------
args : dict or torchphysics.Points
The input data, where the function should be evaluated.
vectorize : bool, optional
            If False (the default), the original function is assumed to work on a
            whole batch of data at once; if True, a loop is used to evaluate the
            function for each batch element.
Returns
-------
torch.tensor
The output values of the function.
"""
if isinstance(args, Points):
args = args.coordinates
# check that every necessary arg is given
for key in self.necessary_args:
assert key in args, \
f"The argument '{key}' is necessary in {self.__name__} but not given."
# if necessary, pass defaults
inp = {key: args[key] for key in self.args if key in args}
inp.update({key: self.defaults[key] for key in self.args if key not in args})
if not vectorize:
return self.evaluate_function(**inp)
else:
return self.apply_to_batch(inp)
def evaluate_function(self, **inp):
"""Evaluates the original input function. Should not be used directly,
        rather use the call method.
"""
if callable(self.fun):
return self.fun(**inp)
return self.fun
def apply_to_batch(self, inp):
"""Apply the function to a batch of elements by running a for-loop.
        We assume that all inputs either have a batch (i.e. maximum) dimension or
        are a constant parameter.
Parameters
----------
        inp : torchphysics.Points
            The Points object containing the input data.
Returns
-------
torch.tensor
The output values of the function, for each input.
"""
batch_size = max(len(inp[key]) for key in inp)
out = []
for i in range(batch_size):
inp_i = {}
for key in inp:
if len(inp[key]) == batch_size:
inp_i[key] = inp[key][i]
else:
inp_i[key] = inp[key]
o = self.fun(**inp_i)
if o is not None:
out.append(o)
return out
def partially_evaluate(self, **args):
"""(partially) evaluates a given function.
Parameters
----------
**args :
The arguments where the function should be (partially) evaluated.
Returns
-------
Out : value or UserFunction
            If the input arguments are enough to evaluate the whole function, the
corresponding output is returned.
If some needed arguments are missing, a copy of this UserFunction will
be returned. Whereby the values of **args will be added to the
default values of the returned UserFunction.
"""
if callable(self.fun):
if all(arg in args for arg in self.necessary_args):
inp = {key: args[key] for key in self.args if key in args}
inp.update({key: self.defaults[key] for key in self.args if key not in args})
return self.fun(**inp)
else:
# to avoid manipulation of given param obj, we create a copy
copy_self = copy.deepcopy(self)
copy_self.set_default(**args)
return copy_self
return self.fun
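    # A small illustration of the behaviour described above (hypothetical values):
    #
    #   f = UserFunction(lambda x, t: x + t)
    #   g = f.partially_evaluate(t=1.0)   # "x" still missing -> returns a copy with t=1.0 as default
    #   out = f.partially_evaluate(x=2.0, t=1.0)   # all arguments given -> returns 3.0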
def __name__(self):
"""The name of the function
Returns
-------
str
The name of the function
"""
return self.fun.__name__
def set_default(self, **args):
"""Sets a input argument to given value.
Parameters
----------
**args:
The value the input should be set to.
"""
self.defaults.update({key: args[key] for key in args if key in self.args})
def remove_default(self, *args, **kwargs):
"""Removes an default value of a input argument.
Parameters
----------
*args, **kwargs:
The arguments for which the default values should be deleted.
"""
for key in args:
self.defaults.pop(key)
for key in kwargs.keys():
self.defaults.pop(key)
def __deepcopy__(self, memo):
"""Creates a copy of the function
"""
cls = self.__class__
copy_object = cls.__new__(cls, self.fun)
memo[id(self)] = copy_object
for k, v in self.__dict__.items():
setattr(copy_object, k, copy.deepcopy(v, memo))
return copy_object
@property
def necessary_args(self):
"""Returns the function arguments that are needed to evaluate this function.
Returns
-------
list :
The needed arguments.
"""
return [arg for arg in self.args if arg not in self.defaults]
@property
def optional_args(self):
"""Returns the function arguments that are optional to evaluate this function.
Returns
-------
list :
The optional arguments.
"""
return [arg for arg in self.args if arg in self.defaults]
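# A minimal usage sketch of UserFunction (hypothetical function and values; torch tensors
# are used only because the rest of this module operates on them):
#
#   def my_fun(x, t, factor=2.0):
#       return factor * x + t
#
#   wrapped = UserFunction(my_fun)
#   # unused keys such as "y" are ignored and the default for "factor" is filled in:
#   out = wrapped({"x": torch.ones(5, 1), "y": torch.zeros(5, 1), "t": torch.ones(5, 1)})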
class DomainUserFunction(UserFunction):
"""Extension of the original UserFunctions, that are used in the Domain-Class.
Parameters
----------
fun : callable
The original function that should be wrapped.
defaults : dict, optional
        Possible default arguments of the function. If none are specified, the wrapper
        will check by itself if there are any.
    args : dict, optional
        All arguments of the function. If none are specified, the wrapper will
        check by itself if there are any.
Notes
-----
    The only difference to a normal UserFunction is how the evaluation
    of the original function is handled. Since all domains use PyTorch,
    we check that the output is always a torch.Tensor. If the function
    is not constant, we also append an extra dimension to the output, so that the
    domains can work with it correctly.
"""
    def __call__(self, args={}, device='cpu'):
        """To evaluate the function. Will automatically extract the needed arguments
from the input data and will set the possible default values.
Parameters
----------
args : dict or torchphysics.Points
The input data, where the function should be evaluated.
device : str, optional
            The device on which the output values of the function should lie.
Default is 'cpu'.
Returns
-------
torch.tensor
The output values of the function.
"""
if isinstance(args, Points):
args = args.coordinates
if len(args) != 0: # set the device correctly
device = args[list(args.keys())[0]].device
# check that every necessary arg is given
for key in self.necessary_args:
assert key in args, \
f"The argument '{key}' is necessary in {self.__name__} but not given."
# if necessary, pass defaults
inp = {key: args[key] for key in self.args if key in args}
inp.update({key: self.defaults[key] for key in self.args if key not in args})
return self.evaluate_function(device=device, **inp)
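    # A small illustration of the behaviour described above (hypothetical values): a callable,
    # non-constant function gets a trailing dimension appended to its output,
    #
    #   radius = DomainUserFunction(lambda t: t + 1.0)
    #   vals = radius({"t": torch.linspace(0, 1, 10)})   # shape [10, 1], on the device of "t"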
def evaluate_function(self, device='cpu', **inp):
"""Evaluates the original input function. Should not be used directly,
        rather use the call method.
Parameters
----------
device : str, optional
            The device on which the output values of the function should lie.
Default is 'cpu'.
inp
The input values.
"""
if callable(self.fun):
fun_eval = self.fun(**inp)
if not isinstance(fun_eval, torch.Tensor):
fun_eval = torch.tensor(fun_eval, device=device)
return fun_eval[:, None]
else:
if isinstance(self.fun, torch.Tensor):
self.fun = self.fun.to(device)
return self.fun
else:
return torch.tensor(self.fun, device=device).float() | [
"torch.tensor"
] | 1.7.1 | uwe-iben/torchphysics | 775d9aca71752a568f1fca972c958b99107f3b7c |
1.0 | # coding=utf-8
# Copyright 2020 The HuggingFace Inc. team, The Microsoft Research team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
if is_torch_available():
import torch
from transformers import (
ProphetNetConfig,
ProphetNetDecoder,
ProphetNetEncoder,
ProphetNetForCausalLM,
ProphetNetForConditionalGeneration,
ProphetNetModel,
ProphetNetTokenizer,
)
class ProphetNetModelTester:
def __init__(
self,
parent,
vocab_size=99,
batch_size=13,
hidden_size=16,
encoder_seq_length=7,
decoder_seq_length=9,
# For common tests
is_training=True,
use_attention_mask=True,
use_labels=True,
decoder_start_token_id=0,
encoder_ffn_dim=32,
num_encoder_layers=4,
num_encoder_attention_heads=4,
decoder_ffn_dim=32,
num_decoder_layers=4,
num_decoder_attention_heads=4,
max_position_embeddings=30,
is_encoder_decoder=True,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
ngram=2,
num_buckets=32,
relative_max_distance=128,
disable_ngram_loss=False,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.encoder_seq_length = encoder_seq_length
self.decoder_seq_length = decoder_seq_length
# For common tests
self.seq_length = self.decoder_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_decoder_layers
self.num_encoder_layers = num_encoder_layers
self.num_decoder_layers = num_decoder_layers
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_ffn_dim = encoder_ffn_dim
self.num_attention_heads = num_decoder_attention_heads
self.num_encoder_attention_heads = num_encoder_attention_heads
self.num_decoder_attention_heads = num_decoder_attention_heads
self.eos_token_id = eos_token_id
self.bos_token_id = bos_token_id
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.ngram = ngram
self.num_buckets = num_buckets
self.relative_max_distance = relative_max_distance
self.disable_ngram_loss = disable_ngram_loss
self.max_position_embeddings = max_position_embeddings
self.is_encoder_decoder = is_encoder_decoder
self.scope = None
self.decoder_key_length = decoder_seq_length
self.base_model_out_len = 7
self.num_hidden_states_types = 3 # encoder, decoder_main, decoder_ngram
self.decoder_attention_idx = 2
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
attention_mask = None
decoder_attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)
decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
config = ProphetNetConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_encoder_layers=self.num_encoder_layers,
num_decoder_layers=self.num_decoder_layers,
decoder_ffn_dim=self.decoder_ffn_dim,
encoder_ffn_dim=self.encoder_ffn_dim,
num_encoder_attention_heads=self.num_encoder_attention_heads,
num_decoder_attention_heads=self.num_decoder_attention_heads,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
ngram=self.ngram,
num_buckets=self.num_buckets,
relative_max_distance=self.relative_max_distance,
disable_ngram_loss=self.disable_ngram_loss,
max_position_embeddings=self.max_position_embeddings,
is_encoder_decoder=self.is_encoder_decoder,
return_dict=True,
)
return (
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
)
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
) = self.prepare_config_and_inputs()
encoder_hidden_states = floats_tensor([self.batch_size, self.encoder_seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)
return (
config,
decoder_input_ids,
decoder_attention_mask,
encoder_hidden_states,
encoder_attention_mask,
lm_labels,
)
def check_prepare_lm_labels_via_shift_left(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = ProphetNetModel(config=config)
model.to(torch_device)
model.eval()
# make sure that lm_labels are correctly padded from the right
lm_labels.masked_fill_((lm_labels == self.decoder_start_token_id), self.eos_token_id)
        # add causal pad token mask
triangular_mask = torch.tril(lm_labels.new_ones(lm_labels.shape)).logical_not()
lm_labels.masked_fill_(triangular_mask, self.pad_token_id)
decoder_input_ids = model._shift_right(lm_labels)
for i, (decoder_input_ids_slice, lm_labels_slice) in enumerate(zip(decoder_input_ids, lm_labels)):
# first item
self.parent.assertEqual(decoder_input_ids_slice[0].item(), self.decoder_start_token_id)
if i < decoder_input_ids_slice.shape[-1]:
if i < decoder_input_ids.shape[-1] - 1:
# items before diagonal
self.parent.assertListEqual(
decoder_input_ids_slice[1 : i + 1].tolist(), lm_labels_slice[:i].tolist()
)
# pad items after diagonal
if i < decoder_input_ids.shape[-1] - 2:
self.parent.assertListEqual(
decoder_input_ids_slice[i + 2 :].tolist(), lm_labels_slice[i + 1 : -1].tolist()
)
else:
# all items after square
self.parent.assertListEqual(decoder_input_ids_slice[1:].tolist(), lm_labels_slice[:-1].tolist())
def create_and_check_model(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = ProphetNetModel(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
decoder_output = result.last_hidden_state
decoder_past = result.past_key_values
encoder_output = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(decoder_past), config.num_decoder_layers)
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0]), 2) # cross-attention + uni-directional self-attention
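    # For reference (a sketch of the structure asserted above, under this tester's config):
    # `decoder_past` holds one entry per decoder layer, and each entry bundles the cached
    # uni-directional self-attention states with the cross-attention states.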
def create_and_check_with_lm_head(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = ProphetNetForConditionalGeneration(config=config).to(torch_device).eval()
outputs = model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
labels=lm_labels,
)
self.parent.assertEqual(len(outputs), 5)
self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.decoder_seq_length, self.vocab_size))
self.parent.assertEqual(outputs["loss"].size(), ())
def create_and_check_causal_lm_decoder(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = ProphetNetForCausalLM(config=config).to(torch_device).eval()
outputs = model(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
labels=lm_labels,
)
self.parent.assertEqual(len(outputs), 4)
self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.decoder_seq_length, self.vocab_size))
self.parent.assertEqual(outputs["loss"].size(), ())
def create_and_check_generate_with_past_key_value_states(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = ProphetNetForConditionalGeneration(config=config).to(torch_device).eval()
torch.manual_seed(0)
output_without_past_cache = model.generate(
input_ids[:1], num_beams=2, max_length=5, do_sample=True, use_cache=False
)
torch.manual_seed(0)
output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=5, do_sample=True)
self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache))
def create_and_check_model_fp16_forward(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = ProphetNetModel(config=config).to(torch_device).half().eval()
output = model(input_ids, decoder_input_ids=input_ids, attention_mask=attention_mask)["last_hidden_state"]
self.parent.assertFalse(torch.isnan(output).any().item())
def create_and_check_encoder_decoder_shared_weights(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
for model_class in [ProphetNetModel, ProphetNetForConditionalGeneration]:
torch.manual_seed(0)
model = model_class(config=config).to(torch_device).eval()
# load state dict copies weights but does not tie them
if model_class == ProphetNetForConditionalGeneration:
model.prophetnet.encoder.load_state_dict(model.prophetnet.decoder.state_dict(), strict=False)
else:
model.encoder.load_state_dict(model.decoder.state_dict(), strict=False)
torch.manual_seed(0)
tied_config = copy.deepcopy(config)
tied_config.tie_encoder_decoder = True
tied_model = model_class(config=tied_config).to(torch_device).eval()
model_result = model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
return_dict=True,
)
tied_model_result = tied_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
return_dict=True,
)
# check that models has less parameters
self.parent.assertLess(
sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters())
)
random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item()
# check that outputs are equal
self.parent.assertTrue(
torch.allclose(
model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4
)
)
# check that outputs after saving and loading are equal
with tempfile.TemporaryDirectory() as tmpdirname:
tied_model.save_pretrained(tmpdirname)
tied_model = model_class.from_pretrained(tmpdirname)
tied_model.to(torch_device)
tied_model.eval()
# check that models has less parameters
self.parent.assertLess(
sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters())
)
random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item()
tied_model_result = tied_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
# check that outputs are equal
self.parent.assertTrue(
torch.allclose(
model_result[0][0, :, random_slice_idx],
tied_model_result[0][0, :, random_slice_idx],
atol=1e-4,
)
)
def check_fast_integration(
self,
config,
*args,
):
input_ids = torch.tensor([[7, 4, 78, 0, 24, 52, 43]], device=torch_device, dtype=torch.long)
decoder_input_ids = torch.tensor([[12, 62, 25, 11, 47, 15, 14]], device=torch_device, dtype=torch.long)
attention_mask = torch.tensor([[1, 1, 1, 0, 1, 0, 0]], device=torch_device, dtype=torch.long)
decoder_attention_mask = torch.tensor([[1, 1, 1, 0, 0, 1, 0]], device=torch_device, dtype=torch.long)
lm_labels = torch.tensor([[62, 25, 11, 47, 15, 14, 24]], device=torch_device, dtype=torch.long)
torch.manual_seed(0)
config.ngram = 4
model = ProphetNetForConditionalGeneration(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
labels=lm_labels,
return_dict=True,
)
self.parent.assertTrue(torch.allclose(result.loss, torch.tensor(128.2925, device=torch_device), atol=1e-3))
expected_logit_slice = torch.tensor(
[-0.1565, 0.0418, 0.1207, 0.0030, 0.0665, 0.0467, 0.0412], device=torch_device
)
self.parent.assertTrue(torch.allclose(result.logits[0, :, 1], expected_logit_slice, atol=1e-3))
def check_model_with_attn_mask(self, config, input_ids, decoder_input_ids, *args):
model = ProphetNetModel(config=config)
model.to(torch_device)
model.eval()
outputs_no_mask = model(
input_ids=input_ids[:, :5], decoder_input_ids=decoder_input_ids[:, :5], return_dict=True
)
attention_mask = torch.ones_like(input_ids)
decoder_attention_mask = torch.ones_like(decoder_input_ids)
attention_mask[:, 5:] = 0
outputs_with_mask = model(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
return_dict=True,
)
# check encoder
self.parent.assertTrue(
torch.allclose(
outputs_no_mask.encoder_last_hidden_state[0, :, 0],
outputs_with_mask.encoder_last_hidden_state[0, :5, 0],
atol=1e-3,
)
)
# check decoder
# main stream
self.parent.assertTrue(
torch.allclose(
outputs_no_mask.last_hidden_state[0, :, 0], outputs_with_mask.last_hidden_state[0, :5, 0], atol=1e-3
)
)
# predict stream
self.parent.assertTrue(
torch.allclose(
outputs_no_mask.last_hidden_state_ngram[0, :5, 0],
outputs_with_mask.last_hidden_state_ngram[0, :5, 0],
atol=1e-3,
)
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"use_cache": False,
}
return config, inputs_dict
class ProphetNetStandaloneDecoderModelTester:
def __init__(
self,
parent,
vocab_size=99,
batch_size=13,
hidden_size=16,
encoder_seq_length=7,
decoder_seq_length=7,
# For common tests
is_training=True,
is_decoder=True,
use_attention_mask=True,
add_cross_attention=False,
use_cache=False,
use_labels=True,
decoder_start_token_id=0,
encoder_ffn_dim=32,
num_encoder_layers=4,
num_encoder_attention_heads=4,
decoder_ffn_dim=32,
num_decoder_layers=4,
num_decoder_attention_heads=4,
max_position_embeddings=30,
is_encoder_decoder=False,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
ngram=2,
return_dict=True,
num_buckets=32,
relative_max_distance=128,
disable_ngram_loss=False,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.encoder_seq_length = encoder_seq_length
self.decoder_seq_length = decoder_seq_length
# For common tests
self.seq_length = self.decoder_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_decoder_layers
self.num_encoder_layers = num_encoder_layers
self.num_decoder_layers = num_decoder_layers
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_ffn_dim = encoder_ffn_dim
self.num_attention_heads = num_decoder_attention_heads
self.num_encoder_attention_heads = num_encoder_attention_heads
self.num_decoder_attention_heads = num_decoder_attention_heads
self.eos_token_id = eos_token_id
self.bos_token_id = bos_token_id
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.ngram = ngram
self.num_buckets = num_buckets
self.relative_max_distance = relative_max_distance
self.use_cache = use_cache
self.disable_ngram_loss = disable_ngram_loss
self.max_position_embeddings = max_position_embeddings
self.add_cross_attention = add_cross_attention
self.is_encoder_decoder = is_encoder_decoder
self.return_dict = return_dict
self.scope = None
self.decoder_key_length = decoder_seq_length
self.base_model_out_len = 2
self.num_hidden_states_types = 2 # decoder_main, decoder_ngram
self.decoder_attention_idx = 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
config = ProphetNetConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_encoder_layers=self.num_encoder_layers,
num_decoder_layers=self.num_decoder_layers,
decoder_ffn_dim=self.decoder_ffn_dim,
encoder_ffn_dim=self.encoder_ffn_dim,
num_encoder_attention_heads=self.num_encoder_attention_heads,
num_decoder_attention_heads=self.num_decoder_attention_heads,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
use_cache=self.use_cache,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
ngram=self.ngram,
num_buckets=self.num_buckets,
relative_max_distance=self.relative_max_distance,
disable_ngram_loss=self.disable_ngram_loss,
max_position_embeddings=self.max_position_embeddings,
add_cross_attention=self.add_cross_attention,
is_encoder_decoder=self.is_encoder_decoder,
return_dict=self.return_dict,
)
return (
config,
input_ids,
attention_mask,
lm_labels,
)
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
attention_mask,
lm_labels,
) = self.prepare_config_and_inputs()
encoder_hidden_states = floats_tensor([self.batch_size, self.encoder_seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
lm_labels,
)
def create_and_check_decoder_model_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
config.use_cache = True
model = ProphetNetDecoder(config=config).to(torch_device).eval()
# first forward pass
outputs = model(input_ids, use_cache=True)
outputs_use_cache_conf = model(input_ids)
outputs_no_past = model(input_ids, use_cache=False)
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
past_key_values = outputs["past_key_values"]
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
def create_and_check_decoder_model_attention_mask_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
model = ProphetNetDecoder(config=config).to(torch_device).eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = input_ids.shape[-1] // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"]
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
# get two different outputs
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
attention_mask,
lm_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
class ProphetNetStandaloneEncoderModelTester:
def __init__(
self,
parent,
vocab_size=99,
batch_size=13,
hidden_size=16,
encoder_seq_length=7,
decoder_seq_length=7,
# For common tests
is_training=True,
is_decoder=False,
use_attention_mask=True,
add_cross_attention=False,
use_cache=False,
use_labels=True,
decoder_start_token_id=0,
encoder_ffn_dim=32,
num_encoder_layers=4,
num_encoder_attention_heads=4,
decoder_ffn_dim=32,
num_decoder_layers=4,
num_decoder_attention_heads=4,
max_position_embeddings=30,
is_encoder_decoder=False,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
return_dict=True,
num_buckets=32,
relative_max_distance=128,
disable_ngram_loss=False,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.encoder_seq_length = encoder_seq_length
self.decoder_seq_length = decoder_seq_length
# For common tests
self.seq_length = self.decoder_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_decoder_layers
self.num_encoder_layers = num_encoder_layers
self.num_decoder_layers = num_decoder_layers
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_ffn_dim = encoder_ffn_dim
self.num_attention_heads = num_decoder_attention_heads
self.num_encoder_attention_heads = num_encoder_attention_heads
self.num_decoder_attention_heads = num_decoder_attention_heads
self.eos_token_id = eos_token_id
self.bos_token_id = bos_token_id
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.num_buckets = num_buckets
self.relative_max_distance = relative_max_distance
self.use_cache = use_cache
self.disable_ngram_loss = disable_ngram_loss
self.max_position_embeddings = max_position_embeddings
self.add_cross_attention = add_cross_attention
self.is_encoder_decoder = is_encoder_decoder
self.return_dict = return_dict
self.scope = None
self.decoder_key_length = decoder_seq_length
self.base_model_out_len = 1
self.num_hidden_states_types = 1
self.decoder_attention_idx = 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)
config = ProphetNetConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_encoder_layers=self.num_encoder_layers,
num_decoder_layers=self.num_decoder_layers,
decoder_ffn_dim=self.decoder_ffn_dim,
encoder_ffn_dim=self.encoder_ffn_dim,
num_encoder_attention_heads=self.num_encoder_attention_heads,
num_decoder_attention_heads=self.num_decoder_attention_heads,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
use_cache=self.use_cache,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
num_buckets=self.num_buckets,
relative_max_distance=self.relative_max_distance,
disable_ngram_loss=self.disable_ngram_loss,
max_position_embeddings=self.max_position_embeddings,
add_cross_attention=self.add_cross_attention,
is_encoder_decoder=self.is_encoder_decoder,
return_dict=self.return_dict,
)
return (
config,
input_ids,
attention_mask,
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
attention_mask,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class ProphetNetModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (ProphetNetModel, ProphetNetForConditionalGeneration) if is_torch_available() else ()
all_generative_model_classes = (ProphetNetForConditionalGeneration,) if is_torch_available() else ()
test_pruning = False
test_torchscript = False
test_resize_embeddings = False
test_headmasking = False
is_encoder_decoder = True
def setUp(self):
self.model_tester = ProphetNetModelTester(self)
self.config_tester = ConfigTester(self, config_class=ProphetNetConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_lm_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_with_lm_head(*config_and_inputs)
def test_only_decoder_causal_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_causal_lm_decoder(*config_and_inputs)
def test_fast_integration(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_fast_integration(*config_and_inputs)
def test_shared_weights(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_encoder_decoder_shared_weights(*config_and_inputs)
def test_shift_labels_via_shift_left(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs)
def test_decoder_model_generate(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_generate_with_past_key_value_states(*config_and_inputs)
def test_attn_mask_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_model_with_attn_mask(*config_and_inputs)
def test_config_save(self):
config = self.model_tester.prepare_config_and_inputs()[0]
config.add_cross_attention = False
with tempfile.TemporaryDirectory() as tmp_dirname:
config.save_pretrained(tmp_dirname)
config = ProphetNetConfig.from_pretrained(tmp_dirname)
self.assertFalse(config.add_cross_attention)
@unittest.skipIf(torch_device == "cpu", "Cant do half precision")
def test_fp16_forward(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
@require_torch
class ProphetNetStandaloneDecoderModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (ProphetNetDecoder, ProphetNetForCausalLM) if is_torch_available() else ()
all_generative_model_classes = (ProphetNetForCausalLM,) if is_torch_available() else ()
test_pruning = False
test_torchscript = False
test_resize_embeddings = False
test_headmasking = False
is_encoder_decoder = False
def setUp(self):
self.model_tester = ProphetNetStandaloneDecoderModelTester(self)
self.config_tester = ConfigTester(self, config_class=ProphetNetConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
def test_decoder_model_attn_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)
@require_torch
class ProphetNetStandaloneEncoderModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (ProphetNetEncoder,) if is_torch_available() else ()
test_pruning = False
test_torchscript = False
test_resize_embeddings = False
test_headmasking = False
is_encoder_decoder = False
def setUp(self):
self.model_tester = ProphetNetStandaloneEncoderModelTester(self)
self.config_tester = ConfigTester(self, config_class=ProphetNetConfig)
def test_config(self):
self.config_tester.run_common_tests()
@require_torch
class ProphetNetModelIntegrationTest(unittest.TestCase):
@slow
def test_pretrained_checkpoint_hidden_states(self):
model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased")
model.to(torch_device)
# encoder-decoder outputs
encoder_ids = torch.tensor(
[
[
2871,
102,
2048,
3176,
2780,
1997,
2871,
26727,
2169,
2097,
12673,
1996,
8457,
2006,
2049,
8240,
2859,
2799,
1012,
2023,
6512,
2038,
2174,
13977,
2195,
25962,
1012,
102,
]
]
).to(torch_device)
decoder_prev_ids = torch.tensor([[102, 2129, 2116, 2372, 2024, 2006, 2169, 1997, 2122, 2048, 2780, 1029]]).to(
torch_device
)
output = model(
input_ids=encoder_ids,
attention_mask=None,
encoder_outputs=None,
decoder_input_ids=decoder_prev_ids,
return_dict=True,
)
output_predited_logits = output[0]
expected_shape = torch.Size((1, 12, 30522))
self.assertEqual(output_predited_logits.shape, expected_shape)
expected_slice = torch.tensor(
[[[-7.6213, -7.9008, -7.9979], [-7.6834, -7.8467, -8.2187], [-7.5326, -7.4762, -8.1914]]]
).to(torch_device)
# self.assertTrue(torch.allclose(output_predited_logits[:, :3, :3], expected_slice, atol=1e-4))
assert torch.allclose(output_predited_logits[:, :3, :3], expected_slice, atol=1e-4)
# encoder outputs
encoder_outputs = model.prophetnet.encoder(encoder_ids)[0]
expected_encoder_outputs_slice = torch.tensor(
[[[-0.2526, -0.1951, -0.2185], [-0.8923, 0.2992, -0.4623], [-0.4585, 0.0165, -0.6652]]]
).to(torch_device)
expected_shape_encoder = torch.Size((1, 28, 1024))
self.assertEqual(encoder_outputs.shape, expected_shape_encoder)
# self.assertTrue(torch.allclose(encoder_outputs[:, :3, :3], expected_encoder_outputs_slice, atol=1e-4))
assert torch.allclose(encoder_outputs[:, :3, :3], expected_encoder_outputs_slice, atol=1e-4)
# decoder outputs
decoder_outputs = model.prophetnet.decoder(
decoder_prev_ids, encoder_hidden_states=encoder_outputs, return_dict=True
)
predicting_streams = decoder_outputs[1].view(1, model.config.ngram, 12, -1)
predicting_streams_logits = model.lm_head(predicting_streams)
next_first_stream_logits = predicting_streams_logits[:, 0]
# self.assertTrue(torch.allclose(next_first_stream_logits[:, :3, :3], expected_slice, atol=1e-4))
assert torch.allclose(next_first_stream_logits[:, :3, :3], expected_slice, atol=1e-4)
@slow
def test_cnndm_inference(self):
model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased-cnndm")
model.config.max_length = 512
model.to(torch_device)
tokenizer = ProphetNetTokenizer.from_pretrained("microsoft/prophetnet-large-uncased-cnndm")
ARTICLE_TO_SUMMARIZE = "USTC was founded in Beijing by the Chinese Academy of Sciences (CAS) in September 1958. The Director of CAS, Mr. Guo Moruo was appointed the first president of USTC. USTC's founding mission was to develop a high-level science and technology workforce, as deemed critical for development of China's economy, defense, and science and technology education. The establishment was hailed as \"A Major Event in the History of Chinese Education and Science.\" CAS has supported USTC by combining most of its institutes with the departments of the university. USTC is listed in the top 16 national key universities, becoming the youngest national key university.".lower()
input_ids = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=511, return_tensors="pt").input_ids
input_ids = input_ids.to(torch_device)
summary_ids = model.generate(
input_ids, num_beams=4, length_penalty=1.0, no_repeat_ngram_size=3, early_stopping=True
)
EXPECTED_SUMMARIZE_512 = "us ##tc was founded by the chinese academy of sciences ( cas ) in 1958 . [X_SEP] us ##tc is listed in the top 16 national key universities ."
generated_titles = [
" ".join(tokenizer.convert_ids_to_tokens(g, skip_special_tokens=True)) for g in summary_ids
]
self.assertListEqual(
[EXPECTED_SUMMARIZE_512],
generated_titles,
)
input_ids = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=99, return_tensors="pt").input_ids
input_ids = input_ids.to(torch_device)
# actually 98 tokens are used. max_length=100 contains bos and eos.
summary_ids = model.generate(
input_ids, num_beams=4, length_penalty=1.0, no_repeat_ngram_size=3, early_stopping=True
)
EXPECTED_SUMMARIZE_100 = (
r"us ##tc was founded in beijing by the chinese academy of sciences ( cas ) in 1958 . [X_SEP] us ##tc "
"'"
' s founding mission was to develop a high - level science and technology workforce . [X_SEP] establishment hailed as " a major event in the history of chinese education and science "'
)
generated_titles = [
" ".join(tokenizer.convert_ids_to_tokens(g, skip_special_tokens=True)) for g in summary_ids
]
self.assertListEqual(
[EXPECTED_SUMMARIZE_100],
generated_titles,
)
@slow
def test_question_gen_inference(self):
model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased-squad-qg")
model.to(torch_device)
tokenizer = ProphetNetTokenizer.from_pretrained("microsoft/prophetnet-large-uncased-squad-qg")
INPUTS = [
"Bill Gates [SEP] Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975.",
"1975 [SEP] Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975.",
"April 4, 1975 [SEP] Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975.",
]
input_ids = tokenizer(INPUTS, truncation=True, padding=True, return_tensors="pt").input_ids
input_ids = input_ids.to(torch_device)
gen_output = model.generate(input_ids, num_beams=5, early_stopping=True)
generated_questions = tokenizer.batch_decode(gen_output, skip_special_tokens=True)
EXPECTED_QUESTIONS = [
"along with paul allen, who founded microsoft?",
"what year was microsoft founded?",
"on what date was microsoft founded?",
]
self.assertListEqual(
EXPECTED_QUESTIONS,
generated_questions,
)
| [
"torch.Size",
"torch.cat",
"torch.isnan",
"torch.no_grad",
"torch.ones",
"torch.manual_seed",
"torch.all",
"torch.tensor",
"torch.ones_like",
"torch.allclose"
] | 1.0 | savindi-wijenayaka/transformer_old | 016960521eaaf5393c9fad1c4db15338455213f8 |
1.7 | import os
import torch
from src.helper_functions.helper_functions import parse_args
from src.loss_functions.losses import AsymmetricLoss, AsymmetricLossOptimized
from src.models import create_model
import argparse
import matplotlib
import torchvision.transforms as transforms
from pgd import create_targeted_adversarial_examples
# matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
from src.helper_functions.helper_functions import mAP, CocoDetection, CocoDetectionFiltered, CutoutPIL, ModelEma, add_weight_decay
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # USE GPU
########################## ARGUMENTS #############################################
parser = argparse.ArgumentParser(description='ASL MS-COCO Inference on a single image')
parser.add_argument('data', metavar='DIR', help='path to dataset', default='coco')
parser.add_argument('--model_path', type=str, default='mlc-model-epoch50')
parser.add_argument('--pic_path', type=str, default='./pics/test.jpg')
parser.add_argument('--model_name', type=str, default='tresnet_m')
parser.add_argument('--input_size', type=int, default=224)
parser.add_argument('--dataset_type', type=str, default='MS-COCO')
#IMPORTANT PARAMETER!
parser.add_argument('--th', type=float, default=0.5)
parser.add_argument('-b', '--batch-size', default=16, type=int,
metavar='N', help='mini-batch size (default: 16)')
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
help='number of data loading workers (default: 16)')
args = parse_args(parser)
########################## SETUP THE MODEL AND LOAD THE DATA #####################
# setup model
print('creating and loading the model...')
# state = torch.load(args.model_path, map_location='cpu')
args.num_classes = 80
model = create_model(args).cuda()
model_state = torch.load(args.model_path, map_location='cpu')
model.load_state_dict(model_state["state_dict"])
model.eval()
# Load the data
instances_path = os.path.join(args.data, 'annotations/instances_train2014.json')
# data_path_train = args.data
data_path = '{0}/train2014'.format(args.data)
################ EXPERIMENT DETAILS ########################
NUMBER_OF_BATCHES = 64
# TARGET_LABELS = [0, 1, 11, 56, 78, 79]
TARGET_LABELS = [0, 78]
EPSILON_VALUES = [0, 0.005, 0.01, 0.02, 0.05, 0.1]
########################## EXPERIMENT LOOP #####################
dataset = CocoDetectionFiltered(data_path,
instances_path,
transforms.Compose([
transforms.Resize((args.input_size, args.input_size)),
transforms.ToTensor(),
# normalize, # no need, toTensor does normalization
]), label_indices_positive=np.array(TARGET_LABELS))
# Pytorch Data loader
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
# zero counts for each epsilon value and target label
flipped_labels = np.zeros((len(EPSILON_VALUES), len(TARGET_LABELS)))
for i, (tensor_batch, labels) in enumerate(data_loader):
tensor_batch = tensor_batch.to(device)
if i >= NUMBER_OF_BATCHES:
        break
# process a batch and add the flipped labels for every epsilon
for epsilon_index in range(len(EPSILON_VALUES)):
# perform the pgd attack
pred = torch.sigmoid(model(tensor_batch)) > args.th
target = torch.clone(pred).detach()
target[:, TARGET_LABELS] = 0
adversarials = create_targeted_adversarial_examples(model, tensor_batch, target, eps=EPSILON_VALUES[epsilon_index], device="cuda")
# do inference again
pred_after_attack = torch.sigmoid(model(adversarials)) > args.th
        # compare the attacked labels before and after the attack
for _id, target_label in enumerate(TARGET_LABELS):
flipped_labels[epsilon_index, _id] += (torch.sum(pred[:, target_label]).item() - torch.sum(pred_after_attack[:, target_label]).item())
# plot and save the figures
# plt.figure()
for _id, target_label in enumerate(TARGET_LABELS):
plt.plot(EPSILON_VALUES, flipped_labels[:, _id], label='target {0}'.format(target_label))
plt.xlabel("Epsilon")
plt.ylabel("Number of flipped labels")
plt.title("PGD multi-label flipdown attack")
plt.legend()
plt.savefig('flipdown-pgd-multi-attack.png')
# displaying image
# print('showing image on screen...')
# fig = plt.figure()
# plt.imshow(im)
# plt.axis('off')
# plt.axis('tight')
# # plt.rcParams["axes.titlesize"] = 10
# plt.title("detected classes: {}".format(detected_classes))
# plt.show()
# print('done\n') | [
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.load",
"torch.clone",
"torch.sum"
] | 1.7 | erwinvanthiel/ASL | 1b8846919f4bcf7bf65881faf254395cb01f8ae3 |
0.4 | """
Script for training model on PyTorch.
"""
import os
import time
import logging
import argparse
import random
import numpy as np
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.utils.data
from common.logger_utils import initialize_logging
from common.train_log_param_saver import TrainLogParamSaver
from pytorch.utils import prepare_pt_context, prepare_model, validate
from pytorch.utils import report_accuracy, get_composite_metric, get_metric_name
from pytorch.dataset_utils import get_dataset_metainfo
from pytorch.dataset_utils import get_train_data_source, get_val_data_source
def add_train_cls_parser_arguments(parser):
"""
Create python script parameters (for training/classification specific subpart).
Parameters:
----------
parser : ArgumentParser
ArgumentParser instance.
"""
parser.add_argument(
"--model",
type=str,
required=True,
help="type of model to use. see model_provider for options")
parser.add_argument(
"--use-pretrained",
action="store_true",
help="enable using pretrained model from github repo")
parser.add_argument(
"--resume",
type=str,
default="",
help="resume from previously saved parameters if not None")
parser.add_argument(
"--resume-state",
type=str,
default="",
help="resume from previously saved optimizer state if not None")
parser.add_argument(
"--num-gpus",
type=int,
default=0,
help="number of gpus to use")
parser.add_argument(
"-j",
"--num-data-workers",
dest="num_workers",
default=4,
type=int,
help="number of preprocessing workers")
parser.add_argument(
"--batch-size",
type=int,
default=512,
help="training batch size per device (CPU/GPU)")
parser.add_argument(
"--batch-size-scale",
type=int,
default=1,
help="manual batch-size increasing factor")
parser.add_argument(
"--num-epochs",
type=int,
default=120,
help="number of training epochs")
parser.add_argument(
"--start-epoch",
type=int,
default=1,
help="starting epoch for resuming, default is 1 for new training")
parser.add_argument(
"--attempt",
type=int,
default=1,
help="current attempt number for training")
parser.add_argument(
"--optimizer-name",
type=str,
default="nag",
help="optimizer name")
parser.add_argument(
"--lr",
type=float,
default=0.1,
help="learning rate")
parser.add_argument(
"--lr-mode",
type=str,
default="cosine",
help="learning rate scheduler mode. options are step, poly and cosine")
parser.add_argument(
"--lr-decay",
type=float,
default=0.1,
help="decay rate of learning rate")
parser.add_argument(
"--lr-decay-period",
type=int,
default=0,
help="interval for periodic learning rate decays. default is 0 to disable")
parser.add_argument(
"--lr-decay-epoch",
type=str,
default="40,60",
help="epoches at which learning rate decays")
parser.add_argument(
"--target-lr",
type=float,
default=1e-8,
help="ending learning rate")
parser.add_argument(
"--poly-power",
type=float,
default=2,
help="power value for poly LR scheduler")
parser.add_argument(
"--warmup-epochs",
type=int,
default=0,
help="number of warmup epochs")
parser.add_argument(
"--warmup-lr",
type=float,
default=1e-8,
help="starting warmup learning rate")
parser.add_argument(
"--warmup-mode",
type=str,
default="linear",
help="learning rate scheduler warmup mode. options are linear, poly and constant")
parser.add_argument(
"--momentum",
type=float,
default=0.9,
help="momentum value for optimizer")
parser.add_argument(
"--wd",
type=float,
default=0.0001,
help="weight decay rate")
parser.add_argument(
"--gamma-wd-mult",
type=float,
default=1.0,
help="weight decay multiplier for batchnorm gamma")
parser.add_argument(
"--beta-wd-mult",
type=float,
default=1.0,
help="weight decay multiplier for batchnorm beta")
parser.add_argument(
"--bias-wd-mult",
type=float,
default=1.0,
help="weight decay multiplier for bias")
parser.add_argument(
"--grad-clip",
type=float,
default=None,
help="max_norm for gradient clipping")
parser.add_argument(
"--label-smoothing",
action="store_true",
help="use label smoothing")
parser.add_argument(
"--mixup",
action="store_true",
help="use mixup strategy")
parser.add_argument(
"--mixup-epoch-tail",
type=int,
default=15,
help="number of epochs without mixup at the end of training")
parser.add_argument(
"--log-interval",
type=int,
default=50,
help="number of batches to wait before logging")
parser.add_argument(
"--save-interval",
type=int,
default=4,
help="saving parameters epoch interval, best model will always be saved")
parser.add_argument(
"--save-dir",
type=str,
default="",
help="directory of saved models and log-files")
parser.add_argument(
"--logging-file-name",
type=str,
default="train.log",
help="filename of training log")
parser.add_argument(
"--seed",
type=int,
default=-1,
help="Random seed to be fixed")
parser.add_argument(
"--log-packages",
type=str,
default="torch, torchvision",
help="list of python packages for logging")
parser.add_argument(
"--log-pip-packages",
type=str,
default="",
help="list of pip packages for logging")
parser.add_argument(
"--tune-layers",
type=str,
default="",
help="regexp for selecting layers for fine tuning")
def parse_args():
"""
Parse python script parameters (common part).
Returns:
-------
ArgumentParser
Resulted args.
"""
parser = argparse.ArgumentParser(
description="Train a model for image classification (PyTorch)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"--dataset",
type=str,
default="ImageNet1K",
help="dataset name. options are ImageNet1K, CUB200_2011, CIFAR10, CIFAR100, SVHN")
parser.add_argument(
"--work-dir",
type=str,
default=os.path.join("..", "imgclsmob_data"),
help="path to working directory only for dataset root path preset")
args, _ = parser.parse_known_args()
dataset_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
dataset_metainfo.add_dataset_parser_arguments(
parser=parser,
work_dir_path=args.work_dir)
add_train_cls_parser_arguments(parser)
args = parser.parse_args()
return args
def init_rand(seed):
"""
Initialize all random generators by seed.
Parameters:
----------
seed : int
Seed value.
Returns:
-------
int
Generated seed value.
"""
if seed <= 0:
seed = np.random.randint(10000)
else:
cudnn.deterministic = True
logging.warning(
"You have chosen to seed training. This will turn on the CUDNN deterministic setting, which can slow down "
"your training considerably! You may see unexpected behavior when restarting from checkpoints.")
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
return seed
def prepare_trainer(net,
optimizer_name,
wd,
momentum,
lr_mode,
lr,
lr_decay_period,
lr_decay_epoch,
lr_decay,
num_epochs,
state_file_path):
"""
Prepare trainer.
Parameters:
----------
net : Module
Model.
optimizer_name : str
Name of optimizer.
wd : float
Weight decay rate.
momentum : float
Momentum value.
lr_mode : str
Learning rate scheduler mode.
lr : float
Learning rate.
lr_decay_period : int
Interval for periodic learning rate decays.
lr_decay_epoch : str
        Epochs at which learning rate decays.
lr_decay : float
Decay rate of learning rate.
num_epochs : int
Number of training epochs.
state_file_path : str
Path for file with trainer state.
Returns:
-------
Optimizer
Optimizer.
LRScheduler
Learning rate scheduler.
int
Start epoch.
"""
optimizer_name = optimizer_name.lower()
if (optimizer_name == "sgd") or (optimizer_name == "nag"):
optimizer = torch.optim.SGD(
params=net.parameters(),
lr=lr,
momentum=momentum,
weight_decay=wd,
nesterov=(optimizer_name == "nag"))
else:
raise ValueError("Usupported optimizer: {}".format(optimizer_name))
if state_file_path:
checkpoint = torch.load(state_file_path)
if type(checkpoint) == dict:
optimizer.load_state_dict(checkpoint["optimizer"])
start_epoch = checkpoint["epoch"]
else:
start_epoch = None
else:
start_epoch = None
cudnn.benchmark = True
lr_mode = lr_mode.lower()
if lr_decay_period > 0:
lr_decay_epoch = list(range(lr_decay_period, num_epochs, lr_decay_period))
else:
lr_decay_epoch = [int(i) for i in lr_decay_epoch.split(",")]
if (lr_mode == "step") and (lr_decay_period != 0):
lr_scheduler = torch.optim.lr_scheduler.StepLR(
optimizer=optimizer,
step_size=lr_decay_period,
gamma=lr_decay,
last_epoch=-1)
elif (lr_mode == "multistep") or ((lr_mode == "step") and (lr_decay_period == 0)):
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer=optimizer,
milestones=lr_decay_epoch,
gamma=lr_decay,
last_epoch=-1)
elif lr_mode == "cosine":
for group in optimizer.param_groups:
group.setdefault("initial_lr", group["lr"])
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer=optimizer,
T_max=num_epochs,
last_epoch=(num_epochs - 1))
else:
raise ValueError("Usupported lr_scheduler: {}".format(lr_mode))
return optimizer, lr_scheduler, start_epoch
def save_params(file_stem,
state):
"""
Save current model/trainer parameters.
Parameters:
----------
file_stem : str
File stem (with path).
state : dict
Whole state of model & trainer.
"""
torch.save(
obj=state["state_dict"],
f=(file_stem + ".pth"))
torch.save(
obj=state,
f=(file_stem + ".states"))
def train_epoch(epoch,
net,
train_metric,
train_data,
use_cuda,
L,
optimizer,
# lr_scheduler,
batch_size,
log_interval):
"""
Train model on particular epoch.
Parameters:
----------
epoch : int
Epoch number.
net : Module
Model.
train_metric : EvalMetric
Metric object instance.
train_data : DataLoader
Data loader.
use_cuda : bool
Whether to use CUDA.
L : Loss
Loss function.
optimizer : Optimizer
Optimizer.
batch_size : int
Training batch size.
log_interval : int
Batch count period for logging.
Returns:
-------
float
Loss value.
"""
tic = time.time()
net.train()
train_metric.reset()
train_loss = 0.0
btic = time.time()
for i, (data, target) in enumerate(train_data):
if use_cuda:
data = data.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
output = net(data)
loss = L(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss += loss.item()
train_metric.update(
labels=target,
preds=output)
if log_interval and not (i + 1) % log_interval:
speed = batch_size * log_interval / (time.time() - btic)
btic = time.time()
train_accuracy_msg = report_accuracy(metric=train_metric)
logging.info("Epoch[{}] Batch [{}]\tSpeed: {:.2f} samples/sec\t{}\tlr={:.5f}".format(
epoch + 1, i, speed, train_accuracy_msg, optimizer.param_groups[0]["lr"]))
throughput = int(batch_size * (i + 1) / (time.time() - tic))
logging.info("[Epoch {}] speed: {:.2f} samples/sec\ttime cost: {:.2f} sec".format(
epoch + 1, throughput, time.time() - tic))
train_loss /= (i + 1)
train_accuracy_msg = report_accuracy(metric=train_metric)
logging.info("[Epoch {}] training: {}\tloss={:.4f}".format(
epoch + 1, train_accuracy_msg, train_loss))
return train_loss
def train_net(batch_size,
num_epochs,
start_epoch1,
train_data,
val_data,
net,
optimizer,
lr_scheduler,
lp_saver,
log_interval,
num_classes,
val_metric,
train_metric,
use_cuda):
"""
Main procedure for training model.
Parameters:
----------
batch_size : int
Training batch size.
num_epochs : int
Number of training epochs.
start_epoch1 : int
Number of starting epoch (1-based).
train_data : DataLoader
Data loader (training subset).
val_data : DataLoader
Data loader (validation subset).
net : Module
Model.
optimizer : Optimizer
Optimizer.
lr_scheduler : LRScheduler
Learning rate scheduler.
lp_saver : TrainLogParamSaver
Model/trainer state saver.
log_interval : int
Batch count period for logging.
num_classes : int
Number of model classes.
val_metric : EvalMetric
Metric object instance (validation subset).
train_metric : EvalMetric
Metric object instance (training subset).
use_cuda : bool
Whether to use CUDA.
"""
assert (num_classes > 0)
L = nn.CrossEntropyLoss()
if use_cuda:
L = L.cuda()
assert (type(start_epoch1) == int)
assert (start_epoch1 >= 1)
if start_epoch1 > 1:
logging.info("Start training from [Epoch {}]".format(start_epoch1))
validate(
metric=val_metric,
net=net,
val_data=val_data,
use_cuda=use_cuda)
val_accuracy_msg = report_accuracy(metric=val_metric)
logging.info("[Epoch {}] validation: {}".format(start_epoch1 - 1, val_accuracy_msg))
gtic = time.time()
for epoch in range(start_epoch1 - 1, num_epochs):
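        # Note: stepping the scheduler at the start of the epoch follows the
        # pre-1.1.0 PyTorch convention; newer releases warn and expect the
        # scheduler step to come after the optimizer updates of the epoch.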
lr_scheduler.step()
train_loss = train_epoch(
epoch=epoch,
net=net,
train_metric=train_metric,
train_data=train_data,
use_cuda=use_cuda,
L=L,
optimizer=optimizer,
# lr_scheduler,
batch_size=batch_size,
log_interval=log_interval)
validate(
metric=val_metric,
net=net,
val_data=val_data,
use_cuda=use_cuda)
val_accuracy_msg = report_accuracy(metric=val_metric)
logging.info("[Epoch {}] validation: {}".format(epoch + 1, val_accuracy_msg))
if lp_saver is not None:
state = {
"epoch": epoch + 1,
"state_dict": net.state_dict(),
"optimizer": optimizer.state_dict(),
}
lp_saver_kwargs = {"state": state}
val_acc_values = val_metric.get()[1]
train_acc_values = train_metric.get()[1]
val_acc_values = val_acc_values if type(val_acc_values) == list else [val_acc_values]
train_acc_values = train_acc_values if type(train_acc_values) == list else [train_acc_values]
lp_saver.epoch_test_end_callback(
epoch1=(epoch + 1),
params=(val_acc_values + train_acc_values + [train_loss, optimizer.param_groups[0]["lr"]]),
**lp_saver_kwargs)
logging.info("Total time cost: {:.2f} sec".format(time.time() - gtic))
if lp_saver is not None:
opt_metric_name = get_metric_name(val_metric, lp_saver.acc_ind)
logging.info("Best {}: {:.4f} at {} epoch".format(
opt_metric_name, lp_saver.best_eval_metric_value, lp_saver.best_eval_metric_epoch))
def main():
"""
Main body of script.
"""
args = parse_args()
args.seed = init_rand(seed=args.seed)
_, log_file_exist = initialize_logging(
logging_dir_path=args.save_dir,
logging_file_name=args.logging_file_name,
script_args=args,
log_packages=args.log_packages,
log_pip_packages=args.log_pip_packages)
use_cuda, batch_size = prepare_pt_context(
num_gpus=args.num_gpus,
batch_size=args.batch_size)
net = prepare_model(
model_name=args.model,
use_pretrained=args.use_pretrained,
pretrained_model_file_path=args.resume.strip(),
use_cuda=use_cuda)
real_net = net.module if hasattr(net, "module") else net
assert (hasattr(real_net, "num_classes"))
num_classes = real_net.num_classes
ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
ds_metainfo.update(args=args)
train_data = get_train_data_source(
ds_metainfo=ds_metainfo,
batch_size=batch_size,
num_workers=args.num_workers)
val_data = get_val_data_source(
ds_metainfo=ds_metainfo,
batch_size=batch_size,
num_workers=args.num_workers)
optimizer, lr_scheduler, start_epoch = prepare_trainer(
net=net,
optimizer_name=args.optimizer_name,
wd=args.wd,
momentum=args.momentum,
lr_mode=args.lr_mode,
lr=args.lr,
lr_decay_period=args.lr_decay_period,
lr_decay_epoch=args.lr_decay_epoch,
lr_decay=args.lr_decay,
num_epochs=args.num_epochs,
state_file_path=args.resume_state)
if args.save_dir and args.save_interval:
param_names = ds_metainfo.val_metric_capts + ds_metainfo.train_metric_capts + ["Train.Loss", "LR"]
lp_saver = TrainLogParamSaver(
checkpoint_file_name_prefix="{}_{}".format(ds_metainfo.short_label, args.model),
last_checkpoint_file_name_suffix="last",
best_checkpoint_file_name_suffix=None,
last_checkpoint_dir_path=args.save_dir,
best_checkpoint_dir_path=None,
last_checkpoint_file_count=2,
best_checkpoint_file_count=2,
checkpoint_file_save_callback=save_params,
checkpoint_file_exts=(".pth", ".states"),
save_interval=args.save_interval,
num_epochs=args.num_epochs,
param_names=param_names,
acc_ind=ds_metainfo.saver_acc_ind,
# bigger=[True],
# mask=None,
score_log_file_path=os.path.join(args.save_dir, "score.log"),
score_log_attempt_value=args.attempt,
best_map_log_file_path=os.path.join(args.save_dir, "best_map.log"))
else:
lp_saver = None
train_net(
batch_size=batch_size,
num_epochs=args.num_epochs,
start_epoch1=args.start_epoch,
train_data=train_data,
val_data=val_data,
net=net,
optimizer=optimizer,
lr_scheduler=lr_scheduler,
lp_saver=lp_saver,
log_interval=args.log_interval,
num_classes=num_classes,
val_metric=get_composite_metric(ds_metainfo.val_metric_names, ds_metainfo.val_metric_extra_kwargs),
train_metric=get_composite_metric(ds_metainfo.train_metric_names, ds_metainfo.train_metric_extra_kwargs),
use_cuda=use_cuda)
if __name__ == "__main__":
main()
| [
"torch.nn.CrossEntropyLoss"
] | 0.4.0 | naviocean/imgclsmob | f2993d3ce73a2f7ddba05da3891defb08547d504 |
0.4 | """
SKNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586.
"""
__all__ = ['SKNet', 'sknet50', 'sknet101', 'sknet152']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1, conv1x1_block, conv3x3_block, Concurrent
from .resnet import ResInitBlock
class SKConvBlock(nn.Module):
"""
SKNet specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
groups : int, default 32
Number of groups in branches.
num_branches : int, default 2
Number of branches (`M` parameter in the paper).
reduction : int, default 16
Reduction value for intermediate channels (`r` parameter in the paper).
min_channels : int, default 32
Minimal number of intermediate channels (`L` parameter in the paper).
"""
def __init__(self,
in_channels,
out_channels,
stride,
groups=32,
num_branches=2,
reduction=16,
min_channels=32):
super(SKConvBlock, self).__init__()
self.num_branches = num_branches
self.out_channels = out_channels
mid_channels = max(in_channels // reduction, min_channels)
self.branches = Concurrent(stack=True)
for i in range(num_branches):
dilation = 1 + i
self.branches.add_module("branch{}".format(i + 2), conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
padding=dilation,
dilation=dilation,
groups=groups))
self.pool = nn.AdaptiveAvgPool2d(output_size=1)
self.fc1 = conv1x1_block(
in_channels=out_channels,
out_channels=mid_channels)
self.fc2 = conv1x1(
in_channels=mid_channels,
out_channels=(out_channels * num_branches))
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
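        # Selective-kernel fusion, roughly:
        #   y : (batch, num_branches, out_channels, H, W)  - stacked branch outputs
        #   u = y.sum(dim=1)                               - fused feature map
        #   pool + fc1 + fc2                               - per-channel descriptor
        #   softmax over the branch dimension              - per-branch channel weights
        # The weighted sum over branches is the block output.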
y = self.branches(x)
u = y.sum(dim=1)
s = self.pool(u)
z = self.fc1(s)
w = self.fc2(z)
batch = w.size(0)
w = w.view(batch, self.num_branches, self.out_channels)
w = self.softmax(w)
w = w.unsqueeze(-1).unsqueeze(-1)
y = y * w
y = y.sum(dim=1)
return y
class SKNetBottleneck(nn.Module):
"""
SKNet bottleneck block for residual path in SKNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
bottleneck_factor : int, default 2
Bottleneck factor.
"""
def __init__(self,
in_channels,
out_channels,
stride,
bottleneck_factor=2):
super(SKNetBottleneck, self).__init__()
mid_channels = out_channels // bottleneck_factor
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels)
self.conv2 = SKConvBlock(
in_channels=mid_channels,
out_channels=mid_channels,
stride=stride)
self.conv3 = conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
activation=None)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class SKNetUnit(nn.Module):
"""
SKNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
"""
def __init__(self,
in_channels,
out_channels,
stride):
super(SKNetUnit, self).__init__()
self.resize_identity = (in_channels != out_channels) or (stride != 1)
self.body = SKNetBottleneck(
in_channels=in_channels,
out_channels=out_channels,
stride=stride)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
activation=None)
self.activ = nn.ReLU(inplace=True)
def forward(self, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = x + identity
x = self.activ(x)
return x
class SKNet(nn.Module):
"""
SKNet model from 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
num_classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
in_channels=3,
in_size=(224, 224),
num_classes=1000):
super(SKNet, self).__init__()
self.in_size = in_size
self.num_classes = num_classes
self.features = nn.Sequential()
self.features.add_module("init_block", ResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.Sequential()
for j, out_channels in enumerate(channels_per_stage):
stride = 2 if (j == 0) and (i != 0) else 1
stage.add_module("unit{}".format(j + 1), SKNetUnit(
in_channels=in_channels,
out_channels=out_channels,
stride=stride))
in_channels = out_channels
self.features.add_module("stage{}".format(i + 1), stage)
self.features.add_module("final_pool", nn.AvgPool2d(
kernel_size=7,
stride=1))
self.output = nn.Linear(
in_features=in_channels,
out_features=num_classes)
self._init_params()
def _init_params(self):
for name, module in self.named_modules():
if isinstance(module, nn.Conv2d):
init.kaiming_uniform_(module.weight)
if module.bias is not None:
init.constant_(module.bias, 0)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.output(x)
return x
def get_sknet(blocks,
model_name=None,
pretrained=False,
root=os.path.join("~", ".torch", "models"),
**kwargs):
"""
Create SKNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
if blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
else:
raise ValueError("Unsupported SKNet with number of blocks: {}".format(blocks))
init_block_channels = 64
channels_per_layers = [256, 512, 1024, 2048]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = SKNet(
channels=channels,
init_block_channels=init_block_channels,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import download_model
download_model(
net=net,
model_name=model_name,
local_model_store_dir_path=root)
return net
def sknet50(**kwargs):
"""
SKNet-50 model from 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sknet(blocks=50, model_name="sknet50", **kwargs)
def sknet101(**kwargs):
"""
SKNet-101 model from 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sknet(blocks=101, model_name="sknet101", **kwargs)
def sknet152(**kwargs):
"""
    SKNet-152 model from 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sknet(blocks=152, model_name="sknet152", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
import torch
pretrained = False
models = [
sknet50,
sknet101,
sknet152,
]
for model in models:
net = model(pretrained=pretrained)
# net.train()
net.eval()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != sknet50 or weight_count == 27479784)
assert (model != sknet101 or weight_count == 48736040)
assert (model != sknet152 or weight_count == 66295656)
x = torch.randn(14, 3, 224, 224)
y = net(x)
y.sum().backward()
assert (tuple(y.size()) == (14, 1000))
if __name__ == "__main__":
_test()
| [
"torch.nn.Linear",
"torch.nn.init.kaiming_uniform_",
"torch.nn.Softmax",
"torch.nn.init.constant_",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.ReLU",
"torch.nn.AdaptiveAvgPool2d",
"torch.randn"
] | 0.4.0 | naviocean/imgclsmob | f2993d3ce73a2f7ddba05da3891defb08547d504 |
0.4 | """
ESPNet for image segmentation, implemented in PyTorch.
Original paper: 'ESPNet: Efficient Spatial Pyramid of Dilated Convolutions for Semantic Segmentation,'
https://arxiv.org/abs/1803.06815.
"""
__all__ = ['ESPNet', 'espnet_cityscapes']
import os
import torch
import torch.nn as nn
from common import conv1x1, conv3x3_block, NormActivation, DeconvBlock
from espcnet import ESPCNet, ESPBlock
class ESPFinalBlock(nn.Module):
"""
ESPNet final block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_eps : float
Small float added to variance in Batch norm.
"""
def __init__(self,
in_channels,
out_channels,
bn_eps):
super(ESPFinalBlock, self).__init__()
self.conv = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
bn_eps=bn_eps,
activation=(lambda: nn.PReLU(out_channels)))
self.deconv = nn.ConvTranspose2d(
in_channels=out_channels,
out_channels=out_channels,
kernel_size=2,
stride=2,
padding=0,
output_padding=0,
bias=False)
def forward(self, x):
x = self.conv(x)
x = self.deconv(x)
return x
class ESPNet(ESPCNet):
"""
ESPNet model from 'ESPNet: Efficient Spatial Pyramid of Dilated Convolutions for Semantic Segmentation,'
https://arxiv.org/abs/1803.06815.
Parameters:
----------
layers : list of int
Number of layers for each unit.
channels : list of int
Number of output channels for each unit (for y-branch).
init_block_channels : int
Number of output channels for the initial unit.
cut_x : list of int
Whether to concatenate with x-branch for each unit.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
aux : bool, default False
Whether to output an auxiliary result.
fixed_size : bool, default False
Whether to expect fixed spatial size of input image.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (1024, 2048)
Spatial size of the expected input image.
num_classes : int, default 19
Number of segmentation classes.
"""
def __init__(self,
layers,
channels,
init_block_channels,
cut_x,
bn_eps=1e-5,
aux=False,
fixed_size=False,
in_channels=3,
in_size=(1024, 2048),
num_classes=19):
super(ESPNet, self).__init__(
layers=layers,
channels=channels,
init_block_channels=init_block_channels,
cut_x=cut_x,
bn_eps=bn_eps,
aux=aux,
fixed_size=fixed_size,
in_channels=in_channels,
in_size=in_size,
num_classes=num_classes)
assert (aux is not None)
assert (fixed_size is not None)
assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
self.in_size = in_size
self.num_classes = num_classes
self.fixed_size = fixed_size
self.skip1 = nn.BatchNorm2d(
num_features=num_classes,
eps=bn_eps)
self.skip2 = conv1x1(
in_channels=channels[1],
out_channels=num_classes)
self.up1 = nn.Sequential(nn.ConvTranspose2d(
in_channels=num_classes,
out_channels=num_classes,
kernel_size=2,
stride=2,
padding=0,
output_padding=0,
bias=False))
self.up2 = nn.Sequential()
self.up2.add_module("block1", NormActivation(
in_channels=(2 * num_classes),
bn_eps=bn_eps,
activation=(lambda: nn.PReLU(2 * num_classes))))
self.up2.add_module("block2", ESPBlock(
in_channels=(2 * num_classes),
out_channels=num_classes,
downsample=False,
residual=False,
bn_eps=bn_eps))
self.up2.add_module("block3", DeconvBlock(
in_channels=num_classes,
out_channels=num_classes,
kernel_size=2,
stride=2,
padding=0,
bn_eps=bn_eps,
activation=(lambda: nn.PReLU(num_classes))))
self.decoder_head = ESPFinalBlock(
in_channels=(channels[0] + num_classes),
out_channels=num_classes,
bn_eps=bn_eps)
self._init_params()
def _init_params(self):
for name, module in self.named_modules():
if isinstance(module, nn.Conv2d):
nn.init.kaiming_uniform_(module.weight)
if module.bias is not None:
nn.init.constant_(module.bias, 0)
def forward(self, x):
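        # Decoder sketch: the ESPCNet encoder yields y1/y2/y3 at decreasing
        # resolution; each decoder step upsamples by 2 with a transposed conv and
        # concatenates the matching encoder skip (y2, then y1) before decoder_head
        # restores the full input resolution (input size must be divisible by 8).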
y0 = self.features.init_block(x)
y1, x = self.features.stage1(y0, x)
y2, x = self.features.stage2(y1, x)
y3, x = self.features.stage3(y2, x)
yh = self.head(y3)
v1 = self.skip1(yh)
z1 = self.up1(v1)
v2 = self.skip2(y2)
z2 = torch.cat((v2, z1), dim=1)
z2 = self.up2(z2)
z = torch.cat((z2, y1), dim=1)
z = self.decoder_head(z)
return z
def get_espnet(model_name=None,
pretrained=False,
root=os.path.join("~", ".torch", "models"),
**kwargs):
"""
Create ESPNet model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
init_block_channels = 16
layers = [0, 3, 4]
channels = [19, 131, 256]
cut_x = [1, 1, 0]
bn_eps = 1e-3
net = ESPNet(
layers=layers,
channels=channels,
init_block_channels=init_block_channels,
cut_x=cut_x,
bn_eps=bn_eps,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import download_model
download_model(
net=net,
model_name=model_name,
local_model_store_dir_path=root)
return net
def espnet_cityscapes(num_classes=19, **kwargs):
"""
ESPNet model for Cityscapes from 'ESPNet: Efficient Spatial Pyramid of Dilated Convolutions for Semantic
Segmentation,' https://arxiv.org/abs/1803.06815.
Parameters:
----------
num_classes : int, default 19
Number of segmentation classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_espnet(num_classes=num_classes, model_name="espnet_cityscapes", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
pretrained = False
fixed_size = True
in_size = (1024, 2048)
classes = 19
models = [
espnet_cityscapes,
]
for model in models:
net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size)
# net.train()
net.eval()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != espnet_cityscapes or weight_count == 201542)
batch = 4
x = torch.randn(batch, 3, in_size[0], in_size[1])
y = net(x)
# y.sum().backward()
assert (tuple(y.size()) == (batch, classes, in_size[0], in_size[1]))
if __name__ == "__main__":
_test()
| [
"torch.cat",
"torch.nn.init.kaiming_uniform_",
"torch.nn.init.constant_",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.nn.PReLU",
"torch.randn"
] | 0.4.0 | naviocean/imgclsmob | f2993d3ce73a2f7ddba05da3891defb08547d504 |
0.4 | """
BAM-ResNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514.
"""
__all__ = ['BamResNet', 'bam_resnet18', 'bam_resnet34', 'bam_resnet50', 'bam_resnet101', 'bam_resnet152']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1, conv1x1_block, conv3x3_block
from .resnet import ResInitBlock, ResUnit
class DenseBlock(nn.Module):
"""
Standard dense block with Batch normalization and ReLU activation.
Parameters:
----------
in_features : int
Number of input features.
out_features : int
Number of output features.
"""
def __init__(self,
in_features,
out_features):
super(DenseBlock, self).__init__()
self.fc = nn.Linear(
in_features=in_features,
out_features=out_features)
self.bn = nn.BatchNorm1d(num_features=out_features)
self.activ = nn.ReLU(inplace=True)
def forward(self, x):
x = self.fc(x)
x = self.bn(x)
x = self.activ(x)
return x
class ChannelGate(nn.Module):
"""
BAM channel gate block.
Parameters:
----------
channels : int
Number of input/output channels.
reduction_ratio : int, default 16
Channel reduction ratio.
num_layers : int, default 1
Number of dense blocks.
"""
def __init__(self,
channels,
reduction_ratio=16,
num_layers=1):
super(ChannelGate, self).__init__()
mid_channels = channels // reduction_ratio
self.pool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
self.init_fc = DenseBlock(
in_features=channels,
out_features=mid_channels)
self.main_fcs = nn.Sequential()
for i in range(num_layers - 1):
self.main_fcs.add_module("fc{}".format(i + 1), DenseBlock(
in_features=mid_channels,
out_features=mid_channels))
self.final_fc = nn.Linear(
in_features=mid_channels,
out_features=channels)
def forward(self, x):
input = x
x = self.pool(x)
x = x.view(x.size(0), -1)
x = self.init_fc(x)
x = self.main_fcs(x)
x = self.final_fc(x)
x = x.unsqueeze(2).unsqueeze(3).expand_as(input)
return x
class SpatialGate(nn.Module):
"""
BAM spatial gate block.
Parameters:
----------
channels : int
Number of input/output channels.
reduction_ratio : int, default 16
Channel reduction ratio.
num_dil_convs : int, default 2
Number of dilated convolutions.
dilation : int, default 4
Dilation/padding value for corresponding convolutions.
"""
def __init__(self,
channels,
reduction_ratio=16,
num_dil_convs=2,
dilation=4):
super(SpatialGate, self).__init__()
mid_channels = channels // reduction_ratio
self.init_conv = conv1x1_block(
in_channels=channels,
out_channels=mid_channels,
stride=1,
bias=True)
self.dil_convs = nn.Sequential()
for i in range(num_dil_convs):
self.dil_convs.add_module("conv{}".format(i + 1), conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
stride=1,
padding=dilation,
dilation=dilation,
bias=True))
self.final_conv = conv1x1(
in_channels=mid_channels,
out_channels=1,
stride=1,
bias=True)
def forward(self, x):
input = x
x = self.init_conv(x)
x = self.dil_convs(x)
x = self.final_conv(x)
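        # final_conv reduces the dilated features to a single-channel spatial map;
        # expand_as then broadcasts that map across all channels of the input.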
x = x.expand_as(input)
return x
class BamBlock(nn.Module):
"""
BAM attention block for BAM-ResNet.
Parameters:
----------
channels : int
Number of input/output channels.
"""
def __init__(self,
channels):
super(BamBlock, self).__init__()
self.ch_att = ChannelGate(channels=channels)
self.sp_att = SpatialGate(channels=channels)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
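        # Residual attention: att = 1 + sigmoid(channel_gate * spatial_gate), so the
        # block only rescales features upward (scale >= 1) and reduces to the
        # identity when the combined gate output is strongly negative.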
att = 1 + self.sigmoid(self.ch_att(x) * self.sp_att(x))
x = x * att
return x
class BamResUnit(nn.Module):
"""
BAM-ResNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
"""
def __init__(self,
in_channels,
out_channels,
stride,
bottleneck):
super(BamResUnit, self).__init__()
self.use_bam = (stride != 1)
if self.use_bam:
self.bam = BamBlock(channels=in_channels)
self.res_unit = ResUnit(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
bottleneck=bottleneck,
conv1_stride=False)
def forward(self, x):
if self.use_bam:
x = self.bam(x)
x = self.res_unit(x)
return x
class BamResNet(nn.Module):
"""
BAM-ResNet model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
num_classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
in_channels=3,
in_size=(224, 224),
num_classes=1000):
super(BamResNet, self).__init__()
self.in_size = in_size
self.num_classes = num_classes
self.features = nn.Sequential()
self.features.add_module("init_block", ResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.Sequential()
for j, out_channels in enumerate(channels_per_stage):
stride = 2 if (j == 0) and (i != 0) else 1
stage.add_module("unit{}".format(j + 1), BamResUnit(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
bottleneck=bottleneck))
in_channels = out_channels
self.features.add_module("stage{}".format(i + 1), stage)
self.features.add_module("final_pool", nn.AvgPool2d(
kernel_size=7,
stride=1))
self.output = nn.Linear(
in_features=in_channels,
out_features=num_classes)
self._init_params()
def _init_params(self):
for name, module in self.named_modules():
if isinstance(module, nn.Conv2d):
init.kaiming_uniform_(module.weight)
if module.bias is not None:
init.constant_(module.bias, 0)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.output(x)
return x
def get_resnet(blocks,
model_name=None,
pretrained=False,
root=os.path.join("~", ".torch", "models"),
**kwargs):
"""
Create BAM-ResNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer in units.
use_se : bool
Whether to use SE block.
width_scale : float
Scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
if blocks == 18:
layers = [2, 2, 2, 2]
elif blocks == 34:
layers = [3, 4, 6, 3]
elif blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
else:
raise ValueError("Unsupported BAM-ResNet with number of blocks: {}".format(blocks))
init_block_channels = 64
if blocks < 50:
channels_per_layers = [64, 128, 256, 512]
bottleneck = False
else:
channels_per_layers = [256, 512, 1024, 2048]
bottleneck = True
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = BamResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import download_model
download_model(
net=net,
model_name=model_name,
local_model_store_dir_path=root)
return net
def bam_resnet18(**kwargs):
"""
BAM-ResNet-18 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=18, model_name="bam_resnet18", **kwargs)
def bam_resnet34(**kwargs):
"""
BAM-ResNet-34 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=34, model_name="bam_resnet34", **kwargs)
def bam_resnet50(**kwargs):
"""
BAM-ResNet-50 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=50, model_name="bam_resnet50", **kwargs)
def bam_resnet101(**kwargs):
"""
BAM-ResNet-101 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=101, model_name="bam_resnet101", **kwargs)
def bam_resnet152(**kwargs):
"""
BAM-ResNet-152 model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=152, model_name="bam_resnet152", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
import torch
pretrained = False
models = [
bam_resnet18,
bam_resnet34,
bam_resnet50,
bam_resnet101,
bam_resnet152,
]
for model in models:
net = model(pretrained=pretrained)
# net.train()
net.eval()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != bam_resnet18 or weight_count == 11712503)
assert (model != bam_resnet34 or weight_count == 21820663)
assert (model != bam_resnet50 or weight_count == 25915099)
assert (model != bam_resnet101 or weight_count == 44907227)
assert (model != bam_resnet152 or weight_count == 60550875)
x = torch.randn(1, 3, 224, 224)
y = net(x)
y.sum().backward()
assert (tuple(y.size()) == (1, 1000))
if __name__ == "__main__":
_test()
| [
"torch.nn.Linear",
"torch.nn.init.kaiming_uniform_",
"torch.nn.Sigmoid",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.init.constant_",
"torch.nn.ReLU",
"torch.nn.BatchNorm1d",
"torch.nn.AdaptiveAvgPool2d",
"torch.randn"
] | 0.4.0 | naviocean/imgclsmob | f2993d3ce73a2f7ddba05da3891defb08547d504 |
1.4 | import copy
import logging
import numpy as np
import torch
import torchvision
from ignite.metrics import Loss
from torchvision import transforms as T
from torchvision.transforms import functional as F
import datasetinsights.constants as const
from datasetinsights.data.datasets import Dataset
from datasetinsights.data.loader import create_loader
from datasetinsights.data.transforms import Compose, RandomHorizontalFlip
from datasetinsights.evaluation_metrics import EvaluationMetric
from datasetinsights.visualization.plots import decode_segmap, grid_plot
from .base import Estimator
logger = logging.getLogger(__name__)
# Normalization constants (heuristics) from ImageNet dataset
_IMGNET_MEAN = (0.485, 0.456, 0.406)
_IMGNET_STD = (0.229, 0.224, 0.225)
# Inverse Normalization constants
_INV_IMGNET_MEAN = (-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225)
_INV_IMGNET_STD = (1.0 / 0.229, 1.0 / 0.224, 1.0 / 0.225)
def pad_if_smaller(img, size, fill=0):
min_size = min(img.size)
if min_size < size:
ow, oh = img.size
padh = size - oh if oh < size else 0
padw = size - ow if ow < size else 0
img = F.pad(img, (0, 0, padw, padh), fill=fill)
return img
class RandomCrop:
def __init__(self, size):
self.size = size
def __call__(self, image, target):
image = pad_if_smaller(image, self.size)
target = pad_if_smaller(target, self.size, fill=255)
crop_params = T.RandomCrop.get_params(image, (self.size, self.size))
image = F.crop(image, *crop_params)
target = F.crop(target, *crop_params)
return image, target
class ToTensor:
"""Convert a pair of (image, target) to tensor
"""
def __call__(self, image, target):
image = F.to_tensor(image)
target = torch.as_tensor(np.asarray(target), dtype=torch.int64)
return image, target
class Normalize:
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, image, target):
image = F.normalize(image, mean=self.mean, std=self.std)
return image, target
class DeeplabV3(Estimator):
""" DeeplabV3 Model https://arxiv.org/abs/1706.05587
Args:
config (CfgNode): estimator config
writer: Tensorboard writer object
checkpointer: Model checkpointer callback to save models
device: model training on device (cpu|cuda)
Attributes:
backbone: model backbone (resnet50|resnet101)
num_classes: number of classes for semantic segmentation
model: tensorflow or pytorch graph
writer: Tensorboard writer object
checkpointer: Model checkpointer callback to save models
device: model training on device (cpu|cuda)
optimizer: pytorch optimizer
lr_scheduler: pytorch learning rate scheduler
"""
def __init__(self, *, config, writer, checkpointer, device, **kwargs):
self.config = config
self.backbone = config.backbone
self.num_classes = config.num_classes
model_name = "deeplabv3_" + self.backbone
self.model = torchvision.models.segmentation.__dict__[model_name](
num_classes=self.num_classes
)
self.writer = writer
self.checkpointer = checkpointer
self.device = device
opname = config.optimizer.name
if opname == "Adam":
optimizer = torch.optim.Adam(
self.model.parameters(), **config.optimizer.args
)
# use fixed learning rate when using Adam
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
optimizer, lambda x: 1.0
)
else:
raise ValueError(f"Unsupported optimizer type {opname}")
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
# load estimators from file if checkpoint_file exists
ckpt_file = config.checkpoint_file
if ckpt_file != const.NULL_STRING:
checkpointer.load(self, ckpt_file)
@staticmethod
def _transforms(is_train=True, crop_size=769):
"""Transformations for a pair of input and target image
Args:
is_train (bool): indicator whether this is a transformation
during training (default: True)
crop_size (int): crop size. Images will be cropped to
(crop_size, crop_size)
"""
transforms = []
if is_train:
transforms.append(RandomHorizontalFlip(0.5))
transforms.append(RandomCrop(crop_size))
transforms.append(ToTensor())
transforms.append(Normalize(mean=_IMGNET_MEAN, std=_IMGNET_STD))
return Compose(transforms)
@staticmethod
def _loss_fn(outputs, target):
""" Compute loss
Args:
outputs (dict): named output of deeplabv3 model. Since this
                implementation outputs two semantic segmentation images from two
                heads of the model, we are expecting a dict with the two keys
                "out" and "aux" that correspond to two pytorch image tensors.
target (torch.Tensor): ground truth 2D image tensor
Returns:
numerical value of loss
"""
losses = {}
for name, x in outputs.items():
losses[name] = torch.nn.functional.cross_entropy(
x, target, ignore_index=255
)
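        # "out" is the main segmentation head; "aux", when present, is torchvision's
        # auxiliary head on an intermediate backbone layer, down-weighted by 0.5.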
if len(losses) == 1:
return losses["out"]
return losses["out"] + 0.5 * losses["aux"]
def _train_one_epoch(self, loader, epoch):
""" Train one epoch
Args:
loader (DataLoader): pytorch dataloader
epoch (int): the current epoch number
"""
logger.info(f"Epoch[{epoch}] training started.")
self.model.train()
n_batch = len(loader)
accumulation_steps = self.config.train.accumulation_steps
loss_metric = Loss(self._loss_fn)
self.optimizer.zero_grad()
for i, (image, target) in enumerate(loader):
image, target = image.to(self.device), target.to(self.device)
output = self.model(image)
loss = self._loss_fn(output, target)
loss.backward()
# Accumulated Gradients are only updated after X steps.
# This creates an effective batch size of
# batch_size * accumulation_steps
if (i + 1) % accumulation_steps == 0:
self.optimizer.step()
self.lr_scheduler.step()
self.optimizer.zero_grad()
loss_metric.update((output, target))
iter_num = (i + 1) % n_batch
logger.debug(
f"Epoch[{epoch}] Iteration[{iter_num}/{n_batch}] "
f"Loss: {loss:.3f}"
)
epoch_loss = loss_metric.compute()
logger.info(
f"Epoch[{epoch}] training completed. Loss: {epoch_loss:.3f}"
)
self.writer.add_scalar("training/loss", epoch_loss, epoch)
loss_metric.reset()
def _evaluate_one_epoch(self, loader, epoch):
""" Evaluate one epoch
Args:
loader (DataLoader): pytorch dataloader
epoch (int): the current epoch number
"""
logger.info(f"Epoch[{epoch}] evaluation started")
self.model.eval()
loss_metric = Loss(self._loss_fn)
        # TODO: Support metrics other than IoU and support multiple
        # metrics
iou_metric = EvaluationMetric.create(
self.config.metric, num_classes=self.num_classes
)
with torch.no_grad():
for image, target in loader:
image, target = image.to(self.device), target.to(self.device)
output = self.model(image)
loss_metric.update((output, target))
iou_metric.update((output["out"], target))
loss = loss_metric.compute()
iou = iou_metric.compute()
# some classes are not used in cityscapes evaluation.
# TODO: Move class masking logic to IoU metric class.
keep_mask = [
not c.ignore_in_eval
for c in torchvision.datasets.Cityscapes.classes
]
class_names = [c.name for c in torchvision.datasets.Cityscapes.classes]
iou_info = {
name: f"{iou[i].item():.3f}"
for i, name in enumerate(class_names)
if keep_mask[i]
}
miou = iou[keep_mask].mean()
logger.info(
f"Epoch[{epoch}] evaluation completed. "
f"Loss: {loss:.3f}, mIoU: {miou:.3f}\n"
f"IoU per class: {iou_info}"
)
self.writer.add_scalar("validation/loss", loss, epoch)
self.writer.add_scalar("validation/miou", miou, epoch)
inv_normalize = T.Normalize(mean=_INV_IMGNET_MEAN, std=_INV_IMGNET_STD)
# Visualize segmentation images from last mini-batch
n_images = list(image.shape)[0]
image_grid = []
for i in range(n_images):
img = inv_normalize(image[i, :]).permute(1, 2, 0).cpu().numpy()
out = decode_segmap(output["out"][i, :].max(0)[1].cpu().numpy())
tgt = decode_segmap(target[i, :].cpu().numpy())
image_grid.append([img, out, tgt])
fig = grid_plot(image_grid)
self.writer.add_figure("validation/visualize", fig, epoch)
loss_metric.reset()
iou_metric.reset()
def train(self, **kwargs):
config = self.config
train_dataset = Dataset.create(
config.train.dataset,
split="train",
data_root=config.system.data_root,
transforms=self._transforms(
is_train=True, crop_size=config.train.crop_size
),
)
train_loader = create_loader(
train_dataset,
batch_size=config.train.batch_size,
num_workers=config.system.workers,
dryrun=config.system.dryrun,
)
val_dataset = Dataset.create(
config.val.dataset,
split="val",
data_root=config.system.data_root,
transforms=self._transforms(is_train=False),
)
val_loader = create_loader(
val_dataset,
batch_size=config.val.batch_size,
num_workers=config.system.workers,
dryrun=config.system.dryrun,
)
logger.info("Start training estimator: %s", type(self).__name__)
self.model.to(self.device)
n_epochs = config.train.epochs
val_interval = config.system.val_interval
for epoch in range(1, n_epochs + 1):
logger.info(f"Training Epoch[{epoch}/{n_epochs}]")
self._train_one_epoch(train_loader, epoch)
if epoch % val_interval == 0:
self._evaluate_one_epoch(val_loader, epoch)
self.checkpointer.save(self, epoch=epoch)
def evaluate(self, **kwargs):
config = self.config
test_dataset = Dataset.create(
config.test.dataset,
split="test",
data_root=config.system.data_root,
transforms=self._transforms(is_train=False),
)
test_loader = create_loader(
test_dataset,
batch_size=config.test.batch_size,
num_workers=config.system.workers,
dryrun=config.system.dryrun,
)
logger.info("Start evaluating estimator: %s", type(self).__name__)
self.model.to(self.device)
self._evaluate_one_epoch(test_loader, epoch=1)
def save(self, path):
""" Serialize Estimator to path
Args:
path (str): full path to save serialized estimator
Returns:
saved full path of the serialized estimator
"""
save_dict = {"model": self.model.state_dict(), "config": self.config}
torch.save(save_dict, path)
return path
def load(self, path):
""" Load Estimator from path
Args:
path (str): full path to the serialized estimator
"""
checkpoint = torch.load(path)
self.model.load_state_dict(checkpoint["model"])
loaded_config = copy.deepcopy(checkpoint["config"])
stored_config = copy.deepcopy(self.config)
del stored_config["checkpoint_file"]
del loaded_config["checkpoint_file"]
if stored_config != loaded_config:
logger.warning(
f"Found difference in estimator config."
f"Estimator loaded from {path} was trained using "
f"config: "
f"{loaded_config}. However, the current config is: "
f"{self.config}."
)
| [
"torch.save",
"torch.no_grad",
"torch.nn.functional.cross_entropy",
"torch.load",
"torch.optim.lr_scheduler.LambdaLR"
] | 1.4.0 | BlairLee/dataset-insights | 892e2ed3a2facf97cfa3a883700830d959a0c49b |
1.6 | import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
class ConditionalBatchNorm2d(nn.BatchNorm2d):
"""Conditional Batch Normalization"""
def __init__(self, num_features, eps=1e-05, momentum=0.1,
affine=False, track_running_stats=True):
super(ConditionalBatchNorm2d, self).__init__(
num_features, eps, momentum, affine, track_running_stats
)
def forward(self, input, weight, bias, **kwargs):
self._check_input_dim(input)
exponential_average_factor = 0.0
if self.training and self.track_running_stats:
self.num_batches_tracked += 1
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 /\
self.num_batches_tracked.item()
else: # use exponential moving average
exponential_average_factor = self.momentum
output = F.batch_norm(input, self.running_mean, self.running_var,
self.weight, self.bias,
self.training or not self.track_running_stats,
exponential_average_factor, self.eps)
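        # With the default affine=False the normalization above carries no learnable
        # scale/shift; the conditional affine transform is applied below by
        # broadcasting the supplied per-sample weight/bias over the (N, C, H, W) output.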
if weight.dim() == 1:
weight = weight.unsqueeze(0)
if bias.dim() == 1:
bias = bias.unsqueeze(0)
size = output.size()
weight = weight.unsqueeze(-1).unsqueeze(-1).expand(size)
bias = bias.unsqueeze(-1).unsqueeze(-1).expand(size)
return weight * output + bias
class CategoricalConditionalBatchNorm2d(ConditionalBatchNorm2d):
def __init__(self, num_classes, num_features, eps=1e-5, momentum=0.1,
affine=False, track_running_stats=True):
super(CategoricalConditionalBatchNorm2d, self).__init__(
num_features, eps, momentum, affine, track_running_stats
)
self.weights = nn.Embedding(num_classes, num_features)
self.biases = nn.Embedding(num_classes, num_features)
self._initialize()
def _initialize(self):
init.ones_(self.weights.weight.data)
init.zeros_(self.biases.weight.data)
def forward(self, input, c, **kwargs):
weight = self.weights(c)
bias = self.biases(c)
return super(CategoricalConditionalBatchNorm2d, self)\
.forward(input, weight, bias)
if __name__ == '__main__':
"""Forward computation check."""
import torch
size = (3, 3, 12, 12)
batch_size, num_features = size[:2]
print('# Affirm embedding output')
naive_bn = nn.BatchNorm2d(3)
idx_input = torch.tensor([1, 2, 0], dtype=torch.long)
embedding = nn.Embedding(3, 3)
weights = embedding(idx_input)
print('# weights size', weights.size())
empty = torch.tensor((), dtype=torch.float)
running_mean = empty.new_zeros((3,))
running_var = empty.new_ones((3,))
naive_bn_W = naive_bn.weight
input = torch.rand(*size, dtype=torch.float32)
print('input size', input.size())
print('input ndim ', input.dim())
_ = naive_bn(input)
print('# batch_norm with given weights')
try:
with torch.no_grad():
output = F.batch_norm(input, running_mean, running_var,
weights, naive_bn.bias, False, 0.0, 1e-05)
except Exception as e:
print("\tFailed to use given weights")
print('# Error msg:', e)
print()
else:
print("Succeeded to use given weights")
print('\n# Batch norm before use given weights')
with torch.no_grad():
tmp_out = F.batch_norm(input, running_mean, running_var,
naive_bn_W, naive_bn.bias, False, .0, 1e-05)
weights_cast = weights.unsqueeze(-1).unsqueeze(-1)
weights_cast = weights_cast.expand(tmp_out.size())
try:
out = weights_cast * tmp_out
except Exception:
print("Failed")
else:
print("Succeeded!")
print('\t {}'.format(out.size()))
print(type(tuple(out.size())))
print('--- condBN and catCondBN ---')
catCondBN = CategoricalConditionalBatchNorm2d(3, 3)
output = catCondBN(input, idx_input)
assert tuple(output.size()) == size
condBN = ConditionalBatchNorm2d(3)
idx = torch.tensor([1], dtype=torch.long)
out = catCondBN(input, idx)
print('cat cond BN weights\n', catCondBN.weights.weight.data)
print('cat cond BN biases\n', catCondBN.biases.weight.data)
| [
"torch.rand",
"torch.nn.functional.batch_norm",
"torch.nn.BatchNorm2d",
"torch.no_grad",
"torch.nn.init.ones_",
"torch.tensor",
"torch.nn.init.zeros_",
"torch.nn.Embedding"
] | 1.6.0 | tasbolat1/DGflow | 6ce22095a0d33f4da3c15f093aa365ba6cabbac9 |
1.7 | # -*- coding: utf-8 -*-
"""
Created on December 30, 2020
@author: Siqi Miao
"""
import torch
from torch_sparse import SparseTensor
import torch_geometric.transforms as T
from pathlib2 import Path
import scipy.io as sio
from sklearn.metrics import f1_score, accuracy_score
from sklearn.model_selection import train_test_split
from skmultilearn.model_selection import iterative_train_test_split
from ogb.nodeproppred import PygNodePropPredDataset, Evaluator
class Dataset(object):
def __init__(self, root, name, make_edge_index=False):
self.root = root
self.name = name
self.make_edge_index = make_edge_index
self.num_classes = None
self.split_idx = None
self.x = None
self.y = None
self.adj_t = None
self.edge_index = None
self.num_nodes = None
self.criterion = None
self.metric = None
self.heterophily_dataset = ['chameleon', 'actor']
if name == 'ogb':
self.setup_ogb()
elif name == 'wiki':
self.setup_wiki()
elif name in self.heterophily_dataset:
self.setup_geom()
else:
            raise ValueError('Unknown dataset name: {}'.format(name))
def setup_ogb(self):
dataset = PygNodePropPredDataset(name='ogbn-arxiv', root=self.root, transform=T.ToSparseTensor())
data = dataset[0]
self.metric = 'Accuracy'
self.num_classes = dataset.num_classes
self.split_idx = dataset.get_idx_split()
self.x = data.x
self.y = data.y
self.adj_t = data.adj_t.to_symmetric()
self.num_nodes = data.num_nodes
if self.make_edge_index:
row = self.adj_t.storage.row()
col = self.adj_t.storage.col()
self.edge_index = torch.stack((row, col), dim=0)
self.criterion = torch.nn.CrossEntropyLoss()
def setup_wiki(self):
mat = sio.loadmat(self.root / 'wiki' / 'POS.mat')
self.metric = 'MicroF1'
self.num_nodes = 4777
self.num_classes = 40
adj_t = mat['network'].tocoo()
self.adj_t = SparseTensor(row=torch.LongTensor(adj_t.row), col=torch.LongTensor(adj_t.col),
sparse_sizes=(self.num_nodes, self.num_nodes))
if self.make_edge_index:
row = self.adj_t.storage.row()
col = self.adj_t.storage.col()
self.edge_index = torch.stack((row, col), dim=0)
self.y = torch.from_numpy(mat['group'].todense()).float()
idx = torch.arange(self.y.shape[0]).view(-1, 1)
train_idx, _, test_idx, _ = iterative_train_test_split(idx, self.y, test_size=0.1)
self.split_idx = {'train': train_idx.view(-1), 'valid': test_idx.view(-1), 'test': test_idx.view(-1)}
self.criterion = torch.nn.BCEWithLogitsLoss() # for multi-label classification
def setup_geom(self):
edge_file = self.root / self.name / 'out1_graph_edges.txt'
feature_label_file = self.root / self.name / 'out1_node_feature_label.txt'
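# Expected file layout (inferred from the parsing below; both files start with a header line that is skipped):
#   out1_graph_edges.txt:        one '<src>\t<dst>' edge per line
#   out1_node_feature_label.txt: one '<node_id>\t<comma-separated features>\t<label>' per line
#   (for 'actor', the feature field lists the indices of the non-zero entries of a 932-dim one-hot vector)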
self.metric = 'Accuracy'
edges = edge_file.open('r').readlines()[1:]
edges = torch.LongTensor([(lambda x: [int(x[0]), int(x[1])])(edge.strip().split('\t')) for edge in edges])
self.num_nodes = torch.max(edges).item() + 1
self.adj_t = SparseTensor(row=torch.LongTensor(edges[:, 0]), col=torch.LongTensor(edges[:, 1]),
sparse_sizes=(self.num_nodes, self.num_nodes))
# self.adj_t = self.adj_t.to_symmetric()
if self.make_edge_index:
self.edge_index = edges.t()
idx = []
x = []
y = []
xy = feature_label_file.open('r').readlines()[1:]
for line in xy:
node_id, feature, label = line.strip().split('\t')
idx.append(int(node_id))
if self.name == 'actor':
one_hot = torch.zeros(932)
pos_with_ones = list(map(int, feature.split(',')))
one_hot[pos_with_ones] = 1
x.append(one_hot.int().tolist())
else:
x.append(list(map(int, feature.split(','))))
y.append(int(label))
_, indices = torch.sort(torch.LongTensor(idx))
self.x = torch.LongTensor(x)[indices]
self.y = torch.LongTensor(y).view(-1, 1)[indices]
self.num_classes = torch.max(self.y).item() + 1
idx = torch.arange(self.y.shape[0]).view(-1, 1)
train_idx, val_test_idx = train_test_split(idx, test_size=0.4, stratify=self.y)
val_idx, test_idx = train_test_split(val_test_idx, test_size=0.5, stratify=self.y[val_test_idx.squeeze()])
self.split_idx = {'train': train_idx.view(-1), 'valid': val_idx.view(-1), 'test': test_idx.view(-1)}
self.criterion = torch.nn.CrossEntropyLoss()
def eval(self, y_true, logits, split_idx):
if self.name == 'ogb':
evaluator = Evaluator(name='ogbn-arxiv')
y_pred = logits.argmax(dim=1, keepdim=True)
train_acc = evaluator.eval({
'y_true': y_true[split_idx['train']],
'y_pred': y_pred[split_idx['train']],
})['acc']
valid_acc = evaluator.eval({
'y_true': y_true[split_idx['valid']],
'y_pred': y_pred[split_idx['valid']],
})['acc']
test_acc = evaluator.eval({
'y_true': y_true[split_idx['test']],
'y_pred': y_pred[split_idx['test']],
})['acc']
return train_acc, valid_acc, test_acc
elif self.name == 'wiki':
y_pred = torch.sigmoid(logits) > 0.5
train_f1 = f1_score(y_true[split_idx['train']], y_pred[split_idx['train']], average='micro')
valid_f1 = f1_score(y_true[split_idx['valid']], y_pred[split_idx['valid']], average='micro')
test_f1 = f1_score(y_true[split_idx['test']], y_pred[split_idx['test']], average='micro')
return train_f1, valid_f1, test_f1
elif self.name in self.heterophily_dataset:
y_pred = logits.argmax(dim=1, keepdim=True)
train_acc = accuracy_score(y_true[split_idx['train']], y_pred[split_idx['train']])
valid_acc = accuracy_score(y_true[split_idx['valid']], y_pred[split_idx['valid']])
test_acc = accuracy_score(y_true[split_idx['test']], y_pred[split_idx['test']])
return train_acc, valid_acc, test_acc
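# Hedged usage sketch (illustrative only): given model logits of shape (num_nodes, num_classes),
# per-split metrics can be computed as
#   train_m, valid_m, test_m = data.eval(data.y, logits, data.split_idx)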
if __name__ == '__main__':
data = Dataset(root=Path('../dataset'), name='ogb', make_edge_index=True)
| [
"torch.zeros",
"torch.sigmoid",
"torch.stack",
"torch.arange",
"torch.max",
"torch.LongTensor",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.CrossEntropyLoss"
] | 1.7.1 | siqim/Machine-Learning-with-Graphs | 697d83bb206be0825ebaf0dad128b9eb24908705 |
1.7 | r"""
vanilla pseudo-labeling implementation
"""
from collections import defaultdict
from alr.utils import timeop, manual_seed
from alr.data.datasets import Dataset
from alr.data import UnlabelledDataset
from alr.training import VanillaPLTrainer
from alr.training.samplers import RandomFixedLengthSampler
from alr import MCDropout
import pickle
import numpy as np
import torch
import torch.utils.data as torchdata
from torch.nn import functional as F
from pathlib import Path
if __name__ == "__main__":
manual_seed(42)
kwargs = dict(num_workers=4, pin_memory=True)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
sizes = np.arange(20, 260, 10)
N = len(sizes)
# validation dataset size
VAL_SIZE = 5_000
# according to the paper:
BATCH_SIZE = 32
UNLABELLED_BATCH_SIZE = 256
# at least prolong the epoch to have this many points (see RandomFixedLengthSampler)
MIN_TRAIN_SIZE = 12_500
# well, early stopping should kick-in before then.
EPOCHS = 200
REPEATS = 6
# paths
pl_metrics = Path("pl_metrics")
metrics = Path("metrics")
saved_models = Path("saved_models")
metrics.mkdir()
saved_models.mkdir()
log_every = 2
accs = defaultdict(list)
for r in range(1, REPEATS + 1):
for i, n in enumerate(sizes, 1):
train, test = Dataset.MNIST.get()
train, pool = torchdata.random_split(train, (n, len(train) - n))
pool, val = torchdata.random_split(pool, (len(pool) - VAL_SIZE, VAL_SIZE))
pool = UnlabelledDataset(pool, debug=True)
model = MCDropout(Dataset.MNIST.model, forward=20, fast=True).to(device)
print(f"=== Iteration {i} of {N} ({i/N:.2%}) ===")
print(f"\ttrain: {len(train)}; pool: {len(pool)}; test: {len(test)}")
if (i - 1) % log_every == 0 and r == 1:
pl_log = str(pl_metrics / f"dsize_{n}")
else:
pl_log = None
trainer = VanillaPLTrainer(
model,
labelled_loss=F.nll_loss,
unlabelled_loss=F.nll_loss,
optimiser="Adam",
patience=3,
reload_best=True,
track_pl_metrics=pl_log,
device=device,
)
train_loader = torchdata.DataLoader(
train,
batch_size=BATCH_SIZE,
sampler=RandomFixedLengthSampler(
train, length=MIN_TRAIN_SIZE, shuffle=True
),
**kwargs,
)
pool_loader = torchdata.DataLoader(
pool,
batch_size=UNLABELLED_BATCH_SIZE,
shuffle=True,
**kwargs,
)
val_loader = torchdata.DataLoader(
val,
batch_size=1024,
shuffle=False,
**kwargs,
)
test_loader = torchdata.DataLoader(
test,
batch_size=1024,
shuffle=False,
**kwargs,
)
with timeop() as t:
history = trainer.fit(
train_loader,
pool_loader,
val_loader,
epochs=EPOCHS,
)
test_metrics = trainer.evaluate(test_loader)
accs[n].append(test_metrics["acc"])
print(
f"\t[train] loss, acc: ({history['stage2']['train_loss'][-1]}, {history['stage2']['train_acc'][-1]})\n"
f"\t[test] loss, acc: ({test_metrics['loss']}, {test_metrics['acc']})\n"
f"\ttime: {t}"
)
if pl_log:
torch.save(
model.state_dict(),
saved_models / f"repeat_{r}_dsize_{n}_weights.pth",
)
payload = {
"history": history,
"test_metrics": test_metrics,
}
with open(metrics / f"repeat_{r}_dsize_{n}_metrics.pkl", "wb") as fp:
pickle.dump(payload, fp)
with open("accs.pkl", "wb") as fp:
pickle.dump(accs, fp)
| [
"torch.cuda.is_available",
"torch.utils.data.DataLoader"
] | 1.7.1 | jiahfong/alr | ee561c545bd98ec17c4f9c3040ef23b0222ef71a |
1.7 | r"""
From previous experiments, we saw that ephemeral pseudo-labelling helped boost accuracy
despite starting with only 20 points. We could kick-start BALD with 85% accuracy with 24 iterations
but it seems like using 80% accuracy at 10 iterations is a good trade-off. It's harder to gain more
accuracy as the number of iterations increases.
This experiment kick-starts BALD10 acquisition by warming the model to 80% accuracy (with 10 iterations
of ephemeral pseudo-labelling). However, the acquisition loop will NOT run ephemeral P.L. as we've seen
a decrease in performance when doing so. There are two possibilities: (1) warm-starting the model
has caused it to lower its entropy on the pool dataset, hence causing it to actually perform worse.
(2) warm-starting it actually helped! my bet is (unfortunately) on the former, given previous observations
(i.e. ephemeral bald10 performs worse than bald10 -- but i'm hopeful, notwithstanding.).
"""
from collections import defaultdict
from alr.utils import manual_seed, eval_fwd_exp, timeop
from alr.acquisition import BALD
from alr import MCDropout
from alr.data.datasets import Dataset
from alr.training.samplers import RandomFixedLengthSampler
from alr.data import UnlabelledDataset, DataManager
from alr.training import Trainer
from alr.training.repeated_acquisition_utils import (
get_confident_indices,
RelabelledDataset,
)
import torch
import torch.utils.data as torchdata
import pickle
from torch.nn import functional as F
from pathlib import Path
def main(b, threshold, warm_start_iters, log_every):
manual_seed(42)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
kwargs = dict(num_workers=4, pin_memory=True)
# --- constants ---
BATCH_SIZE = 64
EPOCHS = 200
REPS = 6
ITERS = 23
# +1 because of the structure of our loop
warm_start_iters += 1
VAL_SIZE = 5_000
MIN_TRAIN_LEN = 12_500
# --- setup ---
train, pool, test = Dataset.MNIST.get_fixed()
val, pool = torchdata.random_split(pool, (VAL_SIZE, len(pool) - VAL_SIZE))
pool = UnlabelledDataset(pool, debug=True)
model = MCDropout(Dataset.MNIST.model, forward=20, fast=True).to(device)
bald = BALD(eval_fwd_exp(model), device=device, batch_size=1024, **kwargs)
dm = DataManager(train, pool, bald)
val_loader = torchdata.DataLoader(
val,
batch_size=1024,
shuffle=False,
**kwargs,
)
test_loader = torchdata.DataLoader(
test,
batch_size=1024,
shuffle=False,
**kwargs,
)
warm_start_accs = []
accs = defaultdict(list)
template = f"wsi={warm_start_iters}_b={b}_thresh={threshold}"
pl_metrics = Path("pl_metrics") / template
metrics = Path("metrics") / template
saved_models = Path("saved_models") / template
metrics.mkdir(parents=True)
saved_models.mkdir(parents=True)
for r in range(1, REPS + 1):
print(f"- Repeat {r} of {REPS} -")
dm.reset()
ws_accs_r = {}
# store temporarily labelled points (will be union-ed with the training dataset)
pseudo_labelled_points = None
for i in range(1, warm_start_iters + 1):
if pseudo_labelled_points is not None:
full_train_dataset = torchdata.ConcatDataset(
(dm.labelled, pseudo_labelled_points)
)
else:
full_train_dataset = dm.labelled
train_length = len(full_train_dataset)
print(
f"=== Warm start iteration {i} of {warm_start_iters} ({i / warm_start_iters:.2%}) ==="
)
print(
f"\ttrain: {train_length}; "
f"pool: {dm.n_unlabelled}; "
f"val: {len(val)}; "
f"test: {len(test)}"
)
model.reset_weights()
# -- stage 1: train --
trainer = Trainer(
model, F.nll_loss, "Adam", patience=3, reload_best=True, device=device
)
train_loader = torchdata.DataLoader(
full_train_dataset,
batch_size=BATCH_SIZE,
sampler=RandomFixedLengthSampler(
full_train_dataset, MIN_TRAIN_LEN, shuffle=True
),
**kwargs,
)
with timeop() as t:
history = trainer.fit(train_loader, val_loader, epochs=EPOCHS)
test_metrics = trainer.evaluate(test_loader)
ws_accs_r[train_length] = test_metrics["acc"]
print(
f"\t[test] loss, acc: ({test_metrics['loss']:.4f}, {test_metrics['acc']:.4f}); time: {t}"
)
with open(
metrics / f"repeat_{r}_dsize_{train_length}_metrics.pkl", "wb"
) as fp:
payload = {
"history": history,
"test_metrics": test_metrics,
}
pickle.dump(payload, fp)
if (i - 1) % log_every == 0:
torch.save(
model.state_dict(),
saved_models / f"repeat_{r}_dsize_{train_length}_weights.pth",
)
# skip if this is the last iteration
if i == warm_start_iters:
accs[dm.n_labelled].append(test_metrics["acc"])
continue
# -- stage 2: acquire more data into the training set --
# -- acquire using pseudo-labels --
dm.unlabelled.debug = True
idxs, plabs = get_confident_indices(
model=model,
dataset=dm.unlabelled,
threshold=threshold,
root=((pl_metrics / f"repeat_{r}") if r == 1 else None),
step=i,
device=device,
**kwargs,
)
if idxs.shape[0]:
truth = torchdata.Subset(dm.unlabelled, idxs)
# replace true labels with pseudo-labels
pseudo_labelled_points = RelabelledDataset(truth, plabs)
assert len(pseudo_labelled_points) == idxs.shape[0]
else:
print(
f"\tSelf-labelling didn't happen because none of the pseudo-labels are confident enough."
)
warm_start_accs.append(ws_accs_r)
dm.unlabelled.debug = False
print(
f"Warm-started with {warm_start_iters} iterations. Beginning AL acquisitions"
)
for i in range(1, ITERS + 1):
dm.acquire(b=b)
print(f"=== Iteration {i} of {ITERS} ({i / ITERS:.2%}) ===")
print(
f"\ttrain: {dm.n_labelled}; val: {len(val)}; "
f"pool: {dm.n_unlabelled}; test: {len(test)}"
)
# model.reset_weights() # leverage p.l. from before, DON'T reset!
trainer = Trainer(
model,
F.nll_loss,
optimiser="Adam",
patience=3,
reload_best=True,
device=device,
)
train_loader = torchdata.DataLoader(
dm.labelled,
batch_size=BATCH_SIZE,
sampler=RandomFixedLengthSampler(
dm.labelled, MIN_TRAIN_LEN, shuffle=True
),
**kwargs,
)
with timeop() as t:
trainer.fit(train_loader, val_loader, epochs=EPOCHS)
test_metric = trainer.evaluate(test_loader)
print(f"\t[test] acc: {test_metric['acc']}, time: {t}")
accs[dm.n_labelled].append(test_metric["acc"])
with open(f"{template}_warm_start_accs.pkl", "wb") as fp:
pickle.dump(warm_start_accs, fp)
with open(f"{template}_accs.pkl", "wb") as fp:
pickle.dump(accs, fp)
if __name__ == "__main__":
main(b=10, threshold=0.9, warm_start_iters=10, log_every=2)
| [
"torch.utils.data.ConcatDataset",
"torch.cuda.is_available",
"torch.utils.data.Subset",
"torch.utils.data.DataLoader"
] | 1.7.1 | jiahfong/alr | ee561c545bd98ec17c4f9c3040ef23b0222ef71a |
1.7 | from alr.training.pl_mixup import PLMixupTrainer, temp_ds_transform
from alr.utils import manual_seed, timeop
from alr.data.datasets import Dataset
from alr.data import DataManager, UnlabelledDataset
from alr.training.utils import PLPredictionSaver
from alr import ALRModel
from alr.acquisition import AcquisitionFunction
from batchbald_redux.batchbald import get_batchbald_batch
import torch
import pickle
import torch.utils.data as torchdata
import torchvision as tv
import numpy as np
from collections import defaultdict
from ignite.engine import create_supervised_evaluator
from pathlib import Path
from torch import nn
class TemporalBatchBALD(AcquisitionFunction):
def __init__(self):
super(TemporalBatchBALD, self).__init__()
self.labels_E_N_C: torch.Tensor = None
self.recent_score = None
def __call__(self, X_pool: torchdata.Dataset, b: int):
pool_size = len(X_pool)
mc_preds = self._labels.permute((1, 0, 2)) # self.labels_E_N_C.double()
candidate_batch = get_batchbald_batch(
mc_preds,
batch_size=b,
num_samples=10_000,
dtype=torch.double,
device="cuda:0",
)
scores = np.array(candidate_batch.scores)
indices = candidate_batch.indices
assert scores.shape == (b,)
assert np.isfinite(scores).all()
self.recent_score = scores
return np.array(indices[:b])
@property
def _labels(self):
E = self.labels_E_N_C.shape[0]
# use the most recent 20% of the E snapshots, but have at least 10 of them
# even if 20% of E is less than 10. If E is less than 10, then take everything (E)
e = max(min(10, E), int(E * 0.2))
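# e.g. (illustrative): E=30 -> e=max(10, 6)=10; E=120 -> e=max(10, 24)=24; E=5 -> e=max(5, 1)=5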
return self.labels_E_N_C[-e:].double()
class Net(ALRModel):
def __init__(self, model):
super(Net, self).__init__()
self.model = model
self.snap()
def forward(self, x):
return self.model(x)
def calc_calib_metrics(loader, model: nn.Module, log_dir, device):
evaluator = create_supervised_evaluator(model, metrics=None, device=device)
pds = PLPredictionSaver(log_dir)
pds.attach(evaluator)
evaluator.run(loader)
def main( # acq_name: str,
alpha: float, b: int, augment: bool, iters: int, repeats: int
):
acq_name = "tbbald"
manual_seed(42)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
kwargs = dict(num_workers=4, pin_memory=True)
# ========= CONSTANTS ===========
BATCH_SIZE = 100
# at least have this many points in one epoch (see RandomFixedLengthSampler)
MIN_TRAIN_LENGTH = 20_000
VAL_SIZE = 5_000
MIN_LABELLED = 16
# stage 1 and stage 2 patience
PATIENCE = (5, 25)
# how many epochs before LR is reduced
LR_PATIENCE = 10
# stage 1 and stage 2 of PL mixup training
EPOCHS = (100, 400)
REPEATS = repeats
ITERS = iters
# ========= SETUP ===========
train, pool, test = Dataset.CIFAR10.get_fixed(raw=True)
pool, val = torchdata.random_split(pool, (len(pool) - VAL_SIZE, VAL_SIZE))
pool = UnlabelledDataset(pool)
test_loader = torchdata.DataLoader(
test,
batch_size=512,
shuffle=False,
**kwargs,
)
train_transform = test_transform = tv.transforms.Compose(
[
tv.transforms.ToTensor(),
tv.transforms.Normalize(*Dataset.CIFAR10.normalisation_params),
]
)
test_ds_transform = temp_ds_transform(test_transform)
if augment:
data_augmentation = Dataset.CIFAR10.get_augmentation
else:
data_augmentation = None
accs = defaultdict(list)
template = f"{acq_name}_{b}_alpha_{alpha}" + ("_aug" if augment else "")
metrics = Path("metrics") / template
calib_metrics = Path("calib_metrics") / template
saved_models = Path("saved_models") / template
metrics.mkdir(parents=True)
calib_metrics.mkdir(parents=True)
saved_models.mkdir(parents=True)
bald_scores = None
# since we need to know which points were taken for val dataset
with open(metrics / "pool_idxs.pkl", "wb") as fp:
pickle.dump(pool._dataset.indices, fp)
for r in range(1, REPEATS + 1):
print(f"- [{acq_name} (b={b})] repeat #{r} of {REPEATS}-")
model = Net(Dataset.CIFAR10.model).to(device)
acq_fn = TemporalBatchBALD()
dm = DataManager(train, pool, acq_fn)
dm.reset() # this resets pool
for i in range(1, ITERS + 1):
model.reset_weights()
trainer = PLMixupTrainer(
model,
"SGD",
train_transform,
test_transform,
{"lr": 0.1, "momentum": 0.9, "weight_decay": 1e-4},
kwargs,
log_dir=None,
rfls_length=MIN_TRAIN_LENGTH,
alpha=alpha,
min_labelled=MIN_LABELLED,
data_augmentation=data_augmentation,
batch_size=BATCH_SIZE,
patience=PATIENCE,
lr_patience=LR_PATIENCE,
device=device,
)
with dm.unlabelled.tmp_debug():
with timeop() as t:
history = trainer.fit(
dm.labelled, val, dm.unlabelled, epochs=EPOCHS
)
acq_fn.labels_E_N_C = trainer.soft_label_history
# eval
test_metrics = trainer.evaluate(test_loader)
print(f"=== Iteration {i} of {ITERS} ({i / ITERS:.2%}) ===")
print(
f"\ttrain: {dm.n_labelled}; val: {len(val)}; "
f"pool: {dm.n_unlabelled}; test: {len(test)}"
)
print(f"\t[test] acc: {test_metrics['acc']:.4f}, time: {t}")
accs[dm.n_labelled].append(test_metrics["acc"])
# save stuff
# pool calib
with dm.unlabelled.tmp_debug():
pool_loader = torchdata.DataLoader(
temp_ds_transform(test_transform, with_targets=True)(dm.unlabelled),
batch_size=512,
shuffle=False,
**kwargs,
)
calc_calib_metrics(
pool_loader,
model,
calib_metrics / "pool" / f"rep_{r}" / f"iter_{i}",
device=device,
)
calc_calib_metrics(
test_loader,
model,
calib_metrics / "test" / f"rep_{r}" / f"iter_{i}",
device=device,
)
with open(metrics / f"rep_{r}_iter_{i}.pkl", "wb") as fp:
payload = {
"history": history,
"test_metrics": test_metrics,
"labelled_classes": dm.unlabelled.labelled_classes,
"labelled_indices": dm.unlabelled.labelled_indices,
"bald_scores": bald_scores,
}
pickle.dump(payload, fp)
torch.save(model.state_dict(), saved_models / f"rep_{r}_iter_{i}.pt")
# flush results frequently for the impatient
with open(template + "_accs.pkl", "wb") as fp:
pickle.dump(accs, fp)
# finally, acquire points
# transform pool samples to tensors and normalise them (since we used raw data above!)
acquired_idxs, _ = dm.acquire(b=b, transform=test_ds_transform)
# if bald, store ALL bald scores and the acquired idx so we can map the top b scores
# to the b acquired_idxs
# acquired_idxs has the top b scores from recent_score
bald_scores = (acquired_idxs, acq_fn.recent_score)
if __name__ == "__main__":
import argparse
args = argparse.ArgumentParser()
args.add_argument("--alpha", type=float, default=0.4)
args.add_argument("--b", default=10, type=int, help="Batch acq size (default = 10)")
args.add_argument("--augment", action="store_true")
args.add_argument("--iters", default=199, type=int)
args.add_argument("--reps", default=1, type=int)
args = args.parse_args()
main(
alpha=args.alpha,
b=args.b,
augment=args.augment,
iters=args.iters,
repeats=args.reps,
)
| [
"torch.cuda.is_available",
"torch.utils.data.DataLoader"
] | 1.7.1 | jiahfong/alr | ee561c545bd98ec17c4f9c3040ef23b0222ef71a |
1.4 | '''
Code courtesy of Ben Feinstein & Assaf Shocher
Please see their work:
https://github.com/assafshocher/PyTorch-Resizer
https://github.com/feinsteinben
'''
import numpy as np
import torch
from math import pi
from torch import nn
class Resizer(nn.Module):
def __init__(self, in_shape, scale_factor=None, output_shape=None, kernel=None, antialiasing=True, device=None, dtype=None):
super(Resizer, self).__init__()
# First standardize values and fill missing arguments (if needed) by deriving scale from output shape or vice versa
scale_factor, output_shape = self.fix_scale_and_size(in_shape, output_shape, scale_factor)
self.device = device
# Choose interpolation method, each method has the matching kernel size
method, kernel_width = {
"cubic": (cubic, 4.0),
"lanczos2": (lanczos2, 4.0),
"lanczos3": (lanczos3, 6.0),
"box": (box, 1.0),
"linear": (linear, 2.0),
None: (cubic, 6.0) # set default interpolation method as cubic
}.get(kernel)
# Antialiasing is only used when downscaling
antialiasing *= (np.any(np.array(scale_factor) < 1))
# Sort indices of dimensions according to the scale of each dimension. Since we are going dim by dim this is efficient
sorted_dims = np.argsort(np.array(scale_factor))
self.sorted_dims = [int(dim) for dim in sorted_dims if scale_factor[dim] != 1]
# Iterate over dimensions to calculate local weights for resizing and resize each time in one direction
field_of_view_list = []
weights_list = []
for dim in self.sorted_dims:
# for each coordinate (along 1 dim), calculate which coordinates in the input image affect its result and the
# weights that multiply the values there to get its result.
weights, field_of_view = self.contributions(in_shape[dim], output_shape[dim], scale_factor[dim], method, kernel_width, antialiasing)
# convert to torch tensor
if dtype is not None:
weights = torch.tensor(weights.T, dtype=dtype, device=device)
else:
weights = torch.tensor(weights.T, dtype=torch.float32, device=device)
# We add singleton dimensions to the weight matrix so we can multiply it with the big tensor we get for
# tmp_im[field_of_view.T], (bsxfun style)
weights_list.append(nn.Parameter(torch.reshape(weights, list(weights.shape) + (len(scale_factor) - 1) * [1]), requires_grad=False))
field_of_view_list.append(nn.Parameter(torch.tensor(field_of_view.T.astype(np.int32), dtype=torch.long, device=device), requires_grad=False))
self.field_of_view = nn.ParameterList(field_of_view_list)
self.weights = nn.ParameterList(weights_list)
self.in_shape = in_shape
def forward(self, in_tensor):
x = in_tensor
# make sure input is in the correct size
assert list(self.in_shape[1:]) == list(x.shape[1:]), 'wrong input shape: %s, expected %s' % (str(x.shape), str(self.in_shape))
# Use the affecting position values and the set of weights to calculate the result of resizing along this 1 dim
for dim, fov, w in zip(self.sorted_dims, self.field_of_view, self.weights):
# To be able to act on each dim, we swap so that dim 0 is the wanted dim to resize
x = torch.transpose(x, dim, 0)
# This is a bit of a complicated multiplication: x[field_of_view] is a tensor of order image_dims+1.
# for each pixel in the output-image it matches the positions the influence it from the input image (along 1 dim
# only, this is why it only adds 1 dim to the shape). We then multiply, for each pixel, its set of positions with
# the matching set of weights. we do this by this big tensor element-wise multiplication (MATLAB bsxfun style:
# matching dims are multiplied element-wise while singletons mean that the matching dim is all multiplied by the
# same number
x = torch.sum(x[fov] * w, dim=0).to(self.device)
# Finally we swap back the axes to the original order
x = torch.transpose(x, dim, 0)
return x
def fix_scale_and_size(self, input_shape, output_shape, scale_factor):
# First fix the scale-factor (if given) to the standardized form the function expects (a list of scale factors of the
# same size as the number of input dimensions)
if scale_factor is not None:
# By default, if scale-factor is a scalar we assume 2d resizing and duplicate it.
if np.isscalar(scale_factor) and len(input_shape) > 1:
scale_factor = [scale_factor, scale_factor]
# We extend the size of scale-factor list to the size of the input by assigning 1 to all the unspecified scales
scale_factor = list(scale_factor)
scale_factor = [1] * (len(input_shape) - len(scale_factor)) + scale_factor
# Fixing output-shape (if given): extending it to the size of the input-shape, by assigning the original input-size
# to all the unspecified dimensions
if output_shape is not None:
output_shape = list(input_shape[len(output_shape):]) + list(np.uint(np.array(output_shape)))
# Dealing with the case of a non-given scale-factor, calculating according to output-shape. Note that this is
# sub-optimal, because there can be different scales to the same output-shape.
if scale_factor is None:
scale_factor = np.array(output_shape) / np.array(input_shape)
# Dealing with missing output-shape. calculating according to scale-factor
if output_shape is None:
output_shape = np.uint(np.ceil(np.array(input_shape) * np.array(scale_factor)))
return scale_factor, output_shape
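# e.g. (illustrative): input_shape=(1, 3, 64, 64) with a scalar scale_factor=0.5 becomes
# scale_factor=[1, 1, 0.5, 0.5] and output_shape=[1, 3, 32, 32]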
def contributions(self, in_length, out_length, scale, kernel, kernel_width, antialiasing):
# This function calculates a set of 'filters' and a set of field_of_view that will later on be applied
# such that each position from the field_of_view will be multiplied with a matching filter from the
# 'weights' based on the interpolation method and the distance of the sub-pixel location from the pixel centers
# around it. This is only done for one dimension of the image.
# When anti-aliasing is activated (default and only for downscaling) the receptive field is stretched to size of
# 1/sf. This means the filtering acts more like a low-pass filter.
fixed_kernel = (lambda arg: scale * kernel(scale * arg)) if antialiasing and scale < 1.0 else kernel
kernel_width *= 1.0 / scale if antialiasing and scale < 1.0 else 1.0
# These are the coordinates of the output image
out_coordinates = np.arange(1, out_length+1)
# Since both scale-factor and output size can be provided simultaneously, preserving the center of the image requires shifting
# the output coordinates. The deviation arises because out_length doesn't necessarily equal in_length*scale.
# To keep the center we need to subtract half of this deviation so that we get equal margins on both sides and the center is preserved.
shifted_out_coordinates = out_coordinates - (out_length - in_length*scale)/2
# These are the matching positions of the output-coordinates on the input image coordinates.
# Best explained by example: say we have 4 horizontal pixels for HR and we downscale by SF=2 and get 2 pixels:
# [1,2,3,4] -> [1,2]. Remember each pixel number is the middle of the pixel.
# The scaling is done between the distances and not pixel numbers (the right boundary of pixel 4 is transformed to
# the right boundary of pixel 2. pixel 1 in the small image matches the boundary between pixels 1 and 2 in the big
# one and not to pixel 2. This means the position is not just multiplication of the old pos by scale-factor).
# So if we measure distance from the left border, middle of pixel 1 is at distance d=0.5, border between 1 and 2 is
# at d=1, and so on (d = p - 0.5). we calculate (d_new = d_old / sf) which means:
# (p_new-0.5 = (p_old-0.5) / sf) -> p_new = p_old/sf + 0.5 * (1-1/sf)
match_coordinates = shifted_out_coordinates / scale + 0.5 * (1 - 1 / scale)
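# Worked example (illustrative): with in_length=4, out_length=2, scale=0.5 the output pixel centers [1, 2]
# map to match_coordinates = [1.5, 3.5], i.e. the boundaries between input pixels 1-2 and 3-4, as described above.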
# This is the left boundary to start multiplying the filter from, it depends on the size of the filter
left_boundary = np.floor(match_coordinates - kernel_width / 2)
# The kernel width needs to be enlarged because when the covered region has sub-pixel borders, it must 'see' the pixel centers
# of the pixels it only partially covers. So we add one pixel at each side (their weights can be zeroed out)
expanded_kernel_width = np.ceil(kernel_width) + 2
# Determine a set of field_of_view for each output position, these are the pixels in the input image
# that the pixel in the output image 'sees'. We get a matrix whose horizontal dim is the output pixels (big) and the
# vertical dim is the pixels it 'sees' (kernel_size + 2)
field_of_view = np.squeeze(np.int16(np.expand_dims(left_boundary, axis=1) + np.arange(expanded_kernel_width) - 1))
# Assign a weight to each pixel in the field of view. A matrix whose horizontal dim is the output pixels and the
# vertical dim is a list of weights matching to the pixel in the field of view (that are specified in
# 'field_of_view')
weights = fixed_kernel(1.0 * np.expand_dims(match_coordinates, axis=1) - field_of_view - 1)
# Normalize weights to sum up to 1. Be careful not to divide by 0
sum_weights = np.sum(weights, axis=1)
sum_weights[sum_weights == 0] = 1.0
weights = 1.0 * weights / np.expand_dims(sum_weights, axis=1)
# We use this mirror structure as a trick for reflection padding at the boundaries
mirror = np.uint(np.concatenate((np.arange(in_length), np.arange(in_length - 1, -1, step=-1))))
field_of_view = mirror[np.mod(field_of_view, mirror.shape[0])]
# Get rid of weights and pixel positions that are of zero weight
non_zero_out_pixels = np.nonzero(np.any(weights, axis=0))
weights = np.squeeze(weights[:, non_zero_out_pixels])
field_of_view = np.squeeze(field_of_view[:, non_zero_out_pixels])
# Final products are the relative positions and the matching weights, both are output_size X fixed_kernel_size
return weights, field_of_view
# These next functions are all interpolation methods. x is the distance from the left pixel center
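# (The 'cubic' kernel below appears to be the Keys bicubic kernel with a = -0.5, i.e. the same kernel used by MATLAB's imresize.)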
def cubic(x):
absx = np.abs(x)
absx2 = absx ** 2
absx3 = absx ** 3
return ((1.5*absx3 - 2.5*absx2 + 1) * (absx <= 1) +
(-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * ((1 < absx) & (absx <= 2)))
def lanczos2(x):
return (((np.sin(pi*x) * np.sin(pi*x/2) + np.finfo(np.float32).eps) /
((pi**2 * x**2 / 2) + np.finfo(np.float32).eps))
* (abs(x) < 2))
def box(x):
return ((-0.5 <= x) & (x < 0.5)) * 1.0
def lanczos3(x):
return (((np.sin(pi*x) * np.sin(pi*x/3) + np.finfo(np.float32).eps) /
((pi**2 * x**2 / 3) + np.finfo(np.float32).eps))
* (abs(x) < 3))
def linear(x):
return (x + 1) * ((-1 <= x) & (x < 0)) + (1 - x) * ((0 <= x) & (x <= 1))
| [
"torch.nn.ParameterList",
"torch.sum",
"torch.tensor",
"torch.transpose"
] | 1.4.0 | eyalnaor/DeepTemporalSR | 7d8c821431dec3a4c480550c61a6033fcac5e640 |
1.7 | import numpy as np
import torch
from halite_rl.utils import SubProcessWrapper
class EpisodeData():
def __init__(self):
self.observations = [] # Observations (states).
self.actions = [] # Selected actions.
self.act_log_probs = [] # Log probability of selected action.
self.value_preds = [] # Value predictions given observation (from critic network).
self.rewards = [] # Rewards obtained in each step.
self.step_info = [] # Additional details about the step for logging purposes.
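# Note: after an episode is finalized in sample_batch below, all of these lists are expected to have the
# same length (one entry per environment step); the trailing observation that was never acted on is dropped
# when the episode is saved.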
def sample_batch(models, env_constructor, device, config):
"""Sample a batch of environment rollouts.
Parameters:
-----------
models : dict[str: nn.Module]
Dict mapping player_ids to actor-critic NN models.
config : dict
Config settings.
Returns:
--------
final_ep_datas : dict[str: list[EpisodeData]]
Maps each player_id to the list of completed EpisodeData rollouts collected for that player.
"""
# Initialize envs.
envs = [SubProcessWrapper(env_constructor) for _ in range(config["SAMPLE_PARALLEL_ENVS"])]
player_ids = list(models.keys())
# EpisodeData for in-progress episodes.
# ep_datas[i][p_id] references the EpisodeData for player p_id in the i'th env.
ep_datas = [{p_id: None for p_id in player_ids} for _ in envs]
# actions[i][p_id] references the action for player p_id in the i'th env.
actions = [{p_id: None for p_id in player_ids} for _ in envs]
num_steps = {p_id: 0 for p_id in player_ids}
# final_ep_datas[p_id][i] references the EpisodeData for the i'th episode collected for player p_id.
final_ep_datas = {p_id: [] for p_id in player_ids}
# While at least one player is below SAMPLE_MIN_NUM_STEPS.
while np.any(np.array([n for n in num_steps.values()]) < config["SAMPLE_MIN_NUM_STEPS"]):
# 1. Step all envs asynchronously.
# Keep a record of which envs were 'reset' and which were 'stepped' so that we
# know what return values to expect when we receive the results asynchronously.
env_was_reset = []
for i_env, env in enumerate(envs):
if not env.call_sync("is_in_progress"):
env_was_reset.append(True)
for p_id in player_ids:
ep_data = ep_datas[i_env][p_id]
# If this is not the very first iteration, then save the episode.
if ep_data is not None:
# Drop the last observation, as we never acted on it.
ep_data.observations = ep_data.observations[:len(ep_data.rewards)]
final_ep_datas[p_id].append(ep_data)
num_steps[p_id] += len(ep_data.rewards)
ep_datas[i_env] = {p_id: EpisodeData() for p_id in player_ids}
env.call_async("reset")
else:
env_was_reset.append(False)
actions = {p_id: ep_datas[i_env][p_id].actions[-1] for p_id in player_ids}
env.call_async("step", actions)
# 2. Receive results from async env steps.
for i_env, env in enumerate(envs):
if env_was_reset[i_env]:
obs = env.get_result()
for p_id in player_ids:
ep_datas[i_env][p_id].observations.append(obs[p_id])
else:
obs, rewards, dones, step_infos = env.get_result()
for p_id in player_ids:
ep_data = ep_datas[i_env][p_id]
ep_data.observations.append(obs[p_id])
ep_data.rewards.append(rewards[p_id])
# step_infos entry should already exist for this step.
ep_data.step_info[-1].update(step_infos[p_id])
# 3. Sample actions.
player_id_to_state_batch = {p_id: [] for p_id in player_ids}
for i_env, env in enumerate(envs):
for p_id in player_ids:
player_id_to_state_batch[p_id].append(ep_datas[i_env][p_id].observations[-1])
for p_id in player_ids:
model = models[p_id]
with torch.no_grad():
state_batch = np.array(player_id_to_state_batch[p_id])
state_batch = torch.Tensor(state_batch)
state_batch = state_batch.to(device)
ship_act_logits, shipyard_act_logits, value_preds = model(state_batch)
ship_action_dist, shipyard_action_dist = model.get_action_distribution(
ship_act_logits, shipyard_act_logits, state_batch)
ship_action = ship_action_dist.sample()
shipyard_action = shipyard_action_dist.sample()
ship_act_entropy = ship_action_dist.entropy()
shipyard_act_entropy = shipyard_action_dist.entropy()
action_log_prob = model.action_log_prob(
ship_action_dist,
shipyard_action_dist,
ship_action,
shipyard_action,
)
ship_action = ship_action.cpu().detach().numpy()
shipyard_action = shipyard_action.cpu().detach().numpy()
action_log_prob = action_log_prob.cpu().detach().numpy()
value_preds = value_preds.cpu().detach().numpy()
ship_act_entropy = ship_act_entropy.cpu().detach().numpy()
shipyard_act_entropy = shipyard_act_entropy.cpu().detach().numpy()
for i_env, env in enumerate(envs):
if env.call_sync("is_in_progress"):
ep_data = ep_datas[i_env][p_id]
ep_data.actions.append((
ship_action[i_env, ...],
shipyard_action[i_env, ...],
))
ep_data.act_log_probs.append(action_log_prob[i_env])
ep_data.value_preds.append(value_preds[i_env])
# Create step_info entry with info for step that hasn't happend (in env) yet.
ep_data.step_info.append(
{
"ship_action_dist_entropy": ship_act_entropy[i_env],
"shipyard_action_dist_entropy": shipyard_act_entropy[i_env],
}
)
# Close all envs
for e in envs:
e.close()
return final_ep_datas
| [
"torch.no_grad",
"torch.Tensor"
] | 1.7.0 | RyanJDick/halite_rl | e6309a24d3d613171ceb6522ddf07fece3815e62 |
0.4 | import torch
import numpy as np
def torch_nms(tlbr, scores, classes=None, thresh=.5, bias=0, fast=False):
"""
Non maximum suppression implemented with pytorch tensors
CURRENTLY NOT WORKING
Args:
tlbr (Tensor): Bounding boxes of one image in the format (tlbr)
scores (Tensor): Scores of each box
classes (Tensor, optional): the classes of each box. If specified nms is applied to each class separately.
thresh (float): iou threshold
Returns:
ByteTensor: keep: boolean array indicating which boxes were not pruned.
Example:
>>> # DISABLE_DOCTEST
>>> import torch
>>> import numpy as np
>>> tlbr = torch.FloatTensor(np.array([
>>> [0, 0, 100, 100],
>>> [100, 100, 10, 10],
>>> [10, 10, 100, 100],
>>> [50, 50, 100, 100],
>>> [100, 100, 130, 130],
>>> [100, 100, 130, 130],
>>> [100, 100, 130, 130],
>>> ], dtype=np.float32))
>>> scores = torch.FloatTensor(np.array([.1, .5, .9, .1, .3, .5, .4]))
>>> classes = torch.FloatTensor(np.array([0, 0, 0, 0, 0, 0, 0]))
>>> thresh = .5
>>> keep = torch_nms(tlbr, scores, classes, thresh)
>>> tlbr[keep]
Example:
>>> # DISABLE_DOCTEST
>>> import torch
>>> import numpy as np
>>> # Test to check that conflicts are correctly resolved
>>> tlbr = torch.FloatTensor(np.array([
>>> [100, 100, 150, 101],
>>> [120, 100, 180, 101],
>>> [150, 100, 200, 101],
>>> ], dtype=np.float32))
>>> scores = torch.FloatTensor(np.linspace(.8, .9, len(tlbr)))
>>> classes = None
>>> thresh = .3
>>> keep = torch_nms(tlbr, scores, classes, thresh, fast=False)
>>> tlbr[keep]
"""
if tlbr.numel() == 0:
return []
# Sort coordinates by descending score
ordered_scores, order = scores.sort(0, descending=True)
from netharn import util
boxes = util.Boxes(tlbr[order], 'tlbr')
ious = boxes.ious(boxes, bias=bias)
# if False:
# x1, y1, x2, y2 = tlbr[order].split(1, 1)
# # Compute dx and dy between each pair of boxes (these mat contain every pair twice...)
# dx = (x2.min(x2.t()) - x1.max(x1.t())).clamp_(min=0)
# dy = (y2.min(y2.t()) - y1.max(y1.t())).clamp_(min=0)
# # Compute iou
# intersections = dx * dy
# areas = (x2 - x1) * (y2 - y1)
# unions = (areas + areas.t()) - intersections
# ious = intersections / unions
# Filter based on iou (and class)
conflicting = (ious > thresh).triu(1)
if classes is not None:
ordered_classes = classes[order]
same_class = (ordered_classes.unsqueeze(0) == ordered_classes.unsqueeze(1))
conflicting = (conflicting & same_class)
# Now we have a 2D matrix where conflicting[i, j] indicates if box[i]
# conflicts with box[j]. For each box[i] we want to only keep the first
# one that does not conflict with any other box[j].
# Find out how many conflicts each ordered box has with other boxes that
# have higher scores than it does. In other words...
# n_conflicts[i] is the number of conflicts box[i] has with other boxes
# that have a **higher score** than box[i] does. We will definitely
# keep any box where n_conflicts is 0, but we need to postprocess because
# we might actually keep some boxes currently marked as conflicted.
n_conflicts = conflicting.sum(0).byte()
if not fast:
# It is not enough to simply use all places where there are no
# conflicts. Say we have boxes A, B, and C, where A conflicts with B,
# B conflicts with C but A does not conflict with C. The fact that we
# use A should mean that C is no longer conflicted.
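# Worked example (illustrative), matching the A/B/C case above with boxes already ordered by score (A, B, C):
#   conflicting = [[0, 1, 0],
#                  [0, 0, 1],
#                  [0, 0, 0]]  ->  n_conflicts = [0, 1, 1]
# The loop below keeps A, suppresses B, and then keeps C because its only conflict (B) was itself suppressed.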
if True:
# Marginally faster. best=618.2 us
ordered_keep = np.zeros(len(conflicting), dtype=np.uint8)
supress = np.zeros(len(conflicting), dtype=np.bool)
for i, row in enumerate(conflicting.cpu().numpy() > 0):
if not supress[i]:
ordered_keep[i] = 1
supress[row] = 1
ordered_keep = torch.ByteTensor(ordered_keep).to(tlbr.device)
else:
# Marginally slower: best=1.382 ms,
n_conflicts_post = n_conflicts.cpu()
conflicting = conflicting.cpu()
keep_len = len(n_conflicts_post) - 1
for i in range(1, keep_len):
if n_conflicts_post[i] > 0:
n_conflicts_post -= conflicting[i]
n_conflicts = n_conflicts_post.to(n_conflicts.device)
ordered_keep = (n_conflicts == 0)
else:
# Now we can simply keep any box that has no conflicts.
ordered_keep = (n_conflicts == 0)
# Unsort, so keep is aligned with input boxes
keep = ordered_keep.new(*ordered_keep.size())
keep.scatter_(0, order, ordered_keep)
return keep
def test_class_torch():
import numpy as np
import torch
import netharn as nh
import ubelt as ub
# from netharn.util.nms.torch_nms import torch_nms
# from netharn.util import non_max_supression
thresh = .5
num = 500
rng = nh.util.ensure_rng(0)
cpu_boxes = nh.util.Boxes.random(num, scale=400.0, rng=rng, format='tlbr', tensor=True)
cpu_tlbr = cpu_boxes.to_tlbr().data
# cpu_scores = torch.Tensor(rng.rand(len(cpu_tlbr)))
# make all scores unique to ensure comparability
cpu_scores = torch.Tensor(np.linspace(0, 1, len(cpu_tlbr)))
cpu_cls = torch.LongTensor(rng.randint(0, 10, len(cpu_tlbr)))
tlbr = cpu_boxes.to_tlbr().data.to('cuda')
scores = cpu_scores.to('cuda')
classes = cpu_cls.to('cuda')
keep1 = []
for idxs in ub.group_items(range(len(classes)), classes.cpu().numpy()).values():
# cls_tlbr = tlbr.take(idxs, axis=0)
# cls_scores = scores.take(idxs, axis=0)
cls_tlbr = tlbr[idxs]
cls_scores = scores[idxs]
cls_keep = torch_nms(cls_tlbr, cls_scores, thresh=thresh, bias=0)
keep1.extend(list(ub.compress(idxs, cls_keep.cpu().numpy())))
keep1 = sorted(keep1)
keep_ = torch_nms(tlbr, scores, classes=classes, thresh=thresh, bias=0)
keep2 = np.where(keep_.cpu().numpy())[0].tolist()
keep3 = nh.util.non_max_supression(tlbr.cpu().numpy(),
scores.cpu().numpy(),
classes=classes.cpu().numpy(),
thresh=thresh, bias=0, impl='gpu')
print(len(keep1))
print(len(keep2))
print(len(keep3))
print(set(keep1) - set(keep2))
print(set(keep2) - set(keep1))
def _benchmark():
"""
python -m netharn.util.nms.torch_nms _benchmark --show
SeeAlso:
PJR Darknet NonMax supression
https://github.com/pjreddie/darknet/blob/master/src/box.c
Lightnet NMS
https://gitlab.com/EAVISE/lightnet/blob/master/lightnet/data/transform/_postprocess.py#L116
"""
import torch
import numpy as np
import netharn as nh
from netharn.util.nms.torch_nms import torch_nms
from netharn.util import non_max_supression
import ubelt as ub
import itertools as it
N = 100
bestof = 10
ydata = ub.ddict(list)
# xdata = [10, 20, 40, 80, 100, 200, 300, 400, 500, 600, 700, 1000, 1500, 2000]
# max number of boxes yolo will spit out at a time
max_boxes = 19 * 19 * 5
xdata = [10, 20, 40, 80, 100, 200, 300, 400, 500, 600, 700, 1000, 1500, max_boxes]
# xdata = [10, 20, 40, 80, 100, 200, 300, 400, 500]
xdata = [10, 100, 500]
rng = nh.util.ensure_rng(0)
thresh = 0.5
for num in xdata:
print('\n\n---- number of boxes = {} ----\n'.format(num))
outputs = {}
# Build random test boxes and scores
cpu_boxes = nh.util.Boxes.random(num, scale=10.0, rng=rng, format='tlbr', tensor=True)
cpu_tlbr = cpu_boxes.to_tlbr().data
# cpu_scores = torch.Tensor(rng.rand(len(cpu_tlbr)))
# make all scores unique to ensure comparability
cpu_scores = torch.Tensor(np.linspace(0, 1, len(cpu_tlbr)))
cpu_cls = torch.LongTensor(rng.randint(0, 10, len(cpu_tlbr)))
# Format boxes in lightnet format
cpu_ln_boxes = torch.cat([cpu_boxes.to_cxywh().data, cpu_scores[:, None], cpu_cls.float()[:, None]], dim=-1)
# Move boxes to numpy
np_tlbr = cpu_tlbr.numpy()
np_scores = cpu_scores.numpy()
np_cls = cpu_cls.numpy() # NOQA
gpu = torch.device('cuda', 0)
measure_gpu = torch.cuda.is_available()
measure_cpu = False or not torch.cuda.is_available()
def _ln_output_to_keep(ln_output, ln_boxes):
keep = []
for row in ln_output:
# Find the index that we kept
idxs = np.where(np.all(np.isclose(ln_boxes, row), axis=1))[0]
assert len(idxs) == 1
keep.append(idxs[0])
assert np.all(np.isclose(ln_boxes[keep], ln_output))
return keep
if measure_gpu:
# Move boxes to the GPU
gpu_tlbr = cpu_tlbr.to(gpu)
gpu_scores = cpu_scores.to(gpu)
gpu_cls = cpu_cls.to(gpu) # NOQA
gpu_ln_boxes = cpu_ln_boxes.to(gpu)
t1 = ub.Timerit(N, bestof=bestof, label='torch(gpu)')
for timer in t1:
with timer:
keep = torch_nms(gpu_tlbr, gpu_scores, thresh=thresh)
torch.cuda.synchronize()
ydata[t1.label].append(t1.min())
outputs[t1.label] = np.where(keep.cpu().numpy())[0]
t1 = ub.Timerit(N, bestof=bestof, label='cython(gpu)')
for timer in t1:
with timer:
keep = non_max_supression(np_tlbr, np_scores, thresh=thresh, impl='gpu')
torch.cuda.synchronize()
ydata[t1.label].append(t1.min())
outputs[t1.label] = sorted(keep)
from lightnet.data.transform._postprocess import NonMaxSupression
t1 = ub.Timerit(N, bestof=bestof, label='lightnet-slow(gpu)')
for timer in t1:
with timer:
ln_output = NonMaxSupression._nms(gpu_ln_boxes, nms_thresh=thresh, class_nms=False, fast=False)
torch.cuda.synchronize()
# convert lightnet NMS output to keep for consistency
keep = _ln_output_to_keep(ln_output, gpu_ln_boxes)
ydata[t1.label].append(t1.min())
outputs[t1.label] = sorted(keep)
if False:
t1 = ub.Timerit(N, bestof=bestof, label='lightnet-fast(gpu)')
for timer in t1:
with timer:
ln_output = NonMaxSupression._nms(gpu_ln_boxes, nms_thresh=thresh, class_nms=False, fast=True)
torch.cuda.synchronize()
# convert lightnet NMS output to keep for consistency
keep = _ln_output_to_keep(ln_output, gpu_ln_boxes)
ydata[t1.label].append(t1.min())
outputs[t1.label] = sorted(keep)
if measure_cpu:
t1 = ub.Timerit(N, bestof=bestof, label='torch(cpu)')
for timer in t1:
with timer:
keep = torch_nms(cpu_tlbr, cpu_scores, thresh=thresh)
ydata[t1.label].append(t1.min())
outputs[t1.label] = np.where(keep.cpu().numpy())[0]
if True:
t1 = ub.Timerit(N, bestof=bestof, label='cython(cpu)')
for timer in t1:
with timer:
keep = non_max_supression(np_tlbr, np_scores, thresh=thresh, impl='cpu')
ydata[t1.label].append(t1.min())
outputs[t1.label] = sorted(keep)
t1 = ub.Timerit(N, bestof=bestof, label='numpy(cpu)')
for timer in t1:
with timer:
keep = non_max_supression(np_tlbr, np_scores, thresh=thresh, impl='py')
ydata[t1.label].append(t1.min())
outputs[t1.label] = sorted(keep)
# Check that all kept boxes do not have more than `threshold` ious
for key, idxs in outputs.items():
ious = nh.util.box_ious(np_tlbr[idxs], np_tlbr[idxs])
max_iou = (np.tril(ious) - np.eye(len(ious))).max()
if max_iou > thresh:
print('{} produced a bad result with max_iou={}'.format(key, max_iou))
# Check result consistency:
print('\nResult stats:')
for key in sorted(outputs.keys()):
print(' * {:<20}: num={}'.format(key, len(outputs[key])))
print('\nResult overlaps (method1, method2: jaccard):')
datas = []
for k1, k2 in it.combinations(sorted(outputs.keys()), 2):
idxs1 = set(outputs[k1])
idxs2 = set(outputs[k2])
jaccard = len(idxs1 & idxs2) / len(idxs1 | idxs2)
datas.append((k1, k2, jaccard))
datas = sorted(datas, key=lambda x: -x[2])
for k1, k2, jaccard in datas:
print(' * {:<20}, {:<20}: {:0.4f}'.format(k1, k2, jaccard))
nh.util.mplutil.autompl()
nh.util.mplutil.multi_plot(xdata, ydata, xlabel='num boxes', ylabel='seconds')
nh.util.show_if_requested()
if __name__ == '__main__':
"""
CommandLine:
python -m netharn.util.nms.torch_nms all
"""
import xdoctest
xdoctest.doctest_module(__file__)
| [
"torch.device",
"torch.cuda.synchronize",
"torch.ByteTensor",
"torch.cuda.is_available"
] | 0.4.0 | angiemsu/netharn | 728cb40aad299baf62c689430d07b29c67d8cf21 |
0.4 | import torch
import numpy as np
import ubelt as ub
from netharn.util.nms import py_nms
from netharn.util import profiler
from netharn.util.nms import torch_nms
import warnings
_impls = {}
_impls['py'] = py_nms.py_nms
_impls['torch'] = torch_nms.torch_nms
_automode = 'py'
try:
from netharn.util.nms import cpu_nms
_impls['cpu'] = cpu_nms.cpu_nms
_automode = 'cpu'
except Exception:
warnings.warn('cpu_nms is not available')
try:
if torch.cuda.is_available():
from netharn.util.nms import gpu_nms
_impls['gpu'] = gpu_nms.gpu_nms
_automode = 'gpu'
except Exception:
warnings.warn('gpu_nms is not available')
@profiler.profile
def non_max_supression(tlbr, scores, thresh, bias=0.0, classes=None,
impl='auto'):
"""
Non-Maximum Suppression
Args:
tlbr (ndarray): Nx4 boxes in tlbr format
scores (ndarray): score for each bbox
thresh (float): iou threshold
bias (float): bias for iou computation either 0 or 1
(hint: choosing 1 is wrong for the computer vision community)
classes (ndarray or None): integer classes. If specified NMS is done
on a per-class basis.
impl (str): implementation can be auto, py, torch, cpu, or gpu
CommandLine:
python ~/code/netharn/netharn/util/nms/nms_core.py nms
python ~/code/netharn/netharn/util/nms/nms_core.py nms:0
python ~/code/netharn/netharn/util/nms/nms_core.py nms:1
References:
https://github.com/facebookresearch/Detectron/blob/master/detectron/utils/cython_nms.pyx
https://www.pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/
https://github.com/bharatsingh430/soft-nms/blob/master/lib/nms/cpu_nms.pyx <- TODO
Example:
>>> dets = np.array([
>>> [0, 0, 100, 100],
>>> [100, 100, 10, 10],
>>> [10, 10, 100, 100],
>>> [50, 50, 100, 100],
>>> ], dtype=np.float32)
>>> scores = np.array([.1, .5, .9, .1])
>>> thresh = .5
>>> keep = non_max_supression(dets, scores, thresh, impl='py')
>>> print('keep = {!r}'.format(keep))
keep = [2, 1, 3]
Example:
>>> import ubelt as ub
>>> dets = np.array([
>>> [0, 0, 100, 100],
>>> [100, 100, 10, 10],
>>> [10, 10, 100, 100],
>>> [50, 50, 100, 100],
>>> [100, 100, 150, 101],
>>> [120, 100, 180, 101],
>>> [150, 100, 200, 101],
>>> ], dtype=np.float32)
>>> scores = np.linspace(0, 1, len(dets))
>>> thresh = .2
>>> solutions = {}
>>> for impl in _impls:
>>> solutions[impl] = sorted(non_max_supression(dets, scores, thresh, impl=impl))
>>> print('solutions = {}'.format(ub.repr2(solutions, nl=1)))
>>> assert ub.allsame(solutions.values())
"""
if tlbr.shape[0] == 0:
return []
if impl == 'auto':
impl = _automode
if classes is not None:
keep = []
for idxs in ub.group_items(range(len(classes)), classes).values():
# cls_tlbr = tlbr.take(idxs, axis=0)
# cls_scores = scores.take(idxs, axis=0)
cls_tlbr = tlbr[idxs]
cls_scores = scores[idxs]
cls_keep = non_max_supression(cls_tlbr, cls_scores, thresh=thresh,
bias=bias, impl=impl)
keep.extend(list(ub.take(idxs, cls_keep)))
return keep
else:
if impl == 'py':
keep = py_nms.py_nms(tlbr, scores, thresh, bias=float(bias))
elif impl == 'torch':
was_tensor = torch.is_tensor(tlbr)
if not was_tensor:
tlbr = torch.Tensor(tlbr)
scores = torch.Tensor(scores)
flags = torch_nms.torch_nms(tlbr, scores, thresh=thresh,
bias=float(bias))
keep = np.where(flags.cpu().numpy())[0]
else:
# TODO: it would be nice to be able to pass torch tensors here
nms = _impls[impl]
tlbr = tlbr.astype(np.float32)
scores = scores.astype(np.float32)
# dets = np.hstack((tlbr, scores[:, None])).astype(np.float32)
if impl == 'gpu':
# HACK: we should parameterize which device is used
device = torch.cuda.current_device()
keep = nms(tlbr, scores, thresh, bias=float(bias), device_id=device)
else:
keep = nms(tlbr, scores, thresh, bias=float(bias))
return keep
# TODO: soft nms
if __name__ == '__main__':
"""
CommandLine:
python -m netharn.util.nms.nms_core all
"""
import xdoctest
xdoctest.doctest_module(__file__)
| [
"torch.is_tensor",
"torch.cuda.is_available",
"torch.Tensor",
"torch.cuda.current_device"
] | 0.4.0 | angiemsu/netharn | 728cb40aad299baf62c689430d07b29c67d8cf21 |
1.10 | import torch
import numpy
from typing import Union, List, Any
class NonMaxSuppression:
"""
Given a set of bounding boxes defined over possibly different tissues,
use the Intersection-over-Minimum criterion to filter out overlapping proposals.
"""
@staticmethod
@torch.no_grad()
def compute_nm_mask(score: Union[torch.Tensor, numpy.ndarray],
ids: Union[torch.Tensor, numpy.ndarray, List[Any]],
patches_xywh: Union[torch.Tensor, numpy.ndarray],
iom_threshold: float) -> (torch.Tensor, torch.Tensor):
"""
Filter the proposals according to their score and their Intersection over Minimum.
Args:
score: score used to sort the proposals of shape (N)
ids: vector or list of shape (N) with the (tissue) id.
IoMIN is always zero between patches with different (tissue) ids.
patches_xywh: coordinates with the proposals of shape (N, 4) where 4 stand for x,y,w,h.
iom_threshold: threshold of Intersection over Minimum. If IoM is larger than this value the proposals
will be suppressed during NMS. Only the proposal with larger score will survive.
Returns:
(nms_mask_n, iomin_nn) where nms_mask_n is a boolean tensor of shape (N) with True
if the proposal survived NMS and iomin_nn with the value of the IoMIN among all possible pairs.
"""
def _to_numpy(_x):
if isinstance(_x, torch.Tensor):
return _x.detach().cpu().numpy()
elif isinstance(_x, numpy.ndarray):
return _x
elif isinstance(_x, list):
return numpy.array(_x)
def _to_torch(_x):
if isinstance(_x, torch.Tensor):
return _x
elif isinstance(_x, numpy.ndarray):
return torch.from_numpy(_x)
else:
raise Exception("Expected a torch.tensor or a numpy.ndarray. Received {0}".format(type(_x)))
# the tissue ids can be a list of strings. Therefore I cannot convert them to a torch tensor directly.
ids_numpy = _to_numpy(ids)
assert len(patches_xywh.shape) == 2 and patches_xywh.shape[-1] == 4
assert score.shape == ids_numpy.shape == patches_xywh[:, 0].shape
# this is an O(N^2) algorithm (all boxes are compared to all other boxes) but it is very simple
x, y, w, h = _to_torch(patches_xywh).unbind(dim=-1)
overlap_measure_tmp_nn = NonMaxSuppression._compute_iomin(x=x, y=y, w=w, h=h)
mask_same_id_nn_numpy = (ids_numpy == ids_numpy[:, None])
mask_same_id_nn = _to_torch(mask_same_id_nn_numpy).to(device=overlap_measure_tmp_nn.device)
overlap_measure_nn = overlap_measure_tmp_nn * mask_same_id_nn # if ids are different IoMIN = 0
binarized_overlap_nn = (overlap_measure_nn > iom_threshold).float()
nms_mask_n = NonMaxSuppression.perform_nms_selection(mask_overlap_nn=binarized_overlap_nn,
score_n=score,
possible_n=torch.ones_like(score).bool())
return nms_mask_n, overlap_measure_nn
@staticmethod
def perform_nms_selection(mask_overlap_nn: torch.Tensor,
score_n: torch.Tensor,
possible_n: torch.Tensor) -> torch.Tensor:
"""
Given a set of n proposals and the (n x n) binarized mask which describes if two proposals are
mutually exclusive it performs the greedy NMS in parallel (if possible).
Args:
mask_overlap_nn: Binarized overlap matrix with 1 if IoMIN > threshold and 0 otherwise, i.e 1 means that
two proposals are incompatible, 0 means that they are compatible.
score_n: Score of the proposal. Higher score proposal have precedence.
possible_n: Vector with 1 if the proposal can be chosen and 0 otherwise.
Note:
The algorithm terminates when there are no more suitable proposals
(because they have all been suppressed by higher scoring ones).
Returns:
mask_nms_n: A tensor with the same shape as :attr:'score_n'. The entries are 1 if that proposal
has been selected (i.e. survived NMS) and 0 otherwise.
"""
# reshape
score_1n = score_n.unsqueeze(-2)
possible_1n = possible_n.unsqueeze(-2)
idx_n1 = torch.arange(start=0, end=score_n.shape[-1], step=1, device=score_n.device).view(-1, 1).long()
selected_n1 = torch.zeros_like(score_n).unsqueeze(dim=-1)
# Greedy algorithm in a loop
n_iter = 0
while possible_1n.sum() > 0:
n_iter += 1
score_mask_nn = mask_overlap_nn * (score_1n * possible_1n)
index_n1 = torch.max(score_mask_nn, keepdim=True, dim=-1)[1]
selected_n1 += possible_1n.transpose(dim0=-1, dim1=-2) * (idx_n1 == index_n1)
blocks_1n = torch.sum(mask_overlap_nn * selected_n1, keepdim=True, dim=-2)
possible_1n *= (blocks_1n == 0)
mask_selected_n = selected_n1.squeeze(dim=-1).bool()
# print("DEBUG nms performed in ", n_iter)
# print("DEBUG nms. Mask ", mask_selected_n.shape, mask_selected_n.sum(), mask_selected_n.dtype)
return mask_selected_n
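# Worked example (illustrative): with score_n = [0.9, 0.8, 0.7], possible_n all True and
#   mask_overlap_nn = [[1, 1, 0],
#                      [1, 1, 1],
#                      [0, 1, 1]]
# the greedy loop above selects proposals 0 and 2; proposal 1 is blocked by the higher-scoring proposal 0.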
@staticmethod
def _unroll_and_compare(x_tmp: torch.Tensor, label: str) -> torch.Tensor:
""" Given a vector of size: (*, n) creates an output of size (*, n, n)
obtained by comparing all vector entries with all other vector entries
The comparison is either MIN or MAX. """
if label == "MAX":
y_tmp = torch.max(x_tmp.unsqueeze(dim=-1), x_tmp.unsqueeze(dim=-2))
elif label == "MIN":
y_tmp = torch.min(x_tmp.unsqueeze(dim=-1), x_tmp.unsqueeze(dim=-2))
else:
raise Exception("label is unknown. It is ", label)
return y_tmp
@staticmethod
def _compute_iomin(
x: torch.Tensor,
y: torch.Tensor,
w: torch.Tensor,
h: torch.Tensor) -> torch.Tensor:
"""
Given x,y,w,h compute the Intersection over Min Area (IoMin) among all possible pairs.
Args:
x: torch.Tensor of shape: (n) with the x-coordinate
y: torch.Tensor of shape: (n) with the y-coordinate
w: torch.Tensor of shape: (n) with the width
h: torch.Tensor of shape: (n) with the height
Returns:
A matrix of shape (n, n) with the IoMIN
"""
assert x.shape == y.shape == w.shape == h.shape
# compute x1,x3,y1,y3 and area
x1 = x
x3 = x + w
y1 = y
y3 = y + h
area = w * h
min_area_nn = NonMaxSuppression._unroll_and_compare(area, "MIN")
xi1_nn = NonMaxSuppression._unroll_and_compare(x1, "MAX")
yi1_nn = NonMaxSuppression._unroll_and_compare(y1, "MAX")
xi3_nn = NonMaxSuppression._unroll_and_compare(x3, "MIN")
yi3_nn = NonMaxSuppression._unroll_and_compare(y3, "MIN")
intersection_area_nn = torch.clamp(xi3_nn - xi1_nn, min=0) * torch.clamp(yi3_nn - yi1_nn, min=0)
return intersection_area_nn / min_area_nn
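# --- Hedged usage sketch (not part of the original module; the boxes, scores and ids below are illustrative) ---
if __name__ == '__main__':
    patches_xywh = torch.tensor([[0.0, 0.0, 10.0, 10.0],
                                 [2.0, 2.0, 10.0, 10.0],
                                 [50.0, 50.0, 5.0, 5.0]])
    scores = torch.tensor([0.9, 0.8, 0.7])
    tissue_ids = ["tissue_a", "tissue_a", "tissue_a"]
    keep_mask, iomin_nn = NonMaxSuppression.compute_nm_mask(
        score=scores, ids=tissue_ids, patches_xywh=patches_xywh, iom_threshold=0.5)
    print(keep_mask)  # the second (overlapping, lower-scoring) patch is expected to be suppressed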
| [
"torch.arange",
"torch.max",
"torch.no_grad",
"torch.clamp",
"torch.from_numpy",
"torch.ones_like",
"torch.zeros_like",
"torch.sum"
] | 1.10 | broadinstitute/tissue_purifier | 989ce9d58bba99a3f1c49743eed22dcc64e5f159 |
1.10 | from typing import List, Optional, Tuple, Union, NamedTuple, Callable, Any
import torch
import collections.abc
from torch.utils.data import Dataset, DataLoader
class MetadataCropperDataset(NamedTuple):
f_name: Union[str, int]
loc_x: Union[int, float]
loc_y: Union[int, float]
moran: Union[float, None]
class CropperTensor(torch.nn.Module):
"""
    Base class for cropping a tensor and returning the crops and their coordinates, i.e. (crops, x_loc, y_loc).
    This does NOT fit into a standard transform since it returns a tuple and not just an augmented tensor.
"""
def __init__(
self,
crop_size: int = 224,
strategy: str = 'random',
stride: int = 200,
n_crops: int = 10,
random_order: bool = True,
criterium_fn: Callable = None,
**kargs,
):
"""
Args:
            crop_size: int, the size in pixels of the sliding tiling windows
            strategy: str, can be either 'random' or 'tiling' or 'identity'
            stride: Used only when :attr:'strategy' is 'tiling'.
                Displacement between consecutive sliding windows. This controls the overlap between crops.
            n_crops: int, the number of crops to generate from a single image.
            random_order: Used only when :attr:'strategy' is 'tiling'.
                If true, the crops are shuffled before being returned.
            criterium_fn: Callable which returns True if a crop is valid, and False otherwise
"""
super().__init__()
self.crop_size_ = crop_size
self.strategy_ = strategy
self.stride_ = stride
self.n_crops_ = n_crops
self.random_order_ = random_order
self.criterium_fn_ = criterium_fn
self._assert_params(crop_size, stride, n_crops, random_order, strategy, criterium_fn)
@staticmethod
def _assert_params(crop_size, stride, n_crops, random_order, strategy, criterium_fn):
assert isinstance(crop_size, int)
assert isinstance(stride, int)
assert isinstance(n_crops, int)
assert isinstance(random_order, bool)
assert isinstance(criterium_fn, collections.abc.Callable)
assert strategy == 'random' or strategy == 'tiling' or strategy == 'identity'
def forward(
self,
tensor: torch.Tensor,
crop_size: int = None,
strategy: str = None,
stride: int = None,
n_crops: int = None,
random_order: bool = None,
criterium_fn: Callable = None) -> (List[torch.Tensor], List[int], List[int]):
# All parameters default to the one used during initialization if they are not specified
crop_size = self.crop_size_ if crop_size is None else crop_size
strategy = self.strategy_ if strategy is None else strategy
stride = self.stride_ if stride is None else stride
n_crops = self.n_crops_ if n_crops is None else n_crops
random_order = self.random_order_ if random_order is None else random_order
criterium_fn = self.criterium_fn_ if criterium_fn is None else criterium_fn
crops, x_locs, y_locs = self._crop(tensor, crop_size, strategy, stride, n_crops, random_order, criterium_fn)
return crops, x_locs, y_locs
@staticmethod
def reapply_crops(tensor, patches_xywh) -> (List[torch.Tensor], List[int], List[int]):
raise NotImplementedError
def _crop(self,
tensor,
crop_size: int,
strategy: str,
stride: int,
n_crops: int,
random_order: bool,
criterium_fn: Callable) -> (List[torch.Tensor], List[int], List[int]):
""" This must be overwritten in derived class """
raise NotImplementedError
def __repr__(self) -> str:
""" This must be overwritten in derived class """
raise NotImplementedError
class CropperDenseTensor(CropperTensor):
SAFETY_FACTOR = 3
def __init__(self, min_threshold_value: float, min_threshold_fraction: float, **kargs):
"""
Args:
min_threshold_value: binarize a crop according to
:math:'tensor.sum(dim=-3, keepdim=True) > min_threshold_value'
            min_threshold_fraction: A crop with a fraction of True entries below this value is considered
empty and disregarded.
"""
assert isinstance(min_threshold_value, float)
self.min_threshold_value = min_threshold_value
assert isinstance(min_threshold_fraction, float)
self.min_threshold_fraction = min_threshold_fraction
def criterium_fn(potential_crops):
masks = potential_crops.sum(dim=-3, keepdim=False) > min_threshold_value
number_of_true = masks.flatten(start_dim=-2).sum(dim=-1)
area_of_crops = masks.shape[-1] * masks.shape[-2]
return number_of_true.float() > area_of_crops * min_threshold_fraction
super().__init__(criterium_fn=criterium_fn,
**kargs)
def __repr__(self):
return self.__class__.__name__ + '(crop_size={0}, strategy={1}, stride={2}, random_order={3}, \
min_threshold_value={4}, min_threshold_fraction={5})'.format(self.crop_size_,
self.strategy_,
self.stride_,
self.random_order_,
self.min_threshold_value,
self.min_threshold_fraction)
@staticmethod
def reapply_crops(tensor, patches_xywh) -> (List[torch.Tensor], List[int], List[int]):
assert isinstance(patches_xywh, torch.LongTensor)
assert len(patches_xywh.shape) == 2 and patches_xywh.shape[-1] == 4
x_patch, y_patch, w_patch, h_patch = patches_xywh.chunk(chunks=4, dim=-1) # each one has shape (batch, 1)
crops = []
for ix, iy, iw, ih, in zip(x_patch, y_patch, w_patch, h_patch):
tensor_tmp = tensor.narrow(dim=-2, start=ix.item(), length=iw.item())
crop = tensor_tmp.narrow(dim=-1, start=iy.item(), length=ih.item())
crops.append(crop.clone())
return crops, x_patch.squeeze(-1).tolist(), y_patch.squeeze(-1).tolist()
def _crop(self,
tensor: torch.Tensor,
crop_size: int,
strategy: str,
stride: int,
n_crops: int,
random_order: bool,
criterium_fn: Callable) -> (List[torch.Tensor], List[int], List[int]):
assert isinstance(tensor, torch.Tensor)
self._assert_params(crop_size, stride, n_crops, random_order, strategy, criterium_fn)
if strategy == 'identity':
return [tensor]*n_crops, [0]*n_crops, [0]*n_crops
elif strategy == 'tiling' or strategy == 'random':
w_img, h_img = tensor.shape[-2:]
if strategy == 'tiling':
# generate a random starting point
x_corner_list, y_corner_list = [], []
i0 = torch.randint(low=0, high=stride, size=[1]).item()
j0 = torch.randint(low=0, high=stride, size=[1]).item()
for i in range(i0, w_img-crop_size, stride):
for j in range(j0, h_img-crop_size, stride):
x_corner_list.append(i)
y_corner_list.append(j)
x_corner = torch.tensor(x_corner_list, device=tensor.device, dtype=torch.long)
y_corner = torch.tensor(y_corner_list, device=tensor.device, dtype=torch.long)
if random_order:
index_shuffle = torch.randperm(n=x_corner.shape[0], dtype=torch.long, device=x_corner.device)
x_corner = x_corner[index_shuffle]
y_corner = y_corner[index_shuffle]
elif strategy == 'random':
# create two tensors (x_corner, y_corner) with the location of the bottom left corner of the crop
x_corner = torch.randint(
low=0,
high=max(1, w_img - crop_size),
size=[n_crops * self.SAFETY_FACTOR],
device=tensor.device,
dtype=torch.long,
) # low is included, high is excluded
y_corner = torch.randint(
low=0,
high=max(1, h_img - crop_size),
size=[n_crops * self.SAFETY_FACTOR],
device=tensor.device,
dtype=torch.long,
) # low is included, high is excluded
else:
raise Exception("strategy is not recognized", strategy)
# compute the crops
crops, x_locs, y_locs = [], [], []
for ix, iy in zip(x_corner, y_corner):
tensor_tmp = torch.narrow(tensor, dim=-2, start=ix, length=crop_size)
crop = torch.narrow(tensor_tmp, dim=-1, start=iy, length=crop_size)
if self.criterium_fn_(crop):
crops.append(crop.clone())
x_locs.append(ix.item())
y_locs.append(iy.item())
# return at most n_crops items
return crops[:n_crops], x_locs[:n_crops], y_locs[:n_crops]
class CropperSparseTensor(CropperTensor):
SAFETY_FACTOR = 5
def __init__(self,
n_element_min: int = 100,
**kargs,
):
"""
Args:
n_element_min: create crops with (at least) this number of elements (i.e. cells or genes)
"""
assert isinstance(n_element_min, int)
self.n_element_min = n_element_min
def criterium_fn(n_elements):
return n_elements >= n_element_min
super().__init__(criterium_fn=criterium_fn,
**kargs)
def __repr__(self):
return self.__class__.__name__ + '(crop_size={0}, strategy={1}, stride={2}, random_order={3}, \
n_element_min={4})'.format(self.crop_size_,
self.strategy_,
self.stride_,
self.random_order_,
self.n_element_min)
@staticmethod
def reapply_crops(sparse_tensor, patches_xywh) -> (List[torch.sparse.Tensor], List[int], List[int]):
assert isinstance(patches_xywh, torch.Tensor)
assert len(patches_xywh.shape) == 2 and patches_xywh.shape[-1] == 4
assert isinstance(sparse_tensor, torch.sparse.Tensor)
codes: torch.Tensor
x_pixel: torch.Tensor
y_pixel: torch.Tensor
codes, x_pixel, y_pixel = sparse_tensor.indices() # each has shape (n_element)
values = sparse_tensor.values()
ch, w_img, h_img = sparse_tensor.size()
x_patch, y_patch, w_patch, h_patch = patches_xywh.chunk(chunks=4, dim=-1) # each one has shape (batch, 1)
mask = (x_pixel >= x_patch) * \
(x_pixel < x_patch + w_patch) * \
(y_pixel >= y_patch) * \
(y_pixel < y_patch + h_patch) # shape (batch, n_element)
assert mask.shape[0] == x_patch.shape[0] == y_patch.shape[0] == w_patch.shape[0] == h_patch.shape[0]
crops = []
for n in range(mask.shape[0]):
mask_n = mask[n] # shape (n_element)
codes_n = codes[mask_n]
x_pixel_n = x_pixel[mask_n] - x_patch[n, 0]
y_pixel_n = y_pixel[mask_n] - y_patch[n, 0]
values_n = values[mask_n]
crops.append(
torch.sparse_coo_tensor(
indices=torch.stack((codes_n, x_pixel_n, y_pixel_n), dim=0),
values=values_n,
size=(ch, w_patch[n, 0], h_patch[n, 0]),
device=x_pixel.device,
requires_grad=False,
).coalesce()
)
return crops, x_patch.squeeze(-1).tolist(), y_patch.squeeze(-1).tolist()
def _crop(self,
sparse_tensor,
crop_size: int,
strategy: str,
stride: int,
n_crops: int,
random_order: bool,
criterium_fn: Callable) -> Tuple[list, list, list]:
if strategy == 'identity':
return [sparse_tensor]*n_crops, [0]*n_crops, [0]*n_crops
self._assert_params(crop_size, stride, n_crops, random_order, strategy, criterium_fn)
assert sparse_tensor.is_sparse
# this might break the code if num_worked>0 in dataloader
# if torch.cuda.is_available():
# sparse_tensor = sparse_tensor.cuda()
codes, x_pixel, y_pixel = sparse_tensor.indices() # each has shape (n_elements)
values = sparse_tensor.values()
ch, w_img, h_img = sparse_tensor.size()
if strategy == 'tiling':
# generate a random starting point
x_corner_list, y_corner_list = [], []
i0 = torch.randint(low=-crop_size, high=0, size=[1]).item()
j0 = torch.randint(low=-crop_size, high=0, size=[1]).item()
for i in range(i0, w_img, stride):
for j in range(j0, h_img, stride):
x_corner_list.append(i)
y_corner_list.append(j)
x_corner = torch.tensor(x_corner_list, device=x_pixel.device, dtype=x_pixel.dtype).view(-1, 1)
y_corner = torch.tensor(y_corner_list, device=x_pixel.device, dtype=x_pixel.dtype).view(-1, 1)
if random_order:
index_shuffle = torch.randperm(n=x_corner.shape[0], dtype=torch.long, device=x_corner.device)
x_corner = x_corner[index_shuffle]
y_corner = y_corner[index_shuffle]
elif strategy == 'random':
x_corner = torch.randint(
low=0,
high=max(1, sparse_tensor.shape[-2] - crop_size),
size=[n_crops * self.SAFETY_FACTOR],
device=x_pixel.device,
dtype=x_pixel.dtype,
).view(-1, 1) # low is included, high is excluded
y_corner = torch.randint(
low=0,
high=max(1, sparse_tensor.shape[-1] - crop_size),
size=[n_crops * self.SAFETY_FACTOR],
device=y_pixel.device,
dtype=y_pixel.dtype,
).view(-1, 1) # low is included, high is excluded
else:
raise Exception("strategy is not recognized", strategy)
element_mask = (x_pixel >= x_corner) * \
(x_pixel < x_corner + crop_size) * \
(y_pixel >= y_corner) * \
(y_pixel < y_corner + crop_size) # shape: (n_crops * SAFETY_FACTOR, n_elements)
n_elements = (values * element_mask).sum(dim=-1) # shape (n_crops * SAFETY_FACTOR)
valid_patch = criterium_fn(n_elements)
n_valid_patches = valid_patch.sum().item()
if n_valid_patches < n_crops:
# import warnings
# warnings.warn("Warning. Not enough valid crops found. Change the parameters. ")
print("Warning. Only {0} valid crops found when requested {1}. \
Change the parameters.".format(n_valid_patches, n_crops))
n_max = min(n_crops, n_valid_patches)
ix = x_corner[valid_patch, 0][: n_max] # shape: n_max
iy = y_corner[valid_patch, 0][: n_max] # shape: n_max
mask = element_mask[valid_patch][: n_max] # shape: n_max, element_in_sparse_array
dense_crop_shape = (ch, crop_size, crop_size)
crops = []
for n in range(n_max):
mask_n = mask[n]
codes_n = codes[mask_n]
x_pixel_n = x_pixel[mask_n] - ix[n]
y_pixel_n = y_pixel[mask_n] - iy[n]
values_n = values[mask_n]
crops.append(
torch.sparse_coo_tensor(
indices=torch.stack((codes_n, x_pixel_n, y_pixel_n), dim=0),
values=values_n,
size=dense_crop_shape,
device=x_pixel.device,
requires_grad=False,
).coalesce()
)
x_locs = [ix[n].item() for n in range(n_max)]
y_locs = [iy[n].item() for n in range(n_max)]
return crops, x_locs, y_locs
class CropperDataset(Dataset):
"""
Dataset with imgs, labels, metadata and possibly a cropper for cropping img on the fly
"""
def __init__(
self,
imgs: Union[
List[torch.Tensor],
List[torch.sparse.Tensor],
List["SparseImage"],
],
labels: List[Any],
metadatas: List[MetadataCropperDataset],
cropper: Optional[CropperTensor] = None,
):
"""
Args:
imgs: (list of) images representing spatial data.
labels: (list of) labels.
metadatas: (list of) metadata.
cropper: Callable which crops the image on the fly
"""
assert isinstance(imgs, list)
assert isinstance(labels, list)
assert isinstance(metadatas, list)
assert len(imgs) == len(labels) == len(metadatas), (
"These number should be the same {0} {1} {2}".format(len(imgs),
len(labels),
len(metadatas))
)
assert len(imgs) >= 1, "I can not create a dataset with less than 1 image."
# check that all sparse_images have a _categories_to_code before putting them together into a dataset.
if hasattr(imgs[0], '_categories_to_codes'):
list_of_cat_to_code_dict = [img._categories_to_codes for img in imgs]
for i in range(len(list_of_cat_to_code_dict)-1):
assert list_of_cat_to_code_dict[i] == list_of_cat_to_code_dict[i+1], \
"The sparse images have different cat_to_code dictionaries {0} and {1}. \
These images can not be combined into a dataset. \
You can re-create the sparse images and specify the cat_to_code dictionary \
to be used.".format(list_of_cat_to_code_dict[i], list_of_cat_to_code_dict[i+1])
print("All cat_to_codes dictionaries are identical {0}".format(list_of_cat_to_code_dict[-1]))
unique_y_labels = list(sorted(set(labels)))
unique_y_codes = [i for i in range(len(unique_y_labels))]
self._labels_to_codes = dict(zip(unique_y_labels, unique_y_codes))
self.codes = [self._labels_to_codes[label] for label in labels] # list of integers
self.metadatas = metadatas
self.imgs = imgs
self.cropper = cropper
if self.cropper is None:
self.duplicating_factor = 1
self.n_crops_per_tissue = None
elif self.cropper.strategy_ == 'random':
# If n_crops >= batch_size then a single tissue generates all crops for the mini_batch.
# This results in a very imbalanced mini_batch.
# Here, we implement a trick for generating more balanced mini_batches.
# We pretend to have more tissues and generate fewer crops from each tissue resulting in the same overall
# number of crops but a more diverse mini_batch.
# See __len__ and __getitem__ for how this trick is implemented.
tmp_n_crops = self.cropper.n_crops_
while tmp_n_crops > 10 and tmp_n_crops % 2 == 0:
tmp_n_crops /= 2
self.n_crops_per_tissue = int(tmp_n_crops)
self.duplicating_factor = int(self.cropper.n_crops_ // self.n_crops_per_tissue)
def to(self, device: torch.device) -> "CropperDataset":
""" Move the images to a particular device """
self.imgs = [img.to(device) for img in self.imgs]
return self
def __len__(self):
# We pretend that the dataset contains extra samples.
# Note that the data_loader will generate RANDOM indices between 0 and this (inflated) length
return len(self.imgs) * self.duplicating_factor
def __getitem__(self, index: int) -> Union[
Tuple[torch.Tensor, int, MetadataCropperDataset],
List[Tuple[torch.Tensor, int, MetadataCropperDataset]]]:
# Remap the index from the inflated interval to the original interval.
new_index = index % len(self.imgs) # this is strictly in [0, len(self.imgs))
if self.cropper is None:
img = self.imgs[new_index]
code = self.codes[new_index]
metadata = self.metadatas[new_index]
return img, code, metadata
else:
code_base = self.codes[new_index]
crop_list, loc_x_list, loc_y_list = self.cropper(self.imgs[new_index], n_crops=self.n_crops_per_tissue)
metadata_base: MetadataCropperDataset = self.metadatas[new_index]
return [(crop, code_base, MetadataCropperDataset(f_name=metadata_base.f_name,
loc_x=metadata_base.loc_x + x_loc,
loc_y=metadata_base.loc_y + y_loc,
moran=None)) for
crop, x_loc, y_loc in zip(crop_list, loc_x_list, loc_y_list)]
class CollateFnListTuple:
@staticmethod
@torch.no_grad()
def __call__(data):
"""
Args:
data: Output of the batchloader calling the __getitem__ method i.e.:
Either: List[Tuple]
Or: List[List[Tuple]
Returns:
List[imgs], List[labels], List[Metadata]
"""
if isinstance(data, list) and isinstance(data[0], list):
# I have to flatten a list of list
data = [val for sublist in data for val in sublist]
tuple_imgs, tuple_labels, tuple_metadata = zip(*data)
return list(tuple_imgs), list(tuple_labels), list(tuple_metadata)
class DataLoaderWithLoad(DataLoader):
def load(self, index: Union[List[int], torch.Tensor]):
tmp = []
for idx in index:
tmp.append(self.dataset.__getitem__(idx))
return self.collate_fn(tmp)
| [
"torch.stack",
"torch.no_grad",
"torch.randperm",
"torch.randint",
"torch.narrow",
"torch.tensor"
] | 1.10 | broadinstitute/tissue_purifier | 989ce9d58bba99a3f1c49743eed22dcc64e5f159 |
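# Hedged usage sketch for the cropper record above: a standalone illustration of the
# random dense-crop logic (draw candidate corners with torch.randint, cut patches with
# torch.narrow, keep a patch only if enough pixels pass a threshold). The names img,
# crop_size and the threshold values are hypothetical; the real CropperDenseTensor /
# CropperDataset classes add the criterium callable and metadata handling on top.
import torch

img = torch.rand(3, 64, 64)                      # toy dense image (channels, W, H)
crop_size, n_crops = 16, 4
xs = torch.randint(0, img.shape[-2] - crop_size, size=[3 * n_crops])
ys = torch.randint(0, img.shape[-1] - crop_size, size=[3 * n_crops])
crops = []
for ix, iy in zip(xs.tolist(), ys.tolist()):
    patch = torch.narrow(torch.narrow(img, -2, ix, crop_size), -1, iy, crop_size)
    mask = patch.sum(dim=-3) > 0.5                # binarize the channel sum
    if mask.float().mean() > 0.2:                 # analogue of min_threshold_fraction
        crops.append(patch.clone())
crops = crops[:n_crops]                           # return at most n_crops patches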
1.10 | from typing import Tuple
import math
import torch
from torch.optim.optimizer import Optimizer
def linear_warmup_and_cosine_protocol(
f_values: Tuple[float, float, float],
x_milestones: Tuple[int, int, int, int]):
"""
There are 5 regions:
1. constant at f0 for x < x0
2. linear increase from f0 to f1 for x0 < x < x1
3. constant at f1 for x1 < x < x2
4. cosine protocol from f1 to f2 for x2 < x < x3
5. constant at f2 for x > x3
If you want a linear_ramp followed by a cosine_decay only simply set:
1. x0=0 (to eliminate the first constant piece)
2. x2=x1 (to eliminate the second constant piece)
3. max_epochs=x3 (to make the simulation stop after the linear or cosine decay)
"""
assert x_milestones[0] <= x_milestones[1] <= x_milestones[2] <= x_milestones[3]
def fn(step):
if step <= x_milestones[0]:
return float(f_values[0])
elif (step > x_milestones[0]) and (step <= x_milestones[1]):
m = float(f_values[1] - f_values[0]) / float(max(1, x_milestones[1] - x_milestones[0]))
return float(f_values[0]) + m * float(step - x_milestones[0])
elif (step > x_milestones[1]) and (step <= x_milestones[2]):
return float(f_values[1])
elif (step > x_milestones[2]) and (step <= x_milestones[3]):
progress = float(step - x_milestones[2]) / float(max(1, x_milestones[3] - x_milestones[2])) # in (0,1)
tmp = 0.5 * (1.0 + math.cos(math.pi * progress)) # in (1,0)
return float(f_values[2]) + tmp * float(f_values[1] - f_values[2])
else:
return float(f_values[2])
return fn
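# A quick worked example of the protocol above (hypothetical values):
# fn = linear_warmup_and_cosine_protocol(f_values=(0.0, 1.0, 0.1), x_milestones=(0, 10, 10, 110))
# fn(0) -> 0.0 (flat start), fn(5) -> 0.5 (linear ramp), fn(60) -> 0.55 (cosine, halfway), fn(200) -> 0.1 (flat tail)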
class LARS(Optimizer):
"""
Extends SGD in PyTorch with LARS scaling from the paper
'Large batch training of Convolutional Networks <https://arxiv.org/pdf/1708.03888.pdf>'_.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float): learning rate
momentum (float, optional): momentum factor (default: 0)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
dampening (float, optional): dampening for momentum (default: 0)
nesterov (bool, optional): enables Nesterov momentum (default: False)
trust_coefficient (float, optional): trust coefficient for computing LR (default: 0.001)
eps (float, optional): eps for division denominator (default: 1e-8)
Example:
>>> model = torch.nn.Linear(10, 1)
>>> input = torch.Tensor(10)
>>> target = torch.Tensor([1.])
>>> loss_fn = lambda input, target: (input - target) ** 2
>>> #
>>> optimizer = LARS(model.parameters(), lr=0.1, momentum=0.9)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
Note:
The application of momentum in the SGD part is modified according to
the PyTorch standards. LARS scaling fits into the equation in the
following fashion.
.. math::
            \begin{aligned}
                g_{t+1} & = \text{lars\_lr} * (\beta * p_{t} + g_{t+1}), \\
                v_{t+1} & = \mu * v_{t} + g_{t+1}, \\
                p_{t+1} & = p_{t} - \text{lr} * v_{t+1},
            \end{aligned}
        where :math:`p`, :math:`g`, :math:`v`, :math:`\mu` and :math:`\beta` denote the
parameters, gradient, velocity, momentum, and weight decay respectively.
The :math:`lars_lr` is defined by Eq. 6 in the paper.
The Nesterov version is analogously modified.
.. warning::
Parameters with weight decay set to 0 will automatically be excluded from
layer-wise LR scaling. This is to ensure consistency with papers like SimCLR
and BYOL.
"""
def __init__(
self,
params,
lr=None,
momentum=0,
dampening=0,
weight_decay=0,
nesterov=False,
trust_coefficient=0.001,
eps=1e-8,
):
if lr is None or lr < 0.0:
raise ValueError(f"Invalid learning rate: {lr}")
if momentum < 0.0:
raise ValueError(f"Invalid momentum value: {momentum}")
if weight_decay < 0.0:
raise ValueError(f"Invalid weight_decay value: {weight_decay}")
defaults = dict(
lr=lr,
momentum=momentum,
dampening=dampening,
weight_decay=weight_decay,
nesterov=nesterov,
trust_coefficient=trust_coefficient,
eps=eps,
)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError("Nesterov momentum requires a momentum and zero dampening")
super().__init__(params, defaults)
def __setstate__(self, state):
super().__setstate__(state)
for group in self.param_groups:
group.setdefault("nesterov", False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
# exclude scaling for params with 0 weight decay
for group in self.param_groups:
weight_decay = group["weight_decay"]
momentum = group["momentum"]
dampening = group["dampening"]
nesterov = group["nesterov"]
for p in group["params"]:
if p.grad is None:
continue
d_p = p.grad
p_norm = torch.norm(p.data)
g_norm = torch.norm(p.grad.data)
# lars scaling + weight decay part
if weight_decay != 0:
if p_norm != 0 and g_norm != 0:
lars_lr = p_norm / (g_norm + p_norm * weight_decay + group["eps"])
lars_lr *= group["trust_coefficient"]
d_p = d_p.add(p, alpha=weight_decay)
d_p *= lars_lr
# sgd part
if momentum != 0:
param_state = self.state[p]
if "momentum_buffer" not in param_state:
buf = param_state["momentum_buffer"] = torch.clone(d_p).detach()
else:
buf = param_state["momentum_buffer"]
buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
if nesterov:
d_p = d_p.add(buf, alpha=momentum)
else:
d_p = buf
p.add_(d_p, alpha=-group["lr"])
return loss
| [
"torch.norm",
"torch.no_grad",
"torch.clone",
"torch.enable_grad"
] | 1.10 | broadinstitute/tissue_purifier | 989ce9d58bba99a3f1c49743eed22dcc64e5f159 |
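# Hedged usage sketch for the optimizer record above. It assumes the
# linear_warmup_and_cosine_protocol function and the LARS class defined in that record
# are in scope; the toy model, data and schedule values are hypothetical. The protocol
# returns absolute learning rates, so one plausible pattern is to write them into the
# optimizer param groups by hand each step.
import torch

model = torch.nn.Linear(10, 1)
optimizer = LARS(model.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4)
lr_fn = linear_warmup_and_cosine_protocol(f_values=(0.0, 0.1, 0.001),
                                          x_milestones=(0, 100, 100, 1000))
for step in range(1000):
    for group in optimizer.param_groups:
        group["lr"] = lr_fn(step)                # absolute lr from the schedule
    loss = (model(torch.randn(8, 10)) - torch.randn(8, 1)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()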
1.4 | import warnings
import numpy as np
import scipy as sp
from scipy import stats
import torch
import torch.nn as nn
import torch.nn.functional as F
from .. import utilities
def create_batches(features, y, batchsize):
# Create random indices to reorder datapoints
n = features.shape[0]
p = features.shape[1]
inds = torch.randperm(n)
# Iterate through and create batches
i = 0
batches = []
while i < n:
batches.append([features[inds][i : i + batchsize], y[inds][i : i + batchsize]])
i += batchsize
return batches
class DeepPinkModel(nn.Module):
def __init__(self, p, hidden_sizes=[64], y_dist="gaussian", normalize_Z=True):
"""
Adapted from https://arxiv.org/pdf/1809.01185.pdf.
The module has two components:
1. A sparse linear layer with dimension 2*p to p.
However, there are only 2*p weights (each feature
and knockoff points only to their own unique node).
This is (maybe?) followed by a ReLU activation.
2. A multilayer perceptron (MLP)
Parameters
----------
p : int
The dimensionality of the data
hidden_sizes: list
A list of hidden sizes for the mlp layer(s).
Defaults to [64].
normalize_Z : bool
If True, the first sparse linear layer is normalized
so the weights for each feature/knockoff pair have an
l1 norm of 1. This can modestly improve power in some
settings.
"""
super().__init__()
# Initialize weight for first layer
self.p = p
self.y_dist = y_dist
self.Z_weight = nn.Parameter(torch.ones(2 * p))
self.norm_Z_weight = normalize_Z
# Save indices/reverse indices to prevent violations of FDR control
self.inds, self.rev_inds = utilities.random_permutation_inds(2 * p)
self.feature_inds = self.rev_inds[0:self.p]
self.ko_inds = self.rev_inds[self.p:]
# Create MLP layers
mlp_layers = [nn.Linear(p, hidden_sizes[0])]
for i in range(len(hidden_sizes) - 1):
mlp_layers.append(nn.ReLU())
mlp_layers.append(nn.Linear(hidden_sizes[i], hidden_sizes[i + 1]))
# Prepare for either MSE loss or cross entropy loss
mlp_layers.append(nn.ReLU())
if y_dist == "gaussian":
mlp_layers.append(nn.Linear(hidden_sizes[-1], 1))
else:
mlp_layers.append(nn.Linear(hidden_sizes[-1], 2))
# Then create MLP
self.mlp = nn.Sequential(*mlp_layers)
def _fetch_Z_weight(self):
# Possibly don't normalize
if not self.norm_Z_weight:
return self.Z_weight
# Else normalize, first construct denominator
normalizer = torch.abs(self.Z_weight[self.feature_inds]) + torch.abs(
self.Z_weight[self.ko_inds]
)
# Normalize
Z = torch.abs(self.Z_weight[self.feature_inds]) / normalizer
Ztilde = torch.abs(self.Z_weight[self.ko_inds]) / normalizer
# Concatenate and reshuffle
return torch.cat([Z, Ztilde], dim=0)[self.inds]
def forward(self, features):
"""
Note: features are now shuffled
"""
# First layer: pairwise weights (and sum)
if not isinstance(features, torch.Tensor):
features = torch.tensor(features).float()
features = features[:, self.inds] # shuffle features to prevent FDR violations
features = self._fetch_Z_weight().unsqueeze(dim=0) * features
features = features[:, self.feature_inds] - features[:, self.ko_inds]
# Apply MLP
return self.mlp(features)
def predict(self, features):
"""
Wraps forward method, for compatibility
with sklearn classes.
"""
with torch.no_grad():
return self.forward(features).numpy()
def l1norm(self):
out = 0
for parameter in self.mlp.parameters():
out += torch.abs(parameter).sum()
out += torch.abs(self.Z_weight).sum() # This is just for stability
return out
def l2norm(self):
out = 0
for parameter in self.mlp.parameters():
out += (parameter ** 2).sum()
out += (self.Z_weight ** 2).sum()
return out
def feature_importances(self, weight_scores=True):
with torch.no_grad():
# Calculate weights from MLP
if weight_scores:
layers = list(self.mlp.named_children())
W = layers[0][1].weight.detach().numpy().T
for layer in layers[1:]:
if isinstance(layer[1], nn.ReLU):
continue
weight = layer[1].weight.detach().numpy().T
W = np.dot(W, weight)
W = W.squeeze(-1)
else:
W = np.ones(self.p)
# Multiply by Z weights
Z = self._fetch_Z_weight().numpy()
feature_imp = Z[self.feature_inds] * W
knockoff_imp = Z[self.ko_inds] * W
return np.concatenate([feature_imp, knockoff_imp])
def train_deeppink(
model,
features,
y,
batchsize=100,
num_epochs=50,
lambda1=None,
lambda2=None,
verbose=True,
**kwargs,
):
# Infer n, p, set default lambda1, lambda2
n = features.shape[0]
p = int(features.shape[1] / 2)
if lambda1 is None:
lambda1 = 10 * np.sqrt(np.log(p) / n)
if lambda2 is None:
lambda2 = 0
# Batchsize can't be bigger than n
batchsize = min(features.shape[0], batchsize)
# Create criterion
features, y = map(lambda x: torch.tensor(x).detach().float(), (features, y))
if model.y_dist == "gaussian":
criterion = nn.MSELoss(reduction="sum")
else:
criterion = nn.CrossEntropyLoss(reduction="sum")
y = y.long()
# Create optimizer
opt = torch.optim.Adam(model.parameters(), **kwargs)
# Loop through epochs
for j in range(num_epochs):
# Create batches, loop through
batches = create_batches(features, y, batchsize=batchsize)
predictive_loss = 0
for Xbatch, ybatch in batches:
# Forward pass and loss
output = model(Xbatch)
loss = criterion(output, ybatch.unsqueeze(-1))
predictive_loss += loss
# Add l1 and l2 regularization
loss += lambda1 * model.l1norm()
loss += lambda2 * model.l2norm()
# Step
opt.zero_grad()
loss.backward()
opt.step()
if verbose and j % 10 == 0:
print(f"At epoch {j}, mean loss is {predictive_loss / n}")
return model
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.MSELoss",
"torch.nn.Sequential",
"torch.no_grad",
"torch.randperm",
"torch.ones",
"torch.nn.ReLU",
"torch.abs",
"torch.tensor",
"torch.nn.CrossEntropyLoss"
] | 1.4.0 | amspector100/knockpy | c4980ebd506c110473babd85836dbd8ae1d548b7 |
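# Hedged usage sketch for the DeepPink record above. It assumes DeepPinkModel and
# train_deeppink are importable together with the knockpy `utilities` module they depend
# on. The Gaussian data, the placeholder knockoffs and the W statistic below are purely
# illustrative (taking the difference of absolute importances is one common knockoff
# statistic, not necessarily knockpy's exact choice).
import numpy as np

n, p = 500, 20
X = np.random.randn(n, p)
Xk = np.random.randn(n, p)                       # placeholder knockoffs
beta = np.zeros(p); beta[:3] = 1.0
y = X @ beta + 0.5 * np.random.randn(n)

model = DeepPinkModel(p=p, hidden_sizes=[32], y_dist="gaussian")
model = train_deeppink(model, np.concatenate([X, Xk], axis=1), y,
                       num_epochs=10, verbose=False)
imp = model.feature_importances()                # length 2p: [features, knockoffs]
W = np.abs(imp[:p]) - np.abs(imp[p:])            # larger W => stronger evidence for feature j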
1.3 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional
import torch
from torch import Tensor
from torchmetrics.functional.classification.f_beta import _fbeta_compute, _fbeta_update
from torchmetrics.metric import Metric
from torchmetrics.utilities import rank_zero_warn
class FBeta(Metric):
r"""
Computes `F-score <https://en.wikipedia.org/wiki/F-score>`_, specifically:
.. math::
F_\beta = (1 + \beta^2) * \frac{\text{precision} * \text{recall}}
{(\beta^2 * \text{precision}) + \text{recall}}
Where :math:`\beta` is some positive real factor. Works with binary, multiclass, and multilabel data.
Accepts probabilities from a model output or integer class values in prediction.
Works with multi-dimensional preds and target.
Forward accepts
- ``preds`` (float or long tensor): ``(N, ...)`` or ``(N, C, ...)`` where C is the number of classes
- ``target`` (long tensor): ``(N, ...)``
If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument
to convert into integer labels. This is the case for binary and multi-label probabilities.
If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``.
Args:
num_classes: Number of classes in the dataset.
beta: Beta coefficient in the F measure.
threshold:
Threshold value for binary or multi-label probabilities. default: 0.5
average:
- ``'micro'`` computes metric globally
- ``'macro'`` computes metric for each class and uniformly averages them
- ``'weighted'`` computes metric for each class and does a weighted-average,
where each class is weighted by their support (accounts for class imbalance)
- ``'none'`` or ``None`` computes and returns the metric per class
multilabel: If predictions are from multilabel classification.
compute_on_step:
Forward only calls ``update()`` and return None if this is set to False. default: True
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()``
before returning the value at the step. default: False
process_group:
Specify the process group on which synchronization is called. default: None (which selects the entire world)
Example:
>>> from torchmetrics import FBeta
>>> target = torch.tensor([0, 1, 2, 0, 1, 2])
>>> preds = torch.tensor([0, 2, 1, 0, 0, 1])
>>> f_beta = FBeta(num_classes=3, beta=0.5)
>>> f_beta(preds, target)
tensor(0.3333)
"""
def __init__(
self,
num_classes: int,
beta: float = 1.0,
threshold: float = 0.5,
average: str = "micro",
multilabel: bool = False,
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
):
super().__init__(
compute_on_step=compute_on_step,
dist_sync_on_step=dist_sync_on_step,
process_group=process_group,
)
self.num_classes = num_classes
self.beta = beta
self.threshold = threshold
self.average = average
self.multilabel = multilabel
allowed_average = ("micro", "macro", "weighted", "none", None)
if self.average not in allowed_average:
raise ValueError(
'Argument `average` expected to be one of the following:'
f' {allowed_average} but got {self.average}'
)
self.add_state("true_positives", default=torch.zeros(num_classes), dist_reduce_fx="sum")
self.add_state("predicted_positives", default=torch.zeros(num_classes), dist_reduce_fx="sum")
self.add_state("actual_positives", default=torch.zeros(num_classes), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor):
"""
Update state with predictions and targets.
Args:
preds: Predictions from model
target: Ground truth values
"""
true_positives, predicted_positives, actual_positives = _fbeta_update(
preds, target, self.num_classes, self.threshold, self.multilabel
)
self.true_positives += true_positives
self.predicted_positives += predicted_positives
self.actual_positives += actual_positives
def compute(self) -> Tensor:
"""
Computes fbeta over state.
"""
return _fbeta_compute(
self.true_positives, self.predicted_positives, self.actual_positives, self.beta, self.average
)
class F1(FBeta):
"""
Computes F1 metric. F1 metrics correspond to a harmonic mean of the
precision and recall scores.
Works with binary, multiclass, and multilabel data.
Accepts logits from a model output or integer class values in prediction.
Works with multi-dimensional preds and target.
Forward accepts
- ``preds`` (float or long tensor): ``(N, ...)`` or ``(N, C, ...)`` where C is the number of classes
- ``target`` (long tensor): ``(N, ...)``
If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument.
This is the case for binary and multi-label logits.
If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``.
Args:
num_classes: Number of classes in the dataset.
threshold:
Threshold value for binary or multi-label logits. default: 0.5
average:
- ``'micro'`` computes metric globally
- ``'macro'`` computes metric for each class and uniformly averages them
- ``'weighted'`` computes metric for each class and does a weighted-average,
where each class is weighted by their support (accounts for class imbalance)
- ``'none'`` or ``None`` computes and returns the metric per class
multilabel: If predictions are from multilabel classification.
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False. default: True
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()``
before returning the value at the step. default: False
process_group:
Specify the process group on which synchronization is called. default: None (which selects the entire world)
Example:
>>> from torchmetrics import F1
>>> target = torch.tensor([0, 1, 2, 0, 1, 2])
>>> preds = torch.tensor([0, 2, 1, 0, 0, 1])
>>> f1 = F1(num_classes=3)
>>> f1(preds, target)
tensor(0.3333)
"""
def __init__(
self,
num_classes: int,
threshold: float = 0.5,
average: str = "micro",
multilabel: bool = False,
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
):
if multilabel is not False:
rank_zero_warn(f'The `multilabel={multilabel}` parameter is unused and will not have any effect.')
super().__init__(
num_classes=num_classes,
beta=1.0,
threshold=threshold,
average=average,
multilabel=multilabel,
compute_on_step=compute_on_step,
dist_sync_on_step=dist_sync_on_step,
process_group=process_group,
)
| [
"torch.zeros"
] | 1.3.1 | amorehead/metrics | 2e4cb70c46bd775629ceb9d710bc581af8bf92c5 |
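# Hedged usage sketch for the metric record above: besides the functional-style call
# shown in the docstrings, these Metric subclasses support an accumulate-then-compute
# pattern across mini-batches. Assumes a torchmetrics version that still exposes the
# class under the name FBeta; the random predictions/targets are placeholders.
import torch
from torchmetrics import FBeta

metric = FBeta(num_classes=3, beta=0.5, average="macro")
for _ in range(4):                               # pretend mini-batches
    preds = torch.randint(0, 3, (8,))
    target = torch.randint(0, 3, (8,))
    metric.update(preds, target)
score = metric.compute()                         # aggregated over all seen batches
metric.reset()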
1.1 | # -*- coding: utf-8 -*-
import glob
import os
import codecs
import math
from collections import Counter, defaultdict
from itertools import chain, cycle
import torch
import torchtext.data
from torchtext.data import Field, RawField
from torchtext.vocab import Vocab
from torchtext.data.utils import RandomShuffler
from onmt.inputters.text_dataset import text_fields, TextMultiField
from onmt.inputters.image_dataset import image_fields
from onmt.inputters.audio_dataset import audio_fields
from onmt.utils.logging import logger
# backwards compatibility
from onmt.inputters.text_dataset import _feature_tokenize # noqa: F401
from onmt.inputters.image_dataset import ( # noqa: F401
batch_img as make_img)
import gc
# monkey-patch to make torchtext Vocab's pickleable
def _getstate(self):
return dict(self.__dict__, stoi=dict(self.stoi))
def _setstate(self, state):
self.__dict__.update(state)
self.stoi = defaultdict(lambda: 0, self.stoi)
Vocab.__getstate__ = _getstate
Vocab.__setstate__ = _setstate
def make_src(data, vocab):
src_size = max([t.size(0) for t in data])
src_vocab_size = max([t.max() for t in data]) + 1
alignment = torch.zeros(src_size, len(data), src_vocab_size)
for i, sent in enumerate(data):
for j, t in enumerate(sent):
alignment[j, i, t] = 1
return alignment
def make_tgt(data, vocab):
tgt_size = max([t.size(0) for t in data])
alignment = torch.zeros(tgt_size, len(data)).long()
for i, sent in enumerate(data):
alignment[:sent.size(0), i] = sent
return alignment
def get_fields(
src_data_type,
n_src_feats,
n_tgt_feats,
pad='<blank>',
bos='<s>',
eos='</s>',
dynamic_dict=False,
src_truncate=None,
tgt_truncate=None
):
"""
Args:
src_data_type: type of the source input. Options are [text|img|audio].
n_src_feats (int): the number of source features (not counting tokens)
to create a :class:`torchtext.data.Field` for. (If
``src_data_type=="text"``, these fields are stored together
as a ``TextMultiField``).
n_tgt_feats (int): See above.
pad (str): Special pad symbol. Used on src and tgt side.
bos (str): Special beginning of sequence symbol. Only relevant
for tgt.
eos (str): Special end of sequence symbol. Only relevant
for tgt.
dynamic_dict (bool): Whether or not to include source map and
alignment fields.
src_truncate: Cut off src sequences beyond this (passed to
``src_data_type``'s data reader - see there for more details).
tgt_truncate: Cut off tgt sequences beyond this (passed to
:class:`TextDataReader` - see there for more details).
Returns:
A dict mapping names to fields. These names need to match
the dataset example attributes.
"""
assert src_data_type in ['text', 'img', 'audio'], \
"Data type not implemented"
assert not dynamic_dict or src_data_type == 'text', \
'it is not possible to use dynamic_dict with non-text input'
fields = {}
fields_getters = {"text": text_fields,
"img": image_fields,
"audio": audio_fields}
src_field_kwargs = {"n_feats": n_src_feats,
"include_lengths": True,
"pad": pad, "bos": None, "eos": None,
"truncate": src_truncate,
"base_name": "src"}
fields["src"] = fields_getters[src_data_type](**src_field_kwargs)
tgt_field_kwargs = {"n_feats": n_tgt_feats,
"include_lengths": False,
"pad": pad, "bos": bos, "eos": eos,
"truncate": tgt_truncate,
"base_name": "tgt"}
fields["tgt"] = fields_getters["text"](**tgt_field_kwargs)
indices = Field(use_vocab=False, dtype=torch.long, sequential=False)
fields["indices"] = indices
if dynamic_dict:
src_map = Field(
use_vocab=False, dtype=torch.float,
postprocessing=make_src, sequential=False)
fields["src_map"] = src_map
src_ex_vocab = RawField()
fields["src_ex_vocab"] = src_ex_vocab
align = Field(
use_vocab=False, dtype=torch.long,
postprocessing=make_tgt, sequential=False)
fields["alignment"] = align
return fields
def load_old_vocab(vocab, data_type="text", dynamic_dict=False):
"""Update a legacy vocab/field format.
Args:
vocab: a list of (field name, torchtext.vocab.Vocab) pairs. This is the
format formerly saved in *.vocab.pt files. Or, text data
not using a :class:`TextMultiField`.
data_type (str): text, img, or audio
dynamic_dict (bool): Used for copy attention.
Returns:
a dictionary whose keys are the field names and whose values Fields.
"""
if _old_style_vocab(vocab):
# List[Tuple[str, Vocab]] -> List[Tuple[str, Field]]
# -> dict[str, Field]
vocab = dict(vocab)
n_src_features = sum('src_feat_' in k for k in vocab)
n_tgt_features = sum('tgt_feat_' in k for k in vocab)
fields = get_fields(
data_type, n_src_features, n_tgt_features,
dynamic_dict=dynamic_dict)
for n, f in fields.items():
try:
f_iter = iter(f)
except TypeError:
f_iter = [(n, f)]
for sub_n, sub_f in f_iter:
if sub_n in vocab:
sub_f.vocab = vocab[sub_n]
return fields
if _old_style_field_list(vocab): # upgrade to multifield
# Dict[str, List[Tuple[str, Field]]]
# doesn't change structure - don't return early.
fields = vocab
for base_name, vals in fields.items():
if ((base_name == 'src' and data_type == 'text') or
base_name == 'tgt'):
assert not isinstance(vals[0][1], TextMultiField)
fields[base_name] = [(base_name, TextMultiField(
vals[0][0], vals[0][1], vals[1:]))]
if _old_style_nesting(vocab):
# Dict[str, List[Tuple[str, Field]]] -> List[Tuple[str, Field]]
# -> dict[str, Field]
fields = dict(list(chain.from_iterable(vocab.values())))
return fields
def _old_style_vocab(vocab):
"""Detect old-style vocabs (``List[Tuple[str, torchtext.data.Vocab]]``).
Args:
vocab: some object loaded from a *.vocab.pt file
Returns:
Whether ``vocab`` is a list of pairs where the second object
is a :class:`torchtext.vocab.Vocab` object.
This exists because previously only the vocab objects from the fields
were saved directly, not the fields themselves, and the fields needed to
be reconstructed at training and translation time.
"""
return isinstance(vocab, list) and \
any(isinstance(v[1], Vocab) for v in vocab)
def _old_style_nesting(vocab):
"""Detect old-style nesting (``dict[str, List[Tuple[str, Field]]]``)."""
return isinstance(vocab, dict) and \
any(isinstance(v, list) for v in vocab.values())
def _old_style_field_list(vocab):
"""Detect old-style text fields.
Not old style vocab, old nesting, and text-type fields not using
``TextMultiField``.
Args:
vocab: some object loaded from a *.vocab.pt file
Returns:
Whether ``vocab`` is not an :func:`_old_style_vocab` and not
a :class:`TextMultiField` (using an old-style text representation).
"""
# if tgt isn't using TextMultiField, then no text field is.
return (not _old_style_vocab(vocab)) and _old_style_nesting(vocab) and \
(not isinstance(vocab['tgt'][0][1], TextMultiField))
def old_style_vocab(vocab):
"""The vocab/fields need updated."""
return _old_style_vocab(vocab) or _old_style_field_list(vocab) or \
_old_style_nesting(vocab)
def filter_example(ex, use_src_len=True, use_tgt_len=True,
min_src_len=1, max_src_len=float('inf'),
min_tgt_len=1, max_tgt_len=float('inf')):
"""Return whether an example is an acceptable length.
If used with a dataset as ``filter_pred``, use :func:`partial()`
for all keyword arguments.
Args:
ex (torchtext.data.Example): An object with a ``src`` and ``tgt``
property.
use_src_len (bool): Filter based on the length of ``ex.src``.
use_tgt_len (bool): Similar to above.
min_src_len (int): A non-negative minimally acceptable length
(examples of exactly this length will be included).
min_tgt_len (int): Similar to above.
max_src_len (int or float): A non-negative (possibly infinite)
maximally acceptable length (examples of exactly this length
will be included).
max_tgt_len (int or float): Similar to above.
"""
src_len = len(ex.src[0])
tgt_len = len(ex.tgt[0])
return (not use_src_len or min_src_len <= src_len <= max_src_len) and \
(not use_tgt_len or min_tgt_len <= tgt_len <= max_tgt_len)
def _pad_vocab_to_multiple(vocab, multiple):
vocab_size = len(vocab)
if vocab_size % multiple == 0:
return
target_size = int(math.ceil(vocab_size / multiple)) * multiple
padding_tokens = [
"averyunlikelytoken%d" % i for i in range(target_size - vocab_size)]
vocab.extend(Vocab(Counter(), specials=padding_tokens))
return vocab
def _build_field_vocab(field, counter, size_multiple=1, **kwargs):
# this is basically copy-pasted from torchtext.
all_specials = [
field.unk_token, field.pad_token, field.init_token, field.eos_token
]
specials = [tok for tok in all_specials if tok is not None]
field.vocab = field.vocab_cls(counter, specials=specials, **kwargs)
if size_multiple > 1:
_pad_vocab_to_multiple(field.vocab, size_multiple)
def _load_vocab(vocab_path, name, counters, min_freq):
# counters changes in place
vocab = _read_vocab_file(vocab_path, name)
vocab_size = len(vocab)
logger.info('Loaded %s vocab has %d tokens.' % (name, vocab_size))
for i, token in enumerate(vocab):
# keep the order of tokens specified in the vocab file by
# adding them to the counter with decreasing counting values
counters[name][token] = vocab_size - i + min_freq
return vocab, vocab_size
def _build_fv_from_multifield(multifield, counters, build_fv_args,
size_multiple=1):
for name, field in multifield:
_build_field_vocab(
field,
counters[name],
size_multiple=size_multiple,
**build_fv_args[name])
logger.info(" * %s vocab size: %d." % (name, len(field.vocab)))
def _build_fields_vocab(fields, counters, data_type, share_vocab,
vocab_size_multiple,
src_vocab_size, src_words_min_frequency,
tgt_vocab_size, tgt_words_min_frequency):
build_fv_args = defaultdict(dict)
build_fv_args["src"] = dict(
max_size=src_vocab_size, min_freq=src_words_min_frequency)
build_fv_args["tgt"] = dict(
max_size=tgt_vocab_size, min_freq=tgt_words_min_frequency)
tgt_multifield = fields["tgt"]
_build_fv_from_multifield(
tgt_multifield,
counters,
build_fv_args,
size_multiple=vocab_size_multiple if not share_vocab else 1)
if data_type == 'text':
src_multifield = fields["src"]
_build_fv_from_multifield(
src_multifield,
counters,
build_fv_args,
size_multiple=vocab_size_multiple if not share_vocab else 1)
if share_vocab:
# `tgt_vocab_size` is ignored when sharing vocabularies
logger.info(" * merging src and tgt vocab...")
src_field = src_multifield.base_field
tgt_field = tgt_multifield.base_field
_merge_field_vocabs(
src_field, tgt_field, vocab_size=src_vocab_size,
min_freq=src_words_min_frequency,
vocab_size_multiple=vocab_size_multiple)
logger.info(" * merged vocab size: %d." % len(src_field.vocab))
return fields
def build_vocab(train_dataset_files, fields, data_type, share_vocab,
src_vocab_path, src_vocab_size, src_words_min_frequency,
tgt_vocab_path, tgt_vocab_size, tgt_words_min_frequency,
vocab_size_multiple=1):
"""Build the fields for all data sides.
Args:
train_dataset_files: a list of train dataset pt file.
fields (dict[str, Field]): fields to build vocab for.
data_type (str): A supported data type string.
share_vocab (bool): share source and target vocabulary?
src_vocab_path (str): Path to src vocabulary file.
src_vocab_size (int): size of the source vocabulary.
src_words_min_frequency (int): the minimum frequency needed to
include a source word in the vocabulary.
tgt_vocab_path (str): Path to tgt vocabulary file.
tgt_vocab_size (int): size of the target vocabulary.
tgt_words_min_frequency (int): the minimum frequency needed to
include a target word in the vocabulary.
vocab_size_multiple (int): ensure that the vocabulary size is a
multiple of this value.
Returns:
Dict of Fields
"""
counters = defaultdict(Counter)
if src_vocab_path:
try:
logger.info("Using existing vocabulary...")
vocab = torch.load(src_vocab_path)
# return vocab to dump with standard name
return vocab
except torch.serialization.pickle.UnpicklingError:
logger.info("Building vocab from text file...")
# empty train_dataset_files so that vocab is only loaded from
# given paths in src_vocab_path, tgt_vocab_path
train_dataset_files = []
# Load vocabulary
if src_vocab_path:
src_vocab, src_vocab_size = _load_vocab(
src_vocab_path, "src", counters,
src_words_min_frequency)
else:
src_vocab = None
if tgt_vocab_path:
tgt_vocab, tgt_vocab_size = _load_vocab(
tgt_vocab_path, "tgt", counters,
tgt_words_min_frequency)
else:
tgt_vocab = None
for i, path in enumerate(train_dataset_files):
dataset = torch.load(path)
logger.info(" * reloading %s." % path)
for ex in dataset.examples:
for name, field in fields.items():
try:
f_iter = iter(field)
except TypeError:
f_iter = [(name, field)]
all_data = [getattr(ex, name, None)]
else:
all_data = getattr(ex, name)
for (sub_n, sub_f), fd in zip(
f_iter, all_data):
has_vocab = (sub_n == 'src' and src_vocab) or \
(sub_n == 'tgt' and tgt_vocab)
if sub_f.sequential and not has_vocab:
val = fd
counters[sub_n].update(val)
        # Drop the already-processed shards from memory but keep the last one
if i < len(train_dataset_files) - 1:
dataset.examples = None
gc.collect()
del dataset.examples
gc.collect()
del dataset
gc.collect()
fields = _build_fields_vocab(
fields, counters, data_type,
share_vocab, vocab_size_multiple,
src_vocab_size, src_words_min_frequency,
tgt_vocab_size, tgt_words_min_frequency)
return fields # is the return necessary?
def _merge_field_vocabs(src_field, tgt_field, vocab_size, min_freq,
vocab_size_multiple):
# in the long run, shouldn't it be possible to do this by calling
# build_vocab with both the src and tgt data?
specials = [tgt_field.unk_token, tgt_field.pad_token,
tgt_field.init_token, tgt_field.eos_token]
merged = sum(
[src_field.vocab.freqs, tgt_field.vocab.freqs], Counter()
)
merged_vocab = Vocab(
merged, specials=specials,
max_size=vocab_size, min_freq=min_freq
)
if vocab_size_multiple > 1:
_pad_vocab_to_multiple(merged_vocab, vocab_size_multiple)
src_field.vocab = merged_vocab
tgt_field.vocab = merged_vocab
assert len(src_field.vocab) == len(tgt_field.vocab)
def _read_vocab_file(vocab_path, tag):
"""Loads a vocabulary from the given path.
Args:
vocab_path (str): Path to utf-8 text file containing vocabulary.
Each token should be on a line by itself. Tokens must not
contain whitespace (else only before the whitespace
is considered).
tag (str): Used for logging which vocab is being read.
"""
logger.info("Loading {} vocabulary from {}".format(tag, vocab_path))
if not os.path.exists(vocab_path):
raise RuntimeError(
"{} vocabulary not found at {}".format(tag, vocab_path))
else:
with codecs.open(vocab_path, 'r', 'utf-8') as f:
return [line.strip().split()[0] for line in f if line.strip()]
def batch_iter(data, batch_size, batch_size_fn=None, batch_size_multiple=1):
"""Yield elements from data in chunks of batch_size, where each chunk size
is a multiple of batch_size_multiple.
This is an extended version of torchtext.data.batch.
"""
if batch_size_fn is None:
def batch_size_fn(new, count, sofar):
return count
minibatch, size_so_far = [], 0
for ex in data:
minibatch.append(ex)
size_so_far = batch_size_fn(ex, len(minibatch), size_so_far)
if size_so_far >= batch_size:
overflowed = 0
if size_so_far > batch_size:
overflowed += 1
if batch_size_multiple > 1:
overflowed += (
(len(minibatch) - overflowed) % batch_size_multiple)
if overflowed == 0:
yield minibatch
minibatch, size_so_far = [], 0
else:
yield minibatch[:-overflowed]
minibatch = minibatch[-overflowed:]
size_so_far = 0
for i, ex in enumerate(minibatch):
size_so_far = batch_size_fn(ex, i + 1, size_so_far)
if minibatch:
yield minibatch
def _pool(data, batch_size, batch_size_fn, batch_size_multiple,
sort_key, random_shuffler, pool_factor):
for p in torchtext.data.batch(
data, batch_size * pool_factor,
batch_size_fn=batch_size_fn):
p_batch = list(batch_iter(
sorted(p, key=sort_key),
batch_size,
batch_size_fn=batch_size_fn,
batch_size_multiple=batch_size_multiple))
for b in random_shuffler(p_batch):
yield b
class OrderedIterator(torchtext.data.Iterator):
def __init__(self,
dataset,
batch_size,
pool_factor=1,
batch_size_multiple=1,
yield_raw_example=False,
**kwargs):
super(OrderedIterator, self).__init__(dataset, batch_size, **kwargs)
self.batch_size_multiple = batch_size_multiple
self.yield_raw_example = yield_raw_example
self.dataset = dataset
self.pool_factor = pool_factor
def create_batches(self):
if self.train:
if self.yield_raw_example:
self.batches = batch_iter(
self.data(),
1,
batch_size_fn=None,
batch_size_multiple=1)
else:
self.batches = _pool(
self.data(),
self.batch_size,
self.batch_size_fn,
self.batch_size_multiple,
self.sort_key,
self.random_shuffler,
self.pool_factor)
else:
self.batches = []
for b in batch_iter(
self.data(),
self.batch_size,
batch_size_fn=self.batch_size_fn,
batch_size_multiple=self.batch_size_multiple):
self.batches.append(sorted(b, key=self.sort_key))
def __iter__(self):
"""
Extended version of the definition in torchtext.data.Iterator.
Added yield_raw_example behaviour to yield a torchtext.data.Example
instead of a torchtext.data.Batch object.
"""
while True:
self.init_epoch()
for idx, minibatch in enumerate(self.batches):
# fast-forward if loaded from state
if self._iterations_this_epoch > idx:
continue
self.iterations += 1
self._iterations_this_epoch += 1
if self.sort_within_batch:
# NOTE: `rnn.pack_padded_sequence` requires that a
# minibatch be sorted by decreasing order, which
# requires reversing relative to typical sort keys
if self.sort:
minibatch.reverse()
else:
minibatch.sort(key=self.sort_key, reverse=True)
if self.yield_raw_example:
yield minibatch[0]
else:
yield torchtext.data.Batch(
minibatch,
self.dataset,
self.device)
if not self.repeat:
return
class MultipleDatasetIterator(object):
"""
This takes a list of iterable objects (DatasetLazyIter) and their
respective weights, and yields a batch in the wanted proportions.
"""
def __init__(self,
train_shards,
fields,
device,
opt):
self.index = -1
self.iterables = []
for shard in train_shards:
self.iterables.append(
build_dataset_iter(shard, fields, opt, multi=True))
self.init_iterators = True
self.weights = opt.data_weights
self.batch_size = opt.batch_size
self.batch_size_fn = max_tok_len \
if opt.batch_type == "tokens" else None
self.batch_size_multiple = 8 if opt.model_dtype == "fp16" else 1
self.device = device
# Temporarily load one shard to retrieve sort_key for data_type
temp_dataset = torch.load(self.iterables[0]._paths[0])
self.sort_key = temp_dataset.sort_key
self.random_shuffler = RandomShuffler()
self.pool_factor = opt.pool_factor
del temp_dataset
def _iter_datasets(self):
if self.init_iterators:
self.iterators = [iter(iterable) for iterable in self.iterables]
self.init_iterators = False
for weight in self.weights:
self.index = (self.index + 1) % len(self.iterators)
for i in range(weight):
yield self.iterators[self.index]
def _iter_examples(self):
for iterator in cycle(self._iter_datasets()):
yield next(iterator)
def __iter__(self):
while True:
for minibatch in _pool(
self._iter_examples(),
self.batch_size,
self.batch_size_fn,
self.batch_size_multiple,
self.sort_key,
self.random_shuffler,
self.pool_factor):
minibatch = sorted(minibatch, key=self.sort_key, reverse=True)
yield torchtext.data.Batch(minibatch,
self.iterables[0].dataset,
self.device)
class DatasetLazyIter(object):
"""Yield data from sharded dataset files.
Args:
dataset_paths: a list containing the locations of dataset files.
fields (dict[str, Field]): fields dict for the
datasets.
batch_size (int): batch size.
batch_size_fn: custom batch process function.
device: See :class:`OrderedIterator` ``device``.
is_train (bool): train or valid?
"""
def __init__(self, dataset_paths, fields, batch_size, batch_size_fn,
batch_size_multiple, device, is_train, pool_factor,
repeat=True, num_batches_multiple=1, yield_raw_example=False):
self._paths = dataset_paths
self.fields = fields
self.batch_size = batch_size
self.batch_size_fn = batch_size_fn
self.batch_size_multiple = batch_size_multiple
self.device = device
self.is_train = is_train
self.repeat = repeat
self.num_batches_multiple = num_batches_multiple
self.yield_raw_example = yield_raw_example
self.pool_factor = pool_factor
def _iter_dataset(self, path):
logger.info('Loading dataset from %s' % path)
cur_dataset = torch.load(path)
logger.info('number of examples: %d' % len(cur_dataset))
cur_dataset.fields = self.fields
cur_iter = OrderedIterator(
dataset=cur_dataset,
batch_size=self.batch_size,
pool_factor=self.pool_factor,
batch_size_multiple=self.batch_size_multiple,
batch_size_fn=self.batch_size_fn,
device=self.device,
train=self.is_train,
sort=False,
sort_within_batch=True,
repeat=False,
yield_raw_example=self.yield_raw_example
)
for batch in cur_iter:
self.dataset = cur_iter.dataset
yield batch
# NOTE: This is causing some issues for consumer/producer,
# as we may still have some of those examples in some queue
# cur_dataset.examples = None
# gc.collect()
# del cur_dataset
# gc.collect()
def __iter__(self):
num_batches = 0
paths = self._paths
if self.is_train and self.repeat:
# Cycle through the shards indefinitely.
paths = cycle(paths)
for path in paths:
for batch in self._iter_dataset(path):
yield batch
num_batches += 1
if self.is_train and not self.repeat and \
num_batches % self.num_batches_multiple != 0:
# When the dataset is not repeated, we might need to ensure that
# the number of returned batches is the multiple of a given value.
# This is important for multi GPU training to ensure that all
# workers have the same number of batches to process.
for path in paths:
for batch in self._iter_dataset(path):
yield batch
num_batches += 1
if num_batches % self.num_batches_multiple == 0:
return
def max_tok_len(new, count, sofar):
"""
In token batching scheme, the number of sequences is limited
such that the total number of src/tgt tokens (including padding)
in a batch <= batch_size
"""
# Maintains the longest src and tgt length in the current batch
global max_src_in_batch, max_tgt_in_batch # this is a hack
# Reset current longest length at a new batch (count=1)
if count == 1:
max_src_in_batch = 0
max_tgt_in_batch = 0
# Src: [<bos> w1 ... wN <eos>]
max_src_in_batch = max(max_src_in_batch, len(new.src[0]) + 2)
# Tgt: [w1 ... wM <eos>]
max_tgt_in_batch = max(max_tgt_in_batch, len(new.tgt[0]) + 1)
src_elements = count * max_src_in_batch
tgt_elements = count * max_tgt_in_batch
return max(src_elements, tgt_elements)
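# Illustrative arithmetic (not part of the original file): suppose the third example
# added to a batch has a src of length 9 and a tgt of length 8, and the running maxima
# so far were 5 and 4. Then max_src_in_batch becomes 9 + 2 = 11, max_tgt_in_batch
# becomes 8 + 1 = 9, and the function returns max(3 * 11, 3 * 9) = 33, i.e. the padded
# token count compared against the batch size when batch_type == "tokens".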
def build_dataset_iter(corpus_type, fields, opt, is_train=True, multi=False):
"""
This returns user-defined train/validate data iterator for the trainer
to iterate over. We implement simple ordered iterator strategy here,
but more sophisticated strategy like curriculum learning is ok too.
"""
dataset_paths = list(sorted(
glob.glob(opt.data + '.' + corpus_type + '.[0-9]*.pt')))
if not dataset_paths:
if is_train:
raise ValueError('Training data %s not found' % opt.data)
else:
return None
if multi:
batch_size = 1
batch_fn = None
batch_size_multiple = 1
else:
batch_size = opt.batch_size if is_train else opt.valid_batch_size
batch_fn = max_tok_len \
if is_train and opt.batch_type == "tokens" else None
batch_size_multiple = 8 if opt.model_dtype == "fp16" else 1
device = "cuda" if opt.gpu_ranks else "cpu"
return DatasetLazyIter(
dataset_paths,
fields,
batch_size,
batch_fn,
batch_size_multiple,
device,
is_train,
opt.pool_factor,
repeat=not opt.single_pass,
num_batches_multiple=max(opt.accum_count) * opt.world_size,
yield_raw_example=multi)
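# Sketch of a typical call site (names such as `opt` and `fields` are assumed to come
# from the surrounding training script, as elsewhere in OpenNMT-py; this is not code
# from this file):
#   train_iter = build_dataset_iter("train", fields, opt)
#   valid_iter = build_dataset_iter("valid", fields, opt, is_train=False)
#   for batch in train_iter:
#       ...  # each `batch` is a torchtext Batch already placed on the configured device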
def build_dataset_iter_multiple(train_shards, fields, opt):
return MultipleDatasetIterator(
train_shards, fields, "cuda" if opt.gpu_ranks else "cpu", opt)
| [
"torch.load"
] | 1.1 | EstelleHuang666/OpenNMT-py | f7a239086d0db156535f3f5db9ed7060291485e8 |
1.7 | import random
import torch
from deepy.data.transform import Transform, SeparatedTransform
from deepy.data.transform import PairedTransform, PairedCompose, ToPairedTransform
from deepy.nn import functional as myF
class RandomCrop(Transform):
def __init__(self, length: int, generator=None):
self.length = length
self.generator = generator
def __call__(self, data):
signal_length = data.size(-1)
start_index = torch.randint(0, signal_length - self.length + 1,
(1, ),
generator=self.generator)
end_index = start_index + self.length
return data[..., start_index:end_index]
def __repr__(self):
return self.__class__.__name__ + '(length={})'.format(self.length)
class RandomFrames(RandomCrop):
def __init__(self, n_frames=5, generator=None):
super().__init__(length=n_frames, generator=generator)
self.n_frames = n_frames
def __repr__(self):
return self.__class__.__name__ + '(n_frames={})'.format(self.n_frames)
class Windowing(Transform):
def __init__(self, n_frames=5, stride=1, n_signals=None):
self.n_frames = n_frames
if stride != 1:
raise NotImplementedError
self.stride = stride
self.n_signals = n_signals
def __call__(self, data):
total_frames = data.size(-1)
if self.n_signals is None:
n_signals = total_frames - self.n_frames + 1
else:
n_signals = self.n_signals
return torch.stack([data[..., i:i+self.n_frames] for i in range(n_signals)], dim=1)
def __repr__(self):
return self.__class__.__name__ + '(n_frames={}, stride={})'.format(self.n_frames, self.stride)
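# Shape sketch (illustrative, not from the original file): for a (channels, time)
# tensor of length 8, Windowing(n_frames=5) produces 8 - 5 + 1 = 4 overlapping frames
# stacked on dim=1:
#   x = torch.randn(2, 8)
#   Windowing(n_frames=5)(x).shape  # torch.Size([2, 4, 5])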
class Plane2Vector(Transform):
def __init__(self):
pass
def __call__(self, data):
return torch.cat([data[..., i, :] for i in range(data.size(-2))], dim=-1)
class ToVector(Transform):
def __init__(self):
pass
def __call__(self, data):
return data.reshape(-1, )
def __repr__(self):
return self.__class__.__name__
class PickUpChannel(Transform):
def __init__(self, chidx=0):
self.chidx = chidx
def __call__(self, data):
return data[self.chidx]
def __repr__(self):
return self.__class__.__name__ + '(chidx={})'.format(self.chidx)
| [
"torch.randint"
] | 1.7.0 | popura/deepy-pytorch | 71d87a82e937d82b9b149041280a392cc24b7299 |
1.6 | from typing import Union, Dict, List, Tuple, Any, Callable
from ..transition import (
Transition,
Scalar,
TransitionStorageSmart,
TransitionStorageBasic,
)
import torch as t
import random
class Buffer:
def __init__(self, buffer_size, buffer_device="cpu", *_, **__):
"""
Create a buffer instance.
Buffer stores a series of transition objects and functions
as a ring buffer. **It is not thread-safe**.
See Also:
:class:`.Transition`
During sampling, the tensors in "state", "action" and "next_state"
dictionaries, along with "reward", will be concatenated in dimension 0.
Any other custom keys specified in ``**kwargs`` will not be
concatenated.
Args:
buffer_size: Maximum buffer size.
buffer_device: Device where buffer is stored.
"""
self.buffer_size = buffer_size
self.buffer_device = buffer_device
self.buffer = TransitionStorageSmart(buffer_size)
self.index = 0
def append(
self,
transition: Union[Transition, Dict],
required_attrs=("state", "action", "next_state", "reward", "terminal"),
):
"""
Store a transition object to buffer.
Args:
transition: A transition object.
required_attrs: Required attributes. Could be an empty tuple if
no attribute is required.
Raises:
``ValueError`` if transition object doesn't have required
attributes in ``required_attrs`` or has different attributes
compared to other transition objects stored in buffer.
"""
if isinstance(transition, dict):
transition = Transition(**transition)
elif isinstance(transition, Transition):
pass
else: # pragma: no cover
raise RuntimeError(
"Transition object must be a dict or an instance"
" of the Transition class"
)
if not transition.has_keys(required_attrs):
missing_keys = set(required_attrs) - set(transition.keys())
raise ValueError(f"Transition object missing attributes: {missing_keys}")
transition.to(self.buffer_device)
if self.size() != 0 and self.buffer[0].keys() != transition.keys():
raise ValueError("Transition object has different attributes!")
return self.buffer.store(transition)
def size(self):
"""
Returns:
Length of current buffer.
"""
return len(self.buffer)
def clear(self):
"""
Remove all entries from the buffer
"""
self.buffer.clear()
@staticmethod
def sample_method_random_unique(
buffer: List[Transition], batch_size: int
) -> Tuple[int, List[Transition]]:
"""
Sample unique random samples from buffer.
Note:
Sampled size could be any value from 0 to ``batch_size``.
"""
if len(buffer) < batch_size:
batch = random.sample(buffer, len(buffer))
real_num = len(buffer)
else:
batch = random.sample(buffer, batch_size)
real_num = batch_size
return real_num, batch
@staticmethod
def sample_method_random(
buffer: List[Transition], batch_size: int
) -> Tuple[int, List[Transition]]:
"""
Sample random samples from buffer.
Note:
Sampled size could be any value from 0 to ``batch_size``.
"""
indexes = [random.randint(0, len(buffer) - 1) for _ in range(batch_size)]
batch = [buffer[i] for i in indexes]
return batch_size, batch
@staticmethod
def sample_method_all(buffer: List[Transition], _) -> Tuple[int, List[Transition]]:
"""
Sample all samples from buffer. Always return the whole buffer,
will ignore the ``batch_size`` parameter.
"""
return len(buffer), buffer
def sample_batch(
self,
batch_size: int,
concatenate: bool = True,
device: Union[str, t.device] = None,
sample_method: Union[Callable, str] = "random_unique",
sample_attrs: List[str] = None,
additional_concat_attrs: List[str] = None,
*_,
**__,
) -> Any:
"""
Sample a random batch from buffer.
See Also:
Default sample methods are defined as static class methods.
:meth:`.Buffer.sample_method_random_unique`
:meth:`.Buffer.sample_method_random`
:meth:`.Buffer.sample_method_all`
Note:
"Concatenation"
means ``torch.cat([...], dim=0)`` for tensors,
and ``torch.tensor([...]).view(batch_size, 1)`` for scalars.
Warnings:
Custom attributes must not contain tensors. And only scalar custom
attributes can be concatenated, such as ``int``, ``float``,
``bool``.
Args:
batch_size: A hint for the size of the sampled batch. The actual sample size
depends on your sample method.
sample_method: Sample method, could be one of:
``"random", "random_unique", "all"``,
or a function:
``func(list, batch_size)->(list, result_size)``
concatenate: Whether concatenate state, action and next_state
in dimension 0.
If ``True``, for each value in the dictionaries of major
attributes and each value of sub attributes, returns
a concatenated tensor. Custom attributes specified in
``additional_concat_attrs`` will also be concatenated.
If ``False``, return a list of tensors.
device: Device to copy to.
sample_attrs: If sample_attrs is specified, then only the specified keys
of the transition object will be sampled. You may use
``"*"`` as a wildcard to collect remaining
**custom keys** as a ``dict``, you cannot collect major
and sub attributes using this.
Invalid sample attributes will be ignored.
additional_concat_attrs: additional **custom keys** needed to be
concatenated, will only work if ``concatenate`` is
``True``.
Returns:
1. Batch size, and the sampled attribute values in the same order as
``sample_attrs``.
2. Sampled attribute values are returned as a tuple, or ``None`` if the
sampled batch size is zero (e.g. if the buffer is empty, or your sample
size is 0 and you are not sampling with the "all" method).
- For major attributes, result are dictionaries of tensors with
the same keys in your transition objects.
- For sub attributes, result are tensors.
- For custom attributes, if they are not in
``additional_concat_attrs``, then lists, otherwise tensors.
"""
if isinstance(sample_method, str):
if not hasattr(self, "sample_method_" + sample_method):
raise RuntimeError(
f"Cannot find specified sample method: {sample_method}"
)
sample_method = getattr(self, "sample_method_" + sample_method)
batch_size, batch = sample_method(self.buffer, batch_size)
if device is None:
device = self.buffer_device
return (
batch_size,
self.post_process_batch(
batch, device, concatenate, sample_attrs, additional_concat_attrs
),
)
@classmethod
def post_process_batch(
cls,
batch: List[Transition],
device: Union[str, t.device],
concatenate: bool,
sample_attrs: List[str],
additional_concat_attrs: List[str],
):
"""
Post-process (concatenate) sampled batch.
"""
result = []
used_keys = []
if len(batch) == 0:
return None
if sample_attrs is None:
sample_attrs = batch[0].keys() if batch else []
if additional_concat_attrs is None:
additional_concat_attrs = []
major_attr = set(batch[0].major_attr)
sub_attr = set(batch[0].sub_attr)
custom_attr = set(batch[0].custom_attr)
for attr in sample_attrs:
if attr in major_attr:
tmp_dict = {}
for sub_k in batch[0][attr].keys():
tmp_dict[sub_k] = cls.make_tensor_from_batch(
[item[attr][sub_k].to(device) for item in batch],
device,
concatenate,
)
result.append(tmp_dict)
used_keys.append(attr)
elif attr in sub_attr:
result.append(
cls.make_tensor_from_batch(
[item[attr] for item in batch], device, concatenate
)
)
used_keys.append(attr)
elif attr == "*":
# select custom keys
tmp_dict = {}
for remain_k in batch[0].keys():
if (
remain_k not in major_attr
and remain_k not in sub_attr
and remain_k not in used_keys
):
tmp_dict[remain_k] = cls.make_tensor_from_batch(
[item[remain_k] for item in batch],
device,
concatenate and remain_k in additional_concat_attrs,
)
result.append(tmp_dict)
elif attr in custom_attr:
result.append(
cls.make_tensor_from_batch(
[item[attr] for item in batch],
device,
concatenate and attr in additional_concat_attrs,
)
)
used_keys.append(attr)
return tuple(result)
@staticmethod
def make_tensor_from_batch(
batch: List[Union[Scalar, t.Tensor]],
device: Union[str, t.device],
concatenate: bool,
):
"""
Make a tensor from a batch of data.
Will concatenate input tensors in dimension 0.
Or create a tensor of size (batch_size, 1) for scalars.
Args:
batch: Batch data.
device: Device to move data to
concatenate: Whether performing concatenation.
Returns:
Original batch if batch is empty,
or tensor depends on your data (if concatenate),
or original batch (if not concatenate).
"""
if concatenate and len(batch) != 0:
item = batch[0]
batch_size = len(batch)
if t.is_tensor(item):
batch = [it.to(device) for it in batch]
return t.cat(batch, dim=0).to(device)
else:
try:
return t.tensor(batch, device=device).view(batch_size, -1)
except Exception:
raise ValueError(f"Batch not concatenable: {batch}")
else:
return batch
def __reduce__(self):
# for pickling
return self.__class__, (self.buffer_size, self.buffer_device)
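# Minimal usage sketch (hypothetical values; `obs`, `act`, `next_obs` are assumed to be
# tensors with a leading batch dimension of 1, as required for concatenation):
#   buffer = Buffer(buffer_size=10000)
#   buffer.append({"state": {"obs": obs}, "action": {"act": act},
#                  "next_state": {"obs": next_obs}, "reward": 1.0, "terminal": False})
#   size, (state, action, reward) = buffer.sample_batch(
#       64, sample_attrs=["state", "action", "reward"])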
| [
"torch.is_tensor",
"torch.cat",
"torch.tensor"
] | 1.6.0 | ikamensh/machin | af7b423c47bc1412530cf6c96c11bd3af9b3e239 |
1.5 | from abc import abstractmethod
import logging
from typing import List, Union
import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from transformers import AutoTokenizer, AutoConfig, AutoModel, CONFIG_MAPPING, PreTrainedTokenizer
import flair
from flair.data import Sentence
from flair.embeddings.base import Embeddings, ScalarMix
from flair.embeddings.token import TokenEmbeddings, StackedEmbeddings, FlairEmbeddings
from flair.nn import LockedDropout, WordDropout
from sklearn.feature_extraction.text import TfidfVectorizer
log = logging.getLogger("flair")
class DocumentEmbeddings(Embeddings):
"""Abstract base class for all document-level embeddings. Every new type of document embedding must implement these methods."""
@property
@abstractmethod
def embedding_length(self) -> int:
"""Returns the length of the embedding vector."""
pass
@property
def embedding_type(self) -> str:
return "sentence-level"
class TransformerDocumentEmbeddings(DocumentEmbeddings):
def __init__(
self,
model: str = "bert-base-uncased",
fine_tune: bool = True,
batch_size: int = 1,
layers: str = "-1",
layer_mean: bool = False,
**kwargs
):
"""
Bidirectional transformer embeddings of words from various transformer architectures.
:param model: name of transformer model (see https://huggingface.co/transformers/pretrained_models.html for
options)
:param fine_tune: If True, allows transformers to be fine-tuned during training
:param batch_size: How many sentence to push through transformer at once. Set to 1 by default since transformer
models tend to be huge.
:param layers: string indicating which layers to take for embedding (-1 is topmost layer)
:param layer_mean: If True, uses a scalar mix of layers as embedding
"""
super().__init__()
# temporary fix to disable tokenizer parallelism warning
# (see https://stackoverflow.com/questions/62691279/how-to-disable-tokenizers-parallelism-true-false-warning)
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# load tokenizer and transformer model
self.tokenizer: PreTrainedTokenizer = AutoTokenizer.from_pretrained(model, **kwargs)
if 'config' not in kwargs:
config = AutoConfig.from_pretrained(model, output_hidden_states=True, **kwargs)
self.model = AutoModel.from_pretrained(model, config=config, **kwargs)
else:
self.model = AutoModel.from_pretrained(None, **kwargs)
# model name
self.name = 'transformer-document-' + str(model)
self.base_model_name = str(model)
# when initializing, embeddings are in eval mode by default
self.model.eval()
self.model.to(flair.device)
# embedding parameters
if layers == 'all':
# send mini-token through to check how many layers the model has
hidden_states = self.model(torch.tensor([1], device=flair.device).unsqueeze(0))[-1]
self.layer_indexes = [int(x) for x in range(len(hidden_states))]
else:
self.layer_indexes = [int(x) for x in layers.split(",")]
self.layer_mean = layer_mean
self.fine_tune = fine_tune
self.static_embeddings = not self.fine_tune
self.batch_size = batch_size
# check whether CLS is at beginning or end
self.initial_cls_token: bool = self._has_initial_cls_token(tokenizer=self.tokenizer)
@staticmethod
def _has_initial_cls_token(tokenizer: PreTrainedTokenizer) -> bool:
# most models have CLS token as last token (GPT-1, GPT-2, TransfoXL, XLNet, XLM), but BERT is initial
tokens = tokenizer.encode('a')
initial_cls_token: bool = False
if tokens[0] == tokenizer.cls_token_id: initial_cls_token = True
return initial_cls_token
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
"""Add embeddings to all words in a list of sentences."""
# using list comprehension
sentence_batches = [sentences[i * self.batch_size:(i + 1) * self.batch_size]
for i in range((len(sentences) + self.batch_size - 1) // self.batch_size)]
for batch in sentence_batches:
self._add_embeddings_to_sentences(batch)
return sentences
def _add_embeddings_to_sentences(self, sentences: List[Sentence]):
"""Extract sentence embedding from CLS token or similar and add to Sentence object."""
# gradients are enabled if fine-tuning is enabled
gradient_context = torch.enable_grad() if (self.fine_tune and self.training) else torch.no_grad()
with gradient_context:
# first, subtokenize each sentence and find out into how many subtokens each token was divided
subtokenized_sentences = []
# subtokenize sentences
for sentence in sentences:
# tokenize and truncate to max subtokens (TODO: check better truncation strategies)
subtokenized_sentence = self.tokenizer.encode(sentence.to_tokenized_string(),
add_special_tokens=True,
max_length=self.tokenizer.model_max_length,
truncation=True,
)
subtokenized_sentences.append(
torch.tensor(subtokenized_sentence, dtype=torch.long, device=flair.device))
# find longest sentence in batch
longest_sequence_in_batch: int = len(max(subtokenized_sentences, key=len))
# initialize batch tensors and mask
input_ids = torch.zeros(
[len(sentences), longest_sequence_in_batch],
dtype=torch.long,
device=flair.device,
)
mask = torch.zeros(
[len(sentences), longest_sequence_in_batch],
dtype=torch.long,
device=flair.device,
)
for s_id, sentence in enumerate(subtokenized_sentences):
sequence_length = len(sentence)
input_ids[s_id][:sequence_length] = sentence
mask[s_id][:sequence_length] = torch.ones(sequence_length)
# put encoded batch through transformer model to get all hidden states of all encoder layers
hidden_states = self.model(input_ids, attention_mask=mask)[-1] if len(sentences) > 1 \
else self.model(input_ids)[-1]
# iterate over all subtokenized sentences
for sentence_idx, (sentence, subtokens) in enumerate(zip(sentences, subtokenized_sentences)):
index_of_CLS_token = 0 if self.initial_cls_token else len(subtokens) - 1
cls_embeddings_all_layers: List[torch.FloatTensor] = \
[hidden_states[layer][sentence_idx][index_of_CLS_token] for layer in self.layer_indexes]
# use scalar mix of embeddings if so selected
if self.layer_mean:
sm = ScalarMix(mixture_size=len(cls_embeddings_all_layers))
sm_embeddings = sm(cls_embeddings_all_layers)
cls_embeddings_all_layers = [sm_embeddings]
# set the extracted embedding for the token
sentence.set_embedding(self.name, torch.cat(cls_embeddings_all_layers))
@property
@abstractmethod
def embedding_length(self) -> int:
"""Returns the length of the embedding vector."""
return (
len(self.layer_indexes) * self.model.config.hidden_size
if not self.layer_mean
else self.model.config.hidden_size
)
def __getstate__(self):
# special handling for serializing transformer models
config_state_dict = self.model.config.__dict__
model_state_dict = self.model.state_dict()
if not hasattr(self, "base_model_name"): self.base_model_name = self.name.split('transformer-document-')[-1]
# serialize the transformer models and the constructor arguments (but nothing else)
model_state = {
"config_state_dict": config_state_dict,
"model_state_dict": model_state_dict,
"embedding_length_internal": self.embedding_length,
"base_model_name": self.base_model_name,
"fine_tune": self.fine_tune,
"batch_size": self.batch_size,
"layer_indexes": self.layer_indexes,
"layer_mean": self.layer_mean,
}
return model_state
def __setstate__(self, d):
self.__dict__ = d
# necessary for reverse compatibility with Flair <= 0.7
if 'use_scalar_mix' in self.__dict__.keys():
self.__dict__['layer_mean'] = d['use_scalar_mix']
# special handling for deserializing transformer models
if "config_state_dict" in d:
# load transformer model
config_class = CONFIG_MAPPING[d["config_state_dict"]["model_type"]]
loaded_config = config_class.from_dict(d["config_state_dict"])
# constructor arguments
layers = ','.join([str(idx) for idx in self.__dict__['layer_indexes']])
# re-initialize transformer word embeddings with constructor arguments
embedding = TransformerDocumentEmbeddings(
model=self.__dict__['base_model_name'],
fine_tune=self.__dict__['fine_tune'],
batch_size=self.__dict__['batch_size'],
layers=layers,
layer_mean=self.__dict__['layer_mean'],
config=loaded_config,
state_dict=d["model_state_dict"],
)
# I have no idea why this is necessary, but otherwise it doesn't work
for key in embedding.__dict__.keys():
self.__dict__[key] = embedding.__dict__[key]
else:
model_name = self.__dict__['name'].split('transformer-document-')[-1]
# reload tokenizer to get around serialization issues
try:
tokenizer = AutoTokenizer.from_pretrained(model_name)
except Exception:
# fall back to None so a failed reload does not raise a NameError below
tokenizer = None
self.tokenizer = tokenizer
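# Usage sketch (illustrative; 768 is the hidden size of bert-base-uncased, so the
# default single top layer yields a 768-dimensional document vector):
#   embedding = TransformerDocumentEmbeddings('bert-base-uncased', fine_tune=False)
#   sentence = Sentence('FC Bayern Munich won the match .')
#   embedding.embed(sentence)
#   sentence.get_embedding().shape  # torch.Size([768])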
class DocumentPoolEmbeddings(DocumentEmbeddings):
def __init__(
self,
embeddings: List[TokenEmbeddings],
fine_tune_mode: str = "none",
pooling: str = "mean",
):
"""The constructor takes a list of embeddings to be combined.
:param embeddings: a list of token embeddings
:param fine_tune_mode: if set to "linear" a trainable layer is added, if set to
"nonlinear", a nonlinearity is added as well. Set this to make the pooling trainable.
:param pooling: a string which can be any value from ['mean', 'max', 'min']
"""
super().__init__()
self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)
self.__embedding_length = self.embeddings.embedding_length
# optional fine-tuning on top of embedding layer
self.fine_tune_mode = fine_tune_mode
if self.fine_tune_mode in ["nonlinear", "linear"]:
self.embedding_flex = torch.nn.Linear(
self.embedding_length, self.embedding_length, bias=False
)
self.embedding_flex.weight.data.copy_(torch.eye(self.embedding_length))
if self.fine_tune_mode in ["nonlinear"]:
self.embedding_flex_nonlinear = torch.nn.ReLU(self.embedding_length)
self.embedding_flex_nonlinear_map = torch.nn.Linear(
self.embedding_length, self.embedding_length
)
self.__embedding_length: int = self.embeddings.embedding_length
self.to(flair.device)
if pooling not in ['min', 'max', 'mean']:
raise ValueError(f"Pooling operation for {pooling!r} is not defined")
self.pooling = pooling
self.name: str = f"document_{self.pooling}"
@property
def embedding_length(self) -> int:
return self.__embedding_length
def embed(self, sentences: Union[List[Sentence], Sentence]):
"""Add embeddings to every sentence in the given list of sentences. If embeddings are already added, updates
only if embeddings are non-static."""
# if only one sentence is passed, convert to list of sentence
if isinstance(sentences, Sentence):
sentences = [sentences]
self.embeddings.embed(sentences)
for sentence in sentences:
word_embeddings = []
for token in sentence.tokens:
word_embeddings.append(token.get_embedding().unsqueeze(0))
word_embeddings = torch.cat(word_embeddings, dim=0).to(flair.device)
if self.fine_tune_mode in ["nonlinear", "linear"]:
word_embeddings = self.embedding_flex(word_embeddings)
if self.fine_tune_mode in ["nonlinear"]:
word_embeddings = self.embedding_flex_nonlinear(word_embeddings)
word_embeddings = self.embedding_flex_nonlinear_map(word_embeddings)
if self.pooling == "mean":
pooled_embedding = torch.mean(word_embeddings, 0)
elif self.pooling == "max":
pooled_embedding, _ = torch.max(word_embeddings, 0)
elif self.pooling == "min":
pooled_embedding, _ = torch.min(word_embeddings, 0)
sentence.set_embedding(self.name, pooled_embedding)
def _add_embeddings_internal(self, sentences: List[Sentence]):
pass
def extra_repr(self):
return f"fine_tune_mode={self.fine_tune_mode}, pooling={self.pooling}"
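# Usage sketch (illustrative; assumes flair's WordEmbeddings('glove'), whose token
# vectors are 100-dimensional, so mean pooling gives a 100-dimensional document vector):
#   from flair.embeddings import WordEmbeddings
#   pool = DocumentPoolEmbeddings([WordEmbeddings('glove')], pooling='mean')
#   sentence = Sentence('The grass is green .')
#   pool.embed(sentence)
#   sentence.get_embedding().shape  # torch.Size([100])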
class DocumentTFIDFEmbeddings(DocumentEmbeddings):
def __init__(
self,
train_dataset,
**vectorizer_params,
):
"""The constructor for DocumentTFIDFEmbeddings.
:param train_dataset: the train dataset which will be used to construct vectorizer
:param vectorizer_params: parameters given to Scikit-learn's TfidfVectorizer constructor
"""
super().__init__()
import numpy as np
self.vectorizer = TfidfVectorizer(dtype=np.float32, **vectorizer_params)
self.vectorizer.fit([s.to_original_text() for s in train_dataset])
self.__embedding_length: int = len(self.vectorizer.vocabulary_)
self.to(flair.device)
self.name: str = f"document_tfidf"
@property
def embedding_length(self) -> int:
return self.__embedding_length
def embed(self, sentences: Union[List[Sentence], Sentence]):
"""Add embeddings to every sentence in the given list of sentences."""
# if only one sentence is passed, convert to list of sentence
if isinstance(sentences, Sentence):
sentences = [sentences]
raw_sentences = [s.to_original_text() for s in sentences]
tfidf_vectors = torch.from_numpy(self.vectorizer.transform(raw_sentences).A)
for sentence_id, sentence in enumerate(sentences):
sentence.set_embedding(self.name, tfidf_vectors[sentence_id])
def _add_embeddings_internal(self, sentences: List[Sentence]):
pass
class DocumentRNNEmbeddings(DocumentEmbeddings):
def __init__(
self,
embeddings: List[TokenEmbeddings],
hidden_size=128,
rnn_layers=1,
reproject_words: bool = True,
reproject_words_dimension: int = None,
bidirectional: bool = False,
dropout: float = 0.5,
word_dropout: float = 0.0,
locked_dropout: float = 0.0,
rnn_type="GRU",
fine_tune: bool = True,
):
"""The constructor takes a list of embeddings to be combined.
:param embeddings: a list of token embeddings
:param hidden_size: the number of hidden states in the rnn
:param rnn_layers: the number of layers for the rnn
:param reproject_words: boolean value, indicating whether to reproject the token embeddings in a separate linear
layer before putting them into the rnn or not
:param reproject_words_dimension: output dimension of reprojecting token embeddings. If None the same output
dimension as before will be taken.
:param bidirectional: boolean value, indicating whether to use a bidirectional rnn or not
:param dropout: the dropout value to be used
:param word_dropout: the word dropout value to be used, if 0.0 word dropout is not used
:param locked_dropout: the locked dropout value to be used, if 0.0 locked dropout is not used
:param rnn_type: 'GRU' or 'LSTM'
"""
super().__init__()
self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)
self.rnn_type = rnn_type
self.reproject_words = reproject_words
self.bidirectional = bidirectional
self.length_of_all_token_embeddings: int = self.embeddings.embedding_length
self.static_embeddings = False if fine_tune else True
self.__embedding_length: int = hidden_size
if self.bidirectional:
self.__embedding_length *= 4
self.embeddings_dimension: int = self.length_of_all_token_embeddings
if self.reproject_words and reproject_words_dimension is not None:
self.embeddings_dimension = reproject_words_dimension
self.word_reprojection_map = torch.nn.Linear(
self.length_of_all_token_embeddings, self.embeddings_dimension
)
# bidirectional RNN on top of embedding layer
if rnn_type == "LSTM":
self.rnn = torch.nn.LSTM(
self.embeddings_dimension,
hidden_size,
num_layers=rnn_layers,
bidirectional=self.bidirectional,
batch_first=True,
)
else:
self.rnn = torch.nn.GRU(
self.embeddings_dimension,
hidden_size,
num_layers=rnn_layers,
bidirectional=self.bidirectional,
batch_first=True,
)
self.name = "document_" + self.rnn._get_name()
# dropouts
self.dropout = torch.nn.Dropout(dropout) if dropout > 0.0 else None
self.locked_dropout = (
LockedDropout(locked_dropout) if locked_dropout > 0.0 else None
)
self.word_dropout = WordDropout(word_dropout) if word_dropout > 0.0 else None
torch.nn.init.xavier_uniform_(self.word_reprojection_map.weight)
self.to(flair.device)
self.eval()
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: Union[List[Sentence], Sentence]):
"""Add embeddings to all sentences in the given list of sentences. If embeddings are already added, update
only if embeddings are non-static."""
# TODO: remove in future versions
if not hasattr(self, "locked_dropout"):
self.locked_dropout = None
if not hasattr(self, "word_dropout"):
self.word_dropout = None
if type(sentences) is Sentence:
sentences = [sentences]
self.rnn.zero_grad()
# embed words in the sentence
self.embeddings.embed(sentences)
lengths: List[int] = [len(sentence.tokens) for sentence in sentences]
longest_token_sequence_in_batch: int = max(lengths)
pre_allocated_zero_tensor = torch.zeros(
self.embeddings.embedding_length * longest_token_sequence_in_batch,
dtype=torch.float,
device=flair.device,
)
all_embs: List[torch.Tensor] = list()
for sentence in sentences:
all_embs += [
emb for token in sentence for emb in token.get_each_embedding()
]
nb_padding_tokens = longest_token_sequence_in_batch - len(sentence)
if nb_padding_tokens > 0:
t = pre_allocated_zero_tensor[
: self.embeddings.embedding_length * nb_padding_tokens
]
all_embs.append(t)
sentence_tensor = torch.cat(all_embs).view(
[
len(sentences),
longest_token_sequence_in_batch,
self.embeddings.embedding_length,
]
)
# before-RNN dropout
if self.dropout:
sentence_tensor = self.dropout(sentence_tensor)
if self.locked_dropout:
sentence_tensor = self.locked_dropout(sentence_tensor)
if self.word_dropout:
sentence_tensor = self.word_dropout(sentence_tensor)
# reproject if set
if self.reproject_words:
sentence_tensor = self.word_reprojection_map(sentence_tensor)
# push through RNN
packed = pack_padded_sequence(
sentence_tensor, lengths, enforce_sorted=False, batch_first=True
)
rnn_out, hidden = self.rnn(packed)
outputs, output_lengths = pad_packed_sequence(rnn_out, batch_first=True)
# after-RNN dropout
if self.dropout:
outputs = self.dropout(outputs)
if self.locked_dropout:
outputs = self.locked_dropout(outputs)
# extract embeddings from RNN
for sentence_no, length in enumerate(lengths):
last_rep = outputs[sentence_no, length - 1]
embedding = last_rep
if self.bidirectional:
first_rep = outputs[sentence_no, 0]
embedding = torch.cat([first_rep, last_rep], 0)
if self.static_embeddings:
embedding = embedding.detach()
sentence = sentences[sentence_no]
sentence.set_embedding(self.name, embedding)
def _apply(self, fn):
# models that were serialized using torch versions older than 1.4.0 lack the _flat_weights_names attribute
# check if this is the case and if so, set it
for child_module in self.children():
if isinstance(child_module, torch.nn.RNNBase) and not hasattr(child_module, "_flat_weights_names"):
_flat_weights_names = []
if child_module.__dict__["bidirectional"]:
num_direction = 2
else:
num_direction = 1
for layer in range(child_module.__dict__["num_layers"]):
for direction in range(num_direction):
suffix = "_reverse" if direction == 1 else ""
param_names = ["weight_ih_l{}{}", "weight_hh_l{}{}"]
if child_module.__dict__["bias"]:
param_names += ["bias_ih_l{}{}", "bias_hh_l{}{}"]
param_names = [
x.format(layer, suffix) for x in param_names
]
_flat_weights_names.extend(param_names)
setattr(child_module, "_flat_weights_names",
_flat_weights_names)
child_module._apply(fn)
class DocumentLMEmbeddings(DocumentEmbeddings):
def __init__(self, flair_embeddings: List[FlairEmbeddings]):
super().__init__()
self.embeddings = flair_embeddings
self.name = "document_lm"
# IMPORTANT: add embeddings as torch modules
for i, embedding in enumerate(flair_embeddings):
self.add_module("lm_embedding_{}".format(i), embedding)
if not embedding.static_embeddings:
self.static_embeddings = False
self._embedding_length: int = sum(
embedding.embedding_length for embedding in flair_embeddings
)
@property
def embedding_length(self) -> int:
return self._embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]):
if type(sentences) is Sentence:
sentences = [sentences]
for embedding in self.embeddings:
embedding.embed(sentences)
# iterate over sentences
for sentence in sentences:
sentence: Sentence = sentence
# if its a forward LM, take last state
if embedding.is_forward_lm:
sentence.set_embedding(
embedding.name,
sentence[len(sentence) - 1]._embeddings[embedding.name],
)
else:
sentence.set_embedding(
embedding.name, sentence[0]._embeddings[embedding.name]
)
return sentences
class SentenceTransformerDocumentEmbeddings(DocumentEmbeddings):
def __init__(
self,
model: str = "bert-base-nli-mean-tokens",
batch_size: int = 1,
convert_to_numpy: bool = False,
):
"""
:param model: string name of a model from the SentenceTransformer class
:param name: string name of embedding type which will be set to Sentence object
:param batch_size: int number of sentences to processed in one batch
:param convert_to_numpy: bool whether the encode() returns a numpy array or PyTorch tensor
"""
super().__init__()
try:
from sentence_transformers import SentenceTransformer
except ModuleNotFoundError:
log.warning("-" * 100)
log.warning('ATTENTION! The library "sentence-transformers" is not installed!')
log.warning(
'To use Sentence Transformers, please first install with "pip install sentence-transformers"'
)
log.warning("-" * 100)
pass
self.model = SentenceTransformer(model)
self.name = 'sentence-transformers-' + str(model)
self.batch_size = batch_size
self.convert_to_numpy = convert_to_numpy
self.static_embeddings = True
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
sentence_batches = [sentences[i * self.batch_size:(i + 1) * self.batch_size]
for i in range((len(sentences) + self.batch_size - 1) // self.batch_size)]
for batch in sentence_batches:
self._add_embeddings_to_sentences(batch)
return sentences
def _add_embeddings_to_sentences(self, sentences: List[Sentence]):
# convert to plain strings, embedded in a list for the encode function
sentences_plain_text = [sentence.to_plain_string() for sentence in sentences]
embeddings = self.model.encode(sentences_plain_text, convert_to_numpy=self.convert_to_numpy)
for sentence, embedding in zip(sentences, embeddings):
sentence.set_embedding(self.name, embedding)
@property
@abstractmethod
def embedding_length(self) -> int:
"""Returns the length of the embedding vector."""
return self.model.get_sentence_embedding_dimension()
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.LSTM",
"torch.nn.GRU",
"torch.enable_grad",
"torch.ones",
"torch.eye",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.tensor",
"torch.zeros",
"torch.min",
"torch.max",
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.no_grad",
"torch.nn.init.xavier_uniform_",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.mean"
] | 1.5.0 | aynetdia/flair | 7e0958423ceb9744a87b0c27fd66f7be4caf0d99 |
0.4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import torch
import horovod.torch as hvd
def broadcast_optimizer_state(optimizer, root_rank):
"""
This function is copied from the newest Horovod version,
because that release has to be compiled with gcc7.
"""
if isinstance(optimizer, torch.optim.LBFGS):
# TODO(travis): L-BFGS cannot be easily supported without serializing
# the entire state_dict, as its structure is deeply nested and contains
# None type parameter values
raise ValueError('cannot broadcast torch.optim.LBFGS state')
state_dict = optimizer.state_dict()
# Newly created optimizers will not have their state initialized, so
# do that initialization here
if len(state_dict['state']) == 0:
for group in optimizer.param_groups:
for p in group['params']:
p.grad = torch.autograd.Variable(
p.data.new(p.size()).zero_())
optimizer.step()
state_dict = optimizer.state_dict()
params = []
callbacks = {}
occurrences = collections.defaultdict(int)
# Some optimizer parameters may be represented as scalars instead of
# tensors. In such cases, we need to wrap the scalar in a tensor, then
# broadcast, then update the appropriate value in the state_dict with the
# new unwrapped scalar value via a callback.
def _create_callback(pid, name, t, p):
def _from_tensor():
state_dict['state'][pid][name] = t(p.numpy()[0])
return _from_tensor
# Groups are unordered, but their params will be distinct
for group in state_dict['param_groups']:
# The params list here is ordered by the layers in the model
for pid in group['params']:
if pid not in state_dict['state']:
continue
param_state = state_dict['state'][pid]
for name, p in param_state.items():
# Some parameter names may appear more than once, in which
# case we ensure they have a unique identifier defined by
# their order
occurrences[name] += 1
key = '%s.%d' % (str(name), occurrences[name])
if not torch.is_tensor(p):
# Wrap the scalar in a FloatTensor, and remember its type
# so we can cast it back after unwrapping
t = type(p)
p = torch.Tensor([p])
callbacks[key] = _create_callback(pid, name, t, p)
params.append((key, p))
# Synchronized broadcast of all parameters
hvd.broadcast_parameters(params, root_rank)
# Post-broadcast cleanup for non-tensor parameters
for key, p in params:
if key in callbacks:
callbacks[key]()
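# Typical call site (sketch following the usual Horovod training pattern; not code from
# this repository):
#   hvd.init()
#   optimizer = torch.optim.SGD(model.parameters(), lr=0.01 * hvd.size())
#   hvd.broadcast_parameters(model.state_dict(), root_rank=0)
#   broadcast_optimizer_state(optimizer, root_rank=0)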
| [
"torch.is_tensor",
"torch.Tensor"
] | 0.4.0 | jasonleeinf/nmtlab | 122b70cc226d9ce17ad106a3bd3a5318bd3b359f |
0.4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
from nmtlab.modules.kv_attention import KeyValAttention
class MultiHeadAttention(nn.Module):
"""The implementation of multi-head attention.
Following the original description in the transformer paper.
"""
_RELATIVE_POS_CLIP = 100
def __init__(self, out_size, num_head=8, hidden_size=None, additive=False, dropout_ratio=0, relative_pos=False):
super(MultiHeadAttention, self).__init__()
if hidden_size is None:
hidden_size = out_size
self._num_head = num_head
self._hidden_size = hidden_size
self._out_size = out_size
self._additive = additive
if relative_pos:
self.relative_posmatrix = nn.Embedding(self._RELATIVE_POS_CLIP * 2 + 1, hidden_size)
else:
self.relative_posmatrix = None
self._attention = KeyValAttention(scaling=True, dropout_ratio=dropout_ratio)
if additive:
# Taken from RNMT+ paper
raise NotImplementedError
else:
self.linear_Q = nn.Linear(out_size, hidden_size)
self.linear_K = nn.Linear(out_size, hidden_size)
self.linear_V = nn.Linear(out_size, hidden_size)
self.linear_O = nn.Linear(hidden_size, out_size)
def forward_2d(self, query, keys, values, mask=None):
"""Compute attention for 2-dimensional queries (batch x hidden).
"""
query = query.unsqueeze(1) # (B, 1, H)
context_vectors, weights = self.forward_3d(query, keys, values, mask=mask)
context_vectors = context_vectors.squeeze(1)
weights = weights.squeeze(1)
return context_vectors, weights
def forward_3d(self, query, keys, values, mask=None):
"""Compute attention for 3-dimensional input (batch x step x hidden).
"""
B = query.shape[0]
head_dim = self._hidden_size // self._num_head
transformed_query = self.linear_Q(query)
if self.relative_posmatrix is not None:
TQ = query.shape[1]
TK = keys.shape[1]
#pos = torch.arange(TK).repeat(TQ, 1)
#relpos = pos - torch.arange(TQ)[:, None]
relpos = torch.arange(TK)[None, :] - torch.arange(TQ)[:, None]
relpos = torch.clamp(relpos, -self._RELATIVE_POS_CLIP, self._RELATIVE_POS_CLIP)
relpos += self._RELATIVE_POS_CLIP
if torch.cuda.is_available():
relpos = relpos.cuda()
relpos_embed = self.relative_posmatrix(relpos)
relpos_logits = (transformed_query.unsqueeze(-2) * relpos_embed.unsqueeze(0)).sum(-1)
relpos_logits = relpos_logits.unsqueeze(1)
else:
relpos_logits = None
query = transformed_query.view(B, -1, self._num_head, head_dim).transpose(1, 2) # (B, 4, TQ, H)
keys = self.linear_K(keys).view(keys.shape[0], -1, self._num_head, head_dim).transpose(1, 2)
values = self.linear_V(values).view(values.shape[0], -1, self._num_head, head_dim).transpose(1, 2)
if mask is not None and mask.dim() < keys.dim():
mask = mask.unsqueeze(1)
context_vectors, weights = self._attention(query, keys, values, mask=mask, additional_logits=relpos_logits) # (B, 4, TQ, H)
context_vectors = context_vectors.transpose(1, 2).contiguous().view(B, -1, self._num_head * head_dim) # (B, TQ, H)
context_vectors = self.linear_O(context_vectors)
return context_vectors, weights
def forward(self, query, keys, values, mask=None):
"""Compute the context vector with key value attention.
Returns:
context vector and attention weights.
"""
if query.dim() == 2:
return self.forward_2d(query, keys, values, mask)
elif query.dim() == 3:
return self.forward_3d(query, keys, values, mask)
else:
raise NotImplementedError
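# Shape sketch (illustrative, not from the original repository):
#   attn = MultiHeadAttention(out_size=512, num_head=8)
#   q = torch.randn(16, 20, 512)        # (batch, query_len, out_size)
#   k = v = torch.randn(16, 30, 512)    # (batch, key_len, out_size)
#   context, weights = attn(q, k, v)    # context: (16, 20, 512); weights are the per-head attention maps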
| [
"torch.nn.Linear",
"torch.arange",
"torch.clamp",
"torch.cuda.is_available",
"torch.nn.Embedding"
] | 0.4.0 | jasonleeinf/nmtlab | 122b70cc226d9ce17ad106a3bd3a5318bd3b359f |
import os
import cv2
import ast
import torch
import numpy as np
import random
from torch.utils.data import DataLoader, Dataset
cv2.setNumThreads(1)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class VimeoDataset(Dataset):
def __init__(self, dataset_name, batch_size=32):
self.batch_size = batch_size
self.dataset_name = dataset_name
self.h = 256
self.w = 448
self.data_root = 'vimeo_triplet'
self.image_root = os.path.join(self.data_root, 'sequences')
train_fn = os.path.join(self.data_root, 'tri_trainlist.txt')
test_fn = os.path.join(self.data_root, 'tri_testlist.txt')
with open(train_fn, 'r') as f:
self.trainlist = f.read().splitlines()
with open(test_fn, 'r') as f:
self.testlist = f.read().splitlines()
# load_data() reads self.trainlist/self.testlist, so it must run after they exist
self.load_data()
def __len__(self):
return len(self.meta_data)
def load_data(self):
if self.dataset_name == 'train':
self.meta_data = self.trainlist
else:
self.meta_data = self.testlist
def aug(self, img0, gt, img1, h, w):
ih, iw, _ = img0.shape
x = np.random.randint(0, ih - h + 1)
y = np.random.randint(0, iw - w + 1)
img0 = img0[x:x+h, y:y+w, :]
img1 = img1[x:x+h, y:y+w, :]
gt = gt[x:x+h, y:y+w, :]
return img0, gt, img1
def getimg(self, index):
imgpath = self.meta_data[index]
imgpaths = [imgpath + '/im1.png', imgpath + '/im2.png', imgpath + '/im3.png']
# Load images
img0 = cv2.imread(imgpaths[0])
gt = cv2.imread(imgpaths[1])
img1 = cv2.imread(imgpaths[2])
return img0, gt, img1
def __getitem__(self, index):
img0, gt, img1 = self.getimg(index)
if self.dataset_name == 'train':
img0, gt, img1 = self.aug(img0, gt, img1, 224, 224)
if random.uniform(0, 1) < 0.5:
img0 = img0[:, :, ::-1]
img1 = img1[:, :, ::-1]
gt = gt[:, :, ::-1]
if random.uniform(0, 1) < 0.5:
img0 = img0[::-1]
img1 = img1[::-1]
gt = gt[::-1]
if random.uniform(0, 1) < 0.5:
img0 = img0[:, ::-1]
img1 = img1[:, ::-1]
gt = gt[:, ::-1]
if random.uniform(0, 1) < 0.5:
tmp = img1
img1 = img0
img0 = tmp
img0 = torch.from_numpy(img0.copy()).permute(2, 0, 1)
img1 = torch.from_numpy(img1.copy()).permute(2, 0, 1)
gt = torch.from_numpy(gt.copy()).permute(2, 0, 1)
return torch.cat((img0, img1, gt), 0)
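# Typical use with the DataLoader imported above (sketch; batch size and worker count
# are illustrative):
#   dataset = VimeoDataset('train')
#   loader = DataLoader(dataset, batch_size=16, num_workers=8, shuffle=True)
#   for batch in loader:   # (16, 9, 224, 224) uint8 batch: img0 | img1 | gt along dim 1
#       img0, img1, gt = batch[:, :3], batch[:, 3:6], batch[:, 6:]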
| [
"torch.cat",
"torch.cuda.is_available"
] | 1.3.0 | zawecha1/arXiv2020-RIFE | 8eb622a150bd3bf0e773033cbba4728e64340ba1 |
0.4 | import gym
import torch
import multiprocessing as mp
import numpy as np
from maml_rl.envs.subproc_vec_env import SubprocVecEnv
from maml_rl.episode import BatchEpisodes
def make_env(env_name):
def _make_env():
return gym.make(env_name)
return _make_env
class BatchSampler(object):
def __init__(self, env_name, batch_size, num_workers=mp.cpu_count() - 1):
self.env_name = env_name
self.batch_size = batch_size
self.num_workers = num_workers
self.queue = mp.Queue()
self.envs = SubprocVecEnv([make_env(env_name) for _ in range(num_workers)],
queue=self.queue)
self._env = gym.make(env_name)
def sample(self, policy, task, tree=None, params=None, gamma=0.95, device='cpu'):
episodes = BatchEpisodes(batch_size=self.batch_size, gamma=gamma, device=device)
for i in range(self.batch_size):
self.queue.put(i)
for _ in range(self.num_workers):
self.queue.put(None)
observations, batch_ids = self.envs.reset()
dones = [False]
while (not all(dones)) or (not self.queue.empty()):
with torch.no_grad():
input = torch.from_numpy(observations).float().to(device=device)
if self.env_name == 'AntPos-v0':
_, embedding = tree.forward(torch.from_numpy(task["position"]).float().to(device=device))
if self.env_name == 'AntVel-v1':
_, embedding = tree.forward(torch.from_numpy(np.array([task["velocity"]])).float().to(device=device))
# print(input.shape)
# print(embedding.shape)
observations_tensor = torch.t(
torch.stack([torch.cat([torch.from_numpy(np.array(teo)).to(device=device), embedding[0]], 0) for teo in input], 1))
actions_tensor = policy(observations_tensor, task=task, params=params, enhanced=False).sample()
actions = actions_tensor.cpu().numpy()
new_observations, rewards, dones, new_batch_ids, _ = self.envs.step(actions)
episodes.append(observations_tensor.cpu().numpy(), actions, rewards, batch_ids)
observations, batch_ids = new_observations, new_batch_ids
return episodes
def reset_task(self, task):
tasks = [task for _ in range(self.num_workers)]
reset = self.envs.reset_task(tasks)
return all(reset)
def sample_tasks(self, num_tasks):
tasks = self._env.unwrapped.sample_tasks(num_tasks)
return tasks
| [
"torch.no_grad",
"torch.from_numpy"
] | 0.4.0 | henryzxu/pytorch-hsml-rl | 3b36f29cf91f3ca68820ea124a2ee7a75327b94f |
1.8 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import operator
from functools import partial, reduce
import torch
from torch.distributions.utils import _sum_rightmost
from pyro.nn import ConditionalDenseNN, DenseNN
from .. import constraints
from ..conditional import ConditionalTransformModule
from ..torch_transform import TransformModule
from ..transforms.utils import clamp_preserve_gradients
from ..util import copy_docs_from
@copy_docs_from(TransformModule)
class AffineCoupling(TransformModule):
r"""
An implementation of the affine coupling layer of RealNVP (Dinh et al., 2017)
that uses the bijective transform,
:math:`\mathbf{y}_{1:d} = \mathbf{x}_{1:d}`
:math:`\mathbf{y}_{(d+1):D} = \mu + \sigma\odot\mathbf{x}_{(d+1):D}`
where :math:`\mathbf{x}` are the inputs, :math:`\mathbf{y}` are the outputs,
e.g. :math:`\mathbf{x}_{1:d}` represents the first :math:`d` elements of the
inputs, and :math:`\mu,\sigma` are shift and translation parameters calculated
as the output of a function inputting only :math:`\mathbf{x}_{1:d}`.
That is, the first :math:`d` components remain unchanged, and the subsequent
:math:`D-d` are shifted and translated by a function of the previous components.
Together with :class:`~pyro.distributions.TransformedDistribution` this provides
a way to create richer variational approximations.
Example usage:
>>> from pyro.nn import DenseNN
>>> input_dim = 10
>>> split_dim = 6
>>> base_dist = dist.Normal(torch.zeros(input_dim), torch.ones(input_dim))
>>> param_dims = [input_dim-split_dim, input_dim-split_dim]
>>> hypernet = DenseNN(split_dim, [10*input_dim], param_dims)
>>> transform = AffineCoupling(split_dim, hypernet)
>>> pyro.module("my_transform", transform) # doctest: +SKIP
>>> flow_dist = dist.TransformedDistribution(base_dist, [transform])
>>> flow_dist.sample() # doctest: +SKIP
The inverse of the Bijector is required when, e.g., scoring the log density of a
sample with :class:`~pyro.distributions.TransformedDistribution`. This
implementation caches the inverse of the Bijector when its forward operation is
called, e.g., when sampling from
:class:`~pyro.distributions.TransformedDistribution`. However, if the cached
value isn't available, either because it was overwritten during sampling a new
value or an arbitrary value is being scored, it will calculate it manually.
This is an operation that scales as O(1), i.e. constant in the input dimension.
So in general, it is cheap to sample *and* score (an arbitrary value) from
:class:`~pyro.distributions.transforms.AffineCoupling`.
:param split_dim: Zero-indexed dimension :math:`d` upon which to perform input/
output split for transformation.
:type split_dim: int
:param hypernet: a neural network whose forward call returns a real-valued mean
and logit-scale as a tuple. The input should have final dimension split_dim
and the output final dimension input_dim-split_dim for each member of the
tuple.
:type hypernet: callable
:param dim: the tensor dimension on which to split. This value must be negative
and defines the event dim as `abs(dim)`.
:type dim: int
:param log_scale_min_clip: The minimum value for clipping the log(scale) from
the autoregressive NN
:type log_scale_min_clip: float
:param log_scale_max_clip: The maximum value for clipping the log(scale) from
the autoregressive NN
:type log_scale_max_clip: float
References:
[1] Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. Density estimation
using Real NVP. ICLR 2017.
"""
bijective = True
def __init__(self, split_dim, hypernet, *, dim=-1, log_scale_min_clip=-5., log_scale_max_clip=3.):
super().__init__(cache_size=1)
if dim >= 0:
raise ValueError("'dim' keyword argument must be negative")
self.split_dim = split_dim
self.nn = hypernet
self.dim = dim
self._cached_log_scale = None
self.log_scale_min_clip = log_scale_min_clip
self.log_scale_max_clip = log_scale_max_clip
@constraints.dependent_property(is_discrete=False)
def domain(self):
return constraints.independent(constraints.real, -self.dim)
@constraints.dependent_property(is_discrete=False)
def codomain(self):
return constraints.independent(constraints.real, -self.dim)
def _call(self, x):
"""
:param x: the input into the bijection
:type x: torch.Tensor
Invokes the bijection x=>y; in the prototypical context of a
:class:`~pyro.distributions.TransformedDistribution` `x` is a sample from
the base distribution (or the output of a previous transform)
"""
x1, x2 = x.split([self.split_dim, x.size(self.dim) - self.split_dim], dim=self.dim)
# Now that we can split on an arbitrary dimension, we have do a bit of reshaping...
mean, log_scale = self.nn(x1.reshape(x1.shape[:self.dim] + (-1,)))
mean = mean.reshape(mean.shape[:-1] + x2.shape[self.dim:])
log_scale = log_scale.reshape(log_scale.shape[:-1] + x2.shape[self.dim:])
log_scale = clamp_preserve_gradients(log_scale, self.log_scale_min_clip, self.log_scale_max_clip)
self._cached_log_scale = log_scale
y1 = x1
y2 = torch.exp(log_scale) * x2 + mean
return torch.cat([y1, y2], dim=self.dim)
def _inverse(self, y):
"""
:param y: the output of the bijection
:type y: torch.Tensor
Inverts y => x. Uses a previously cached inverse if available, otherwise
performs the inversion afresh.
"""
y1, y2 = y.split([self.split_dim, y.size(self.dim) - self.split_dim], dim=self.dim)
x1 = y1
# Now that we can split on an arbitrary dimension, we have do a bit of reshaping...
mean, log_scale = self.nn(x1.reshape(x1.shape[:self.dim] + (-1,)))
mean = mean.reshape(mean.shape[:-1] + y2.shape[self.dim:])
log_scale = log_scale.reshape(log_scale.shape[:-1] + y2.shape[self.dim:])
log_scale = clamp_preserve_gradients(log_scale, self.log_scale_min_clip, self.log_scale_max_clip)
self._cached_log_scale = log_scale
x2 = (y2 - mean) * torch.exp(-log_scale)
return torch.cat([x1, x2], dim=self.dim)
def log_abs_det_jacobian(self, x, y):
"""
Calculates the elementwise determinant of the log jacobian
"""
x_old, y_old = self._cached_x_y
if self._cached_log_scale is not None and x is x_old and y is y_old:
log_scale = self._cached_log_scale
else:
x1, x2 = x.split([self.split_dim, x.size(self.dim) - self.split_dim], dim=self.dim)
_, log_scale = self.nn(x1.reshape(x1.shape[:self.dim] + (-1,)))
log_scale = log_scale.reshape(log_scale.shape[:-1] + x2.shape[self.dim:])
log_scale = clamp_preserve_gradients(log_scale, self.log_scale_min_clip, self.log_scale_max_clip)
return _sum_rightmost(log_scale, self.event_dim)
@copy_docs_from(ConditionalTransformModule)
class ConditionalAffineCoupling(ConditionalTransformModule):
r"""
An implementation of the affine coupling layer of RealNVP (Dinh et al., 2017)
that conditions on an additional context variable and uses the bijective
transform,
:math:`\mathbf{y}_{1:d} = \mathbf{x}_{1:d}`
:math:`\mathbf{y}_{(d+1):D} = \mu + \sigma\odot\mathbf{x}_{(d+1):D}`
where :math:`\mathbf{x}` are the inputs, :math:`\mathbf{y}` are the outputs,
e.g. :math:`\mathbf{x}_{1:d}` represents the first :math:`d` elements of the
inputs, and :math:`\mu,\sigma` are shift and translation parameters calculated
as the output of a function of the input :math:`\mathbf{x}_{1:d}` and a context
variable :math:`\mathbf{z}\in\mathbb{R}^M`.
That is, the first :math:`d` components remain unchanged, and the subsequent
:math:`D-d` are shifted and translated by a function of the previous components.
Together with :class:`~pyro.distributions.ConditionalTransformedDistribution`
this provides a way to create richer variational approximations.
Example usage:
>>> from pyro.nn import ConditionalDenseNN
>>> input_dim = 10
>>> split_dim = 6
>>> context_dim = 4
>>> batch_size = 3
>>> base_dist = dist.Normal(torch.zeros(input_dim), torch.ones(input_dim))
>>> param_dims = [input_dim-split_dim, input_dim-split_dim]
>>> hypernet = ConditionalDenseNN(split_dim, context_dim, [10*input_dim],
... param_dims)
>>> transform = ConditionalAffineCoupling(split_dim, hypernet)
>>> pyro.module("my_transform", transform) # doctest: +SKIP
>>> z = torch.rand(batch_size, context_dim)
>>> flow_dist = dist.ConditionalTransformedDistribution(base_dist,
... [transform]).condition(z)
>>> flow_dist.sample(sample_shape=torch.Size([batch_size])) # doctest: +SKIP
The inverse of the Bijector is required when, e.g., scoring the log density of a
sample with :class:`~pyro.distributions.ConditionalTransformedDistribution`.
This implementation caches the inverse of the Bijector when its forward
operation is called, e.g., when sampling from
:class:`~pyro.distributions.ConditionalTransformedDistribution`. However, if the
cached value isn't available, either because it was overwritten during sampling
a new value or an arbitrary value is being scored, it will calculate it manually.
This is an operation that scales as O(1), i.e. constant in the input dimension.
So in general, it is cheap to sample *and* score (an arbitrary value) from
:class:`~pyro.distributions.transforms.ConditionalAffineCoupling`.
:param split_dim: Zero-indexed dimension :math:`d` upon which to perform input/
output split for transformation.
:type split_dim: int
:param hypernet: A neural network whose forward call returns a real-valued mean
and logit-scale as a tuple. The input should have final dimension split_dim
and the output final dimension input_dim-split_dim for each member of the
tuple. The network also inputs a context variable as a keyword argument in
order to condition the output upon it.
:type hypernet: callable
:param log_scale_min_clip: The minimum value for clipping the log(scale) from
the NN
:type log_scale_min_clip: float
:param log_scale_max_clip: The maximum value for clipping the log(scale) from
the NN
:type log_scale_max_clip: float
References:
Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. Density estimation using
Real NVP. ICLR 2017.
"""
domain = constraints.real_vector
codomain = constraints.real_vector
bijective = True
def __init__(self, split_dim, hypernet, **kwargs):
super().__init__()
self.split_dim = split_dim
self.nn = hypernet
self.kwargs = kwargs
def condition(self, context):
cond_nn = partial(self.nn, context=context)
return AffineCoupling(self.split_dim, cond_nn, **self.kwargs)
def affine_coupling(input_dim, hidden_dims=None, split_dim=None, dim=-1, **kwargs):
"""
A helper function to create an
:class:`~pyro.distributions.transforms.AffineCoupling` object that takes care of
constructing a dense network with the correct input/output dimensions.
:param input_dim: Dimension(s) of input variable to permute. Note that when
`dim < -1` this must be a tuple corresponding to the event shape.
:type input_dim: int
:param hidden_dims: The desired hidden dimensions of the dense network. Defaults
to using [10*input_dim]
:type hidden_dims: list[int]
:param split_dim: The dimension to split the input on for the coupling
transform. Defaults to using input_dim // 2
:type split_dim: int
:param dim: the tensor dimension on which to split. This value must be negative
and defines the event dim as `abs(dim)`.
:type dim: int
:param log_scale_min_clip: The minimum value for clipping the log(scale) from
the autoregressive NN
:type log_scale_min_clip: float
:param log_scale_max_clip: The maximum value for clipping the log(scale) from
the autoregressive NN
:type log_scale_max_clip: float
"""
if not isinstance(input_dim, int):
if len(input_dim) != -dim:
raise ValueError('event shape {} must have same length as event_dim {}'.format(input_dim, -dim))
event_shape = input_dim
extra_dims = reduce(operator.mul, event_shape[(dim + 1):], 1)
else:
event_shape = [input_dim]
extra_dims = 1
event_shape = list(event_shape)
if split_dim is None:
split_dim = event_shape[dim] // 2
if hidden_dims is None:
hidden_dims = [10 * event_shape[dim] * extra_dims]
hypernet = DenseNN(split_dim * extra_dims,
hidden_dims,
[(event_shape[dim] - split_dim) * extra_dims,
(event_shape[dim] - split_dim) * extra_dims])
return AffineCoupling(split_dim, hypernet, dim=dim, **kwargs)
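# Usage sketch (mirrors the AffineCoupling docstring above; dimensions are illustrative):
#   base_dist = dist.Normal(torch.zeros(10), torch.ones(10))
#   transform = affine_coupling(10, hidden_dims=[40])
#   flow_dist = dist.TransformedDistribution(base_dist, [transform])
#   flow_dist.sample()  # doctest: +SKIP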
def conditional_affine_coupling(input_dim, context_dim, hidden_dims=None, split_dim=None, dim=-1, **kwargs):
"""
A helper function to create an
:class:`~pyro.distributions.transforms.ConditionalAffineCoupling` object that
takes care of constructing a dense network with the correct input/output
dimensions.
:param input_dim: Dimension of input variable
:type input_dim: int
:param context_dim: Dimension of context variable
:type context_dim: int
:param hidden_dims: The desired hidden dimensions of the dense network. Defaults
to using [10*input_dim]
:type hidden_dims: list[int]
:param split_dim: The dimension to split the input on for the coupling
transform. Defaults to using input_dim // 2
:type split_dim: int
:param dim: the tensor dimension on which to split. This value must be negative
and defines the event dim as `abs(dim)`.
:type dim: int
:param log_scale_min_clip: The minimum value for clipping the log(scale) from
the autoregressive NN
:type log_scale_min_clip: float
:param log_scale_max_clip: The maximum value for clipping the log(scale) from
the autoregressive NN
:type log_scale_max_clip: float
"""
if not isinstance(input_dim, int):
if len(input_dim) != -dim:
raise ValueError('event shape {} must have same length as event_dim {}'.format(input_dim, -dim))
event_shape = input_dim
extra_dims = reduce(operator.mul, event_shape[(dim + 1):], 1)
else:
event_shape = [input_dim]
extra_dims = 1
event_shape = list(event_shape)
if split_dim is None:
split_dim = event_shape[dim] // 2
if hidden_dims is None:
hidden_dims = [10 * event_shape[dim] * extra_dims]
nn = ConditionalDenseNN(split_dim * extra_dims, context_dim, hidden_dims,
[(event_shape[dim] - split_dim) * extra_dims, (event_shape[dim] - split_dim) * extra_dims])
return ConditionalAffineCoupling(split_dim, nn, dim=dim, **kwargs)
| [
"torch.distributions.utils._sum_rightmost",
"torch.cat",
"torch.exp"
] | 1.8.0 | futurewarning/pyro | 005032f10099188fea86f63b6baa46a27867983f |
1.8 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import queue
import warnings
import torch
import pyro.poutine as poutine
from pyro.distributions.util import is_identically_zero
from pyro.infer.elbo import ELBO
from pyro.infer.enum import (
get_importance_trace,
iter_discrete_escape,
iter_discrete_extend,
)
from pyro.infer.util import compute_site_dice_factor, is_validation_enabled, torch_item
from pyro.ops import packed
from pyro.ops.contract import einsum
from pyro.poutine.enum_messenger import EnumMessenger
from pyro.util import check_traceenum_requirements, warn_if_nan
def _compute_dice_factors(model_trace, guide_trace):
"""
compute per-site DiCE log-factors for non-reparameterized proposal sites
this logic is adapted from pyro.infer.util.Dice.__init__
"""
log_probs = []
for role, trace in zip(("model", "guide"), (model_trace, guide_trace)):
for name, site in trace.nodes.items():
if site["type"] != "sample" or site["is_observed"]:
continue
if role == "model" and name in guide_trace:
continue
log_prob, log_denom = compute_site_dice_factor(site)
if not is_identically_zero(log_denom):
dims = log_prob._pyro_dims
log_prob = log_prob - log_denom
log_prob._pyro_dims = dims
if not is_identically_zero(log_prob):
log_probs.append(log_prob)
return log_probs
def _compute_tmc_factors(model_trace, guide_trace):
"""
compute per-site log-factors for all observed and unobserved variables
log-factors are log(p / q) for unobserved sites and log(p) for observed sites
"""
log_factors = []
for name, site in guide_trace.nodes.items():
if site["type"] != "sample" or site["is_observed"]:
continue
log_proposal = site["packed"]["log_prob"]
log_factors.append(packed.neg(log_proposal))
for name, site in model_trace.nodes.items():
if site["type"] != "sample":
continue
if site["name"] not in guide_trace and \
not site["is_observed"] and \
site["infer"].get("enumerate", None) == "parallel" and \
site["infer"].get("num_samples", -1) > 0:
# site was sampled from the prior
log_proposal = packed.neg(site["packed"]["log_prob"])
log_factors.append(log_proposal)
log_factors.append(site["packed"]["log_prob"])
return log_factors
def _compute_tmc_estimate(model_trace, guide_trace):
"""
Use :func:`~pyro.ops.contract.einsum` to compute the Tensor Monte Carlo
estimate of the marginal likelihood given parallel-sampled traces.
"""
# factors
log_factors = _compute_tmc_factors(model_trace, guide_trace)
log_factors += _compute_dice_factors(model_trace, guide_trace)
if not log_factors:
return 0.
# loss
eqn = ",".join([f._pyro_dims for f in log_factors]) + "->"
plates = "".join(frozenset().union(list(model_trace.plate_to_symbol.values()),
list(guide_trace.plate_to_symbol.values())))
tmc, = einsum(eqn, *log_factors, plates=plates,
backend="pyro.ops.einsum.torch_log",
modulo_total=False)
return tmc
class TraceTMC_ELBO(ELBO):
"""
A trace-based implementation of Tensor Monte Carlo [1]
by way of Tensor Variable Elimination [2] that supports:
- local parallel sampling over any sample site in the model or guide
- exhaustive enumeration over any sample site in the model or guide
To take multiple samples, mark the site with
``infer={'enumerate': 'parallel', 'num_samples': N}``.
To configure all sites in a model or guide at once,
use :func:`~pyro.infer.enum.config_enumerate` .
To enumerate or sample a sample site in the ``model``,
mark the site and ensure the site does not appear in the ``guide``.
This assumes restricted dependency structure on the model and guide:
variables outside of an :class:`~pyro.plate` can never depend on
variables inside that :class:`~pyro.plate` .
References
[1] `Tensor Monte Carlo: Particle Methods for the GPU Era`,
Laurence Aitchison (2018)
[2] `Tensor Variable Elimination for Plated Factor Graphs`,
Fritz Obermeyer, Eli Bingham, Martin Jankowiak, Justin Chiu, Neeraj Pradhan,
Alexander Rush, Noah Goodman (2019)
"""
def _get_trace(self, model, guide, args, kwargs):
"""
Returns a single trace from the guide, and the model that is run
against it.
"""
model_trace, guide_trace = get_importance_trace(
"flat", self.max_plate_nesting, model, guide, args, kwargs)
if is_validation_enabled():
check_traceenum_requirements(model_trace, guide_trace)
has_enumerated_sites = any(site["infer"].get("enumerate")
for trace in (guide_trace, model_trace)
for name, site in trace.nodes.items()
if site["type"] == "sample")
if self.strict_enumeration_warning and not has_enumerated_sites:
warnings.warn('Found no sample sites configured for enumeration. '
'If you want to enumerate sites, you need to @config_enumerate or set '
'infer={"enumerate": "sequential"} or infer={"enumerate": "parallel"}? '
'If you do not want to enumerate, consider using Trace_ELBO instead.')
model_trace.compute_score_parts()
guide_trace.pack_tensors()
model_trace.pack_tensors(guide_trace.plate_to_symbol)
return model_trace, guide_trace
def _get_traces(self, model, guide, args, kwargs):
"""
Runs the guide and runs the model against the guide with
the result packaged as a trace generator.
"""
if self.max_plate_nesting == float('inf'):
self._guess_max_plate_nesting(model, guide, args, kwargs)
if self.vectorize_particles:
guide = self._vectorized_num_particles(guide)
model = self._vectorized_num_particles(model)
# Enable parallel enumeration over the vectorized guide and model.
# The model allocates enumeration dimensions after (to the left of) the guide,
# accomplished by preserving the _ENUM_ALLOCATOR state after the guide call.
guide_enum = EnumMessenger(first_available_dim=-1 - self.max_plate_nesting)
model_enum = EnumMessenger() # preserve _ENUM_ALLOCATOR state
guide = guide_enum(guide)
model = model_enum(model)
q = queue.LifoQueue()
guide = poutine.queue(guide, q,
escape_fn=iter_discrete_escape,
extend_fn=iter_discrete_extend)
for i in range(1 if self.vectorize_particles else self.num_particles):
q.put(poutine.Trace())
while not q.empty():
yield self._get_trace(model, guide, args, kwargs)
def differentiable_loss(self, model, guide, *args, **kwargs):
"""
:returns: a differentiable estimate of the marginal log-likelihood
:rtype: torch.Tensor
:raises ValueError: if the ELBO is not differentiable (e.g. is
identically zero)
Computes a differentiable TMC estimate using ``num_particles`` many samples
(particles). The result should be infinitely differentiable (as long
as underlying derivatives have been implemented).
"""
elbo = 0.0
for model_trace, guide_trace in self._get_traces(model, guide, args, kwargs):
elbo_particle = _compute_tmc_estimate(model_trace, guide_trace)
if is_identically_zero(elbo_particle):
continue
elbo = elbo + elbo_particle
elbo = elbo / self.num_particles
loss = -elbo
warn_if_nan(loss, "loss")
return loss
def loss(self, model, guide, *args, **kwargs):
with torch.no_grad():
loss = self.differentiable_loss(model, guide, *args, **kwargs)
if is_identically_zero(loss) or not loss.requires_grad:
return torch_item(loss)
return loss.item()
def loss_and_grads(self, model, guide, *args, **kwargs):
loss = self.differentiable_loss(model, guide, *args, **kwargs)
if is_identically_zero(loss) or not loss.requires_grad:
return torch_item(loss)
loss.backward()
return loss.item()
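# Hypothetical usage sketch (not part of the upstream file): a minimal SVI loop with
# TraceTMC_ELBO, where the guide site is configured for parallel multi-sample TMC.
# The model, guide and hyperparameters below are illustrative assumptions.
def _example_tmc_svi():
    import pyro
    import pyro.distributions as dist
    from pyro.infer import SVI, config_enumerate
    from pyro.optim import Adam

    def model():
        z = pyro.sample("z", dist.Normal(0.0, 1.0))
        pyro.sample("x", dist.Normal(z, 1.0), obs=torch.tensor(0.5))

    @config_enumerate(default="parallel", num_samples=16, expand=False)
    def guide():
        loc = pyro.param("loc", torch.tensor(0.0))
        pyro.sample("z", dist.Normal(loc, 1.0))

    elbo = TraceTMC_ELBO(max_plate_nesting=0)
    svi = SVI(model, guide, Adam({"lr": 1e-2}), loss=elbo)
    for _ in range(10):
        svi.step()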
| [
"torch.no_grad"
] | 1.8.0 | futurewarning/pyro | 005032f10099188fea86f63b6baa46a27867983f |
1.8 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import torch
import pyro
import pyro.distributions as dist
from pyro.ops.tensor_utils import safe_normalize
from .reparam import Reparam
class ProjectedNormalReparam(Reparam):
"""
Reparametrizer for :class:`~pyro.distributions.ProjectedNormal` latent
variables.
This reparameterization works only for latent variables, not likelihoods.
"""
def __call__(self, name, fn, obs):
fn, event_dim = self._unwrap(fn)
assert isinstance(fn, dist.ProjectedNormal)
assert obs is None, "ProjectedNormalReparam does not support observe statements"
# Draw parameter-free noise.
new_fn = dist.Normal(torch.zeros_like(fn.concentration), 1).to_event(1)
x = pyro.sample("{}_normal".format(name), self._wrap(new_fn, event_dim))
# Differentiably transform.
value = safe_normalize(x + fn.concentration)
# Simulate a pyro.deterministic() site.
new_fn = dist.Delta(value, event_dim=event_dim).mask(False)
return new_fn, value
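# Hypothetical usage sketch (not part of the upstream file): applying the
# reparametrizer to a latent ProjectedNormal site via poutine.reparam. The site
# name and dimensionality are illustrative assumptions.
def _example_projected_normal_reparam():
    import pyro
    import pyro.distributions as dist
    from pyro import poutine

    def model():
        # Latent direction on the unit sphere in R^3 (uniform when concentration is zero).
        return pyro.sample("direction", dist.ProjectedNormal(torch.zeros(3)))

    reparam_model = poutine.reparam(model, config={"direction": ProjectedNormalReparam()})
    return reparam_model()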
| [
"torch.zeros_like"
] | 1.8.0 | futurewarning/pyro | 005032f10099188fea86f63b6baa46a27867983f |
1.8 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import math
import torch
def xavier_uniform(D_in, D_out):
scale = math.sqrt(6.0 / float(D_in + D_out))
noise = torch.rand(D_in, D_out)
return 2.0 * scale * noise - scale
def adjoin_ones_vector(x):
return torch.cat([x, torch.ones(x.shape[:-1] + (1,)).type_as(x)], dim=-1)
def adjoin_zeros_vector(x):
return torch.cat([x, torch.zeros(x.shape[:-1] + (1,)).type_as(x)], dim=-1)
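# Hypothetical usage sketch (not part of the upstream file): the shapes below are
# illustrative assumptions.
def _example_usage():
    w = xavier_uniform(4, 3)        # (4, 3) weights drawn uniformly in [-scale, scale]
    x = torch.randn(2, 4)
    x_ones = adjoin_ones_vector(x)  # (2, 5): a column of ones appended on the last dim
    return w, x_ones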
| [
"torch.zeros",
"torch.rand",
"torch.ones"
] | 1.8.0 | futurewarning/pyro | 005032f10099188fea86f63b6baa46a27867983f |
1.8 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import argparse
import bz2
import csv
import datetime
import logging
import multiprocessing
import os
import subprocess
import sys
import urllib
import torch
from pyro.contrib.examples.util import _mkdir_p, get_data_directory
DATA = get_data_directory(__file__)
# https://www.bart.gov/about/reports/ridership
SOURCE_DIR = "http://64.111.127.166/origin-destination/"
SOURCE_FILES = [
"date-hour-soo-dest-2011.csv.gz",
"date-hour-soo-dest-2012.csv.gz",
"date-hour-soo-dest-2013.csv.gz",
"date-hour-soo-dest-2014.csv.gz",
"date-hour-soo-dest-2015.csv.gz",
"date-hour-soo-dest-2016.csv.gz",
"date-hour-soo-dest-2017.csv.gz",
"date-hour-soo-dest-2018.csv.gz",
"date-hour-soo-dest-2019.csv.gz",
]
CACHE_URL = "https://d2hg8soec8ck9v.cloudfront.net/datasets/bart_full.pkl.bz2"
def _load_hourly_od(basename):
filename = os.path.join(DATA, basename.replace(".csv.gz", ".pkl"))
if os.path.exists(filename):
return filename
# Download source files.
gz_filename = os.path.join(DATA, basename)
if not os.path.exists(gz_filename):
url = SOURCE_DIR + basename
logging.debug("downloading {}".format(url))
urllib.request.urlretrieve(url, gz_filename)
csv_filename = gz_filename[:-3]
assert csv_filename.endswith(".csv")
if not os.path.exists(csv_filename):
logging.debug("unzipping {}".format(gz_filename))
subprocess.check_call(["gunzip", "-k", gz_filename])
assert os.path.exists(csv_filename)
# Convert to PyTorch.
logging.debug("converting {}".format(csv_filename))
start_date = datetime.datetime.strptime("2000-01-01", "%Y-%m-%d")
stations = {}
num_rows = sum(1 for _ in open(csv_filename))
logging.info("Formatting {} rows".format(num_rows))
rows = torch.empty((num_rows, 4), dtype=torch.long)
with open(csv_filename) as f:
for i, (date, hour, origin, destin, trip_count) in enumerate(csv.reader(f)):
date = datetime.datetime.strptime(date, "%Y-%m-%d")
date += datetime.timedelta(hours=int(hour))
rows[i, 0] = int((date - start_date).total_seconds() / 3600)
rows[i, 1] = stations.setdefault(origin, len(stations))
rows[i, 2] = stations.setdefault(destin, len(stations))
rows[i, 3] = int(trip_count)
if i % 10000 == 0:
sys.stderr.write(".")
sys.stderr.flush()
# Save data with metadata.
dataset = {
"basename": basename,
"start_date": start_date,
"stations": stations,
"rows": rows,
"schema": ["time_hours", "origin", "destin", "trip_count"],
}
dataset["rows"]
logging.debug("saving {}".format(filename))
torch.save(dataset, filename)
return filename
def load_bart_od():
"""
Load a dataset of hourly origin-destination ridership counts for every pair
of BART stations during the years 2011-2019.
**Source** https://www.bart.gov/about/reports/ridership
This downloads the dataset the first time it is called. On subsequent calls
this reads from a local cached file ``.pkl.bz2``. This attempts to
download a preprocessed compressed cached file maintained by the Pyro team.
On cache hit this should be very fast. On cache miss this falls back to
downloading the original data source and preprocessing the dataset,
requiring about 350MB of file transfer, storing a few GB of temp files, and
taking upwards of 30 minutes.
:returns: a dataset is a dictionary with fields:
- "stations": a list of strings of station names
- "start_date": a :py:class:`datetime.datetime` for the first observaion
- "counts": a ``torch.FloatTensor`` of ridership counts, with shape
``(num_hours, len(stations), len(stations))``.
"""
_mkdir_p(DATA)
filename = os.path.join(DATA, "bart_full.pkl.bz2")
    # Work around apparent bug in torch.load(), torch.save().
pkl_file = filename.rsplit(".", 1)[0]
if not os.path.exists(pkl_file):
try:
urllib.request.urlretrieve(CACHE_URL, filename)
logging.debug("cache hit, uncompressing")
with bz2.BZ2File(filename) as src, open(filename[:-4], "wb") as dst:
dst.write(src.read())
except urllib.error.HTTPError:
logging.debug("cache miss, preprocessing from scratch")
if os.path.exists(pkl_file):
return torch.load(pkl_file)
filenames = multiprocessing.Pool(len(SOURCE_FILES)).map(_load_hourly_od, SOURCE_FILES)
datasets = list(map(torch.load, filenames))
stations = sorted(set().union(*(d["stations"].keys() for d in datasets)))
min_time = min(int(d["rows"][:, 0].min()) for d in datasets)
max_time = max(int(d["rows"][:, 0].max()) for d in datasets)
num_rows = max_time - min_time + 1
    start_date = datasets[0]["start_date"] + datetime.timedelta(hours=min_time)
logging.info("Loaded data from {} stations, {} hours"
.format(len(stations), num_rows))
result = torch.zeros(num_rows, len(stations), len(stations))
for dataset in datasets:
part_stations = sorted(dataset["stations"], key=dataset["stations"].__getitem__)
part_to_whole = torch.tensor(list(map(stations.index, part_stations)))
time = dataset["rows"][:, 0] - min_time
origin = part_to_whole[dataset["rows"][:, 1]]
destin = part_to_whole[dataset["rows"][:, 2]]
count = dataset["rows"][:, 3].float()
result[time, origin, destin] = count
dataset.clear()
logging.info("Loaded {} shaped data of mean {:0.3g}"
.format(result.shape, result.mean()))
dataset = {
"stations": stations,
"start_date": start_date,
"counts": result,
}
torch.save(dataset, pkl_file)
subprocess.check_call(["bzip2", "-k", pkl_file])
assert os.path.exists(filename)
return dataset
def load_fake_od():
"""
Create a tiny synthetic dataset for smoke testing.
"""
dataset = {
"stations": ["12TH", "EMBR", "SFIA"],
"start_date": datetime.datetime.strptime("2000-01-01", "%Y-%m-%d"),
"counts": torch.distributions.Poisson(100).sample([24 * 7 * 8, 3, 3]),
}
return dataset
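# Hypothetical usage sketch (not part of the upstream file): inspecting the fields
# documented in load_bart_od(), using the small synthetic dataset for speed.
def _example_inspect_dataset():
    dataset = load_fake_od()
    counts = dataset["counts"]  # shape: (num_hours, num_stations, num_stations)
    print(dataset["stations"], dataset["start_date"], counts.shape)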
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="BART data preprocessor")
parser.add_argument("-v", "--verbose", action="store_true")
args = parser.parse_args()
logging.basicConfig(format='%(relativeCreated) 9d %(message)s',
level=logging.DEBUG if args.verbose else logging.INFO)
load_bart_od()
| [
"torch.distributions.Poisson",
"torch.save",
"torch.empty",
"torch.load"
] | 1.8.0 | futurewarning/pyro | 005032f10099188fea86f63b6baa46a27867983f |
1.1 | #!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import argparse
import glob
import os
import random
import signal
import time
import torch
import distributed
from models import data_loader, model_builder
from models.data_loader import load_dataset
from models.model_builder import ExtSummarizer
from models.trainer_ext import build_trainer
from others.logging import logger, init_logger
import pdb
model_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers', 'encoder', 'ff_actv', 'use_interval', 'rnn_size']
def train_multi_ext(args):
""" Spawns 1 process per GPU """
init_logger()
nb_gpu = args.world_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(nb_gpu):
device_id = i
procs.append(mp.Process(target=run, args=(args,
device_id, error_queue,), daemon=True))
procs[i].start()
logger.info(" Starting process pid: %d " % procs[i].pid)
error_handler.add_child(procs[i].pid)
for p in procs:
p.join()
def run(args, device_id, error_queue):
""" run process """
setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])
try:
gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)
print('gpu_rank %d' % gpu_rank)
if gpu_rank != args.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
train_single_ext(args, device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
def validate_ext(args, device_id):
timestep = 0
FILE_PATH = 'model_step_*.pt'
#FILE_PATH = 'bertext_cnndm_transformer*.pt'
if (args.test_all):
cp_files = sorted(glob.glob(os.path.join(args.model_path, FILE_PATH)))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
xent = validate(args, device_id, cp, step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if (i - max_step > 10):
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]
logger.info('PPL %s' % str(xent_lst))
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
test_ext(args, device_id, cp, step)
else:
while (True):
cp_files = sorted(glob.glob(os.path.join(args.model_path, FILE_PATH)))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (not os.path.getsize(cp) > 0):
print("will sleep 60", os.path.getsize(cp))
time.sleep(60)
continue
if (time_of_cp > timestep):
timestep = time_of_cp
step = 0
step = int(cp.split('.')[-2].split('_')[-1])
validate(args, device_id, cp, step)
test_ext(args, device_id, cp, step)
cp_files = sorted(glob.glob(os.path.join(args.model_path, FILE_PATH)))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (time_of_cp > timestep):
continue
return
else:
print("will sleep 300", cp_files)
time.sleep(300)
def validate(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = ExtSummarizer(args, device, checkpoint)
model.eval()
valid_iter = data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=False)
trainer = build_trainer(args, device_id, model, None)
stats = trainer.validate(valid_iter, step)
return stats.xent()
def test_ext(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = ExtSummarizer(args, device, checkpoint)
model.eval()
test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.test_batch_size, device,
shuffle=False, is_test=True)
trainer = build_trainer(args, device_id, model, None)
trainer.test(test_iter, step)
def train_ext(args, device_id):
if (args.world_size > 1):
train_multi_ext(args)
else:
train_single_ext(args, device_id)
def train_single_ext(args, device_id):
init_logger(args.log_file)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
logger.info('Device ID %d' % device_id)
logger.info('Device %s' % device)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if device_id >= 0:
torch.cuda.set_device(device_id)
torch.cuda.manual_seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if args.train_from != '':
logger.info('Loading checkpoint from %s' % args.train_from)
checkpoint = torch.load(args.train_from,
map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
else:
checkpoint = None
def train_iter_fct():
return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,
shuffle=True, is_test=False)
model = ExtSummarizer(args, device, checkpoint)
optim = model_builder.build_optim(args, model, checkpoint)
logger.info(model)
trainer = build_trainer(args, device_id, model, optim)
trainer.train(train_iter_fct, args.train_steps)
| [
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.multiprocessing.get_context",
"torch.cuda.set_device",
"torch.load"
] | 1.1.0 | qwang70/PreSumm | b2c3aee0ada7f5fa8754dffd44355b956fe0d45b |
1.8 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
def cross_entropy_dist_epoch(reduction='mean', **_):
cross_entropy_fn = torch.nn.CrossEntropyLoss(reduction=reduction)
l1_fn = torch.nn.L1Loss(reduction=reduction)
def loss_fn(outputs, outputs_f, labels, epoch, **_):
loss_dict = dict()
full_gt_loss = cross_entropy_fn(outputs_f['out'], labels)
gt_loss = cross_entropy_fn(outputs['out'], labels)
dist_loss = 0
layer_names = outputs.keys()
len_layer = len(layer_names)
for i, layer_name in enumerate(layer_names):
if i == len_layer - 1:
continue
dist_loss += l1_fn(outputs[layer_name], outputs_f[layer_name])
scale = epoch / 100
if epoch == 100:
scale = 1
loss_dict['loss'] = scale*(gt_loss + dist_loss) + full_gt_loss
loss_dict['gt_loss'] = gt_loss
loss_dict['full_gt_loss'] = full_gt_loss
return loss_dict
return {'train': loss_fn, 'val': cross_entropy_fn}
def cross_entropy_dist(reduction='mean', **_):
cross_entropy_fn = torch.nn.CrossEntropyLoss(reduction=reduction)
l1_fn = torch.nn.L1Loss(reduction=reduction)
def loss_fn(outputs, outputs_f, labels, **_):
loss_dict = dict()
full_gt_loss = cross_entropy_fn(outputs_f['out'], labels)
gt_loss = cross_entropy_fn(outputs['out'], labels)
dist_loss = 0
layer_names = outputs.keys()
len_layer = len(layer_names)
for i, layer_name in enumerate(layer_names):
if i == len_layer - 1:
continue
dist_loss += l1_fn(outputs[layer_name], outputs_f[layer_name])
loss_dict['loss'] = gt_loss + dist_loss + full_gt_loss
loss_dict['gt_loss'] = gt_loss
loss_dict['full_gt_loss'] = full_gt_loss
return loss_dict
return {'train': loss_fn, 'val': cross_entropy_fn}
def cross_entropy(reduction='mean', **_):
cross_entropy_fn = torch.nn.CrossEntropyLoss(reduction=reduction)
def loss_fn(outputs, labels, **_):
loss_dict = dict()
gt_loss = cross_entropy_fn(outputs, labels)
loss_dict['loss'] = gt_loss
loss_dict['gt_loss'] = gt_loss
return loss_dict
return {'train': loss_fn, 'val': cross_entropy_fn}
def regularization(reduction='mean', **_):
cross_entropy_fn = torch.nn.CrossEntropyLoss(reduction=reduction)
def loss_fn(outputs, labels, reg_factors, **_):
loss_dict = dict()
gt_loss = cross_entropy_fn(outputs, labels)
reg_loss = 0
for i in range(len(reg_factors)):
reg_loss += torch.mean((torch.pow(reg_factors[i]-1, 2)*torch.pow(reg_factors[i]+1, 2)))
reg_loss = reg_loss / len(reg_factors)
loss_dict['loss'] = gt_loss + reg_loss
loss_dict['gt_loss'] = gt_loss
loss_dict['reg_loss'] = reg_loss
return loss_dict
return {'train': loss_fn, 'val': cross_entropy_fn}
def regularization_temp(reduction='mean', **_):
cross_entropy_fn = torch.nn.CrossEntropyLoss(reduction=reduction)
def loss_fn(outputs, labels, reg_factors, **_):
loss_dict = dict()
gt_loss = cross_entropy_fn(outputs, labels)
reg_loss = 0
for i in range(len(reg_factors)):
reg_loss += torch.mean((torch.pow(reg_factors[i]-1, 2)*torch.pow(reg_factors[i]+1, 2)))
reg_loss = reg_loss / len(reg_factors)
loss_dict['loss'] = gt_loss + reg_loss
loss_dict['gt_loss'] = gt_loss
loss_dict['reg_loss'] = reg_loss
return loss_dict
return {'train': loss_fn, 'val': cross_entropy_fn}
def get_loss(config):
f = globals().get(config.loss.name)
return f(**config.loss.params)
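# Hypothetical usage sketch (not part of the upstream file): the config object only
# needs to expose `loss.name` and `loss.params`; a SimpleNamespace stands in for it here.
def _example_cross_entropy():
    from types import SimpleNamespace
    config = SimpleNamespace(loss=SimpleNamespace(name="cross_entropy",
                                                  params={"reduction": "mean"}))
    loss_fns = get_loss(config)
    outputs = torch.randn(4, 10)
    labels = torch.randint(0, 10, (4,))
    return loss_fns["train"](outputs, labels)["loss"]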
| [
"torch.nn.CrossEntropyLoss",
"torch.pow",
"torch.nn.L1Loss"
] | 1.8.2 | iimmortall/QuantLib | 29e83dad8738d0fb4efb18d0cb5dd3a7029abd86 |
0.27 | # Copyright 2021 MosaicML. All Rights Reserved.
"""The CIFAR ResNet torch module.
See the :doc:`Model Card </model_cards/resnet>` for more details.
"""
# Code below adapted from https://github.com/facebookresearch/open_lth
# and https://github.com/pytorch/vision
from typing import List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from composer.models import Initializer
__all__ = ["CIFAR_ResNet"]
class CIFAR_ResNet(nn.Module):
"""A residual neural network as originally designed for CIFAR-10."""
class Block(nn.Module):
"""A ResNet block."""
def __init__(self, f_in: int, f_out: int, downsample: bool = False):
super(CIFAR_ResNet.Block, self).__init__()
stride = 2 if downsample else 1
self.conv1 = nn.Conv2d(f_in, f_out, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(f_out)
self.conv2 = nn.Conv2d(f_out, f_out, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(f_out)
self.relu = nn.ReLU(inplace=True)
# No parameters for shortcut connections.
if downsample or f_in != f_out:
self.shortcut = nn.Sequential(
nn.Conv2d(f_in, f_out, kernel_size=1, stride=2, bias=False),
nn.BatchNorm2d(f_out),
)
else:
self.shortcut = nn.Sequential()
def forward(self, x: torch.Tensor):
out = self.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
return self.relu(out)
def __init__(self, plan: List[Tuple[int, int]], initializers: List[Initializer], outputs: int = 10):
super(CIFAR_ResNet, self).__init__()
outputs = outputs or 10
self.num_classes = outputs
# Initial convolution.
current_filters = plan[0][0]
self.conv = nn.Conv2d(3, current_filters, kernel_size=3, stride=1, padding=1, bias=False)
self.bn = nn.BatchNorm2d(current_filters)
self.relu = nn.ReLU(inplace=True)
# The subsequent blocks of the ResNet.
blocks = []
for segment_index, (filters, num_blocks) in enumerate(plan):
for block_index in range(num_blocks):
downsample = segment_index > 0 and block_index == 0
blocks.append(CIFAR_ResNet.Block(current_filters, filters, downsample))
current_filters = filters
self.blocks = nn.Sequential(*blocks)
# Final fc layer. Size = number of filters in last segment.
self.fc = nn.Linear(plan[-1][0], outputs)
self.criterion = nn.CrossEntropyLoss()
for initializer in initializers:
initializer = Initializer(initializer)
self.apply(initializer.get_initializer())
def forward(self, x: torch.Tensor):
out = self.relu(self.bn(self.conv(x)))
out = self.blocks(out)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
@staticmethod
def is_valid_model_name(model_name: str):
valid_model_names = [f"cifar_resnet_{layers}" for layers in (20, 56)]
return (model_name in valid_model_names)
@staticmethod
def get_model_from_name(model_name: str, initializers: List[Initializer], outputs: int = 10):
"""The naming scheme for a ResNet is ``'cifar_resnet_D[_W]'``.
        D is the model depth (e.g. ``'cifar_resnet_56'``)
"""
if not CIFAR_ResNet.is_valid_model_name(model_name):
raise ValueError('Invalid model name: {}'.format(model_name))
depth = int(model_name.split('_')[2])
if len(model_name.split('_')) == 3:
width = 16
else:
width = int(model_name.split('_')[4])
if (depth - 2) % 3 != 0:
raise ValueError('Invalid CIFAR_ResNet depth: {}'.format(depth))
num_blocks = (depth - 2) // 6
model_arch = {
56: [(width, num_blocks), (2 * width, num_blocks), (4 * width, num_blocks)],
20: [(width, num_blocks), (2 * width, num_blocks), (4 * width, num_blocks)],
}
return CIFAR_ResNet(model_arch[depth], initializers, outputs)
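# Hypothetical usage sketch (not part of the upstream file): constructing a CIFAR
# ResNet-56 through the factory above and running a dummy forward pass.
def _example_build_cifar_resnet():
    model = CIFAR_ResNet.get_model_from_name("cifar_resnet_56", initializers=[], outputs=10)
    x = torch.randn(2, 3, 32, 32)
    return model(x)  # logits of shape (2, 10)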
| [
"torch.nn.Linear",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.CrossEntropyLoss"
] | 0.27 | murthyn/composer | 2a04cf387dd8558556500f7ef2bc6d3d131043d5 |
1.8 | from torch import nn
from .base_models import BaseEncoderMaskerDecoder
from asteroid_filterbanks import make_enc_dec
from asteroid_filterbanks.transforms import mag, magreim
from ..masknn import norms, activations
from ..utils.torch_utils import pad_x_to_y
import warnings
class DeMask(BaseEncoderMaskerDecoder):
"""
    Simple MLP model for surgical mask speech enhancement. A transformed-domain masking approach is used.
Args:
        input_type (str, optional): whether the magnitude spectrogram "mag" or both real and imaginary parts "reim" are
            passed as features to the masker network.
            Concatenation of "mag" and "reim" can also be used with "cat".
        output_type (str, optional): whether the masker outputs a mask
            for the magnitude spectrogram "mag" or both real and imaginary parts "reim".
hidden_dims (list, optional): list of MLP hidden layer sizes.
dropout (float, optional): dropout probability.
activation (str, optional): type of activation used in hidden MLP layers.
mask_act (str, optional): Which non-linear function to generate mask.
norm_type (str, optional): To choose from ``'BN'``, ``'gLN'``,
``'cLN'``.
fb_name (str): type of analysis and synthesis filterbanks used,
choose between ["stft", "free", "analytic_free"].
n_filters (int): number of filters in the analysis and synthesis filterbanks.
stride (int): filterbank filters stride.
kernel_size (int): length of filters in the filterbank.
encoder_activation (str)
sample_rate (float): Sampling rate of the model.
**fb_kwargs (dict): Additional kwards to pass to the filterbank
creation.
"""
def __init__(
self,
input_type="mag",
output_type="mag",
hidden_dims=(1024,),
dropout=0.0,
activation="relu",
mask_act="relu",
norm_type="gLN",
fb_name="stft",
n_filters=512,
stride=256,
kernel_size=512,
sample_rate=16000,
**fb_kwargs,
):
encoder, decoder = make_enc_dec(
fb_name,
kernel_size=kernel_size,
n_filters=n_filters,
stride=stride,
sample_rate=sample_rate,
**fb_kwargs,
)
n_masker_in = self._get_n_feats_input(input_type, encoder.n_feats_out)
n_masker_out = self._get_n_feats_output(output_type, encoder.n_feats_out)
masker = build_demask_masker(
n_masker_in,
n_masker_out,
norm_type=norm_type,
activation=activation,
hidden_dims=hidden_dims,
dropout=dropout,
mask_act=mask_act,
)
super().__init__(encoder, masker, decoder)
self.input_type = input_type
self.output_type = output_type
self.hidden_dims = hidden_dims
self.dropout = dropout
self.activation = activation
self.mask_act = mask_act
self.norm_type = norm_type
def _get_n_feats_input(self, input_type, encoder_n_out):
if input_type == "reim":
return encoder_n_out
if input_type not in {"mag", "cat"}:
raise NotImplementedError("Input type should be either mag, reim or cat")
n_feats_input = encoder_n_out // 2
if input_type == "cat":
n_feats_input += encoder_n_out
return n_feats_input
def _get_n_feats_output(self, output_type, encoder_n_out):
if output_type == "mag":
return encoder_n_out // 2
if output_type == "reim":
return encoder_n_out
raise NotImplementedError("Output type should be either mag or reim")
def forward_masker(self, tf_rep):
"""Estimates masks based on time-frequency representations.
Args:
tf_rep (torch.Tensor): Time-frequency representation in
(batch, freq, seq).
Returns:
torch.Tensor: Estimated masks in (batch, freq, seq).
"""
masker_input = tf_rep
if self.input_type == "mag":
masker_input = mag(masker_input)
elif self.input_type == "cat":
masker_input = magreim(masker_input)
est_masks = self.masker(masker_input)
if self.output_type == "mag":
est_masks = est_masks.repeat(1, 2, 1)
return est_masks
def apply_masks(self, tf_rep, est_masks):
"""Applies masks to time-frequency representations.
Args:
tf_rep (torch.Tensor): Time-frequency representations in
(batch, freq, seq).
est_masks (torch.Tensor): Estimated masks in (batch, freq, seq).
Returns:
torch.Tensor: Masked time-frequency representations.
"""
if self.output_type == "reim":
tf_rep = tf_rep.unsqueeze(1)
return est_masks * tf_rep
def get_model_args(self):
""" Arguments needed to re-instantiate the model. """
model_args = {
"input_type": self.input_type,
"output_type": self.output_type,
"hidden_dims": self.hidden_dims,
"dropout": self.dropout,
"activation": self.activation,
"mask_act": self.mask_act,
"norm_type": self.norm_type,
}
model_args.update(self.encoder.filterbank.get_config())
return model_args
def build_demask_masker(
n_in,
n_out,
activation="relu",
dropout=0.0,
hidden_dims=(1024,),
mask_act="relu",
norm_type="gLN",
):
make_layer_norm = norms.get(norm_type)
net = [make_layer_norm(n_in)]
layer_activation = activations.get(activation)()
in_chan = n_in
for hidden_dim in hidden_dims:
net.extend(
[
nn.Conv1d(in_chan, hidden_dim, 1),
make_layer_norm(hidden_dim),
layer_activation,
nn.Dropout(dropout),
]
)
in_chan = hidden_dim
net.extend([nn.Conv1d(in_chan, n_out, 1), activations.get(mask_act)()])
return nn.Sequential(*net)
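# Hypothetical usage sketch (not part of the upstream file): enhancing one second of
# 16 kHz audio with a freshly initialized model. Shapes are illustrative assumptions.
def _example_demask_forward():
    import torch
    model = DeMask(sample_rate=16000)
    wav = torch.randn(1, 16000)  # (batch, time)
    return model(wav)            # enhanced waveform, padded/trimmed to the input length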
| [
"torch.nn.Sequential",
"torch.nn.Dropout",
"torch.nn.Conv1d"
] | 1.8.0 | ldelebec/asteroid | d6390baca5409634f112ceed554ea66c4054cb54 |