import json
import numpy as np
import sk2torch
import torch
from torch import nn
from sklearn.datasets import load_iris
from sklearn.ensemble import GradientBoostingClassifier as Gbc
from sklearn.model_selection import train_test_split
NUM_CLASSES = 3
iris = load_iris()
X, y = iris.data, iris.target
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y)
clr = Gbc(n_estimators=10)
clr.fit(X_train, y_train)
trees = []
for bundle in clr.estimators_:
tree_bundle = []
for tree in bundle:
tree_bundle.append(sk2torch.wrap(tree))
trees.append(tree_bundle)
class GradientBoostedTrees(nn.Module):
def __init__(self, trees):
super(GradientBoostedTrees, self).__init__()
bundle_modules = []
for bundle in trees:
module = nn.ModuleList(bundle)
bundle_modules.append(module)
self.trees = nn.ModuleList(bundle_modules)
self.num_classifiers = torch.tensor(
[len(self.trees) for _ in range(NUM_CLASSES)])
def forward(self, x):
local_pred = self.trees[0][0](x)
local_pred = local_pred.reshape(-1, 1)
for tree in self.trees[0][1:]:
tree_out = tree(x)
tree_out = tree_out.reshape(-1, 1)
local_pred = torch.cat((local_pred, tree_out), 1)
local_pred = local_pred.reshape(-1, NUM_CLASSES)
out = local_pred
for bundle in self.trees[1:]:
local_pred = bundle[0](x)
local_pred = local_pred.reshape(-1, 1)
for tree in bundle[1:]:
tree_out = tree(x)
tree_out = tree_out.reshape(-1, 1)
local_pred = torch.cat((local_pred, tree_out), 1)
local_pred = local_pred.reshape(-1, NUM_CLASSES)
out = out + local_pred
output = out / self.num_classifiers
return output.reshape(-1, NUM_CLASSES)
torch_rf = GradientBoostedTrees(trees)
for i in range(len(X_test)):
torch_pred = torch_rf(torch.tensor(X_test[i].reshape(1, -1)))
sk_pred = clr.predict(X_test[i].reshape(1, -1))
print(torch_pred, sk_pred[0])
assert torch_pred.argmax() == sk_pred[0]
torch_rf.eval()
shape = X_train.shape[1:]
x = torch.rand(1, *shape, requires_grad=False)
torch_out = torch_rf(x)
torch.onnx.export(torch_rf,
x,
"network.onnx",
export_params=True,
opset_version=11,
do_constant_folding=True,
input_names=['input'],
output_names=['output'],
dynamic_axes={'input': {0: 'batch_size'},
'output': {0: 'batch_size'}})
d = ((x).detach().numpy()).reshape([-1]).tolist()
data = dict(input_shapes=[shape],
input_data=[d],
output_data=[((o).detach().numpy()).reshape([-1]).tolist() for o in torch_out])
json.dump(data, open("input.json", 'w'))
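# Optional parity check (not part of the original snippet): run the exported
# graph with onnxruntime and compare it against the PyTorch output. Assumes the
# onnxruntime package is installed; "network.onnx", x and torch_out come from
# the export above.
import onnxruntime as rt
sess = rt.InferenceSession("network.onnx")
onnx_out = sess.run(None, {'input': x.detach().numpy()})[0]
print("max abs diff vs torch:", np.abs(onnx_out - torch_out.detach().numpy()).max())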
import random
import math
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import json
model = nn.GRU(3, 3)  # input size is 3, hidden size is 3
x = torch.randn(1, 3)  # a single-step sequence of shape (seq_len=1, input_size=3)
print(x)
# Flips the neural net into inference mode
model.eval()
model.to('cpu')
# Export the model
torch.onnx.export(model, # model being run
# model input (or a tuple for multiple inputs)
x,
# where to save the model (can be a file or file-like object)
"network.onnx",
export_params=True, # store the trained parameter weights inside the model file
opset_version=10, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=['input'], # the model's input names
output_names=['output'], # the model's output names
dynamic_axes={'input': {0: 'batch_size'}, # variable length axes
'output': {0: 'batch_size'}})
data_array = ((x).detach().numpy()).reshape([-1]).tolist()
data_json = dict(input_data=[data_array])
print(data_json)
# Serialize data into file:
json.dump(data_json, open("input.json", 'w'))
from torch import nn
import torch
import json
import numpy as np
class MyModel(nn.Module):
def __init__(self):
super(MyModel, self).__init__()
def forward(self, x):
m = torch.argmax(x)
return m
circuit = MyModel()
x = torch.empty(1, 8).uniform_(0, 1)
out = circuit(x)
print(out)
torch.onnx.export(circuit, x, "network.onnx",
export_params=True, # store the trained parameter weights inside the model file
opset_version=17, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=['input'], # the model's input names
output_names=['output'], # the model's output names
dynamic_axes={'input': {0: 'batch_size'}, # variable length axes
'output': {0: 'batch_size'}})
d1 = ((x).detach().numpy()).reshape([-1]).tolist()
data = dict(
input_data=[d1],
)
# Serialize data into file:
json.dump(data, open("input.json", 'w'))
from torch import nn
import torch
import json
import numpy as np
class MyModel(nn.Module):
def __init__(self):
super(MyModel, self).__init__()
def forward(self, x):
m = nn.Hardsigmoid()(x)
return m
circuit = MyModel()
x = torch.empty(1, 8).uniform_(0, 1)
out = circuit(x)
print(out)
torch.onnx.export(circuit, x, "network.onnx",
export_params=True, # store the trained parameter weights inside the model file
opset_version=17, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=['input'], # the model's input names
output_names=['output'], # the model's output names
dynamic_axes={'input': {0: 'batch_size'}, # variable length axes
'output': {0: 'batch_size'}})
d1 = ((x).detach().numpy()).reshape([-1]).tolist()
data = dict(
input_data=[d1],
)
# Serialize data into file:
json.dump(data, open("input.json", 'w'))
from torch import nn
import torch
import json
import numpy as np
class MyModel(nn.Module):
def __init__(self):
super(MyModel, self).__init__()
def forward(self, x):
m = nn.Hardswish()(x)
return m
circuit = MyModel()
x = torch.empty(1, 8).uniform_(0, 1)
out = circuit(x)
print(out)
torch.onnx.export(circuit, x, "network.onnx",
export_params=True, # store the trained parameter weights inside the model file
opset_version=17, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=['input'], # the model's input names
output_names=['output'], # the model's output names
dynamic_axes={'input': {0: 'batch_size'}, # variable length axes
'output': {0: 'batch_size'}})
d1 = ((x).detach().numpy()).reshape([-1]).tolist()
data = dict(
input_data=[d1],
)
# Serialize data into file:
json.dump(data, open("input.json", 'w'))
# Train a model.
import json
import onnxruntime as rt
from skl2onnx import to_onnx
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier as De
from hummingbird.ml import convert
import torch
iris = load_iris()
X, y = iris.data, iris.target
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y)
clr = De()
clr.fit(X_train, y_train)
torch_model = convert(clr, "pytorch").model
# Convert into ONNX format. Input to the model:
shape = X_train.shape[1:]
x = torch.rand(1, *shape, requires_grad=True)
torch_out = torch_model(x)
# Export the model
torch.onnx.export(torch_model, # model being run
# model input (or a tuple for multiple inputs)
x,
# where to save the model (can be a file or file-like object)
"network.onnx",
export_params=True, # store the trained parameter weights inside the model file
opset_version=10, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=['input'], # the model's input names
output_names=['output'], # the model's output names
dynamic_axes={'input': {0: 'batch_size'}, # variable length axes
'output': {0: 'batch_size'}})
d = ((x).detach().numpy()).reshape([-1]).tolist()
data = dict(input_shapes=[shape],
input_data=[d],
output_data=[((o).detach().numpy()).reshape([-1]).tolist() for o in torch_out])
# Serialize data into file:
json.dump(data, open("input.json", 'w'))
import torch
import torch.nn as nn
import json
# A single model that only does layernorm
class LayerNorm(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.ln = nn.LayerNorm(hidden_size)
def forward(self, x):
return self.ln(x)
x = torch.randn(1, 10, 10)
model = LayerNorm(10)
out = model(x)
torch.onnx.export(model, x, "network.onnx",
export_params=True,
do_constant_folding=True,
input_names=['input'],
output_names=['output'],
dynamic_axes={'input': {0: 'batch_size'},
'output': {0: 'batch_size'}})
data_array = ((x).detach().numpy()).reshape([-1]).tolist()
data = dict(input_data=[data_array], output_data=[((o).detach().numpy()).reshape([-1]).tolist() for o in out])
json.dump(data, open("input.json", 'w'))
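# Optional structural check (not part of the original snippet): validate the
# exported graph with the onnx checker before handing it to downstream tooling.
# Assumes the onnx package is installed.
import onnx
onnx_model = onnx.load("network.onnx")
onnx.checker.check_model(onnx_model)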
from torch import nn
import torch
import json
import numpy as np
class MyModel(nn.Module):
def __init__(self):
super(MyModel, self).__init__()
def forward(self, w, x):
return torch.less(w, x)
circuit = MyModel()
w = torch.rand(1, 4)
x = torch.rand(1, 4)
torch.onnx.export(circuit, (w, x), "network.onnx",
export_params=True, # store the trained parameter weights inside the model file
opset_version=15, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=['input', 'input1'], # the model's input names
output_names=['output'], # the model's output names
dynamic_axes={'input': {0: 'batch_size'}, # variable length axes
'input1': {0: 'batch_size'},
'output': {0: 'batch_size'}})
d = ((w).detach().numpy()).reshape([-1]).tolist()
d1 = ((x).detach().numpy()).reshape([-1]).tolist()
data = dict(
input_data=[d, d1],
)
# Serialize data into file:
json.dump(data, open("input.json", 'w'))
import json
import os
import numpy as np
import torch
import ezkl
import xgboost as xgb
from torch import nn
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from lightgbm import LGBMClassifier as Gbc
from hummingbird.ml import convert
NUM_CLASSES = 3
iris = load_iris()
X, y = iris.data, iris.target
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y)
clr = Gbc(n_estimators=12)
clr.fit(X_train, y_train)
torch_gbt = convert(clr, 'torch', X_test[:1])
print(torch_gbt)
diffs = []
for i in range(len(X_test)):
torch_pred = torch_gbt.predict(torch.tensor(X_test[i].reshape(1, -1)))
sk_pred = clr.predict(X_test[i].reshape(1, -1))
diffs.append(torch_pred != sk_pred[0])
print("num diff: ", sum(diffs))
shape = X_train.shape[1:]
x = torch.rand(1, *shape, requires_grad=False)
torch_out = torch_gbt.predict(x)
torch.onnx.export(torch_gbt.model,
x,
"network.onnx",
export_params=True,
opset_version=11,
do_constant_folding=True,
input_names=['input'],
output_names=['output'],
dynamic_axes={'input': {0: 'batch_size'},
'output': {0: 'batch_size'}})
d = ((x).detach().numpy()).reshape([-1]).tolist()
data = dict(input_shapes=[shape],
input_data=[d],
output_data=[(o).reshape([-1]).tolist() for o in torch_out])
json.dump(data, open("input.json", 'w'))
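# ezkl is imported above but never used here. A rough sketch of the downstream
# proving flow is left commented out below: these function names exist in
# ezkl's Python bindings, but the exact signatures and argument order vary
# between releases, so treat this as an assumption-laden outline rather than a
# drop-in recipe.
# ezkl.gen_settings("network.onnx", "settings.json")
# ezkl.calibrate_settings("input.json", "network.onnx", "settings.json", "resources")
# ezkl.compile_circuit("network.onnx", "network.compiled", "settings.json")
# ezkl.setup("network.compiled", "vk.key", "pk.key")
# ezkl.gen_witness("input.json", "network.compiled", "witness.json")
# ezkl.prove("witness.json", "network.compiled", "pk.key", "proof.json", "single")
# ezkl.verify("proof.json", "settings.json", "vk.key")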
import os
import torch
import ezkl
import json
from hummingbird.ml import convert
# here we create and (potentially train a model)
# make sure you have the dependencies required here already installed
import numpy as np
from sklearn.linear_model import LinearRegression
X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
# y = 1 * x_0 + 2 * x_1 + 3
y = np.dot(X, np.array([1, 2])) + 3
reg = LinearRegression().fit(X, y)
reg.score(X, y)
circuit = convert(reg, "torch", X[:1]).model
# export to onnx format
# !!!!!!!!!!!!!!!!! This will flash a warning but it is fine !!!!!!!!!!!!!!!!!!!!!
# Input to the model
shape = X.shape[1:]
x = torch.rand(1, *shape, requires_grad=True)
torch_out = circuit(x)
# Export the model
torch.onnx.export(circuit, # model being run
# model input (or a tuple for multiple inputs)
x,
# where to save the model (can be a file or file-like object)
"network.onnx",
export_params=True, # store the trained parameter weights inside the model file
opset_version=10, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=['input'], # the model's input names
output_names=['output'], # the model's output names
dynamic_axes={'input': {0: 'batch_size'}, # variable length axes
'output': {0: 'batch_size'}})
d = ((x).detach().numpy()).reshape([-1]).tolist()
data = dict(input_shapes=[shape],
input_data=[d],
output_data=[((o).detach().numpy()).reshape([-1]).tolist() for o in torch_out])
# Serialize data into file:
json.dump(data, open("input.json", 'w'))
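# Optional cross-check (not part of the original snippet): the hummingbird
# torch model should closely agree with the sklearn regressor on the training
# points. All names come from the code above.
pred_torch = circuit(torch.tensor(X, dtype=torch.float32)).detach().numpy().reshape(-1)
print("max abs diff vs sklearn:", np.abs(pred_torch - reg.predict(X)).max())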
from torch import nn
import torch
import json
import numpy as np
class MyModel(nn.Module):
def __init__(self):
super(MyModel, self).__init__()
def forward(self, x):
m = nn.LogSoftmax(dim=1)(x)
return m
circuit = MyModel()
x = torch.empty(1, 8).uniform_(0, 1)
out = circuit(x)
print(out)
torch.onnx.export(circuit, x, "network.onnx",
export_params=True, # store the trained parameter weights inside the model file
opset_version=17, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=['input'], # the model's input names
output_names=['output'], # the model's output names
dynamic_axes={'input': {0: 'batch_size'}, # variable length axes
'output': {0: 'batch_size'}})
d1 = ((x).detach().numpy()).reshape([-1]).tolist()
data = dict(
input_data=[d1],
)
# Serialize data into file:
json.dump(data, open("input.json", 'w'))
from torch import nn
import torch
import json
import numpy as np
class MyModel(nn.Module):
def __init__(self):
super(MyModel, self).__init__()
def forward(self, x):
m = torch.logsumexp(x, dim=1)
return m
circuit = MyModel()
x = torch.empty(1, 2, 2, 8).uniform_(0, 1)
out = circuit(x)
print(out)
torch.onnx.export(circuit, x, "network.onnx",
export_params=True, # store the trained parameter weights inside the model file
opset_version=17, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=['input'], # the model's input names
output_names=['output'], # the model's output names
dynamic_axes={'input': {0: 'batch_size'}, # variable length axes
'output': {0: 'batch_size'}})
d1 = ((x).detach().numpy()).reshape([-1]).tolist()
data = dict(
input_data=[d1],
)
# Serialize data into file:
json.dump(data, open("input.json", 'w'))
import random
import math
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import json
model = nn.LSTM(3, 3)  # input size is 3, hidden size is 3
x = torch.randn(1, 3)  # a single-step sequence of shape (seq_len=1, input_size=3)
print(x)
# Flips the neural net into inference mode
model.eval()
model.to('cpu')
# Export the model
torch.onnx.export(model, # model being run
# model input (or a tuple for multiple inputs)
x,
# where to save the model (can be a file or file-like object)
"network.onnx",
export_params=True, # store the trained parameter weights inside the model file
opset_version=10, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=['input'], # the model's input names
output_names=['output'], # the model's output names
dynamic_axes={'input': {0: 'batch_size'}, # variable length axes
'output': {0: 'batch_size'}})
data_array = ((x).detach().numpy()).reshape([-1]).tolist()
data_json = dict(input_data=[data_array])
print(data_json)
# Serialize data into file:
json.dump(data_json, open("input.json", 'w'))
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
class moving_avg(nn.Module):
"""
Moving average block to highlight the trend of time series
"""
def __init__(self, kernel_size, stride):
super(moving_avg, self).__init__()
self.kernel_size = kernel_size
self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=stride, padding=0)
def forward(self, x):
front = x[:, 0:1, :].repeat(1, (self.kernel_size - 1) // 2, 1)
end = x[:, -1:, :].repeat(1, (self.kernel_size - 1) // 2, 1)
x = torch.cat([front, x, end], dim=1)
x = self.avg(x.permute(0, 2, 1))
x = x.permute(0, 2, 1)
return x
class series_decomp(nn.Module):
"""
Series decomposition block
"""
def __init__(self, kernel_size):
super(series_decomp, self).__init__()
self.moving_avg = moving_avg(kernel_size, stride=1)
def forward(self, x):
moving_mean = self.moving_avg(x)
res = x - moving_mean
return res, moving_mean
class Model(nn.Module):
"""
Decomposition-Linear
"""
def __init__(self, configs):
super(Model, self).__init__()
self.seq_len = configs['seq_len']
self.pred_len = configs['pred_len']
kernel_size = 25
self.decomposition = series_decomp(kernel_size)
self.individual = configs['individual']
self.channels = configs['enc_in']
if self.individual:
self.Linear_Seasonal = nn.ModuleList()
self.Linear_Trend = nn.ModuleList()
for i in range(self.channels):
self.Linear_Seasonal.append(nn.Linear(self.seq_len,self.pred_len))
self.Linear_Trend.append(nn.Linear(self.seq_len,self.pred_len))
else:
self.Linear_Seasonal = nn.Linear(self.seq_len,self.pred_len)
self.Linear_Trend = nn.Linear(self.seq_len,self.pred_len)
def forward(self, x):
seasonal_init, trend_init = self.decomposition(x)
seasonal_init, trend_init = seasonal_init.permute(0,2,1), trend_init.permute(0,2,1)
if self.individual:
seasonal_output = torch.zeros([seasonal_init.size(0),seasonal_init.size(1),self.pred_len],dtype=seasonal_init.dtype).to(seasonal_init.device)
trend_output = torch.zeros([trend_init.size(0),trend_init.size(1),self.pred_len],dtype=trend_init.dtype).to(trend_init.device)
for i in range(self.channels):
seasonal_output[:,i,:] = self.Linear_Seasonal[i](seasonal_init[:,i,:])
trend_output[:,i,:] = self.Linear_Trend[i](trend_init[:,i,:])
else:
seasonal_output = self.Linear_Seasonal(seasonal_init)
trend_output = self.Linear_Trend(trend_init)
x = seasonal_output + trend_output
return x.permute(0,2,1)
configs = {
'seq_len': 96,
'pred_len': 1,
'individual': True,
'enc_in': 1,
}
circuit = Model(configs)
x = torch.empty(1, 96, 1).uniform_(0, 1)
out = circuit(x)
print(out)
torch.onnx.export(circuit, x, "network.onnx",
export_params=True,
opset_version=17,
do_constant_folding=True,
input_names=['input'],
output_names=['output'],
dynamic_axes={'input': {0: 'batch_size'},
'output': {0: 'batch_size'}})
d1 = ((x).detach().numpy()).reshape([-1]).tolist()
data = dict(
input_data=[d1],
)
json.dump(data, open("input.json", 'w'))
from torch import nn
from ezkl import export
import torch
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
def forward(self, x):
return [torch.max(x)]
circuit = Model()
export(circuit, input_shape=[3, 2, 2])
from torch import nn
from ezkl import export
import torch
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
def forward(self, x):
return [torch.min(x)]
circuit = Model()
export(circuit, input_shape=[3, 2, 2])
from torch import nn
import torch
import json
import numpy as np
class MyModel(nn.Module):
def __init__(self):
super(MyModel, self).__init__()
def forward(self, x):
m = nn.Mish()(x)
return m
circuit = MyModel()
x = torch.empty(1, 8).uniform_(0, 1)
out = circuit(x)
print(out)
torch.onnx.export(circuit, x, "network.onnx",
export_params=True, # store the trained parameter weights inside the model file
opset_version=17, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=['input'], # the model's input names
output_names=['output'], # the model's output names
dynamic_axes={'input': {0: 'batch_size'}, # variable length axes
'output': {0: 'batch_size'}})
d1 = ((x).detach().numpy()).reshape([-1]).tolist()
data = dict(
input_data=[d1],
)
# Serialize data into file:
json.dump(data, open("input.json", 'w'))
import ezkl
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
class ScaledDotProductAttention(nn.Module):
def __init__(self, d_model, dropout=0.1):
super().__init__()
self.temperature = d_model ** 0.5
self.dropout = nn.Dropout(dropout)
def forward(self, q, k, v):
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
attn = F.softmax(attn, dim=-1)
attn = self.dropout(attn)
output = torch.matmul(attn, v)
return output, attn
class MultiHeadAttention(nn.Module):
def __init__(self, n_heads, d_model, dropout=0.1):
super().__init__()
self.n_heads = n_heads
self.d_model = d_model
self.d_k = d_model // n_heads  # per-head query/key dimension
self.w_qs = nn.Linear(d_model, d_model)
self.w_ks = nn.Linear(d_model, d_model)
self.w_vs = nn.Linear(d_model, d_model)
self.fc = nn.Linear(d_model, d_model)
self.attention = ScaledDotProductAttention(d_model)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, q, k, v):
n_batches = q.size(0)
q = self.w_qs(q).view(n_batches, -1, self.n_heads,
self.d_k).transpose(1, 2)
k = self.w_ks(k).view(n_batches, -1, self.n_heads,
self.d_k).transpose(1, 2)
v = self.w_vs(v).view(n_batches, -1, self.n_heads,
self.d_k).transpose(1, 2)
q, attn = self.attention(q, k, v)
q = q.transpose(1, 2).contiguous().view(n_batches, -1, self.d_model)
q = self.dropout(self.fc(q))
return self.layer_norm(q)
class SimpleTransformer(nn.Module):
def __init__(self, nlayer, d_model=512, n_heads=8):
super().__init__()
self.layers = nn.ModuleList(
[MultiHeadAttention(n_heads, d_model) for _ in range(nlayer)])
self.fc = nn.Linear(d_model, 1)
def forward(self, x):
for layer in self.layers:
x = layer(x, x, x)
x = x.mean(dim=1)
x = self.fc(x)
return x
model = SimpleTransformer(2, d_model=128)
input_shape = [1, 16, 128]
x = 0.1*torch.rand(*input_shape, requires_grad=True)  # input_shape already includes the batch dimension
torch.onnx.export(model,
x,
"network.onnx",
export_params=True,
opset_version=10,
do_constant_folding=True,
input_names=['input'],
output_names=['output'],
dynamic_axes={'input': {0: 'batch_size'},
'output': {0: 'batch_size'}})
data_array = ((x).detach().numpy()).reshape([-1]).tolist()
data = dict(input_data=[data_array])
json.dump(data, open("input.json", 'w'))
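# Optional parity check (not part of the original snippet): torch.onnx.export
# traces in eval mode by default, so switch the module to eval mode before
# comparing against onnxruntime. Assumes onnxruntime is installed.
import onnxruntime as rt
model.eval()
with torch.no_grad():
    torch_out = model(x)
sess = rt.InferenceSession("network.onnx")
onnx_out = sess.run(None, {'input': x.detach().numpy()})[0]
print("max abs diff vs torch:", abs(onnx_out - torch_out.numpy()).max())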
"""
Reference: https:
""" |
import json
import math
import sys
import os
import torch
import torch.nn as nn
from dataclasses import dataclass
from torch.nn import functional as F
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
def new_gelu(x):
"""
Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT).
Reference: Gaussian Error Linear Units (GELU) paper: https:
"""
return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x * x * x)))
class LayerNorm(nn.Module):
""" LayerNorm but with an optional bias. PyTorch doesn't support simply bias=False """
def __init__(self, ndim, bias):
super().__init__()
self.weight = nn.Parameter(torch.ones(ndim))
self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None
def forward(self, input):
return F.layer_norm(input, self.weight.shape, self.weight, self.bias, 1e-5)
class CausalSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
assert config.n_embd % config.n_head == 0
self.c_attn = nn.Linear(
config.n_embd, 3 * config.n_embd, bias=config.bias)
self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
self.attn_dropout = nn.Dropout(config.dropout)
self.resid_dropout = nn.Dropout(config.dropout)
self.n_head = config.n_head
self.n_embd = config.n_embd
self.dropout = config.dropout
self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size))
.view(1, 1, config.block_size, config.block_size))
def forward(self, x):
B, T, C = x.size()
q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
att = att.masked_fill(self.bias[:, :, :T, :T] == 0, float(-10))
att = F.softmax(att, dim=-1)
att = self.attn_dropout(att)
y = att @ v
y = y.transpose(1, 2).contiguous().view(B, T, C)
y = self.resid_dropout(self.c_proj(y))
return y
class MLP(nn.Module):
def __init__(self, config):
super().__init__()
self.c_fc = nn.Linear(
config.n_embd, 4 * config.n_embd, bias=config.bias)
self.c_proj = nn.Linear(
4 * config.n_embd, config.n_embd, bias=config.bias)
self.dropout = nn.Dropout(config.dropout)
def forward(self, x):
x = self.c_fc(x)
x = new_gelu(x)
x = self.c_proj(x)
x = self.dropout(x)
return x
class Block(nn.Module):
def __init__(self, config):
super().__init__()
self.ln_1 = LayerNorm(config.n_embd, bias=config.bias)
self.attn = CausalSelfAttention(config)
self.ln_2 = LayerNorm(config.n_embd, bias=config.bias)
self.mlp = MLP(config)
def forward(self, x):
x = x + self.attn(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
@dataclass
class GPTConfig:
block_size: int = 1024
vocab_size: int = 50304
n_layer: int = 12
n_head: int = 12
n_embd: int = 768
dropout: float = 0.0
bias: bool = True
class GPT(nn.Module):
def __init__(self, config):
super().__init__()
assert config.vocab_size is not None
assert config.block_size is not None
self.config = config
self.transformer = nn.ModuleDict(dict(
wte=nn.Embedding(config.vocab_size, config.n_embd),
wpe=nn.Embedding(config.block_size, config.n_embd),
drop=nn.Dropout(config.dropout),
h=nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
ln_f=LayerNorm(config.n_embd, bias=config.bias),
))
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.transformer.wte.weight = self.lm_head.weight
self.block = Block(config)
self.apply(self._init_weights)
for pn, p in self.named_parameters():
if pn.endswith('c_proj.weight'):
torch.nn.init.normal_(
p, mean=0.0, std=0.02/math.sqrt(2 * config.n_layer))
print("number of parameters: %.2fM" % (self.get_num_params()/1e6,))
def get_num_params(self, non_embedding=True):
"""
Return the number of parameters in the model.
For non-embedding count (default), the position embeddings get subtracted.
The token embeddings would too, except due to the parameter sharing these
params are actually used as weights in the final layer, so we include them.
"""
n_params = sum(p.numel() for p in self.parameters())
if non_embedding:
n_params -= self.transformer.wpe.weight.numel()
return n_params
def _init_weights(self, module):
if isinstance(module, nn.Linear):
torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
if module.bias is not None:
torch.nn.init.zeros_(module.bias)
elif isinstance(module, nn.Embedding):
torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
def forward(self, idx, targets=None):
device = idx.device
b, t = idx.size()
assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"
pos = torch.arange(0, t, dtype=torch.long,
device=device).unsqueeze(0)
idx = self.transformer.wte(idx)
pos_emb = self.transformer.wpe(pos)
idx = self.transformer.drop(idx + pos_emb)
for block in self.transformer.h:
idx = block(idx)
idx = self.transformer.ln_f(idx)
idx = self.lm_head(idx)
return idx
gptconf = GPTConfig(block_size=64, vocab_size=65, n_layer=4,
n_head=4, n_embd=64, dropout=0.0, bias=False)
model = GPT(gptconf)
model.get_num_params()
shape = [1, 64]
x = torch.randint(65, (1, 64))
torch_out = model(x)
torch.onnx.export(model, x, "network.onnx",
export_params=True,
opset_version=10,
do_constant_folding=True,
input_names=['input'],
output_names=['output'],
dynamic_axes={'input': {0: 'batch_size'},
'output': {0: 'batch_size'}})
d = ((x).detach().numpy()).reshape([-1]).tolist()
data = dict(input_shapes=[shape],
input_data=[d],
output_data=[((o).detach().numpy()).reshape([-1]).tolist() for o in torch_out])
json.dump(data, open("input.json", 'w'))
# Train a model.
import json
import onnxruntime as rt
from skl2onnx import to_onnx
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier as De
import sk2torch
import torch
iris = load_iris()
X, y = iris.data, iris.target
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y)
clr = De()
clr.fit(X_train, y_train)
torch_model = sk2torch.wrap(clr)
# Convert into ONNX format. Input to the model:
shape = X_train.shape[1:]
x = torch.rand(1, *shape, requires_grad=True)
torch_out = torch_model(x)
# Export the model
torch.onnx.export(torch_model, # model being run
# model input (or a tuple for multiple inputs)
x,
# where to save the model (can be a file or file-like object)
"network.onnx",
export_params=True, # store the trained parameter weights inside the model file
opset_version=10, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=['input'], # the model's input names
output_names=['output'], # the model's output names
dynamic_axes={'input': {0: 'batch_size'}, # variable length axes
'output': {0: 'batch_size'}})
d = ((x).detach().numpy()).reshape([-1]).tolist()
data = dict(input_shapes=[shape],
input_data=[d],
output_data=[((o).detach().numpy()).reshape([-1]).tolist() for o in torch_out])
# Serialize data into file:
json.dump(data, open("input.json", 'w'))
import json
import torch
import torch.nn as nn
import torch.optim as optim
from torch.ao.quantization import QuantStub, DeQuantStub
# define NN architecture
class PredictLiquidationsV0(nn.Module):
def __init__(self):
super().__init__()
self.quant = QuantStub()
self.layer_1 = nn.Linear(in_features=41, out_features=1)
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.layer_1(x)
x = self.dequant(x)
return x
# instantiate the model
model_0 = PredictLiquidationsV0()
# for QAT: attach a qconfig before preparing, otherwise prepare_qat has no
# fake-quant/observer modules to insert
model_0.qconfig = torch.ao.quantization.get_default_qat_qconfig('fbgemm')
torch.ao.quantization.prepare_qat(model_0, inplace=True)
# convert to a QAT model
quantized_model_0 = torch.ao.quantization.convert(
model_0.eval(), inplace=False)
# evaluate quantized_model_0
# ...
x = torch.randn((1, 41), requires_grad=True)
# export as onnx
quantized_model_0.eval()
torch.onnx.export(quantized_model_0,
x,
'network.onnx',
input_names=['input'],
output_names=['output'],
dynamic_axes={'input': {0: 'batch_size'},
'output': {0: 'batch_size'}})
d = ((x).detach().numpy()).reshape([-1]).tolist()
data = dict(input_data=[d],)
# save to input.json
json.dump(data, open("input.json", 'w'))
import json
import os
import numpy as np
import sk2torch
import torch
import ezkl
from torch import nn
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier as Rf
iris = load_iris()
X, y = iris.data, iris.target
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y)
clr = Rf()
clr.fit(X_train, y_train)
trees = []
for tree in clr.estimators_:
trees.append(sk2torch.wrap(tree))
print(trees)
class RandomForest(nn.Module):
def __init__(self, trees):
super(RandomForest, self).__init__()
self.trees = nn.ModuleList(trees)
def forward(self, x):
out = self.trees[0](x)
for tree in self.trees[1:]:
out += tree(x)
return out / len(self.trees)
torch_rf = RandomForest(trees)
for i in range(len(X_test)):
torch_pred = torch_rf(torch.tensor(X_test[i].reshape(1, -1)))
sk_pred = clr.predict(X_test[i].reshape(1, -1))
print(torch_pred, sk_pred[0])
assert torch_pred[0].round() == sk_pred[0]
torch_rf.eval()
shape = X_train.shape[1:]
x = torch.rand(1, *shape, requires_grad=False)
torch_out = torch_rf(x)
torch.onnx.export(torch_rf,
x,
"network.onnx",
export_params=True,
opset_version=11,
do_constant_folding=True,
input_names=['input'],
output_names=['output'],
dynamic_axes={'input': {0: 'batch_size'},
'output': {0: 'batch_size'}})
d = ((x).detach().numpy()).reshape([-1]).tolist()
data = dict(input_shapes=[shape],
input_data=[d],
output_data=[((o).detach().numpy()).reshape([-1]).tolist() for o in torch_out])
json.dump(data, open("input.json", 'w'))
from torch import nn
import torch
import json
import numpy as np
class MyModel(nn.Module):
def __init__(self):
super(MyModel, self).__init__()
def forward(self, x):
m = torch.norm(x, p=1, dim=1)
return m
circuit = MyModel()
x = torch.empty(1, 2, 2, 8).uniform_(0, 1)
out = circuit(x)
print(out)
torch.onnx.export(circuit, x, "network.onnx",
export_params=True, # store the trained parameter weights inside the model file
opset_version=17, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=['input'], # the model's input names
output_names=['output'], # the model's output names
dynamic_axes={'input': {0: 'batch_size'}, # variable length axes
'output': {0: 'batch_size'}})
d1 = ((x).detach().numpy()).reshape([-1]).tolist()
data = dict(
input_data=[d1],
)
# Serialize data into file:
json.dump(data, open("input.json", 'w'))
from torch import nn
import torch
import json
import numpy as np
class MyModel(nn.Module):
def __init__(self):
super(MyModel, self).__init__()
def forward(self, x):
m = torch.norm(x, p=2, dim=1)
return m
circuit = MyModel()
x = torch.empty(1, 2, 2, 8).uniform_(0, 1)
out = circuit(x)
print(out)
torch.onnx.export(circuit, x, "network.onnx",
export_params=True, # store the trained parameter weights inside the model file
opset_version=17, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=['input'], # the model's input names
output_names=['output'], # the model's output names
dynamic_axes={'input': {0: 'batch_size'}, # variable length axes
'output': {0: 'batch_size'}})
d1 = ((x).detach().numpy()).reshape([-1]).tolist()
data = dict(
input_data=[d1],
)
# Serialize data into file:
json.dump(data, open("input.json", 'w'))
from torch import nn
import torch
import json
import numpy as np
class MyModel(nn.Module):
def __init__(self):
super(MyModel, self).__init__()
def forward(self, x):
return x % 0.5
circuit = MyModel()
x = torch.empty(1, 8).uniform_(0, 1)
out = circuit(x)
print(out)
torch.onnx.export(circuit, x, "network.onnx",
export_params=True, # store the trained parameter weights inside the model file
opset_version=17, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=['input'], # the model's input names
output_names=['output'], # the model's output names
dynamic_axes={'input': {0: 'batch_size'}, # variable length axes
'output': {0: 'batch_size'}})
d1 = ((x).detach().numpy()).reshape([-1]).tolist()
data = dict(
input_data=[d1],
)
# Serialize data into file:
json.dump(data, open("input.json", 'w'))
import random
import math
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import json
model = nn.RNN(3, 3)  # input size is 3, hidden size is 3
x = torch.randn(1, 3)  # a single-step sequence of shape (seq_len=1, input_size=3)
print(x)
# Flips the neural net into inference mode
model.eval()
model.to('cpu')
# Export the model
torch.onnx.export(model, # model being run
# model input (or a tuple for multiple inputs)
x,
# where to save the model (can be a file or file-like object)
"network.onnx",
export_params=True, # store the trained parameter weights inside the model file
opset_version=11, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=['input'], # the model's input names
output_names=['output'], # the model's output names
dynamic_axes={'input': {0: 'batch_size'}, # variable length axes
'output': {0: 'batch_size'}})
data_array = ((x).detach().numpy()).reshape([-1]).tolist()
data_json = dict(input_data=[data_array])
print(data_json)
# Serialize data into file:
json.dump(data_json, open("input.json", 'w'))
import io
import json
import numpy as np
import torch
import torch.onnx
import torch.nn as nn
import torch.nn.init as init
class Circuit(nn.Module):
def __init__(self):
super(Circuit, self).__init__()
def forward(self, w, x, y):
return torch.round(w), torch.floor(x), torch.ceil(y)
def main():
torch_model = Circuit()
shape = [3, 2, 3]
w = 0.1*torch.rand(1, *shape, requires_grad=True)
x = 0.1*torch.rand(1, *shape, requires_grad=True)
y = 0.1*torch.rand(1, *shape, requires_grad=True)
torch_out = torch_model(w, x, y)
torch.onnx.export(torch_model,
(w, x, y),
"network.onnx",
export_params=True,
opset_version=16,
do_constant_folding=True,
input_names=['w', 'x', 'y'],
output_names=['output_w', 'output_x', 'output_y'],
dynamic_axes={'x': {0: 'batch_size'},
'y': {0: 'batch_size'},
'w': {0: 'batch_size'},
'output_w': {0: 'batch_size'},
'output_x': {0: 'batch_size'},
'output_y': {0: 'batch_size'}
})
dw = ((w).detach().numpy()).reshape([-1]).tolist()
dx = ((x).detach().numpy()).reshape([-1]).tolist()
dy = ((y).detach().numpy()).reshape([-1]).tolist()
data = dict(input_shapes=[shape, shape, shape],
input_data=[dw, dx, dy],
output_data=[((o).detach().numpy()).reshape([-1]).tolist() for o in torch_out])
json.dump(data, open("input.json", 'w'))
if __name__ == "__main__":
main()
from torch import nn
import torch
import json
import numpy as np
class MyModel(nn.Module):
def __init__(self):
super(MyModel, self).__init__()
def forward(self, w, x, src):
# scatter_elements
return w.scatter(2, x, src)
circuit = MyModel()
w = torch.rand(1, 15, 18)
src = torch.rand(1, 15, 2)
x = torch.randint(0, 15, (1, 15, 2))
torch.onnx.export(circuit, (w, x, src), "network.onnx",
export_params=True, # store the trained parameter weights inside the model file
opset_version=15, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
# the model's input names
input_names=['input', 'input1', 'input2'],
output_names=['output'], # the model's output names
dynamic_axes={'input': {0: 'batch_size'}, # variable length axes
'input1': {0: 'batch_size'},
'input2': {0: 'batch_size'},
'output': {0: 'batch_size'}})
d = ((w).detach().numpy()).reshape([-1]).tolist()
d1 = ((x).detach().numpy()).reshape([-1]).tolist()
d2 = ((src).detach().numpy()).reshape([-1]).tolist()
data = dict(
input_data=[d, d1, d2],
)
# Serialize data into file:
json.dump(data, open("input.json", 'w'))
import torch
import torch.nn as nn
import sys
import json
sys.path.append("..")
class Model(nn.Module):
"""
Just one Linear layer
"""
def __init__(self, configs):
super(Model, self).__init__()
self.seq_len = configs.seq_len
self.pred_len = configs.pred_len
self.channels = configs.enc_in
self.individual = configs.individual
if self.individual:
self.Linear = nn.ModuleList()
for i in range(self.channels):
self.Linear.append(nn.Linear(self.seq_len,self.pred_len))
else:
self.Linear = nn.Linear(self.seq_len, self.pred_len)
def forward(self, x):
if self.individual:
output = torch.zeros([x.size(0),self.pred_len,x.size(2)],dtype=x.dtype).to(x.device)
for i in range(self.channels):
output[:,:,i] = self.Linear[i](x[:,:,i])
x = output
else:
x = self.Linear(x.permute(0,2,1)).permute(0,2,1)
return x
class Configs:
def __init__(self, seq_len, pred_len, enc_in=321, individual=True):
self.seq_len = seq_len
self.pred_len = pred_len
self.enc_in = enc_in
self.individual = individual
model = 'Linear'
seq_len = 10
pred_len = 4
enc_in = 3
configs = Configs(seq_len, pred_len, enc_in, True)
circuit = Model(configs)
x = torch.randn(1, seq_len, enc_in)  # (batch, seq_len, channels)
torch.onnx.export(circuit, x, "network.onnx",
export_params=True,
opset_version=15,
do_constant_folding=True,
input_names=['input'],
output_names=['output'],
dynamic_axes={'input': {0: 'batch_size'},
'output': {0: 'batch_size'}})
d1 = ((x).detach().numpy()).reshape([-1]).tolist()
data = dict(
input_data=[d1],
)
json.dump(data, open("input.json", 'w'))
"""
Reference: https:
""" |
import torch
import json
import math
from torch import nn
from dataclasses import dataclass
from torch.nn import functional as F
@dataclass
class GPTConfig:
block_size: int = 1024
vocab_size: int = 50304
n_layer: int = 12
n_head: int = 12
n_embd: int = 768
dropout: float = 0.0
bias: bool = True
def new_gelu(x):
"""
Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT).
Reference: Gaussian Error Linear Units (GELU) paper: https:
"""
return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x * x * x)))