markdown | code | output | license | path | repo_name |
---|---|---|---|---|---|
Let's create a simple network with the new layers: Convolutional - `nn.Conv2d`, MaxPool - `nn.MaxPool2d` | nn_model = nn.Sequential(
nn.Conv2d(3, 64, 3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(4),
nn.Conv2d(64, 64, 3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(4),
Flattener(),
nn.Linear(64*2*2, 10),
)
nn_model.type(torch.cuda.FloatTensor)
nn_model.to(device)
loss = nn.CrossEntropyLoss().type(torch.cuda.FloatTensor)
optimizer = optim.SGD(nn_model.parameters(), lr=1e-1, weight_decay=1e-4) | _____no_output_____ | MIT | assignments/assignment3/PyTorch_CNN.ipynb | pavel2805/my_dlcoarse_ai |
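The `Flattener` module used above comes from earlier in the assignment and is not defined in this excerpt. A minimal sketch of what it is assumed to do (flatten the conv feature maps into per-sample vectors) could look like this:

import torch.nn as nn

class Flattener(nn.Module):
    # Reshapes (batch, channels, height, width) feature maps into (batch, features) vectors
    def forward(self, x):
        batch_size = x.shape[0]
        return x.view(batch_size, -1)

For the shape arithmetic: SVHN images are 32x32, and the two `nn.MaxPool2d(4)` layers reduce 32 -> 8 -> 2, which is why the final layer is `nn.Linear(64*2*2, 10)`.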
Restore the `compute_accuracy` function from the previous assignment. The only difference in the new version is that it must move the data to the GPU before running it through the model. Do this the same way the `train_model` function does. | def train_model(model, train_loader, val_loader, loss, optimizer, num_epochs):
loss_history = []
train_history = []
val_history = []
for epoch in range(num_epochs):
model.train() # Enter train mode
loss_accum = 0
correct_samples = 0
total_samples = 0
for i_step, (x, y) in enumerate(train_loader):
x_gpu = x.to(device)
y_gpu = y.to(device)
prediction = model(x_gpu)
loss_value = loss(prediction, y_gpu)
optimizer.zero_grad()
loss_value.backward()
optimizer.step()
_, indices = torch.max(prediction, 1)
correct_samples += torch.sum(indices == y_gpu)
total_samples += y.shape[0]
            loss_accum += float(loss_value)  # accumulate a plain float so the computation graph is not retained
        ave_loss = loss_accum / (i_step + 1)  # i_step is zero-based, so add one to get the number of batches
train_accuracy = float(correct_samples) / total_samples
val_accuracy = compute_accuracy(model, val_loader)
loss_history.append(float(ave_loss))
train_history.append(train_accuracy)
val_history.append(val_accuracy)
print("Average loss: %f, Train accuracy: %f, Val accuracy: %f" % (ave_loss, train_accuracy, val_accuracy))
return loss_history, train_history, val_history
def compute_accuracy(model, loader):
"""
Computes accuracy on the dataset wrapped in a loader
Returns: accuracy as a float value between 0 and 1
"""
model.eval() # Evaluation mode
# TODO: Copy implementation from previous assignment
# Don't forget to move the data to device before running it through the model!
raise Exception("Not implemented")
loss_history, train_history, val_history = train_model(nn_model, train_loader, val_loader, loss, optimizer, 5) | _____no_output_____ | MIT | assignments/assignment3/PyTorch_CNN.ipynb | pavel2805/my_dlcoarse_ai |
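One way the `compute_accuracy` TODO above could be completed - a sketch that mirrors how `train_model` moves data to the GPU (it assumes the same global `device` and `torch` import used elsewhere in the notebook):

def compute_accuracy(model, loader):
    """
    Computes accuracy on the dataset wrapped in a loader
    Returns: accuracy as a float value between 0 and 1
    """
    model.eval()  # Evaluation mode
    correct_samples = 0
    total_samples = 0
    with torch.no_grad():  # no gradients are needed for evaluation
        for x, y in loader:
            x_gpu = x.to(device)
            y_gpu = y.to(device)
            prediction = model(x_gpu)
            _, indices = torch.max(prediction, 1)
            correct_samples += torch.sum(indices == y_gpu)
            total_samples += y.shape[0]
    return float(correct_samples) / total_samples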
Data augmentation
When working with images, one particularly important technique is data augmentation - generating additional training data from the original samples. This lets us "enlarge" the training set, which leads to better network performance.
It is important that the augmented data resemble images that could occur in real life; otherwise the benefit of augmentation shrinks and it can even hurt the network.
PyTorch ships several such algorithms, called `transforms`. You can read more about them here - https://pytorch.org/tutorials/beginner/data_loading_tutorial.html#transforms
Below we use the following augmentations:
- ColorJitter - random color change
- RandomHorizontalFlip - horizontal flip with 50% probability
- RandomVerticalFlip - vertical flip with 50% probability
- RandomRotation - random rotation | tfs = transforms.Compose([
transforms.ColorJitter(hue=.50, saturation=.50),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.RandomRotation(50, resample=PIL.Image.BILINEAR),
transforms.ToTensor(),
transforms.Normalize(mean=[0.43,0.44,0.47],
std=[0.20,0.20,0.20])
])
# Create augmented train dataset
data_aug_train = dset.SVHN('./',
transform=tfs
)
train_aug_loader = torch.utils.data.DataLoader(data_aug_train, batch_size=batch_size,
sampler=train_sampler) | _____no_output_____ | MIT | assignments/assignment3/PyTorch_CNN.ipynb | pavel2805/my_dlcoarse_ai |
Let's visualize the augmentation results (in general, looking at the generated data is always very useful). | # TODO: Visualize some augmented images!
# hint: you can create new datasets and loaders to accomplish this
# Based on the visualizations, should we keep all the augmentations?
tfs = transforms.Compose([
transforms.ColorJitter(hue=.20, saturation=.20),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.RandomRotation(10, resample=PIL.Image.BILINEAR),
])
data_aug_vis = dset.SVHN('./',
transform=tfs
)
plt.figure(figsize=(30, 3))
for i, (x, y) in enumerate(data_aug_vis):
if i == 10:
break
plt.subplot(1, 10, i+1)
plt.grid(False)
plt.imshow(x)
plt.axis('off') | _____no_output_____ | MIT | assignments/assignment3/PyTorch_CNN.ipynb | pavel2805/my_dlcoarse_ai |
Are all augmentations equally useful on this data set? Could some of them confuse the model? Pick only the appropriate ones. | # TODO:
tfs = transforms.Compose([
# TODO: Add good augmentations
transforms.ToTensor(),
transforms.Normalize(mean=[0.43,0.44,0.47],
std=[0.20,0.20,0.20])
])
# TODO create new instances of loaders with the augmentations you chose
train_aug_loader = None
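# One reasonable choice (a sketch, not the only valid answer): SVHN digits are never
# mirrored or upside down, so the horizontal/vertical flips used earlier would likely
# confuse the model, while mild color jitter and small rotations are safe. This reuses
# dset, transforms, PIL, batch_size and train_sampler from earlier cells.
tfs = transforms.Compose([
    transforms.ColorJitter(hue=.20, saturation=.20),              # mild color changes only
    transforms.RandomRotation(10, resample=PIL.Image.BILINEAR),   # small rotations only
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.43, 0.44, 0.47],
                         std=[0.20, 0.20, 0.20])
])
data_aug_train = dset.SVHN('./', transform=tfs)
train_aug_loader = torch.utils.data.DataLoader(data_aug_train, batch_size=batch_size,
                                               sampler=train_sampler)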
# Finally, let's train with augmentations!
# Note we shouldn't use augmentations on validation
loss_history, train_history, val_history = train_model(nn_model, train_aug_loader, val_loader, loss, optimizer, 5) | _____no_output_____ | MIT | assignments/assignment3/PyTorch_CNN.ipynb | pavel2805/my_dlcoarse_ai |
LeNet
Let's try to implement the classic convolutional neural network architecture proposed by Yann LeCun in 1998. In its time it achieved impressive results on MNIST - let's see how it handles SVHN.
It is described in the paper ["Gradient Based Learning Applied to Document Recognition"](http://yann.lecun.com/exdb/publis/pdf/lecun-01a.pdf); try to read the key parts and implement the proposed architecture in PyTorch.
You do **not** need to implement the LeNet layers and loss function that are missing from PyTorch - just take their sizes and translate them into the Convolutional, Pooling and Fully Connected layers we already know.
If the paper is not very clear, you can simply google LeNet and work out the details :) | # TODO: Implement LeNet-like architecture for SVHN task
lenet_model = nn.Sequential(
)
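# A possible LeNet-style stack for 32x32x3 SVHN inputs (a sketch: the classic
# 6-16-120-84-10 sizes, with ReLU and max pooling in place of the original
# activations and average pooling, reusing the Flattener module from above):
lenet_model = nn.Sequential(
    nn.Conv2d(3, 6, 5),        # 32x32 -> 28x28
    nn.ReLU(inplace=True),
    nn.MaxPool2d(2),           # 28x28 -> 14x14
    nn.Conv2d(6, 16, 5),       # 14x14 -> 10x10
    nn.ReLU(inplace=True),
    nn.MaxPool2d(2),           # 10x10 -> 5x5
    Flattener(),
    nn.Linear(16 * 5 * 5, 120),
    nn.ReLU(inplace=True),
    nn.Linear(120, 84),
    nn.ReLU(inplace=True),
    nn.Linear(84, 10),
)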
lenet_model.type(torch.cuda.FloatTensor)
lenet_model.to(device)
loss = nn.CrossEntropyLoss().type(torch.cuda.FloatTensor)
optimizer = optim.SGD(lenet_model.parameters(), lr=1e-1, weight_decay=1e-4)
# Let's train it!
loss_history, train_history, val_history = train_model(lenet_model, train_aug_loader, val_loader, loss, optimizer, 10) | _____no_output_____ | MIT | assignments/assignment3/PyTorch_CNN.ipynb | pavel2805/my_dlcoarse_ai |
Hyperparameter tuning | # The key hyperparameters we're going to tune are learning speed, annealing rate and regularization
# We also encourage you to try different optimizers as well
Hyperparams = namedtuple("Hyperparams", ['learning_rate', 'anneal_epochs', 'reg'])
RunResult = namedtuple("RunResult", ['model', 'train_history', 'val_history', 'final_val_accuracy'])
learning_rates = [1e0, 1e-1, 1e-2, 1e-3, 1e-4]
anneal_coeff = 0.2
anneal_epochs = [1, 5, 10, 15, 20, 50]
reg = [1e-3, 1e-4, 1e-5, 1e-7]
batch_size = 64
epoch_num = 10
# Record all the runs here
# Key should be Hyperparams and values should be RunResult
run_record = {}
# Use grid search or random search and record all runs in run_record dictionnary
# Important: perform search in logarithmic space!
# TODO: Your code here!
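# One possible way to fill this TODO (a sketch, not the reference solution): a simple
# random search over the grid defined above. It reuses Hyperparams, RunResult, Flattener,
# train_model, train_aug_loader, val_loader and loss from earlier cells, and anneals the
# learning rate by calling train_model in chunks of anneal_epochs.
import random
for _ in range(10):  # number of random trials
    params = Hyperparams(learning_rate=random.choice(learning_rates),
                         anneal_epochs=random.choice(anneal_epochs),
                         reg=random.choice(reg))
    model = nn.Sequential(
        nn.Conv2d(3, 64, 3, padding=1), nn.ReLU(inplace=True), nn.MaxPool2d(4),
        nn.Conv2d(64, 64, 3, padding=1), nn.ReLU(inplace=True), nn.MaxPool2d(4),
        Flattener(),
        nn.Linear(64 * 2 * 2, 10),
    ).to(device)
    optimizer = optim.SGD(model.parameters(), lr=params.learning_rate,
                          weight_decay=params.reg)
    train_history, val_history = [], []
    for start in range(0, epoch_num, params.anneal_epochs):
        epochs = min(params.anneal_epochs, epoch_num - start)
        _, th, vh = train_model(model, train_aug_loader, val_loader, loss, optimizer, epochs)
        train_history += th
        val_history += vh
        for group in optimizer.param_groups:
            group['lr'] *= anneal_coeff  # anneal the learning rate between chunks
    run_record[params] = RunResult(model, train_history, val_history, val_history[-1])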
best_val_accuracy = None
best_hyperparams = None
best_run = None
for hyperparams, run_result in run_record.items():
if best_val_accuracy is None or best_val_accuracy < run_result.final_val_accuracy:
best_val_accuracy = run_result.final_val_accuracy
best_hyperparams = hyperparams
best_run = run_result
print("Best validation accuracy: %4.2f, best hyperparams: %s" % (best_val_accuracy, best_hyperparams))
| _____no_output_____ | MIT | assignments/assignment3/PyTorch_CNN.ipynb | pavel2805/my_dlcoarse_ai |
Free-form exercise - let's catch up with and overtake LeNet!
Try to find an architecture and training settings that beat our baselines.
Things worth trying:
- BatchNormalization (for convolution layers it is called [batchnorm2d](https://pytorch.org/docs/stable/nn.html#batchnorm2d) in PyTorch)
- Changing the number of layers and their width
- Changing the number of training epochs
- Trying other augmentations | best_model = None | _____no_output_____ | MIT | assignments/assignment3/PyTorch_CNN.ipynb | pavel2805/my_dlcoarse_ai |
The final chord - let's check the best model on the test set.
For a change, you write the code to run the model on the test set yourself.
As a result, you should end up with a trained model that shows more than **90%** accuracy on the test set. As usual, the best result in the group earns extra points! | # TODO Write the code to compute accuracy on test set
final_test_accuracy = 0.0
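# A sketch of one way to compute this (assumptions: the SVHN test split and the
# normalization below mirror how the train/val data were prepared earlier, and
# best_model holds the chosen trained model):
data_test = dset.SVHN('./', split='test',
                      transform=transforms.Compose([
                          transforms.ToTensor(),
                          transforms.Normalize(mean=[0.43, 0.44, 0.47],
                                               std=[0.20, 0.20, 0.20])
                      ]))
test_loader = torch.utils.data.DataLoader(data_test, batch_size=batch_size)
final_test_accuracy = compute_accuracy(best_model, test_loader)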
print("Final test accuracy - ", final_test_accuracy) | _____no_output_____ | MIT | assignments/assignment3/PyTorch_CNN.ipynb | pavel2805/my_dlcoarse_ai |
Deriving the kinematic maps for the Baxter robot | import sympy as sy
from sympy import sin, cos, pi, sqrt
import math
#from math import pi
q = sy.Matrix(sy.MatrixSymbol('q', 7, 1))
L, h, H, L0, L1, L2, L3, L4, L5, L6, R = sy.symbols('L, h, H, L0, L1, L2, L3, L4, L5, L6, R')
# L = 278e-3
# h = 64e-3
# H = 1104e-3
# L0 = 270.35e-3
# L1 = 69e-3
# L2 = 364.35e-3
# L3 = 69e-3
# L4 = 374.29e-3
# L5 = 10e-3
# L6 = 368.3e-3
def HTM(alpha, a, d, theta):
return sy.Matrix([
[cos(theta), -sin(theta), 0, a],
[sin(theta)*cos(alpha), cos(theta)*cos(alpha), -sin(alpha), -d*sin(alpha)],
[sin(theta)*sin(alpha), cos(theta)*sin(alpha), cos(alpha), d*cos(alpha)],
[0, 0, 0, 1],
])
DHparams = (
(0, 0, 0, q[0, 0]),
(-pi/2, L1, 0, q[1, 0]+pi/2),
(pi/2, 0, L2, q[2, 0]),
(-pi/2, L3, 0, q[3, 0]),
(pi/2, 0, L4, q[4, 0]),
(-pi/2, L5, 0, q[5, 0]),
(pi/2, 0, 0, q[6, 0]),
)
T_RL_W0 = sy.Matrix([
[-sqrt(2)/2, sqrt(2)/2, 0, -L,],
[-sqrt(2)/2, -sqrt(2)/2, 0, -h,],
[0, 0, 1, H,],
[0, 0, 0, 1,],
])
T_0_RL = sy.Matrix([
[1, 0, 0, 0,],
[0, 1, 0, 0,],
[0, 0, 1, L0,],
[0, 0, 0, 1,],
])
Ts = [HTM(*dhparam) for dhparam in DHparams]
T_GR_7 = sy.Matrix([
[1, 0, 0, 0,],
[0, 1, 0, 0,],
[0, 0, 1, L6,],
[0, 0, 0, 1,],
])
### All individual transforms (before chaining) ###
T_all = [T_RL_W0, T_0_RL]
T_all += Ts
T_all.append(T_GR_7)
### Chained (absolute) transforms ###
for i, T in enumerate(T_all):
if i == 0:
T_abs = [T]
else:
T_abs.append(T_abs[i-1] @ T)
os = [T[0:3, 3:4] for T in T_abs]
Rxs = [T[0:3, 0:1] for T in T_abs]
Rys = [T[0:3, 1:2] for T in T_abs]
Rzs = [T[0:3, 2:3] for T in T_abs]
Jos = [o.jacobian(q) for o in os]
JRxs = [r.jacobian(q) for r in Rxs]
JRys = [r.jacobian(q) for r in Rys]
JRzs = [r.jacobian(q) for r in Rzs]
t = sy.Symbol("t")
q1 = sy.Function("q1")
q2 = sy.Function("q2")
q3 = sy.Function("q3")
q4 = sy.Function("q4")
q5 = sy.Function("q5")
q6 = sy.Function("q6")
q7 = sy.Function("q7")
dq = sy.Matrix(sy.MatrixSymbol('dq', 7, 1))
T_abs_ = []
for T in T_abs:
T_ = T.subs([
(q[0,0], q1(t)),
(q[1,0], q2(t)),
(q[2,0], q3(t)),
(q[3,0], q4(t)),
(q[4,0], q5(t)),
(q[5,0], q6(t)),
(q[6,0], q7(t)),
])
T_abs_.append(T_)
os_ = [T[0:3, 3:4] for T in T_abs_]
Rxs_ = [T[0:3, 0:1] for T in T_abs_]
Rys_ = [T[0:3, 1:2] for T in T_abs_]
Rzs_ = [T[0:3, 2:3] for T in T_abs_]
q_ = sy.Matrix([
[q1(t)],
[q2(t)],
[q3(t)],
[q4(t)],
[q5(t)],
[q6(t)],
[q7(t)],
])
Jos_ = [o.jacobian(q_) for o in os_]
JRxs_ = [r.jacobian(q_) for r in Rxs_]
JRys_ = [r.jacobian(q_) for r in Rys_]
JRzs_ = [r.jacobian(q_) for r in Rzs_]
Jos_dot_ = [sy.diff(J, t) for J in Jos_]
JRxs_dot_ = [sy.diff(J, t) for J in JRxs_]
JRys_dot_ = [sy.diff(J, t) for J in JRys_]
JRzs_dot_ = [sy.diff(J, t) for J in JRzs_]
Jos_dot = []
JRxs_dot = []
JRys_dot = []
JRzs_dot = []
for Js, newJs in zip((Jos_dot_, JRxs_dot_, JRys_dot_, JRzs_dot_), (Jos_dot, JRxs_dot, JRys_dot, JRzs_dot)):
for J in Js:
newJs.append(J.subs([
(sy.Derivative(q1(t),t), dq[0, 0]),
(sy.Derivative(q2(t),t), dq[1, 0]),
(sy.Derivative(q3(t),t), dq[2, 0]),
(sy.Derivative(q4(t),t), dq[3, 0]),
(sy.Derivative(q5(t),t), dq[4, 0]),
(sy.Derivative(q6(t),t), dq[5, 0]),
(sy.Derivative(q7(t),t), dq[6, 0]),
(q1(t), q[0, 0]),
(q2(t), q[1, 0]),
(q3(t), q[2, 0]),
(q4(t), q[3, 0]),
(q5(t), q[4, 0]),
(q6(t), q[5, 0]),
(q7(t), q[6, 0]),
]))
os = [sy.expand(e) for e in os]
Rxs = [sy.expand(e) for e in Rxs]
Rys = [sy.expand(e) for e in Rys]
Rzs = [sy.expand(e) for e in Rzs]
Jos = [sy.expand(e) for e in Jos]
JRxs = [sy.expand(e) for e in JRxs]
JRys = [sy.expand(e) for e in JRys]
JRzs = [sy.expand(e) for e in JRzs]
Jos_dot = [sy.expand(e) for e in Jos_dot]
JRxs_dot = [sy.expand(e) for e in JRxs_dot]
JRys_dot = [sy.expand(e) for e in JRys_dot]
JRzs_dot = [sy.expand(e) for e in JRzs_dot]
expr_all = [os, Rxs, Rys, Rzs, Jos, JRxs, JRys, JRzs, Jos_dot, JRxs_dot, JRys_dot, JRzs_dot]
names = ["W0", "BR"] + [str(i) for i in range(7)] + ["ee"]
expr_name = [
["o_" + n for n in names],
["rx_" + n for n in names],
["ry_" + n for n in names],
["rz_" + n for n in names],
["jo_" + n for n in names],
["jrx_" + n for n in names],
["jry_" + n for n in names],
["jrz_" + n for n in names],
["jo_" + n + "_dot" for n in names],
["jrx_" + n + "_dot" for n in names],
["jry_" + n + "_dot" for n in names],
["jrz_" + n + "_dot" for n in names],
]
from sympy.printing import cxxcode
from sympy.utilities.codegen import codegen
import os as OS
original = "cpp_"
done = "cpp"
OS.makedirs(original, exist_ok=True)
OS.makedirs(done, exist_ok=True)
def gen_cpp_code(expr, name):
code_txt = cxxcode(expr, assign_to="out", standard="c++17")
with open(name+".cpp", "w") as f:
f.write(code_txt)
def gen_c(expr, name, dir=""):
[(c_name, c_code), (h_name, c_header)] = codegen(
name_expr=(name, expr),
language="C",
project= name + "project",
to_files=False
)
f = open(dir+c_name, 'w')
f.write(c_code)
f.close()
f = open(dir+h_name, 'w')
f.write(c_header)
f.close()
return c_code, c_header
names = ["W0", "BR"] + [str(i) for i in range(7)] + ["ee"]
with open(original+"/htm.cpp", "w") as fc, open(original+"/htm.hpp", "w") as fh:
for i, o in enumerate(os):
c, h = gen_c(o, name="o_"+names[i])
fc.write(c)
fh.write(h)
for i, o in enumerate(Rxs):
c, h = gen_c(o, name="rx_"+names[i])
fc.write(c)
fh.write(h)
for i, o in enumerate(Rys):
c, h = gen_c(o, name="ry_"+names[i])
fc.write(c)
fh.write(h)
for i, o in enumerate(Rzs):
c, h = gen_c(o, name="rz_"+names[i])
fc.write(c)
fh.write(h)
with open(original+"/Jos.cpp", "w") as fc, open(original+"/Jos.hpp", "w") as fh:
for i, o in enumerate(Jos):
c, h = gen_c(o, name="jo_"+names[i])
fc.write(c)
fh.write(h)
with open(original+"/JRxs.cpp", "w") as fc, open(original+"/JRxs.hpp", "w") as fh:
for i, o in enumerate(JRxs):
c, h = gen_c(o, name="jrx_"+names[i])
fc.write(c)
fh.write(h)
with open(original+"/JRys.cpp", "w") as fc, open(original+"/JRys.hpp", "w") as fh:
    for i, o in enumerate(JRys):
c, h = gen_c(o, name="jry_"+names[i])
fc.write(c)
fh.write(h)
with open(original+"/JRzs.cpp", "w") as fc, open(original+"/JRzs.hpp", "w") as fh:
for i, o in enumerate(JRzs):
c, h = gen_c(o, name="jrz_"+names[i])
fc.write(c)
fh.write(h)
with open(original+"/Jo_dots.cpp", "w") as fc, open(original+"/Jo_dots.hpp", "w") as fh:
for i, o in enumerate(Jos_dot):
c, h = gen_c(o, name="jo_"+names[i]+"_dot")
fc.write(c)
fh.write(h)
with open(original+"/JRx_dots.cpp", "w") as fc, open(original+"/JRx_dots.hpp", "w") as fh:
for i, o in enumerate(JRxs_dot):
c, h = gen_c(o, name="jrx_"+names[i]+"_dot")
fc.write(c)
fh.write(h)
with open(original+"/JRy_dots.cpp", "w") as fc, open(original+"/JRy_dots.hpp", "w") as fh:
    for i, o in enumerate(JRys_dot):
c, h = gen_c(o, name="jry_"+names[i]+"_dot")
fc.write(c)
fh.write(h)
with open(original+"/JRz_dots.cpp", "w") as fc, open(original+"/JRz_dots.hpp", "w") as fh:
for i, o in enumerate(JRzs_dot):
c, h = gen_c(o, name="jrz_"+names[i]+"_dot")
fc.write(c)
fh.write(h)
### This is the real (final) code-generation pass ###
from sympy.printing import cxxcode
from sympy.utilities.codegen import codegen
import os as OS
original = "cpp_original"
done = "cpp_done"
OS.makedirs(original, exist_ok=True)
OS.makedirs(original+"/include", exist_ok=True)
OS.makedirs(original+"/src", exist_ok=True)
def gen_cpp_code(expr, name, dir):
[(c_name, c_code), (h_name, c_header)] = codegen(
name_expr=(name, expr),
language="C",
project= name + "_BY_SYMPY_",
to_files=False
)
f = open(dir+"/src/"+name+".cpp", 'w')
f.write(c_code)
f.close()
f = open(dir+"/include/"+h_name.replace(".h", "")+".hpp", 'w')
f.write(c_header)
f.close()
for exprs, names in zip(expr_all, expr_name):
for expr, name in zip(exprs, names):
gen_cpp_code(expr, name, original)
com = "#ifndef BAXTER_HPP\n" \
+ "#define BAXTER_HPP\n" \
+ "#include<eigen3/Eigen/Core>\n" \
+ "namespace baxter\n" \
+ "{\n" \
+ " using Eigen::VectorXd;\n" \
+ " using Eigen::MatrixXd;\n" \
+ " static const double L = 278e-3;\n" \
+ " static const double h = 64e-3;\n" \
+ " static const double H = 1104e-3;\n" \
+ " static const double L0 = 270.35e-3;\n" \
+ " static const double L1 = 69e-3;\n" \
+ " static const double L2 = 364.35e-3;\n" \
+ " static const double L3 = 69e-3;\n" \
+ " static const double L4 = 374.29e-3;\n" \
+ " static const double L5 = 10e-3;\n" \
+ " static const double L6 = 368.3e-3;\n"
for ns in expr_name[0:4]:
for n in ns:
com += (" void " + n + "(const VectorXd& q, VectorXd& out);\n")
for ns in expr_name[4:8]:
for n in ns:
com += (" void " + n + "(const VectorXd& q, MatrixXd& out);\n")
for ns in expr_name[8:12]:
for n in ns:
com += (" void " + n + "(const VectorXd& q, const VectorXd& q_dot, MatrixXd& out);\n")
com += "};\n#endif"
### Conversion of the generated C code to Eigen-based C++ ###
import re
done = "cpp_done"
OS.makedirs(done, exist_ok=True)
OS.makedirs(done+"/include", exist_ok=True)
OS.makedirs(done+"/src", exist_ok=True)
pat = r'out_(.+?)\['
pat2 = r'out_(.+?)\)'
pat3 = r'\((.+?)\) {'
pat4 = r'#(.+?).h\"'
sout = ["out[" + str(i) + "]" for i in range(21)]
sout_2 = ["out(0,0)","out(0,1)","out(0,2)","out(0,3)","out(0,4)","out(0,5)","out(0,6)","out(1,0)","out(1,1)","out(1,2)","out(1,3)","out(1,4)","out(1,5)","out(1,6)","out(2,0)","out(2,1)","out(2,2)","out(2,3)","out(2,4)","out(2,5)","out(2,6)"]
with open("cpp_done/include/baxter.hpp", "w") as f:
f.write(com)
def common_trans(line):
r = re.findall(pat, line)
r2 = re.findall(pat2, line)
if len(r) != 0:
line = line.replace("out_" + r[0], "out")
if len(r2) != 0:
line = line.replace("out_" + r2[0], "out")
line = line.replace("q[0]", "q(0)")
line = line.replace("q[1]", "q(1)")
line = line.replace("q[2]", "q(2)")
line = line.replace("q[3]", "q(3)")
line = line.replace("q[4]", "q(4)")
line = line.replace("q[5]", "q(5)")
line = line.replace("q[6]", "q(6)")
# line = line.replace("double L, ", "")
# line = line.replace("double h, ", "")
# line = line.replace("double H, ", "")
# line = line.replace("double L0, ", "")
# line = line.replace("double L1, ", "")
# line = line.replace("double L2, ", "")
# line = line.replace("double L3, ", "")
# line = line.replace("double L4, ", "")
# line = line.replace("double L5, ", "")
# line = line.replace("double L6, ", "")
r3 = re.findall(pat3, line)
if "j" not in name:
if len(r3) != 0:
print("("+r3[0]+")")
#line = line.replace("("+r3[0]+") {", "(const VectorXd& q, VectorXd& out) {")
line = line.replace("("+r3[0]+") {", "(const VectorXd& q, double L, double h, double H, double L0, double L1, double L2, double L3, double L4, double L5, double L6, VectorXd& out) {")
line = line.replace("double *out", "VectorXd& out")
line = line.replace("out[0]", "out(0)")
line = line.replace("out[1]", "out(1)")
line = line.replace("out[2]", "out(2)")
else:
if "dot" in name:
if len(r3) != 0:
line = line.replace(r3[0], "const VectorXd& q, const VectorXd& dq, double L, double h, double H, double L0, double L1, double L2, double L3, double L4, double L5, double L6, MatrixXd& out")
else:
if len(r3) != 0:
print(name)
line = line.replace(r3[0], "const VectorXd& q, double L, double h, double H, double L0, double L1, double L2, double L3, double L4, double L5, double L6, MatrixXd& out")
line = line.replace("double *out", "MatrixXd& out")
for s, t in zip(sout, sout_2):
line = line.replace(s, t)
return line
def trans_cpp(name):
origin = "cpp_original/src/" + name + ".cpp"
done = "cpp_done/src/" + name + ".cpp"
with open(origin, "r") as f, open(done, "w") as g:
file_data = f.readlines()
for line in file_data:
line = line.replace('#include <math.h>', '#include <cmath>\nusing std::cos;\nusing std::sin;\nusing std::sqrt;\n')
#line = line.replace("#include \"", "#include \"../../include/baxter/")
#line = line.replace(".h\"", ".hpp\"\n#include \"../../include/baxter/common.hpp\"\n")
r4 = re.findall(pat4, line)
if len(r4) != 0:
line = line.replace("#"+r4[0]+".h\"", "#include \"../include/baxter.hpp\"\n")
line = line.replace("void ", "void baxter::")
line = line.replace("double *q", "const VectorXd& q").replace("double *dq", "const VectorXd& dq")
line = common_trans(line)
g.write(line)
# def trans_hpp(name):
# origin = "cpp_original/include/" + name + ".hpp"
# done = "cpp_done/include/" + name + ".hpp"
# with open(origin, "r") as f, open(done, "w") as g:
# file_data = f.readlines()
# for line in file_data:
# line = line.replace("void ", "#include<eigen3/Eigen/Core>\nnamespace baxter\n{\nusing Eigen::VectorXd;\nusing Eigen::MatrixXd;\nvoid ").replace(");", ");\n}\n")
# line = line.replace("double *q", "const VectorXd& q").replace("double *dq", "const VectorXd& dq")
# line = common_trans(line)
# g.write(line)
for names in expr_name:
for name in names:
trans_cpp(name)
#trans_hpp(name)
hoho = "void baxter::o_W0(VectorXd& out) {"
# Generate Python code (class-based)
from sympy.printing.numpy import NumPyPrinter
names = ["W0", "BR"] + [str(i) for i in range(7)] + ["ee"]
common_w = "import numpy as np\nfrom math import cos as c\nfrom math import sin as s\nfrom math import tan as t\nfrom math import sqrt as sq\nfrom base import Base\n"
with open("src_py_/htm.py", "w") as f:
f.write(common_w + "class HTM(Base):\n")
for name, z in zip(names, os):
numpy_word = " def o_" + name + "(self, q):\n return "
f.write(numpy_word)
f.write(NumPyPrinter().doprint(z))
f.write("\n")
for name, z in zip(names, Rxs):
numpy_word = " def rx_" + name + "(self, q):\n return "
f.write(numpy_word)
f.write(NumPyPrinter().doprint(z))
f.write("\n")
for name, z in zip(names, Rys):
numpy_word = " def ry_" + name + "(self, q):\n return "
f.write(numpy_word)
f.write(NumPyPrinter().doprint(z))
f.write("\n")
for name, z in zip(names, Rzs):
numpy_word = " def rz_" + name + "(self, q):\n return "
f.write(numpy_word)
f.write(NumPyPrinter().doprint(z))
f.write("\n")
with open("src_py_/Jos.py", "w") as f:
f.write(common_w + "class Jo(Base):\n")
for name, z in zip(names, Jos):
numpy_word = " def jo_" + name + "(self, q):\n return "
f.write(numpy_word)
f.write(NumPyPrinter().doprint(z))
f.write("\n")
with open("src_py_/JRxs.py", "w") as f:
f.write(common_w + "class JRx(Base):\n")
for name, z in zip(names, JRxs):
numpy_word = " def jrx_" + name + "(self, q):\n return "
f.write(numpy_word)
f.write(NumPyPrinter().doprint(z))
f.write("\n")
with open("src_py_/JRys.py", "w") as f:
f.write(common_w + "class JRy(Base):\n")
for name, z in zip(names, JRys):
numpy_word = " def jry_" + name + "(self, q):\n return "
f.write(numpy_word)
f.write(NumPyPrinter().doprint(z))
f.write("\n")
with open("src_py_/JRzs.py", "w") as f:
f.write(common_w + "class JRz(Base):\n")
for name, z in zip(names, JRzs):
numpy_word = " def jrz_" + name + "(self, q):\n return "
f.write(numpy_word)
f.write(NumPyPrinter().doprint(z))
f.write("\n")
with open("src_py_/Jo_dots.py", "w") as f:
f.write(common_w + "class Jo_dot(Base):\n")
for name, z in zip(names, Jos_dot):
numpy_word = " def jo_" + name + "_dot(self, q, dq):\n return "
f.write(numpy_word)
f.write(NumPyPrinter().doprint(z))
f.write("\n")
with open("src_py_/JRx_dots.py", "w") as f:
f.write(common_w + "class JRx_dot(Base):\n")
for name, z in zip(names, JRxs_dot):
numpy_word = " def jrx_" + name + "_dot(self, q, dq):\n return "
f.write(numpy_word)
f.write(NumPyPrinter().doprint(z))
f.write("\n")
with open("src_py_/JRy_dots.py", "w") as f:
f.write(common_w + "class JRy_dot(Base):\n")
    for name, z in zip(names, JRys_dot):
numpy_word = " def jry_" + name + "_dot(self, q, dq):\n return "
f.write(numpy_word)
f.write(NumPyPrinter().doprint(z))
f.write("\n")
with open("src_py_/JRz_dots.py", "w") as f:
f.write(common_w + "class JRz_dot(Base):\n")
    for name, z in zip(names, JRzs_dot):
numpy_word = " def jrz_" + name + "_dot(self, q, dq):\n return "
f.write(numpy_word)
f.write(NumPyPrinter().doprint(z))
f.write("\n")
def translate_hoge(original, done):
with open(original, "r") as f, open(done, "w") as g:
file_data = f.readlines()
for line in file_data:
line = line.replace('numpy', 'np').replace('1/2', '0.5').replace('(0.5)', '0.5')
line = line.replace('np.cos', 'c').replace('np.sin', 's').replace('np.sqrt', 'sq')
#line = line.replace('L', 'self.L').replace('h', 'self.h').replace('H', 'self.H')
line = line.replace('import np as np', 'import numpy as np')
line = line.replace('matself.h', 'math')
g.write(line)
translate_hoge("src_py_/htm.py", "src_py/htm.py")
translate_hoge("src_py_/Jos.py", "src_py/Jos.py")
translate_hoge("src_py_/JRxs.py", "src_py/JRxs.py")
translate_hoge("src_py_/JRys.py", "src_py/JRys.py")
translate_hoge("src_py_/JRzs.py", "src_py/JRzs.py")
translate_hoge("src_py_/Jo_dots.py", "src_py/Jo_dots.py")
translate_hoge("src_py_/JRx_dots.py", "src_py/JRx_dots.py")
translate_hoge("src_py_/JRy_dots.py", "src_py/JRy_dots.py")
translate_hoge("src_py_/JRz_dots.py", "src_py/JRz_dots.py")
from sympy.printing.numpy import NumPyPrinter
names = ["W0", "BR"] + [str(i) for i in range(7)] + ["ee"]
common_w = "import numpy as np\nfrom math import cos as c\nfrom math import sin as s\nfrom math import tan as ta\nfrom math import sqrt as sq\n"
numba_word_q = "@njit(\"f8[:, :](f8[:, :])\")\n"
numba_word_q_dq = "@njit(\"f8[:, :](f8[:, :], f8[:, :])\")\n"
with open("src_py_/htm.py", "w") as f:
f.write(common_w)
for name, z in zip(names, os):
numpy_word = "def o_" + name + "(q):\n return "
#f.write(numba_word_q)
f.write(numpy_word)
f.write(NumPyPrinter().doprint(z))
f.write("\n")
for name, z in zip(names, Rxs):
numpy_word = "def rx_" + name + "(q):\n return "
#f.write(numba_word_q)
f.write(numpy_word)
f.write(NumPyPrinter().doprint(z))
f.write("\n")
for name, z in zip(names, Rys):
numpy_word = "def ry_" + name + "(q):\n return "
#f.write(numba_word_q)
f.write(numpy_word)
f.write(NumPyPrinter().doprint(z))
f.write("\n")
for name, z in zip(names, Rzs):
numpy_word = "def rz_" + name + "(q):\n return "
#f.write(numba_word_q)
f.write(numpy_word)
f.write(NumPyPrinter().doprint(z))
f.write("\n")
with open("src_py_/Jos.py", "w") as f:
f.write(common_w)
for name, z in zip(names, Jos):
numpy_word = "def jo_" + name + "(q):\n return "
#f.write(numba_word_q)
f.write(numpy_word)
f.write(NumPyPrinter().doprint(z))
f.write("\n")
with open("src_py_/JRxs.py", "w") as f:
f.write(common_w)
for name, z in zip(names, JRxs):
numpy_word = "def jrx_" + name + "(q):\n return "
#f.write(numba_word_q)
f.write(numpy_word)
f.write(NumPyPrinter().doprint(z))
f.write("\n")
with open("src_py_/JRys.py", "w") as f:
f.write(common_w)
for name, z in zip(names, JRys):
numpy_word = "def jry_" + name + "(q):\n return "
#f.write(numba_word_q)
f.write(numpy_word)
f.write(NumPyPrinter().doprint(z))
f.write("\n")
with open("src_py_/JRzs.py", "w") as f:
f.write(common_w)
for name, z in zip(names, JRzs):
numpy_word = "def jrz_" + name + "(q):\n return "
#f.write(numba_word_q)
f.write(numpy_word)
f.write(NumPyPrinter().doprint(z))
f.write("\n")
with open("src_py_/Jo_dots.py", "w") as f:
f.write(common_w)
for name, z in zip(names, Jos_dot):
numpy_word = "def jo_" + name + "_dot(q, dq):\n return "
#f.write(numba_word_q_dq)
f.write(numpy_word)
f.write(NumPyPrinter().doprint(sy.simplify(z)))
f.write("\n")
with open("src_py_/JRx_dots.py", "w") as f:
f.write(common_w)
for name, z in zip(names, JRxs_dot):
numpy_word = "def jrx_" + name + "_dot(q, dq):\n return "
#f.write(numba_word_q_dq)
f.write(numpy_word)
f.write(NumPyPrinter().doprint(sy.simplify(z)))
f.write("\n")
with open("src_py_/JRy_dots.py", "w") as f:
f.write(common_w)
    for name, z in zip(names, JRys_dot):
numpy_word = "def jry_" + name + "_dot(q, dq):\n return "
#f.write(numba_word_q_dq)
f.write(numpy_word)
f.write(NumPyPrinter().doprint(sy.simplify(z)))
f.write("\n")
with open("src_py_/JRz_dots.py", "w") as f:
f.write(common_w)
    for name, z in zip(names, JRzs_dot):
numpy_word = "def jrz_" + name + "_dot(q, dq):\n return "
#f.write(numba_word_q_dq)
f.write(numpy_word)
f.write(NumPyPrinter().doprint(sy.simplify(z)))
f.write("\n")
def translate_hoge(original, done):
with open(original, "r") as f, open(done, "w") as g:
file_data = f.readlines()
for line in file_data:
line = line.replace('numpy', 'np').replace('1/2', '0.5').replace('(0.5)', '0.5')
line = line.replace('np.cos', 'c').replace('np.sin', 's').replace('np.sqrt', 'sq')
# line = line.replace(']])', ']], dtype=np.float64)')
# line = line.replace('[0, 0, 0, 0, 0, 0, 0]', '[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]')
# line = line.replace('[0]', '[0.0]').replace(' 0]],', ' 0.0]],').replace('[1]', '[1.0]').replace('[[0,', '[[0.0,').replace('0.0, 0],', '0.0, 0.0],')
line = line.replace('import np as np', 'import numpy as np')
g.write(line)
translate_hoge("src_py_/htm.py", "src_py_no_class/htm.py")
translate_hoge("src_py_/Jos.py", "src_py_no_class/Jos.py")
translate_hoge("src_py_/JRxs.py", "src_py_no_class/JRxs.py")
translate_hoge("src_py_/JRys.py", "src_py_no_class/JRys.py")
translate_hoge("src_py_/JRzs.py", "src_py_no_class/JRzs.py")
translate_hoge("src_py_/Jo_dots.py", "src_py_no_class/Jo_dots.py")
translate_hoge("src_py_/JRx_dots.py", "src_py_no_class/JRx_dots.py")
translate_hoge("src_py_/JRy_dots.py", "src_py_no_class/JRy_dots.py")
translate_hoge("src_py_/JRz_dots.py", "src_py_no_class/JRz_dots.py") | _____no_output_____ | MIT | misc/baxter/derivation.ipynb | YoshimitsuMatsutaIe/rmp_test |
Collapse all 2-cells | all_X,collapses,all_losses,total_loss,all_signals,phispsis= dmt.sequence_optimal_up_collapses(X=X,kX=kX,dimq=1,signal=s1,steps=120)
colX=all_X[-1]
colS=all_signals[-1]
s0 = ['black']*len(X[0])#np.zeros(len(simplices[0]))
f_X=all_X[-1]
f_s=all_signals[-1]
fig = plt.figure(figsize=(6,7))
ax = fig.add_subplot(111)
dmtvis.plot_nodes(s0, points,ax, zorder=3,s=30)
dmtvis.plot_edges(f_s.copy(),points,f_X, ax, zorder=2,linewidths=2)
dmtvis.plot_triangles_plain('lavenderblush',points,f_X, ax, zorder=1)
cbar=plt.colorbar(ax.collections[0], ax=ax,orientation="horizontal")
cbar.set_ticklabels(np.around(np.append(np.arange(f_s.min(),f_s.max(),(f_s.max())/5),f_s.max()),decimals=1))
ax.set_xticks([])
ax.set_yticks([])
print([f_s.min(),f_s.max()])
plt.savefig('./figures/ex_coll_2.pdf')
plt.show()
s0 = ['black']*len(X[0])#np.zeros(len(simplices[0]))
s2 =np.random.uniform(size=len(X[2]))
sr=phispsis
fig = plt.figure(figsize=(6,7))
ax = fig.add_subplot(111)
dmtvis.plot_nodes(s0, points,ax, zorder=3,s=30)
dmtvis.plot_edges(sr.copy(),points,X, ax, zorder=2,linewidths=2)
dmtvis.plot_triangles_plain('lavenderblush',points,X, ax, zorder=1)
cbar=plt.colorbar(ax.collections[0], ax=ax,orientation="horizontal")
cbar.set_ticklabels(np.around(np.append(np.arange(sr.min(),sr.max(),(sr.max())/5),sr.max()),decimals=1))
#cbar.set_ticklabels(np.arange(s1.max() ,s1.min(),6))
print([sr.min(),sr.max()])
ax.set_xticks([])
ax.set_yticks([])
plt.savefig('./figures/ex_coll_3.pdf')
plt.show()
s0 = ['black']*len(X[0])#np.zeros(len(simplices[0]))
s2 =np.random.uniform(size=len(X[2]))
sl=np.abs(s1-phispsis)
fig = plt.figure(figsize=(6,7))
ax = fig.add_subplot(111)
dmtvis.plot_nodes(s0, points,ax, zorder=3,s=30)
dmtvis.plot_edges(sl.copy(),points,X, ax, zorder=2,linewidths=2)
dmtvis.plot_triangles_plain('lavenderblush',points,X, ax, zorder=1)
cbar=plt.colorbar(ax.collections[0], ax=ax,orientation="horizontal")
#cbar.set_ticklabels([])
a=np.around(np.append(np.arange(sl.min(),sl.max(),(sl.max())/5),sl.max()),decimals=1)
cbar.set_ticklabels(a)
print([sl.min(),sl.max()])
ax.set_xticks([])
ax.set_yticks([])
plt.savefig('./figures/ex_coll_4.pdf')
plt.show()
dmtvis.plot_hodge_decomp(X,s1,kX,phispsis,trange=30,type_collapse='up')
plt.savefig('./figures/hodge_new.pdf') | _____no_output_____ | MIT | total-collapsing.ipynb | stefaniaebli/dmt-signal-processing |
Randomly collapse 2-cells | all_X_rand,collapses_rand,all_losses_rand,total_loss_rand,all_signals_rand,phispsis_rand= dmt.sequence_optimal_up_collapses(X=X,kX=kX,dimq=1,signal=s1,steps=244,random=True)
colX_rand=all_X_rand[-1]
colS_rand=all_signals_rand[-1]
dmtvis.plot_hodge_decomp(X,s1,kX,phispsis_rand,trange=30,type_collapse='up')
plt.savefig('./figures/hodge_multiple_random_collapses_uniform.pdf') | _____no_output_____ | MIT | total-collapsing.ipynb | stefaniaebli/dmt-signal-processing |
Comparing losses | def CI_plot_y(data, conf = .95):
from scipy.stats import sem, t
n = np.array(data).shape[0]
std_err = sem(data,axis = 0)
    h = std_err * t.ppf((1 + conf) / 2, n - 1)  # use the requested confidence level
return h
typ=['normal','uniform','height','center']
steps=np.arange(244)
s=[1,50,100,150,200,240]
for j in typ:
l=np.load('./data/data_optimal_{}_sim0.npy'.format(j))[:,0,:]
rl=np.load('./data/data_random_{}_sim0.npy'.format(j))[:,0,:]
#l1=np.load('./data/data_optimal_sim0.npy'.format(j))[:,0,:]
#rl1=np.load('./data/data_random_sim{0.npy'.format(j))[:,0,:]
fig = plt.figure(figsize=(7,5))
m = np.array(l).mean(axis=0)
h = CI_plot_y(np.array(l))
plt.plot(steps,m,label="Optimal pairing")
plt.fill_between(steps,m-h,m+h,alpha=.5,zorder=0)
m = np.array(rl).mean(axis=0)
h = CI_plot_y(np.array(rl))
plt.plot(steps,m,c='green',label="Random pairing")
plt.fill_between(steps,m-h,m+h,alpha=.3,zorder=0,color='green')
plt.xticks(s)
#plt.savefig('./figures/topo_error.pdf')
plt.xlabel("Number of iterations")
plt.ylabel("Topological reconstruction loss")
#plt.title("Signal on the 1-cells: {}".format(j))
plt.legend(loc='upper left')
plt.savefig('./figures/topological_loss_{}.pdf'.format(j))
plt.show() | _____no_output_____ | MIT | total-collapsing.ipynb | stefaniaebli/dmt-signal-processing |
Object Detection Data Set (Pikachu)
There are no small data sets, like MNIST or Fashion-MNIST, in the object detection field. In order to quickly test models, we are going to assemble a small data set. First, we generate 1000 Pikachu images of different angles and sizes using an open source 3D Pikachu model. Then, we collect a series of background images and place a Pikachu image at a random position on each image. We use the [im2rec tool](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) provided by MXNet to convert the images to binary RecordIO format[1]. This format can reduce the storage overhead of the data set on the disk and improve the reading efficiency. If you want to learn more about how to read images, refer to the documentation for the [GluonCV Toolkit](https://gluon-cv.mxnet.io/).
Download the Data Set
The Pikachu data set in RecordIO format can be downloaded directly from the Internet. The operation for downloading the data set is defined in the function `download_pikachu`. | %matplotlib inline
import d2l
from mxnet import gluon, image
import os
# Save to the d2l package.
def download_pikachu(data_dir):
root_url = ('https://apache-mxnet.s3-accelerate.amazonaws.com/'
'gluon/dataset/pikachu/')
dataset = {'train.rec': 'e6bcb6ffba1ac04ff8a9b1115e650af56ee969c8',
'train.idx': 'dcf7318b2602c06428b9988470c731621716c393',
'val.rec': 'd6c33f799b4d058e82f2cb5bd9a976f69d72d520'}
for k, v in dataset.items():
gluon.utils.download(
root_url + k, os.path.join(data_dir, k), sha1_hash=v) | _____no_output_____ | MIT | d2l-en/chapter_computer-vision/object-detection-dataset.ipynb | mru4913/Dive-into-Deep-Learning |
Read the Data Set
We are going to read the object detection data set by creating the instance `ImageDetIter`. The "Det" in the name refers to Detection. We will read the training data set in random order. Since the format of the data set is RecordIO, we need the image index file `'train.idx'` to read random mini-batches. In addition, for each image of the training set, we will use random cropping and require the cropped image to cover at least 95% of each object. Since the cropping is random, this requirement is not always satisfied. We preset the maximum number of random cropping attempts to 200. If none of them meets the requirement, the image will not be cropped. To ensure the certainty of the output, we will not randomly crop the images in the test data set. We also do not need to read the test data set in random order. | # Save to the d2l package.
def load_data_pikachu(batch_size, edge_size=256):
"""Load the pikachu dataset"""
data_dir = '../data/pikachu'
download_pikachu(data_dir)
train_iter = image.ImageDetIter(
path_imgrec=os.path.join(data_dir, 'train.rec'),
path_imgidx=os.path.join(data_dir, 'train.idx'),
batch_size=batch_size,
data_shape=(3, edge_size, edge_size), # The shape of the output image
shuffle=True, # Read the data set in random order
rand_crop=1, # The probability of random cropping is 1
min_object_covered=0.95, max_attempts=200)
val_iter = image.ImageDetIter(
path_imgrec=os.path.join(data_dir, 'val.rec'), batch_size=batch_size,
data_shape=(3, edge_size, edge_size), shuffle=False)
return train_iter, val_iter | _____no_output_____ | MIT | d2l-en/chapter_computer-vision/object-detection-dataset.ipynb | mru4913/Dive-into-Deep-Learning |
Below, we read a mini-batch and print the shape of the image and label. The shape of the image is the same as in the previous experiment (batch size, number of channels, height, width). The shape of the label is (batch size, $m$, 5), where $m$ is equal to the maximum number of bounding boxes contained in a single image in the data set. Although computation for the mini-batch is very efficient, it requires each image to contain the same number of bounding boxes so that they can be placed in the same batch. Since each image may have a different number of bounding boxes, we can add illegal bounding boxes to images that have less than $m$ bounding boxes until each image contains $m$ bounding boxes. Thus, we can read a mini-batch of images each time. The label of each bounding box in the image is represented by an array of length 5. The first element in the array is the category of the object contained in the bounding box. When the value is -1, the bounding box is an illegal bounding box for filling purpose. The remaining four elements of the array represent the $x, y$ axis coordinates of the upper-left corner of the bounding box and the $x, y$ axis coordinates of the lower-right corner of the bounding box (the value range is between 0 and 1). The Pikachu data set here has only one bounding box per image, so $m=1$. | batch_size, edge_size = 32, 256
train_iter, _ = load_data_pikachu(batch_size, edge_size)
batch = train_iter.next()
batch.data[0].shape, batch.label[0].shape | _____no_output_____ | MIT | d2l-en/chapter_computer-vision/object-detection-dataset.ipynb | mru4913/Dive-into-Deep-Learning |
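For illustration, a small sketch that unpacks the first label according to the layout described above (class id followed by the four corner coordinates scaled to [0, 1]); it only uses the `batch` variable from the cell above:

label = batch.label[0].asnumpy()[0, 0]   # first image, first (and only) bounding box
class_id = int(label[0])                 # -1 would mean an illegal (padding) box
xmin, ymin, xmax, ymax = label[1:5]
print(class_id, xmin, ymin, xmax, ymax)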
Graphic Data
We have ten images with bounding boxes on them. We can see that the angle, size, and position of Pikachu are different in each image. Of course, this is a simple man-made data set. In actual practice, the data is usually much more complicated. | imgs = (batch.data[0][0:10].transpose((0, 2, 3, 1))) / 255
axes = d2l.show_images(imgs, 2, 5, scale=2)
for ax, label in zip(axes, batch.label[0][0:10]):
d2l.show_bboxes(ax, [label[0][1:5] * edge_size], colors=['w']) | _____no_output_____ | MIT | d2l-en/chapter_computer-vision/object-detection-dataset.ipynb | mru4913/Dive-into-Deep-Learning |
The goal of this notebook is to predict future sales given historical data (daily granularity). This is part of the Kaggle competition "Predict Future Sales": https://www.kaggle.com/c/competitive-data-science-predict-future-sales/data, where more information about the problem, the dataset and other solutions can be found. For my own usage, this is part of the Capstone Project of the Udacity Machine Learning Engineer Nanodegree program, so I am running this on AWS SageMaker with a conda_pytorch_36 shell. Author: Steven Vuong. Most recent update: 25/05/2020 | # mount gdrive
from google.colab import drive
drive.mount('/gdrive')
# cd to dir
% cd '../gdrive/My Drive/self_teach/udacity_ml_eng_nanodegree'
# Import Libraries
import pandas as pd
import numpy as np
import warnings
from sklearn.preprocessing import LabelEncoder
# Visualisation Libraries
import seaborn as sns
import matplotlib.pyplot as plt
# Styling Preferences
%matplotlib inline
sns.set(style="darkgrid")
pd.set_option('display.float_format', lambda x: '%.2f' % x)
warnings.filterwarnings("ignore") | /usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.
import pandas.util.testing as tm
| MIT | jupyter_notebooks/feature_engineering_pt1.ipynb | StevenVuong/Udacity-ML-Engineer-Nanodegree-Capstone-Project |
Before we begin, Thanks to the following notebooks who I gained some ideas from in feature engineering/visualisations (and took code snippets from). I would suggest having a look at their notebooks and work also, and if you like it, give them a thumbs up on Kaggle to support their work :)):- https://www.kaggle.com/dlarionov/feature-engineering-xgboost- https://www.kaggle.com/kyakovlev/1st-place-solution-part-1-hands-on-data- https://www.kaggle.com/dimitreoliveira/model-stacking-feature-engineering-and-eda | # Load in dataset (cast float64 -> float32 and int32 -> int16 to save memory)
items = pd.read_csv('./data/competition_files/items.csv',
dtype={'item_name': 'str', 'item_id': 'int16', 'item_category_id': 'int16'}
)
shops = pd.read_csv('./data/competition_files/shops.csv',
dtype={'shop_name': 'str', 'shop_id': 'int16'}
)
categories = pd.read_csv('./data/competition_files/item_categories.csv',
dtype={'item_category_name': 'str', 'item_category_id': 'int16'}
)
train = pd.read_csv('./data/competition_files/sales_train.csv',
dtype={
'date': 'str',
'date_block_num': 'int16',
'shop_id': 'int16',
'item_id': 'int16',
'item_price': 'float32',
'item_cnt_day': 'int16'}
)
# set index to ID to avoid dropping it later
test = pd.read_csv('./data/competition_files/test.csv',
dtype={'ID': 'int16', 'shop_id': 'int16', 'item_id': 'int16'}
).set_index('ID')
# Cast train date from string to datetime data type
train.date = train.date.str.replace(".", "/")
train.date = pd.to_datetime(train.date, format="%d/%m/%Y")  # the raw dates are day-first (dd.mm.yyyy)
Join the different data sets; merge onto train df | train = train.join(
items, on='item_id', rsuffix='_').join(
shops, on='shop_id', rsuffix='_').join(
categories, on='item_category_id', rsuffix='_').drop(
['item_id_', 'shop_id_', 'item_category_id_'], axis=1
) | _____no_output_____ | MIT | jupyter_notebooks/feature_engineering_pt1.ipynb | StevenVuong/Udacity-ML-Engineer-Nanodegree-Capstone-Project |
Probing the train data, it appears there are no NaN or missing values, which is good. | print("----------Top-5- Record----------")
print(train.head(5))
print("-----------Information-----------")
print(train.info())
print("-----------Data Types-----------")
print(train.dtypes)
print("----------Missing value-----------")
print(train.isnull().sum())
print("----------Null value-----------")
print(train.isna().sum())
print("----------Shape of Data----------")
print("Number of rows = {}, Number of columns = {}".format(len(train), len(train.columns)))
print("----------Data Description----------")
print(train.describe())
# look at time period of data
print('Min date from train set: %s' % train['date'].min().date())
print('Max date from train set: %s' % train['date'].max().date()) | Min date from train set: 2013-01-01
Max date from train set: 2015-12-10
| MIT | jupyter_notebooks/feature_engineering_pt1.ipynb | StevenVuong/Udacity-ML-Engineer-Nanodegree-Capstone-Project |
Data is from 1st January 2013 to 10th Decemer 2015, as we expect So it turns out that a lot of data in the training set for columns "shop_id" and "item_id" does not appear in the test set. This could be perhaps because the item is no longer on sale as time goes on or shops have closed down or moved addresses. As we want to predict data in the test set, we will focus on only using "shop_id" and "item_id" that appears in the test set. These rows may contain information so could be worth keeping as an extra column (commented out) indicating whether or not the train_id or shop_id is in the test set. Unfortunately however, we are tight on memory and so will not be doing that in this notebook.To make this more future proof where the "shop_id" and "item_id" might change over time (in a production environment, let's say), one may want to consider a data pipeline to constantly train and update our model with the latest information regarding shop_id and item_id's etc.. | test_shop_ids = test['shop_id'].unique()
test_item_ids = test['item_id'].unique()
# Only shops that exist in test set.
corrlate_train = train[train['shop_id'].isin(test_shop_ids)]
# Only items that exist in test set.
correlate_train = corrlate_train[corrlate_train['item_id'].isin(test_item_ids)]
print('Initial data set size :', train.shape[0])
print('Data set size after matching crossovers between train and test:', correlate_train.shape[0])
# Make separate column to indicate whether or not the train_id and shop_id is in test
# train['is_in_test'] = train.index.isin(correlate_train.index)
# train.head()
# Reduce train set to just match ones in test set regarding train_id and shop_id
train = correlate_train
len(train) | _____no_output_____ | MIT | jupyter_notebooks/feature_engineering_pt1.ipynb | StevenVuong/Udacity-ML-Engineer-Nanodegree-Capstone-Project |
It appears we have 5 duplicated rows; let's look into these. | print('Number of duplicates:', len(train[train.duplicated()]))
| MIT | jupyter_notebooks/feature_engineering_pt1.ipynb | StevenVuong/Udacity-ML-Engineer-Nanodegree-Capstone-Project |
The item IDs are all the same, as is the price for several of them; other columns such as date and date_block_num differ. So this does not appear to be a mistake. As there are only 5 duplicated rows, we will leave them in for now and deal with them later. | train[train.duplicated()]
Plot the train data; look for outliers. It seems like there are a few with item price > 100000 and with item count per day > 1000. We will remove these from our training set. | plt.figure(figsize=(10,4))
plt.xlim(-100, 3000)
sns.boxplot(x=train.item_cnt_day)
plt.figure(figsize=(10,4))
plt.xlim(train.item_price.min(), train.item_price.max()*1.1)
sns.boxplot(x=train.item_price)
train = train[train.item_price<100000]
train = train[train.item_cnt_day<1000]
plt.figure(figsize=(10,4))
plt.xlim(-100, 3000)
sns.boxplot(x=train.item_cnt_day)
plt.figure(figsize=(10,4))
plt.xlim(train.item_price.min(), train.item_price.max()*1.1)
sns.boxplot(x=train.item_price)
plt.show() | _____no_output_____ | MIT | jupyter_notebooks/feature_engineering_pt1.ipynb | StevenVuong/Udacity-ML-Engineer-Nanodegree-Capstone-Project |
Looking better after having removed the outliers. Fill any item_price < 0 with the median item price. | # Calculate the item price median
median = train.item_price.median()
print("Item Price Median = {}".format(median))
train.loc[train.item_price<0, 'item_price'] = median
# Double there are no item price rows < 0
train.loc[train.item_price<0, 'item_price'] | _____no_output_____ | MIT | jupyter_notebooks/feature_engineering_pt1.ipynb | StevenVuong/Udacity-ML-Engineer-Nanodegree-Capstone-Project |
Count number of rows with item_cnt_day < 0; seems too many to be anomalous and could be an important feature. We will leave this in our dataset. | len(train.loc[train.item_cnt_day<0, 'item_cnt_day']) | _____no_output_____ | MIT | jupyter_notebooks/feature_engineering_pt1.ipynb | StevenVuong/Udacity-ML-Engineer-Nanodegree-Capstone-Project |
Some shops are duplicates of each other (judging by their names); we will fix these in both the train and test sets. | # Якутск Орджоникидзе, 56
train.loc[train.shop_id == 0, 'shop_id'] = 57
test.loc[test.shop_id == 0, 'shop_id'] = 57
# Якутск ТЦ "Центральный"
train.loc[train.shop_id == 1, 'shop_id'] = 58
test.loc[test.shop_id == 1, 'shop_id'] = 58
# Жуковский ул. Чкалова 39м²
train.loc[train.shop_id == 10, 'shop_id'] = 11
test.loc[test.shop_id == 10, 'shop_id'] = 11 | _____no_output_____ | MIT | jupyter_notebooks/feature_engineering_pt1.ipynb | StevenVuong/Udacity-ML-Engineer-Nanodegree-Capstone-Project |
Process "Shop_name" column -> shop name begins with city name. | # Fix erroneous shop name title
train.loc[train.shop_name == 'Сергиев Посад ТЦ "7Я"', 'shop_name'] = 'СергиевПосад ТЦ "7Я"'
# Create a column for city
train['city'] = train['shop_name'].str.split(' ').map(lambda x: x[0])
train.head()
# Fix a city name (typo)
train.loc[train.city == '!Якутск', 'city'] = 'Якутск'
# Encode the city name into a code column
train['city_code'] = LabelEncoder().fit_transform(train['city'])
train.head() | _____no_output_____ | MIT | jupyter_notebooks/feature_engineering_pt1.ipynb | StevenVuong/Udacity-ML-Engineer-Nanodegree-Capstone-Project |
Each category name contains a type and a subtype. Treat this similarly to how we treated the shop name: split it into separate columns and label-encode them. | # Create separate column with split category name
train['split_category_name'] = train['item_category_name'].str.split('-')
train.head()
# Make column for category type and encode
train['item_category_type'] = train['split_category_name'].map(lambda x : x[0].strip())
train['item_category_type_code'] = LabelEncoder().fit_transform(train['item_category_type'])
train.head()
# Do the same for subtype, make column wiht name if nan then set to the type
train['item_category_subtype'] = train['split_category_name'].map(
lambda x: x[1].strip() if len(x) > 1 else x[0].strip()
)
# Make separate encoded column
train['item_category_subtype_code'] = LabelEncoder().fit_transform(train['item_category_subtype'])
train.head() | _____no_output_____ | MIT | jupyter_notebooks/feature_engineering_pt1.ipynb | StevenVuong/Udacity-ML-Engineer-Nanodegree-Capstone-Project |
We can now drop the following columns, having captured and encoded the necessary information from them:- shop_name- item_category_name- split_category_name- item_category_type- item_category_subtype | train = train.drop(['shop_name',
'item_category_name',
'split_category_name',
'item_category_type',
'item_category_subtype',
], axis = 1)
train.head() | _____no_output_____ | MIT | jupyter_notebooks/feature_engineering_pt1.ipynb | StevenVuong/Udacity-ML-Engineer-Nanodegree-Capstone-Project |
Looking at the item names, there are currently too many unique values for our model to handle well, so we will try to reduce them by taking just the first word of each item name and encoding that. | print("Number of unique Item names = {}".format(len(train.item_name.unique())))
# Split item name, extracting first word of the string
train['item_name_split'] = train['item_name'].str.split(' ').map(lambda x : x[0].strip())
train.head()
print("Number of unique Item First Words = {}".format(len(train['item_name_split'].unique()))) | Number of unique Item First Words = 1590
| MIT | jupyter_notebooks/feature_engineering_pt1.ipynb | StevenVuong/Udacity-ML-Engineer-Nanodegree-Capstone-Project |
This seems substantial enough, so we will encode this once again into another column. | train['item_name_code'] = LabelEncoder().fit_transform(train['item_name_split'])
train.head() | _____no_output_____ | MIT | jupyter_notebooks/feature_engineering_pt1.ipynb | StevenVuong/Udacity-ML-Engineer-Nanodegree-Capstone-Project |
And now we can drop the following columns:- item_name- item_name_split- city (forgot to drop in last round) | train = train.drop(['item_name',
'item_name_split',
'city'
], axis = 1)
train.head() | _____no_output_____ | MIT | jupyter_notebooks/feature_engineering_pt1.ipynb | StevenVuong/Udacity-ML-Engineer-Nanodegree-Capstone-Project |
The features above are the ones deemed useful so far, so we keep them. We will group the dataframe by month (and then by the other columns) and aggregate item price and item count, computing the mean and the sum per month. | print(len(train))
# Group by month (date_block_num)
# Could do more complex, just want something very basic to aggregate
train_by_month = train.sort_values('date').groupby([
'date_block_num',
'item_category_type_code',
'item_category_subtype_code',
'item_name_code',
'city_code',
'shop_id',
'item_category_id',
'item_id',
# Keep simple; will just use the above columns
], as_index=False)
train_by_month.size()
# everything is organised by date block num, great!
train_by_month.head().head()
train_by_month.head()
# Aggregate item price and item count
train_by_month = train_by_month.agg({'item_price':['sum', 'mean'], 'item_cnt_day':['sum', 'mean','count']})
train_by_month.head()
# See how many rows we now have
len(train_by_month)
# Sanity check on number of months
train_by_month.date_block_num.unique()
# Rename columns
train_by_month.columns = ['date_block_num',
'item_category_type_code',
'item_category_subtype_code',
'item_name_code',
'city_code',
'shop_id',
'item_category_id',
'item_id',
'sum_item_price',
'mean_item_price',
'sum_item_count',
'mean_item_count',
'transactions']
train_by_month.head() | _____no_output_____ | MIT | jupyter_notebooks/feature_engineering_pt1.ipynb | StevenVuong/Udacity-ML-Engineer-Nanodegree-Capstone-Project |
As we have to make predictions for the test set, we must ensure all possible combinations of "shop_id" and "item_id" are covered. To do this, we will loop through all combinations in the test set and append them to a new dataframe, then merge that dataframe with our main dataframe and fill the missing values with 0. | # Get all unique shop id's and item id's
shop_ids = test['shop_id'].unique()
item_ids = test['item_id'].unique()
# Initialise empty df
empty_df = []
# Loop through months and append to dataframe
for i in range(34):
for item in item_ids:
for shop in shop_ids:
empty_df.append([i, shop, item])
# Turn into dataframe
empty_df = pd.DataFrame(empty_df, columns=['date_block_num','shop_id','item_id'])
# Merge monthly train set with the complete set (missing records will be filled with 0).
train_by_month = pd.merge(train_by_month, empty_df, on=['date_block_num','shop_id','item_id'], how='outer')
len(train_by_month)
# Double check we have no na records
train_by_month.isna().sum() | _____no_output_____ | MIT | jupyter_notebooks/feature_engineering_pt1.ipynb | StevenVuong/Udacity-ML-Engineer-Nanodegree-Capstone-Project |
The fact that we have so many NA values is quite concerning. Perhaps many item_id or shop_id values were added in the most recent month (the test data) that are not included in the training data. While there may be better ways of dealing with this, we will fill the missing records with 0 and move on. | # Fill missing records with 0
train_by_month.fillna(0, inplace=True)
train_by_month.isna().sum()
train_by_month.describe() | _____no_output_____ | MIT | jupyter_notebooks/feature_engineering_pt1.ipynb | StevenVuong/Udacity-ML-Engineer-Nanodegree-Capstone-Project |
In this first feature-engineering notebook, we have inspected the data, removed outliers, and identified useful features (as well as engineered others) for training our model. As our feature-engineering steps are quite numerous, we will split them across separate notebooks - more to come in part 2. | # Save this as a csv
train_by_month.to_csv('./data/output/processed_data_pt1.csv', index=False, header=True)
| _____no_output_____ | MIT | jupyter_notebooks/feature_engineering_pt1.ipynb | StevenVuong/Udacity-ML-Engineer-Nanodegree-Capstone-Project |
Assignment 1.2 - Linear classifier
In this assignment we implement another machine learning model - a linear classifier. A linear classifier learns, for each class, a set of weights by which the value of every feature is multiplied and then summed together. The class with the largest sum is the model's prediction.
In this assignment you will:
- practice computing gradients of various multivariate functions
- implement gradient computation through a linear model and the softmax loss
- implement the training loop of a linear classifier
- tune training parameters in practice
Just in case, here is the numpy tutorial link again: http://cs231n.github.io/python-numpy-tutorial/ | import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
%load_ext autoreload
%autoreload 2
from dataset import load_svhn, random_split_train_val
from gradient_check_solution import check_gradient
from metrics_solution import multiclass_accuracy
import linear_classifer_solution as linear_classifer | _____no_output_____ | MIT | assignments/assignment1/Linear classifier_solution.ipynb | tbb/dlcourse_ai |
As always, the first step is to load the data. We will use the same SVHN dataset. | def prepare_for_linear_classifier(train_X, test_X):
train_flat = train_X.reshape(train_X.shape[0], -1).astype(np.float) / 255.0
test_flat = test_X.reshape(test_X.shape[0], -1).astype(np.float) / 255.0
# Subtract mean
mean_image = np.mean(train_flat, axis = 0)
train_flat -= mean_image
test_flat -= mean_image
# Add another channel with ones as a bias term
train_flat_with_ones = np.hstack([train_flat, np.ones((train_X.shape[0], 1))])
test_flat_with_ones = np.hstack([test_flat, np.ones((test_X.shape[0], 1))])
return train_flat_with_ones, test_flat_with_ones
train_X, train_y, test_X, test_y = load_svhn("data", max_train=10000, max_test=1000)
train_X, test_X = prepare_for_linear_classifier(train_X, test_X)
# Split train into train and val
train_X, train_y, val_X, val_y = random_split_train_val(train_X, train_y, num_val = 1000) | _____no_output_____ | MIT | assignments/assignment1/Linear classifier_solution.ipynb | tbb/dlcourse_ai |
Playing with gradients!

In this course we will write many functions that compute gradients analytically. An essential tool while implementing such code is a function that checks it: it computes the gradient numerically and compares the result with the gradient computed analytically.

We start by implementing the numeric gradient computation in this function. Compute the gradient with a numerical derivative for each coordinate. To compute the derivative, use the so-called two-point formula (https://en.wikipedia.org/wiki/Numerical_differentiation): | # TODO: Implement gradient check function
def sqr(x):
return x*x, 2*x
check_gradient(sqr, np.array([3.0]))
def array_sum(x):
assert x.shape == (2,), x.shape
return np.sum(x), np.ones_like(x)
check_gradient(array_sum, np.array([3.0, 2.0]))
def array_2d_sum(x):
assert x.shape == (2,2)
return np.sum(x), np.ones_like(x)
check_gradient(array_2d_sum, np.array([[3.0, 2.0], [1.0, 0.0]])) | Gradient check passed!
Gradient check passed!
Gradient check passed!
| MIT | assignments/assignment1/Linear classifier_solution.ipynb | tbb/dlcourse_ai |
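A minimal sketch of what such a numeric check could look like, assuming `f(x)` returns a scalar value and its analytic gradient (this is an illustration, not the course's reference `check_gradient` from `gradient_check_solution`):

```python
import numpy as np

def check_gradient_sketch(f, x, delta=1e-5, tol=1e-4):
    """f(x) is assumed to return (scalar_value, gradient_with_shape_of_x)."""
    fx, analytic_grad = f(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        ix = it.multi_index
        orig_value = x[ix]
        x[ix] = orig_value + delta
        f_plus, _ = f(x)
        x[ix] = orig_value - delta
        f_minus, _ = f(x)
        x[ix] = orig_value                                 # restore the coordinate
        numeric_grad = (f_plus - f_minus) / (2 * delta)    # two-point formula
        if not np.isclose(numeric_grad, analytic_grad[ix], atol=tol):
            print("Gradient check failed at", ix)
            return False
        it.iternext()
    print("Gradient check passed!")
    return True
```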
Now let's implement the softmax function, which takes the scores for each class as input and converts them into probabilities between 0 and 1:

**Important:** A practical issue when computing this function is that it involves exponentials of potentially very large numbers - this can produce values in the numerator and denominator outside the float range.

Fortunately, there is a simple fix for this problem -- before computing softmax, subtract the maximum score from all scores:
```predictions -= np.max(predictions)```
(more details here - http://cs231n.github.io/linear-classify/softmax, section `Practical issues: Numeric stability`) | # TODO Implement softmax and cross-entropy for single sample
probs = linear_classifer.softmax(np.array([-10, 0, 10]))
# Make sure it works for big numbers too!
probs = linear_classifer.softmax(np.array([1000, 0, 0]))
assert np.isclose(probs[0], 1.0) | _____no_output_____ | MIT | assignments/assignment1/Linear classifier_solution.ipynb | tbb/dlcourse_ai |
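A minimal sketch of a numerically stable softmax consistent with the description above (illustrative only; the actual solution lives in `linear_classifer.py`):

```python
import numpy as np

def softmax_sketch(predictions):
    # Subtract the max for numeric stability; this does not change the result.
    shifted = predictions - np.max(predictions)
    exps = np.exp(shifted)
    return exps / np.sum(exps)
```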
In addition, we will implement the cross-entropy loss, which we will use as the error function. In its general form, cross-entropy is defined as $H(p, q) = -\sum_x p(x) \log q(x)$, where x runs over all classes, p(x) is the true probability that the sample belongs to class x, and q(x) is the probability of class x predicted by the model. In our case a sample belongs to exactly one class, whose index is passed to the function: for that class p(x) is 1, and for all other classes it is 0. This makes the function much simpler to implement! | probs = linear_classifer.softmax(np.array([-5, 0, 5]))
linear_classifer.cross_entropy_loss(probs, 1) | _____no_output_____ | MIT | assignments/assignment1/Linear classifier_solution.ipynb | tbb/dlcourse_ai |
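A corresponding sketch of the single-sample cross-entropy (again an illustration, not the graded solution):

```python
import numpy as np

def cross_entropy_loss_sketch(probs, target_index):
    # Only the probability of the true class contributes, since p(x) is one-hot.
    return -np.log(probs[target_index])
```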
Once we have implemented the functions themselves, we can implement the gradient. It turns out that computing the gradient becomes much simpler if we merge these functions into a single one that first computes the probabilities via softmax and then uses them to compute the loss via cross-entropy.

This `softmax_with_cross_entropy` function returns both the loss value and the gradient with respect to its inputs. We will verify the correctness of the implementation with `check_gradient`. | # TODO Implement combined function for softmax and cross entropy that produces the gradient
loss, grad = linear_classifer.softmax_with_cross_entropy(np.array([1, 0, 0]), 1)
check_gradient(lambda x: linear_classifer.softmax_with_cross_entropy(x, 1), np.array([1, 0, 0], np.float)) | Gradient check passed!
| MIT | assignments/assignment1/Linear classifier_solution.ipynb | tbb/dlcourse_ai |
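For reference, a sketch of the combined function and its well-known gradient (softmax probabilities minus the one-hot target); this is an assumption about the intended implementation, not the solution file itself:

```python
import numpy as np

def softmax_with_cross_entropy_sketch(predictions, target_index):
    shifted = predictions - np.max(predictions)
    probs = np.exp(shifted) / np.sum(np.exp(shifted))
    loss = -np.log(probs[target_index])
    dprediction = probs.copy()
    dprediction[target_index] -= 1.0   # d(loss)/d(predictions) = probs - one_hot
    return loss, dprediction
```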
As the training method we will use stochastic gradient descent (SGD), which works on batches of samples. Therefore all our functions will receive not a single example but a batch: the input is not a vector of `num_classes` scores but a matrix of shape `batch_size, num_classes`. The sample index within the batch is always the first dimension.

The next step is to rewrite our functions so that they support batches.

The final loss value should still be a single number, equal to the mean loss over all samples in the batch. | # TODO Extend combined function so it can receive a 2d array with batch of samples
# Test batch_size = 1
batch_size = 1
predictions = np.zeros((batch_size, 3))
target_index = np.ones(batch_size, np.int)
check_gradient(lambda x: linear_classifer.softmax_with_cross_entropy(x, target_index), predictions)
# Test batch_size = 3
batch_size = 3
predictions = np.zeros((batch_size, 3))
target_index = np.ones(batch_size, np.int)
check_gradient(lambda x: linear_classifer.softmax_with_cross_entropy(x, target_index), predictions) | Gradient check passed!
Gradient check passed!
| MIT | assignments/assignment1/Linear classifier_solution.ipynb | tbb/dlcourse_ai |
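A batched sketch under the same assumptions (rows are samples, the loss is the mean over the batch); this illustrates the expected behaviour rather than reproducing the graded file:

```python
import numpy as np

def batched_softmax_with_cross_entropy_sketch(predictions, target_index):
    # predictions: (batch_size, num_classes); target_index: (batch_size,)
    shifted = predictions - np.max(predictions, axis=1, keepdims=True)
    probs = np.exp(shifted) / np.sum(np.exp(shifted), axis=1, keepdims=True)
    batch_size = predictions.shape[0]
    loss = -np.mean(np.log(probs[np.arange(batch_size), target_index]))
    dprediction = probs.copy()
    dprediction[np.arange(batch_size), target_index] -= 1.0
    dprediction /= batch_size   # average the gradient over the batch
    return loss, dprediction
```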
Finally, let's implement the linear classifier itself!

softmax and cross-entropy receive as input the scores produced by the linear classifier. It computes them very simply: for each class there is a set of weights that are multiplied by the image pixels and summed up. The resulting number is the class score that goes into softmax.

Thus, the linear classifier can be represented as multiplying a pixel vector by a matrix W of size `num_features, num_classes`. This approach extends easily to a batch of pixel vectors X of size `batch_size, num_features`:

`predictions = X * W`, where `*` is matrix multiplication.

Implement the linear classifier computation and the gradients with respect to the weights in the `linear_softmax` function in `linear_classifer.py` | # TODO Implement linear_softmax function that uses softmax with cross-entropy for linear classifier
batch_size = 2
num_classes = 2
num_features = 3
np.random.seed(42)
W = np.random.randint(-1, 3, size=(num_features, num_classes)).astype(np.float)
X = np.random.randint(-1, 3, size=(batch_size, num_features)).astype(np.float)
target_index = np.ones(batch_size, dtype=np.int)
loss, dW = linear_classifer.linear_softmax(X, W, target_index)
check_gradient(lambda w: linear_classifer.linear_softmax(X, w, target_index), W) | Gradient check passed!
| MIT | assignments/assignment1/Linear classifier_solution.ipynb | tbb/dlcourse_ai |
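A sketch of how `linear_softmax` could combine the pieces: a forward pass `X @ W`, then the chain rule `dW = X^T * d(predictions)`. This is a hedged illustration of the intended logic, not the solution file:

```python
import numpy as np

def linear_softmax_sketch(X, W, target_index):
    predictions = np.dot(X, W)                              # (batch_size, num_classes)
    shifted = predictions - np.max(predictions, axis=1, keepdims=True)
    probs = np.exp(shifted) / np.sum(np.exp(shifted), axis=1, keepdims=True)
    batch_size = X.shape[0]
    loss = -np.mean(np.log(probs[np.arange(batch_size), target_index]))
    dpred = probs.copy()
    dpred[np.arange(batch_size), target_index] -= 1.0
    dpred /= batch_size
    dW = np.dot(X.T, dpred)                                 # chain rule through X @ W
    return loss, dW
```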
And now, regularization. We will use L2 regularization of the weights as part of the overall loss function.

Recall that L2 regularization is defined as

$l2\_reg\_loss = regularization\_strength \cdot \sum_{i,j} W[i, j]^2$

Implement the function that computes it along with the corresponding gradients. | # TODO Implement l2_regularization function that implements loss for L2 regularization
linear_classifer.l2_regularization(W, 0.01)
check_gradient(lambda w: linear_classifer.l2_regularization(w, 0.01), W) | Gradient check passed!
| MIT | assignments/assignment1/Linear classifier_solution.ipynb | tbb/dlcourse_ai |
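The L2 term and its gradient are simple enough to sketch directly (illustrative, not the graded file):

```python
import numpy as np

def l2_regularization_sketch(W, reg_strength):
    loss = reg_strength * np.sum(np.square(W))
    grad = 2.0 * reg_strength * W    # derivative of reg_strength * sum(W^2)
    return loss, grad
```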
Training! The gradients are in order, so let's implement the training procedure! | # TODO: Implement LinearSoftmaxClassifier.fit function
classifier = linear_classifer.LinearSoftmaxClassifier()
loss_history = classifier.fit(train_X, train_y, epochs=30, learning_rate=1e-3, batch_size=300, reg=1e1)
# let's look at the loss history!
plt.plot(loss_history);
# Let's check how it performs on validation set
pred = classifier.predict(val_X)
accuracy = multiclass_accuracy(pred, val_y)
print("Accuracy: ", accuracy)
# Now, let's train more and see if it performs better
classifier.fit(train_X, train_y, epochs=100, learning_rate=1e-3, batch_size=300, reg=1e1)
pred = classifier.predict(val_X)
accuracy = multiclass_accuracy(pred, val_y)
print("Accuracy after training for 100 epochs: ", accuracy) | Accuracy: 0.145
Epoch 0, loss: 2.301971
Epoch 1, loss: 2.301977
Epoch 2, loss: 2.301983
Epoch 3, loss: 2.301990
Epoch 4, loss: 2.301970
Epoch 5, loss: 2.301979
Epoch 6, loss: 2.301968
Epoch 7, loss: 2.301989
Epoch 8, loss: 2.301976
Epoch 9, loss: 2.301980
Epoch 10, loss: 2.301986
Epoch 11, loss: 2.301982
Epoch 12, loss: 2.301993
Epoch 13, loss: 2.301974
Epoch 14, loss: 2.301999
Epoch 15, loss: 2.301972
Epoch 16, loss: 2.301976
Epoch 17, loss: 2.301989
Epoch 18, loss: 2.301968
Epoch 19, loss: 2.301983
Epoch 20, loss: 2.301982
Epoch 21, loss: 2.301983
Epoch 22, loss: 2.301975
Epoch 23, loss: 2.301981
Epoch 24, loss: 2.301990
Epoch 25, loss: 2.301996
Epoch 26, loss: 2.301979
Epoch 27, loss: 2.301980
Epoch 28, loss: 2.301974
Epoch 29, loss: 2.301978
Epoch 30, loss: 2.301972
Epoch 31, loss: 2.301977
Epoch 32, loss: 2.301991
Epoch 33, loss: 2.301983
Epoch 34, loss: 2.301986
Epoch 35, loss: 2.301970
Epoch 36, loss: 2.301983
Epoch 37, loss: 2.302006
Epoch 38, loss: 2.301975
Epoch 39, loss: 2.301975
Epoch 40, loss: 2.301974
Epoch 41, loss: 2.301977
Epoch 42, loss: 2.301963
Epoch 43, loss: 2.301973
Epoch 44, loss: 2.301981
Epoch 45, loss: 2.301978
Epoch 46, loss: 2.301970
Epoch 47, loss: 2.301976
Epoch 48, loss: 2.301974
Epoch 49, loss: 2.301988
Epoch 50, loss: 2.301970
Epoch 51, loss: 2.302000
Epoch 52, loss: 2.301989
Epoch 53, loss: 2.301979
Epoch 54, loss: 2.301973
Epoch 55, loss: 2.301989
Epoch 56, loss: 2.301984
Epoch 57, loss: 2.301964
Epoch 58, loss: 2.301977
Epoch 59, loss: 2.301970
Epoch 60, loss: 2.301976
Epoch 61, loss: 2.301992
Epoch 62, loss: 2.301982
Epoch 63, loss: 2.301992
Epoch 64, loss: 2.301977
Epoch 65, loss: 2.301983
Epoch 66, loss: 2.301959
Epoch 67, loss: 2.301976
Epoch 68, loss: 2.301975
Epoch 69, loss: 2.301986
Epoch 70, loss: 2.301995
Epoch 71, loss: 2.301974
Epoch 72, loss: 2.301960
Epoch 73, loss: 2.301993
Epoch 74, loss: 2.301976
Epoch 75, loss: 2.301969
Epoch 76, loss: 2.301978
Epoch 77, loss: 2.301972
Epoch 78, loss: 2.301979
Epoch 79, loss: 2.301968
Epoch 80, loss: 2.301962
Epoch 81, loss: 2.301983
Epoch 82, loss: 2.301975
Epoch 83, loss: 2.301961
Epoch 84, loss: 2.301973
Epoch 85, loss: 2.301976
Epoch 86, loss: 2.301993
Epoch 87, loss: 2.301971
Epoch 88, loss: 2.301970
Epoch 89, loss: 2.301989
Epoch 90, loss: 2.301989
Epoch 91, loss: 2.301989
Epoch 92, loss: 2.301978
Epoch 93, loss: 2.301983
Epoch 94, loss: 2.301976
Epoch 95, loss: 2.301968
Epoch 96, loss: 2.301969
Epoch 97, loss: 2.301986
Epoch 98, loss: 2.301984
Epoch 99, loss: 2.301975
Accuracy after training for 100 epochs: 0.15
| MIT | assignments/assignment1/Linear classifier_solution.ipynb | tbb/dlcourse_ai |
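For context, a minimal sketch of the structure the `fit` method is expected to have (shuffle indices into batches, accumulate the softmax and L2 losses, take a gradient step); this is an assumption about the intended layout, reusing the `linear_classifer` functions already verified above, and exact details differ in the solution file:

```python
import numpy as np

def fit_sketch(X, y, W, epochs, learning_rate, batch_size, reg):
    num_train = X.shape[0]
    loss_history = []
    for epoch in range(epochs):
        shuffled_indices = np.arange(num_train)
        np.random.shuffle(shuffled_indices)
        sections = np.arange(batch_size, num_train, batch_size)
        batches_indices = np.array_split(shuffled_indices, sections)
        loss = 0.0
        for batch_indices in batches_indices:
            batch_X, batch_y = X[batch_indices], y[batch_indices]
            loss, dW = linear_classifer.linear_softmax(batch_X, W, batch_y)
            reg_loss, reg_dW = linear_classifer.l2_regularization(W, reg)
            W -= learning_rate * (dW + reg_dW)   # gradient descent step
            loss += reg_loss
        loss_history.append(loss)
    return W, loss_history
```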
As before, we use cross-validation to select the hyperparameters. This time, to keep training time reasonable, we will use only a single split into training and validation data.

Now we need to tune not one but two hyperparameters! Don't limit yourself to the initial values in the code. Achieve an accuracy of more than **20%** on the validation data. | import itertools
num_epochs = 200
batch_size = 300
learning_rates = [1e-3, 1e-4, 1e-5]
reg_strengths = [1e-4, 1e-5, 1e-6]
best_classifier = None
best_val_accuracy = -float("inf")
# TODO use validation set to find the best hyperparameters
# hint: for best results, you might need to try more values for learning rate and regularization strength
# than provided initially
for learning_rate, reg_strength in itertools.product(learning_rates, reg_strengths):
classifier = linear_classifer.LinearSoftmaxClassifier()
classifier.fit(train_X, train_y, verbose=False,
epochs=num_epochs, batch_size=batch_size,
learning_rate=learning_rate,
reg=reg_strength)
pred = classifier.predict(val_X)
accuracy = multiclass_accuracy(pred, val_y)
if accuracy > best_val_accuracy:
best_classifier = classifier
best_val_accuracy = accuracy
print('best validation accuracy achieved: %f' % best_val_accuracy) | best validation accuracy achieved: 0.215000
| MIT | assignments/assignment1/Linear classifier_solution.ipynb | tbb/dlcourse_ai |
So, what accuracy did we achieve on the test data? | test_pred = best_classifier.predict(test_X)
test_accuracy = multiclass_accuracy(test_pred, test_y)
print('Linear softmax classifier test set accuracy: %f' % (test_accuracy, )) | _____no_output_____ | MIT | assignments/assignment1/Linear classifier_solution.ipynb | tbb/dlcourse_ai |
Is the SED Correct?

In the circle test, the SFH is totally bonkers. We simply cannot get the correct SFH back out with MCMC. Is the MCMC getting a good fit? | import numpy as np
import matplotlib.pyplot as plt
wavelengths = [3551, 4686, 6166, 7480, 8932] # for u, g, r, i, z filters
filters = ['u', 'g', 'r', 'i', 'z'] | _____no_output_____ | MIT | figures/Check-SED.ipynb | benjaminrose/SNIa-Local-Environments |
Input Text

So from:

logzsol | dust2 | $\tau$ | tStart | sfTrans | sfSlope
--------|-------|--------|--------|---------|--------
0.5 | 0.1 | 0.5 | 1.5 | 9.0 | -1.0

we get

u | g | r | i | z
--|---|---|---|--
45.36 | 43.76 | 42.99 | 42.67 | 42.39

This SED gets 25 magnitudes subtracted from it (the `c` parameter in the fit) to bring it to a reasonable magnitude. FSPS only calculates for 1 solar mass, so this factor is a scaling factor related to the total stellar mass observed.

Fit 1

First we did our normal fit. The oddest part was that `logzsol` wanted the smallest value possible. This was most odd because the prior is a Gaussian centered at -0.5 (which happens to be the input value) with a width of 0.5 dex. I also have a low cutoff, just because, of -2.5. This fit gives us

logzsol | dust2 | $\tau$ | tStart | sfTrans | sfSlope | c
--------|-------|--------|--------|---------|---------|--
2.5 | 0.01 | 7.17 | 7.94 | 10.40 | -5.24 | -23.48

and an SED of

u | g | r | i | z
--|---|---|---|--
43.31 | 42.06 | 41.76 | 41.67 | 41.62

Fit 2

I changed the low cutoff, in part because nothing else seemed to affect the metallicity parameter fit. With it now set to no lower than -1.0, the fit gives us:

logzsol | dust2 | $\tau$ | tStart | sfTrans | sfSlope | c
--------|-------|--------|--------|---------|---------|--
1.0 | 0.25 | 5.67 | 1.94 | 4.93 | 1.64 | -22.85

and an SED of

u | g | r | i | z
--|---|---|---|--
42.28 | 41.43 | 41.23 | 41.01 | 40.99

Fit 3

Finally I "fixed" the metallicity to the known value of -0.5, because the previous fits still did not want to get things correct. This fit gives us:

logzsol | dust2 | $\tau$ | tStart | sfTrans | sfSlope | c
--------|-------|--------|--------|---------|---------|--
0.51 | 0.32 | 8.17 | 8.42 | 10.76 | 4.72 | -22.17

and an SED of

u | g | r | i | z
--|---|---|---|--
41.53 | 40.70 | 40.55 | 40.33 | 40.30

**None** of these are correct. | input_sed = np.array([45.36, 43.76, 42.99, 42.67, 42.39])
input_c = -25
fit1_sed = np.array([43.31, 42.06, 41.76, 41.67, 41.62])
fit1_c = -23.48
fit2_sed = np.array([42.28, 41.43, 41.23, 41.01, 40.99])
fit2_c = -22.85
fit3_sed = np.array([41.53, 40.70, 40.55, 40.33, 40.30])
fit3_c = -22.1
plt.figure('fit test')
fig, ax = plt.subplots(1,1)
ax.plot(wavelengths, input_sed+input_c, label='Input Values')
# ax.plot(wavelengths, [20.36, 18.76, 17.99, 17.67, 17.39]) # the in text file numbers.
ax.plot(wavelengths, fit1_sed+fit1_c, label='Full Fit')
ax.plot(wavelengths, fit2_sed+fit2_c, label='Smaller $\log(Z_{sol})$ range')
ax.plot(wavelengths, fit3_sed+fit3_c, label='Fixed $\log(Z_{sol})$')
plt.gca().invert_yaxis()
ax.set_xticks(wavelengths)
ax.set_xticklabels(filters)
ax.set_xlabel('SDSS Filters')
ax.set_ylabel('Magnitude [mag]')
plt.legend()
# plt.savefig('2017-08-09- not getting correct sed.pdf')
plt.show() | _____no_output_____ | MIT | figures/Check-SED.ipynb | benjaminrose/SNIa-Local-Environments |
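As a rough sanity check on the scale factor (my own back-of-the-envelope assumption, not something computed in this notebook): since FSPS magnitudes are for 1 solar mass and a constant offset enters as $m = m_{1 M_\odot} + c$, the implied stellar mass is roughly $10^{-c/2.5}\,M_\odot$, so $c = -25$ corresponds to about $10^{10}\,M_\odot$.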
Check Newer Results

On 2017-08-24 I re-ran the whole analysis method and it got a closer answer on the circle test (particularly with the log(Z_sol)), but it was not perfect. Here I want to compare the SED output results. | fit0824_sed = np.array([42.29, 41.43, 41.21, 40.98, 40.93])
fit0824_c = -25.70
plt.figure('newer fit test')
fig, ax = plt.subplots(1,1)
ax.plot(wavelengths, input_sed+input_c, label='Input Values')
# ax.plot(wavelengths, [20.36, 18.76, 17.99, 17.67, 17.39]) # the in text file numbers.
ax.plot(wavelengths, fit1_sed+fit1_c, label='Old Full Fit')
ax.plot(wavelengths, fit0824_sed+fit0824_c, label='08-24 Fit')
plt.gca().invert_yaxis()
ax.set_xticks(wavelengths)
ax.set_xticklabels(filters)
ax.set_xlabel('SDSS Filters')
ax.set_ylabel('Magnitude [mag]')
plt.legend()
plt.savefig('2017-09-05 not getting correct sed.pdf')
plt.show() | _____no_output_____ | MIT | figures/Check-SED.ipynb | benjaminrose/SNIa-Local-Environments |
**Summary Of Findings**: It was found that wildfire frequency across the United States has been increasing in the past decade. Although fires and fire damage were generally localized to the west coast in the past, fire frequency has been gradually increasing in states east of it in the continental US; in 2021, midwestern states had fire counts similar to those found in West Coast states in 2014 and 2015. Although fire frequency has been increasing, the overall area of land affected by wildfires has remained within a similar range for the past 20 years. It was also found that the number of recorded fires did not necessarily correlate with the area affected for each state. While the degree of fire coverage has remained relatively consistent, the distribution of burned area across the United States has changed over the years. In the early 2000s, the majority of wildfire area was almost entirely localized to Alaska and the West Coast; by 2021, the majority of the US had seen more than minimal fire coverage. Throughout the past decade, hot spots on the continental US have remained relatively consistent; the West Coast will probably continue to be considered a hot spot, and new ones may become prominent in the Midwest. Regardless of hot spots, fire activity has generally increased across the United States. | !apt-get install openjdk-8-jdk-headless -qq > /dev/null
!wget https://dlcdn.apache.org/spark/spark-3.2.0/spark-3.2.0-bin-hadoop3.2.tgz | --2021-12-14 04:34:01-- https://dlcdn.apache.org/spark/spark-3.2.0/spark-3.2.0-bin-hadoop3.2.tgz
Resolving dlcdn.apache.org (dlcdn.apache.org)... 151.101.2.132, 2a04:4e42::644
Connecting to dlcdn.apache.org (dlcdn.apache.org)|151.101.2.132|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 300965906 (287M) [application/x-gzip]
Saving to: ‘spark-3.2.0-bin-hadoop3.2.tgz’
spark-3.2.0-bin-had 100%[===================>] 287.02M 180MB/s in 1.6s
2021-12-14 04:34:03 (180 MB/s) - ‘spark-3.2.0-bin-hadoop3.2.tgz’ saved [300965906/300965906]
| MIT | 006_bd_proj_dennis.ipynb | sh5864/bigdata-proj |
!tar xvzf spark-3.2.0-bin-hadoop3.2.tgz
!ls /content/spark-3.2.0-bin-hadoop3.2
# Set the ‘environment’ path
import os
#os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["SPARK_HOME"] = "/content/spark-3.2.0-bin-hadoop3.2"
!pip install -q findspark
import findspark
findspark.init()
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.types import *
from pyspark.sql.window import Window
import pandas as pd
import matplotlib.pyplot as plt
spark = SparkSession.builder\
.master("local[*]")\
.appName("final-project")\
.getOrCreate()
sc = spark.sparkContext
sc.setLogLevel("ERROR")
sc
#The wildfire location database
locatData = spark.read.option("header",True) \
.option("inferSchema", True) \
.csv("WFIGS_-_Wildland_Fire_Locations_Full_History.csv") | _____no_output_____ | MIT | 006_bd_proj_dennis.ipynb | sh5864/bigdata-proj |
Fire Frequency By Year | #Fires not considered "wildfire" are first filtered out
#locTime will be used to focus on the frequency of wildfires per state
#POOState - Location of wildfire at time of discovery
#FireDiscoveryDateTime - Date when the fire was discovered.
locatData = locatData.filter(locatData["IncidentTypeCategory"] == "WF")
locTime = locatData.select(substring(locatData["POOState"],0,6).alias("State Occurred"),
substring(locatData['FireDiscoveryDateTime'],0,4).alias("Year"))
#Unusable rows are filtered out.
locTime = locTime.filter((locTime["year"].isNotNull()) & (locTime["State Occurred"].isNotNull())) | _____no_output_____ | MIT | 006_bd_proj_dennis.ipynb | sh5864/bigdata-proj |
A significant difference in wildfire frequency was found between 2013 and 2014; it is assumed the years before 2014 had incomplete data. |
locTime.groupBy("Year").count().orderBy("year").show()
| +----+-----+
|Year|count|
+----+-----+
|2003| 1|
|2004| 1|
|2008| 1|
|2009| 1|
|2010| 2|
|2014|12634|
|2015|19633|
|2016|19798|
|2017|25114|
|2018|22627|
|2019|25451|
|2020|33348|
|2021|34488|
+----+-----+
| MIT | 006_bd_proj_dennis.ipynb | sh5864/bigdata-proj |
Number of Fires across the US per state | #To gain insights into the US results, areas outside the US are filtered out.
locTime = locTime.filter(locTime["Year"] > 2013)
locTime = locTime.filter(locTime["State Occurred"].contains("US"))
locTime = locTime.withColumn("State Occurred",substring(locTime["State Occurred"],4,6))
locTime.show()
totalFiresPerState = locTime.groupBy("State Occurred").count()
import plotly.express as px
import pandas as pd
fig = px.choropleth(totalFiresPerState.toPandas(), locations='State Occurred',locationmode = "USA-states",color = "count",
scope='usa')
fig.update_layout(
width=800,
height=600)
fig.show() | _____no_output_____ | MIT | 006_bd_proj_dennis.ipynb | sh5864/bigdata-proj |
Findings: From the figure above, it can be seen that fires in the last decade have mostly occurred in the western portion of the United States, and have been most prevalent on the west coast as well as in Montana and Arizona.

Number of Fires Per Year Per State | firePerState = locTime.filter(locTime["year"].isNotNull())\
.groupBy("year",'State Occurred').count().orderBy("Year")
firePerState.show()
import plotly.express as px
import pandas as pd
fig = px.choropleth(firePerState.toPandas(), locations='State Occurred',locationmode = "USA-states",color = "count",range_color = [0,5000],
animation_frame="year", animation_group="State Occurred",scope='usa')
fig.update_layout(
width=800,
height=600)
fig.show() | _____no_output_____ | MIT | 006_bd_proj_dennis.ipynb | sh5864/bigdata-proj |
**Findings**: From the above figure, we see a general rise in wildfire occurrences over the years. The west coast has consistently had the highest number of fires. Originally the majority of fires originated on the west coast, but states east of it have steadily seen increasing occurrences. In 2021, midwestern states such as North Dakota and Minnesota had fire counts similar to those of western states in 2014 and 2015. It should be noted that data for 2021 is incomplete, so there may still be a gradual increase in fire count over the year.

Acres Burned In Historical Data Across the US | #Primarily tracks historical fire Perimeters from 2000-2018
oldPerimData = spark.read.option("header",True) \
.option("inferSchema", True) \
.csv("Historic_GeoMAC_Perimeters_Combined_2000-2018.csv")
#Meaningful data is cleaned and selected
oldPerimTime = oldPerimData.select((oldPerimData["state"]).alias("State Occurred"),
oldPerimData["gisacres"].alias("area(acres)"),
oldPerimData['fireyear'].alias("year"))
oldPerimTime = oldPerimTime.filter(oldPerimTime["year"].isNotNull())
oldPerimTime = oldPerimTime.filter(oldPerimTime["year"].cast("int").isNotNull())
oldOverall = oldPerimTime.groupBy("year").agg(sum('area(acres)').alias("area (acres)")).orderBy("year")
#The Data in this csv primarily tracks the area of each recorded fire; data is mostly available for 2020 and 2021.
perimData = spark.read.option("header",True) \
.option("inferSchema", True) \
.csv("WFIGS_-_Wildland_Fire_Perimeters_Full_History.csv")
#Data similar to columns found in oldPerimTime is cleaned and selected here.
recentTime = perimData.select(substring(perimData["irwin_POOState"],4,6).alias("State Occurred"),
perimData["poly_Acres_AutoCalc"].alias("area(acres)"),
substring(perimData['irwin_ContainmentDateTime'],0,4).alias("year"))
recentTime = recentTime.filter(recentTime["year"].isNotNull())
recentOverall = recentTime.groupBy("year").agg(sum('area(acres)').alias("area (acres)")).orderBy("year")
recentOverall = recentOverall.filter((recentOverall["year"] == 2020) | (recentOverall["year"] == 2021))
recentOverall.show()
combinedOverall = oldOverall.union(recentOverall)
yearMonth = combinedOverall.select("year").rdd.flatMap(lambda x: x).collect()
areaDamage = combinedOverall.select("area (acres)").rdd.flatMap(lambda x: x).collect()
ticks = [0,5,10,15,20]
plt.plot(yearMonth,areaDamage)
plt.xticks(ticks)
plt.xlabel("Year")
plt.ylabel("Area Affected (acres)")
plt.title("Wildfire Damage from 2000-2021") | _____no_output_____ | MIT | 006_bd_proj_dennis.ipynb | sh5864/bigdata-proj |
**Findings**: In the above figure, it is found that the total area damaged by wildfires has been inconsistent throughout the past two decades; while fires are increasing in frequency, the area affected does not necessarily increase.

Total Area Burned Per State | damagePerState = oldPerimTime.union(recentTime)
damagePerStateOverall= damagePerState.groupBy("State Occurred").agg(sum('area(acres)').alias("total area burned (acres)"))
import plotly.express as px
import pandas as pd
fig = px.choropleth(damagePerStateOverall.toPandas(), locations='State Occurred',locationmode = "USA-states",color = "total area burned (acres)",
scope='usa')
fig.update_layout(
width=800,
height=600)
fig.show() | _____no_output_____ | MIT | 006_bd_proj_dennis.ipynb | sh5864/bigdata-proj |
**Findings**: The above map shows that the most significant damage was found on the west coast; this is consistent with and supports the findings from the occurrences map. Some states that had a high number of fire occurrences, such as Texas, have not seen proportional quantities of acres burned. In contrast to its low number of reported fires over the years, Alaska has the most significant fire damage of any state.

Area Burned Per State Per Year | damagePerStateYearly= damagePerState.groupBy("year","State Occurred").agg(sum('area(acres)').alias("total area burned (acres)")).orderBy("year")
import plotly.express as px
import pandas as pd
fig = px.choropleth(damagePerStateYearly.toPandas(), locations='State Occurred',locationmode = "USA-states",color = "total area burned (acres)",
range_color = [0,1000000],animation_frame="year", animation_group="State Occurred",scope='usa')
fig.update_layout(
width=800,
height=600)
fig.show() | _____no_output_____ | MIT | 006_bd_proj_dennis.ipynb | sh5864/bigdata-proj |
Write a pandas dataframe to disk as a gzip-compressed csv
- df.to_csv('dfsavename.csv.gz', compression='gzip')

Read from disk
- df = pd.read_csv('dfsavename.csv.gz', compression='gzip')

Useful magics
- %%timeit for the whole cell
- %timeit for a specific line
- %%latex to render the cell as a block of latex
- %prun and %%prun | DATASET_PATH = '/media/rs/0E06CD1706CD0127/Kapok/WSDM/'
HDF_FILENAME = DATASET_PATH + 'music_info.h5'
HDF_TRAIN_FEATURE_FILENAME = DATASET_PATH + 'music_train_feature_part.h5'
HDF_TEST_FEATURE_FILENAME = DATASET_PATH + 'music_test_feature_part.h5'
def set_logging(logger_name, logger_file_name):
log = logging.getLogger(logger_name)
log.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
print_formatter = logging.Formatter('%(message)s')
file_formatter = logging.Formatter('%(asctime)s - %(name)s_%(levelname)s: %(message)s')
# create file handler which logs even debug messages
fh = logging.FileHandler(logger_file_name, mode='w')
fh.setLevel(logging.DEBUG)
fh.setFormatter(file_formatter)
log.addHandler(fh)
# both output to console and file
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(print_formatter)
log.addHandler(consoleHandler)
return log
log = set_logging('MUSIC', DATASET_PATH + 'music_test_xgboost.log')
log.info('here is an info message.')
store_data = pd.HDFStore(HDF_FILENAME)
log.info(store_data['all_train_withextra'].head())
def clip_by_percent(hist, num_percent):
return hist[(hist >= hist[int( len(hist.index) * num_percent )]) == True]
def clip_by_value(hist, value):
return hist[(hist >= value) == True]
def create_bag_of_words(input_df, percent, column_name):
input_hist = input_df[column_name].value_counts(sort=True, ascending=False)
input_select = clip_by_percent(input_hist, percent).index
log.info('{} item are selected.'.format(len(input_select)))
# the total number of the other items
total_others = np.sum(input_hist) - np.sum(input_hist[input_select])
# all hist values are log transformed accouting the popularity
clip_hist_with_log = defaultdict(lambda: np.log(total_others))
for k,v in dict(np.log(input_hist[input_select])).items():
clip_hist_with_log[k] = v
# print(input_hist[input_select])
# print(dict(np.log(input_hist[input_select])))
input_map = defaultdict(lambda: column_name + ' ' + 'others')
for input_item in input_select:
input_map[input_item] = column_name + ' ' + input_item
# item name in input_map are "column_name + ' ' + input_item"
# item name in clip_hist_with_log are "input_item"
return input_map, clip_hist_with_log
# 181 ms ± 420 µs
def word_bag_encode(input_data, column, word_map, word_hist):
col_index = input_data.columns.get_loc(column) + 1
count_list = [0 for _ in range(len(word_map))]
count_dict = dict(zip(list(word_map.keys()), count_list))
count_dict['others'] = 0
new_columns = [column + ' ' + s for s in count_dict.keys()]
all_df = pd.DataFrame(data = None, columns = new_columns)
delay_rate = 0.8 # must be less than 1
for cur_row in input_data.itertuples():
if isinstance(cur_row[col_index], str):
df = pd.DataFrame([list(count_dict.values())], columns=new_columns)
splited_list = re.split(r'[|/]+',cur_row[col_index])
list_len = len(splited_list)
# the weight of each position of the array, are decayed by the ratio delay_rate, and their sum are 1
# so according to the geometric series summation formula, the iniatial weight are caculate as follow
initial_weight = (1-delay_rate)/(1 - np.power(delay_rate, list_len))
for index, s in enumerate(splited_list):
word_stripped = s.strip(' \"\t\s\n')
df[word_map.get(word_stripped, column + ' others')] += initial_weight / (word_hist.get(word_stripped, word_hist['others'])) #word_hist[word_stripped]
# defaultdict will auto insert missing key
#df[word_map[word_stripped]] += initial_weight / (word_hist.get(word_stripped, word_hist['others'])) #word_hist[word_stripped]
initial_weight *= delay_rate
all_df = all_df.append(df, ignore_index=True)
# NAN fix
else:
all_df = all_df.append(pd.DataFrame([[0] * len(new_columns)], columns=new_columns), ignore_index=True)
return all_df
# 7.09 ms ± 43.2 µs
def word_bag_encode_apply(input_data, column, word_map, word_hist):
new_columns = [column + ' ' + s for s in word_map.keys()]
new_columns.append(column + ' ' + 'others')
delay_rate = 0.8 # must be less than 1
def encode_routine(str_value):
series_dict = dict(zip(new_columns, [0.] * len(new_columns)))
if isinstance(str_value, str):
splited_list = re.split(r'[|/]+',str_value)
list_len = len(splited_list)
# the weight of each position of the array, are decayed by the ratio delay_rate, and their sum are 1
# so according to the geometric series summation formula, the iniatial weight are caculate as follow
initial_weight = (1-delay_rate)/(1 - np.power(delay_rate, list_len))
for index, s in enumerate(splited_list):
word_stripped = s.strip(' \"\t\s\n')
series_dict[word_map.get(word_stripped, column + ' others')] += initial_weight / (word_hist.get(word_stripped, word_hist['others'])) #word_hist[word_stripped]
initial_weight *= delay_rate
return pd.Series(series_dict)
return input_data[column].apply(lambda s: encode_routine(s))
# 171 µs ± 693 ns
def word_bag_encode_numpy(input_data, column, word_map, word_hist):
new_columns = [s for s in word_map.keys()]
new_columns.append('others')
delay_rate = 0.8 # must be less than 1
num_columns = len(new_columns)
str_indice_dict = dict(zip(new_columns, list(range(num_columns))))
def encode_routine(str_value):
temp_hist = np.zeros(num_columns, dtype=float)
if isinstance(str_value, str):
splited_list = re.split(r'[|/]+',str_value)
list_len = len(splited_list)
# the weight of each position of the array, are decayed by the ratio delay_rate, and their sum are 1
# so according to the geometric series summation formula, the iniatial weight are caculate as follow
initial_weight = (1-delay_rate)/(1 - np.power(delay_rate, list_len))
for index, s in enumerate(splited_list):
word_stripped = s.strip(' \"\t\s\n')
temp_hist[str_indice_dict.get(word_stripped, num_columns-1)] += initial_weight / (word_hist.get(word_stripped, word_hist['others'])) #word_hist[word_stripped]
initial_weight *= delay_rate
return temp_hist
# actually we cannot use vectorize #vf = np.vectorize(encode_routine)
#def fromiter(x):
#return np.fromiter((f(xi) for xi in x), x.dtype)
numpy_str = np.array(input_data[column].values, dtype=object)
#return np.array(map(encode_routine, numpy_str))
#return np.fromiter((encode_routine(xi) for xi in numpy_str), numpy_str.dtype, count=len(numpy_str))
return np.array([encode_routine(xi) for xi in numpy_str]), [column + ' ' + s for s in new_columns]
def feature_encoder_impl(source_data, column_name, map_dict, hist_dict):
feature_array, head_name = word_bag_encode_numpy(source_data, column_name, map_dict, hist_dict)
return pd.DataFrame(data = feature_array, columns = head_name)
def feature_encoder(filename_to_store, music_info_data, key_to_encode, batch_size):
total_num_examples = len(music_info_data[key_to_encode].index)
num_steps = int(total_num_examples / batch_size) + 1
cur_step = 0
next_step = 0
composer_map, composer_hist = create_bag_of_words(music_info_data['all_composer'], 0.001, 'composer')
artist_name_map, artist_name_hist = create_bag_of_words(music_info_data['all_artist_name'], 0.001, 'artist_name')
lyricist_map, lyricist_hist = create_bag_of_words(music_info_data['all_lyricist'], 0.002, 'lyricist')
h5store = pd.HDFStore(filename_to_store, mode='w', complib='zlib', complevel=1)
for _step in range(num_steps):
start_time = time.time()
cur_batch_size = _step + 1 == num_steps and total_num_examples - cur_step or batch_size
next_step = cur_step + cur_batch_size
cur_batch_data = store_data[key_to_encode][cur_step:next_step]
composer_feature = feature_encoder_impl(cur_batch_data, 'composer', composer_map, composer_hist)
artist_name_feature = feature_encoder_impl(cur_batch_data, 'artist_name', artist_name_map, artist_name_hist)
lyricist_feature = feature_encoder_impl(cur_batch_data, 'lyricist', lyricist_map, lyricist_hist)
cur_batch_data.drop('composer', axis=1, inplace=True)
cur_batch_data.drop('artist_name', axis=1, inplace=True)
cur_batch_data.drop('lyricist', axis=1, inplace=True)
#print(pd.concat([cur_batch_data, composer_feature, artist_name_feature, lyricist_feature], join='inner', axis=1, copy=True))
#break
table_to_save = pd.concat([composer_feature, artist_name_feature, lyricist_feature], join='inner', axis=1, copy=True)
#print(dict(zip(table_to_save.columns, [150]*len(table_to_save.columns))))
#break
# if _step == 0:
# h5store.append(key_to_encode, table_to_save, min_itemsize=dict(zip(table_to_save.columns, [150]*len(table_to_save.columns))))
# else:
h5store.append(key_to_encode, table_to_save)
#break
time_elapsed = time.time() - start_time
if _step % 5 == 0:
log.info('cur step: {} of {}, from {} to {}, {:5.3f}sec/batch.'.format(_step, num_steps, cur_step, next_step, time_elapsed))
# print(composer_feature)
# print(artist_name_feature)
# print(lyricist_feature)
# break
cur_step = next_step
log.info(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
feature_encoder(HDF_TRAIN_FEATURE_FILENAME, store_data, 'all_train_withextra', 102400)
feature_encoder(HDF_TEST_FEATURE_FILENAME, store_data, 'all_test_withextra', 102400)
store_data.close()
log.info(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
h5store = pd.HDFStore(HDF_TRAIN_FEATURE_FILENAME, complib='zlib', complevel=1)
%%timeit
print(h5store.select('all_train_withextra','index>0 & index<10000'))
h5store.close() | _____no_output_____ | MIT | MusicRecommendation/.ipynb_checkpoints/TestHDFTables-checkpoint.ipynb | HiKapok/KaggleCompetitions |
The lidar system, data (1 of 2 datasets)
========================================
Generate a chart of the data recorded by the lidar system | import numpy as np
import matplotlib.pyplot as plt
waveform_1 = np.load('waveform_1.npy')
t = np.arange(len(waveform_1))
fig, ax = plt.subplots(figsize=(8, 6))
plt.plot(t, waveform_1)
plt.xlabel('Time [ns]')
plt.ylabel('Amplitude [bins]')
plt.show() | _____no_output_____ | CC-BY-4.0 | _downloads/plot_optimize_lidar_data.ipynb | scipy-lectures/scipy-lectures.github.com |
Copyright 2020 The TensorFlow Authors. | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | _____no_output_____ | Apache-2.0 | docs/tutorials/keras_layers.ipynb | sarvex/lattice-1 |
Creating Keras Models with TFL Layers

Overview

You can use TFL Keras layers to construct Keras models with monotonicity and other shape constraints. This example builds and trains a calibrated lattice model for the UCI heart dataset using TFL layers.

In a calibrated lattice model, each feature is transformed by a `tfl.layers.PWLCalibration` or a `tfl.layers.CategoricalCalibration` layer and the results are nonlinearly fused using a `tfl.layers.Lattice`.

Setup

Installing TF Lattice package: | #@test {"skip": true}
!pip install tensorflow-lattice pydot | _____no_output_____ | Apache-2.0 | docs/tutorials/keras_layers.ipynb | sarvex/lattice-1 |
Importing required packages: | import tensorflow as tf
import logging
import numpy as np
import pandas as pd
import sys
import tensorflow_lattice as tfl
from tensorflow import feature_column as fc
logging.disable(sys.maxsize) | _____no_output_____ | Apache-2.0 | docs/tutorials/keras_layers.ipynb | sarvex/lattice-1 |
Downloading the UCI Statlog (Heart) dataset: | # UCI Statlog (Heart) dataset.
csv_file = tf.keras.utils.get_file(
'heart.csv', 'http://storage.googleapis.com/download.tensorflow.org/data/heart.csv')
training_data_df = pd.read_csv(csv_file).sample(
frac=1.0, random_state=41).reset_index(drop=True)
training_data_df.head() | _____no_output_____ | Apache-2.0 | docs/tutorials/keras_layers.ipynb | sarvex/lattice-1 |
Setting the default values used for training in this guide: | LEARNING_RATE = 0.1
BATCH_SIZE = 128
NUM_EPOCHS = 100 | _____no_output_____ | Apache-2.0 | docs/tutorials/keras_layers.ipynb | sarvex/lattice-1 |
Sequential Keras ModelThis example creates a Sequential Keras model and only uses TFL layers.Lattice layers expect `input[i]` to be within `[0, lattice_sizes[i] - 1.0]`, so we need to define the lattice sizes ahead of the calibration layers so we can properly specify output range of the calibration layers. | # Lattice layer expects input[i] to be within [0, lattice_sizes[i] - 1.0], so
lattice_sizes = [3, 2, 2, 2, 2, 2, 2] | _____no_output_____ | Apache-2.0 | docs/tutorials/keras_layers.ipynb | sarvex/lattice-1 |
We use a `tfl.layers.ParallelCombination` layer to group together calibration layers which have to be executed in parallel in order to be able to create a Sequential model. | combined_calibrators = tfl.layers.ParallelCombination() | _____no_output_____ | Apache-2.0 | docs/tutorials/keras_layers.ipynb | sarvex/lattice-1 |
We create a calibration layer for each feature and add it to the parallel combination layer. For numeric features we use `tfl.layers.PWLCalibration`, and for categorical features we use `tfl.layers.CategoricalCalibration`. | # ############### age ###############
calibrator = tfl.layers.PWLCalibration(
# Every PWLCalibration layer must have keypoints of piecewise linear
# function specified. Easiest way to specify them is to uniformly cover
# entire input range by using numpy.linspace().
input_keypoints=np.linspace(
training_data_df['age'].min(), training_data_df['age'].max(), num=5),
# You need to ensure that input keypoints have same dtype as layer input.
# You can do it by setting dtype here or by providing keypoints in such
# format which will be converted to desired tf.dtype by default.
dtype=tf.float32,
# Output range must correspond to expected lattice input range.
output_min=0.0,
output_max=lattice_sizes[0] - 1.0,
)
combined_calibrators.append(calibrator)
# ############### sex ###############
# For boolean features simply specify CategoricalCalibration layer with 2
# buckets.
calibrator = tfl.layers.CategoricalCalibration(
num_buckets=2,
output_min=0.0,
output_max=lattice_sizes[1] - 1.0,
# Initializes all outputs to (output_min + output_max) / 2.0.
kernel_initializer='constant')
combined_calibrators.append(calibrator)
# ############### cp ###############
calibrator = tfl.layers.PWLCalibration(
# Here instead of specifying dtype of layer we convert keypoints into
# np.float32.
input_keypoints=np.linspace(1, 4, num=4, dtype=np.float32),
output_min=0.0,
output_max=lattice_sizes[2] - 1.0,
monotonicity='increasing',
# You can specify TFL regularizers as a tuple ('regularizer name', l1, l2).
kernel_regularizer=('hessian', 0.0, 1e-4))
combined_calibrators.append(calibrator)
# ############### trestbps ###############
calibrator = tfl.layers.PWLCalibration(
# Alternatively, you might want to use quantiles as keypoints instead of
# uniform keypoints
input_keypoints=np.quantile(training_data_df['trestbps'],
np.linspace(0.0, 1.0, num=5)),
dtype=tf.float32,
# Together with quantile keypoints you might want to initialize piecewise
# linear function to have 'equal_slopes' in order for output of layer
# after initialization to preserve original distribution.
kernel_initializer='equal_slopes',
output_min=0.0,
output_max=lattice_sizes[3] - 1.0,
# You might consider clamping extreme inputs of the calibrator to output
# bounds.
clamp_min=True,
clamp_max=True,
monotonicity='increasing')
combined_calibrators.append(calibrator)
# ############### chol ###############
calibrator = tfl.layers.PWLCalibration(
# Explicit input keypoint initialization.
input_keypoints=[126.0, 210.0, 247.0, 286.0, 564.0],
dtype=tf.float32,
output_min=0.0,
output_max=lattice_sizes[4] - 1.0,
# Monotonicity of calibrator can be decreasing. Note that corresponding
# lattice dimension must have INCREASING monotonicity regardless of
# monotonicity direction of calibrator.
monotonicity='decreasing',
# Convexity together with decreasing monotonicity result in diminishing
# return constraint.
convexity='convex',
# You can specify list of regularizers. You are not limited to TFL
# regularizrs. Feel free to use any :)
kernel_regularizer=[('laplacian', 0.0, 1e-4),
tf.keras.regularizers.l1_l2(l1=0.001)])
combined_calibrators.append(calibrator)
# ############### fbs ###############
calibrator = tfl.layers.CategoricalCalibration(
num_buckets=2,
output_min=0.0,
output_max=lattice_sizes[5] - 1.0,
# For categorical calibration layer monotonicity is specified for pairs
# of indices of categories. Output for first category in pair will be
# smaller than output for second category.
#
# Don't forget to set monotonicity of corresponding dimension of Lattice
# layer to '1'.
monotonicities=[(0, 1)],
# This initializer is identical to default one('uniform'), but has fixed
# seed in order to simplify experimentation.
kernel_initializer=tf.keras.initializers.RandomUniform(
minval=0.0, maxval=lattice_sizes[5] - 1.0, seed=1))
combined_calibrators.append(calibrator)
# ############### restecg ###############
calibrator = tfl.layers.CategoricalCalibration(
num_buckets=3,
output_min=0.0,
output_max=lattice_sizes[6] - 1.0,
# Categorical monotonicity can be partial order.
monotonicities=[(0, 1), (0, 2)],
# Categorical calibration layer supports standard Keras regularizers.
kernel_regularizer=tf.keras.regularizers.l1_l2(l1=0.001),
kernel_initializer='constant')
combined_calibrators.append(calibrator) | _____no_output_____ | Apache-2.0 | docs/tutorials/keras_layers.ipynb | sarvex/lattice-1 |
We then create a lattice layer to nonlinearly fuse the outputs of the calibrators.Note that we need to specify the monotonicity of the lattice to be increasing for required dimensions. The composition with the direction of the monotonicity in the calibration will result in the correct end-to-end direction of monotonicity. This includes partial monotonicity of CategoricalCalibration layer. | lattice = tfl.layers.Lattice(
lattice_sizes=lattice_sizes,
monotonicities=[
'increasing', 'none', 'increasing', 'increasing', 'increasing',
'increasing', 'increasing'
],
output_min=0.0,
output_max=1.0) | _____no_output_____ | Apache-2.0 | docs/tutorials/keras_layers.ipynb | sarvex/lattice-1 |
We can then create a sequential model using the combined calibrators and lattice layers. | model = tf.keras.models.Sequential()
model.add(combined_calibrators)
model.add(lattice) | _____no_output_____ | Apache-2.0 | docs/tutorials/keras_layers.ipynb | sarvex/lattice-1 |
Training works the same as any other keras model. | features = training_data_df[[
'age', 'sex', 'cp', 'trestbps', 'chol', 'fbs', 'restecg'
]].values.astype(np.float32)
target = training_data_df[['target']].values.astype(np.float32)
model.compile(
loss=tf.keras.losses.mean_squared_error,
optimizer=tf.keras.optimizers.Adagrad(learning_rate=LEARNING_RATE))
model.fit(
features,
target,
batch_size=BATCH_SIZE,
epochs=NUM_EPOCHS,
validation_split=0.2,
shuffle=False,
verbose=0)
model.evaluate(features, target) | _____no_output_____ | Apache-2.0 | docs/tutorials/keras_layers.ipynb | sarvex/lattice-1 |
Functional Keras ModelThis example uses a functional API for Keras model construction.As mentioned in the previous section, lattice layers expect `input[i]` to be within `[0, lattice_sizes[i] - 1.0]`, so we need to define the lattice sizes ahead of the calibration layers so we can properly specify output range of the calibration layers. | # We are going to have 2-d embedding as one of lattice inputs.
lattice_sizes = [3, 2, 2, 3, 3, 2, 2] | _____no_output_____ | Apache-2.0 | docs/tutorials/keras_layers.ipynb | sarvex/lattice-1 |
For each feature, we need to create an input layer followed by a calibration layer. For numeric features we use `tfl.layers.PWLCalibration` and for categorical features we use `tfl.layers.CategoricalCalibration`. | model_inputs = []
lattice_inputs = []
# ############### age ###############
age_input = tf.keras.layers.Input(shape=[1], name='age')
model_inputs.append(age_input)
age_calibrator = tfl.layers.PWLCalibration(
# Every PWLCalibration layer must have keypoints of piecewise linear
# function specified. Easiest way to specify them is to uniformly cover
# entire input range by using numpy.linspace().
input_keypoints=np.linspace(
training_data_df['age'].min(), training_data_df['age'].max(), num=5),
# You need to ensure that input keypoints have same dtype as layer input.
# You can do it by setting dtype here or by providing keypoints in such
# format which will be converted to desired tf.dtype by default.
dtype=tf.float32,
# Output range must correspond to expected lattice input range.
output_min=0.0,
output_max=lattice_sizes[0] - 1.0,
monotonicity='increasing',
name='age_calib',
)(
age_input)
lattice_inputs.append(age_calibrator)
# ############### sex ###############
# For boolean features simply specify CategoricalCalibration layer with 2
# buckets.
sex_input = tf.keras.layers.Input(shape=[1], name='sex')
model_inputs.append(sex_input)
sex_calibrator = tfl.layers.CategoricalCalibration(
num_buckets=2,
output_min=0.0,
output_max=lattice_sizes[1] - 1.0,
# Initializes all outputs to (output_min + output_max) / 2.0.
kernel_initializer='constant',
name='sex_calib',
)(
sex_input)
lattice_inputs.append(sex_calibrator)
# ############### cp ###############
cp_input = tf.keras.layers.Input(shape=[1], name='cp')
model_inputs.append(cp_input)
cp_calibrator = tfl.layers.PWLCalibration(
# Here instead of specifying dtype of layer we convert keypoints into
# np.float32.
input_keypoints=np.linspace(1, 4, num=4, dtype=np.float32),
output_min=0.0,
output_max=lattice_sizes[2] - 1.0,
monotonicity='increasing',
# You can specify TFL regularizers as tuple ('regularizer name', l1, l2).
kernel_regularizer=('hessian', 0.0, 1e-4),
name='cp_calib',
)(
cp_input)
lattice_inputs.append(cp_calibrator)
# ############### trestbps ###############
trestbps_input = tf.keras.layers.Input(shape=[1], name='trestbps')
model_inputs.append(trestbps_input)
trestbps_calibrator = tfl.layers.PWLCalibration(
# Alternatively, you might want to use quantiles as keypoints instead of
# uniform keypoints
input_keypoints=np.quantile(training_data_df['trestbps'],
np.linspace(0.0, 1.0, num=5)),
dtype=tf.float32,
# Together with quantile keypoints you might want to initialize piecewise
# linear function to have 'equal_slopes' in order for output of layer
# after initialization to preserve original distribution.
kernel_initializer='equal_slopes',
output_min=0.0,
output_max=lattice_sizes[3] - 1.0,
# You might consider clamping extreme inputs of the calibrator to output
# bounds.
clamp_min=True,
clamp_max=True,
monotonicity='increasing',
name='trestbps_calib',
)(
trestbps_input)
lattice_inputs.append(trestbps_calibrator)
# ############### chol ###############
chol_input = tf.keras.layers.Input(shape=[1], name='chol')
model_inputs.append(chol_input)
chol_calibrator = tfl.layers.PWLCalibration(
# Explicit input keypoint initialization.
input_keypoints=[126.0, 210.0, 247.0, 286.0, 564.0],
output_min=0.0,
output_max=lattice_sizes[4] - 1.0,
# Monotonicity of calibrator can be decreasing. Note that corresponding
# lattice dimension must have INCREASING monotonicity regardless of
# monotonicity direction of calibrator.
monotonicity='decreasing',
# Convexity together with decreasing monotonicity result in diminishing
# return constraint.
convexity='convex',
# You can specify list of regularizers. You are not limited to TFL
# regularizrs. Feel free to use any :)
kernel_regularizer=[('laplacian', 0.0, 1e-4),
tf.keras.regularizers.l1_l2(l1=0.001)],
name='chol_calib',
)(
chol_input)
lattice_inputs.append(chol_calibrator)
# ############### fbs ###############
fbs_input = tf.keras.layers.Input(shape=[1], name='fbs')
model_inputs.append(fbs_input)
fbs_calibrator = tfl.layers.CategoricalCalibration(
num_buckets=2,
output_min=0.0,
output_max=lattice_sizes[5] - 1.0,
# For categorical calibration layer monotonicity is specified for pairs
# of indices of categories. Output for first category in pair will be
# smaller than output for second category.
#
# Don't forget to set monotonicity of corresponding dimension of Lattice
# layer to '1'.
monotonicities=[(0, 1)],
# This initializer is identical to default one ('uniform'), but has fixed
# seed in order to simplify experimentation.
kernel_initializer=tf.keras.initializers.RandomUniform(
minval=0.0, maxval=lattice_sizes[5] - 1.0, seed=1),
name='fbs_calib',
)(
fbs_input)
lattice_inputs.append(fbs_calibrator)
# ############### restecg ###############
restecg_input = tf.keras.layers.Input(shape=[1], name='restecg')
model_inputs.append(restecg_input)
restecg_calibrator = tfl.layers.CategoricalCalibration(
num_buckets=3,
output_min=0.0,
output_max=lattice_sizes[6] - 1.0,
# Categorical monotonicity can be partial order.
monotonicities=[(0, 1), (0, 2)],
# Categorical calibration layer supports standard Keras regularizers.
kernel_regularizer=tf.keras.regularizers.l1_l2(l1=0.001),
kernel_initializer='constant',
name='restecg_calib',
)(
restecg_input)
lattice_inputs.append(restecg_calibrator) | _____no_output_____ | Apache-2.0 | docs/tutorials/keras_layers.ipynb | sarvex/lattice-1 |
We then create a lattice layer to nonlinearly fuse the outputs of the calibrators.Note that we need to specify the monotonicity of the lattice to be increasing for required dimensions. The composition with the direction of the monotonicity in the calibration will result in the correct end-to-end direction of monotonicity. This includes partial monotonicity of `tfl.layers.CategoricalCalibration` layer. | lattice = tfl.layers.Lattice(
lattice_sizes=lattice_sizes,
monotonicities=[
'increasing', 'none', 'increasing', 'increasing', 'increasing',
'increasing', 'increasing'
],
output_min=0.0,
output_max=1.0,
name='lattice',
)(
lattice_inputs) | _____no_output_____ | Apache-2.0 | docs/tutorials/keras_layers.ipynb | sarvex/lattice-1 |
To add more flexibility to the model, we add an output calibration layer. | model_output = tfl.layers.PWLCalibration(
input_keypoints=np.linspace(0.0, 1.0, 5),
name='output_calib',
)(
lattice) | _____no_output_____ | Apache-2.0 | docs/tutorials/keras_layers.ipynb | sarvex/lattice-1 |
We can now create a model using the inputs and outputs. | model = tf.keras.models.Model(
inputs=model_inputs,
outputs=model_output)
tf.keras.utils.plot_model(model, rankdir='LR') | _____no_output_____ | Apache-2.0 | docs/tutorials/keras_layers.ipynb | sarvex/lattice-1 |
Training works the same as any other keras model. Note that, with our setup, input features are passed as separate tensors. | feature_names = ['age', 'sex', 'cp', 'trestbps', 'chol', 'fbs', 'restecg']
features = np.split(
training_data_df[feature_names].values.astype(np.float32),
indices_or_sections=len(feature_names),
axis=1)
target = training_data_df[['target']].values.astype(np.float32)
model.compile(
loss=tf.keras.losses.mean_squared_error,
optimizer=tf.keras.optimizers.Adagrad(LEARNING_RATE))
model.fit(
features,
target,
batch_size=BATCH_SIZE,
epochs=NUM_EPOCHS,
validation_split=0.2,
shuffle=False,
verbose=0)
model.evaluate(features, target) | _____no_output_____ | Apache-2.0 | docs/tutorials/keras_layers.ipynb | sarvex/lattice-1 |
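The regression cells below assume a handful of standard imports that are not shown in this excerpt; a minimal set (my assumption, inferred from the names used in the code) would be:

```python
import numpy as np
import pandas as pd
from itertools import combinations_with_replacement
from sklearn.datasets import make_regression
```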
Common Regression class | class Regression:
def __init__(self, learning_rate, iteration, regularization):
"""
:param learning_rate: A small value needed for gradient descent, default value is 0.1.
:param iteration: Number of training iterations, default value is 10,000.
"""
self.m = None
self.n = None
self.w = None
self.b = None
self.regularization = regularization # will be the l1/l2 regularization class according to the regression model.
self.lr = learning_rate
self.it = iteration
def cost_function(self, y, y_pred):
"""
:param y: Original target value.
:param y_pred: predicted target value.
"""
return (1 / (2*self.m)) * np.sum(np.square(y_pred - y)) + self.regularization(self.w)
def hypothesis(self, weights, bias, X):
"""
:param weights: parameter value weight.
:param X: Training samples.
"""
return np.dot(X, weights) #+ bias
def train(self, X, y):
"""
:param X: training data feature values ---> N dimensional vector.
:param y: training data target value -----> 1 dimensional array.
"""
# Insert constant ones for bias weights.
X = np.insert(X, 0, 1, axis=1)
# Target value should be in the shape of (n, 1) not (n, ).
# So, this will check that and change the shape to (n, 1), if not.
try:
y.shape[1]
except IndexError as e:
# we need to change it to the 1 D array, not a list.
print("ERROR: Target array should be a one dimentional array not a list"
"----> here the target value not in the shape of (n,1). \nShape ({shape_y_0},1) and {shape_y} not match"
.format(shape_y_0 = y.shape[0] , shape_y = y.shape))
return
# m is the number of training samples.
self.m = X.shape[0]
# n is the number of features.
self.n = X.shape[1]
# Set the initial weight.
self.w = np.zeros((self.n , 1))
# bias.
self.b = 0
for it in range(1, self.it+1):
# 1. Find the predicted value through the hypothesis.
# 2. Find the Cost function value.
# 3. Find the derivation of weights.
# 4. Apply Gradient Decent.
y_pred = self.hypothesis(self.w, self.b, X)
#print("iteration",it)
#print("y predict value",y_pred)
cost = self.cost_function(y, y_pred)
#print("Cost function",cost)
# Find the derivative of the cost with respect to the weights.
dw = (1/self.m) * np.dot(X.T, (y_pred - y)) + self.regularization.derivation(self.w)
#print("weights derivation",dw)
#db = -(2 / self.m) * np.sum((y_pred - y))
# change the weight parameter.
self.w = self.w - self.lr * dw
#print("updated weights",self.w)
#self.b = self.b - self.lr * db
if it % 10 == 0:
print("The Cost function for the iteration {}----->{} :)".format(it, cost))
def predict(self, test_X):
"""
:param test_X: feature values to predict.
"""
# Insert constant ones for bias weights.
test_X = np.insert(test_X, 0, 1, axis=1)
y_pred = self.hypothesis(self.w, self.b, test_X)
return y_pred | _____no_output_____ | MIT | MachineLearning/supervised_machine_learning/Polinamial_and_PlynomialRidge_Regression.ipynb | pavi-ninjaac/Machine_Learing_sratch |
Data Creation | # Define the training data.
X, y = make_regression(n_samples=50000, n_features=8)
# Reshape the target to a column vector of shape (n, 1).
y = y[:, np.newaxis]
print("="*100)
print("Number of training data samples-----> {}".format(X.shape[0]))
print("Number of training features --------> {}".format(X.shape[1]))
print("Shape of the target value ----------> {}".format(y.shape))
# display the data.
data = pd.DataFrame(X)
data.head()
# display the data.
data_y = pd.DataFrame(y)
data_y.head() | _____no_output_____ | MIT | MachineLearning/supervised_machine_learning/Polinamial_and_PlynomialRidge_Regression.ipynb | pavi-ninjaac/Machine_Learing_sratch |
Polynomial Regression from Scratch | def PolynomialFeature(X, degree):
"""
It is a type of feature engineering ---> adding more features based on the existing features
by squaring or cubing them.
:param X: data need to be converted.
:param degree: int- The degree of the polynomial that the features X will be transformed to.
"""
n_samples, n_features = X.shape
# get the index combinations.
combination = [combinations_with_replacement(range(n_features), i) for i in range(0, degree + 1)]
combination_index = [index for obj in combination for index in obj]
# generate an empty array with the new shape.
new_n_features = len(combination_index)
X_new = np.empty((n_samples, new_n_features))
for i, com_index in enumerate(combination_index):
X_new[:, i] = np.prod(X[:, com_index], axis=1)
return X_new
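# Quick sanity check of the transform (a minimal sketch; assumes numpy is already
# imported as np in this notebook). For two features and degree=2 the generated
# terms are [1, x0, x1, x0^2, x0*x1, x1^2].
_demo = np.array([[2.0, 3.0]])
print(PolynomialFeature(_demo, degree=2))  # expected: [[1. 2. 3. 4. 6. 9.]]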
# Used for Polynomial Ridge regression.
class l2_regularization:
"""Regularization used for Ridge Regression"""
def __init__(self, lamda):
self.lamda = lamda
def __call__(self, weights):
"This will be retuned when we call this class."
return self.lamda * np.sum(np.square(weights))
def derivation(self, weights):
"Derivation of the regulariozation function."
return self.lamda * 2 * (weights)
class PolynamialRegression(Regression):
"""
Polynomial Regression is also a type of non-linear regression with no regularization.
Before fitting the linear regression, the input features are transformed to a given polynomial degree.
This basically transforms linear data so the model can capture some non-linearity.
"""
def __init__(self, learning_rate, iteration, degree):
"""
:param learning_rate: [range from 0 to infinity] the step size used while doing gradient descent.
:param iteration: int - Number of iterations to do.
:param degree: int - The degree of the polynomial that the features are transformed to.
"""
self.degree = degree
# No regularization here. So, making the regularization methods to return 0.
self.regularization = lambda x: 0
self.regularization.derivation = lambda x: 0
super().__init__(learning_rate, iteration, self.regularization)
def train(self, X, y):
"""
:param X: training data feature values ---> N-dimensional array.
:param y: training data target value -----> 1-dimensional array.
"""
# Transform the features to polynomial features before training.
X_poly = PolynomialFeature(X, degree=self.degree)
return super().train(X_poly, y)
def predict(self, test_X):
"""
:param test_X: feature values to predict.
"""
test_X_poly = PolynomialFeature(test_X, degree=self.degree)
return super().predict(test_X_poly)
#define the parameters
param = {
"degree" : 2,
"learning_rate" : 0.1,
"iteration" : 100,
}
print("="*100)
polynomial_reg = PolynamialRegression(**param)
# Train the model.
polynomial_reg.train(X, y)
# Predict the values.
y_pred = polynomial_reg.predict(X)
#Root mean square error.
score = r2_score(y, y_pred)
print("The r2_score of the trained model", score) | ====================================================================================================
The Cost function for the iteration 10----->2524.546198902789 :)
The Cost function for the iteration 20----->313.8199639696676 :)
The Cost function for the iteration 30----->39.17839267886082 :)
The Cost function for the iteration 40----->4.916567388701627 :)
The Cost function for the iteration 50----->0.6225340983364702 :)
The Cost function for the iteration 60----->0.08070495018731812 :)
The Cost function for the iteration 70----->0.011282742313695108 :)
The Cost function for the iteration 80----->0.0019608909310563647 :)
The Cost function for the iteration 90----->0.0005118599780978334 :)
The Cost function for the iteration 100----->0.00019559828225020284 :)
The r2_score of the trained model 0.9999999891242503
| MIT | MachineLearning/supervised_machine_learning/Polinamial_and_PlynomialRidge_Regression.ipynb | pavi-ninjaac/Machine_Learing_sratch |
Polynomial Regression using scikit-learn for comparison | from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
# data is already defined, going to use the same data for comparision.
print("="*100)
print("Number of training data samples-----> {}".format(X.shape[0]))
print("Number of training features --------> {}".format(X.shape[1]))
linear_reg_sklearn = LinearRegression()
poly = PolynomialFeatures(degree = 2)
X_new = poly.fit_transform(X)
linear_reg_sklearn.fit(X_new, y)
# predict the value using the same polynomial features
y_pred_sklearn = linear_reg_sklearn.predict(X_new)
score = r2_score(y, y_pred_sklearn)
print("="*100)
print("R2 score of the model is {}".format(score)) | ====================================================================================================
R2 score of the model is 1.0
| MIT | MachineLearning/supervised_machine_learning/Polinamial_and_PlynomialRidge_Regression.ipynb | pavi-ninjaac/Machine_Learing_sratch |
Polynomial Ridge Regression from scratch | class PolynamialRidgeRegression(Regression):
"""
Polynomial Ridge Regression is basically polynomial regression with L2 regularization.
"""
def __init__(self, learning_rate, iteration, degree, lamda):
"""
:param learning_rate: [range from 0 to infinity] the step size used while doing gradient descent.
:param iteration: int - Number of iterations to do.
:param degree: int - The degree of the polynomial that the features are transformed to.
"""
self.degree = degree
# Apply L2 regularization with strength lamda.
self.regularization = l2_regularization(lamda)
super().__init__(learning_rate, iteration, self.regularization)
def train(self, X, y):
"""
:param X: training data feature values ---> N-dimensional array.
:param y: training data target value -----> 1-dimensional array.
"""
# Transform the features to polynomial features before training.
X_poly = PolynomialFeature(X, degree=self.degree)
return super().train(X_poly, y)
def predict(self, test_X):
"""
:param test_X: feature values to predict.
"""
test_X_poly = PolynomialFeature(test_X, degree=self.degree)
return super().predict(test_X_poly)
#define the parameters
param = {
"lamda": 0.1,
"degree" : 2,
"learning_rate" : 0.1,
"iteration" : 100,
}
print("="*100)
polynomial_reg = PolynamialRidgeRegression(**param)
# Train the model.
polynomial_reg.train(X, y)
# Predict the values.
y_pred = polynomial_reg.predict(X)
#Root mean square error.
score = r2_score(y, y_pred)
print("The r2_score of the trained model", score) | ====================================================================================================
The Cost function for the iteration 10----->4178.872832133191 :)
The Cost function for the iteration 20----->2887.989505020741 :)
The Cost function for the iteration 30----->2785.6247039737964 :)
The Cost function for the iteration 40----->2777.471815365709 :)
The Cost function for the iteration 50----->2776.819294060092 :)
The Cost function for the iteration 60----->2776.7666829082946 :)
The Cost function for the iteration 70----->2776.7623662294877 :)
The Cost function for the iteration 80----->2776.761991761519 :)
The Cost function for the iteration 90----->2776.761953080877 :)
The Cost function for the iteration 100----->2776.761947221511 :)
The r2_score of the trained model 0.9718297887794873
| MIT | MachineLearning/supervised_machine_learning/Polinamial_and_PlynomialRidge_Regression.ipynb | pavi-ninjaac/Machine_Learing_sratch |
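As with the plain polynomial model above, the ridge variant can be sanity-checked against scikit-learn. The snippet below is only a rough sketch (the `alpha` value here is not an exact equivalent of the `lamda`/learning-rate combination used in the scratch implementation, so the scores will not match exactly):
from sklearn.linear_model import Ridge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import r2_score

# Degree-2 polynomial features followed by an L2-regularized linear model.
ridge_sklearn = make_pipeline(PolynomialFeatures(degree=2), Ridge(alpha=0.1))
ridge_sklearn.fit(X, y)
print("sklearn Ridge r2_score:", r2_score(y, ridge_sklearn.predict(X)))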
Lists from: [HackerRank](https://www.hackerrank.com/challenges/python-lists/problem) - (easy)

Consider a list (list = []). You can perform the following commands:

insert `i`, `e`: Insert integer `e` at position `i`.
print(): Print the list.
remove `e`: Delete the first occurrence of integer `e`.
append `e`: Insert integer `e` at the end of the list.
sort: Sort the list.
pop: Pop the last element from the list.
reverse: Reverse the list.

Initialize your list and read in the value of `n` followed by `n` lines of commands, where each command will be of the types listed above. Iterate through each command in order and perform the corresponding operation on your list.

**Input Format**
The first line contains an integer, `n`, denoting the number of commands.
Each of the subsequent `n` lines contains one of the commands described above.

**Constraints**
The elements added to the list must be integers.

**Output Format**
For each command of type print, print the list on a new line.

**Sample Input**
```
12
insert 0 5
insert 1 10
insert 0 6
print
remove 6
append 9
append 1
sort
print
pop
reverse
print
```

**Sample Output**
```
[6, 5, 10]
[1, 5, 9, 10]
[9, 5, 1]
``` | N = int(input())
ls = []
for i in range(N):
n = input()
a = n.split()
cmd = a[0]
if cmd == "insert":
ls.insert(int(a[1]), int(a[2]))
elif cmd == "remove":
ls.remove(int(a[1]))
elif cmd == "append":
ls.append(int(a[1]))
elif cmd == "sort":
ls.sort()
elif cmd == "pop":
ls.pop()
elif cmd == "reverse":
ls.reverse()
elif cmd == "print":
print(ls)
n = int(input())
ls = []
for _ in range(n):
s = input().split()
cmd = s[0]
args = s[1:]
if cmd !="print":
cmd += "("+ ",".join(args) +")"
eval("ls."+cmd)
else:
print(ls) | 12
insert 0 5
insert 1 10
insert 0 6
print
[6, 5, 10]
remove 6
append 9
append 1
sort
print
[1, 5, 9, 10]
pop
reverse
print
[9, 5, 1]
| MIT | 9_coding quizzes/05_list_HackerRank.ipynb | lucaseo/TIL |
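A third variant (an optional sketch) avoids `eval` by dispatching on the method name with `getattr` and converting the arguments to integers first:
n = int(input())
ls = []
for _ in range(n):
    cmd, *args = input().split()
    if cmd == "print":
        print(ls)
    else:
        # Look up the matching list method by name and call it with int arguments.
        getattr(ls, cmd)(*map(int, args))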
Copyright 2018 The AdaNet Authors. | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | _____no_output_____ | MIT | frameworks/tensorflow/adanet_objective.ipynb | jiankaiwang/sophia.ml |
The AdaNet objective

One of the key contributions from *AdaNet: Adaptive Structural Learning of Neural Networks* [[Cortes et al., ICML 2017](https://arxiv.org/abs/1607.01097)] is defining an algorithm that aims to directly minimize the DeepBoost generalization bound from *Deep Boosting* [[Cortes et al., ICML 2014](http://proceedings.mlr.press/v32/cortesb14.pdf)] when applied to neural networks. This algorithm, called **AdaNet**, adaptively grows a neural network as an ensemble of subnetworks that minimizes the AdaNet objective (a.k.a. AdaNet loss):

$$F(w) = \frac{1}{m} \sum_{i=1}^{m} \Phi \left(\sum_{j=1}^{N}w_jh_j(x_i), y_i \right) + \sum_{j=1}^{N} \left(\lambda r(h_j) + \beta \right) |w_j|$$

where $w$ is the set of mixture weights, one per subnetwork $h$, $\Phi$ is a surrogate loss function such as logistic loss or MSE, $r$ is a function for measuring a subnetwork's complexity, and $\lambda$ and $\beta$ are hyperparameters.

Mixture weights

So what are mixture weights? When forming an ensemble $f$ of subnetworks $h$, we need to somehow combine their predictions. This is done by multiplying the outputs of subnetwork $h_i$ with mixture weight $w_i$, and summing the results:

$$f(x) = \sum_{j=1}^{N}w_jh_j(x)$$

In practice, the most commonly used set of mixture weights is **uniform average weighting**:

$$f(x) = \frac{1}{N}\sum_{j=1}^{N}h_j(x)$$

However, we can also solve a convex optimization problem to learn the mixture weights that minimize the loss function $\Phi$:

$$F(w) = \frac{1}{m} \sum_{i=1}^{m} \Phi \left(\sum_{j=1}^{N}w_jh_j(x_i), y_i \right)$$

This is the first term in the AdaNet objective. The second term applies L1 regularization to the mixture weights:

$$\sum_{j=1}^{N} \left(\lambda r(h_j) + \beta \right) |w_j|$$

When $\lambda > 0$ this penalty serves to prevent the optimization from assigning too much weight to more complex subnetworks according to the complexity measure function $r$.

How AdaNet uses the objective

This objective function serves two purposes:

1. To **learn to scale/transform the outputs of each subnetwork $h$** as part of the ensemble.
2. To **select the best candidate subnetwork $h$** at each AdaNet iteration to include in the ensemble.

Effectively, when learning mixture weights $w$, AdaNet solves a convex combination of the outputs of the frozen subnetworks $h$. For $\lambda > 0$, AdaNet penalizes more complex subnetworks with greater L1 regularization on their mixture weight, and will be less likely to select more complex subnetworks to add to the ensemble at each iteration.

In this tutorial, you will observe the benefits of using AdaNet to learn the ensemble's mixture weights and to perform candidate selection. | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import adanet
import tensorflow as tf
# The random seed to use.
RANDOM_SEED = 42 | _____no_output_____ | MIT | frameworks/tensorflow/adanet_objective.ipynb | jiankaiwang/sophia.ml |
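To make the objective concrete before building anything, here is a small NumPy sketch (toy numbers chosen purely for illustration, not taken from the tutorial) that evaluates $F(w)$ with a squared-error surrogate loss $\Phi$ and the complexity measure $r(h) = \sqrt{d(h)}$ used later in this notebook:
import numpy as np

def adanet_objective(w, subnetwork_outputs, targets, depths, lam=0.015, beta=0.0):
  """Surrogate loss of the weighted ensemble plus the complexity-weighted L1 penalty."""
  ensemble = subnetwork_outputs @ w                    # sum_j w_j * h_j(x_i) for each example
  surrogate = np.mean((ensemble - targets) ** 2)       # Phi chosen as squared error
  penalty = np.sum((lam * np.sqrt(depths) + beta) * np.abs(w))
  return surrogate + penalty

h_toy = np.array([[2.9, 3.1], [1.8, 2.2], [4.2, 3.9]])  # outputs of N=2 subnetworks on m=3 examples
y_toy = np.array([3.0, 2.0, 4.0])
print(adanet_objective(np.array([0.5, 0.5]), h_toy, y_toy, depths=np.array([1, 2])))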
Boston Housing dataset

In this example, we will solve a regression task known as the [Boston Housing dataset](https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html) to predict the price of suburban houses in Boston, MA in the 1970s. There are 13 numerical features, the labels are in thousands of dollars, and there are only 506 examples.

Download the data

Conveniently, the data is available via Keras: | (x_train, y_train), (x_test, y_test) = (
tf.keras.datasets.boston_housing.load_data())
print(x_test.shape)
print(x_test[0])
print(y_test.shape)
print(y_test[0]) | (102, 13)
[ 18.0846 0. 18.1 0. 0.679 6.434 100. 1.8347
24. 666. 20.2 27.25 29.05 ]
(102,)
7.2
| MIT | frameworks/tensorflow/adanet_objective.ipynb | jiankaiwang/sophia.ml |
Supply the data in TensorFlow

Our first task is to supply the data in TensorFlow. Using the tf.estimator.Estimator convention, we will define a function that returns an input_fn which returns feature and label Tensors.

We will also use the tf.data.Dataset API to feed the data into our models.

Also, as a preprocessing step, we will apply `tf.log1p` to log-scale the features and labels for improved numerical stability during training. To recover the model's predictions in the correct scale, you can apply `tf.math.expm1` to the prediction. | FEATURES_KEY = "x"
def input_fn(partition, training, batch_size):
"""Generate an input function for the Estimator."""
def _input_fn():
if partition == "train":
dataset = tf.data.Dataset.from_tensor_slices(({
FEATURES_KEY: tf.log1p(x_train)
}, tf.log1p(y_train)))
else:
dataset = tf.data.Dataset.from_tensor_slices(({
FEATURES_KEY: tf.log1p(x_test)
}, tf.log1p(y_test)))
# We call repeat after shuffling, rather than before, to prevent separate
# epochs from blending together.
if training:
dataset = dataset.shuffle(10 * batch_size, seed=RANDOM_SEED).repeat()
dataset = dataset.batch(batch_size)
iterator = dataset.make_one_shot_iterator()
features, labels = iterator.get_next()
return features, labels
return _input_fn | _____no_output_____ | MIT | frameworks/tensorflow/adanet_objective.ipynb | jiankaiwang/sophia.ml |
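Since both the features and the labels are fed to the model in log space, remember to invert the transform when reporting predictions. A minimal sketch of the round trip in plain NumPy (the NumPy calls mirror `tf.log1p` / `tf.math.expm1`; the import is included in case an earlier cell has not already provided `np`):
import numpy as np

scaled_labels = np.log1p(y_train)       # what the model is trained to predict
recovered = np.expm1(scaled_labels)     # back to the original scale (thousands of dollars)
print(np.allclose(recovered, y_train))  # True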
Define the subnetwork generator

Let's define a subnetwork generator similar to the one in [[Cortes et al., ICML 2017](https://arxiv.org/abs/1607.01097)] and in `simple_dnn.py`, which creates two candidate fully-connected neural networks at each iteration with the same width, but one with an additional hidden layer. To make our generator *adaptive*, each subnetwork will have at least the same number of hidden layers as the most recently added subnetwork to the `previous_ensemble`.

We define the complexity measure function $r$ to be $r(h) = \sqrt{d(h)}$, where $d$ is the number of hidden layers in the neural network $h$, to approximate the Rademacher bounds from [[Golowich et al., 2017](https://arxiv.org/abs/1712.06541)]. So subnetworks with more hidden layers, and therefore more capacity, will have more heavily regularized mixture weights. | _NUM_LAYERS_KEY = "num_layers"
class _SimpleDNNBuilder(adanet.subnetwork.Builder):
"""Builds a DNN subnetwork for AdaNet."""
def __init__(self, optimizer, layer_size, num_layers, learn_mixture_weights,
seed):
"""Initializes a `_DNNBuilder`.
Args:
optimizer: An `Optimizer` instance for training both the subnetwork and
the mixture weights.
layer_size: The number of nodes to output at each hidden layer.
num_layers: The number of hidden layers.
learn_mixture_weights: Whether to solve a learning problem to find the
best mixture weights, or use their default value according to the
mixture weight type. When `False`, the subnetworks will return a no_op
for the mixture weight train op.
seed: A random seed.
Returns:
An instance of `_SimpleDNNBuilder`.
"""
self._optimizer = optimizer
self._layer_size = layer_size
self._num_layers = num_layers
self._learn_mixture_weights = learn_mixture_weights
self._seed = seed
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
"""See `adanet.subnetwork.Builder`."""
input_layer = tf.to_float(features[FEATURES_KEY])
kernel_initializer = tf.glorot_uniform_initializer(seed=self._seed)
last_layer = input_layer
for _ in range(self._num_layers):
last_layer = tf.layers.dense(
last_layer,
units=self._layer_size,
activation=tf.nn.relu,
kernel_initializer=kernel_initializer)
logits = tf.layers.dense(
last_layer,
units=logits_dimension,
kernel_initializer=kernel_initializer)
persisted_tensors = {_NUM_LAYERS_KEY: tf.constant(self._num_layers)}
return adanet.Subnetwork(
last_layer=last_layer,
logits=logits,
complexity=self._measure_complexity(),
persisted_tensors=persisted_tensors)
def _measure_complexity(self):
"""Approximates Rademacher complexity as the square-root of the depth."""
return tf.sqrt(tf.to_float(self._num_layers))
def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
iteration_step, summary, previous_ensemble):
"""See `adanet.subnetwork.Builder`."""
return self._optimizer.minimize(loss=loss, var_list=var_list)
def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
iteration_step, summary):
"""See `adanet.subnetwork.Builder`."""
if not self._learn_mixture_weights:
return tf.no_op()
return self._optimizer.minimize(loss=loss, var_list=var_list)
@property
def name(self):
"""See `adanet.subnetwork.Builder`."""
if self._num_layers == 0:
# A DNN with no hidden layers is a linear model.
return "linear"
return "{}_layer_dnn".format(self._num_layers)
class SimpleDNNGenerator(adanet.subnetwork.Generator):
"""Generates a two DNN subnetworks at each iteration.
The first DNN has an identical shape to the most recently added subnetwork
in `previous_ensemble`. The second has the same shape plus one more dense
layer on top. This is similar to the adaptive network presented in Figure 2 of
[Cortes et al. ICML 2017](https://arxiv.org/abs/1607.01097), without the
connections to hidden layers of networks from previous iterations.
"""
def __init__(self,
optimizer,
layer_size=32,
learn_mixture_weights=False,
seed=None):
"""Initializes a DNN `Generator`.
Args:
optimizer: An `Optimizer` instance for training both the subnetwork and
the mixture weights.
layer_size: Number of nodes in each hidden layer of the subnetwork
candidates. Note that this parameter is ignored in a DNN with no hidden
layers.
learn_mixture_weights: Whether to solve a learning problem to find the
best mixture weights, or use their default value according to the
mixture weight type. When `False`, the subnetworks will return a no_op
for the mixture weight train op.
seed: A random seed.
Returns:
An instance of `Generator`.
"""
self._seed = seed
self._dnn_builder_fn = functools.partial(
_SimpleDNNBuilder,
optimizer=optimizer,
layer_size=layer_size,
learn_mixture_weights=learn_mixture_weights)
def generate_candidates(self, previous_ensemble, iteration_number,
previous_ensemble_reports, all_reports):
"""See `adanet.subnetwork.Generator`."""
num_layers = 0
seed = self._seed
if previous_ensemble:
num_layers = tf.contrib.util.constant_value(
previous_ensemble.weighted_subnetworks[
-1].subnetwork.persisted_tensors[_NUM_LAYERS_KEY])
if seed is not None:
seed += iteration_number
return [
self._dnn_builder_fn(num_layers=num_layers, seed=seed),
self._dnn_builder_fn(num_layers=num_layers + 1, seed=seed),
] | _____no_output_____ | MIT | frameworks/tensorflow/adanet_objective.ipynb | jiankaiwang/sophia.ml |
Train and evaluate

Next we create an `adanet.Estimator` using the `SimpleDNNGenerator` we just defined.

In this section we will show the effects of two hyperparameters: **learning mixture weights** and **complexity regularization**.

On the right-hand side you will be able to play with the hyperparameters of this model. Until you reach the end of this section, we ask that you not change them. At first we will not learn the mixture weights, using their default initial value. Here they will be scalars initialized to $1/N$, where $N$ is the number of subnetworks in the ensemble, effectively creating a **uniform average ensemble**. | #@title AdaNet parameters
LEARNING_RATE = 0.001 #@param {type:"number"}
TRAIN_STEPS = 100000 #@param {type:"integer"}
BATCH_SIZE = 32 #@param {type:"integer"}
LEARN_MIXTURE_WEIGHTS = False #@param {type:"boolean"}
ADANET_LAMBDA = 0 #@param {type:"number"}
BOOSTING_ITERATIONS = 5 #@param {type:"integer"}
def train_and_evaluate(learn_mixture_weights=LEARN_MIXTURE_WEIGHTS,
adanet_lambda=ADANET_LAMBDA):
"""Trains an `adanet.Estimator` to predict housing prices."""
estimator = adanet.Estimator(
# Since we are predicting housing prices, we'll use a regression
# head that optimizes for MSE.
head=tf.contrib.estimator.regression_head(
loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE),
# Define the generator, which defines our search space of subnetworks
# to train as candidates to add to the final AdaNet model.
subnetwork_generator=SimpleDNNGenerator(
optimizer=tf.train.RMSPropOptimizer(learning_rate=LEARNING_RATE),
learn_mixture_weights=learn_mixture_weights,
seed=RANDOM_SEED),
# Lambda is a the strength of complexity regularization. A larger
# value will penalize more complex subnetworks.
adanet_lambda=adanet_lambda,
# The number of train steps per iteration.
max_iteration_steps=TRAIN_STEPS // BOOSTING_ITERATIONS,
# The evaluator will evaluate the model on the full training set to
# compute the overall AdaNet loss (train loss + complexity
# regularization) to select the best candidate to include in the
# final AdaNet model.
evaluator=adanet.Evaluator(
input_fn=input_fn("train", training=False, batch_size=BATCH_SIZE)),
# Configuration for Estimators.
config=tf.estimator.RunConfig(
save_checkpoints_steps=50000,
save_summary_steps=50000,
tf_random_seed=RANDOM_SEED))
# Train and evaluate using using the tf.estimator tooling.
train_spec = tf.estimator.TrainSpec(
input_fn=input_fn("train", training=True, batch_size=BATCH_SIZE),
max_steps=TRAIN_STEPS)
eval_spec = tf.estimator.EvalSpec(
input_fn=input_fn("test", training=False, batch_size=BATCH_SIZE),
steps=None)
return tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
def ensemble_architecture(result):
"""Extracts the ensemble architecture from evaluation results."""
architecture = result["architecture/adanet/ensembles"]
# The architecture is a serialized Summary proto for TensorBoard.
summary_proto = tf.summary.Summary.FromString(architecture)
return summary_proto.value[0].tensor.string_val[0]
results, _ = train_and_evaluate()
print("Loss:", results["average_loss"])
print("Architecture:", ensemble_architecture(results)) | WARNING:tensorflow:Using temporary folder as model directory: /tmp/tmplcezpthw
INFO:tensorflow:Using config: {'_save_checkpoints_secs': None, '_experimental_distribute': None, '_service': None, '_task_id': 0, '_is_chief': True, '_master': '', '_evaluation_master': '', '_train_distribute': None, '_model_dir': '/tmp/tmplcezpthw', '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7f8074e7df28>, '_keep_checkpoint_every_n_hours': 10000, '_global_id_in_cluster': 0, '_keep_checkpoint_max': 5, '_save_checkpoints_steps': 50000, '_tf_random_seed': 42, '_session_config': allow_soft_placement: true
graph_options {
rewrite_options {
meta_optimizer_iterations: ONE
}
}
, '_protocol': None, '_device_fn': None, '_save_summary_steps': 50000, '_num_ps_replicas': 0, '_eval_distribute': None, '_num_worker_replicas': 1, '_log_step_count_steps': 100, '_task_type': 'worker'}
INFO:tensorflow:Running training and evaluation locally (non-distributed).
INFO:tensorflow:Start train and evaluate loop. The evaluate will happen after every checkpoint. Checkpoint frequency is determined based on RunConfig arguments: save_checkpoints_steps 50000 or save_checkpoints_secs None.
INFO:tensorflow:Beginning training AdaNet iteration 0
INFO:tensorflow:Calling model_fn.
INFO:tensorflow:Done calling model_fn.
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Graph was finalized.
INFO:tensorflow:Running local_init_op.
INFO:tensorflow:Done running local_init_op.
INFO:tensorflow:Saving checkpoints for 0 into /tmp/tmplcezpthw/model.ckpt.
INFO:tensorflow:loss = 21.773132, step = 1
INFO:tensorflow:global_step/sec: 211.077
INFO:tensorflow:loss = 0.647101, step = 101 (0.478 sec)
INFO:tensorflow:global_step/sec: 429.504
INFO:tensorflow:loss = 0.58654255, step = 201 (0.230 sec)
INFO:tensorflow:global_step/sec: 423.494
INFO:tensorflow:loss = 0.07683477, step = 301 (0.236 sec)
INFO:tensorflow:global_step/sec: 432.097
INFO:tensorflow:loss = 0.08281773, step = 401 (0.232 sec)
INFO:tensorflow:global_step/sec: 413.752
INFO:tensorflow:loss = 0.08148783, step = 501 (0.242 sec)
INFO:tensorflow:global_step/sec: 459.975
INFO:tensorflow:loss = 0.056522056, step = 601 (0.219 sec)
INFO:tensorflow:global_step/sec: 458.298
INFO:tensorflow:loss = 0.025881834, step = 701 (0.215 sec)
INFO:tensorflow:global_step/sec: 419.078
INFO:tensorflow:loss = 0.030095303, step = 801 (0.242 sec)
INFO:tensorflow:global_step/sec: 455.713
INFO:tensorflow:loss = 0.03755439, step = 901 (0.220 sec)
INFO:tensorflow:global_step/sec: 444.218
INFO:tensorflow:loss = 0.06690022, step = 1001 (0.225 sec)
INFO:tensorflow:global_step/sec: 451.699
INFO:tensorflow:loss = 0.03615122, step = 1101 (0.222 sec)
INFO:tensorflow:global_step/sec: 457.472
INFO:tensorflow:loss = 0.050185308, step = 1201 (0.218 sec)
INFO:tensorflow:global_step/sec: 462.55
INFO:tensorflow:loss = 0.099214725, step = 1301 (0.216 sec)
INFO:tensorflow:global_step/sec: 436.246
INFO:tensorflow:loss = 0.026417175, step = 1401 (0.227 sec)
INFO:tensorflow:global_step/sec: 432.357
INFO:tensorflow:loss = 0.02078271, step = 1501 (0.231 sec)
INFO:tensorflow:global_step/sec: 407.845
INFO:tensorflow:loss = 0.03165562, step = 1601 (0.245 sec)
INFO:tensorflow:global_step/sec: 409.88
INFO:tensorflow:loss = 0.041417733, step = 1701 (0.244 sec)
INFO:tensorflow:global_step/sec: 447.158
INFO:tensorflow:loss = 0.035114042, step = 1801 (0.226 sec)
INFO:tensorflow:global_step/sec: 460.335
INFO:tensorflow:loss = 0.044721745, step = 1901 (0.218 sec)
INFO:tensorflow:global_step/sec: 442.593
INFO:tensorflow:loss = 0.029930545, step = 2001 (0.223 sec)
INFO:tensorflow:global_step/sec: 459.624
INFO:tensorflow:loss = 0.04725883, step = 2101 (0.218 sec)
INFO:tensorflow:global_step/sec: 462.613
INFO:tensorflow:loss = 0.024880452, step = 2201 (0.218 sec)
INFO:tensorflow:global_step/sec: 462.628
INFO:tensorflow:loss = 0.024809994, step = 2301 (0.216 sec)
INFO:tensorflow:global_step/sec: 442.364
INFO:tensorflow:loss = 0.022308666, step = 2401 (0.227 sec)
INFO:tensorflow:global_step/sec: 434.897
INFO:tensorflow:loss = 0.04762791, step = 2501 (0.227 sec)
INFO:tensorflow:global_step/sec: 473.857
INFO:tensorflow:loss = 0.03194421, step = 2601 (0.210 sec)
INFO:tensorflow:global_step/sec: 461.601
INFO:tensorflow:loss = 0.033454657, step = 2701 (0.219 sec)
INFO:tensorflow:global_step/sec: 459.118
INFO:tensorflow:loss = 0.014480978, step = 2801 (0.218 sec)
INFO:tensorflow:global_step/sec: 445.164
INFO:tensorflow:loss = 0.031083336, step = 2901 (0.222 sec)
INFO:tensorflow:global_step/sec: 447.55
INFO:tensorflow:loss = 0.026340332, step = 3001 (0.226 sec)
INFO:tensorflow:global_step/sec: 463.595
INFO:tensorflow:loss = 0.02651683, step = 3101 (0.213 sec)
INFO:tensorflow:global_step/sec: 468.642
INFO:tensorflow:loss = 0.027183883, step = 3201 (0.214 sec)
INFO:tensorflow:global_step/sec: 463.448
INFO:tensorflow:loss = 0.035816483, step = 3301 (0.218 sec)
INFO:tensorflow:global_step/sec: 462.425
INFO:tensorflow:loss = 0.02551706, step = 3401 (0.214 sec)
INFO:tensorflow:global_step/sec: 457.828
INFO:tensorflow:loss = 0.049349364, step = 3501 (0.219 sec)
INFO:tensorflow:global_step/sec: 463.545
INFO:tensorflow:loss = 0.024015253, step = 3601 (0.216 sec)
INFO:tensorflow:global_step/sec: 463.739
INFO:tensorflow:loss = 0.017241174, step = 3701 (0.216 sec)
INFO:tensorflow:global_step/sec: 469.595
INFO:tensorflow:loss = 0.020121489, step = 3801 (0.212 sec)
INFO:tensorflow:global_step/sec: 468.923
INFO:tensorflow:loss = 0.021484237, step = 3901 (0.214 sec)
INFO:tensorflow:global_step/sec: 453.121
INFO:tensorflow:loss = 0.037488014, step = 4001 (0.221 sec)
INFO:tensorflow:global_step/sec: 438.323
INFO:tensorflow:loss = 0.040071916, step = 4101 (0.228 sec)
INFO:tensorflow:global_step/sec: 410.215
INFO:tensorflow:loss = 0.021272995, step = 4201 (0.244 sec)
INFO:tensorflow:global_step/sec: 457.032
INFO:tensorflow:loss = 0.03338682, step = 4301 (0.219 sec)
INFO:tensorflow:global_step/sec: 429.693
INFO:tensorflow:loss = 0.036143243, step = 4401 (0.232 sec)
INFO:tensorflow:global_step/sec: 432.626
INFO:tensorflow:loss = 0.039583378, step = 4501 (0.234 sec)
INFO:tensorflow:global_step/sec: 427.591
INFO:tensorflow:loss = 0.036702216, step = 4601 (0.235 sec)
INFO:tensorflow:global_step/sec: 427.303
INFO:tensorflow:loss = 0.05008479, step = 4701 (0.231 sec)
INFO:tensorflow:global_step/sec: 453.169
INFO:tensorflow:loss = 0.0439879, step = 4801 (0.220 sec)
INFO:tensorflow:global_step/sec: 462.178
INFO:tensorflow:loss = 0.023454221, step = 4901 (0.217 sec)
INFO:tensorflow:global_step/sec: 468.888
INFO:tensorflow:loss = 0.014781383, step = 5001 (0.213 sec)
INFO:tensorflow:global_step/sec: 463.829
INFO:tensorflow:loss = 0.020877432, step = 5101 (0.217 sec)
INFO:tensorflow:global_step/sec: 465.293
INFO:tensorflow:loss = 0.028106665, step = 5201 (0.212 sec)
INFO:tensorflow:global_step/sec: 447
INFO:tensorflow:loss = 0.044017084, step = 5301 (0.227 sec)
INFO:tensorflow:global_step/sec: 442.253
INFO:tensorflow:loss = 0.015634855, step = 5401 (0.223 sec)
INFO:tensorflow:global_step/sec: 468.506
INFO:tensorflow:loss = 0.017649759, step = 5501 (0.214 sec)
INFO:tensorflow:global_step/sec: 425.122
INFO:tensorflow:loss = 0.026881203, step = 5601 (0.235 sec)
INFO:tensorflow:global_step/sec: 392.981
INFO:tensorflow:loss = 0.02515915, step = 5701 (0.255 sec)
INFO:tensorflow:global_step/sec: 422.847
INFO:tensorflow:loss = 0.03226296, step = 5801 (0.236 sec)
INFO:tensorflow:global_step/sec: 372.411
INFO:tensorflow:loss = 0.014366373, step = 5901 (0.269 sec)
INFO:tensorflow:global_step/sec: 333.939
INFO:tensorflow:loss = 0.020684633, step = 6001 (0.303 sec)
INFO:tensorflow:global_step/sec: 330.503
INFO:tensorflow:loss = 0.035918076, step = 6101 (0.299 sec)
INFO:tensorflow:global_step/sec: 357.144
INFO:tensorflow:loss = 0.052825905, step = 6201 (0.279 sec)
INFO:tensorflow:global_step/sec: 355.8
INFO:tensorflow:loss = 0.026814178, step = 6301 (0.283 sec)
INFO:tensorflow:global_step/sec: 412.615
INFO:tensorflow:loss = 0.03537807, step = 6401 (0.243 sec)
INFO:tensorflow:global_step/sec: 331.551
INFO:tensorflow:loss = 0.041909292, step = 6501 (0.302 sec)
INFO:tensorflow:global_step/sec: 321.808
INFO:tensorflow:loss = 0.025281452, step = 6601 (0.311 sec)
| MIT | frameworks/tensorflow/adanet_objective.ipynb | jiankaiwang/sophia.ml |
These hyperparameters produce a model that achieves **0.0348** MSE on the test set. Notice that the ensemble is composed of 5 subnetworks, each one a hidden layer deeper than the previous. The most complex subnetwork is made of 5 hidden layers.

Since `SimpleDNNGenerator` produces subnetworks of varying complexity, and our model gives each one an equal weight, AdaNet selected the subnetwork that most lowered the ensemble's training loss at each iteration, likely the one with the most hidden layers, since it has the most capacity, and we aren't penalizing more complex subnetworks (yet).

Next, instead of assigning equal weight to each subnetwork, let's learn the mixture weights as a convex optimization problem using SGD: | #@test {"skip": true}
results, _ = train_and_evaluate(learn_mixture_weights=True)
print("Loss:", results["average_loss"])
print("Uniform average loss:", results["average_loss/adanet/uniform_average_ensemble"])
print("Architecture:", ensemble_architecture(results)) | WARNING:tensorflow:Using temporary folder as model directory: /tmp/tmpsbdccn23
INFO:tensorflow:Using config: {'_save_checkpoints_secs': None, '_experimental_distribute': None, '_service': None, '_task_id': 0, '_is_chief': True, '_master': '', '_evaluation_master': '', '_train_distribute': None, '_model_dir': '/tmp/tmpsbdccn23', '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7f8029968a90>, '_keep_checkpoint_every_n_hours': 10000, '_global_id_in_cluster': 0, '_keep_checkpoint_max': 5, '_save_checkpoints_steps': 50000, '_tf_random_seed': 42, '_session_config': allow_soft_placement: true
graph_options {
rewrite_options {
meta_optimizer_iterations: ONE
}
}
, '_protocol': None, '_device_fn': None, '_save_summary_steps': 50000, '_num_ps_replicas': 0, '_eval_distribute': None, '_num_worker_replicas': 1, '_log_step_count_steps': 100, '_task_type': 'worker'}
INFO:tensorflow:Running training and evaluation locally (non-distributed).
INFO:tensorflow:Start train and evaluate loop. The evaluate will happen after every checkpoint. Checkpoint frequency is determined based on RunConfig arguments: save_checkpoints_steps 50000 or save_checkpoints_secs None.
INFO:tensorflow:Beginning training AdaNet iteration 0
INFO:tensorflow:Calling model_fn.
INFO:tensorflow:Done calling model_fn.
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Graph was finalized.
INFO:tensorflow:Running local_init_op.
INFO:tensorflow:Done running local_init_op.
INFO:tensorflow:Saving checkpoints for 0 into /tmp/tmpsbdccn23/model.ckpt.
INFO:tensorflow:loss = 21.773132, step = 1
INFO:tensorflow:global_step/sec: 151.659
INFO:tensorflow:loss = 0.6285208, step = 101 (0.661 sec)
INFO:tensorflow:global_step/sec: 377.914
INFO:tensorflow:loss = 0.568697, step = 201 (0.264 sec)
INFO:tensorflow:global_step/sec: 317.447
INFO:tensorflow:loss = 0.07774219, step = 301 (0.318 sec)
INFO:tensorflow:global_step/sec: 298.158
INFO:tensorflow:loss = 0.08270247, step = 401 (0.332 sec)
INFO:tensorflow:global_step/sec: 421.096
INFO:tensorflow:loss = 0.08153409, step = 501 (0.237 sec)
INFO:tensorflow:global_step/sec: 414.588
INFO:tensorflow:loss = 0.05655239, step = 601 (0.241 sec)
INFO:tensorflow:global_step/sec: 341.393
INFO:tensorflow:loss = 0.025883064, step = 701 (0.293 sec)
INFO:tensorflow:global_step/sec: 366.02
INFO:tensorflow:loss = 0.030127691, step = 801 (0.275 sec)
INFO:tensorflow:global_step/sec: 427.488
INFO:tensorflow:loss = 0.03756215, step = 901 (0.232 sec)
INFO:tensorflow:global_step/sec: 353.863
INFO:tensorflow:loss = 0.06788642, step = 1001 (0.285 sec)
INFO:tensorflow:global_step/sec: 322.318
INFO:tensorflow:loss = 0.036306262, step = 1101 (0.310 sec)
INFO:tensorflow:global_step/sec: 413.289
INFO:tensorflow:loss = 0.05074877, step = 1201 (0.240 sec)
INFO:tensorflow:global_step/sec: 321.58
INFO:tensorflow:loss = 0.10058461, step = 1301 (0.311 sec)
INFO:tensorflow:global_step/sec: 300.699
INFO:tensorflow:loss = 0.026643617, step = 1401 (0.334 sec)
INFO:tensorflow:global_step/sec: 318.013
INFO:tensorflow:loss = 0.020885482, step = 1501 (0.313 sec)
INFO:tensorflow:global_step/sec: 323.705
INFO:tensorflow:loss = 0.03239681, step = 1601 (0.315 sec)
INFO:tensorflow:global_step/sec: 328.631
INFO:tensorflow:loss = 0.04160305, step = 1701 (0.298 sec)
INFO:tensorflow:global_step/sec: 397.201
INFO:tensorflow:loss = 0.0352926, step = 1801 (0.251 sec)
INFO:tensorflow:global_step/sec: 342.005
INFO:tensorflow:loss = 0.044745784, step = 1901 (0.296 sec)
INFO:tensorflow:global_step/sec: 425.216
INFO:tensorflow:loss = 0.02993768, step = 2001 (0.233 sec)
INFO:tensorflow:global_step/sec: 425.851
INFO:tensorflow:loss = 0.047246575, step = 2101 (0.234 sec)
INFO:tensorflow:global_step/sec: 290.003
INFO:tensorflow:loss = 0.024866767, step = 2201 (0.346 sec)
INFO:tensorflow:global_step/sec: 306.232
INFO:tensorflow:loss = 0.025053538, step = 2301 (0.332 sec)
INFO:tensorflow:global_step/sec: 319.194
INFO:tensorflow:loss = 0.022536863, step = 2401 (0.315 sec)
INFO:tensorflow:global_step/sec: 327.319
INFO:tensorflow:loss = 0.04780043, step = 2501 (0.299 sec)
INFO:tensorflow:global_step/sec: 330.195
INFO:tensorflow:loss = 0.032027524, step = 2601 (0.302 sec)
INFO:tensorflow:global_step/sec: 424.554
INFO:tensorflow:loss = 0.033754565, step = 2701 (0.237 sec)
INFO:tensorflow:global_step/sec: 415.456
INFO:tensorflow:loss = 0.014495807, step = 2801 (0.243 sec)
INFO:tensorflow:global_step/sec: 378.815
INFO:tensorflow:loss = 0.031205792, step = 2901 (0.259 sec)
INFO:tensorflow:global_step/sec: 435.675
INFO:tensorflow:loss = 0.026793242, step = 3001 (0.233 sec)
INFO:tensorflow:global_step/sec: 445.07
INFO:tensorflow:loss = 0.02696861, step = 3101 (0.222 sec)
INFO:tensorflow:global_step/sec: 411.002
INFO:tensorflow:loss = 0.027100282, step = 3201 (0.243 sec)
INFO:tensorflow:global_step/sec: 452.535
INFO:tensorflow:loss = 0.03591666, step = 3301 (0.221 sec)
INFO:tensorflow:global_step/sec: 390.136
INFO:tensorflow:loss = 0.025515229, step = 3401 (0.257 sec)
INFO:tensorflow:global_step/sec: 403.819
INFO:tensorflow:loss = 0.049373504, step = 3501 (0.247 sec)
INFO:tensorflow:global_step/sec: 441.761
INFO:tensorflow:loss = 0.024171133, step = 3601 (0.230 sec)
INFO:tensorflow:global_step/sec: 438.165
INFO:tensorflow:loss = 0.017237274, step = 3701 (0.228 sec)
INFO:tensorflow:global_step/sec: 442.471
INFO:tensorflow:loss = 0.020128746, step = 3801 (0.224 sec)
INFO:tensorflow:global_step/sec: 443.692
INFO:tensorflow:loss = 0.021598278, step = 3901 (0.225 sec)
INFO:tensorflow:global_step/sec: 433.398
INFO:tensorflow:loss = 0.03772788, step = 4001 (0.230 sec)
INFO:tensorflow:global_step/sec: 453.543
INFO:tensorflow:loss = 0.040997066, step = 4101 (0.220 sec)
INFO:tensorflow:global_step/sec: 447.837
INFO:tensorflow:loss = 0.021314848, step = 4201 (0.223 sec)
INFO:tensorflow:global_step/sec: 449.319
INFO:tensorflow:loss = 0.03397343, step = 4301 (0.222 sec)
INFO:tensorflow:global_step/sec: 291.817
INFO:tensorflow:loss = 0.03742571, step = 4401 (0.343 sec)
INFO:tensorflow:global_step/sec: 349.156
INFO:tensorflow:loss = 0.04003142, step = 4501 (0.287 sec)
INFO:tensorflow:global_step/sec: 444.919
INFO:tensorflow:loss = 0.037306767, step = 4601 (0.224 sec)
INFO:tensorflow:global_step/sec: 324.799
INFO:tensorflow:loss = 0.050043724, step = 4701 (0.308 sec)
INFO:tensorflow:global_step/sec: 399.035
INFO:tensorflow:loss = 0.04509888, step = 4801 (0.250 sec)
INFO:tensorflow:global_step/sec: 342.386
INFO:tensorflow:loss = 0.023579072, step = 4901 (0.293 sec)
INFO:tensorflow:global_step/sec: 435.009
INFO:tensorflow:loss = 0.014783351, step = 5001 (0.230 sec)
INFO:tensorflow:global_step/sec: 465.426
INFO:tensorflow:loss = 0.021115372, step = 5101 (0.214 sec)
INFO:tensorflow:global_step/sec: 379.114
INFO:tensorflow:loss = 0.02869285, step = 5201 (0.263 sec)
INFO:tensorflow:global_step/sec: 446.474
INFO:tensorflow:loss = 0.044227358, step = 5301 (0.224 sec)
INFO:tensorflow:global_step/sec: 442.508
INFO:tensorflow:loss = 0.015665509, step = 5401 (0.229 sec)
INFO:tensorflow:global_step/sec: 439.36
INFO:tensorflow:loss = 0.017735064, step = 5501 (0.225 sec)
INFO:tensorflow:global_step/sec: 452.882
INFO:tensorflow:loss = 0.026888551, step = 5601 (0.220 sec)
INFO:tensorflow:global_step/sec: 450.627
INFO:tensorflow:loss = 0.025225505, step = 5701 (0.224 sec)
INFO:tensorflow:global_step/sec: 455.843
INFO:tensorflow:loss = 0.032536294, step = 5801 (0.218 sec)
INFO:tensorflow:global_step/sec: 453.967
INFO:tensorflow:loss = 0.014429852, step = 5901 (0.220 sec)
INFO:tensorflow:global_step/sec: 446.021
INFO:tensorflow:loss = 0.020685814, step = 6001 (0.226 sec)
INFO:tensorflow:global_step/sec: 447.631
INFO:tensorflow:loss = 0.035909995, step = 6101 (0.221 sec)
INFO:tensorflow:global_step/sec: 454.564
INFO:tensorflow:loss = 0.053759962, step = 6201 (0.220 sec)
INFO:tensorflow:global_step/sec: 385.801
INFO:tensorflow:loss = 0.02680358, step = 6301 (0.263 sec)
INFO:tensorflow:global_step/sec: 380.766
INFO:tensorflow:loss = 0.035358958, step = 6401 (0.262 sec)
INFO:tensorflow:global_step/sec: 384.454
INFO:tensorflow:loss = 0.04194645, step = 6501 (0.262 sec)
INFO:tensorflow:global_step/sec: 324.174
INFO:tensorflow:loss = 0.025395717, step = 6601 (0.309 sec)
| MIT | frameworks/tensorflow/adanet_objective.ipynb | jiankaiwang/sophia.ml |
Learning the mixture weights produces a model with **0.0449** MSE, a bit worse than the uniform average model, which the `adanet.Estimator` always computes as a baseline. The mixture weights were learned without regularization, so they likely overfit to the training set.

Observe that AdaNet learned the same ensemble composition as the previous run. Without complexity regularization, AdaNet will favor more complex subnetworks, which may have worse generalization despite improving the empirical error.

Finally, let's apply some **complexity regularization** by using $\lambda > 0$. Since this will penalize more complex subnetworks, AdaNet will select the candidate subnetwork that most improves the objective for its marginal complexity: | #@test {"skip": true}
results, _ = train_and_evaluate(learn_mixture_weights=True, adanet_lambda=.015)
print("Loss:", results["average_loss"])
print("Uniform average loss:", results["average_loss/adanet/uniform_average_ensemble"])
print("Architecture:", ensemble_architecture(results)) | WARNING:tensorflow:Using temporary folder as model directory: /tmp/tmpyxwongpm
INFO:tensorflow:Using config: {'_save_checkpoints_secs': None, '_experimental_distribute': None, '_service': None, '_task_id': 0, '_is_chief': True, '_master': '', '_evaluation_master': '', '_train_distribute': None, '_model_dir': '/tmp/tmpyxwongpm', '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7f802a6f6668>, '_keep_checkpoint_every_n_hours': 10000, '_global_id_in_cluster': 0, '_keep_checkpoint_max': 5, '_save_checkpoints_steps': 50000, '_tf_random_seed': 42, '_session_config': allow_soft_placement: true
graph_options {
rewrite_options {
meta_optimizer_iterations: ONE
}
}
, '_protocol': None, '_device_fn': None, '_save_summary_steps': 50000, '_num_ps_replicas': 0, '_eval_distribute': None, '_num_worker_replicas': 1, '_log_step_count_steps': 100, '_task_type': 'worker'}
INFO:tensorflow:Running training and evaluation locally (non-distributed).
INFO:tensorflow:Start train and evaluate loop. The evaluate will happen after every checkpoint. Checkpoint frequency is determined based on RunConfig arguments: save_checkpoints_steps 50000 or save_checkpoints_secs None.
INFO:tensorflow:Beginning training AdaNet iteration 0
INFO:tensorflow:Calling model_fn.
INFO:tensorflow:Done calling model_fn.
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Graph was finalized.
INFO:tensorflow:Running local_init_op.
INFO:tensorflow:Done running local_init_op.
INFO:tensorflow:Saving checkpoints for 0 into /tmp/tmpyxwongpm/model.ckpt.
INFO:tensorflow:loss = 21.773132, step = 1
INFO:tensorflow:global_step/sec: 140.958
INFO:tensorflow:loss = 0.62784123, step = 101 (0.711 sec)
INFO:tensorflow:global_step/sec: 316.671
INFO:tensorflow:loss = 0.56678665, step = 201 (0.315 sec)
INFO:tensorflow:global_step/sec: 300.513
INFO:tensorflow:loss = 0.078039765, step = 301 (0.333 sec)
INFO:tensorflow:global_step/sec: 299.365
INFO:tensorflow:loss = 0.086782694, step = 401 (0.334 sec)
INFO:tensorflow:global_step/sec: 305.519
INFO:tensorflow:loss = 0.08137445, step = 501 (0.327 sec)
INFO:tensorflow:global_step/sec: 310.289
INFO:tensorflow:loss = 0.056509923, step = 601 (0.325 sec)
INFO:tensorflow:global_step/sec: 319.378
INFO:tensorflow:loss = 0.025883604, step = 701 (0.313 sec)
INFO:tensorflow:global_step/sec: 216.501
INFO:tensorflow:loss = 0.030180356, step = 801 (0.462 sec)
INFO:tensorflow:global_step/sec: 232.224
INFO:tensorflow:loss = 0.037590638, step = 901 (0.429 sec)
INFO:tensorflow:global_step/sec: 249.671
INFO:tensorflow:loss = 0.06694432, step = 1001 (0.405 sec)
INFO:tensorflow:global_step/sec: 237.714
INFO:tensorflow:loss = 0.038478173, step = 1101 (0.416 sec)
INFO:tensorflow:global_step/sec: 321.145
INFO:tensorflow:loss = 0.04998316, step = 1201 (0.311 sec)
INFO:tensorflow:global_step/sec: 242.151
INFO:tensorflow:loss = 0.09006661, step = 1301 (0.417 sec)
INFO:tensorflow:global_step/sec: 308.934
INFO:tensorflow:loss = 0.026879994, step = 1401 (0.319 sec)
INFO:tensorflow:global_step/sec: 255.401
INFO:tensorflow:loss = 0.021093277, step = 1501 (0.393 sec)
INFO:tensorflow:global_step/sec: 332.521
INFO:tensorflow:loss = 0.03607753, step = 1601 (0.300 sec)
INFO:tensorflow:global_step/sec: 312.926
INFO:tensorflow:loss = 0.03416162, step = 1701 (0.322 sec)
INFO:tensorflow:global_step/sec: 211.064
INFO:tensorflow:loss = 0.04626117, step = 1801 (0.471 sec)
INFO:tensorflow:global_step/sec: 281.592
INFO:tensorflow:loss = 0.07378492, step = 1901 (0.356 sec)
INFO:tensorflow:global_step/sec: 282.328
INFO:tensorflow:loss = 0.049188316, step = 2001 (0.354 sec)
INFO:tensorflow:global_step/sec: 308.875
INFO:tensorflow:loss = 0.078179166, step = 2101 (0.323 sec)
INFO:tensorflow:global_step/sec: 334.139
INFO:tensorflow:loss = 0.03029899, step = 2201 (0.299 sec)
INFO:tensorflow:global_step/sec: 294.106
INFO:tensorflow:loss = 0.024719719, step = 2301 (0.341 sec)
INFO:tensorflow:global_step/sec: 332.18
INFO:tensorflow:loss = 0.024992699, step = 2401 (0.301 sec)
INFO:tensorflow:global_step/sec: 374.081
INFO:tensorflow:loss = 0.04709203, step = 2501 (0.268 sec)
INFO:tensorflow:global_step/sec: 368.409
INFO:tensorflow:loss = 0.047214545, step = 2601 (0.270 sec)
INFO:tensorflow:global_step/sec: 364.516
INFO:tensorflow:loss = 0.038211394, step = 2701 (0.274 sec)
INFO:tensorflow:global_step/sec: 345.828
INFO:tensorflow:loss = 0.03274207, step = 2801 (0.294 sec)
INFO:tensorflow:global_step/sec: 357.417
INFO:tensorflow:loss = 0.04549656, step = 2901 (0.279 sec)
INFO:tensorflow:global_step/sec: 352.133
INFO:tensorflow:loss = 0.035480063, step = 3001 (0.285 sec)
INFO:tensorflow:global_step/sec: 344.663
INFO:tensorflow:loss = 0.024679933, step = 3101 (0.286 sec)
INFO:tensorflow:global_step/sec: 382.242
INFO:tensorflow:loss = 0.041259166, step = 3201 (0.261 sec)
INFO:tensorflow:global_step/sec: 352.471
INFO:tensorflow:loss = 0.04356738, step = 3301 (0.284 sec)
INFO:tensorflow:global_step/sec: 384.285
INFO:tensorflow:loss = 0.034602944, step = 3401 (0.259 sec)
INFO:tensorflow:global_step/sec: 364.285
INFO:tensorflow:loss = 0.069668576, step = 3501 (0.275 sec)
INFO:tensorflow:global_step/sec: 371.728
INFO:tensorflow:loss = 0.034798123, step = 3601 (0.273 sec)
INFO:tensorflow:global_step/sec: 354.306
INFO:tensorflow:loss = 0.021452527, step = 3701 (0.285 sec)
INFO:tensorflow:global_step/sec: 350.869
INFO:tensorflow:loss = 0.02612273, step = 3801 (0.283 sec)
INFO:tensorflow:global_step/sec: 335.128
INFO:tensorflow:loss = 0.031272262, step = 3901 (0.299 sec)
INFO:tensorflow:global_step/sec: 342.451
INFO:tensorflow:loss = 0.05301467, step = 4001 (0.286 sec)
INFO:tensorflow:global_step/sec: 341.576
INFO:tensorflow:loss = 0.02896322, step = 4101 (0.293 sec)
INFO:tensorflow:global_step/sec: 366.845
INFO:tensorflow:loss = 0.022142775, step = 4201 (0.277 sec)
INFO:tensorflow:global_step/sec: 342.606
INFO:tensorflow:loss = 0.02221645, step = 4301 (0.291 sec)
INFO:tensorflow:global_step/sec: 306.676
INFO:tensorflow:loss = 0.027055696, step = 4401 (0.323 sec)
INFO:tensorflow:global_step/sec: 291.316
INFO:tensorflow:loss = 0.050597515, step = 4501 (0.347 sec)
INFO:tensorflow:global_step/sec: 353.302
INFO:tensorflow:loss = 0.02597157, step = 4601 (0.283 sec)
INFO:tensorflow:global_step/sec: 326.918
INFO:tensorflow:loss = 0.079174936, step = 4701 (0.303 sec)
INFO:tensorflow:global_step/sec: 356.635
INFO:tensorflow:loss = 0.034027025, step = 4801 (0.280 sec)
INFO:tensorflow:global_step/sec: 353.448
INFO:tensorflow:loss = 0.033307478, step = 4901 (0.283 sec)
INFO:tensorflow:global_step/sec: 384.233
INFO:tensorflow:loss = 0.02684283, step = 5001 (0.261 sec)
INFO:tensorflow:global_step/sec: 343.57
INFO:tensorflow:loss = 0.039310887, step = 5101 (0.295 sec)
INFO:tensorflow:global_step/sec: 358.382
INFO:tensorflow:loss = 0.030656522, step = 5201 (0.277 sec)
INFO:tensorflow:global_step/sec: 346.319
INFO:tensorflow:loss = 0.078128755, step = 5301 (0.286 sec)
INFO:tensorflow:global_step/sec: 321.706
INFO:tensorflow:loss = 0.021291938, step = 5401 (0.315 sec)
INFO:tensorflow:global_step/sec: 319.996
INFO:tensorflow:loss = 0.032513306, step = 5501 (0.308 sec)
INFO:tensorflow:global_step/sec: 342.397
INFO:tensorflow:loss = 0.028400544, step = 5601 (0.293 sec)
INFO:tensorflow:global_step/sec: 317.27
INFO:tensorflow:loss = 0.034857225, step = 5701 (0.321 sec)
INFO:tensorflow:global_step/sec: 316.78
INFO:tensorflow:loss = 0.037171274, step = 5801 (0.314 sec)
INFO:tensorflow:global_step/sec: 338.394
INFO:tensorflow:loss = 0.017138816, step = 5901 (0.290 sec)
INFO:tensorflow:global_step/sec: 329.102
INFO:tensorflow:loss = 0.030491471, step = 6001 (0.312 sec)
INFO:tensorflow:global_step/sec: 349.063
INFO:tensorflow:loss = 0.048120163, step = 6101 (0.279 sec)
INFO:tensorflow:global_step/sec: 339.279
INFO:tensorflow:loss = 0.044583093, step = 6201 (0.295 sec)
INFO:tensorflow:global_step/sec: 339.525
INFO:tensorflow:loss = 0.04749337, step = 6301 (0.295 sec)
INFO:tensorflow:global_step/sec: 334.616
INFO:tensorflow:loss = 0.07128422, step = 6401 (0.304 sec)
INFO:tensorflow:global_step/sec: 331.25
INFO:tensorflow:loss = 0.05821591, step = 6501 (0.296 sec)
INFO:tensorflow:global_step/sec: 335.526
INFO:tensorflow:loss = 0.019353827, step = 6601 (0.298 sec)
| MIT | frameworks/tensorflow/adanet_objective.ipynb | jiankaiwang/sophia.ml |
load test data and loc_map | test = np.load("checkpt/test_data.npy")
loc_map = np.load("checkpt/test_loc_map.npy")
test_label = np.loadtxt("checkpt/test_label.txt")
test_label.shape | _____no_output_____ | CC-BY-4.0 | CNN/Heatmap_demo.ipynb | ucl-exoplanets/DI-Project |
read checkpoint | model = load_model("checkpt/ckt/checkpt_0.h5")
model.summary()
pred = model.predict(test) | _____no_output_____ | CC-BY-4.0 | CNN/Heatmap_demo.ipynb | ucl-exoplanets/DI-Project |
get True Positive | ## .argmax(axis=1) returns the index of the larger score in each row, e.g. [0.6, 0.9] gives 1, which matches the one-hot label format.
## this is a good format as our test_label is organised in [0,1] or [1,0] format.
TP = np.where(pred.argmax(axis=1) == test_label.argmax(axis=1))
## I would also suggest checking the confidence of the prediction. Usually we want at least 0.9.
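# A minimal sketch of that confidence filter (assumed threshold of 0.9; it also
# assumes the model's final layer outputs class probabilities): keep only the
# correctly classified samples whose winning score is at least 0.9.
confidence = pred.max(axis=1)
confident_TP = np.where((pred.argmax(axis=1) == test_label.argmax(axis=1)) & (confidence >= 0.9))[0]
print("confident true positives:", len(confident_TP))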
def return_heatmap(model, org_img, normalise=True):
## CAM code implementation
## we need to extract the last conv layer, and that depends on your architecture.
test_img = model.output[:, 1]
last_conv_layer = model.get_layer('conv2d_6')
grads = K.gradients(test_img, last_conv_layer.output)[0]
pooled_grads = K.mean(grads, axis=(0, 1, 2))
message = K.print_tensor(pooled_grads, message='pool_grad = ')
iterate = K.function([model.input, K.learning_phase()],
[message, last_conv_layer.output[0]])
pooled_grads_value, conv_layer_output_value = iterate([org_img.reshape(-1, 64, 64, 1), 0])
for i in range(conv_layer_output_value.shape[2]):
conv_layer_output_value[:, :, i] *= pooled_grads_value[i]
heatmap = np.mean(conv_layer_output_value, axis=-1)
if normalise:
heatmap = np.maximum(heatmap, 0)
heatmap /= np.max(heatmap)
return heatmap
def plot_heatmap(heatmap, loc_map):
fig = plt.figure(figsize=(16, 8))
grid = ImageGrid(fig, 111, # as in plt.subplot(111)
nrows_ncols=(1, 2),
axes_pad=0.15,
share_all=True,
)
# Add data to image grid
im = grid[0].imshow(heatmap)
im = grid[1].imshow(loc_map)
plt.show()
| _____no_output_____ | CC-BY-4.0 | CNN/Heatmap_demo.ipynb | ucl-exoplanets/DI-Project |
Calculate and plot heatmap | num = -1
heatmap = return_heatmap(model, test[num])
plot_heatmap(heatmap, loc_map[num]) | _____no_output_____ | CC-BY-4.0 | CNN/Heatmap_demo.ipynb | ucl-exoplanets/DI-Project |
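The raw class-activation map has the spatial resolution of the last convolutional feature map, so it is often easier to judge once upsampled to the input size and overlaid on the original frame. A rough sketch (it assumes the heatmap side divides 64 evenly and that the test images are 64x64 single-channel, as in this notebook; the blow-up is a simple nearest-neighbour repeat rather than OpenCV resizing):
scale = 64 // heatmap.shape[0]
upsampled = np.kron(heatmap, np.ones((scale, scale)))  # nearest-neighbour upsampling to 64x64
plt.imshow(test[num].reshape(64, 64), cmap='gray')     # original frame
plt.imshow(upsampled, cmap='jet', alpha=0.4)           # translucent CAM overlay
plt.show()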
Construct the dataset | def create_data():
datasets = [['青年', '否', '否', '一般', '否'],
['青年', '否', '否', '好', '否'],
['青年', '是', '否', '好', '是'],
['青年', '是', '是', '一般', '是'],
['青年', '否', '否', '一般', '否'],
['中年', '否', '否', '一般', '否'],
['中年', '否', '否', '好', '否'],
['中年', '是', '是', '好', '是'],
['中年', '否', '是', '非常好', '是'],
['中年', '否', '是', '非常好', '是'],
['老年', '否', '是', '非常好', '是'],
['老年', '否', '是', '好', '是'],
['老年', '是', '否', '好', '是'],
['老年', '是', '否', '非常好', '是'],
['老年', '否', '否', '一般', '否'],
]
labels = [u'年龄', u'有工作', u'有自己的房子', u'信贷情况', u'类别']
# Return the dataset and the name of each feature dimension
return datasets, labels
dataset,columns = create_data()
X,y = np.array(dataset)[:,:-1],np.array(dataset)[:,-1]
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=1)
pd.DataFrame(dataset, columns=columns) | _____no_output_____ | MIT | DecisionTree/MyDecisionTree.ipynb | QYHcrossover/ML-numpy