markdown | code | output | license | path | repo_name
---|---|---|---|---|---
Run Detection and Matching. We're using `sep`, via the DES Y6 settings from `esheldon/sxdes`, here for simplicity. Eventually, one should use the stack itself. | import sep
from sxdes import run_sep
import esutil.numpy_util
import numpy as np  # used below (np.sqrt, np.zeros_like, np.mean); may already be imported earlier in the notebook
import matplotlib.pyplot as plt  # used for the figures below; may already be imported earlier in the notebook
from ssi_tools.matching import do_balrogesque_matching
sep.set_extract_pixstack(1_000_000)
def _run_sep_and_add_radec(ti, img, err=None, minerr=None):
if err is None:
err = np.sqrt(img.variance.array.copy())
img = img.image.array.copy()
if minerr is not None:
msk = err < minerr
err[msk] = minerr
cat, seg = run_sep(
img,
err,
)
cat = esutil.numpy_util.add_fields(cat, [("ra", "f8"), ("dec", "f8")])
wcs = ti.getWcs()
cat["ra"], cat["dec"] = wcs.pixelToSkyArray(cat["x"], cat["y"], degrees=True)
return cat, seg
orig_det_cat, orig_det_seg = _run_sep_and_add_radec(ti, image)
fsi_det_cat, fsi_det_seg = _run_sep_and_add_radec(ti, fake_image)
fsi_truth_cat, fsi_truth_seg = _run_sep_and_add_radec(
ti,
(fake_image.image.array - image.image.array).copy(),
np.zeros_like(np.sqrt(fake_image.variance.array.copy())),
minerr=np.mean(np.sqrt(fake_image.variance.array.copy())),
)
print("found truth srcs:", fsi_truth_cat.shape)
match_flag, match_index = do_balrogesque_matching(
fsi_det_cat, orig_det_cat, fsi_truth_cat, "flux_auto",
)
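# (Added sketch, not part of the original notebook.) Quick sanity check on the matching
# outputs. We assume here, as the magnitude-comparison cell further below does, that
# match_flag < 2 marks an injected-image detection matched back to a truth source; the
# exact flag convention is defined by do_balrogesque_matching.
print("matched fake detections:", np.sum(match_flag < 2), "of", fsi_det_cat.shape[0])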
ms = 2
tst = np.arcsinh(fake_image.maskedImage.image.array/np.sqrt(image.variance.array))
vmin = tst.min()
vmax = tst.max()
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(15, 10))
for ax in axes.ravel()[:-1]:
ax.set_xlabel("x [pixels]")
ax.set_ylabel("y [pixels]")
axes[0, 0].imshow(
np.arcsinh(image.maskedImage.image.array/np.sqrt(image.variance.array)),
vmin=vmin, vmax=vmax,
origin='lower',
)
axes[0, 0].set_title("image")
axes[0, 0].plot(orig_det_cat["x"], orig_det_cat["y"], '.r', ms=ms)
u = np.random.uniform(size=orig_det_seg.max()+1)
axes[1, 0].imshow(
u[orig_det_seg],
origin='lower',
)
axes[1, 0].set_title("image seg map")
axes[0, 1].imshow(
np.arcsinh(fake_image.maskedImage.image.array/np.sqrt(image.variance.array)),
vmin=vmin, vmax=vmax,
origin='lower',
)
axes[0, 1].set_title("FSI image")
axes[0, 1].plot(fsi_det_cat["x"], fsi_det_cat["y"], '.r', ms=ms)
u = np.random.uniform(size=fsi_det_seg.max()+1)
axes[1, 1].imshow(
u[fsi_det_seg],
origin='lower',
)
axes[1, 1].set_title("FSI image seg map")
axes[0, 2].imshow(
np.arcsinh((fake_image.maskedImage.image.array - image.maskedImage.image.array)/np.sqrt(image.variance.array)),
origin='lower',
)
axes[0, 2].set_title("diff. image")
axes[0, 2].plot(fsi_truth_cat["x"], fsi_truth_cat["y"], '.r', ms=ms)
coadd_zp = 2.5*np.log10(image.getPhotoCalib().getInstFluxAtZeroMagnitude())
msk = match_flag < 2
true_mag = coadd_zp - 2.5*np.log10(fsi_truth_cat["flux_auto"][match_index[msk]])
obs_mag = coadd_zp - 2.5*np.log10(fsi_det_cat["flux_auto"][msk])
dmag = obs_mag - true_mag
axes[1, 2].plot(true_mag, dmag, '.k')
axes[1, 2].set_xlim(25, 19)
axes[1, 2].set_xlabel("true r-band mag auto")
axes[1, 2].set_ylabel("obs - true r-band mag auto")
fig.savefig("fsi_matched.pdf")
plt.show()
| _____no_output_____ | BSD-3-Clause | examples/cosmoDC2_galaxy_hexgrid_matching_example.ipynb | LSSTDESC/ssi-tools |
Financial Simulations. Measure different investment strategies. Helper Functions: run me before running any of the lower cells. | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import random
findata = pd.read_csv("https://raw.githubusercontent.com/sameerkulkarni/financial_simulations/master/returns.csv", sep=",")
# Add data of monthly rate of change in adj close.
findata['Scale'] = findata['Adj Close'].pct_change() +1
# Adjust the scale to inflation every month.
findata['Scale_InfAdj'] = findata['Scale'] - (findata['Inflation']/1200)
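# (Added note.) Example of the adjustment above: a 1.2% nominal monthly gain (Scale = 1.012)
# with 3% annual inflation (3/1200 = 0.0025 per month) gives Scale_InfAdj = 1.012 - 0.0025 = 1.0095.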
findata = findata.drop(['Open', 'High', 'Low'], axis=1)
fin_index = pd.Index(findata['Date'])
# Number of times to run any of the simulations below
NUM_SAMPLES=10000
def pretty_plot_fn(distribution, heighlight_value, message):
plt.hist(distribution, bins=100)
plt.title(message)
plt.axvline(heighlight_value,color='r')
plt.show()
# The three functions below calculate the rate of return given a start point and
# a duration. e.g. A value of 1.07 would mean a 7% average return on investment
# annually over the investment period.
# 1. Basic Strategy: Invest once, for the entire duration.
def calculate_returns(startpoint, duration):
begin = findata.loc[startpoint]['Adj Close']
end = findata.loc[startpoint+(duration*12)]['Adj Close']
total_returns = 1+ ((end-begin)/begin)
avg_returns = (total_returns**(1.0/duration))
return avg_returns
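# (Added note.) Worked example: if the Adj Close doubles over a 10-year window, then
# total_returns = 2.0 and avg_returns = 2.0**(1/10) ~= 1.072, i.e. roughly 7.2% per year.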
# 2. Secondary strategy: If the intended retirement would cause an average
# return of less than expected_returns, then wait a max of extended_duration
# years and retire as soon as it reaches expected_returns. If waiting does
# not reach expected returns, retire at the end of the extended_duration.
def flexible_end_date(startpoint, duration, extended_duration=2, expected_returns=1.065):
begin = findata.loc[startpoint]['Adj Close']
intended_end_month= startpoint+(duration*12)
end = findata.loc[intended_end_month]['Adj Close']
total_returns = 1+ ((end-begin)/begin)
avg_returns = (total_returns**(1.0/duration))
# print("%2d, %2d, %3.3f, %3.3f, %1.3f, %1.3f" %(startpoint, intended_end_month, begin, end, total_returns, avg_returns))
if (avg_returns > expected_returns):
return avg_returns
for i in range(intended_end_month,intended_end_month+(extended_duration*12)):
end = findata.loc[i]['Adj Close']
total_returns= 1+ ((end-begin)/begin)
avg_returns = (total_returns**(1.0/((i-startpoint)/12)))
# print("%2d, %2d, %3.3f, %3.3f, %1.3f, %1.3f" %(startpoint, i, begin, end, total_returns, avg_returns))
if avg_returns >= expected_returns:
return avg_returns
return avg_returns
# 3. Capped Gains Strategy: Here your returns are capped between [0,9] on a yearly
# basis. Note that this is an extremely uncommon investment vehicle, and
# there is a high chance that this is something you would not want to pick
# if at all available.
def capped_gains_strategy(startpoint, duration):
returns=[]
for i in range(0,duration):
start_month=startpoint +(i*12)
end_month=startpoint +((i+1)*12)
begin = findata.loc[start_month]['Adj Close']
end = findata.loc[end_month]['Adj Close']
yearly_return = 1+ ((end-begin)/begin)
if yearly_return < 1:
yearly_return = 1
if yearly_return > 1.095:
yearly_return = 1.095
returns.append(yearly_return)
# print(returns)
avg_returns = np.average(returns)
return avg_returns
# The next three functions also take inflation into account for each of the
# months over which we compute the returns. In months of large market
# fluctuations, inflation also changes; this allows one to measure the
# real investment returns for those months. e.g. if the market drops
# a lot, the hypothesis is that consumer goods would also see a significant
# drop, and it is useful to take this into account.
def current_return(month, investment_type="snp500"):
    # investment_type: "snp500", "capped_snp500", "savings_account", or "bonds"
if investment_type == "capped_snp500":
return max(1, min((1+(.09/12)), findata.loc[month]['Scale_InfAdj']))
if investment_type == "savings_account":
return (1.0 - (findata.loc[month]['Inflation']/1200))
if investment_type == "bonds":
# If invested in a bond, assume that you are unaffected by inflation.
return 1.0
if investment_type == "snp500":
return findata.loc[month]['Scale_InfAdj']
# Default case is if investing in an s&p 500.
return findata.loc[month]['Scale_InfAdj']
# Regular investment
def future_value(initial_amount, investment_amount, num_months, start_month, investment_type="snp500"):
    # initial_amount: Initial amount to start the investment with.
    # investment_amount: Amount to be invested every month.
    # num_months: Number of months to invest.
    # start_month: When do you start investing the money.
    # investment_type: "snp500", "capped_snp500", "savings_account", or "bonds"
current_amount=initial_amount
for i in range(start_month, (start_month+num_months)):
current_amount+=investment_amount
current_amount*=current_return(i, investment_type)
return current_amount
def weighted_return(initial_rampup_duration, initial_amount, total_duration, trickle_invest, start_month, investment_type="snp500"):
    # initial_rampup_duration: Number of months over which to spread the initial investment.
    # initial_amount: Total amount in units to be invested initially.
    # total_duration: Total investment horizon in months.
    # trickle_invest: Amount invested every month (also added during the ramp-up).
investment_during_rampup = (initial_amount/initial_rampup_duration)+trickle_invest
amount_after_initial_rampup=future_value(0,investment_during_rampup,initial_rampup_duration,start_month, investment_type)
final_amount=future_value(amount_after_initial_rampup, trickle_invest, total_duration-initial_rampup_duration, start_month+initial_rampup_duration, investment_type)
return final_amount | _____no_output_____ | MIT | base_functions.ipynb | sameerkulkarni/financial_simulations |
One shot investment strategies. The strategies below mimic one time investments, e.g. if you have a pot of money that you need to invest into the market for "num_years". | num_years = 25 #@param {type:"slider", min:1, max:60, step:1.0}
start_points=[random.randint(0,(92-num_years)*12) for i in range(NUM_SAMPLES)]
yearly_returns = [calculate_returns(start_points[i],num_years) for i in range(len(start_points))]
ref_returns=np.median(yearly_returns)
pretty_plot_fn(yearly_returns, ref_returns, "S&P 500 Returns for %d years (%1.3f)."%(num_years,ref_returns))
print("%1.4f"%ref_returns)
num_years = 16 #@param {type:"slider", min:1, max:60, step:1.0}
flexible_years = 3 #@param {type:"slider", min:0, max:15, step:1.0}
# min_acceptable_return is the avg. return that you would be comfortable
# retiring with, a value of 1.03 means that your investment kept pace with
# inflation (avg inflation assumed to be 1.03), 1.1 would mean that you would
# like an average return of 10% per year (values above 1.07 may be unrealistic).
min_acceptable_return = 1.065 #@param {type:"slider", min:1.03, max:1.1, step:0.05}
start_points=[random.randint(0,(92-(num_years+flexible_years))*12) for i in range(NUM_SAMPLES)]
yearly_returns = [flexible_end_date(start_points[i],num_years,flexible_years, min_acceptable_return) for i in range(len(start_points))]
ref_returns=np.median(yearly_returns)
pretty_plot_fn(yearly_returns, ref_returns, "S&P 500 Returns for %d years (%1.3f)."%(num_years,ref_returns))
print("%1.4f"%ref_returns)
num_years = 15 #@param {type:"slider", min:1, max:60, step:1.0}
start_points=[random.randint(0,(92-num_years)*12) for i in range(NUM_SAMPLES)]
yearly_returns = [capped_gains_strategy(start_points[i],num_years) for i in range(len(start_points))]
ref_returns=np.median(yearly_returns)
pretty_plot_fn(yearly_returns, ref_returns, "CappedReturns for %d years (%1.3f)."%(num_years,ref_returns))
print("%1.4f"%ref_returns) | _____no_output_____ | MIT | base_functions.ipynb | sameerkulkarni/financial_simulations |
Systematic monthly investments. These simulations simulate a typical method of saving. One would start with an initial investment amount ("initial_amount"), and would then invest some more money on a monthly basis ("monthly_amount"). These simulations also account for inflation during the months in question, and thus try to paint the most accurate picture available. The final value is presented in current year dollar amounts to make it easier to visualize. | # Number of years one has before retirement.
num_years = 20 #@param {type:"slider", min:1, max:60, step:1.0}
# The amount of money that one has at present that you would like to invest.
initial_amount=100 #@param {type:"slider", min:10, max:200, step:1.0}
# If the amount of money is large it would be advisable to split the initial
# amount of money over "initial_rampup_months" number of months.
initial_rampup_months=6 #@param {type:"slider", min:1, max:60, step:1.0}
# Amount of money that one would like to invest every month.
monthly_amount=4 #@param {type:"slider", min:1, max:100, step:1.0}
# Invest in S&P 500
investment_type = "snp500" #@param ['snp500', 'savings_account', 'bonds', 'capped_snp500']
start_points=[random.randint(1,(92-num_years)*12) for i in range(NUM_SAMPLES)]
final_networths = [weighted_return(initial_rampup_months, initial_amount, (num_years*12), monthly_amount, start_points[i],investment_type) for i in range(len(start_points))]
ref_nw=np.median(final_networths)
pretty_plot_fn(final_networths, ref_nw, "%s Net worths after %d years (%1.3f)."%(investment_type, num_years,ref_nw))
print("Median final Networth = $%4.4f"%ref_nw)
investment_type = "bonds" #@param ['snp500', 'savings_account', 'bonds', 'capped_snp500']
start_points=[random.randint(1,(92-num_years)*12) for i in range(NUM_SAMPLES)]
final_networths = [weighted_return(initial_rampup_months, initial_amount, (num_years*12), monthly_amount, start_points[i],investment_type) for i in range(len(start_points))]
ref_nw=np.median(final_networths)
pretty_plot_fn(final_networths, ref_nw, "%s Net worths after %d years (%1.3f)."%(investment_type, num_years,ref_nw))
print("Median final Networth = $%4.4f"%ref_nw)
investment_type = "savings_account" #@param ['snp500', 'savings_account', 'bonds', 'capped_snp500']
start_points=[random.randint(1,(92-num_years)*12) for i in range(NUM_SAMPLES)]
final_networths = [weighted_return(initial_rampup_months, initial_amount, (num_years*12), monthly_amount, start_points[i],investment_type) for i in range(len(start_points))]
ref_nw=np.median(final_networths)
pretty_plot_fn(final_networths, ref_nw, "%s Net worths after %d years (%1.3f)."%(investment_type, num_years,ref_nw))
print("Median final Networth = $%4.4f"%ref_nw) | _____no_output_____ | MIT | base_functions.ipynb | sameerkulkarni/financial_simulations |
COVID-19 ish simulations. Given the current financial situation, the current markets are atypical. Thus the simulations below show returns around past recessions. | # Bear Markets finder (more than 20% drop from the previous high)
# https://www.investopedia.com/terms/b/bearmarket.asp
# Top 11 Bear market dates = http://www.nbcnews.com/id/37740147/ns/business-stocks_and_economy/t/historic-bear-markets/#.XoCWDzdKh24
bear_market_dates=['1929-09-01', '1946-05-01', '1961-12-01', '1968-11-01', '1973-01-01', '1980-11-01', '1987-08-01', '2000-03-01', '2007-10-01']
bear_market_months=[fin_index.get_loc(bear_date) for bear_date in bear_market_dates]
print(bear_market_months)
num_years = 20 #@param {type:"slider", min:1, max:60, step:1.0}
initial_rampup_months=6 #@param {type:"slider", min:1, max:60, step:1.0}
initial_amount=100 #@param {type:"slider", min:10, max:200, step:1.0}
monthly_invest=4 #@param {type:"slider", min:1, max:100, step:1.0}
month_offset=-1
start_points=[month+month_offset for month in bear_market_months[:8]]
# print(start_points)
# investment_type options: "snp500", "capped_snp500", "savings_account", "bonds"
for toi in ['bonds', 'snp500', 'savings_account', 'capped_snp500']:
tinvest = monthly_invest
if toi == 1:
tinvest-=1
final_networths = [weighted_return(initial_rampup_months, initial_amount, (num_years*12), tinvest, start_points[i], toi) for i in range(len(start_points))]
# final_networths = np.sort(final_networths)
avg_nw=np.average(final_networths)
med_nw=np.median(final_networths)
print('Type of investment : %s \t Median Networths : %f'%(toi, med_nw))
print(final_networths)
print()
findata.loc[-5:] | _____no_output_____ | MIT | base_functions.ipynb | sameerkulkarni/financial_simulations |
`ApJdataFrames` Malo et al. 2014---`Title`: BANYAN. III. Radial velocity, Rotation and X-ray emission of low-mass star candidates in nearby young kinematic groups `Authors`: Malo L., Artigau E., Doyon R., Lafreniere D., Albert L., Gagne J.Data is from this paper: http://iopscience.iop.org/article/10.1088/0004-637X/722/1/311/ | import warnings
warnings.filterwarnings("ignore")
from astropy.io import ascii
import pandas as pd
import seaborn as sns  # used below for sns.distplot; may already be imported elsewhere in the notebook
Table 1 - Target Information for Ophiuchus Sources | #! mkdir ../data/Malo2014
#! wget http://iopscience.iop.org/0004-637X/788/1/81/suppdata/apj494919t7_mrt.txt
! head ../data/Malo2014/apj494919t7_mrt.txt
from astropy.table import Table, Column
t1 = Table.read("../data/Malo2014/apj494919t7_mrt.txt", format='ascii')
sns.distplot(t1['Jmag'].data.data)
t1 | _____no_output_____ | MIT | notebooks/Malo2014.ipynb | BrownDwarf/ApJdataFrames |
Main types of neural networks (CNNs and RNNs). **Author: Alexey Umnov.** This seminar consists of two parts: first we will work on implementing convolutional and recurrent networks, and then we will investigate the problem of vanishing and exploding gradients. Convolutional networks. Let us return once more to the MNIST dataset. To start, we load the data and define a few useful functions, as in the previous seminar. | %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import random
from IPython.display import clear_output
import torch
import torch.nn as nn
import torch.nn.functional as F
random.seed(42)
np.random.seed(42)
torch.manual_seed(42)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(42)
from util import load_mnist
X_train, y_train, X_val, y_val, X_test, y_test = load_mnist(flatten=True)
plt.figure(figsize=[6, 6])
for i in range(4):
plt.subplot(2, 2, i + 1)
plt.title("Label: %i" % y_train[i])
plt.imshow(X_train[i].reshape([28, 28]), cmap='gray');
from util import iterate_minibatches
def train_epoch(model, optimizer, batchsize=32):
loss_log, acc_log = [], []
model.train()
for x_batch, y_batch in iterate_minibatches(X_train, y_train, batchsize=batchsize, shuffle=True):
data = torch.from_numpy(x_batch.astype(np.float32))
target = torch.from_numpy(y_batch.astype(np.int64))
optimizer.zero_grad()
output = model(data)
pred = torch.max(output, 1)[1].numpy()
acc = np.mean(pred == y_batch)
acc_log.append(acc)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
loss = loss.item()
loss_log.append(loss)
return loss_log, acc_log
def test(model):
loss_log, acc_log = [], []
model.eval()
for x_batch, y_batch in iterate_minibatches(X_val, y_val, batchsize=32, shuffle=True):
data = torch.from_numpy(x_batch.astype(np.float32))
target = torch.from_numpy(y_batch.astype(np.int64))
output = model(data)
loss = F.nll_loss(output, target)
pred = torch.max(output, 1)[1].numpy()
acc = np.mean(pred == y_batch)
acc_log.append(acc)
loss = loss.item()
loss_log.append(loss)
return loss_log, acc_log
def plot_history(train_history, val_history, title='loss'):
plt.figure()
plt.title('{}'.format(title))
plt.plot(train_history, label='train', zorder=1)
points = np.array(val_history)
plt.scatter(points[:, 0], points[:, 1], marker='+', s=180, c='orange', label='val', zorder=2)
plt.xlabel('train steps')
plt.legend(loc='best')
plt.grid()
plt.show()
def train(model, opt, n_epochs):
train_log, train_acc_log = [], []
val_log, val_acc_log = [], []
batchsize = 32
for epoch in range(n_epochs):
print("Epoch {} of {}".format(epoch, n_epochs))
train_loss, train_acc = train_epoch(model, opt, batchsize=batchsize)
val_loss, val_acc = test(model)
train_log.extend(train_loss)
train_acc_log.extend(train_acc)
steps = len(X_train) / batchsize
val_log.append((steps * (epoch + 1), np.mean(val_loss)))
val_acc_log.append((steps * (epoch + 1), np.mean(val_acc)))
clear_output()
plot_history(train_log, val_log)
plot_history(train_acc_log, val_acc_log, title='accuracy')
print("Epoch {} error = {:.2%}".format(epoch, 1 - val_acc_log[-1][1]))
print("Final error: {:.2%}".format(1 - val_acc_log[-1][1])) | _____no_output_____ | Apache-2.0 | 2020-fall/seminars/seminar3/DL20_fall_seminar3.ipynb | aosokin/DL_CSHSE_spring2018 |
**Task 1:** Implement a convolutional network that consists of two consecutive applications of convolution, ReLU and max-pooling, followed by a fully connected layer. Choose the parameters so that the tensor at the output of the last layer has dimensions 4 x 4 x 16. The code below uses the nn.Sequential wrapper; familiarize yourself with its interface. Make sure that the classification error after training (see below) is no higher than 1.5%. | class ConvNet(nn.Module):
def __init__(self):
super().__init__()
self.features = nn.Sequential(
# <your code here>
)
self.classifier = nn.Linear(4 * 4 * 16, 10)
def forward(self, x):
# <your code here>
return F.log_softmax(out, dim=-1) | _____no_output_____ | Apache-2.0 | 2020-fall/seminars/seminar3/DL20_fall_seminar3.ipynb | aosokin/DL_CSHSE_spring2018 |
Let us count the number of trainable parameters of the network (the fully connected networks from the previous seminar have 30-40 thousand parameters). | def count_parameters(model):
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
return sum([np.prod(p.size()) for p in model_parameters])
model = ConvNet()
print("Total number of trainable parameters:", count_parameters(model))
%%time
opt = torch.optim.RMSprop(model.parameters(), lr=0.001)
train(model, opt, 5) | _____no_output_____ | Apache-2.0 | 2020-fall/seminars/seminar3/DL20_fall_seminar3.ipynb | aosokin/DL_CSHSE_spring2018 |
We easily obtained better classification quality than we previously got with fully connected networks. Actually, for a fairer comparison one should tune both architectures and wait for more iterations until convergence, but due to limited computational resources we cannot do that here. Results that squeeze out the maximum can be found, for example, on this page: http://yann.lecun.com/exdb/mnist/, where it is clear that the quality of convolutional networks is much higher. And when working with more complex images (for example, ImageNet), convolutional networks win by a large margin. **Exercise:** Recall the lecture material and answer the questions below: * Why do convolutional networks have such an advantage specifically for images? * Why does training convolutional networks take so much time despite the small number of parameters? Recurrent networks. For recurrent networks we will use a dataset of names and determine which language a given name comes from. To do this we will build a recurrent network that works with names at the character level. First, let us download the files and convert them to a convenient format (no need to look too closely at this code). | # On Windows you will have to download the archive from the link (~3Mb) and unpack it yourself
! wget -nc https://download.pytorch.org/tutorial/data.zip
! unzip -n ./data.zip
from io import open
import glob
def findFiles(path): return glob.glob(path)
print(findFiles('data/names/*.txt'))
import unicodedata
import string
all_letters = string.ascii_letters + " .,;'"
n_letters = len(all_letters)
# Turn a Unicode string to plain ASCII, thanks to http://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
and c in all_letters
)
print(unicodeToAscii('Ślusàrski'))
# Build the category_lines dictionary, a list of names per language
category_lines = {}
all_categories = []
# Read a file and split into lines
def readLines(filename):
lines = open(filename, encoding='utf-8').read().strip().split('\n')
return [unicodeToAscii(line) for line in lines]
for filename in findFiles('data/names/*.txt'):
category = filename.split('/')[-1].split('.')[0]
all_categories.append(category)
lines = readLines(filename)
category_lines[category] = lines
n_categories = len(all_categories)
def categoryFromOutput(output):
top_n, top_i = output.topk(1)
category_i = top_i[0][0]
return all_categories[category_i], category_i | _____no_output_____ | Apache-2.0 | 2020-fall/seminars/seminar3/DL20_fall_seminar3.ipynb | aosokin/DL_CSHSE_spring2018 |
Let us define a few convenient functions for converting letters and words into tensors. **Task 2**: write the last function, which converts a word into a tensor. | # Find letter index from all_letters, e.g. "a" = 0
def letterToIndex(letter):
return all_letters.find(letter)
# Just for demonstration, turn a letter into a <1 x n_letters> Tensor
def letterToTensor(letter):
tensor = torch.zeros(1, n_letters)
tensor[0][letterToIndex(letter)] = 1
return tensor
# Turn a line into a <line_length x 1 x n_letters>,
# or an array of one-hot letter vectors
def lineToTensor(line):
# <your code here>
print(letterToTensor('J'))
print(lineToTensor('Jones').size()) | _____no_output_____ | Apache-2.0 | 2020-fall/seminars/seminar3/DL20_fall_seminar3.ipynb | aosokin/DL_CSHSE_spring2018 |
**Task 3:** Implement a single-layer recurrent network. | class RNNCell(nn.Module):
def __init__(self, input_size, hidden_size):
super(RNNCell, self).__init__()
self.hidden_size = hidden_size
# <your code here>
# <end>
def forward(self, input, hidden):
# <your code here>
# <end>
return hidden
def initHidden(self):
return torch.zeros(1, self.hidden_size)
n_hidden = 128
rnncell = RNNCell(n_letters, n_hidden) | _____no_output_____ | Apache-2.0 | 2020-fall/seminars/seminar3/DL20_fall_seminar3.ipynb | aosokin/DL_CSHSE_spring2018 |
We will make predictions using a linear classifier on top of the hidden states of the network. | classifier = nn.Sequential(nn.Linear(n_hidden, n_categories), nn.LogSoftmax(dim=1)) | _____no_output_____ | Apache-2.0 | 2020-fall/seminars/seminar3/DL20_fall_seminar3.ipynb | aosokin/DL_CSHSE_spring2018 |
Let us check that everything works correctly: the classifier outputs must be log-probabilities. | input = letterToTensor('A')
hidden = torch.zeros(1, n_hidden)
output = classifier(rnncell(input, hidden))
print(output)
print(torch.exp(output).sum())
input = lineToTensor('Albert')
hidden = torch.zeros(1, n_hidden)
output = classifier(rnncell(input[0], hidden))
print(output)
print(torch.exp(output).sum()) | _____no_output_____ | Apache-2.0 | 2020-fall/seminars/seminar3/DL20_fall_seminar3.ipynb | aosokin/DL_CSHSE_spring2018 |
For simplicity, this time we will optimize not over mini-batches but over individual examples. Below are a few useful functions for this. | import random
def randomChoice(l):
return l[random.randint(0, len(l) - 1)]
def randomTrainingExample():
category = randomChoice(all_categories)
line = randomChoice(category_lines[category])
category_tensor = torch.tensor([all_categories.index(category)], dtype=torch.long)
line_tensor = lineToTensor(line)
return category, line, category_tensor, line_tensor
for i in range(10):
category, line, category_tensor, line_tensor = randomTrainingExample()
print('category =', category, '/ line =', line) | _____no_output_____ | Apache-2.0 | 2020-fall/seminars/seminar3/DL20_fall_seminar3.ipynb | aosokin/DL_CSHSE_spring2018 |
**Task 4:** Implement the computation of the output in the train function. If everything is done correctly, the accuracy on the training set should be at least 70%. | from tqdm import trange
def train(category, category_tensor, line_tensor, optimizer):
hidden = rnncell.initHidden()
rnncell.zero_grad()
classifier.zero_grad()
# <your code here>
# use rnncell and classifier
# <end>
loss = F.nll_loss(output, category_tensor)
loss.backward()
optimizer.step()
acc = (categoryFromOutput(output)[0] == category)
return loss.item(), acc
n_iters = 50000
plot_every = 1000
current_loss = 0
all_losses = []
current_acc = 0
all_accs = []
n_hidden = 128
rnncell = RNNCell(n_letters, n_hidden)
classifier = nn.Sequential(nn.Linear(n_hidden, n_categories), nn.LogSoftmax(dim=1))
params = list(rnncell.parameters()) + list(classifier.parameters())
opt = torch.optim.RMSprop(params, lr=0.001)
for iter in trange(1, n_iters + 1):
category, line, category_tensor, line_tensor = randomTrainingExample()
loss, acc = train(category, category_tensor, line_tensor, opt)
current_loss += loss
current_acc += acc
# Add current loss avg to list of losses
if iter % plot_every == 0:
all_losses.append(current_loss / plot_every)
current_loss = 0
all_accs.append(current_acc / plot_every)
current_acc = 0
plt.figure()
plt.title("Loss")
plt.plot(all_losses)
plt.grid()
plt.show()
plt.figure()
plt.title("Accuracy")
plt.plot(all_accs)
plt.grid()
plt.show() | _____no_output_____ | Apache-2.0 | 2020-fall/seminars/seminar3/DL20_fall_seminar3.ipynb | aosokin/DL_CSHSE_spring2018 |
Vanishing and exploding gradients. We will again run the experiments on the MNIST dataset, but this time with fully connected networks. In this section we will not try to find a better architecture; we are only interested in the peculiarities of training deep networks. | from util import load_mnist
X_train, y_train, X_val, y_val, X_test, y_test = load_mnist(flatten=True) | _____no_output_____ | Apache-2.0 | 2020-fall/seminars/seminar3/DL20_fall_seminar3.ipynb | aosokin/DL_CSHSE_spring2018 |
For the experiments we need to implement a network in which the number of layers can easily be changed. This network should also store the gradients at all layers, so that we can later look at their magnitudes. **Task 5:** fill in the missing part of the code below. | class DeepDenseNet(nn.Module):
def __init__(self, n_layers, hidden_size, activation):
super().__init__()
self.activation = activation
l0 = nn.Linear(X_train.shape[1], hidden_size)
self.weights = [l0.weight]
self.layers = [l0]
# <your code here>
self.seq = nn.Sequential(*self.layers)
for l in self.weights:
l.retain_grad()
def forward(self, x):
out = self.seq(x)
return F.log_softmax(out, dim=-1) | _____no_output_____ | Apache-2.0 | 2020-fall/seminars/seminar3/DL20_fall_seminar3.ipynb | aosokin/DL_CSHSE_spring2018 |
Let us modify our training functions so that they also plot how the gradients change. | import scipy.sparse.linalg
def train_epoch_grad(model, optimizer, batchsize=32):
loss_log, acc_log = [], []
grads = [[] for l in model.weights]
model.train()
for x_batch, y_batch in iterate_minibatches(X_train, y_train, batchsize=batchsize, shuffle=True):
# data preparation
data = torch.from_numpy(x_batch.astype(np.float32))
target = torch.from_numpy(y_batch.astype(np.int64))
optimizer.zero_grad()
output = model(data)
pred = torch.max(output, 1)[1].numpy()
acc = np.mean(pred == y_batch)
acc_log.append(acc)
loss = F.nll_loss(output, target)
# compute gradients
loss.backward()
# make a step
optimizer.step()
loss = loss.item()
loss_log.append(loss)
for g, l in zip(grads, model.weights):
g.append(np.linalg.norm(l.grad.numpy()))
return loss_log, acc_log, grads
def train_grad(model, opt, n_epochs):
train_log, train_acc_log = [], []
val_log, val_acc_log = [], []
grads_log = None
batchsize = 32
for epoch in range(n_epochs):
print("Epoch {} of {}".format(epoch, n_epochs))
train_loss, train_acc, grads = train_epoch_grad(model, opt, batchsize=batchsize)
if grads_log is None:
grads_log = grads
else:
for a, b in zip(grads_log, grads):
a.extend(b)
val_loss, val_acc = test(model)
train_log.extend(train_loss)
train_acc_log.extend(train_acc)
steps = len(X_train) / batchsize
val_log.append((steps * (epoch + 1), np.mean(val_loss)))
val_acc_log.append((steps * (epoch + 1), np.mean(val_acc)))
# display all metrics
clear_output()
plot_history(train_log, val_log)
plot_history(train_acc_log, val_acc_log, title='accuracy')
plt.figure()
all_vals = []
for i, g in enumerate(grads_log):
w = np.ones(100)
w /= w.sum()
vals = np.convolve(w, g, mode='valid')
plt.semilogy(vals, label=str(i+1), color=plt.cm.coolwarm((i / len(grads_log))))
all_vals.extend(vals)
plt.legend(loc='best')
plt.grid()
plt.show() | _____no_output_____ | Apache-2.0 | 2020-fall/seminars/seminar3/DL20_fall_seminar3.ipynb | aosokin/DL_CSHSE_spring2018 |
**Task 6:** * Train networks of depth 10 and more with the sigmoid as the activation. Investigate how the depth affects the training quality and the behavior of the gradients in the layers far from the output. * Now replace the activation with ReLU and see what happens. | # ... | _____no_output_____ | Apache-2.0 | 2020-fall/seminars/seminar3/DL20_fall_seminar3.ipynb | aosokin/DL_CSHSE_spring2018 |
Now let us try adding skip-connections to the network (as in ResNet) instead of replacing the sigmoid with ReLU, and see what happens. Stuffing all the layers into nn.Sequential and simply applying it will no longer work; instead we will apply them manually. We still need to put them into a separate nn.Sequential module, though, otherwise torch will not be able to find and optimize them. **Task 7:** fill in the missing part of the code below. | class DeepDenseResNet(nn.Module):
def __init__(self, n_layers, hidden_size, activation):
super().__init__()
self.activation = activation
l0 = nn.Linear(X_train.shape[1], hidden_size)
self.weights = [l0.weight]
self.layers = [l0]
for i in range(1, n_layers - 1):
l = nn.Linear(hidden_size, hidden_size)
self.layers.append(l)
self.weights.append(l.weight)
l = nn.Linear(hidden_size, 10)
self.layers.append(l)
self.weights.append(l.weight)
self.seq = nn.Sequential(*self.layers)
for l in self.weights:
l.retain_grad()
def forward(self, x):
# <your code here>
return F.log_softmax(x, dim=-1) | _____no_output_____ | Apache-2.0 | 2020-fall/seminars/seminar3/DL20_fall_seminar3.ipynb | aosokin/DL_CSHSE_spring2018 |
Let us make sure that such a network trains well even with a large number of layers. | model = DeepDenseResNet(n_layers=20, hidden_size=10, activation=nn.Sigmoid)
opt = torch.optim.RMSprop(model.parameters(), lr=0.001)
train_grad(model, opt, 10) | _____no_output_____ | Apache-2.0 | 2020-fall/seminars/seminar3/DL20_fall_seminar3.ipynb | aosokin/DL_CSHSE_spring2018 |
Commandments. The Supreme Committee for the Doctrine of Coding has issued important commandments that you shall follow scrupulously. If you accept their wise words, you will become a true Python Jedi. **WARNING**: if you do not follow the Commandments, you will end up in _Debugging Hell_! COMMANDMENT I **You shall write Python code** Whoever does not write Python code does not learn Python. COMMANDMENT II **When you introduce a variable in a** `for` **loop, that variable must be new** If you defined the variable before, you shall not reintroduce it in a `for`, because that would bring confusion to the minds of those who read. Therefore avoid these sins: | i = 7
for i in range(3): # sin: you lose the variable i
    print(i)
print(i) # prints 2 and not 7 !!
def f(i):
    for i in range(3): # another sin: you lose the parameter i
        print(i)
    print(i) # prints 2, and not the 7 we passed it !
f(7)
for i in range(2):
    for i in range(5): # debugging hell: you lose the i of the outer for loop
        print(i)
    print(i) # prints 4 !! | 0
1
2
3
4
4
0
1
2
3
4
4
| CC-BY-4.0 | commandments.ipynb | DavidLeoni/softpython- |
COMMANDMENT III **You shall never reassign function parameters** You shall never make any of these assignments, on pain of losing the parameter passed in when the function is called: | def peccato(intero):
    intero = 666 # sin: you have lost the 5 passed from outside !
    print(intero) # prints 666
x = 5
peccato(x) | 666
| CC-BY-4.0 | commandments.ipynb | DavidLeoni/softpython- |
The same reasoning applies to all the other types: | def male(stringa):
stringa = "666"
def disgrazia(lista):
lista = [666]
def delirio(dizionario):
dizionario = {"evil":666} | _____no_output_____ | CC-BY-4.0 | commandments.ipynb | DavidLeoni/softpython- |
Only for composite parameters such as lists or dictionaries may you write as below, IF AND ONLY IF the specification of the function asks you to MODIFY the internal elements of the parameter (for example sorting a list or changing a field of a dictionary) | # MODIFY the list in some way
def consentito(lista):
    lista[2] = 9 # OK, the function's specification asks for it
fuori = [8,5,7]
consentito(fuori)
print(fuori)
# MODIFY the dictionary in some way
def daccordo(dizionario):
    dizionario["mio campo"] = 5 # OK, the specification asks for it
# MODIFY the instance in some way
def va_bene(istanza_di_classe):
    istanza_di_classe.mio_campo = 7 # OK, the specification asks for it | _____no_output_____ | CC-BY-4.0 | commandments.ipynb | DavidLeoni/softpython- |
If instead the specification of a function asks you to RETURN a NEW object, you shall not fall into the temptation of modifying the input: |
# RETURN a NEW sorted list
def dolore(lista):
    lista.sort() # BAD: you are modifying the input list instead of creating a new one!
    return lista
# RETURN a NEW list
def crisi(lista):
    lista[0] = 5 # BAD, same as above
    return lista
# RETURN a NEW dictionary
def tormento(dizionario):
    dizionario['a'] = 6 # BAD: you are modifying the input dictionary
                        # instead of creating a new one!
    return dizionario
# RETURN a NEW class instance
def disperazione(istanza):
    istanza.mio_campo = 6 # BAD: you are modifying the input object
                          # instead of creating a new one!
    return istanza | _____no_output_____ | CC-BY-4.0 | commandments.ipynb | DavidLeoni/softpython- |
COMMANDMENT IV **You shall never assign values to calls to functions or methods**
```python
mia_funzione() = 666      # WRONG
mia_funzione() = 'evil'   # WRONG
mia_funzione() = [666]    # WRONG
```
```python
x = 5          # OK
y = my_fun()   # OK
z = []         # OK
z[0] = 7       # OK
d = dict()     # OK
d["a"] = 6     # OK
```
Calls to functions such as `mia_funzione()` return the results of computations and put them into a box that is created only for the purpose of the call, and Python will not let us reuse it as a variable. When you see `nome()` on the left-hand side, it _cannot_ be followed by an equals sign `=` (but it can be followed by two equals signs `==` if you are performing a comparison). COMMANDMENT V **You shall never redefine system functions** Python has several predefined system functions. For example `list` is a Python type: as such, you can use it, for instance, as a function to convert some type to a list: | list("ciao")
When you let the Forces of Evil take over, you might be tempted to use system types and functions (e.g. `list`) as a variable for your own miserable personal purposes:
```python
list = ['my', 'horrifying', 'list']
```
Python allows you to do this, but **we do not**, because the consequences are disastrous. For example, if you now use `list` for the purpose it was created for, namely conversion to a list, it will no longer work:
```python
list("ciao")
```
```
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
 in ()
----> 1 list("ciao")

TypeError: 'list' object is not callable
```
In particular, we recommend that you **do not redefine** these precious functions:
* `bool`, `int`, `float`, `tuple`, `str`, `list`, `set`, `dict`
* `max`, `min`, `sum`
* `next`, `iter`
* `id`, `dir`, `vars`, `help`
COMMANDMENT VI **You shall use the** `return` **statement only if you see RETURN written in the function description!** If there is no RETURN in the function description, it is understood that the function returns `None`. In that case you do not even need to write `return None`, because Python will do it implicitly for you. COMMANDMENT VII **You shall also write on paper!** If staring at the monitor does not work, help yourself and draw on paper a representation of the program state. Tables, nodes, arrows: anything can help in finding a solution to the problem. COMMANDMENT VIII **You shall never reassign** `self` **!** You shall never write profanities like this: | class MiaClasse:
def mio_metodo(self):
self = {'mio_campo':666} | _____no_output_____ | CC-BY-4.0 | commandments.ipynb | DavidLeoni/softpython- |
Since `self` is a kind of dictionary, you might be tempted to write as above, but to the outside world this will have no effect. For example, suppose someone from outside makes a call like this: | mc = MiaClasse()
mc.mio_metodo() | _____no_output_____ | CC-BY-4.0 | commandments.ipynb | DavidLeoni/softpython- |
After the call, `mc` will not point to `{'mio_campo':666}` | mc | _____no_output_____ | CC-BY-4.0 | commandments.ipynb | DavidLeoni/softpython- |
and it will not have `mio_campo`:
```python
mc.mio_campo
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
 in ()
----> 1 mc.mio_campo

AttributeError: 'MiaClasse' object has no attribute 'mio_campo'
```
For the same reason, you must not reassign `self` to lists or anything else: | class MiaClasse:
def mio_metodo(self):
self = ['evil']
self = 666 | _____no_output_____ | CC-BY-4.0 | commandments.ipynb | DavidLeoni/softpython- |
COMMANDMENT IX **You shall test your code!** Untested code by definition _does not work_. For ideas on how to test, see [Error handling and testing](errors-and-testing/errors-and-testing-sol.ipynb). COMMANDMENT X **You shall never add or remove elements from a sequence you iterate over with a** `for` **!** Giving in to such temptations **would produce completely unpredictable behavior** (are you perhaps familiar with the expression _pulling the rug out from under someone's feet?_). **Do not add**, because you risk walking on a treadmill that never switches off:
```python
lista = ['a','b','c','d','e']
for el in lista:
    lista.append(el)  # YOU ARE CLOGGING UP THE COMPUTER'S MEMORY
```
**Do not remove**, because you risk corrupting the natural order of things: | lista = ['a','b','c','d','e']
for el in lista:
    lista.remove(el) # TERRIBLE IDEA | _____no_output_____ | CC-BY-4.0 | commandments.ipynb | DavidLeoni/softpython- |
Look carefully at the code. You think we removed everything, eh? | lista | _____no_output_____ | CC-BY-4.0 | commandments.ipynb | DavidLeoni/softpython- |
Lab Three --- For this lab we're going to be making and using a bunch of functions. Our goals are: searching our documentation, using built-in functions, making our own functions, combining functions, and structuring solutions. | # We didn't touch on the following built-in functions in class. I want you to look for them in the python documentation and implement them.
# I want you to find a built in function to SWAP CASE on a string. Print it.
# For example the string "HeY thERe HowS iT GoING" turns into "hEy THerE hOWs It gOing"
sample_string = "HeY thERe HowS iT GoING"
print(sample_string.swapcase())
# I want you to find a built in function to CENTER a string and pad the sides with 4 dashes(-) a side. Print it.
# For example the string "Hey There" becomes "----Hey There----"
sample_string = "Hey There"
print(sample_string.center(17, "-"))
# I want you to find a built in function to PARTITION a string. Print it.
# For example the string "abcdefg.hijklmnop" would come out to be ["abcdefg",".","hijklmnop"]
sample_string = "abcdefg.hijklmnop"
print(sample_string.partition("."))
# I want you to write a function that will take in a number and raise it to the power given.
# For example if given the numbers 2 and 3. The math that the function should do is 2^3 and should print out or return 8. Print the output.
def power(number, exponent) -> int:
return number ** exponent
example = power(2, 3)
print(example)
# I want you to write a function that will take in a list and see how many times a given number is in the list.
# For example if the array given is [2,3,5,2,3,6,7,8,2] and the number given is 2 the function should print out or return 3. Print the output.
array = [2,3,5,2,3,6,7,8,2]
def number_counter(array, target):
count = 0
for number in array:
if number == target:
count += 1
return count
example = number_counter(array, 2)
print(example)
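# (Added note) Python lists also provide this directly as a built-in method:
# array.count(2) returns 3 for the list above.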
# Use the functions given to create a slope function. The function should be named slope and have 4 parameters.
# If you don't remember the slope formula is (y2 - y1) / (x2 - x1) If this doesn't make sense look up `Slope Formula` on google.
def division(x, y):
return x / y
def subtraction(x, y):
return x - y
def slope(x1, x2, y1, y2):
return division(subtraction(y2, y1), subtraction(x2, x1))
example = slope(1, 2, 1, 2)
print(example)
# Use the functions given to create a distance function. The function should be named distance and have 4 parameters.
# HINT: You'll need a built in function here too. You'll also be able to use functions written earlier in the notebook as long as you've run those cells.
# If you don't remember the distance formula it is the square root of the following ((x2 - x1)^2 + (y2 - y1)^2). If this doesn't make sense look up `Distance Formula` on google.
import math
def addition(x, y):
return x + y
def distance(x1, x2, y1, y2):
x_side = power(subtraction(x2, x1), 2)
y_side = power(subtraction(y2, y1), 2)
combined_sides = addition(x_side, y_side)
return math.sqrt(combined_sides)
print(distance(1, 2, 1, 2))
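# (Added note) The standard library can do this in one call as well:
# math.hypot(x2 - x1, y2 - y1) gives the same result as distance(x1, x2, y1, y2).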
| 1.4142135623730951
| MIT | JupyterNotebooks/Labs/Lab 3 Solution.ipynb | owenbres01/CMPT-120L-910-20F |
Reconstruct phantom data. This exercise shows how to handle data from the Siemens mMR. It shows how to get from listmode data to sinograms, get a randoms estimate, and reconstruct using normalisation, randoms and attenuation. (Scatter is not yet available in SIRF.) It is recommended you complete the first part of the `ML_reconstruct.ipynb` exercise first. This exercise uses data from a phantom acquisition at UCL on a Siemens mMR. The phantom is the NEMA phantom (essentially a torso-shaped perspex box, with some spherical inserts). You will need to download that data. Please use the `SIRF-Exercises/scripts/download_PET_data.sh` script, which will get the data and make symbolic links in the location expected in this script. The script should work for other data of course, but you will need to adapt filenames. Note that we currently don't show how to extract the data from the console. Please [check our wiki for more information](https://github.com/CCPPETMR/SIRF/wiki/PET-raw-data). Authors: Kris Thielemans and Evgueni Ovtchinnikov. First version: 8th of September 2016. Second version: 17th of May 2018. CCP PETMR Synergistic Image Reconstruction Framework (SIRF). Copyright 2015 - 2017 Rutherford Appleton Laboratory STFC. Copyright 2015 - 2018 University College London. This is software developed for the Collaborative Computational Project in Positron Emission Tomography and Magnetic Resonance imaging (http://www.ccppetmr.ac.uk/). Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Initial set-up | #%% make sure figures appear inline and animations work
%matplotlib notebook
import os
import sys
import matplotlib.pyplot as plt
from sirf.Utilities import show_2D_array, examples_data_path
from sirf.STIR import *
data_path = examples_data_path('PET') + '/mMR'
#data_path='/home/sirfuser/data/NEMA'
print('Finding files in %s' % data_path)
os.chdir(data_path)
# check content of current directory using an iPython "magic" command
%ls
#%% set filenames
# input files
list_file = '20170809_NEMA_60min_UCL.l.hdr';
norm_file = 'norm.n.hdr'
attn_file = 'mu_map.hv'
# output filename prefixes
sino_file = 'sino'
# redirect STIR messages to some files
# you can check these if things go wrong
msg_red = MessageRedirector('info.txt', 'warn.txt') | _____no_output_____ | Apache-2.0 | notebooks/PET/reconstruct_measured_data.ipynb | KrisThielemans/SIRF-Exercises |
Creating sinograms from listmode data. Modern PET scanners can store data in listmode format. This is essentially a long list of all events detected by the scanner. We are interested here in the *prompts* (the coincidence events) and the *delayed events* (which form an estimate of the *accidental coincidences* in the prompts). We show how to histogram the prompts into a sinogram etc. First create a template for the sinogram. This template is used to specify the sizes of the output sinogram. It is often the case in PET that we use sinograms with "larger" bins, i.e. combine data from several detector pairs into a single bin. This reduces the size of the final sinogram and decreases computation time. The terminology here is somewhat complicated, but *span* sets the "axial compression" (higher span means smaller data size), *max_ring_diff* specifies the maximum ring difference to store, and *view_mash_factor* can be used to reduce the number of views (or azimuthal angles). Siemens uses span=1, max_ring_diff=60 and view_mash_factor=1. Here we will use a smaller data size to reduce computation time for the exercise. Feel free to change these numbers (if you know what you are doing...). | template_acq_data = AcquisitionData('Siemens_mMR', span=11, max_ring_diff=15, view_mash_factor=2)
template_acq_data.write('template.hs')
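# (Added note, not in the original notebook.) Rough intuition for these settings:
# the number of views in the sinogram is (crystals per ring) / 2 / view_mash_factor,
# and a larger span / smaller max_ring_diff reduce the number of axial sinogram planes
# that are stored, so the settings above trade axial/angular sampling for speed.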
# create listmode-to-sinograms converter object
lm2sino = ListmodeToSinograms()
# set input, output and template files
lm2sino.set_input(list_file)
lm2sino.set_output_prefix(sino_file)
lm2sino.set_template('template.hs')
# set timing interval (in secs) since start of acquisition
# (the listmode file provided is for 1 hour).
# you can vary this to see the effect on noise. Increasing it will mean somewhat longer
# processing time in the following steps (but not in the reconstruction).
lm2sino.set_time_interval(0, 500)
# set up the converter
lm2sino.set_up()
# create the prompts sinogram
lm2sino.process()
# check the content of the directory. there should be a `sino*.hs`, `'.s` pair.
# The `.hs` file is an Interfile header pointing to the binary data.
%ls | _____no_output_____ | Apache-2.0 | notebooks/PET/reconstruct_measured_data.ipynb | KrisThielemans/SIRF-Exercises |
Check the prompts sinograms. The 3D PET data returned by `as_array` are organised by 2D sinogram. The exact order of the sinograms is complicated for 3D PET, but they are ordered by *segment* (roughly: average ring difference). The first segment corresponds to "segment 0", i.e. detector pairs which are (roughly) in the same detector ring. For a scanner with `N` rings, there will be `2N-1` (2D) sinograms in segment 0. | # get access to the sinograms
acq_data = lm2sino.get_output()
# copy the acquisition data into a Python array
acq_array = acq_data.as_array()[0,:,:,:]
# print the data sizes.
print('acquisition data dimensions: %dx%dx%d' % acq_array.shape)
# use a slice number for display that is appropriate for the NEMA phantom
z = 71
show_2D_array('Acquisition data', acq_array[z,:,:]) | _____no_output_____ | Apache-2.0 | notebooks/PET/reconstruct_measured_data.ipynb | KrisThielemans/SIRF-Exercises |
Estimate the *randoms* background. Siemens stores *delayed coincidences*. These form a very noisy estimate of the background due to accidental coincidences in the data. However, that estimate is too noisy to be used in iterative image reconstruction. SIRF uses an algorithm from STIR that gives a much less noisy estimate. The help message gives some information. | help(lm2sino)
# Get the randoms estimate
# This will take a while
randoms = lm2sino.estimate_randoms() | _____no_output_____ | Apache-2.0 | notebooks/PET/reconstruct_measured_data.ipynb | KrisThielemans/SIRF-Exercises |
Plot the randoms-estimate. A (2D) sinogram of the randoms has diagonal lines. This is related to the detector efficiencies, but we cannot get into that here. | randoms_array=randoms.as_array()[0,:,:,:]
show_2D_array('randoms', randoms_array[z,:,:]) | _____no_output_____ | Apache-2.0 | notebooks/PET/reconstruct_measured_data.ipynb | KrisThielemans/SIRF-Exercises |
Reconstruct the data. We will reconstruct the data with increasingly accurate models for the acquisition as illustration. For simplicity, we will use OSEM and use only a few sub-iterations for speed. | # First just select an acquisition model that implements the geometric
# forward projection by a ray tracing matrix multiplication
acq_model = AcquisitionModelUsingRayTracingMatrix()
acq_model.set_num_tangential_LORs(10);
# define objective function to be maximized as
# Poisson logarithmic likelihood (with linear model for mean)
obj_fun = make_Poisson_loglikelihood(acq_data)
obj_fun.set_acquisition_model(acq_model)
# create the reconstruction object
recon = OSMAPOSLReconstructor()
recon.set_objective_function(obj_fun)
num_subsets = 7
# Feel free to increase these
num_subiterations = 4
recon.set_num_subsets(num_subsets)
recon.set_num_subiterations(num_subiterations)
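# (Added note, not in the original notebook.) With a Poisson log-likelihood objective and
# no prior, each OSMAPOSL sub-iteration applies the OSEM update over one subset S_b of the data:
#   x_j <- x_j / (sum_{i in S_b} a_ij) * sum_{i in S_b} a_ij * y_i / ( (A x)_i + b_i )
# where a_ij is the acquisition model (here the ray-tracing matrix), y_i the measured counts
# and b_i the background term (zero for now; randoms are added later in this notebook).
# One pass over all the data therefore corresponds to num_subsets sub-iterations.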
# create initial image estimate of dimensions and voxel sizes
# compatible with the scanner geometry (included in the AcquisitionData
# object acq_data) and initialize each voxel to 1.0
nxny = (127, 127)
initial_image = acq_data.create_uniform_image(1.0, nxny)
image = initial_image
recon.set_up(image)
# set the initial image estimate
recon.set_current_estimate(image)
# reconstruct
recon.process()
# show reconstructed image
image_array = recon.get_current_estimate().as_array()
show_2D_array('Reconstructed image', image_array[z,:,:]) | _____no_output_____ | Apache-2.0 | notebooks/PET/reconstruct_measured_data.ipynb | KrisThielemans/SIRF-Exercises |
Add detector sensitivity modelling. Each crystal pair will have a different detection efficiency. We need to take that into account in our acquisition model. The scanner provides a *normalisation file* to do this (the terminology originates from the days that we were "normalising" by dividing the detected counts by the sensitivities). In SIRF, you can incorporate this effect in the acquisition model by using an `AcquisitionSensitivityModel`. | # create it from the supplied file
asm_norm = AcquisitionSensitivityModel(norm_file)
# add it to the acquisition model
acq_model.set_acquisition_sensitivity(asm_norm)
# update the objective function
obj_fun.set_acquisition_model(acq_model)
recon.set_objective_function(obj_fun)
# reconstruct
image = initial_image
recon.set_up(image)
recon.set_current_estimate(image)
recon.process()
# show reconstructed image
image_array = recon.get_current_estimate().as_array()
show_2D_array('Reconstructed image', image_array[z,:,:]) | _____no_output_____ | Apache-2.0 | notebooks/PET/reconstruct_measured_data.ipynb | KrisThielemans/SIRF-Exercises |
Add attenuation modeling | # read attenuation image
attn_image = ImageData(attn_file)
z = 71
attn_image.show(z)
attn_acq_model = AcquisitionModelUsingRayTracingMatrix()
asm_attn = AcquisitionSensitivityModel(attn_image, attn_acq_model)
# converting attenuation into attenuation factors (see previous exercise)
asm_attn.set_up(acq_data)
attn_factors = AcquisitionData(acq_data)
attn_factors.fill(1.0)
print('applying attenuation (please wait, may take a while)...')
asm_attn.unnormalise(attn_factors)
# use these in the final attenuation model
asm_attn = AcquisitionSensitivityModel(attn_factors) | _____no_output_____ | Apache-2.0 | notebooks/PET/reconstruct_measured_data.ipynb | KrisThielemans/SIRF-Exercises |
We now have two acquisition_sensitivity_models: one for detection sensitivity and one for count loss due to attenuation. We combine them by "chaining" them together (which will model the multiplication of both sensitivities). | # chain attenuation and normalisation
asm = AcquisitionSensitivityModel(asm_norm, asm_attn)
# update the acquisition model etc
acq_model.set_acquisition_sensitivity(asm)
obj_fun.set_acquisition_model(acq_model)
recon.set_objective_function(obj_fun)
# reconstruct
image = initial_image
recon.set_up(image)
recon.set_current_estimate(image)
recon.process()
# show reconstructed image
image_array = recon.get_current_estimate().as_array()
show_2D_array('Reconstructed image', image_array[z,:,:]) | _____no_output_____ | Apache-2.0 | notebooks/PET/reconstruct_measured_data.ipynb | KrisThielemans/SIRF-Exercises |
Add a background term for modelling the randoms | acq_model.set_background_term(randoms)
obj_fun.set_acquisition_model(acq_model)
recon.set_objective_function(obj_fun)
image = initial_image
recon.set_up(image)
recon.set_current_estimate(image)
recon.process()
# show reconstructed image
image_array = recon.get_current_estimate().as_array()
show_2D_array('Reconstructed image', image_array[z,:,:]) | _____no_output_____ | Apache-2.0 | notebooks/PET/reconstruct_measured_data.ipynb | KrisThielemans/SIRF-Exercises |
Initialization | # %load init.ipy
%reload_ext autoreload
%autoreload 2
import os, sys
import numpy as np
import scipy as sp
import scipy.integrate
import matplotlib.pyplot as plt
import matplotlib as mpl
CWD = os.path.abspath(os.path.curdir)
print("CWD: '{}'".format(CWD))
ODIR = os.path.join(CWD, "output", "")
if not os.path.exists(ODIR):
os.makedirs(ODIR)
print("Created output directory: '{}'".format(ODIR))
par_dir = os.path.join(CWD, os.path.pardir)
if par_dir not in sys.path:
sys.path.append(par_dir)
print("Added parent directory: '{}'".format(par_dir))
import bhem
import bhem.basics
import bhem.utils
import bhem.disks
import bhem.radiation
import bhem.spectra
from bhem.constants import MSOL, H_PLNK, K_BLTZ, SPLC, MPRT, MELC, QELC, BANDS, SIGMA_SB, NWTG
np.seterr(over='ignore');
# Plotting settings
mpl.rc('font', **{'family': 'serif', 'sans-serif': ['Times']})
mpl.rc('lines', solid_capstyle='round')
mpl.rc('mathtext', fontset='cm')
plt.rcParams.update({'grid.alpha': 0.5})
FS_TITLE = 20
FS_LABEL = 16
plt.rcParams.update({'axes.titlesize': FS_TITLE})
plt.rcParams.update({'axes.labelsize': FS_LABEL})
plt.rcParams.update({'xtick.labelsize': FS_LABEL})
plt.rcParams.update({'ytick.labelsize': FS_LABEL})
| _____no_output_____ | MIT | notebooks/shakura-sunyaev.ipynb | lzkelley/bhem |
Parameters | MASS = 1e7 * MSOL
FEDD = 0.1
PATH_OUTPUT = os.path.join(ODIR, 'shakura-sunyaev', '')
if not os.path.exists(PATH_OUTPUT):
os.makedirs(PATH_OUTPUT)
thin = bhem.disks.Thin(MASS, fedd=FEDD) | _____no_output_____ | MIT | notebooks/shakura-sunyaev.ipynb | lzkelley/bhem |
Derived | mdot = bhem.basics.eddington_accretion(MASS)
rsch = bhem.basics.radius_schwarzschild(MASS)
# rads = np.logspace(np.log10(6), 4, 200) * rsch
rads = thin.rads
freqs = np.logspace(10, 18, 120) | _____no_output_____ | MIT | notebooks/shakura-sunyaev.ipynb | lzkelley/bhem |
Disk Primitives Profiles | # temp = bhem.basics.temperature_profile(MASS, mdot, rads)
mu = 1.2
pres_over_dens = (K_BLTZ * thin.temp / (mu * MPRT)) + (4*SIGMA_SB*thin.temp**4 / (3*SPLC) )
hh = np.sqrt(pres_over_dens * 2 * (thin.rads**3) / (NWTG * thin.mass))
fig, ax = plt.subplots(figsize=[6, 4])
ax.set(xscale='log', yscale='log')
ax.plot(thin.rads, hh/thin.rads)
IND = 1/8
norm = hh[0]/thin.rads[0]
ax.plot(thin.rads, np.power(thin.rads/thin.rads[0], IND) * norm, 'k--')
plt.show()
fig, ax = plt.subplots(figsize=[10, 5])
ax.set(xscale='log', xlabel='Radius [$R_s$]', yscale='log', ylabel='Temperature [K]')
ax.plot(rads/rsch, thin.temp, 'r-', lw=2.0, alpha=0.8)
plt.show() | _____no_output_____ | MIT | notebooks/shakura-sunyaev.ipynb | lzkelley/bhem |
Blackbody Spectrum | # erg/s/Hz/cm^2/steradian
# bb_spec_rad = bhem.basics.blackbody_spectral_radiance(MASS, mdot, rads[:, np.newaxis], freqs[np.newaxis, :])
rr = rads[np.newaxis, :]
ff = freqs[:, np.newaxis]
bb_spec_rad = thin._blackbody_spectral_radiance(rr, ff)
xx, yy = np.meshgrid(rr, ff)
norm = mpl.colors.LogNorm(vmin=1e-10, vmax=np.max(bb_spec_rad))
smap = mpl.cm.ScalarMappable(norm=norm, cmap='hot')
smap.cmap.set_under('0.5')
fig, axes = plt.subplots(figsize=[14, 6], ncols=2)
for ax in axes:
ax.set(xscale='log', xlabel='Radius [$R_s$]', yscale='log', ylabel='Freq [Hz]')
for nn, band in bhem.constants.BANDS.items():
ax.axhline(band.freq, color=band.color, lw=2.0, alpha=0.5)
pcm = axes[0].pcolormesh(xx/rsch, yy, bb_spec_rad, norm=norm, cmap=smap.cmap)
plt.colorbar(pcm, ax=axes[0], orientation='horizontal')
finds = (1e14 < freqs) & (freqs < 1e16)
norm = mpl.colors.Normalize(0.0, np.max(bb_spec_rad[finds, :]))
smap = mpl.cm.ScalarMappable(norm=norm, cmap='hot')
pcm = axes[1].pcolormesh(xx[finds, :]/rsch, yy[finds, :], bb_spec_rad[finds, :], norm=norm, cmap=smap.cmap)
plt.colorbar(pcm, ax=axes[1], orientation='horizontal')
plt.show()
# bb_lum = bhem.basics.blackbody_spectral_luminosity(MASS, mdot, freqs)
bb_lum = thin.blackbody_spectral_luminosity(freqs)
fig, ax = plt.subplots(figsize=[10, 5])
ax.set(xscale='log', xlabel='Frequency [Hz]',
yscale='log', ylabel='Spectral Luminosity [erg/s/Hz]', ylim=[1e20, 1e30])
ax.plot(freqs, bb_lum, 'r-', lw=2.0, alpha=0.6)
for nn, band in bhem.constants.BANDS.items():
ax.axvline(band.freq, color=band.color, lw=1.0, alpha=0.5)
plt.show() | _____no_output_____ | MIT | notebooks/shakura-sunyaev.ipynb | lzkelley/bhem |
Varying Eddington Ratios: Spectra and Efficiencies | _MASS = 1e9 * MSOL
fig, axes = plt.subplots(figsize=[12, 5], ncols=2)
plt.subplots_adjust(wspace=0.55, left=0.08, right=0.92, top=0.96)
for ax in axes:
ax.set(xscale='log', yscale='log')
ax.grid(True, which='major', axis='both', c='0.5', alpha=0.5)
ax = axes[0]
ax.set(xlabel='Frequency [Hz]', # xlim=[1e5, 1e22],
ylabel='$\\nu \, F_\\nu [\mathrm{erg \,\, s}^{-1}]$')
tw = ax.twinx(); tw.set(yscale='log', ylabel='Cumulative Luminosity $[\mathrm{erg \,\, s}^{-1}]$')
fedds = np.logspace(-6, 0, 7)[::-1]
lums = np.zeros_like(fedds)
cmap = mpl.cm.get_cmap('gist_heat_r')
colors = [cmap(xx) for xx in np.linspace(0.1, 0.9, fedds.size)]
ymax = 0.0
for ii, fe in enumerate(fedds):
label = '${:+.1f}$'.format(np.log10(fe))
cc = colors[ii]
kw = dict(color=cc, lw=2.0, label=label)
_thin = bhem.disks.Thin(_MASS, 100, fedd=fe)
bb_lum = _thin.blackbody_spectral_luminosity(freqs)
lum = bb_lum
ax.plot(freqs, freqs*lum, ls='--', alpha=0.5, **kw)
ymax = np.maximum(np.max(freqs*lum), ymax)
lum_mid = bhem.utils.log_midpoints(lum)
freqs_mid = bhem.utils.log_midpoints(freqs)
df = np.diff(freqs)
cumlum = np.cumsum(df * lum_mid)
lums[ii] = cumlum[-1]
tw.plot(freqs_mid, cumlum, alpha=0.8, **kw)
tw.set_ylim([1e32, 1e50])
ax.set_ylim([1e30, 3*ymax])
ax.text(0.02, 0.98, "$M = {:.1e} \,\, M_\odot$".format(_MASS/MSOL), transform=ax.transAxes,
ha='left', va='top')
for nn, band in bhem.constants.BANDS.items():
ax.axvline(band.freq, color=band.color, lw=1.0, alpha=0.5)
ax.legend(title="$\log(\dot{M}/\dot{M}_\mathrm{edd})$", fontsize=12, loc='center left')
ax = axes[1]
ax.set(xlabel='Eddington Fraction',
ylabel='$L_\mathrm{bol} [\mathrm{erg \,\, s}^{-1}]$')
tw = ax.twinx(); tw.set(yscale='log', ylabel='Efficiency')
mdot_edd = bhem.basics.eddington_accretion(_MASS)
effs = lums/(mdot_edd * fedds * SPLC**2)
ax.plot(fedds, lums, 'r-', alpha=0.8)
tw.plot(fedds, effs, 'r--', alpha=0.8)
tw.plot(fedds, np.minimum(10*fedds, 0.1), color='0.5', ls='--', alpha=0.5)
plt.show()
fname = 'lum-eff_thin_mdot'
fname = os.path.join(PATH_OUTPUT, fname)
fig.savefig(fname + '.pdf')
fig.savefig(fname + '.png')
print("Saved to '{}'".format(fname)) | _____no_output_____ | MIT | notebooks/shakura-sunyaev.ipynb | lzkelley/bhem |
Disk Truncation | _MASS = 1e6 * MSOL
_FEDD = 1e-1
VAR_LABEL = "$\log(R_\mathrm{max}/R_s)$"
BAND = "v"
NRAD = 100
fig, axes = plt.subplots(figsize=[12, 5], ncols=2)
plt.subplots_adjust(wspace=0.55, left=0.08, right=0.92, top=0.96)
for ax in axes:
ax.set(xscale='log', yscale='log')
ax.grid(True, which='major', axis='both', c='0.5', alpha=0.5)
ax = axes[0]
ax.set(xlabel='Frequency [Hz]', # xlim=[1e5, 1e22],
ylabel='$\\nu \, F_\\nu [\mathrm{erg \,\, s}^{-1}]$')
tw = ax.twinx(); tw.set(yscale='log', ylabel='Cumulative Luminosity $[\mathrm{erg \,\, s}^{-1}]$')
# fedds = np.logspace(-6, 0, 7)[::-1]
rad_max = np.logspace(1, 5, 9)
lums = np.zeros_like(rad_max)
lums_spec = np.zeros_like(rad_max)
cmap = mpl.cm.get_cmap('gist_heat_r')
colors = [cmap(xx) for xx in np.linspace(0.1, 0.9, rad_max.size)]
ymax = 0.0
for ii, rm in enumerate(rad_max):
label = '${:.1f}$'.format(np.log10(rm))
cc = colors[ii]
kw = dict(color=cc, lw=2.0, label=label)
_thin = bhem.disks.Thin(_MASS, fedd=_FEDD, rmax=rm, nrad=NRAD)
bb_lum = _thin.blackbody_spectral_luminosity(freqs)
lum = bb_lum
ax.plot(freqs, freqs*lum, ls='--', alpha=0.5, **kw)
ymax = np.maximum(np.max(freqs*lum), ymax)
_slum = bhem.utils.log_interp1d(freqs, lum*freqs)(BANDS[BAND].freq)
lums_spec[ii] = _slum
lum_mid = bhem.utils.log_midpoints(lum)
freqs_mid = bhem.utils.log_midpoints(freqs)
df = np.diff(freqs)
cumlum = np.cumsum(df * lum_mid)
lums[ii] = cumlum[-1]
tw.plot(freqs_mid, cumlum, alpha=0.8, **kw)
tw.set_ylim([1e32, 1e50])
ax.set_ylim([1e30, 3*ymax])
ax.text(0.02, 0.98, "$M = {:.1e} \,\, M_\odot$".format(_MASS/MSOL), transform=ax.transAxes,
ha='left', va='top')
for nn, band in bhem.constants.BANDS.items():
ax.axvline(band.freq, color=band.color, lw=1.0, alpha=0.5)
ax.legend(title=VAR_LABEL, fontsize=12, loc='center left')
ax = axes[1]
ax.set(xlabel=VAR_LABEL,
ylabel='$L_\mathrm{bol} [\mathrm{erg \,\, s}^{-1}]$')
tw = ax.twinx(); tw.set(yscale='log', ylabel='Efficiency')
mdot_edd = bhem.basics.eddington_accretion(_MASS)
effs = lums/(mdot_edd * _FEDD * SPLC**2)
ax.plot(rad_max, lums, 'r-', alpha=0.8, lw=2.0)
ax.plot(rad_max, lums_spec, 'b-', alpha=0.8)
tw.plot(rad_max, effs, 'r--', alpha=0.8)
# tw.plot(rad_max, np.minimum(10*fedds, 0.1), color='0.5', ls='--', alpha=0.5)
plt.show()
fname = 'spec-eff_thin_rmax'
fname = os.path.join(PATH_OUTPUT, fname)
fig.savefig(fname + '.pdf')
print("Saved to '{}'".format(fname))
_MASS = 1e7 * MSOL
_FEDD = 1e-1
VAR_LABEL = "$\log(R_\mathrm{max}/R_s)$"
BAND = "v"
RAD_MAX = 1e3
fig, axes = plt.subplots(figsize=[12, 5], ncols=2)
plt.subplots_adjust(wspace=0.55, left=0.08, right=0.92, top=0.96)
for ax in axes:
ax.set(xscale='log', yscale='log')
ax.grid(True, which='major', axis='both', c='0.5', alpha=0.5)
ax = axes[0]
ax.set(xlabel='Frequency [Hz]', # xlim=[1e5, 1e22],
ylabel='$\\nu \, F_\\nu [\mathrm{erg \,\, s}^{-1}]$')
tw = ax.twinx(); tw.set(yscale='log', ylabel='Cumulative Luminosity $[\mathrm{erg \,\, s}^{-1}]$')
# fedds = np.logspace(-6, 0, 7)[::-1]
rad_max = np.logspace(1, 5, 8)
lums = np.zeros_like(rad_max)
lums_spec = np.zeros_like(rad_max)
cmap = mpl.cm.get_cmap('gist_heat_r')
colors = [cmap(xx) for xx in np.linspace(0.1, 0.9, rad_max.size)]
ymax = 0.0
for ii, rm in enumerate(rad_max):
label = '${:.1f}$'.format(np.log10(rm))
cc = colors[ii]
kw = dict(color=cc, lw=2.0, label=label)
_thin = bhem.disks.Thin(_MASS, fedd=_FEDD, rmax=rm, nrad=NRAD)
bb_lum = _thin.blackbody_spectral_luminosity(freqs)
lum = bb_lum
ax.plot(freqs, freqs*lum, ls='--', alpha=0.5, **kw)
ymax = np.maximum(np.max(freqs*lum), ymax)
_slum = bhem.utils.log_interp1d(freqs, lum*freqs)(BANDS[BAND].freq)
lums_spec[ii] = _slum
lum_mid = bhem.utils.log_midpoints(lum)
freqs_mid = bhem.utils.log_midpoints(freqs)
df = np.diff(freqs)
cumlum = np.cumsum(df * lum_mid)
lums[ii] = cumlum[-1]
tw.plot(freqs_mid, cumlum, alpha=0.8, **kw)
tw.set_ylim([1e32, 1e50])
ax.set_ylim([1e30, 3*ymax])
ax.text(0.02, 0.98, "$M = {:.1e} \,\, M_\odot$".format(_MASS/MSOL), transform=ax.transAxes,
ha='left', va='top')
for nn, band in bhem.constants.BANDS.items():
ax.axvline(band.freq, color=band.color, lw=1.0, alpha=0.5)
ax.legend(title=VAR_LABEL, fontsize=12, loc='center left')
ax = axes[1]
ax.set(xlabel=VAR_LABEL,
ylabel='$L_\mathrm{bol} [\mathrm{erg \,\, s}^{-1}]$')
tw = ax.twinx(); tw.set(yscale='log', ylabel='Efficiency')
mdot_edd = bhem.basics.eddington_accretion(_MASS)
effs = lums/(mdot_edd * _FEDD * SPLC**2)
ax.plot(rad_max, lums, 'r-', alpha=0.8, lw=2.0)
ax.plot(rad_max, lums_spec, 'b-', alpha=0.8)
tw.plot(rad_max, effs, 'r--', alpha=0.8)
# tw.plot(rad_max, np.minimum(10*fedds, 0.1), color='0.5', ls='--', alpha=0.5)
plt.show()
fname = 'spec-eff_thin_rmax'
fname = os.path.join(PATH_OUTPUT, fname)
fig.savefig(fname + '.pdf')
print("Saved to '{}'".format(fname)) | _____no_output_____ | MIT | notebooks/shakura-sunyaev.ipynb | lzkelley/bhem |
$m \ddot{x} + c \dot{x} + k x + \sin(x) = u$ $\vec{x} = \begin{bmatrix}x \\ \dot{x}\end{bmatrix}$ $\vec{u} = \begin{bmatrix} u\end{bmatrix}$ $\vec{y} = \vec{g}(\vec{x}) = \begin{bmatrix} x\end{bmatrix}$ $\ddot{x} = (-c \dot{x} - kx - \sin(x) + u)/m$ $\dot{\vec{x}} = \vec{f}(\vec{x}) = \begin{bmatrix}\dot{x} \\ (-c \dot{x} - kx - \sin(x) + u)/m\end{bmatrix}$ $\dot{\vec{x}} = A \vec{x} + B \vec{u}$, $\vec{y} = C \vec{x} + D \vec{u}$ $A = \dfrac{\partial \vec{f}}{\partial \vec{x}}$, $B = \dfrac{\partial \vec{f}}{\partial \vec{u}}$, $C = \dfrac{\partial \vec{g}}{\partial \vec{x}}$, $D = \dfrac{\partial \vec{g}}{\partial \vec{u}}$ | m = ca.SX.sym('m')
c = ca.SX.sym('c')
k = ca.SX.sym('k')
p = ca.vertcat(m, c, k)
u = ca.SX.sym('u')
xv = ca.SX.sym('x', 2)
x = xv[0]
xd = xv[1]
y = x
xv_dot = ca.vertcat(xd, (-c*xd - k*x - ca.sin(x) + u + 3)/m)
xv_dot
f_rhs = ca.Function('rhs', [xv, u, p], [xv_dot], ['x', 'u', 'p'], ['x_dot'], {'jit': True})
f_rhs
f_rhs([1, 2], [0], [1, 2, 3])
import scipy.integrate
import numpy as np
tf = 10
res = scipy.integrate.solve_ivp(
fun=lambda t, x: np.array(f_rhs(x, 0.0, [1, 2, 3])).reshape(-1),
t_span=[0, tf],
y0=[0, 0], t_eval=np.arange(0, tf, 0.1))
plt.plot(res['t'], res['y'][0, :]);
A = ca.jacobian(xv_dot, xv)
A
B = ca.jacobian(xv_dot, u)
B
C = ca.jacobian(y, xv)
C
D = ca.jacobian(y, u)
D
f_ss = ca.Function('f_ss', [xv, p], [A, B, C, D], ['x', 'p'], ['A', 'B', 'C', 'D'])
f_ss
import control
sys = control.ss(*f_ss([0, 0], [1, 2, 3]))
sys
f_rhs.generate('rhs.c')
#!cat rhs.c
s = control.TransferFunction([1, 0], [0, 1])
H = (s + 2)
control.rlocus(H*sys);
H*sys | _____no_output_____ | BSD-3-Clause | lectures/4-Casadi-MSD MODIFY.ipynb | winstonlevin/aae497-f19 |
Linear Time Invariant Systems (LTI) * Transfer Functions: $G(s) = s/(s+1)$* State-space: $\dot{x} = Ax + Bu$, $y = Cx + Du$* Impulse response function: $g(t)$ * $\dot{x} = a_1 x + a_2 x + b u$, $y = c x + du$ Linear? (Yes) Because A = A1 + A2* $\dot{x} = a_1 x + 3 + b u$, $y = c x + du$ Linear? (No, not a linear system) * What u would balance this equation at x=0? -> u0 = -3/b (trim input) For compensated dynamcis to be $G(s) = 1/(s+1)$, u(x)=? * LTI $\implies$ zero in -> zero out $u(x) = (-a1 x - x - 3)/b$$\dot{x} = -x$ Trimming the MSD | f_rhs([0, 0], [-3], [1, 2, 3]) | _____no_output_____ | BSD-3-Clause | lectures/4-Casadi-MSD MODIFY.ipynb | winstonlevin/aae497-f19 |
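As a hedged numerical check of the compensation claim above (illustrative values for a1 and b, not from the original lecture): | # verify that u(x) = (-a1*x - x - 3)/b turns xdot = a1*x + 3 + b*u into xdot = -x
import numpy as np
a1, b = 2.0, 0.5                               # arbitrary illustrative values
u_of_x = lambda x: (-a1*x - x - 3)/b           # proposed compensating input
xdot = lambda x: a1*x + 3 + b*u_of_x(x)        # toy plant with that input applied
for x in np.array([-1.0, 0.0, 2.0]):
    print(x, xdot(x))                          # prints -x for each test point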
$\dot{x} = Ax + Bu$, $y = Cx + Du + 3$ (non-linear -> violates the zero-in/zero-out property). Trimming an aircraft means finding where the rhs = 0, i.e. $f(t, x) = 0$; to do this we minimize $\mathrm{dot}(f(t, x), f(t, x))$. | def trim_function(xv_dot):
# return xv_dot[0] + xv_dot[1] # BAD, will drive to -inf
return xv_dot[0]**2 + xv_dot[1]**2 | _____no_output_____ | BSD-3-Clause | lectures/4-Casadi-MSD MODIFY.ipynb | winstonlevin/aae497-f19 |
This design problems find the state at which a given input will drive the sytem to.* x is the design vector* f is the objective function* p is a list of constant parameters* S is the solver itself | nlp = {'x':xv, 'f':trim_function(xv_dot), 'p': ca.vertcat(p, u)}
S = ca.nlpsol('S', 'ipopt', nlp)
print(S)
S(x0=(0, 0), p=(1, 2, 3, 0))
nlp = {'x':u, 'f':trim_function(xv_dot), 'p': ca.vertcat(p, xv)}
S2 = ca.nlpsol('S', 'ipopt', nlp)
print(S2)
res = S2(x0=(0), p=(1, 2, 3, 0, 0))
#print('we need a trim input of {:f}'.format(float(res['x']))) | This is Ipopt version 3.12.3, running with linear solver mumps.
NOTE: Other linear solvers might be more efficient (see Ipopt documentation).
Number of nonzeros in equality constraint Jacobian...: 0
Number of nonzeros in inequality constraint Jacobian.: 0
Number of nonzeros in Lagrangian Hessian.............: 1
Total number of variables............................: 1
variables with only lower bounds: 0
variables with lower and upper bounds: 0
variables with only upper bounds: 0
Total number of equality constraints.................: 0
Total number of inequality constraints...............: 0
inequality constraints with only lower bounds: 0
inequality constraints with lower and upper bounds: 0
inequality constraints with only upper bounds: 0
iter objective inf_pr inf_du lg(mu) ||d|| lg(rg) alpha_du alpha_pr ls
0 9.0000000e+00 0.00e+00 6.00e+00 -1.0 0.00e+00 - 0.00e+00 0.00e+00 0
1 0.0000000e+00 0.00e+00 0.00e+00 -1.0 3.00e+00 - 1.00e+00 1.00e+00f 1
Number of Iterations....: 1
(scaled) (unscaled)
Objective...............: 0.0000000000000000e+00 0.0000000000000000e+00
Dual infeasibility......: 0.0000000000000000e+00 0.0000000000000000e+00
Constraint violation....: 0.0000000000000000e+00 0.0000000000000000e+00
Complementarity.........: 0.0000000000000000e+00 0.0000000000000000e+00
Overall NLP error.......: 0.0000000000000000e+00 0.0000000000000000e+00
Number of objective function evaluations = 2
Number of objective gradient evaluations = 2
Number of equality constraint evaluations = 0
Number of inequality constraint evaluations = 0
Number of equality constraint Jacobian evaluations = 0
Number of inequality constraint Jacobian evaluations = 0
Number of Lagrangian Hessian evaluations = 1
Total CPU secs in IPOPT (w/o function evaluations) = 0.001
Total CPU secs in NLP function evaluations = 0.000
EXIT: Optimal Solution Found.
t_proc [s] t_wall [s] n_eval
S 0.00183 0.00212 1
nlp_f 5e-06 5.23e-06 2
nlp_grad 6e-06 4.32e-06 1
nlp_grad_f 8e-06 8.68e-06 3
nlp_hess_l 2e-06 1.93e-06 1
| BSD-3-Clause | lectures/4-Casadi-MSD MODIFY.ipynb | winstonlevin/aae497-f19 |
TensorFlow Graphs | import tensorflow as tf
n1 = tf.constant(1)
n2 = tf.constant(2)
n3 = n1 + n2
with tf.Session() as sess:
result = sess.run(n3)
print(result)
print(tf.get_default_graph())
g = tf.Graph()
print(g)
graph_one = tf.get_default_graph()
print(graph_one)
graph_two = tf.Graph()
print(graph_two)
with graph_two.as_default():
print(graph_two is tf.get_default_graph())
print(graph_two is tf.get_default_graph()) | False
| MIT | Section 1/1.3_TensorFlow_Graphs.ipynb | manpreet-kau-r/Hands-on-Machine-Learning-with-TensorFlow |
Read supplementary material csvs from https://www.ncbi.nlm.nih.gov/pubmed/29425488 |
human_tfs = pd.read_csv("http://humantfs.ccbr.utoronto.ca/download/v_1.01/DatabaseExtract_v_1.01.csv", index_col=0)
print(human_tfs.shape)
human_tfs.head()
human_tfs.to_csv("Lambert_Jolma_Campitelli_etal_2018_human_transcription_factors.csv", index=False)
true_tfs = human_tfs.loc[human_tfs['Is TF?'] == "Yes"]
print(true_tfs.shape)
true_tfs.head()
tf_records = []
# with open("Homo_sapiens.GRCh38.pep.transcription_factors.fa") as f:
with screed.open("Homo_sapiens.GRCh38.pep.all.fa.gz") as records:
for record in records:
if '*' in record['sequence']:
continue
if any(x in record['name'] for x in true_tfs['Ensembl ID']):
tf_records.append(record)
len(tf_records)
r = tf_records[0]
r
r['name'] | _____no_output_____ | MIT | notebooks/220_tfs_from_human_ensembl_protein_coding.ipynb | czbiohub/kh-analysis |
regex made with: https://regex101.com/r/7IzJgx/1 | pattern = "(?P<protein_id>ENSP\d+\.\d+) (?P<seqtype>\w+) (?P<location>chromosome:GRCh38:[\dXY]+:\d+:\d+:\d+) gene:(?P<gene_id>ENSG\d+\.\d+) transcript:(?P<transcript_id>ENST\d+\.\d+) gene_biotype:(?P<gene_biotype>\w+) transcript_biotype:(?P<transcript_biotype>\w+) gene_symbol:(?P<gene_symbol>\w+) description:(?P<description>[\w ]+) (?P<source>\[Source:[\w ]+ Symbol;Acc:HGNC:\d+\])"
m = re.match(pattern, r['name'])
m.groupdict() | _____no_output_____ | MIT | notebooks/220_tfs_from_human_ensembl_protein_coding.ipynb | czbiohub/kh-analysis |
Updated regex to work with all fasta entries of TFs: https://regex101.com/r/7IzJgx/2 | lines = []
PATTERN = r"(?P<protein_id>ENSP\d+\.\d+) (?P<seqtype>\w+) (?P<location>chromosome:GRCh38:[\dXY]+:\d+:\d+:-?\d+) gene:(?P<gene_id>ENSG\d+\.\d+) transcript:(?P<transcript_id>ENST\d+\.\d+) gene_biotype:(?P<gene_biotype>\w+) transcript_biotype:(?P<transcript_biotype>\w+) gene_symbol:(?P<gene_symbol>[\w\.\-]+) description:(?P<description>[\w\d\-',/ \.]+)(?P<source>\[Source:[\w ]+;Acc:[\w:\d]+\])?"
pattern = re.compile(PATTERN.strip())
for record in tf_records:
m = re.match(pattern, record['name'])
try:
series = pd.Series(m.groupdict())
except AttributeError:
# If it doesn't work, break on the record it didn't work on
print(record['name'])
break
lines.append(series)
tf_metadata = pd.DataFrame.from_records(lines)
print(tf_metadata.shape)
PATTERN
re.findall(PATTERN, record['name'])
record['name']
m
tf_metadata.head()
tf_metadata.gene_id.nunique()
tf_metadata['gene_id_no_version'] = tf_metadata.gene_id.str.split('.').str[0]
rows = true_tfs['Ensembl ID'].isin(tf_metadata['gene_id_no_version'])
true_tfs_not_in_ensembl97 = true_tfs.loc[~rows]
true_tfs_not_in_ensembl97
print(true_tfs_not_in_ensembl97.loc[:, ['Ensembl ID', 'HGNC symbol','DBD', 'Is TF?', 'Final Comments']].to_csv(index=False)) | Ensembl ID,HGNC symbol,DBD,Is TF?,Final Comments
ENSG00000214189,ZNF788,C2H2 ZF,Yes,Virtually nothing is known for this protein except that it has a decent cassette of znfC2H2 domains
ENSG00000228623,ZNF883,C2H2 ZF,Yes,None
DUX1_HUMAN,DUX1,Homeodomain,Yes,Not included in Ensembl. Binds GATCTGAGTCTAATTGAGAATTACTGTAC in EMSA (PMID: 9736770)
DUX3_HUMAN,DUX3,Homeodomain,Yes,Not included in Ensembl.
| MIT | notebooks/220_tfs_from_human_ensembl_protein_coding.ipynb | czbiohub/kh-analysis |
Are these TFs in the fasta file? `DUX1` and `DUX3` are likely not | ! zcat Homo_sapiens.GRCh38.pep.all.fa.gz | grep ZNF788
for i, (ensembl_id, gene_symbol) in true_tfs_not_in_ensembl97[['Ensembl ID', 'HGNC symbol']].iterrows():
print(f"Grep for {ensembl_id}")
! zcat Homo_sapiens.GRCh38.pep.all.fa.gz | grep $ensembl_id
print(f"Grep for {gene_symbol}")
! zcat Homo_sapiens.GRCh38.pep.all.fa.gz | grep $gene_symbol | Grep for ENSG00000214189
Grep for ZNF788
Grep for ENSG00000228623
Grep for ZNF883
>ENSP00000490059.1 pep chromosome:GRCh38:9:112997120:113050043:-1 gene:ENSG00000285447.1 transcript:ENST00000619044.1 gene_biotype:protein_coding transcript_biotype:protein_coding gene_symbol:ZNF883 description:zinc finger protein 883 [Source:NCBI gene;Acc:169834]
Grep for DUX1_HUMAN
Grep for DUX1
Grep for DUX3_HUMAN
Grep for DUX3
| MIT | notebooks/220_tfs_from_human_ensembl_protein_coding.ipynb | czbiohub/kh-analysis |
Batch Normalization from scratch. When you train a linear model, you update the weights in order to optimize some objective, and for the linear model the distribution of the inputs stays the same throughout training. So all we have to worry about is how to map from these well-behaved inputs to some appropriate outputs. But if we focus on some layer in the middle of a deep neural network, for example the third, things look a bit different. After each training iteration, we update the weights in all the layers, including the first and the second. That means that over the course of training, as the weights for the first two layers are learned, the inputs to the third layer might look dramatically different than they did at the beginning. For starters, they might take values on a scale orders of magnitude different from when we started training. And this shift in feature scale might have serious implications, say, for the ideal learning rate at each step. To explain, consider the Taylor expansion of the objective function $f$ with respect to the updated parameter $\mathbf{w}$, i.e. $f(\mathbf{w} - \eta \nabla f(\mathbf{w}))$. The coefficients of the higher-order terms in the learning rate $\eta$ may be so large in scale (usually due to many layers) that these terms cannot be ignored. However, the effect of common lower-order optimization algorithms, such as gradient descent, in iteratively reducing the objective function rests on an important assumption: all those higher-order terms in the learning rate in the aforementioned Taylor expansion are ignored. Motivated by this sort of intuition, Sergey Ioffe and Christian Szegedy proposed [Batch Normalization](https://arxiv.org/abs/1502.03167), a technique that normalizes the mean and variance of each of the features at every level of representation during training. The technique involves normalization of the features across the examples in each mini-batch. While competing explanations for the technique's effect abound, its success is hard to deny. Empirically it appears to stabilize the gradient (fewer exploding or vanishing values) and batch-normalized models appear to overfit less. In fact, batch-normalized models seldom even use dropout. In this notebook, we'll explain how it works. Import dependencies and grab the MNIST dataset. We'll get going by importing the typical packages and grabbing the MNIST data. | from __future__ import print_function
import mxnet as mx
import numpy as np
from mxnet import nd, autograd
mx.random.seed(1)
ctx = mx.gpu() | _____no_output_____ | Apache-2.0 | chapter04_convolutional-neural-networks/cnn-batch-norm-scratch.ipynb | sgeos/mxnet_the_straight_dope |
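As a quick, editorial illustration of the core idea before the formal definition (plain NumPy, not the MXNet implementation used below): | # normalize each feature across a toy mini-batch
import numpy as np
batch = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])   # 3 examples, 2 features
mean = batch.mean(axis=0)
var = batch.var(axis=0)
normed = (batch - mean) / np.sqrt(var + 1e-5)
print(normed.mean(axis=0))   # ~0 per feature
print(normed.std(axis=0))    # ~1 per feature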
The MNIST dataset | batch_size = 64
num_inputs = 784
num_outputs = 10
def transform(data, label):
return nd.transpose(data.astype(np.float32), (2,0,1))/255, label.astype(np.float32)
train_data = mx.gluon.data.DataLoader(mx.gluon.data.vision.MNIST(train=True, transform=transform),
batch_size, shuffle=True)
test_data = mx.gluon.data.DataLoader(mx.gluon.data.vision.MNIST(train=False, transform=transform),
batch_size, shuffle=False) | _____no_output_____ | Apache-2.0 | chapter04_convolutional-neural-networks/cnn-batch-norm-scratch.ipynb | sgeos/mxnet_the_straight_dope |
Batch Normalization layer. The layer, unlike Dropout, is usually used **before** the activation layer (according to the authors' original paper), instead of after the activation layer. The basic idea is to do the normalization and then apply a linear scale and shift to the mini-batch. For an input mini-batch $B = \{x_{1, ..., m}\}$, we want to learn the parameters $\gamma$ and $\beta$. The output of the layer is $\{y_i = BN_{\gamma, \beta}(x_i)\}$, where: $$\mu_B \leftarrow \frac{1}{m}\sum_{i = 1}^{m}x_i$$ $$\sigma_B^2 \leftarrow \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_B)^2$$ $$\hat{x_i} \leftarrow \frac{x_i - \mu_B}{\sqrt{\sigma_B^2 + \epsilon}}$$ $$y_i \leftarrow \gamma \hat{x_i} + \beta \equiv \mbox{BN}_{\gamma,\beta}(x_i)$$ * formulas taken from Ioffe, Sergey, and Christian Szegedy. "Batch normalization: Accelerating deep network training by reducing internal covariate shift." International Conference on Machine Learning. 2015. With gluon, this is all actually implemented for us, but we'll do it this one time by ourselves, using the formulas from the original paper, so you know how it works, and perhaps you can improve upon it! Pay attention that, for a (2D) CNN, we normalize over `batch_size * height * width` within each channel, so that `gamma` and `beta` have length equal to `channel_count`. In our implementation, we need to manually reshape `gamma` and `beta` so that they can (be automatically broadcast and) multiply the matrices in the desired way. | def pure_batch_norm(X, gamma, beta, eps = 1e-5):
if len(X.shape) not in (2, 4):
raise ValueError('only supports dense or 2dconv')
# dense
if len(X.shape) == 2:
# mini-batch mean
mean = nd.mean(X, axis=0)
# mini-batch variance
variance = nd.mean((X - mean) ** 2, axis=0)
# normalize
X_hat = (X - mean) * 1.0 / nd.sqrt(variance + eps)
# scale and shift
out = gamma * X_hat + beta
# 2d conv
elif len(X.shape) == 4:
# extract the dimensions
N, C, H, W = X.shape
# mini-batch mean
mean = nd.mean(X, axis=(0, 2, 3))
# mini-batch variance
variance = nd.mean((X - mean.reshape((1, C, 1, 1))) ** 2, axis=(0, 2, 3))
# normalize
X_hat = (X - mean.reshape((1, C, 1, 1))) * 1.0 / nd.sqrt(variance.reshape((1, C, 1, 1)) + eps)
# scale and shift
out = gamma.reshape((1, C, 1, 1)) * X_hat + beta.reshape((1, C, 1, 1))
return out | _____no_output_____ | Apache-2.0 | chapter04_convolutional-neural-networks/cnn-batch-norm-scratch.ipynb | sgeos/mxnet_the_straight_dope |
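To see why the reshape of `gamma` and `beta` is needed in the 2D-conv branch, here is a small broadcasting sketch (editorial illustration only): | # per-channel scale broadcast over an (N, C, H, W) array
import numpy as np
x = np.ones((2, 3, 4, 4))                  # N=2, C=3, H=W=4
gamma = np.array([1.0, 2.0, 3.0])          # one scale per channel
scaled = gamma.reshape((1, 3, 1, 1)) * x   # broadcasts along N, H and W
print(scaled.shape, scaled[0, 1, 0, 0], scaled[0, 2, 0, 0])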
Let's do some sanity checks. We expect each **column** of the input matrix to be normalized. | A = nd.array([1,7,5,4,6,10], ctx=ctx).reshape((3,2))
A
pure_batch_norm(A,
gamma = nd.array([1,1], ctx=ctx),
beta=nd.array([0,0], ctx=ctx))
ga = nd.array([1,1], ctx=ctx)
be = nd.array([0,0], ctx=ctx)
B = nd.array([1,6,5,7,4,3,2,5,6,3,2,4,5,3,2,5], ctx=ctx).reshape((2,2,2,2))
B
pure_batch_norm(B, ga, be) | _____no_output_____ | Apache-2.0 | chapter04_convolutional-neural-networks/cnn-batch-norm-scratch.ipynb | sgeos/mxnet_the_straight_dope |
Our tests seem to support that we've done everything correctly.Note that for batch normalization, implementing **backward** pass is a little bit tricky. Fortunately, you won't have to worry about that here, because the MXNet's `autograd` package can handle differentiation for us automatically. Besides that, in the testing process, we want to use the mean and variance of the **complete dataset**, instead of those of **mini batches**. In the implementation, we use moving statistics as a trade off, because we don't want to or don't have the ability to compute the statistics of the complete dataset (in the second loop).Then here comes another concern: we need to maintain the moving statistics **along with multiple runs of the BN**. It's an engineering issue rather than a deep/machine learning issue. On the one hand, the moving statistics are similar to `gamma` and `beta`; on the other hand, they are **not** updated by the gradient backwards. In this quick-and-dirty implementation, we use the global dictionary variables to store the statistics, in which each key is the name of the layer (`scope_name`), and the value is the statistics. (**Attention**: always be very careful if you have to use global variables!) Moreover, we have another parameter `is_training` to indicate whether we are doing training or testing.Now we are ready to define our complete `batch_norm()`: | def batch_norm(X,
gamma,
beta,
momentum = 0.9,
eps = 1e-5,
scope_name = '',
is_training = True,
debug = False):
"""compute the batch norm """
global _BN_MOVING_MEANS, _BN_MOVING_VARS
#########################
# the usual batch norm transformation
#########################
if len(X.shape) not in (2, 4):
raise ValueError('the input data shape should be one of:\n' +
'dense: (batch size, # of features)\n' +
'2d conv: (batch size, # of features, height, width)'
)
# dense
if len(X.shape) == 2:
# mini-batch mean
mean = nd.mean(X, axis=0)
# mini-batch variance
variance = nd.mean((X - mean) ** 2, axis=0)
# normalize
if is_training:
# while training, we normalize the data using its mean and variance
X_hat = (X - mean) * 1.0 / nd.sqrt(variance + eps)
else:
# while testing, we normalize the data using the pre-computed mean and variance
X_hat = (X - _BN_MOVING_MEANS[scope_name]) *1.0 / nd.sqrt(_BN_MOVING_VARS[scope_name] + eps)
# scale and shift
out = gamma * X_hat + beta
# 2d conv
elif len(X.shape) == 4:
# extract the dimensions
N, C, H, W = X.shape
# mini-batch mean
mean = nd.mean(X, axis=(0,2,3))
# mini-batch variance
variance = nd.mean((X - mean.reshape((1, C, 1, 1))) ** 2, axis=(0, 2, 3))
# normalize
X_hat = (X - mean.reshape((1, C, 1, 1))) * 1.0 / nd.sqrt(variance.reshape((1, C, 1, 1)) + eps)
if is_training:
# while training, we normalize the data using its mean and variance
X_hat = (X - mean.reshape((1, C, 1, 1))) * 1.0 / nd.sqrt(variance.reshape((1, C, 1, 1)) + eps)
else:
# while testing, we normalize the data using the pre-computed mean and variance
X_hat = (X - _BN_MOVING_MEANS[scope_name].reshape((1, C, 1, 1))) * 1.0 \
/ nd.sqrt(_BN_MOVING_VARS[scope_name].reshape((1, C, 1, 1)) + eps)
# scale and shift
out = gamma.reshape((1, C, 1, 1)) * X_hat + beta.reshape((1, C, 1, 1))
#########################
# to keep the moving statistics
#########################
# init the attributes
try: # to access them
_BN_MOVING_MEANS, _BN_MOVING_VARS
except: # error, create them
_BN_MOVING_MEANS, _BN_MOVING_VARS = {}, {}
# store the moving statistics by their scope_names, inplace
if scope_name not in _BN_MOVING_MEANS:
_BN_MOVING_MEANS[scope_name] = mean
else:
_BN_MOVING_MEANS[scope_name] = _BN_MOVING_MEANS[scope_name] * momentum + mean * (1.0 - momentum)
if scope_name not in _BN_MOVING_VARS:
_BN_MOVING_VARS[scope_name] = variance
else:
_BN_MOVING_VARS[scope_name] = _BN_MOVING_VARS[scope_name] * momentum + variance * (1.0 - momentum)
#########################
# debug info
#########################
if debug:
print('== info start ==')
print('scope_name = {}'.format(scope_name))
print('mean = {}'.format(mean))
print('var = {}'.format(variance))
print('_BN_MOVING_MEANS = {}'.format(_BN_MOVING_MEANS[scope_name]))
print('_BN_MOVING_VARS = {}'.format(_BN_MOVING_VARS[scope_name]))
print('output = {}'.format(out))
print('== info end ==')
#########################
# return
#########################
return out | _____no_output_____ | Apache-2.0 | chapter04_convolutional-neural-networks/cnn-batch-norm-scratch.ipynb | sgeos/mxnet_the_straight_dope |
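A minimal sketch (editorial, plain Python) of the moving-average bookkeeping performed above for `_BN_MOVING_MEANS`: | # exponential moving average of per-batch means (made-up values)
momentum = 0.9
moving_mean = None
for batch_mean in [0.2, 0.4, 0.1]:
    if moving_mean is None:
        moving_mean = batch_mean
    else:
        moving_mean = momentum * moving_mean + (1.0 - momentum) * batch_mean
print(moving_mean)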
Parameters and gradients | #######################
# Set the scale for weight initialization and choose
# the number of hidden units in the fully-connected layer
#######################
weight_scale = .01
num_fc = 128
W1 = nd.random_normal(shape=(20, 1, 3,3), scale=weight_scale, ctx=ctx)
b1 = nd.random_normal(shape=20, scale=weight_scale, ctx=ctx)
gamma1 = nd.random_normal(shape=20, loc=1, scale=weight_scale, ctx=ctx)
beta1 = nd.random_normal(shape=20, scale=weight_scale, ctx=ctx)
W2 = nd.random_normal(shape=(50, 20, 5, 5), scale=weight_scale, ctx=ctx)
b2 = nd.random_normal(shape=50, scale=weight_scale, ctx=ctx)
gamma2 = nd.random_normal(shape=50, loc=1, scale=weight_scale, ctx=ctx)
beta2 = nd.random_normal(shape=50, scale=weight_scale, ctx=ctx)
W3 = nd.random_normal(shape=(800, num_fc), scale=weight_scale, ctx=ctx)
b3 = nd.random_normal(shape=num_fc, scale=weight_scale, ctx=ctx)
gamma3 = nd.random_normal(shape=num_fc, loc=1, scale=weight_scale, ctx=ctx)
beta3 = nd.random_normal(shape=num_fc, scale=weight_scale, ctx=ctx)
W4 = nd.random_normal(shape=(num_fc, num_outputs), scale=weight_scale, ctx=ctx)
b4 = nd.random_normal(shape=10, scale=weight_scale, ctx=ctx)
params = [W1, b1, gamma1, beta1, W2, b2, gamma2, beta2, W3, b3, gamma3, beta3, W4, b4]
for param in params:
param.attach_grad() | _____no_output_____ | Apache-2.0 | chapter04_convolutional-neural-networks/cnn-batch-norm-scratch.ipynb | sgeos/mxnet_the_straight_dope |
Activation functions | def relu(X):
return nd.maximum(X, 0) | _____no_output_____ | Apache-2.0 | chapter04_convolutional-neural-networks/cnn-batch-norm-scratch.ipynb | sgeos/mxnet_the_straight_dope |
Softmax output | def softmax(y_linear):
exp = nd.exp(y_linear-nd.max(y_linear))
partition = nd.nansum(exp, axis=0, exclude=True).reshape((-1,1))
return exp / partition | _____no_output_____ | Apache-2.0 | chapter04_convolutional-neural-networks/cnn-batch-norm-scratch.ipynb | sgeos/mxnet_the_straight_dope |
The *softmax* cross-entropy loss function | def softmax_cross_entropy(yhat_linear, y):
return - nd.nansum(y * nd.log_softmax(yhat_linear), axis=0, exclude=True) | _____no_output_____ | Apache-2.0 | chapter04_convolutional-neural-networks/cnn-batch-norm-scratch.ipynb | sgeos/mxnet_the_straight_dope |
Define the model. We insert the BN layer right after each linear layer. | def net(X, is_training = True, debug=False):
########################
# Define the computation of the first convolutional layer
########################
h1_conv = nd.Convolution(data=X, weight=W1, bias=b1, kernel=(3,3), num_filter=20)
h1_normed = batch_norm(h1_conv, gamma1, beta1, scope_name='bn1', is_training=is_training)
h1_activation = relu(h1_normed)
h1 = nd.Pooling(data=h1_activation, pool_type="avg", kernel=(2,2), stride=(2,2))
if debug:
print("h1 shape: %s" % (np.array(h1.shape)))
########################
# Define the computation of the second convolutional layer
########################
h2_conv = nd.Convolution(data=h1, weight=W2, bias=b2, kernel=(5,5), num_filter=50)
h2_normed = batch_norm(h2_conv, gamma2, beta2, scope_name='bn2', is_training=is_training)
h2_activation = relu(h2_normed)
h2 = nd.Pooling(data=h2_activation, pool_type="avg", kernel=(2,2), stride=(2,2))
if debug:
print("h2 shape: %s" % (np.array(h2.shape)))
########################
# Flattening h2 so that we can feed it into a fully-connected layer
########################
h2 = nd.flatten(h2)
if debug:
print("Flat h2 shape: %s" % (np.array(h2.shape)))
########################
# Define the computation of the third (fully-connected) layer
########################
h3_linear = nd.dot(h2, W3) + b3
h3_normed = batch_norm(h3_linear, gamma3, beta3, scope_name='bn3', is_training=is_training)
h3 = relu(h3_normed)
if debug:
print("h3 shape: %s" % (np.array(h3.shape)))
########################
# Define the computation of the output layer
########################
yhat_linear = nd.dot(h3, W4) + b4
if debug:
print("yhat_linear shape: %s" % (np.array(yhat_linear.shape)))
return yhat_linear
| _____no_output_____ | Apache-2.0 | chapter04_convolutional-neural-networks/cnn-batch-norm-scratch.ipynb | sgeos/mxnet_the_straight_dope |
Test run. Can data be passed into the `net()`? | for data, _ in train_data:
data = data.as_in_context(ctx)
break
output = net(data, is_training=True, debug=True) | _____no_output_____ | Apache-2.0 | chapter04_convolutional-neural-networks/cnn-batch-norm-scratch.ipynb | sgeos/mxnet_the_straight_dope |
Optimizer | def SGD(params, lr):
for param in params:
param[:] = param - lr * param.grad | _____no_output_____ | Apache-2.0 | chapter04_convolutional-neural-networks/cnn-batch-norm-scratch.ipynb | sgeos/mxnet_the_straight_dope |
Evaluation metric | def evaluate_accuracy(data_iterator, net):
numerator = 0.
denominator = 0.
for i, (data, label) in enumerate(data_iterator):
data = data.as_in_context(ctx)
label = label.as_in_context(ctx)
label_one_hot = nd.one_hot(label, 10)
output = net(data, is_training=False) # attention here!
predictions = nd.argmax(output, axis=1)
numerator += nd.sum(predictions == label)
denominator += data.shape[0]
return (numerator / denominator).asscalar() | _____no_output_____ | Apache-2.0 | chapter04_convolutional-neural-networks/cnn-batch-norm-scratch.ipynb | sgeos/mxnet_the_straight_dope |
Execute the training loop. Note: you may want to use a GPU to run the code below. (And remember to set `ctx = mx.gpu()` accordingly at the very beginning of this notebook.) | epochs = 1
moving_loss = 0.
learning_rate = .001
for e in range(epochs):
for i, (data, label) in enumerate(train_data):
data = data.as_in_context(ctx)
label = label.as_in_context(ctx)
label_one_hot = nd.one_hot(label, num_outputs)
with autograd.record():
# we are in training process,
# so we normalize the data using batch mean and variance
output = net(data, is_training=True)
loss = softmax_cross_entropy(output, label_one_hot)
loss.backward()
SGD(params, learning_rate)
##########################
# Keep a moving average of the losses
##########################
if i == 0:
moving_loss = nd.mean(loss).asscalar()
else:
moving_loss = .99 * moving_loss + .01 * nd.mean(loss).asscalar()
test_accuracy = evaluate_accuracy(test_data, net)
train_accuracy = evaluate_accuracy(train_data, net)
print("Epoch %s. Loss: %s, Train_acc %s, Test_acc %s" % (e, moving_loss, train_accuracy, test_accuracy)) | _____no_output_____ | Apache-2.0 | chapter04_convolutional-neural-networks/cnn-batch-norm-scratch.ipynb | sgeos/mxnet_the_straight_dope |
Collection of Helpful Functions for [Class](https://sites.wustl.edu/jeffheaton/t81-558/)This is a collection of helpful functions that I will introduce during this course. | import base64
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
from sklearn import preprocessing
# Encode text values to dummy variables(i.e. [1,0,0],[0,1,0],[0,0,1] for red,green,blue)
def encode_text_dummy(df, name):
dummies = pd.get_dummies(df[name])
for x in dummies.columns:
dummy_name = f"{name}-{x}"
df[dummy_name] = dummies[x]
df.drop(name, axis=1, inplace=True)
# Encode text values to a single dummy variable. The new columns (which do not replace the old) will have a 1
# at every location where the original column (name) matches each of the target_values. One column is added for
# each target value.
def encode_text_single_dummy(df, name, target_values):
for tv in target_values:
l = list(df[name].astype(str))
l = [1 if str(x) == str(tv) else 0 for x in l]
name2 = f"{name}-{tv}"
df[name2] = l
# Encode text values to indexes(i.e. [1],[2],[3] for red,green,blue).
def encode_text_index(df, name):
le = preprocessing.LabelEncoder()
df[name] = le.fit_transform(df[name])
return le.classes_
# Encode a numeric column as zscores
def encode_numeric_zscore(df, name, mean=None, sd=None):
if mean is None:
mean = df[name].mean()
if sd is None:
sd = df[name].std()
df[name] = (df[name] - mean) / sd
# Convert all missing values in the specified column to the median
def missing_median(df, name):
med = df[name].median()
df[name] = df[name].fillna(med)
# Convert all missing values in the specified column to the default
def missing_default(df, name, default_value):
df[name] = df[name].fillna(default_value)
# Convert a Pandas dataframe to the x,y inputs that TensorFlow needs
def to_xy(df, target):
result = []
for x in df.columns:
if x != target:
result.append(x)
# find out the type of the target column. Is it really this hard? :(
target_type = df[target].dtypes
target_type = target_type[0] if hasattr(
target_type, '__iter__') else target_type
# Encode to int for classification, float otherwise. TensorFlow likes 32 bits.
if target_type in (np.int64, np.int32):
# Classification
dummies = pd.get_dummies(df[target])
return df.as_matrix(result).astype(np.float32), dummies.as_matrix().astype(np.float32)
# Regression
return df.as_matrix(result).astype(np.float32), df.as_matrix([target]).astype(np.float32)
# Nicely formatted time string
def hms_string(sec_elapsed):
h = int(sec_elapsed / (60 * 60))
m = int((sec_elapsed % (60 * 60)) / 60)
s = sec_elapsed % 60
return f"{h}:{m:>02}:{s:>05.2f}"
# Regression chart.
def chart_regression(pred, y, sort=True):
t = pd.DataFrame({'pred': pred, 'y': y.flatten()})
if sort:
t.sort_values(by=['y'], inplace=True)
plt.plot(t['y'].tolist(), label='expected')
plt.plot(t['pred'].tolist(), label='prediction')
plt.ylabel('output')
plt.legend()
plt.show()
# Remove all rows where the specified column is +/- sd standard deviations
def remove_outliers(df, name, sd):
drop_rows = df.index[(np.abs(df[name] - df[name].mean())
>= (sd * df[name].std()))]
df.drop(drop_rows, axis=0, inplace=True)
# Encode a column to a range between normalized_low and normalized_high.
def encode_numeric_range(df, name, normalized_low=-1, normalized_high=1,
data_low=None, data_high=None):
if data_low is None:
data_low = min(df[name])
data_high = max(df[name])
df[name] = ((df[name] - data_low) / (data_high - data_low)) \
* (normalized_high - normalized_low) + normalized_low
# This function submits an assignment. You can submit an assignment as much as you like, only the final
# submission counts. The paramaters are as follows:
# data - Pandas dataframe output.
# key - Your student key that was emailed to you.
# no - The assignment class number, should be 1 through 1.
# source_file - The full path to your Python or IPYNB file. This must have "_class1" as part of its name.
# . The number must match your assignment number. For example "_class2" for class assignment #2.
def submit(data,key,no,source_file=None):
if source_file is None and '__file__' not in globals(): raise Exception('Must specify a filename when a Jupyter notebook.')
if source_file is None: source_file = __file__
suffix = '_class{}'.format(no)
if suffix not in source_file: raise Exception('{} must be part of the filename.'.format(suffix))
with open(source_file, "rb") as image_file:
encoded_python = base64.b64encode(image_file.read()).decode('ascii')
ext = os.path.splitext(source_file)[-1].lower()
if ext not in ['.ipynb','.py']: raise Exception("Source file is {} must be .py or .ipynb".format(ext))
r = requests.post("https://api.heatonresearch.com/assignment-submit",
headers={'x-api-key':key}, json={'csv':base64.b64encode(data.to_csv(index=False).encode('ascii')).decode("ascii"),
'assignment': no, 'ext':ext, 'py':encoded_python})
if r.status_code == 200:
print("Success: {}".format(r.text))
else: print("Failure: {}".format(r.text))
| _____no_output_____ | Apache-2.0 | jeffs_helpful.ipynb | guyvani/t81_558_deep_learning |
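A small usage sketch (added for illustration, not part of the original file) exercising a few of the helpers on a toy frame: | # assumes the cell above has been run so pd and the helper functions are in scope
df = pd.DataFrame({'color': ['red', 'green', 'blue', 'red'], 'len': [1.0, 2.0, None, 4.0]})
missing_median(df, 'len')         # fill the missing value with the column median
encode_numeric_zscore(df, 'len')  # standardize the numeric column
encode_text_dummy(df, 'color')    # expand 'color' into color-red / color-green / color-blue
print(df)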
Rudder equations | %load_ext autoreload
%autoreload 2
%matplotlib inline
import sympy as sp
from sympy.plotting import plot as plot
from sympy.plotting import plot3d as plot3d
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
sp.init_printing()
from IPython.core.display import HTML,Latex
import seaman_symbol as ss
from rudder_equations import *
from bis_system import BisSystem
from seaman_symbols import *
import seaman_symbol as ss
import sys
sys.path.append("../")
import seaman | _____no_output_____ | MIT | docs/seaman/04.1_seaman_rudder_equation.ipynb | martinlarsalbert/wPCC |
Coordinate system Symbols | #HTML(ss.create_html_table(symbols=equations.total_sway_hull_equation_SI.free_symbols)) | _____no_output_____ | MIT | docs/seaman/04.1_seaman_rudder_equation.ipynb | martinlarsalbert/wPCC |
Rudder equation. The rudder forces consist mainly of two parts: one that depends on the ship's axial speed and one that depends on the thrust. The stalling effect is represented by a third-degree term with a stall coefficient $s$. The total expression for the rudder force is thus written as: |
rudder_equation_no_stall | _____no_output_____ | MIT | docs/seaman/04.1_seaman_rudder_equation.ipynb | martinlarsalbert/wPCC |
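A hedged aside on the stall term mentioned above (a generic toy curve with made-up coefficients, not the seaman coefficients used later): | # linear rudder side force reduced by a cubic stall term
import numpy as np
import matplotlib.pyplot as plt
delta = np.linspace(-0.6, 0.6, 200)   # rudder angle [rad]
k, s_toy = 1.0, 1.5                   # illustrative linear and stall coefficients
force = k*delta - s_toy*delta**3      # side force flattens out at large angles
plt.plot(delta, force);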
If we also consider stall | rudder_equation | _____no_output_____ | MIT | docs/seaman/04.1_seaman_rudder_equation.ipynb | martinlarsalbert/wPCC |
Effective rudder angle | effective_rudder_angle_equation
delta_e_expanded | _____no_output_____ | MIT | docs/seaman/04.1_seaman_rudder_equation.ipynb | martinlarsalbert/wPCC |
Speed dependent part | Latex(sp.latex(rudder_u_equation)) | _____no_output_____ | MIT | docs/seaman/04.1_seaman_rudder_equation.ipynb | martinlarsalbert/wPCC |
Thrust dependent partThis part is assumed to be proportional to the propeller thrust | rudder_T_equation
sp.latex(rudder_total_sway_equation)
rudder_total_sway_equation_SI | _____no_output_____ | MIT | docs/seaman/04.1_seaman_rudder_equation.ipynb | martinlarsalbert/wPCC |
Rudder resistanceThe rudder resistance is taken to be proportional to the rudder side force (without stall) and therudder angle, thus: | rudder_drag_equation
sp.latex(rudder_drag_equation_expanded)
rudder_drag_equation_expanded_SI | _____no_output_____ | MIT | docs/seaman/04.1_seaman_rudder_equation.ipynb | martinlarsalbert/wPCC |
Rudder yawing moment | rudder_yaw_equation
rudder_yaw_equation_expanded_SI | _____no_output_____ | MIT | docs/seaman/04.1_seaman_rudder_equation.ipynb | martinlarsalbert/wPCC |
Rudder roll moment | rudder_roll_equation
rudder_roll_equation_expanded_SI = ss.expand_bis(rudder_roll_equation_expanded)
rudder_roll_equation_expanded_SI | _____no_output_____ | MIT | docs/seaman/04.1_seaman_rudder_equation.ipynb | martinlarsalbert/wPCC |
Lambda functions | from rudder_lambda_functions import * | _____no_output_____ | MIT | docs/seaman/04.1_seaman_rudder_equation.ipynb | martinlarsalbert/wPCC |
Plotting effective rudder angle equation | df = pd.DataFrame()
V = 5.0
beta = np.deg2rad(np.linspace(-10,10,20))
df['u_w'] = V*np.cos(beta)
df['v_w'] = -V*np.sin(beta)
df['delta'] = np.deg2rad(5)
df['r_w'] = 0.0
df['L'] = 50.0
df['k_r'] = 0.5
df['k_v'] = -1.0
df['g'] = 9.81
df['xx_rud'] = -1
df['l_cg'] = 0
result = df.copy()
result['delta_e'] = effective_rudder_angle_function(**df)
result['delta_e_deg'] = np.rad2deg(result['delta_e'])
result['beta_deg'] = np.rad2deg(beta)
result.plot(x = 'beta_deg',y = 'delta_e_deg');
df = pd.DataFrame()
V = 5.0
beta = np.deg2rad(np.linspace(-10,10,20))
df['u_w'] = V*np.cos(beta)
df['v_w'] = -V*np.sin(beta)
df['delta'] = np.deg2rad(5)
df['r_w'] = 0.0
df['L'] = 50.0
df['k_r'] = 0
df['k_v'] = 0
df['g'] = 9.81
df['xx_rud'] = -1
df['l_cg'] = 0
result = df.copy()
result['delta_e'] = effective_rudder_angle_function(**df)
result['delta_e_deg'] = np.rad2deg(result['delta_e'])
result['beta_deg'] = np.rad2deg(beta)
result.plot(x = 'beta_deg',y = 'delta_e_deg');
df = pd.DataFrame()
df['r_w'] = np.linspace(-0.3,0.3,20)
df['delta'] = 0.1
df['u_w'] = 5.0
df['v_w'] = 0.0
df['L'] = 50.0
df['k_r'] = 0.5
df['k_v'] = 0.5
df['g'] = 9.81
df['xx_rud'] = -1
df['l_cg'] = 0
result = df.copy()
result['delta_e'] = effective_rudder_angle_function(**df)
result.plot(x = 'r_w',y = 'delta_e'); | _____no_output_____ | MIT | docs/seaman/04.1_seaman_rudder_equation.ipynb | martinlarsalbert/wPCC |
Plotting the total sway rudder equation | df = pd.DataFrame()
df['delta'] = np.linspace(-0.3,0.3,10)
df['T_prop'] = 1.0
df['n_prop'] = 1.0
df['u_w'] = 5.0
df['v_w'] = 0.0
df['r_w'] = 0.0
df['rho'] = 1025
df['L'] = 1.0
df['k_r'] = 1.0
df['k_v'] = 1.0
df['g'] = 9.81
df['disp'] = 23.0
df['s'] = 0
df['Y_Tdelta'] = 1.0
df['Y_uudelta'] = 1.0
df['xx_rud'] = -1
df['l_cg'] = 0
result = df.copy()
result['fy'] = rudder_total_sway_function(**df)
result.plot(x = 'delta',y = 'fy'); | _____no_output_____ | MIT | docs/seaman/04.1_seaman_rudder_equation.ipynb | martinlarsalbert/wPCC |
Plotting with coefficients from a real seaman ship model | import generate_input
ship_file_path='test_ship.ship'
shipdict = seaman.ShipDict.load(ship_file_path)
df = pd.DataFrame()
df['delta'] = np.deg2rad(np.linspace(-35,35,20))
df['T_prop'] = 10*10**6
df['n_prop'] = 1
df['u_w'] = 5.0
df['v_w'] = 0.0
df['r_w'] = 0.0
df['rho'] = 1025
df['g'] = 9.81
df_input = generate_input.add_shipdict_inputs(lambda_function=rudder_total_sway_function,
shipdict = shipdict,
df = df,)
df_input
result = df_input.copy()
result['fy'] = rudder_total_sway_function(**df_input)
result.plot(x = 'delta',y = 'fy'); | _____no_output_____ | MIT | docs/seaman/04.1_seaman_rudder_equation.ipynb | martinlarsalbert/wPCC |
Plotting the total rudder drag equation | df = pd.DataFrame()
df['delta'] = np.linspace(-0.3,0.3,20)
df['T'] = 1.0
df['u_w'] = 5.0
df['v_w'] = 0.0
df['r_w'] = 0.0
df['rho'] = 1025
df['L'] = 1.0
df['k_r'] = 1.0
df['k_v'] = 1.0
df['g'] = 9.81
df['disp'] = 23.0
df['s'] = 0
df['Y_Tdelta'] = 1.0
df['Y_uudelta'] = 1.0
df['X_Yrdelta'] = -1.0
df['xx_rud'] = -1
df['l_cg'] = 0
result = df.copy()
result['fx'] = rudder_drag_function(**df)
result.plot(x = 'delta',y = 'fx'); | _____no_output_____ | MIT | docs/seaman/04.1_seaman_rudder_equation.ipynb | martinlarsalbert/wPCC |
Real seaman has a maximum effective rudder angle of 0.61 rad for the rudder drag, which is why seaman gives different results for really large drift angles or yaw rates: | df = pd.DataFrame()
df['delta'] = np.deg2rad(np.linspace(-45,45,50))
df['T'] = 10*10**6
df['u_w'] = 5.0
df['v_w'] = 0.0
df['r_w'] = 0.0
df['rho'] = 1025
df['g'] = 9.81
result_comparison = run_real_seaman.compare_with_seaman(lambda_function=rudder_drag_function,
shipdict = shipdict,
df = df,
label='fx',
seaman_function = run_real_seaman.calculate_static_ship_rudder)
fig,ax = plt.subplots()
result_comparison.plot(x = 'delta',y = ['fx','fx_seaman'],ax = ax)
ax.set_title('Rudder angle variation');
df = pd.DataFrame()
df['v_w'] = (np.linspace(-10,10,20))
df['delta'] = 0
df['T'] = 10*10**6
df['u_w'] = 5.0
df['r_w'] = 0.0
df['rho'] = 1025
df['g'] = 9.81
result_comparison = run_real_seaman.compare_with_seaman(lambda_function=rudder_drag_function,
shipdict = shipdict,
df = df,
label='fx',
seaman_function = run_real_seaman.calculate_static_ship_rudder)
fig,ax = plt.subplots()
result_comparison.plot(x = 'v_w',y = ['fx','fx_seaman'],ax = ax)
ax.set_title('Rudder drift angle variation');
df = pd.DataFrame()
df['r_w'] = (np.linspace(-0.05,0.05,20))
df['delta'] = 0
df['T'] = 10*10**6
df['u_w'] = 5.0
df['v_w'] = 0.0
df['rho'] = 1025
df['g'] = 9.81
result_comparison = run_real_seaman.compare_with_seaman(lambda_function=rudder_drag_function,
shipdict = shipdict,
df = df,
label='fx',
seaman_function = run_real_seaman.calculate_static_ship_rudder)
fig,ax = plt.subplots()
result_comparison.plot(x = 'r_w',y = ['fx','fx_seaman'],ax = ax)
ax.set_title('Rudder yaw rate variation'); | _____no_output_____ | MIT | docs/seaman/04.1_seaman_rudder_equation.ipynb | martinlarsalbert/wPCC |
Plotting the rudder yawing moment equation | df = pd.DataFrame()
df['delta'] = np.deg2rad(np.linspace(-35,35,20))
df['T'] = 10*10**6
df['u_w'] = 5.0
df['v_w'] = 0.0
df['r_w'] = 0.0
df['rho'] = 1025
df['g'] = 9.81
result_comparison = run_real_seaman.compare_with_seaman(lambda_function=rudder_yawing_moment_function,
shipdict = shipdict,
df = df,
label='mz',
seaman_function = run_real_seaman.calculate_static_ship)
fig,ax = plt.subplots()
result_comparison.plot(x = 'delta',y = ['mz','mz_seaman'],ax = ax)
ax.set_title('Rudder angle variation'); | _____no_output_____ | MIT | docs/seaman/04.1_seaman_rudder_equation.ipynb | martinlarsalbert/wPCC |
Plotting the rudder roll moment equation | df = pd.DataFrame()
df['delta'] = np.deg2rad(np.linspace(-35,35,20))
df['T'] = 10*10**6
df['u_w'] = 5.0
df['v_w'] = 0.0
df['r_w'] = 0.0
df['rho'] = 1025
df['g'] = 9.81
result_comparison = run_real_seaman.compare_with_seaman(lambda_function=rudder_roll_moment_function,
shipdict = shipdict,
df = df,
label='mx',
seaman_function = run_real_seaman.calculate_static_ship)
fig,ax = plt.subplots()
result_comparison.plot(x = 'delta',y = ['mx','mx_seaman'],ax = ax)
ax.set_title('Rudder angle variation');
shipdict.rudder_particulars
%connect_info | _____no_output_____ | MIT | docs/seaman/04.1_seaman_rudder_equation.ipynb | martinlarsalbert/wPCC |
Combining Pulse Templates. So far we have seen how to define simple pulses using the `TablePulseTemplate` ([Modelling a Simple TablePulseTemplate](00SimpleTablePulse.ipynb)), `FunctionPulseTemplate` ([Modelling Pulses Using Functions And Expressions](02FunctionPulse.ipynb)) and `PointPulseTemplate` ([The PointPulseTemplate](03PointPulse.ipynb)) classes. These are the elementary building blocks to create pulses and we call them *atomic* pulse templates. We will now have a look at how to compose more complex pulse structures. SequencePulseTemplate: Putting Pulses in a Sequence. As the name suggests, `SequencePulseTemplate` allows us to define a pulse as a sequence of already existing pulse templates which are run one after another. In the following example we have two templates created using `PointPulseTemplate` and want to define a higher-level pulse template that puts them in sequence. | from qupulse.pulses import PointPT, SequencePT
# create our atomic "low-level" PointPTs
first_point_pt = PointPT([(0, 'v_0'),
(1, 'v_1', 'linear'),
('t', 'v_0+v_1', 'jump')],
channel_names={'A'},
measurements={('M', 1, 't-1')})
second_point_pt = PointPT([(0, 'v_0+v_1'),
('t_2', 'v_0', 'linear')],
channel_names={'A'},
measurements={('M', 0, 1)})
# define the SequencePT
sequence_pt = SequencePT(first_point_pt, second_point_pt)
print("sequence parameters: {}".format(sequence_pt.parameter_names))
print("sequence measurements: {}".format(sequence_pt.measurement_names)) | sequence parameters: {'t_2', 'v_1', 'v_0', 't'}
sequence measurements: {'M'}
| MIT | doc/source/examples/03xComposedPulses.ipynb | lankes-fzj/qupulse |
It is important to note that all of the pulse templates used to create a `SequencePT` (we call those *subtemplates*) are defined on the same channels, in this case the channel `A` (otherwise we would encounter an exception). The `SequencePT` will also be defined on the same channel.The `SequencePT` will further have the union of all parameters defined in its subtemplates as its own parameter set. If two subtemplates defined parameters with the same name, they will be treated as the same parameters in the `SequencePT`.Finally, `SequencePT` will also expose all measurements defined in subtemplates. It is also possible to define additional measurements in the constructor of `SequencePT`. See [Definition of Measurements](08Measurements.ipynb) for me info about measurements.There are several cases where the above constraints represent a problem: Subtemplates might not all be defined on the same channel, subtemplates might define parameters with the same name which should still be treated as different parameters in the sequence or names of measurements defined by different subtemplates might collide. To deal with these, we can wrap a subtemplate with the `MappingPulseTemplate` class which allows us to rename parameters, channels and measurements or even derive parameter values from other parameters using mathematical expressions. You can learn how to do all this in [Mapping with the MappingPulseTemplate](05MappingTemplate.ipynb).In our example above, however, we were taking care not to encounter these problems yet. Let's plot all of them with some parameters to see the results. | %matplotlib notebook
from qupulse.pulses.plotting import plot
parameters = dict(t=3,
t_2=2,
v_0=1,
v_1=1.4)
_ = plot(first_point_pt, parameters, sample_rate=100)
_ = plot(second_point_pt, parameters, sample_rate=100)
_ = plot(sequence_pt, parameters, sample_rate=100)
| _____no_output_____ | MIT | doc/source/examples/03xComposedPulses.ipynb | lankes-fzj/qupulse |
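A rough sketch of the `MappingPulseTemplate` wrapping mentioned above (keyword names may differ between qupulse versions; see the linked notebook for the authoritative API): | # wrap a subtemplate to rename a parameter and a channel (illustrative only)
from qupulse.pulses import MappingPT
wrapped_pt = MappingPT(first_point_pt,
                       parameter_mapping={'t': 't_one', 'v_0': 'v_0', 'v_1': 'v_1'},
                       channel_mapping={'A': 'B'})
print(wrapped_pt.parameter_names)
print(wrapped_pt.defined_channels)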
RepetitionPulseTemplate: Repeating a Pulse. If we simply want to repeat some pulse template a fixed number of times, we can make use of the `RepetitionPulseTemplate`. In the following, we will reuse one of our `PointPT`s, `first_point_pt`, and use it to create a new pulse template that repeats it `n_rep` times, where `n_rep` will be a parameter. | from qupulse.pulses import RepetitionPT
repetition_pt = RepetitionPT(first_point_pt, 'n_rep')
print("repetition parameters: {}".format(repetition_pt.parameter_names))
print("repetition measurements: {}".format(repetition_pt.measurement_names))
# let's plot to see the results
parameters['n_rep'] = 5 # add a value for our n_rep parameter
_ = plot(repetition_pt, parameters, sample_rate=100) | repetition parameters: {'v_1', 'n_rep', 'v_0', 't'}
repetition measurements: {'M'}
| MIT | doc/source/examples/03xComposedPulses.ipynb | lankes-fzj/qupulse |
The same remarks that were made about `SequencePT` also hold for `RepetitionPT`: it will expose all parameters and measurements defined by its subtemplate and will be defined on the same channels. ForLoopPulseTemplate: Repeat a Pulse with a Varying Loop ParameterThe `RepetitionPT` simple repeats the exact same subtemplate a given number of times. Sometimes, however, it is rather required to vary the parameters of a subtemplate in a loop, for example when trying to determine the best value for a parameter of a given pulse. This is what the `ForLoopPulseTemplate` is intended for. As the name suggests, its behavior mimics that for `for-loop` constructs in programming languages by repeating its content - the subtemplate - for a number of times while at the same time supplying a loop parameter that iterates over a range of values.In the following we make use of this to vary the value of parameter `t` in `first_point_pt` over several iterations. More specifically, we will have all a `first_point_pt` pulse for all even values of `t` between `t_start` and `t_end` which are new parameters. For the plot we will set them to `t_start = 4` and `t_end = 13`, i.e., `t = 4, 6, 8, 10, 12`. | from qupulse.pulses import ForLoopPT
for_loop_pt = ForLoopPT(first_point_pt, 't', ('t_start', 't_end', 2))
print("for loop parameters: {}".format(for_loop_pt.parameter_names))
print("for loop measurements: {}".format(for_loop_pt.measurement_names))
# plot it
parameters['t_start'] = 4
parameters['t_end'] = 13
_ = plot(for_loop_pt, parameters, sample_rate=100) | for loop parameters: {'t_start', 'v_1', 'v_0', 't_end'}
for loop measurements: {'M'}
| MIT | doc/source/examples/03xComposedPulses.ipynb | lankes-fzj/qupulse |
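Composed pulse templates are themselves just pulse templates, so they can be nested freely. As a small illustration that is not part of the original example, the for-loop pulse from above could itself be wrapped in a `RepetitionPT`; the outer repetition count `n_outer` is an illustrative parameter name.

nested_pt = RepetitionPT(for_loop_pt, 'n_outer')
print("nested parameters: {}".format(nested_pt.parameter_names))

# repeat the whole parameter sweep twice and plot the result
parameters['n_outer'] = 2
_ = plot(nested_pt, parameters, sample_rate=100)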
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. 02. Facial Expression Recognition using ONNX Runtime GPU on AzureML This example shows how to deploy an image classification neural network using the Facial Expression Recognition ([FER](https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge/data)) dataset and the Open Neural Network eXchange format ([ONNX](http://aka.ms/onnxdocarticle)) on the Azure Machine Learning platform. This tutorial will show you how to deploy a FER+ model from the [ONNX model zoo](https://github.com/onnx/models), use it to make predictions using ONNX Runtime Inference, and deploy it as a web service in Azure. Throughout this tutorial, we will be referring to ONNX, a neural network exchange format used to represent deep learning models. With ONNX, AI developers can more easily move models between state-of-the-art tools (CNTK, PyTorch, Caffe, MXNet, TensorFlow) and choose the combination that is best for them. ONNX is developed and supported by a community of partners including Microsoft AI, Facebook, and Amazon. For more information, explore the [ONNX website](http://onnx.ai) and [open source files](https://github.com/onnx). [ONNX Runtime](https://aka.ms/onnxruntime) is the runtime engine that enables evaluation of trained machine learning (Traditional ML and Deep Learning) models with high performance and low resource utilization. Tutorial Objectives: 1. Describe the FER+ dataset and pretrained Convolutional Neural Net ONNX model for Emotion Recognition, stored in the ONNX model zoo. 2. Deploy and run the pretrained FER+ ONNX model on an Azure Machine Learning instance. 3. Predict labels for test set data points in the cloud using ONNX Runtime and Azure ML. Prerequisites 1. Install Azure ML SDK and create a new workspace Please follow the [00.configuration.ipynb](https://github.com/Azure/MachineLearningNotebooks/blob/master/00.configuration.ipynb) notebook. 2. Install additional packages needed for this notebook You need to install the popular plotting library `matplotlib` and the `onnx` library in the conda environment where the Azure Machine Learning SDK is installed. ```sh (myenv) $ pip install matplotlib onnx``` 3. Download sample data and pre-trained ONNX model from the ONNX Model Zoo. [Download the ONNX Emotion FER+ model and corresponding test data](https://www.cntk.ai/OnnxModels/emotion_ferplus/opset_7/emotion_ferplus.tar.gz) and place them in the same folder as this tutorial notebook. You can unzip the file with the following line of code. ```sh (myenv) $ tar xvzf emotion_ferplus.tar.gz``` More information about the ONNX FER+ model can be found on [GitHub](https://github.com/onnx/models/tree/master/emotion_ferplus). For more information about the FER+ dataset, please visit Microsoft researcher Emad Barsoum's [FER+ source data repository](https://github.com/ebarsoum/FERPlus). Load Azure ML workspace We begin by instantiating a workspace object from the existing workspace created earlier in the configuration notebook. | # Check core SDK version number
import azureml.core
print("SDK version:", azureml.core.VERSION)
from azureml.core import Workspace
ws = Workspace.from_config()
print(ws.name, ws.location, ws.resource_group, ws.location, sep = '\n') | _____no_output_____ | MIT | onnx/onnx-inference-emotion-recognition.ipynb | sxusx/MachineLearningNotebooks |
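Before deploying anything to Azure, it can be helpful to sanity-check the downloaded FER+ model locally with ONNX Runtime. The following is a minimal sketch rather than part of the original tutorial: the model path `emotion_ferplus/model.onnx` and the 1x1x64x64 float32 grayscale input are assumptions based on the model zoo packaging and may need adjusting for your extracted files.

import numpy as np
import onnxruntime as ort

# load the pretrained FER+ model unpacked from emotion_ferplus.tar.gz (assumed path)
sess = ort.InferenceSession("emotion_ferplus/model.onnx")
input_name = sess.get_inputs()[0].name

# the FER+ model expects a single 64x64 grayscale image as float32, shape (1, 1, 64, 64)
dummy_face = np.random.rand(1, 1, 64, 64).astype(np.float32)

# run inference and print the raw (unnormalized) emotion scores
scores = sess.run(None, {input_name: dummy_face})[0]
print("raw emotion scores:", scores)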