markdown | code | output | license | path | repo_name |
---|---|---|---|---|---|
SVD features of edges decomposed from incidence matrix | # SVD features of edges decomposed from incidence matrix
fig = plt.figure(figsize=(10,10))
colors=['green','hotpink','yellow', 'cyan','red','purple']
svd = fig.add_subplot(1,1,1)
vi1 = np.transpose(vi)
svd.scatter([vi1[:, 0]], [vi1[:, 1]],c=np.array(ed_label),s=[50,50],cmap=matplotlib.colors.ListedColormap(colors))
svd.title.set_text("W-edges")
plt.show() | _____no_output_____ | MIT | incidence-mat-exp.ipynb | supriya-pandhre/incidence-mat-exp |
NORMALIZED GRAPH LAPLACIAN Decomposing the normalized Laplacian and plotting node features (W) | # calculate normalized graph laplacian
L = nx.normalized_laplacian_matrix(G).todense()
print(L.shape)
print(L[0,0:5])
# NMF does not work on input matrix with negative values
# from sklearn.decomposition import NMF
# model = NMF(n_components=2,init='random', random_state=0)
# # decomposing normalized graph laplacian L
# W = model.fit_transform(L)
# H = model.components_
# err = model.reconstruction_err_
# it = model.n_iter_
# print(err)
# print(it)
# print(W.shape)
# print(H.shape)
# print(W[0])
# print(H[:,0]) | _____no_output_____ | MIT | incidence-mat-exp.ipynb | supriya-pandhre/incidence-mat-exp |
SVD decomposition of normalized graph laplacian | # SVD decomposition
ul,sl,vl = np.linalg.svd(L)
print(ul.shape)
# u=np.around(u,decimals=5)
# print(ui)
print(sl.shape)
# s=np.around(s)
# print(si)
print(vl.shape)
# v=np.around(v,decimals=5)
# print(vi) | (30, 30)
(30,)
(30, 30)
| MIT | incidence-mat-exp.ipynb | supriya-pandhre/incidence-mat-exp |
Displaying SVD node features (U) of the Laplacian matrix. Since the normalized graph Laplacian is symmetric, its SVD L = U S V^T has U equal to V, i.e. the rows of U are the same as the columns of V^T. Hence the node features below are displayed from U. | import matplotlib
import numpy as np
fig = plt.figure(figsize=(10,10))
colors=['green','hotpink','yellow', 'cyan','red','purple']
svd = fig.add_subplot(1,1,1)
svd.scatter([ul[:, 0]], [ul[:, 1]],c=np.array(list(partition.values())),s=[50,50],cmap=matplotlib.colors.ListedColormap(colors))
svd.title.set_text("U-nodes:SVD decomposition of normalized graph laplacian")
plt.show()
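# Optional sanity check (a sketch; it assumes `L`, `ul`, `sl`, `vl` from the SVD cell above).
# Because the normalized Laplacian is symmetric, its SVD should have U equal to V,
# so U should match the transpose of V^T (up to sign flips on degenerate singular values).
print(np.allclose(ul @ np.diag(sl) @ vl, L))    # the SVD factors reconstruct L
print(np.allclose(np.abs(ul), np.abs(vl.T)))    # |U| agrees with |V|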
# applying t-SNE and PCA on U-node features -- laplacian matrix
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.preprocessing import normalize
fig = plt.figure(figsize=(10,10))
colors=['green','hotpink','yellow', 'cyan', 'red', 'purple']
# normalize
ul1 = normalize(ul)
tsne = fig.add_subplot(1,2,1)
X_tsne = TSNE(n_components=2, perplexity=40).fit_transform(ul1)
tsne.scatter(X_tsne[:, 0], X_tsne[:, 1], c=np.array(list(partition.values())),s=[50,50],cmap=matplotlib.colors.ListedColormap(colors))
tsne.title.set_text("t-SNE")
pca = fig.add_subplot(1,2,2)
X_pca = PCA(n_components=2).fit_transform(ul1)
pca.scatter(X_pca[:, 0], X_pca[:, 1], c=np.array(list(partition.values())), s=[50, 50], cmap=matplotlib.colors.ListedColormap(colors))
pca.title.set_text("PCA")
plt.show() | _____no_output_____ | MIT | incidence-mat-exp.ipynb | supriya-pandhre/incidence-mat-exp |
ADJACENCY MATRIX Decomposing the adjacency matrix and displaying node features | Adj = nx.adjacency_matrix(G)
print(Adj.todense().shape)
# convert adjacency matrix to dense matrix(default format is sparse matrix)
AdjDense = Adj.todense() | (30, 30)
| MIT | incidence-mat-exp.ipynb | supriya-pandhre/incidence-mat-exp |
NMF decomposition of Adjacency matrix | from sklearn.decomposition import NMF
model = NMF(n_components=2,init='random', random_state=0)
Wa = model.fit_transform(AdjDense)
Ha= model.components_
erra = model.reconstruction_err_
ita = model.n_iter_
print(erra)
print(ita)
print(Wa.shape)
print(Ha.shape)
print(Wa[0])
print(Ha[:,0])
# displaying learned nodes
import matplotlib
import numpy as np
fig = plt.figure(figsize=(10,10))
colors=['green','hotpink','yellow', 'cyan','red','purple']
svd = fig.add_subplot(1,1,1)
svd.scatter([Wa[:, 0]], [Wa[:, 1]],c=np.array(list(partition.values())),s=[50,50],cmap=matplotlib.colors.ListedColormap(colors))
svd.title.set_text("W-nodes:NMF decomposition of Adjacency matrix")
plt.show() | _____no_output_____ | MIT | incidence-mat-exp.ipynb | supriya-pandhre/incidence-mat-exp |
SVD Decomposition of adjacency matrix | # Calculate SVD (Singular value decomposition) of graph's adjacency matrix
ua,sa,va = np.linalg.svd(AdjDense)
print(ua.shape)
# u=np.around(u,decimals=3)
# print(u)
print(sa.shape)
# s=np.around(s)
# print(s)
print(va.shape)
# v=np.around(v,decimals=3)
# print(v)
import matplotlib
import numpy as np
fig = plt.figure(figsize=(10,10))
colors=['green','hotpink','yellow', 'cyan','red','purple']
svd = fig.add_subplot(1,1,1)
svd.scatter([ua[:, 0]], [ua[:, 1]],c=np.array(list(partition.values())),s=[50,50],cmap=matplotlib.colors.ListedColormap(colors))
svd.title.set_text("U-nodes:SVD Decomposition of adjacency matrix")
plt.show() | _____no_output_____ | MIT | incidence-mat-exp.ipynb | supriya-pandhre/incidence-mat-exp |
Challenge 3 Challenge 3.1 | myinput = '/home/fmuinos/projects/adventofcode/2016/ferran/inputs/input3.txt'
def is_triangle(sides):
return sides[0] + sides[1] > sides[2]
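# quick check with the puzzle's example: sides 5, 10, 25 do not form a triangle
# (5 + 10 is not larger than 25), so this should be False
assert not is_triangle(sorted([5, 10, 25]))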
def no_triangles(path):
with open(path,'rt') as f:
ntr = 0
for line in f:
sides = list(map(int, line.rstrip().split()))
if is_triangle(sorted(sides)):
ntr += 1
return ntr
no_triangles(myinput) | _____no_output_____ | MIT | 2016/ferran/day3.ipynb | bbglab/adventofcode |
Challenge 3.2 | def no_triangles_by_cols(path):
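    # Part 2 reads the grid three rows at a time: each *column* of that 3x3 block
    # is one candidate triangle, so sides are buffered in `triangles` and only
    # checked once a full group of three rows has been read (i % 3 == 0).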
triangles = [[0,0,0], [0,0,0], [0,0,0]]
with open(path,'rt') as f:
ntr = 0
i = 1
for line in f:
sides = list(map(int, line.rstrip().split()))
for j in range(3):
triangles[j][i % 3] = sides[j]
if i % 3 == 0:
for j in range(3):
if is_triangle(sorted(triangles[j])):
ntr += 1
i += 1
return ntr
no_triangles_by_cols(myinput) | _____no_output_____ | MIT | 2016/ferran/day3.ipynb | bbglab/adventofcode |
Agenda **Topics**:* Python review: - Variables - Math operations* Hands-on exercise* Data loading* Visualizations Python Review Variables A variable is an object that holds a value and keeps that value in the computer's memory while the program runs. We can initialize a variable with the assignment operator '='. | # we can define a variable by giving it a name
ano = 2020
# to print the variable we created, we use the print function
print(ano)
salario = 1500
print(salario)
salario = 1000
print(salario)
| 1500
1000
| MIT | Primeiros_Passos_Christian_Python.ipynb | ChristianEngProd/HTML_Teste |
Math Operations With Python we can perform mathematical operations. Combined with variables, this becomes even more powerful. | salario1 = 1500
salario2 = 1000
print(salario1 + salario2) #sum
print(salario1 - salario2) #subtraction
print(salario1 * salario2) #multiplication
print(salario1 / salario2) #division
print(salario1 // salario2) #integer division
print(salario1 % salario2) #remainder of division
print(salario1 ** 2) #exponentiation | 2500
500
1500000
1.5
1
500
2250000
| MIT | Primeiros_Passos_Christian_Python.ipynb | ChristianEngProd/HTML_Teste |
Hands-on Exercise* 1. Open Google Colab: https://colab.research.google.com/* 2. Log in with a Google account* 3. File --> New notebook Data Loading | # Pandas library
import pandas as pd
# Loading datasets from Jan 2022 to Mar 2022
# Source: https://www.gov.br/anp/pt-br/centrais-de-conteudo/dados-abertos/serie-historica-de-precos-de-combustiveis
etanol_202201 = pd.read_csv('https://github.com/marioandrededeus/semana_sala_aberta_DH/raw/main/precos-gasolina-etanol-2022-01.csv', sep = ';', decimal = ',', encoding = 'latin')
etanol_202202 = pd.read_csv('https://github.com/marioandrededeus/semana_sala_aberta_DH/raw/main/precos-gasolina-etanol-2022-02.csv', sep = ';', decimal = ',', encoding = 'latin')
etanol_202203 = pd.read_csv('https://github.com/marioandrededeus/semana_sala_aberta_DH/raw/main/dados-abertos-precos-2022-03-gasolina-etanol.csv', sep = ';', decimal = ',', encoding = 'latin')
df = pd.concat([etanol_202201, etanol_202202, etanol_202203])
df['Data da Coleta'] = pd.to_datetime(df['Data da Coleta'], dayfirst=True)
df.head() | _____no_output_____ | MIT | Primeiros_Passos_Christian_Python.ipynb | ChristianEngProd/HTML_Teste |
Dimensions of the dataframe (table) | df.shape | _____no_output_____ | MIT | Primeiros_Passos_Christian_Python.ipynb | ChristianEngProd/HTML_Teste |
Visualizations Price by State | df_estado = df.groupby('Estado')['Valor de Venda'].mean()
df_estado
df_estado.plot.bar(figsize = (20,5)); | _____no_output_____ | MIT | Primeiros_Passos_Christian_Python.ipynb | ChristianEngProd/HTML_Teste |
Price by Region | df_regiao = df.groupby('Regiao')['Valor de Venda'].mean()
df_regiao
df_regiao.plot.bar(figsize = (10,5)); | _____no_output_____ | MIT | Primeiros_Passos_Christian_Python.ipynb | ChristianEngProd/HTML_Teste |
Price by Region - Timeline | df_regiao_data = df.groupby(['Regiao','Data da Coleta'])['Valor de Venda'].mean().reset_index()
df_regiao_data
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize = (20,5))
sns.lineplot(data = df_regiao_data,
x = 'Data da Coleta',
y = 'Valor de Venda',
hue = 'Regiao');
| _____no_output_____ | MIT | Primeiros_Passos_Christian_Python.ipynb | ChristianEngProd/HTML_Teste |
Network Initializer What is a neuron? Feed-forward neural networks are inspired by the information processing of one or more neural cells, called neurons. A neuron accepts input signals via its dendrites, which pass the electrical signal down to the cell body. The axon carries the signal out to synapses, which are the connections of a cell's axon to other cells' dendrites. | from random import random, seed
def initialize_network(n_inputs, n_hidden, n_outputs):
network = list()
# Creating hidden layers according to the number of inputs
hidden_layer = [{'weights': [random() for i in range(n_inputs + 1)]} for i in range(n_hidden)]
network.append(hidden_layer)
# Creating output layer according to the number of hidden layers
output_layer = [{'weights': [random() for i in range(n_hidden + 1)]} for i in range(n_outputs)]
network.append(output_layer)
return network
# It is good practice to initialize the network weights to small random numbers.
# In this case, will we use random numbers in the range of 0 to 1.
# To achieve that we seed random with 1
seed(1)
# 2 input units, 1 hidden unit and 2 output units
network = initialize_network(2, 1, 2)
# You can see the hidden layer has one neuron with 2 input weights plus the bias.
# The output layer has 2 neurons, each with 1 weight plus the bias.
for layer in network:
print(layer) | [{'weights': [0.7887233511355132, 0.0938595867742349, 0.02834747652200631]}]
[{'weights': [0.8357651039198697, 0.43276706790505337]}, {'weights': [0.762280082457942, 0.0021060533511106927]}]
| Apache-2.0 | 003-forward-and-back-props/Backward Propagation.ipynb | wfraher/deeplearning |
Forward propagate. We can calculate an output from a neural network by propagating an input signal through each layer until the output layer outputs its values. We can break forward propagation down into three parts: 1. Neuron Activation. 2. Neuron Transfer. 3. Forward Propagation. 1. Neuron Activation. The first step is to calculate the activation of one neuron given an input. Neuron activation is calculated as the weighted sum of the inputs, much like linear regression: activation = sum(weight_i * input_i) + bias, where weight_i is a network weight, input_i is an input, i is the index of a weight or an input, and bias is a special weight that has no input to multiply with (or you can think of its input as always being 1.0). | # Implementation
def activate(weights, inputs):
activation = weights[-1]
for i in range(len(weights) - 1):
activation += weights[i] * inputs[i]
return activation | _____no_output_____ | Apache-2.0 | 003-forward-and-back-props/Backward Propagation.ipynb | wfraher/deeplearning |
2. Neuron Transfer. Once a neuron is activated, we need to transfer the activation to see what the neuron output actually is. Different transfer functions can be used. It is traditional to use the *sigmoid activation function*, but you can also use the *tanh* (hyperbolic tangent) function to transfer outputs. More recently, the *rectifier transfer function* has been popular with large deep learning networks. Sigmoid formula: output = 1 / (1 + e^(-activation)) | from math import exp
def transfer(activation):
return 1.0 / (1.0 + exp(-activation)) | _____no_output_____ | Apache-2.0 | 003-forward-and-back-props/Backward Propagation.ipynb | wfraher/deeplearning |
3. Forward propagate | # Forward propagation is self-explanatory
def forward_propagate(network, row):
inputs = row
for layer in network:
new_inputs = []
for neuron in layer:
activation = activate(neuron['weights'], inputs)
neuron['output'] = transfer(activation)
new_inputs.append(neuron['output'])
inputs = new_inputs
return inputs
inputs = [1, 0, None]
output = forward_propagate(network, inputs)
# Running the example propagates the input pattern [1, 0] and produces an output value that is printed.
# Because the output layer has two neurons, we get a list of two numbers as output.
output | _____no_output_____ | Apache-2.0 | 003-forward-and-back-props/Backward Propagation.ipynb | wfraher/deeplearning |
Backpropagation What is it? 1. Error is calculated between the expected outputs and the outputs forward propagated from the network. 2. These errors are then propagated backward through the network from the output layer to the hidden layer, assigning blame for the error and updating weights as they go. This part is broken down into two sections: - Transfer Derivative - Error Backpropagation. Transfer Derivative: Given an output value from a neuron, we need to calculate its *slope*: derivative = output * (1.0 - output) | # Calculates the derivative of a neuron's output
def transfer_derivative(output):
return output * (1.0 - output) | _____no_output_____ | Apache-2.0 | 003-forward-and-back-props/Backward Propagation.ipynb | wfraher/deeplearning |
Error Backpropagation. 1. Calculate the error for each output neuron; this gives us the error signal (input) to propagate backwards through the network: error = (expected - output) * transfer_derivative(output), where expected is the expected output value for the neuron, output is the output value for the neuron, and transfer_derivative() is defined above. The back-propagated error signal is accumulated and then used to determine the error for a neuron in the hidden layer, as follows: error = (weight_k * error_j) * transfer_derivative(output), where error_j is the error signal from the jth neuron in the output layer, weight_k is the weight that connects the kth neuron to the current neuron, and output is the output of the current neuron. | def backward_propagate_error(network, expected):
for i in reversed(range(len(network))):
layer = network[i]
errors = list()
if i != len(network) - 1:
for j in range(len(layer)):
error = 0.0
for neuron in network[i + 1]:
error += (neuron['weights'][j] * neuron['delta'])
errors.append(error)
else:
for j in range(len(layer)):
neuron = layer[j]
errors.append(expected[j] - neuron['output'])
for j in range(len(layer)):
neuron = layer[j]
neuron['delta'] = errors[j] * transfer_derivative(neuron['output'])
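# The notes above mention "updating weights as they go", but no update step is defined
# in this notebook. A minimal sketch of the usual stochastic-gradient update (an added
# assumption, using the `delta` values computed by backward_propagate_error, and assuming
# each input row carries its class label in the last position, as in [1, 0, None] above):
def update_weights(network, row, l_rate):
    for i in range(len(network)):
        # inputs to this layer: the data row for the first layer,
        # otherwise the outputs of the previous layer
        inputs = row[:-1] if i == 0 else [neuron['output'] for neuron in network[i - 1]]
        for neuron in network[i]:
            for j in range(len(inputs)):
                neuron['weights'][j] += l_rate * neuron['delta'] * inputs[j]
            # the last weight is the bias, whose "input" is always 1.0
            neuron['weights'][-1] += l_rate * neuron['delta']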
expected = [0, 1]
backward_propagate_error(network, expected)
# delta: error value
for layer in network:
print(layer) | [{'weights': [0.7887233511355132, 0.0938595867742349, 0.02834747652200631], 'output': 0.6936142046010635, 'delta': -0.011477619712406795}]
[{'weights': [0.8357651039198697, 0.43276706790505337], 'output': 0.7335023968859138, 'delta': -0.1433825771158816}, {'weights': [0.762280082457942, 0.0021060533511106927], 'output': 0.6296776889933221, 'delta': 0.08635312555373359}]
| Apache-2.0 | 003-forward-and-back-props/Backward Propagation.ipynb | wfraher/deeplearning |
Modeling and Simulation in Python. Chapter 20. Copyright 2017 Allen Downey. License: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0) | # Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim.py module
from modsim import * | _____no_output_____ | MIT | code/soln/chap20soln.ipynb | arunkhattri/ModSimPy |
Dropping pennies. I'll start by getting the units we need from Pint. | m = UNITS.meter
s = UNITS.second | _____no_output_____ | MIT | code/soln/chap20soln.ipynb | arunkhattri/ModSimPy |
And defining the initial state. | init = State(y=381 * m,
v=0 * m/s) | _____no_output_____ | MIT | code/soln/chap20soln.ipynb | arunkhattri/ModSimPy |
Acceleration due to gravity is about 9.8 m / s$^2$. | g = 9.8 * m/s**2 | _____no_output_____ | MIT | code/soln/chap20soln.ipynb | arunkhattri/ModSimPy |
When we run the simulation, we need to specify how long it should run. I'll start with a duration of 10 seconds. | t_end = 10 * s | _____no_output_____ | MIT | code/soln/chap20soln.ipynb | arunkhattri/ModSimPy |
Now we make a `System` object. | system = System(init=init, g=g, t_end=t_end) | _____no_output_____ | MIT | code/soln/chap20soln.ipynb | arunkhattri/ModSimPy |
And define the slope function. | def slope_func(state, t, system):
"""Compute derivatives of the state.
state: position, velocity
t: time
system: System object containing `g`
returns: derivatives of y and v
"""
y, v = state
unpack(system)
dydt = v
dvdt = -g
return dydt, dvdt | _____no_output_____ | MIT | code/soln/chap20soln.ipynb | arunkhattri/ModSimPy |
It's always a good idea to test the slope function with the initial conditions. | dydt, dvdt = slope_func(init, 0, system)
print(dydt)
print(dvdt) | 0.0 meter / second
-9.8 meter / second ** 2
| MIT | code/soln/chap20soln.ipynb | arunkhattri/ModSimPy |
Now we're ready to call `run_ode_solver` | results, details = run_ode_solver(system, slope_func, max_step=0.5*s)
details.message | _____no_output_____ | MIT | code/soln/chap20soln.ipynb | arunkhattri/ModSimPy |
Here are the results: | results | _____no_output_____ | MIT | code/soln/chap20soln.ipynb | arunkhattri/ModSimPy |
And here's position as a function of time: | def plot_position(results):
plot(results.y, label='y')
decorate(xlabel='Time (s)',
ylabel='Position (m)')
plot_position(results)
savefig('figs/chap09-fig01.pdf') | Saving figure to file figs/chap09-fig01.pdf
| MIT | code/soln/chap20soln.ipynb | arunkhattri/ModSimPy |
Onto the sidewalk. To figure out when the penny hit the sidewalk, we can use `crossings`, which finds the times where a `Series` passes through a given value. | t_crossings = crossings(results.y, 0) | _____no_output_____ | MIT | code/soln/chap20soln.ipynb | arunkhattri/ModSimPy |
For this example there should be just one crossing, the time when the penny hits the sidewalk. | t_sidewalk = t_crossings[0] * s | _____no_output_____ | MIT | code/soln/chap20soln.ipynb | arunkhattri/ModSimPy |
We can compare that to the exact result. Without air resistance, we have $v = -g t$ and $y = 381 - g t^2 / 2$. Setting $y=0$ and solving for $t$ yields $t = \sqrt{\frac{2 y_{init}}{g}}$ | sqrt(2 * init.y / g) | _____no_output_____ | MIT | code/soln/chap20soln.ipynb | arunkhattri/ModSimPy |
The estimate is accurate to about 10 decimal places. Events. Instead of running the simulation until the penny goes through the sidewalk, it would be better to detect the point where the penny hits the sidewalk and stop. `run_ode_solver` provides exactly the tool we need, **event functions**. Here's an event function that returns the height of the penny above the sidewalk: | def event_func(state, t, system):
"""Return the height of the penny above the sidewalk.
"""
y, v = state
return y | _____no_output_____ | MIT | code/soln/chap20soln.ipynb | arunkhattri/ModSimPy |
And here's how we pass it to `run_ode_solver`. The solver should run until the event function returns 0, and then terminate. | results, details = run_ode_solver(system, slope_func, events=event_func)
details | _____no_output_____ | MIT | code/soln/chap20soln.ipynb | arunkhattri/ModSimPy |
The message from the solver indicates the solver stopped because the event we wanted to detect happened. Here are the results: | results | _____no_output_____ | MIT | code/soln/chap20soln.ipynb | arunkhattri/ModSimPy |
With the `events` option, the solver returns the actual time steps it computed, which are not necessarily equally spaced. The last time step is when the event occurred: | t_sidewalk = get_last_label(results) * s | _____no_output_____ | MIT | code/soln/chap20soln.ipynb | arunkhattri/ModSimPy |
Unfortunately, `run_ode_solver` does not carry the units through the computation, so we have to put them back at the end. We could also get the time of the event from `details`, but it's a minor nuisance because it comes packed in an array: | details.t_events[0][0] * s | _____no_output_____ | MIT | code/soln/chap20soln.ipynb | arunkhattri/ModSimPy |
The result is accurate to about 15 decimal places. We can also check the velocity of the penny when it hits the sidewalk: | v_sidewalk = get_last_value(results.v) * m / s | _____no_output_____ | MIT | code/soln/chap20soln.ipynb | arunkhattri/ModSimPy |
And convert to kilometers per hour. | km = UNITS.kilometer
h = UNITS.hour
v_sidewalk.to(km / h) | _____no_output_____ | MIT | code/soln/chap20soln.ipynb | arunkhattri/ModSimPy |
If there were no air resistance, the penny would hit the sidewalk (or someone's head) at more than 300 km/h. So it's a good thing there is air resistance. Under the hood. Here is the source code for `crossings` so you can see what's happening under the hood: | %psource crossings | _____no_output_____ | MIT | code/soln/chap20soln.ipynb | arunkhattri/ModSimPy |
The [documentation of InterpolatedUnivariateSpline is here](https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.InterpolatedUnivariateSpline.html). And you can read the [documentation of `scipy.integrate.solve_ivp`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.solve_ivp.html) to learn more about how `run_ode_solver` works. Exercises **Exercise:** Here's a question from the web site [Ask an Astronomer](http://curious.astro.cornell.edu/about-us/39-our-solar-system/the-earth/other-catastrophes/57-how-long-would-it-take-the-earth-to-fall-into-the-sun-intermediate): "If the Earth suddenly stopped orbiting the Sun, I know eventually it would be pulled in by the Sun's gravity and hit it. How long would it take the Earth to hit the Sun? I imagine it would go slowly at first and then pick up speed." Use `run_ode_solver` to answer this question. Here are some suggestions about how to proceed: 1. Look up the Law of Universal Gravitation and any constants you need. I suggest you work entirely in SI units: meters, kilograms, and Newtons. 2. When the distance between the Earth and the Sun gets small, this system behaves badly, so you should use an event function to stop when the surface of Earth reaches the surface of the Sun. 3. Express your answer in days, and plot the results as millions of kilometers versus days. If you read the reply by Dave Rothstein, you will see other ways to solve the problem, and a good discussion of the modeling decisions behind them. You might also be interested to know that [it's actually not that easy to get to the Sun](https://www.theatlantic.com/science/archive/2018/08/parker-solar-probe-launch-nasa/567197/). | # Solution
N = UNITS.newton
kg = UNITS.kilogram
m = UNITS.meter
AU = UNITS.astronomical_unit
# Solution
r_0 = (1 * AU).to_base_units()
v_0 = 0 * m / s
init = State(r=r_0,
v=v_0)
# Solution
r_earth = 6.371e6 * m
r_sun = 695.508e6 * m
system = System(init=init,
G=6.674e-11 * N / kg**2 * m**2,
m1=1.989e30 * kg,
r_final=r_sun + r_earth,
m2=5.972e24 * kg,
t_0=0 * s,
t_end=1e7 * s)
# Solution
def universal_gravitation(state, system):
"""Computes gravitational force.
state: State object with distance r
system: System object with m1, m2, and G
"""
r, v = state
unpack(system)
force = G * m1 * m2 / r**2
return force
# Solution
universal_gravitation(init, system)
# Solution
def slope_func(state, t, system):
"""Compute derivatives of the state.
state: position, velocity
t: time
system: System object containing `g`
returns: derivatives of y and v
"""
y, v = state
unpack(system)
force = universal_gravitation(state, system)
dydt = v
dvdt = -force / m2
return dydt, dvdt
# Solution
slope_func(init, 0, system)
# Solution
def event_func(state, t, system):
r, v = state
return r - system.r_final
# Solution
event_func(init, 0, system)
# Solution
results, details = run_ode_solver(system, slope_func, events=event_func)
details
# Solution
t_event = details.t_events[0] * s
# Solution
t_event.to(UNITS.day)
# Solution
ts = linspace(t_0, t_event, 201)
results, details = run_ode_solver(system, slope_func, events=event_func, t_eval=ts)
# Solution
results.index /= 60 * 60 * 24
# Solution
results.r /= 1e9
# Solution
plot(results.r, label='r')
decorate(xlabel='Time (day)',
ylabel='Distance from sun (million km)') | _____no_output_____ | MIT | code/soln/chap20soln.ipynb | arunkhattri/ModSimPy |
# !gdown --id 1LukOUfVNeps1Jj7Z27JbkmrO90jwBgie
# !pip install kora
# from kora import drive
# drive.download_folder('1LukOUfVNeps1Jj7Z27JbkmrO90jwBgie')
import shutil
shutil.unpack_archive('mri.zip')
# !ls /content/img_align_celeba | _____no_output_____ | Apache-2.0 | SRGAN_Final.ipynb | ashishpatel26/SRGAN-Keras |
|
Load Libraries | !pip install scipy==1.1.0
import glob
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from keras import Input
from keras.applications import VGG19
from keras.callbacks import TensorBoard
from keras.layers import BatchNormalization, Activation, LeakyReLU, Add, Dense
from keras.layers.convolutional import Conv2D, UpSampling2D
from keras.models import Model
from keras.optimizers import Adam
from scipy.misc import imread, imresize
import keras.backend as K
import cv2
import os
from PIL import Image
# from imageio import imread
# from skimage.transform import resize | _____no_output_____ | Apache-2.0 | SRGAN_Final.ipynb | ashishpatel26/SRGAN-Keras |
Residual Block | def residual_block(x):
"""
Residual block
"""
filters = [64, 64]
kernel_size = 3
strides = 1
padding = "same"
momentum = 0.8
activation = "relu"
res = Conv2D(filters=filters[0], kernel_size=kernel_size, strides=strides, padding=padding)(x)
res = Activation(activation=activation)(res)
res = BatchNormalization(momentum=momentum)(res)
res = Conv2D(filters=filters[1], kernel_size=kernel_size, strides=strides, padding=padding)(res)
res = BatchNormalization(momentum=momentum)(res)
# Add res and x
res = Add()([res, x])
return res | _____no_output_____ | Apache-2.0 | SRGAN_Final.ipynb | ashishpatel26/SRGAN-Keras |
Build Generator | def build_generator():
"""
Create a generator network using the hyperparameter values defined below
:return:
"""
residual_blocks = 16
momentum = 0.8
input_shape = (64, 64, 3)
# Input Layer of the generator network
input_layer = Input(shape=input_shape)
# Add the pre-residual block
gen1 = Conv2D(filters=64, kernel_size=9, strides=1, padding='same', activation='relu')(input_layer)
# Add 16 residual blocks
res = residual_block(gen1)
for i in range(residual_blocks - 1):
res = residual_block(res)
# Add the post-residual block
gen2 = Conv2D(filters=64, kernel_size=3, strides=1, padding='same')(res)
gen2 = BatchNormalization(momentum=momentum)(gen2)
# Take the sum of the output from the pre-residual block(gen1) and the post-residual block(gen2)
gen3 = Add()([gen2, gen1])
# Add an upsampling block
gen4 = UpSampling2D(size=2)(gen3)
gen4 = Conv2D(filters=256, kernel_size=3, strides=1, padding='same')(gen4)
gen4 = Activation('relu')(gen4)
# Add another upsampling block
gen5 = UpSampling2D(size=2)(gen4)
gen5 = Conv2D(filters=256, kernel_size=3, strides=1, padding='same')(gen5)
gen5 = Activation('relu')(gen5)
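    # note: the two successive 2x upsampling blocks give the overall 4x super-resolution
    # factor, taking the 64x64 low-resolution input up to the 256x256 output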
# Output convolution layer
gen6 = Conv2D(filters=3, kernel_size=9, strides=1, padding='same')(gen5)
output = Activation('tanh')(gen6)
# Keras model
model = Model(inputs=[input_layer], outputs=[output], name='generator')
return model
| _____no_output_____ | Apache-2.0 | SRGAN_Final.ipynb | ashishpatel26/SRGAN-Keras |
Build Discriminator | def build_discriminator():
"""
Create a discriminator network using the hyperparameter values defined below
:return:
"""
leakyrelu_alpha = 0.2
momentum = 0.8
input_shape = (256, 256, 3)
input_layer = Input(shape=input_shape)
# Add the first convolution block
dis1 = Conv2D(filters=64, kernel_size=3, strides=1, padding='same')(input_layer)
dis1 = LeakyReLU(alpha=leakyrelu_alpha)(dis1)
# Add the 2nd convolution block
dis2 = Conv2D(filters=64, kernel_size=3, strides=2, padding='same')(dis1)
dis2 = LeakyReLU(alpha=leakyrelu_alpha)(dis2)
dis2 = BatchNormalization(momentum=momentum)(dis2)
# Add the third convolution block
dis3 = Conv2D(filters=128, kernel_size=3, strides=1, padding='same')(dis2)
dis3 = LeakyReLU(alpha=leakyrelu_alpha)(dis3)
dis3 = BatchNormalization(momentum=momentum)(dis3)
# Add the fourth convolution block
dis4 = Conv2D(filters=128, kernel_size=3, strides=2, padding='same')(dis3)
dis4 = LeakyReLU(alpha=leakyrelu_alpha)(dis4)
dis4 = BatchNormalization(momentum=0.8)(dis4)
# Add the fifth convolution block
dis5 = Conv2D(256, kernel_size=3, strides=1, padding='same')(dis4)
dis5 = LeakyReLU(alpha=leakyrelu_alpha)(dis5)
dis5 = BatchNormalization(momentum=momentum)(dis5)
# Add the sixth convolution block
dis6 = Conv2D(filters=256, kernel_size=3, strides=2, padding='same')(dis5)
dis6 = LeakyReLU(alpha=leakyrelu_alpha)(dis6)
dis6 = BatchNormalization(momentum=momentum)(dis6)
# Add the seventh convolution block
dis7 = Conv2D(filters=512, kernel_size=3, strides=1, padding='same')(dis6)
dis7 = LeakyReLU(alpha=leakyrelu_alpha)(dis7)
dis7 = BatchNormalization(momentum=momentum)(dis7)
# Add the eight convolution block
dis8 = Conv2D(filters=512, kernel_size=3, strides=2, padding='same')(dis7)
dis8 = LeakyReLU(alpha=leakyrelu_alpha)(dis8)
dis8 = BatchNormalization(momentum=momentum)(dis8)
# Add a dense layer
dis9 = Dense(units=1024)(dis8)
dis9 = LeakyReLU(alpha=0.2)(dis9)
# Last dense layer - for classification
output = Dense(units=1, activation='sigmoid')(dis9)
model = Model(inputs=[input_layer], outputs=[output], name='discriminator')
return model | _____no_output_____ | Apache-2.0 | SRGAN_Final.ipynb | ashishpatel26/SRGAN-Keras |
Build VGG19 | def build_vgg():
"""
Build VGG network to extract image features
"""
input_shape = (256, 256, 3)
# Load a pre-trained VGG19 model trained on 'Imagenet' dataset
vgg = VGG19(include_top=False, weights='imagenet', input_shape=input_shape)
vgg.outputs = [vgg.layers[20].output]
# Create a Keras model
model = Model(vgg.input, vgg.outputs)
return model
# def build_vgg():
# """
# Build VGG network to extract image features
# """
# input_shape = (256, 256, 3)
# # Load a pre-trained VGG19 model trained on 'Imagenet' dataset
# vgg = VGG19(include_top=False, weights='imagenet')
# vgg.outputs = [vgg.layers[20].output]
# input_layer = Input(shape=input_shape)
# # Extract features
# features = vgg(input_layer)
# # Create a Keras model
# model = Model(inputs=[input_layer], outputs=[features])
# return model
model = build_vgg()
model.summary() | Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/vgg19/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5
80142336/80134624 [==============================] - 0s 0us/step
Model: "model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 256, 256, 3)] 0
_________________________________________________________________
block1_conv1 (Conv2D) (None, 256, 256, 64) 1792
_________________________________________________________________
block1_conv2 (Conv2D) (None, 256, 256, 64) 36928
_________________________________________________________________
block1_pool (MaxPooling2D) (None, 128, 128, 64) 0
_________________________________________________________________
block2_conv1 (Conv2D) (None, 128, 128, 128) 73856
_________________________________________________________________
block2_conv2 (Conv2D) (None, 128, 128, 128) 147584
_________________________________________________________________
block2_pool (MaxPooling2D) (None, 64, 64, 128) 0
_________________________________________________________________
block3_conv1 (Conv2D) (None, 64, 64, 256) 295168
_________________________________________________________________
block3_conv2 (Conv2D) (None, 64, 64, 256) 590080
_________________________________________________________________
block3_conv3 (Conv2D) (None, 64, 64, 256) 590080
_________________________________________________________________
block3_conv4 (Conv2D) (None, 64, 64, 256) 590080
_________________________________________________________________
block3_pool (MaxPooling2D) (None, 32, 32, 256) 0
_________________________________________________________________
block4_conv1 (Conv2D) (None, 32, 32, 512) 1180160
_________________________________________________________________
block4_conv2 (Conv2D) (None, 32, 32, 512) 2359808
_________________________________________________________________
block4_conv3 (Conv2D) (None, 32, 32, 512) 2359808
_________________________________________________________________
block4_conv4 (Conv2D) (None, 32, 32, 512) 2359808
_________________________________________________________________
block4_pool (MaxPooling2D) (None, 16, 16, 512) 0
_________________________________________________________________
block5_conv1 (Conv2D) (None, 16, 16, 512) 2359808
_________________________________________________________________
block5_conv2 (Conv2D) (None, 16, 16, 512) 2359808
_________________________________________________________________
block5_conv3 (Conv2D) (None, 16, 16, 512) 2359808
_________________________________________________________________
block5_conv4 (Conv2D) (None, 16, 16, 512) 2359808
=================================================================
Total params: 20,024,384
Trainable params: 20,024,384
Non-trainable params: 0
_________________________________________________________________
| Apache-2.0 | SRGAN_Final.ipynb | ashishpatel26/SRGAN-Keras |
Sample Images | def sample_images(data_dir, batch_size, high_resolution_shape, low_resolution_shape):
# Make a list of all images inside the data directory
all_images = glob.glob(data_dir)
# Choose a random batch of images
images_batch = np.random.choice(all_images, size=batch_size)
low_resolution_images = []
high_resolution_images = []
for img in images_batch:
# Get an ndarray of the current image
img1 = imread(img, mode='RGB')
img1 = img1.astype(np.float32)
# Resize the image
img1_high_resolution = imresize(img1, high_resolution_shape)
img1_low_resolution = imresize(img1, low_resolution_shape)
# Do a random horizontal flip
if np.random.random() < 0.5:
img1_high_resolution = np.fliplr(img1_high_resolution)
img1_low_resolution = np.fliplr(img1_low_resolution)
high_resolution_images.append(img1_high_resolution)
low_resolution_images.append(img1_low_resolution)
# Convert the lists to Numpy NDArrays
return np.array(high_resolution_images), np.array(low_resolution_images) | _____no_output_____ | Apache-2.0 | SRGAN_Final.ipynb | ashishpatel26/SRGAN-Keras |
Save Images | def compute_psnr(original_image, generated_image):
original_image = tf.convert_to_tensor(original_image, dtype = tf.float32)
generated_image = tf.convert_to_tensor(generated_image, dtype = tf.float32)
psnr = tf.image.psnr(original_image, generated_image, max_val = 1.0)
return tf.math.reduce_mean(psnr, axis = None, keepdims = False, name = None)
def plot_psnr(psnr):
psnr_means = psnr['psnr_quality']
plt.figure(figsize = (10,8))
plt.plot(psnr_means)
plt.xlabel('Epochs')
plt.ylabel('PSNR')
plt.title('PSNR')
def compute_ssim(original_image, generated_image):
original_image = tf.convert_to_tensor(original_image, dtype = tf.float32)
generated_image = tf.convert_to_tensor(generated_image, dtype = tf.float32)
ssim = tf.image.ssim(original_image, generated_image, max_val = 1.0, filter_size = 11, filter_sigma = 1.5, k1 = 0.01, )
return tf.math.reduce_mean(ssim, axis = None, keepdims = False, name = None)
def plot_ssim(ssim):
ssim_means = ssim['ssim_quality']
plt.figure(figsize = (10,8))
plt.plot(ssim_means)
plt.xlabel('Epochs')
plt.ylabel('SSIM')
plt.title('SSIM')
def plot_loss(losses):
d_loss = losses['d_history']
g_loss = losses['g_history']
plt.figure(figsize = (10,8))
plt.plot(d_loss, label = "Discriminator loss")
plt.plot(g_loss, label = "Generator Loss")
plt.xlabel("Epochs")
plt.ylabel('Loss')
plt.title("Loss")
plt.legend()
def save_images(low_resolution_image, original_image, generated_image, path, psnr, ssim):
"""
Save low-resolution, high-resolution(original) and
generated high-resolution images in a single image
"""
fig = plt.figure(figsize=(12,5))
ax = fig.add_subplot(1, 3, 1)
ax.imshow(low_resolution_image)
ax.axis("off")
ax.set_title("Low-resolution ")
ax = fig.add_subplot(1, 3, 2)
ax.imshow(original_image)
ax.axis("off")
ax.set_title(f"High-resolution\nPSNR : {psnr}")
# ax.set_xlabel(f"PSNR : {psnr}")
# ax.save(hr_path,bbox_inches='tight',transparent=True, pad_inches=0)
ax = fig.add_subplot(1, 3, 3)
ax.imshow(np.squeeze(generated_image), cmap = plt.get_cmap(name = 'gray'))
ax.axis("off")
ax.set_title(f"Generated\nSSIM : {ssim}" )
# ax.set_xlabel(f"SSIM : {ssim}")
# ax.save(pr_path, bbox_inches='tight',transparent=True, pad_inches=0)
plt.savefig(path) | _____no_output_____ | Apache-2.0 | SRGAN_Final.ipynb | ashishpatel26/SRGAN-Keras |
Write a Log | from PIL import Image
from skimage.metrics import structural_similarity as ssim | _____no_output_____ | Apache-2.0 | SRGAN_Final.ipynb | ashishpatel26/SRGAN-Keras |
Final SRGAN Execution | losses = {'d_history' : [], "g_history": []}
psnr = {'psnr_quality' : []}
ssim = {'ssim_quality' : []}
from tqdm.notebook import tqdm
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
data_dir = "/content/train/*.*"
os.makedirs("results", exist_ok=True)
# os.makedirs("HR", exist_ok=True)
# os.makedirs("PR", exist_ok=True)
# os.makedirs("LR", exist_ok=True)
epochs = 1000
batch_size = 1
mode = 'train'
# Shape of low-resolution and high-resolution images
low_resolution_shape = (64, 64, 3)
high_resolution_shape = (256, 256, 3)
# Common optimizer for all networks
common_optimizer = Adam(0.0002, 0.5)
if mode == 'train':
# Build and compile VGG19 network to extract features
vgg = build_vgg()
vgg.trainable = False
vgg.compile(loss='mse', optimizer=common_optimizer, metrics=['accuracy'])
# Build and compile the discriminator network
discriminator = build_discriminator()
discriminator.compile(loss='mse', optimizer=common_optimizer, metrics=['accuracy'])
# Build the generator network
generator = build_generator()
"""
Build and compile the adversarial model
"""
# Input layers for high-resolution and low-resolution images
input_high_resolution = Input(shape=high_resolution_shape)
input_low_resolution = Input(shape=low_resolution_shape)
# Generate high-resolution images from low-resolution images
generated_high_resolution_images = generator(input_low_resolution)
# Extract feature maps of the generated images
features = vgg(generated_high_resolution_images)
# Make the discriminator network as non-trainable
discriminator.trainable = False
# Get the probability of generated high-resolution images
probs = discriminator(generated_high_resolution_images)
# Create and compile an adversarial model
adversarial_model = Model([input_low_resolution, input_high_resolution], [probs, features])
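    # The compile call below weights the adversarial (binary cross-entropy) term at 1e-3
    # relative to the VGG-feature (content/perceptual) MSE term, following the SRGAN paper.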
adversarial_model.compile(loss=['binary_crossentropy', 'mse'], loss_weights=[1e-3, 1], optimizer=common_optimizer)
# Add Tensorboard
tensorboard = TensorBoard(log_dir="logs/".format(time.time()))
tensorboard.set_model(generator)
tensorboard.set_model(discriminator)
for epoch in tqdm(range(epochs)):
# print("Epoch:{}".format(epoch))
"""
Train the discriminator network
"""
# Sample a batch of images
high_resolution_images, low_resolution_images = sample_images(data_dir=data_dir, batch_size=batch_size,
low_resolution_shape=low_resolution_shape,
high_resolution_shape=high_resolution_shape)
# Normalize images
high_resolution_images = high_resolution_images / 127.5 - 1.
low_resolution_images = low_resolution_images / 127.5 - 1.
# Generate high-resolution images from low-resolution images
generated_high_resolution_images = generator.predict(low_resolution_images)
# Generate batch of real and fake labels
real_labels = np.ones((batch_size, 16, 16, 1))
fake_labels = np.zeros((batch_size, 16, 16, 1))
# Train the discriminator network on real and fake images
d_loss_real = discriminator.train_on_batch(high_resolution_images, real_labels)
d_loss_fake = discriminator.train_on_batch(generated_high_resolution_images, fake_labels)
# Calculate total discriminator loss
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
# print("d_loss:", d_loss)
"""
Train the generator network
"""
# Sample a batch of images
high_resolution_images, low_resolution_images = sample_images(data_dir=data_dir, batch_size=batch_size,
low_resolution_shape=low_resolution_shape,
high_resolution_shape=high_resolution_shape)
# Normalize images
high_resolution_images = high_resolution_images / 127.5 - 1.
low_resolution_images = low_resolution_images / 127.5 - 1.
# Extract feature maps for real high-resolution images
image_features = vgg.predict(high_resolution_images)
# Train the generator network
g_loss = adversarial_model.train_on_batch([low_resolution_images, high_resolution_images], [real_labels, image_features])
# print("g_loss:", g_loss)
# Write the losses to Tensorboard
# write_log(tensorboard, 'g_loss', g_loss[0], epoch)
# write_log(tensorboard, 'd_loss', d_loss[0], epoch)
# Sample and save images after every 100 epochs
if epoch % 100 == 0:
high_resolution_images, low_resolution_images = sample_images(data_dir=data_dir, batch_size=batch_size,
low_resolution_shape=low_resolution_shape,
high_resolution_shape=high_resolution_shape)
# Normalize images
high_resolution_images = high_resolution_images / 127.5 - 1.
low_resolution_images = low_resolution_images / 127.5 - 1.
generated_images = generator.predict_on_batch(low_resolution_images)
ps = compute_psnr(high_resolution_images, generated_images)
ss = compute_ssim(high_resolution_images, generated_images)
print("-"*15)
print("Epoch:{}".format(epoch))
print(f"D_loss : {d_loss}")
print(f"G_loss : {g_loss}")
print(f"PSNR : {np.around(ps,decimals=2)}")
print(f"SSIM: {np.around(ss,decimals=2)}")
#***************************************
# Store into list
#***************************************
losses['d_history'].append(d_loss)
g_loss = 0.5 * (g_loss[1])
losses['g_history'].append(g_loss)
psnr['psnr_quality'].append(ps)
ssim['ssim_quality'].append(ss)
for index, img in enumerate(generated_images):
img = np.mean(img, axis=2)
save_images(low_resolution_images[index], high_resolution_images[index], img, path="/content/results/img_{}_{}".format(epoch, index), psnr=ps, ssim=ss)
# gn_im = np.squeeze(img).astype(np.float16)
# hr_im = high_resolution_images[index].astype(np.float16)
# lr_im = low_resolution_images[index].astype(np.float16)
# psnr = psnr(hr_im,gn_im).numpy()
# ssim_Score = ssim(hr_im,gn_im, multichannel=True)
# print("PSNR : ", psnr)
# print("SSIM Loss : ", ssim_Score)
# plt.imshow(np.squeeze(img), cmap = plt.get_cmap(name = 'gray'))
# plt.axis('off')
# plt.savefig(f"PR/im_PR_{epoch}_{index}.png", dpi=100, pad_inches=0.0, bbox_inches='tight')
# plt.clf()
# plt.imshow(high_resolution_images[index])
# plt.axis('off')
# plt.savefig(f"HR/im_HR_{epoch}_{index}.png", dpi=100, pad_inches=0.0, bbox_inches='tight')
# plt.clf()
# plt.imshow(low_resolution_images[index])
# plt.axis('off')
# plt.savefig(f"LR/im_LR_{epoch}_{index}.png", dpi=100, pad_inches=0.0, bbox_inches='tight')
# plt.clf()
# Save models
generator.save_weights("generator.h5")
discriminator.save_weights("discriminator.h5")
if mode == 'predict':
# Build and compile the discriminator network
discriminator = build_discriminator()
# Build the generator network
generator = build_generator()
# Load models
generator.load_weights("generator.h5")
discriminator.load_weights("discriminator.h5")
# Get 10 random images
high_resolution_images, low_resolution_images = sample_images(data_dir=data_dir, batch_size=10,
low_resolution_shape=low_resolution_shape,
high_resolution_shape=high_resolution_shape)
# Normalize images
high_resolution_images = high_resolution_images / 127.5 - 1.
low_resolution_images = low_resolution_images / 127.5 - 1.
# Generate high-resolution images from low-resolution images
generated_images = generator.predict_on_batch(low_resolution_images)
# generated_images = cv2.cvtColor(generated_images, cv2.COLOR_BGR2GRAY)
# Save images
for index, img in enumerate(generated_images):
img = np.mean(img, axis=2)
# save_images(low_resolution_images[index], high_resolution_images[index], img, path="/content/results/gen_{}".format(index))
plot_loss(losses)
plot_psnr(psnr)
plot_ssim(ssim)
!zip -r results.zip /content/results
| _____no_output_____ | Apache-2.0 | SRGAN_Final.ipynb | ashishpatel26/SRGAN-Keras |
Software Design for Scientific Computing---- Unit 5: Integrating high-level and low-level languages. Agenda for Unit 5- JIT (Numba)- **Cython.**- Integrating Python with FORTRAN.- Integrating Python with C. Recap- We wrote the Python code.- We moved everything to numpy.- We profiled.- We parallelized (joblib/dask).- We profiled.- We used Numba.- We profiled.- **If we get this far** Cython Imports | # we are going to do some profiling
import timeit
import math
# we are going to plot
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np | _____no_output_____ | BSD-3-Clause | unidad5/01_Cython.ipynb | leliel12/diseno_sci_sfw |
Numba vs Cython- Cython is a static optimizing compiler for both the Python programming language and the extended Cython language. - It makes writing C extensions for Python as ""easy"" as Python itself.- Instead of analyzing bytecode and generating IR, Cython uses a superset of Python syntax that is then translated into C code (you write C code with Python syntax).- Unlike plain C, you **generally** do not have to worry about the low-level Python API calls (Cython expands them into C code automatically).- Unlike Numba, all the code must be kept in separate special files (`*.pyx`). - Cython parses and translates those files into C code and then compiles it with the available C compiler. Why would we prefer Numba/JIT over Cython?- Learning curve (it is another language)- You need *some* experience with C **AND** Python- Packaging becomes a bit more complex.- All Python code is valid Cython Why would we prefer Cython over Numba/C/Fortran?Easy interaction with C/C++ libraries and full integration with Python objects and classes Example - Mandelbrot Fractal, Pure Python | def mandel(x, y, max_iters):
"""
Given the real and imaginary parts of a complex number,
determine if it is a candidate for membership in the Mandelbrot
set given a fixed number of iterations.
"""
i = 0
c = complex(x,y)
z = 0.0j
for i in range(max_iters):
z = z * z + c
if (z.real * z.real + z.imag * z.imag) >= 4:
return i
return 255
def create_fractal(min_x, max_x, min_y, max_y, image, iters):
height = image.shape[0]
width = image.shape[1]
pixel_size_x = (max_x - min_x) / width
pixel_size_y = (max_y - min_y) / height
for x in range(width):
real = min_x + x * pixel_size_x
for y in range(height):
imag = min_y + y * pixel_size_y
color = mandel(real, imag, iters)
image[y, x] = color
return image | _____no_output_____ | BSD-3-Clause | unidad5/01_Cython.ipynb | leliel12/diseno_sci_sfw |
Example - Mandelbrot Fractal, Pure Python | # create the image
image = np.zeros((500 * 2, 750 * 2), dtype=np.uint8)
# run the computation
normal = %timeit -o create_fractal(-2.0, 1.0, -1.0, 1.0, image, 20)
# show everything
plt.imshow(image, cmap="viridis"); | 4.09 s ± 22.2 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
| BSD-3-Clause | unidad5/01_Cython.ipynb | leliel12/diseno_sci_sfw |
Example - Mandelbrot Fractal, Cython | !pip install Cython
%load_ext Cython
%%cython --annotate
def mandel(x, y, max_iters):
"""
Given the real and imaginary parts of a complex number,
determine if it is a candidate for membership in the Mandelbrot
set given a fixed number of iterations.
"""
i = 0
c = complex(x,y)
z = 0.0j
for i in range(max_iters):
z = z * z + c
if (z.real * z.real + z.imag * z.imag) >= 4:
return i
return 255
def create_fractal(min_x, max_x, min_y, max_y, image, iters):
height = image.shape[0]
width = image.shape[1]
pixel_size_x = (max_x - min_x) / width
pixel_size_y = (max_y - min_y) / height
for x in range(width):
real = min_x + x * pixel_size_x
for y in range(height):
imag = min_y + y * pixel_size_y
color = mandel(real, imag, iters)
image[y, x] = color
return image | _____no_output_____ | BSD-3-Clause | unidad5/01_Cython.ipynb | leliel12/diseno_sci_sfw |
Example - Mandelbrot Fractal, Cython | # create the image
image = np.zeros((500 * 2, 750 * 2), dtype=np.uint8)
# run the computation
normal = %timeit -o create_fractal(-2.0, 1.0, -1.0, 1.0, image, 20)
# show everything
plt.imshow(image, cmap="viridis"); | 3.41 s ± 64.2 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
| BSD-3-Clause | unidad5/01_Cython.ipynb | leliel12/diseno_sci_sfw |
Cython Hello World 1/2- Since Cython can accept almost any valid Python source file, one of the hardest parts of getting started is figuring out how to compile your extension.- So let's start with the canonical Python hello world: ```python # helloworld.pyx print("Hello World")```- You can inspect the generated code with - `cython -3 helloworld.pyx`, - or `cython -3 helloworld.pyx -cplus` Cython Hello World 2/2- And in `setup.py`: ```python from distutils.core import setup from Cython.Build import cythonize setup( ... ext_modules=cythonize("helloworld.pyx"))```- Run `python setup.py build_ext --inplace`- Test it with `python -c "import helloworld"` | import sys
sys.path.insert(0, "./cython")
import helloworld
helloworld.__file__ | Hello World
| BSD-3-Clause | unidad5/01_Cython.ipynb | leliel12/diseno_sci_sfw |
Cython - Prime Numbers | %%cython
def primes(int nb_primes):
cdef int n, i, len_p
cdef int p[1000]
if nb_primes > 1000:
nb_primes = 1000
len_p = 0 # The current number of elements in p.
n = 2
while len_p < nb_primes:
# Is n prime?
for i in p[:len_p]:
if n % i == 0:
break
# If no break occurred in the loop, we have a prime.
else:
p[len_p] = n
len_p += 1
n += 1
# Let's return the result in a python list:
result_as_list = [prime for prime in p[:len_p]]
return result_as_list
print(primes(100)) | [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541]
| BSD-3-Clause | unidad5/01_Cython.ipynb | leliel12/diseno_sci_sfw |
Cython - Prime Numbers - Numpy | %%cython
import numpy as np # the import goes in the cell that gets compiled
def primes_np(int nb_primes):
# Memoryview on a NumPy array
narr = np.empty(nb_primes, dtype=np.dtype(int))
cdef long [:] narr_view = narr
cdef long len_p = 0 # The current number of elements in p.
cdef long n = 2
while len_p < nb_primes:
# Is n prime?
for i in narr_view[:len_p]:
if n % i == 0:
break
# If no break occurred in the loop, we have a prime.
else:
narr_view[len_p] = n
len_p += 1
n += 1
return narr
print(primes_np(2000)) | [ 2 3 5 ... 17383 17387 17389]
| BSD-3-Clause | unidad5/01_Cython.ipynb | leliel12/diseno_sci_sfw |
Cython - Prime Numbers - Profiling | %%cython --annotate
import numpy as np # the import goes in the cell that gets compiled
cdef primes_np(unsigned int nb_primes):
# Memoryview on a NumPy array
narr = np.empty(nb_primes, dtype=np.dtype(int))
cdef long [:] narr_view = narr
cdef long len_p = 0 # The current number of elements in p.
cdef long n = 2
while len_p < nb_primes:
# Is n prime?
for i in narr_view[:len_p]:
if n % i == 0:
break
# If no break occurred in the loop, we have a prime.
else:
narr_view[len_p] = n
len_p += 1
n += 1
return narr | _____no_output_____ | BSD-3-Clause | unidad5/01_Cython.ipynb | leliel12/diseno_sci_sfw |
What if we use the C++ vector library? | %%cython --cplus
from libcpp.vector cimport vector
def primes_cpp(unsigned int nb_primes):
cdef int n, i
cdef vector[int] p
p.reserve(nb_primes) # allocate memory for 'nb_primes' elements.
n = 2
while p.size() < nb_primes: # size() for vectors is similar to len()
for i in p:
if n % i == 0:
break
else:
p.push_back(n) # push_back is similar to append()
n += 1
# Vectors are automatically converted to Python
# lists when converted to Python objects.
return p | _____no_output_____ | BSD-3-Clause | unidad5/01_Cython.ipynb | leliel12/diseno_sci_sfw |
Benchmarks | %timeit primes(1000)
%timeit primes_np(1000)
%timeit primes_cpp(1000) | 2.3 ms ± 58.9 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
113 ms ± 1.35 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
2.29 ms ± 19.2 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
| BSD-3-Clause | unidad5/01_Cython.ipynb | leliel12/diseno_sci_sfw |
Integrating pure C with Cython- Suppose we have already written this super-complex C code in a file called `hello_c.c`: ```C #include <stdio.h> void f(); void f() { printf("%s", "Hello world from a pure C function!\n");}```- And we want to integrate it into Python- We have to write the wrapper `hello_cwrapper.pyx`: ```cython cdef extern from "hello_c.c": void f() cpdef myf(): f() ``` Then add it to `setup.py` External library from the notebook | %%cython -I ./cython/
cdef extern from "hello_c.c":
void f()
cpdef myf():
f()
myf() ## THIS PRINTS TO THE CONSOLE NO MATTER WHAT | _____no_output_____ | BSD-3-Clause | unidad5/01_Cython.ipynb | leliel12/diseno_sci_sfw |
Setup. Import the standard Python libraries that are used in this lab. | import boto3
from time import sleep
import subprocess
import pandas as pd
import json
import time | _____no_output_____ | Apache-2.0 | forecast/1.Getting_Data_Ready(Revenue).ipynb | veerathp/forecastimmersionday |
Import sagemaker and get the execution role, which provides the role ARN. | import sagemaker
region = boto3.Session().region_name
smclient = boto3.Session().client('sagemaker')
from sagemaker import get_execution_role
role_arn = get_execution_role()
print(role_arn)
#Make sure this role has the forecast permissions set to be able to use S3 | arn:aws:iam::226154724374:role/service-role/AmazonSageMaker-ExecutionRole-wkshop
| Apache-2.0 | forecast/1.Getting_Data_Ready(Revenue).ipynb | veerathp/forecastimmersionday |
The last part of the setup process is to validate that your account can communicate with Amazon Forecast; the cell below does just that. | session = boto3.Session(region_name='us-east-1')
forecast = session.client(service_name='forecast')
forecastquery = session.client(service_name='forecastquery') | _____no_output_____ | Apache-2.0 | forecast/1.Getting_Data_Ready(Revenue).ipynb | veerathp/forecastimmersionday |
Data Preparation | df = pd.read_csv("../data/COF_yearly_Revenue_Data.csv", dtype = object, names=['metric_name','timestamp','metric_value'])
df.head(3) | _____no_output_____ | Apache-2.0 | forecast/1.Getting_Data_Ready(Revenue).ipynb | veerathp/forecastimmersionday |
Create the training set and validation set. Use the last year's revenue as the validation set. | # Select 1996 to 2017 in one data frame
df_1996_2017 = df[(df['timestamp'] >= '1995-12-31') & (df['timestamp'] <= '2017-12-31')]
# Select the year 2018 separately for validation
df = pd.read_csv("../data/COF_yearly_Revenue_Data.csv", dtype = object, names=['metric_name','timestamp','metric_value'])
df_2018 = df[(df['timestamp'] >= '2018-12-31')]
df_1996_2017
df_2018 | _____no_output_____ | Apache-2.0 | forecast/1.Getting_Data_Ready(Revenue).ipynb | veerathp/forecastimmersionday |
Now export them to CSV files and place them into your data folder. | df_1996_2017.to_csv("../data/cof-revenue-train.csv", header=False, index=False)
df_2018.to_csv("../data/cof-revenue-validation.csv", header=False, index=False) | _____no_output_____ | Apache-2.0 | forecast/1.Getting_Data_Ready(Revenue).ipynb | veerathp/forecastimmersionday |
Define the S3 bucket where we will upload the data and where Amazon Forecast will pick it up later | bucket_name = "sagemaker-capone-forecast-useast1-03" # Remember to change this to the correct bucket name used for Capital One
folder_name = "cone" # change this to the folder name of the user. | _____no_output_____ | Apache-2.0 | forecast/1.Getting_Data_Ready(Revenue).ipynb | veerathp/forecastimmersionday |
Upload the data to S3 | s3 = session.client('s3')
key=folder_name+"/cof-revenue-train.csv"
s3.upload_file(Filename="../data/cof-revenue-train.csv", Bucket=bucket_name, Key=key) | _____no_output_____ | Apache-2.0 | forecast/1.Getting_Data_Ready(Revenue).ipynb | veerathp/forecastimmersionday |
Creating the Dataset Group and Dataset In Amazon Forecast, a dataset is a collection of file(s) which contain data that is relevant for a forecasting task. A dataset must conform to a schema provided by Amazon Forecast. More details about `Domain` and dataset types can be found in the [documentation](https://docs.aws.amazon.com/forecast/latest/dg/howitworks-domains-ds-types.html). For this example, we are using the [METRICS](https://docs.aws.amazon.com/forecast/latest/dg/metrics-domain.html) domain with 3 required attributes: `metric_name`, `timestamp` and `metric_value`. It is also important to convey how Amazon Forecast should interpret your time-series information. The cell immediately below does just that; the next one configures your variable names for the Project, DatasetGroup, and Dataset. | DATASET_FREQUENCY = "Y"
TIMESTAMP_FORMAT = "yyyy-mm-dd"
project = 'cof_revenue_forecastdemo'
datasetName= project+'_ds'
datasetGroupName= project +'_dsg'
s3DataPath = "s3://"+bucket_name+"/"+key | _____no_output_____ | Apache-2.0 | forecast/1.Getting_Data_Ready(Revenue).ipynb | veerathp/forecastimmersionday |
Create the Dataset Group | create_dataset_group_response = forecast.create_dataset_group(DatasetGroupName=datasetGroupName,
Domain="METRICS",
)
datasetGroupArn = create_dataset_group_response['DatasetGroupArn']
forecast.describe_dataset_group(DatasetGroupArn=datasetGroupArn) | _____no_output_____ | Apache-2.0 | forecast/1.Getting_Data_Ready(Revenue).ipynb | veerathp/forecastimmersionday |
Create the Schema | # Specify the schema of your dataset here. Make sure the order of columns matches the raw data files.
schema ={
"Attributes":[
{
"AttributeName":"metric_name",
"AttributeType":"string"
},
{
"AttributeName":"timestamp",
"AttributeType":"timestamp"
},
{
"AttributeName":"metric_value",
"AttributeType":"float"
}
]
} | _____no_output_____ | Apache-2.0 | forecast/1.Getting_Data_Ready(Revenue).ipynb | veerathp/forecastimmersionday |
Create the Dataset | response=forecast.create_dataset(
Domain="METRICS",
DatasetType='TARGET_TIME_SERIES',
DatasetName=datasetName,
DataFrequency=DATASET_FREQUENCY,
Schema = schema
)
datasetArn = response['DatasetArn']
forecast.describe_dataset(DatasetArn=datasetArn) | _____no_output_____ | Apache-2.0 | forecast/1.Getting_Data_Ready(Revenue).ipynb | veerathp/forecastimmersionday |
Add Dataset to Dataset Group | forecast.update_dataset_group(DatasetGroupArn=datasetGroupArn, DatasetArns=[datasetArn]) | _____no_output_____ | Apache-2.0 | forecast/1.Getting_Data_Ready(Revenue).ipynb | veerathp/forecastimmersionday |
Create Data Import Job Now that Forecast knows how to understand the CSV we are providing, the next step is to import the data from S3 into Amazon Forecast. | datasetImportJobName = 'EP_DSIMPORT_JOB_TARGET'
ds_import_job_response=forecast.create_dataset_import_job(DatasetImportJobName=datasetImportJobName,
DatasetArn=datasetArn,
DataSource= {
"S3Config" : {
"Path":s3DataPath,
"RoleArn": role_arn
}
},
TimestampFormat=TIMESTAMP_FORMAT
)
ds_import_job_arn=ds_import_job_response['DatasetImportJobArn']
print(ds_import_job_arn) | arn:aws:forecast:us-east-1:457927431838:dataset-import-job/cof_revenue_forecastdemo_ds/EP_DSIMPORT_JOB_TARGET
| Apache-2.0 | forecast/1.Getting_Data_Ready(Revenue).ipynb | veerathp/forecastimmersionday |
Check the status of the dataset import job; when the status changes from **CREATE_IN_PROGRESS** to **ACTIVE**, we can continue to the next steps. Depending on the data size, it can take around 10 minutes to become **ACTIVE** (typically 5 to 10 minutes). | while True:
dataImportStatus = forecast.describe_dataset_import_job(DatasetImportJobArn=ds_import_job_arn)['Status']
print(dataImportStatus)
if dataImportStatus != 'ACTIVE' and dataImportStatus != 'CREATE_FAILED':
sleep(30)
else:
break
forecast.describe_dataset_import_job(DatasetImportJobArn=ds_import_job_arn)
print("DatasetArn: ")
print(datasetGroupArn) | DatasetArn:
arn:aws:forecast:us-east-1:457927431838:dataset-group/cof_revenue_forecastdemo_dsg
| Apache-2.0 | forecast/1.Getting_Data_Ready(Revenue).ipynb | veerathp/forecastimmersionday |
def nn_topology(num_layers, nodes_per_layer, connections): to-do | topology = np.array([
[ 'input',
[
['x', '-'],
['y', '-'],
['classe', '-']
],
],
[ 'n1',
[
['w10', 1],
['w11', '-'],
['w12', '-'],
0, # delta1
0 # o1
]
],
[ 'n2',
[
['w20', 1],
['w21', '-'],
['w22', '-'],
0, # delta2
0 # o2
]
],
[ 'n3',
[
['w30', 1],
['w31', '-'],
['w32', '-'],
0, # delta3
0 # o3
]
]
])
print(topology)
update_weights(topology, [0.4, 1.4, 1.0, -1.5, 1, -0.5, 5.4, -8.0, -10.0])
print(topology)
training_data
update_input(topology, training_data[0][0], training_data[1][0], training_data[2][0])
print(topology)
def calculate_ssod(topology): # calculate sums, sigmoids, outputs and deltas
sum1 = 1*topology[1][1][0][1] + topology[0][1][0]*topology[1][1][1][1] + topology[0][1][1]*topology[1][1][2][1]
sig1 = sigmoid(sum1)
out1 = 1 / (1 + math.exp(-sum1))
sum2 = 1*topology[2][1][0][1] + topology[0][1][0]*topology[2][1][1][1] + topology[0][1][1]*topology[2][1][2][1]
sig2 = sigmoid(sum2)
out2 = 1 / (1 + math.exp(-sum2))
sum3 = 1*topology[3][1][0][1] + out1*topology[3][1][1][1] + out2*topology[3][1][2][1]
sig3 = sigmoid(sum3)
out3 = 1 / (1 + math.exp(-sum3))
delta3 = out3*(1-out3)*(topology[0][1][2] - out3)
delta1 = out1*(1-out1)*topology[3][1][1][1]*delta3
delta2 = out2*(1-out2)*topology[3][1][2][1]*delta3
return np.array([['sums', [sum1, sum2, sum3]], ['sigmoids', [sig1, sig2, sig3]], ['outputs', [out1, out2, out3]], ['deltas', [delta1, delta2, delta3]]])
backpropagation = calculate_ssod(topology)
print('sum1, sum2, sum3: ')
print(backpropagation[0][1])
print('\nsig1, sig2, sig3: ')
print(backpropagation[1][1])
print('\nout1, out2, out3: ')
print(backpropagation[2][1])
print('\ndelta1, delta2, delta3: ')
print(backpropagation[3][1])
print('topology:')
print(topology)
print('\nbackpropagation:')
print(backpropagation)
def calculate_new_weights(topology, backpropagation, learning_factor):
for i in range(3):
for j in range(3):
if j == 0:
inputW=1
elif i==2: # for the 3rd neuron, the input is out1 or out2
inputW = backpropagation[2][1][j-1]
else:
inputW = topology[0][1][j-1]
topology[i+1][1][j][1] += learning_factor*backpropagation[3][1][i]*inputW
calculate_new_weights(topology, backpropagation, 0.1)
print(topology)
# reset the topology
topology = np.array([
[ 'input',
[
['x', '-'],
['y', '-'],
['classe', '-']
],
],
[ 'n1',
[
['w10', 1],
['w11', '-'],
['w12', '-'],
0, # delta1
0 # o1
]
],
[ 'n2',
[
['w20', 1],
['w21', '-'],
['w22', '-'],
0, # delta2
0 # o2
]
],
[ 'n3',
[
['w30', 1],
['w31', '-'],
['w32', '-'],
0, # delta3
0 # o3
]
]
])
update_weights(topology, [0.4, 1.4, 1.0, -1.5, 1, -0.5, 5.4, -8.0, -10.0])
print(topology)
epoch = 1
error = 1
topology_not_fitted = topology
update_input(topology_not_fitted, training_data[0][0], training_data[1][0], training_data[2][0])
out1_not_fitted = []
out2_not_fitted = []
out3_not_fitted = []
while(error != 0):
error = 0
out1 = []
out2 = []
out3 = []
for i in range(len(training_data[0])):
update_input(topology, training_data[0][i], training_data[1][i], training_data[2][i])
backpropagation = calculate_ssod(topology)
calculate_new_weights(topology, backpropagation, 0.1)
out1.append(backpropagation[2][1][0])
out2.append(backpropagation[2][1][1])
out3.append(backpropagation[2][1][2])
if round(backpropagation[2][1][2]) != training_data[2][i]:
error = 1
if epoch==1:
out1_not_fitted = out1
out2_not_fitted = out2
out3_not_fitted = out3
print('epoch:' + str(epoch))
epoch += 1 | epoch:1
epoch:2
epoch:3
epoch:4
epoch:5
epoch:6
epoch:7
epoch:8
epoch:9
epoch:10
epoch:11
epoch:12
epoch:13
epoch:14
epoch:15
epoch:16
epoch:17
epoch:18
epoch:19
epoch:20
epoch:21
epoch:22
epoch:23
epoch:24
epoch:25
epoch:26
epoch:27
epoch:28
epoch:29
epoch:30
epoch:31
epoch:32
epoch:33
epoch:34
epoch:35
epoch:36 | MIT | lab4/notebooks/50046-nn.ipynb | brun0vieira/psn |
TAKE A CLOSER LOOK AT THE UNTRAINED MODEL | draw_plot(topology_not_fitted, out1_not_fitted, out2_not_fitted, out3_not_fitted, 'Modelo Não Treinado')
draw_plot(topology, out1, out2, out3, 'Modelo Treinado (Erro zero)') | _____no_output_____ | MIT | lab4/notebooks/50046-nn.ipynb | brun0vieira/psn |
Chapter 7: Convolutional Neural Networks

7.1 Overall structure A CNN, like the neural networks seen so far, is built by combining multiple layers. CNNs introduce two new layer types: the "Convolution layer" and the "Pooling layer". The networks used up to now were fully-connected: every neuron in adjacent layers was connected, and this was implemented as the Affine layer. As an example, a fully-connected network stacks several "Affine layer → ReLU activation layer" blocks and uses a Softmax layer at the output. In a CNN, one block is instead "Convolution layer → ReLU → (Pooling layer, sometimes omitted)". In addition, the layers close to the output commonly still use "Affine → ReLU", and the output layer generally uses "Affine → Softmax".

7.2 Convolution layer

7.2.1 Problems with fully-connected layers In a fully-connected layer all neurons of adjacent layers are connected and the number of outputs can be chosen freely. The problem is that the shape of the data is ignored. When the input is an image it has a 3-dimensional shape (height, width, channels), but to feed it to a fully-connected layer it must be flattened into a 1-dimensional array. Essential structure such as spatial proximity is therefore ignored and the information cannot be exploited. A Convolution layer preserves the shape: it treats image data as 3-dimensional and outputs 3-dimensional data to the next layer. In a CNN the input/output data of a convolution layer is sometimes called a "feature map"; the input of a convolution layer is the "input feature map" and its output the "output feature map".

7.2.2 The convolution operation The processing performed by a convolution layer is the "convolution operation", which applies a filter to the input data. For input data with a height/width shape, the filter also has height and width dimensions. For example, the input size may be 4×4, the filter size 3×3 and the output size 2×2. In some literature the word "filter" is also called a "kernel". The convolution operation slides the filter window over the input at a fixed interval; at each location the filter elements are multiplied with the corresponding input elements and summed (this is called a multiply-accumulate operation). Storing the result in the corresponding position of the output and repeating this at every location yields the output of the convolution. In a CNN the weight parameters correspond to the filter parameters. The bias is a single fixed value that is added to every element of the data after the filter has been applied.

7.2.3 Padding Before the convolution is performed, the surroundings of the input data are sometimes filled with fixed values (e.g. 0). This is called "padding". For example, padding of width 1 applied to a 4×4 input surrounds it with a 1-pixel border of zeros (after padding the data is 6×6). Padding is used to control the output size: applying a 3×3 filter to a 4×4 input produces a 2×2 output, and in a deep network the data would keep shrinking until it can no longer be processed. Using padding keeps the data size constant.

7.2.4 Stride The interval at which the filter is applied is called the "stride". With a stride of 2 the filter window moves 2 elements at a time. Increasing the stride makes the output smaller, while increasing the padding makes it larger. Consider the computation of the output size. Let the input size be $(H,W)$, the filter size $(FH,FW)$, the output size $(OH,OW)$, the padding $P$ and the stride $S$. The output size is given by
$$OH = \frac{H + 2P - FH}{S} + 1 \\ OW = \frac{W + 2P - FW}{S} + 1$$
(Example) input size: (4,4), padding: 1, stride: 1, filter size: (3,3)
$$OH = \frac{4+2 \cdot 1-3}{1} + 1 = 4 \\ OW = \frac{4+2 \cdot 1-3}{1} + 1 = 4$$

7.2.5 Convolution over 3-dimensional data For images we need to handle 3-dimensional data with a channel dimension in addition to height and width. A filter is prepared per channel, the convolution is computed per channel, and all results are summed to obtain the output. The number of channels of the input and of the filter must match, and the filter size must be the same for every channel.

7.2.6 Thinking in blocks The 3-dimensional convolution is easier to reason about if data and filters are viewed as rectangular blocks. As multi-dimensional arrays they are written in the order (channel, height, width). For the filter, the filter height is written FH (Filter Height) and the width FW (Filter Width). With a single filter, the output is a feature map with one channel. To obtain multiple output channels, multiple filters (weights) are used. The filter weights are then 4-dimensional data written in the order (output_channel, input_channel, height, width). The bias has one value per channel, so its shape is (FN, 1, 1).

7.2.7 Batch processing Neural network processing bundles the input data into batches. Convolution operations are batched in the same way, so the data flowing through each layer is stored as 4-dimensional data (batch_num, channel, height, width).

7.3 Pooling layer Pooling is an operation that shrinks the spatial (height/width) extent, for example by collapsing a 2×2 region into a single element. "Max pooling" takes the maximum value within the target region. In general the pooling window size and the stride are set to the same value. Besides Max pooling there is also Average pooling, which computes the mean of the target region. In image recognition, Max pooling is the one mainly used.

7.3.1 Properties of the pooling layer * No parameters to learn: unlike the convolution layer, the pooling layer has no learnable parameters (it only takes a maximum). * The number of channels does not change: the computation is performed independently per channel. * Robust to small positional shifts: pooling returns a similar result for small displacements of the input data.

7.4 Implementing the Convolution/Pooling layers

7.4.1 4-dimensional arrays If the 4-dimensional data flowing through the CNN is (10, 1, 28, 28), it corresponds to 10 pieces of data with 1 channel, height 28 and width 28. Random data can be created with x = np.random.rand(10, 1, 28, 28).

7.4.2 Expansion with im2col Implementing the convolution with for loops is slow (in NumPy it is better to avoid for loops for element access), so a function called im2col is used. im2col expands the input in a way that is convenient for the filter: for every location where the filter is applied, the corresponding region of the input is expanded into a single row. Because this expansion writes out the elements that overlap between filter applications, the result has more elements than the original block and therefore consumes more memory.

7.4.3 Implementing the Convolution layer The im2col arguments used by the Convolution class are: * input_data: input data as a 4-dimensional array (number of data, channels, height, width) * filter_h: filter height * filter_w: filter width * stride: stride * pad: padding

The Convolution class implements the following: * __init__: initialization method; receives the filters, bias, stride and padding. * forward: forward-propagation method; computes the output height and width from the parameters set in the initializer, arranges the input data with im2col so the computation can be done like an Affine layer, flattens the weights with reshape( ,-1), takes the dot product of the input array and the weight array, adds the bias, and finally reshapes the result with transpose before returning it.

7.4.4 Implementing the Pooling layer The Pooling layer also expands the input with im2col, but differs in that it operates independently in the channel direction.

7.5 Implementing a CNN The network configuration is "Convolution-ReLU-Pooling-Affine-ReLU-Affine-Softmax".

7.6 Visualizing a CNN

7.6.1 Visualizing the weights of the first layer In the MNIST CNN trained so far, the weights of the first layer have shape (30,1,5,5): 30 filters of size 5×5 with 1 channel. This means each filter can be visualized as a 1-channel grayscale image (sample code ch07/visualize_filter.py). Before training, the filters are randomly initialized and show no regularity in their black-and-white shading, but after training they become regular images. Filters that change with a gradient from white to black, or that contain a connected region (a "blob"), show that the weights have been updated by learning. What do these regular filters look at? They respond to edges and blobs: the filters of a convolution layer extract primitive information such as edges and blobs.

7.6.2 Hierarchical information extraction The first convolution layer extracts low-level information such as edges and blobs. In a CNN with many stacked layers, the information that is extracted (the stimuli that neurons respond strongly to) becomes more abstract as the layers get deeper. AlexNet, an 8-layer CNN for general object recognition (cars, dogs, etc.), stacks many convolution and pooling layers and produces its result with fully-connected layers at the end. Its first layers respond to simple edges, then to textures, and then to increasingly complex object parts.

7.7 Representative CNNs | _____no_output_____ | MIT | notebooks/section7.ipynb | kamujun/exercise_of_deep_larning_from_scratch
|
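The chapter notes above describe the im2col-based Convolution forward pass without any code. A minimal sketch of the idea, assuming a simple loop-based `im2col` (this is not the book's reference implementation; function names are assumptions):

```python
import numpy as np

def out_size(size, filter_size, stride, pad):
    # OH = (H + 2P - FH) / S + 1, the formula from the notes above
    return (size + 2 * pad - filter_size) // stride + 1

def im2col(x, FH, FW, stride=1, pad=0):
    # x has shape (N, C, H, W); returns one row per output position
    N, C, H, W = x.shape
    OH, OW = out_size(H, FH, stride, pad), out_size(W, FW, stride, pad)
    img = np.pad(x, [(0, 0), (0, 0), (pad, pad), (pad, pad)], mode='constant')
    col = np.zeros((N, C, FH, FW, OH, OW))
    for i in range(FH):                      # copy each filter offset once
        for j in range(FW):
            col[:, :, i, j, :, :] = img[:, :, i:i + stride * OH:stride,
                                              j:j + stride * OW:stride]
    return col.transpose(0, 4, 5, 1, 2, 3).reshape(N * OH * OW, -1)

def conv_forward(x, W, b, stride=1, pad=0):
    # W has shape (FN, C, FH, FW); b has shape (FN,)
    FN, C, FH, FW = W.shape
    N, _, H, Wd = x.shape
    OH, OW = out_size(H, FH, stride, pad), out_size(Wd, FW, stride, pad)
    col = im2col(x, FH, FW, stride, pad)     # (N*OH*OW, C*FH*FW)
    col_W = W.reshape(FN, -1).T              # (C*FH*FW, FN)
    out = col @ col_W + b                    # Affine-style dot product plus bias
    return out.reshape(N, OH, OW, FN).transpose(0, 3, 1, 2)

# shape check: (10, 1, 28, 28) input, 30 filters of 5x5, stride 1, no padding
x = np.random.rand(10, 1, 28, 28)
W = np.random.rand(30, 1, 5, 5)
b = np.zeros(30)
print(conv_forward(x, W, b).shape)           # -> (10, 30, 24, 24)
```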
Final Code | import pandas as pd
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics.pairwise import cosine_similarity
from scipy.sparse import csr_matrix
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
'''
This cell reads in the data needed for the model. The two files needed are the combined data files with the
customer ratings and the movie titles files in order for the model to print out recommended movies.
Then once the data is read in, we put it in a pandas dataframe for it to be easier to work with.
'''
mov_titles = pd.read_csv('movie_titles.csv', header = None, encoding = "ISO-8859-1")
mov_titles = mov_titles.drop(columns=[1,3,4,5], axis = 1)
mov_titles = mov_titles.rename(columns = {0:'movie_id',2:'movie_title'})
list_1 = []
with open('combined_data_1.txt', 'r') as f:
for line in f:
splitLine = line.split('/t')
for item in splitLine:
list_1.append(splitLine)
# Then we needed a way to loop through and associate movie id with each record. So we append it back to the list_1
for x in list_1:
for i in x:
if ':' in i:
a = len(i)
y2 = i[0:a]
y = y2.replace(":", "")
x.append(y)
# In this section we want to take everything in list_1 and split out the customer id, rating, and date better.
keys = ['customer_id','customer_rating','date','movie_id']
newList=[]
for x in list_1:
movie_id = x[1]
y = x[0]
d = y.split(',')
d.append(movie_id)
newList.append(d)
# Now that we have the structure by customer, how they rated the movie and all that jazz.
# We need to get rid of the values in the list that are just the movie numbers.
values = []
for x in newList:
if len(x)==4:
values.append(x)
# Finally we can put it into a dataframe and start looking at our data.
df = pd.DataFrame(values, columns=keys)
df = df.replace('\n','', regex=True)
df['date'] = df['date'].astype('datetime64[ns]')
df['customer_rating'] = df['customer_rating'].astype('float')
'''
In this cell, we do a left join of the ratings file and the movie titles file to replace movie id with the title of the movie.
we will use the df3 dataframe later in the model to output movie titles.
'''
df_3 = df.join(mov_titles, lsuffix='movie_id', rsuffix='movie_id')
df_3 = df_3.drop(columns=['movie_idmovie_id'], axis = 1)
'''
This section of code is to create functions to run our code. The PreProcess function takes a given customer id. Then it
filters our dataset for the movies that customer rated. Then we get a list of just those movies and apply it back to
the overall dataset. This way when we run our model, the nearest neighbors aren't the ones with many 0's for ratings.
From the PreProcess function we receive a matrix with the filtered values necessary for modeling.
The matrix_prep function takes the processed matrix and groups it so that we get a nxm matrix where n are the customers
and m are the movies they rated. If there is a movie a customer has not rated it gets a 0. The output is a sparse matrix
with these results.
Finally, the Recommendation function takes the sparse matrix from the matrix_prep function, the customer id,
and how many neighbors you want your model to have. The model is a nearest-neighbor model that calculates the
cosine similarity between the provided customer and the other customers that rated at least one of the
movies that the customer rated.
Then we loop through the customers pulling out the similar customers and put this in a list. We then use this
list to go back and filter for these customers' movies that they rated a 4 or 5. Then we grab this list of movies
and this is the list returned.
'''
def PreProcess(customer_id):
query_index = str(customer_id) #np.random.choice(ddf_3.shape[0])
customer = df[df['customer_id'] == query_index]
customer_movies = customer.loc[:, (customer != 0).any(axis = 0)]
movies_to_include = customer_movies['movie_id'].tolist()
mask = df['movie_id'].isin(movies_to_include)
movies_matrix_for_sim = df.loc[~mask]
movies_matrix_for_sim = movies_matrix_for_sim.append(customer_movies, ignore_index=True)
return movies_matrix_for_sim
def matrix_prep(movies_matrix_for_sim):
ddf_2 = movies_matrix_for_sim.groupby(['customer_id', 'movie_id']).customer_rating.mean().unstack(fill_value=0)
mat_features = csr_matrix(ddf_2.values)
return mat_features
def Recommendation(mat_features, customer_id, n_neighbors):
query_index = str(customer_id)
model_knn = NearestNeighbors(metric='cosine', algorithm='brute')
model_knn = model_knn.fit(mat_features)
distances, indices = model_knn.kneighbors(ddf_2.loc[[query_index]], n_neighbors = n_neighbors)
sim_customers_key = []
sim_customers_vals = []
for i in range(0, len(distances.flatten())):
if i == 0:
#key = ddf_2.index[customer_id]
#sim_customers_key.append(key)
pass
else:
val = ddf_2.index[indices.flatten()[i]]
sim_customers_vals.append(val)
mask = df_3['customer_id'].isin(sim_customers_vals)
sim_customers = df_3.loc[~mask]
#need orig customer to have filtered df_3 table
orig_customer = df_3[df_3['customer_id'] == query_index]
#mask = df_3['customer_id'].isin(sim_customers_key)
#orig_customer = df_3.loc[~mask]
mask = sim_customers['customer_rating'].isin([4,5])
sim_customers = sim_customers.loc[~mask]
orig_movies = orig_customer['movie_title'].values
sim_movies = sim_customers['movie_title'].values
rec_list = [i for i in sim_movies if i not in orig_movies]
return rec_list
'''
This is implementing the PreProcess function for customer 1488844.
'''
matrix_1 = PreProcess(1488844)
'''
Due to memory issues I could not run matrix_prep with the two functions in it, so I ran them separately.
This is the first part of the matrix_prep function.
'''
ddf_2 = matrix_1.groupby(['customer_id', 'movie_id']).customer_rating.mean().unstack(fill_value=0)
'''
Due to memory issues I could not run matrix_prep with the two functions in it, so I ran them separately.
This is the second part of the matrix_prep function.
'''
mat_features = csr_matrix(ddf_2.values)
'''
This is the final function running the model and saving the results for customer 1488844 with 3 neighbors.
'''
recommended_for_1488844 = Recommendation(mat_features,1488844, 3)
'''
These are the first 10 recommended movies for customer 1488844.
'''
recommended_for_1488844[0:10] | _____no_output_____ | Unlicense | Netflix Recommended Movies/DSC 630 Final Code.ipynb | Lemonchasers/Lemonchasers.github.io |
Decision trees example Continuous output example: a prediction model that selects the motor references. Different decision trees are created according to the required selection criteria. **Step 1**: Import the required libraries. | # import numpy package for arrays and stuff
import numpy as np
# import matplotlib.pyplot for plotting our result
import matplotlib.pyplot as plt
# import pandas for importing csv files
import pandas as pd
| _____no_output_____ | Apache-2.0 | notebooks/decision trees/.ipynb_checkpoints/decision_trees_3D_strategy_v2-checkpoint.ipynb | aitorochotorena/multirotor-all |
Import the file `predicted_values_DT.py` containing the decision tree algorithms | import sys
sys.path.insert(0, 'decision trees')
from predicted_values_DT import * | _____no_output_____ | Apache-2.0 | notebooks/decision trees/.ipynb_checkpoints/decision_trees_3D_strategy_v2-checkpoint.ipynb | aitorochotorena/multirotor-all |
Read the dataframe for references of motors: | # import dataset
# dataset = pd.read_csv('Data.csv')
# alternatively open up .csv file to read data
import pandas as pd
import matplotlib.pyplot as plt
path='./Motors/'
df = pd.read_csv(path+'Non-Dominated-Motors.csv', sep=';')
df = df[['Tnom_Nm','Kt_Nm_A','r_omn','weight_g']] # we select the first five rows
df.head()
| _____no_output_____ | Apache-2.0 | notebooks/decision trees/.ipynb_checkpoints/decision_trees_3D_strategy_v2-checkpoint.ipynb | aitorochotorena/multirotor-all |
Calculated values: Example. Code: Tnom=2.2 Nm, Kt=?, R=? Criteria: - Torque: select the next ref. - Kt: select the nearest ref. - Resistance: select the nearest ref. **1D decision tree** Torque: Once the value of the torque in the optimization code is calculated, we will create a 1D decision tree that selects the next higher reference value. | df_X=pd.DataFrame(df.iloc[:,0]) # column torque
df_y=df.iloc[:,0] # column of torque
df_X=pd.DataFrame(df_X)
xy = pd.concat([df_X,df_y],axis=1)
sorted_xy = np.unique(xy,axis=0)
#axis X
frames=[]
for i in range(len(df_X.columns)):
# a vector of supplementary points around the reference value to force the regression tree through X
C=(np.vstack((sorted_xy[:,i]-sorted_xy[:,i].min()/1000,sorted_xy[:,i]+sorted_xy[:,i].min()/1000)).ravel('F'))
D=np.repeat(C, 2)
frames.append(D[:-2])
df_X_Next=np.column_stack(frames)
#axis y
df_y1 = sorted_xy[:,-1]
df_y1_C1 = df_y1-df_y1.min()/100
df_y1_C2 = df_y1+df_y1.min()/100
A=np.repeat(df_y1_C1, 2)
B=np.repeat(df_y1_C2, 2)
C=(np.vstack((A,B)).ravel('F'))
C=(np.delete(np.delete(C,2),2))
df_y_Next=(C)
# create a regressor object (https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html)
regressorNext = DecisionTreeRegressor(criterion='mse', max_depth=None, max_features=1,
max_leaf_nodes=len(df_X_Next), min_impurity_decrease=0.0,
min_impurity_split=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
random_state=None, splitter='best')
# fit the regressor with X and Y data
regressorNext.fit(df_X_Next, df_y_Next)
# arange for creating a range of values
# from min value of X to max value of X
# with a difference of 0.01 between two
# consecutive values
X_grid = np.linspace(min(df_X_Next), max(df_X_Next), num=10000)
# reshape for reshaping the data into
# a len(X_grid)*1 array, i.e. to make
# a column out of the X_grid values
X_grid = X_grid.reshape((len(X_grid), 1))
# scatter plot for original data
plt.scatter(df_X_Next, df_y_Next, color = 'red',label='supplementary points')
plt.scatter(df_X ,df_y, marker='x',label='references',color='black')
# specify title
plt.title('Non dominated references torque to torque (Decision Tree Regression)')
# plot predicted data
plt.plot(X_grid, regressorNext.predict(X_grid), color = 'green', label='decision tree')
# specify y axis label
plt.ylabel('Torque [Nm]')
plt.xlabel('Torque [Nm]')
# show the plot
plt.grid()
plt.legend()
plt.show()
| _____no_output_____ | Apache-2.0 | notebooks/decision trees/.ipynb_checkpoints/decision_trees_3D_strategy_v2-checkpoint.ipynb | aitorochotorena/multirotor-all |
If the calculated value was 2.2 Nm, the predicted one is: | regressorNext.predict(np.array([[2.2]])) | _____no_output_____ | Apache-2.0 | notebooks/decision trees/.ipynb_checkpoints/decision_trees_3D_strategy_v2-checkpoint.ipynb | aitorochotorena/multirotor-all |
**2D decision tree** With this new predicted value of torque, we will estimate the best Kt constant of the catalogue. For that, we construct a decision tree centered on the reference, which takes the torque as input and the Kt constant as output: | from sklearn.tree import DecisionTreeRegressor
df_X=pd.DataFrame(df.iloc[:,0])
df_y=df.iloc[:,1]
# create a regressor object (https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html)
regressorAver = DecisionTreeRegressor(criterion='mse', max_depth=None, max_features=1,
max_leaf_nodes=len(df_X), min_impurity_decrease=0.0,
min_impurity_split=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
random_state=None, splitter='best')
# fit the regressor with X and Y data
regressorAver.fit(df_X , df_y)
# arange for creating a range of values
# from min value of X to max value of X
# with a difference of 0.0001 between two
# consecutive values
X_grid = np.linspace(min(df_X.values), max(df_X.values), num=10000)
# reshape for reshaping the data into
# a len(X_grid)*1 array, i.e. to make
# a column out of the X_grid values
X_grid = X_grid.reshape((len(X_grid), 1))
# scatter plot for original data
plt.scatter(df_X, df_y, color = 'red', label='references')
# plt.scatter(df_X, df_y, color = 'red')
# plt.xlim(df_X.min(), df_X.max())
# plot predicted data
plt.plot(X_grid, regressorAver.predict(X_grid), color = 'black', label='decision tree based on average val.')
# specify title
plt.title('Non dominated references Kt to torque (Decision Tree Regression)')
# specify labels
plt.xlabel('Torque [Nm]')
plt.ylabel('Kt [Nm/A]')
# plot the legend
plt.legend()
# show the plot
plt.grid()
plt.show() | _____no_output_____ | Apache-2.0 | notebooks/decision trees/.ipynb_checkpoints/decision_trees_3D_strategy_v2-checkpoint.ipynb | aitorochotorena/multirotor-all |
Estimated value: for Tnom=3.2003048 Nm, the nearest Kt in the dataframe is: | # average_DT(df.iloc[:,0:2],df.iloc[:,2],np.array([[]]))
regressorAver.predict(np.array([[3.2003048]])) | _____no_output_____ | Apache-2.0 | notebooks/decision trees/.ipynb_checkpoints/decision_trees_3D_strategy_v2-checkpoint.ipynb | aitorochotorena/multirotor-all |
**3D Decision Tree** In the file `predicted_values_DT.py` we have developed different algorithms which construct decision trees based on the previous reference (previous_DT), on the next reference (next_DT) or centered on the reference (average_DT). Considering we have previously obtained the values of Kt and Tnom, a prediction of the resistance value can be deduced from the decision tree: | average_DT(df[['Tnom_Nm','Kt_Nm_A']],df['r_omn'],np.array([[3.2003048,0.05161782]])) | _____no_output_____ | Apache-2.0 | notebooks/decision trees/.ipynb_checkpoints/decision_trees_3D_strategy_v2-checkpoint.ipynb | aitorochotorena/multirotor-all
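The helpers in `predicted_values_DT.py` are not shown in this notebook. A minimal sketch of what `average_DT` might look like, assuming it simply wraps the same one-leaf-per-reference DecisionTreeRegressor construction used in the cells above (the actual implementation may differ):

```python
from sklearn.tree import DecisionTreeRegressor

def average_DT_sketch(df_X, df_y, query):
    # one leaf per reference, so the prediction snaps to the nearest reference
    reg = DecisionTreeRegressor(max_leaf_nodes=len(df_X))
    reg.fit(df_X, df_y)
    return reg.predict(query)

# e.g. average_DT_sketch(df[['Tnom_Nm', 'Kt_Nm_A']], df['r_omn'],
#                        np.array([[3.2003048, 0.05161782]]))
```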
**Visualizing 3D decision tree in scikit-learn** | from IPython.display import Image
from sklearn.externals.six import StringIO
import pydot
from sklearn import tree
df_X=df[['Tnom_Nm','Kt_Nm_A']]
df_y=df['r_omn']
# create a regressor object (https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html)
regressorAver = DecisionTreeRegressor(criterion='mse', max_depth=None, max_features=1,
max_leaf_nodes=len(df_X), min_impurity_decrease=0.0,
min_impurity_split=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
random_state=None, splitter='best')
# fit the regressor with X and Y data
regressorAver.fit(df_X, df_y)
dot_data = StringIO()
tree.export_graphviz(regressorAver, out_file=dot_data, feature_names=['Torque','Kt'],
filled=True, rounded=True,
special_characters=True)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
Image(graph[0].create_png()) | _____no_output_____ | Apache-2.0 | notebooks/decision trees/.ipynb_checkpoints/decision_trees_3D_strategy_v2-checkpoint.ipynb | aitorochotorena/multirotor-all |
This is a sketch for Adversarial images in MNIST | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('/tmp/tensorflow/mnist/input_data', one_hot=True)
import seaborn as sns
sns.set_style('white')
colors_list = sns.color_palette("Paired", 10) | _____no_output_____ | Apache-2.0 | notebook/AdversarialMNIST_sketch.ipynb | tiddler/AdversarialMNIST |
recreate the network structure | x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(x, [-1,28,28,1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
y_pred = tf.nn.softmax(y_conv)
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv)) | _____no_output_____ | Apache-2.0 | notebook/AdversarialMNIST_sketch.ipynb | tiddler/AdversarialMNIST |
Load previous model | model_path = './MNIST.ckpt'
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
tf.train.Saver().restore(sess, model_path)
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline | _____no_output_____ | Apache-2.0 | notebook/AdversarialMNIST_sketch.ipynb | tiddler/AdversarialMNIST |
Extract some "2" images from test set | index_mask = np.where(mnist.test.labels[:, 2])[0]
subset_mask = np.random.choice(index_mask, 10)
subset_mask
origin_images = mnist.test.images[subset_mask]
origin_labels = mnist.test.labels[subset_mask]
origin_labels
prediction=tf.argmax(y_pred,1)
prediction_val = prediction.eval(feed_dict={x: origin_images, keep_prob: 1.0}, session=sess)
print("predictions", prediction_val)
probabilities=y_pred
probabilities_val = probabilities.eval(feed_dict={x: origin_images, keep_prob: 1.0}, session=sess)
print ("probabilities", probabilities_val)
for i in range(0, 10):
print('correct label:', np.argmax(origin_labels[i]))
print('predict label:', prediction_val[i])
print('Confidence:', np.max(probabilities_val[i]))
plt.figure(figsize=(2, 2))
plt.axis('off')
plt.imshow(origin_images[i].reshape([28, 28]), interpolation=None, cmap=plt.cm.gray)
plt.show()
target_number = 6
target_labels = np.zeros(origin_labels.shape)
target_labels[:, target_number] = 1
origin_labels
target_labels
img_gradient = tf.gradients(cross_entropy, x)[0] | _____no_output_____ | Apache-2.0 | notebook/AdversarialMNIST_sketch.ipynb | tiddler/AdversarialMNIST |
one Adversarial vs one image | eta = 0.5
iter_num = 10 | _____no_output_____ | Apache-2.0 | notebook/AdversarialMNIST_sketch.ipynb | tiddler/AdversarialMNIST |
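The notebook ends here before the adversarial update itself. A minimal sketch of how the loop might use `img_gradient`, `eta` and `iter_num` defined above — this is an assumption (targeted gradient descent toward `target_labels`), not the original author's code:

```python
# start from the clean "2" images and push them toward the target class "6"
adv_images = np.copy(origin_images)
for _ in range(iter_num):
    # gradient of the cross-entropy w.r.t. the input pixels, for the target labels
    grad_val = sess.run(img_gradient,
                        feed_dict={x: adv_images, y_: target_labels, keep_prob: 1.0})
    adv_images = adv_images - eta * grad_val      # descend: lower loss on the target class
    adv_images = np.clip(adv_images, 0.0, 1.0)    # keep pixels in a valid range

adv_pred = prediction.eval(feed_dict={x: adv_images, keep_prob: 1.0}, session=sess)
print("adversarial predictions:", adv_pred)
```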