Unnamed: 0 (int64, 0 to 16k) | text_prompt (string lengths 110 to 62.1k) | code_prompt (string lengths 37 to 152k)
---|---|---|
11,300 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Double Pendulum
Let's setup some parameters
Step1: How many steps are there?
Step2: Helper functions for various calculations
Calculate various derivatives
The time derivative of $\theta_1$
Step3: The time derivative of $\theta_2$
Step4: The time derivative of $p_1$
Step5: The time derivative of $p_2$
Step6: Helper function to calculate a constant $C_1$
Step7: Helper function to calculate constant $C_2$
Step8: Calculate the derivatives for each $\theta_1,\theta_2,p_1,p_2$
Step9: Use a naive Euler integration scheme to take a single step of the pendulum's motion
Step10: Now we need to do the actual calculations
Use a default or specified method of integration to solve the pendulum's motion
Step11: Plotting the pendulum
Now we want to plot everything!
Import all the good stuff
Step12: Plot the paths to a png image pendulum.png
The function takes a three-dimensional numpy array
Step13: Now we can plot! | Python Code:
import numpy as np
g = 9.8 # m/s^2
l1 = 1.2 # m
l2 = .7 # m
theta1_0 = np.pi/5 # initial angle of the upper pendulum
theta2_0 = np.pi # initial angle of the lower pendulum
m1 = .10 # kg
m2 = .05 # kg
dt = 1.e-3 # time step (s)
max_t = 5.0 # max time (s)
nsteps = int(max_t/dt) # number of steps
Explanation: Double Pendulum
Let's setup some parameters
End of explanation
print "nsteps:", nsteps
# Import numpy for numerical calculations, vectors, etc.
from numpy import array, zeros, cos, sin, pi
from numpy.linalg import norm as norm
from scipy.integrate import ode
Explanation: How many steps are there?
End of explanation
def dtheta1(theta1, theta2, p1, p2):
num = l2*p1 - l1*p2*cos(theta1 - theta2)
den = l1*l1*l2*(m1 + m2*sin(theta1 - theta2)**2)
return num/den
Explanation: Helper functions for various calculations
Calculate various derivatives
The time derivative of $\theta_1$
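For reference, the expression computed by the helper above, written out as an equation (a direct transcription of the code):
$$\dot{\theta}_1 = \frac{l_2\,p_1 - l_1\,p_2\,\cos(\theta_1 - \theta_2)}{l_1^2\,l_2\,\left(m_1 + m_2\,\sin^2(\theta_1 - \theta_2)\right)}$$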
End of explanation
def dtheta2(theta1, theta2, p1, p2):
num = l1*(m1+m2)*p2 - l2*m2*p1*cos(theta1-theta2)
den = l1*l2*l2*m2*(m1+ m2*sin(theta1-theta2)**2)
return num/den
Explanation: The time derivative of $\theta_2$
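Again for reference, the expression computed by the helper above (a direct transcription of the code):
$$\dot{\theta}_2 = \frac{l_1\,(m_1 + m_2)\,p_2 - l_2\,m_2\,p_1\,\cos(\theta_1 - \theta_2)}{l_1\,l_2^2\,m_2\,\left(m_1 + m_2\,\sin^2(\theta_1 - \theta_2)\right)}$$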
End of explanation
def dp1(theta1, theta2, p1, p2, c1, c2):
return -(m1+m2)*g*l1*sin(theta1) - c1 + c2
Explanation: The time derivative of $p_1$
End of explanation
def dp2(theta1, theta2, p1, p2, c1, c2):
return -m2*g*l2*sin(theta2) + c1 - c2
Explanation: The time derivative of $p_2$
End of explanation
def C1(theta1, theta2, p1, p2):
num = p1*p2*sin(theta1 - theta2)
den = l1*l2*(m1 + m2*sin(theta1 - theta2)**2)
return num/den
Explanation: Helper function to calculate a constant $C_1$
End of explanation
def C2(theta1, theta2, p1, p2):
num = l2*l2*m2*p1*p2 + l1*(m1 + m2)*p2**2 - l1*l2*m2*p1*p2*cos(theta1-theta2)
den = 2*l1*l1*l2*l2*(m1 + m2*sin(theta1-theta2)**2)**2*sin(2*(theta1-theta2))
return num/den
Explanation: Helper function to calculate constant $C_2$
End of explanation
def deriv(t, y):
theta1, theta2, p1, p2 = y[0], y[1], y[2], y[3]
_c1 = C1(theta1, theta2, p1, p2)
_c2 = C2(theta1, theta2, p1, p2)
_dtheta1 = dtheta1(theta1, theta2, p1, p2)
_dtheta2 = dtheta2(theta1, theta2, p1, p2)
_dp1 = dp1(theta1, theta2, p1, p2, _c1, _c2)
_dp2 = dp2(theta1, theta2, p1, p2, _c1, _c2)
return array([_dtheta1, _dtheta2, _dp1, _dp2])
Explanation: Calculate the derivatives for each $\theta_1,\theta_2,p_1,p_2$
End of explanation
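As a quick sanity check (an illustrative addition, not part of the original notebook), we can evaluate the combined derivative function at the initial state, where both momenta are zero:
# Illustrative check: derivatives at the initial state (p1 = p2 = 0).
y0 = [theta1_0, theta2_0, 0.0, 0.0]
print(deriv(0.0, y0))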
def euler(theta1, theta2, p1, p2):
_y = deriv(0, [theta1, theta2, p1, p2])
_dtheta1, _dtheta2, _dp1, _dp2 = _y[0], _y[1], _y[2], _y[3]
theta1 += _dtheta1*dt
theta2 += _dtheta2*dt
p1 += _dp1*dt
p2 += _dp2*dt
return theta1, theta2, p1, p2
Explanation: Use a naive Euler integration scheme to take a single step of the pendulum's motion
End of explanation
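For comparison, here is a sketch of a classical fourth-order Runge-Kutta step that could be used in place of euler(). It is a hypothetical addition (not part of the original notebook) and reuses the deriv() function and the global dt defined above.
def rk4_step(theta1, theta2, p1, p2):
    # Hypothetical alternative to euler(): one classical RK4 step using deriv() and dt.
    y = array([theta1, theta2, p1, p2])
    k1 = deriv(0, y)
    k2 = deriv(0, y + 0.5*dt*k1)
    k3 = deriv(0, y + 0.5*dt*k2)
    k4 = deriv(0, y + dt*k3)
    y = y + (dt/6.0)*(k1 + 2*k2 + 2*k3 + k4)
    return y[0], y[1], y[2], y[3]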
def calculate_paths(method = "euler"):
theta1 = theta1_0
theta2 = theta2_0
p1, p2 = 0.0, 0
paths = []
if method == "euler":
print "Running EULER method"
for i in range(nsteps):
if (i % 500 == 0): print("Step = %d" % i)
theta1, theta2, p1, p2 = euler(theta1, theta2, p1, p2)
r1 = array([l1*sin(theta1), -l1*cos(theta1)])
r2 = r1 + array([l2*sin(theta2), -l2*cos(theta2)])
paths.append([r1, r2])
elif method == "scipy":
print "Running SCIPY method"
yint = [theta1, theta2, p1, p2]
# r = ode(deriv).set_integrator('zvode', method='bdf')
r = ode(deriv).set_integrator('vode', method='bdf')
r.set_initial_value(yint, 0)
paths = []
while r.successful() and r.t < max_t:
r.integrate(r.t+dt)
theta1, theta2 = r.y[0], r.y[1]
r1 = array([l1*sin(theta1), -l1*cos(theta1)])
r2 = r1 + array([l2*sin(theta2), -l2*cos(theta2)])
paths.append([r1, r2])
return array(paths)
paths = calculate_paths()
Explanation: Now we need to do the actual calculations
Use a default or specified method of integration to solve the pendulum's motion
End of explanation
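As an optional extra (not part of the original run), the SciPy-based integrator defined in calculate_paths() above can be selected explicitly:
# Optional: integrate with SciPy's VODE/BDF solver instead of the Euler loop.
paths_scipy = calculate_paths(method="scipy")
print(paths_scipy.shape)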
%pylab inline --no-import-all
import matplotlib
import matplotlib.pyplot as pyplot
from matplotlib import animation
from matplotlib.collections import LineCollection
from matplotlib.lines import Line2D
import numpy as np
Explanation: Plotting the pendulum
Now we want to plot everything!
Import all the good stuff
End of explanation
def plot_paths(paths, IMAGE_PATH = "pendulum.png", TITLE = "Double Pendulum Evolution"):
# set up a list of points for each node we draw the path of
points1 = np.array([paths[:, 0, 0], paths[:, 0, 1]]).transpose().reshape(-1,1,2)
points2 = np.array([paths[:, 1, 0], paths[:, 1, 1]]).transpose().reshape(-1,1,2)
# set up a list of segments for plot coloring
segs1 = np.concatenate([points1[:-1],points1[1:]],axis=1)
segs2 = np.concatenate([points2[:-1],points2[1:]],axis=1)
# make the collection of segments
lc1 = LineCollection(segs1, cmap=pyplot.get_cmap('Blues'), linewidth=3, alpha=0.7)
lc2 = LineCollection(segs2, cmap=pyplot.get_cmap('Greens'), linewidth=3, alpha=0.7)
# fill up the line collections with the time data
t = np.linspace(0,1,paths.shape[0])
lc1.set_array(t)
lc2.set_array(t)
# fake line objects to add to legend for reference
lc1_line = Line2D([0, 1], [0, 1], color='b')
lc2_line = Line2D([0, 1], [0, 1], color='g')
# settings for plotting
YAXIS = "Y"
XAXIS = "X"
# Plot the trajectories
print "Plotting."
# create a plot
plt = pyplot.figure(figsize=(15, 10), dpi=80, facecolor='w')
ax = pyplot.axes()
# set the title and axis labels
ax.set_xlabel(XAXIS)
ax.set_ylabel(YAXIS)
ax.set_title(TITLE)
ax.add_collection(lc1)
ax.add_collection(lc2)
# Manually adding artists doesn't rescale the plot, so we need to autoscale
ax.autoscale()
#ax.plot(paths[:, 0, 0], paths[:, 0, 1], "b-", alpha=0.7, linewidth=3, label="$m_1$")
#ax.plot(paths[:, 1, 0], paths[:, 1, 1], "g-", alpha=0.7, linewidth=3, label="$m_2$")
# # Objects: draw a dot on the last trajectory point
#ax.plot(paths[-1, 0, 0], paths[-1, 0, 1], "b-")
#ax.plot(paths[-1, 1, 0], paths[-1, 1, 1], "g-")
# pyplot.axis('equal')
ax.set_aspect('equal', adjustable='box')
ax.legend([lc1_line, lc2_line], ['$m_1$', '$m_2$'], bbox_to_anchor=(1., 1.), loc="best",
ncol=1, fancybox=True, shadow=True)
# Save our plot
print "Saving plot to %s." % IMAGE_PATH
plt.savefig(IMAGE_PATH, bbox_inches='tight')
Explanation: Plot the paths to a png image pendulum.png
The function takes a three-dimensional numpy array: the first index is over time, the second specifies which mass, and the third specifies the Cartesian displacement
End of explanation
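A short illustrative check of that layout (not part of the original notebook): the array is indexed as paths[time, mass, coordinate].
print(paths.shape)   # e.g. (nsteps, 2, 2) for the Euler run above
print(paths[0, 0])   # initial (x, y) position of mass 1
print(paths[-1, 1])  # final (x, y) position of mass 2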
plot_paths(paths)
Explanation: Now we can plot!
End of explanation |
11,301 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Deep Learning
Assignment 2
Previously in 1_notmnist.ipynb, we created a pickle with formatted datasets for training, development and testing on the notMNIST dataset.
The goal of this assignment is to progressively train deeper and more accurate models using TensorFlow.
Step1: First reload the data we generated in 1_notmnist.ipynb.
Step2: Reformat into a shape that's more adapted to the models we're going to train
Step3: We're first going to train a multinomial logistic regression using simple gradient descent.
TensorFlow works like this
Step4: Let's run this computation and iterate
Step5: Let's now switch to stochastic gradient descent training instead, which is much faster.
The graph will be similar, except that instead of holding all the training data in a constant node, we create a Placeholder node which will be fed actual data at every call of session.run().
Step6: Let's run it
Step7: Problem
Turn the logistic regression example with SGD into a 1-hidden layer neural network with rectified linear units nn.relu() and 1024 hidden nodes. This model should improve your validation / test accuracy. | Python Code:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
Explanation: Deep Learning
Assignment 2
Previously in 1_notmnist.ipynb, we created a pickle with formatted datasets for training, development and testing on the notMNIST dataset.
The goal of this assignment is to progressively train deeper and more accurate models using TensorFlow.
End of explanation
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
Explanation: First reload the data we generated in 1_notmnist.ipynb.
End of explanation
image_size = 28
num_labels = 10
def reformat(dataset, labels):
dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
# Map 0 to [1.0, 0.0, 0.0 ...], 1 to [0.0, 1.0, 0.0 ...]
labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)
return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
Explanation: Reformat into a shape that's more adapted to the models we're going to train:
- data as a flat matrix,
- labels as float 1-hot encodings.
End of explanation
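To see the broadcasting trick used in reformat() in isolation, here is a small illustrative example (the example_labels array is made up for demonstration):
example_labels = np.array([0, 2, 9])
one_hot = (np.arange(num_labels) == example_labels[:, None]).astype(np.float32)
print(one_hot.shape)  # (3, 10)
print(one_hot[1])     # 1.0 at index 2, zeros elsewhere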
# With gradient descent training, even this much data is prohibitive.
# Subset the training data for faster turnaround.
train_subset = 10000
graph = tf.Graph()
with graph.as_default():
# Input data.
# Load the training, validation and test data into constants that are
# attached to the graph.
tf_train_dataset = tf.constant(train_dataset[:train_subset, :])
tf_train_labels = tf.constant(train_labels[:train_subset])
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
# These are the parameters that we are going to be training. The weight
# matrix will be initialized using random values following a (truncated)
# normal distribution. The biases get initialized to zero.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
# We multiply the inputs with the weight matrix, and add biases. We compute
# the softmax and cross-entropy (it's one operation in TensorFlow, because
# it's very common, and it can be optimized). We take the average of this
# cross-entropy across all training examples: that's our loss.
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
# Optimizer.
# We are going to find the minimum of this loss using gradient descent.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
# These are not part of training, but merely here so that we can report
# accuracy figures as we train.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(
tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
Explanation: We're first going to train a multinomial logistic regression using simple gradient descent.
TensorFlow works like this:
* First you describe the computation that you want to see performed: what the inputs, the variables, and the operations look like. These get created as nodes over a computation graph. This description is all contained within the block below:
with graph.as_default():
...
Then you can run the operations on this graph as many times as you want by calling session.run(), providing it outputs to fetch from the graph that get returned. This runtime operation is all contained in the block below:
with tf.Session(graph=graph) as session:
...
Let's load all the data into TensorFlow and build the computation graph corresponding to our training:
End of explanation
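Here is a minimal sketch of that describe-then-run pattern, separate from the model above (illustrative only; the toy_graph names are made up):
toy_graph = tf.Graph()
with toy_graph.as_default():
    a = tf.constant(2.0)
    b = tf.constant(3.0)
    total = a + b
with tf.Session(graph=toy_graph) as toy_session:
    print(toy_session.run(total))  # prints 5.0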
# Defining accuracy function to find accuracy of predictions against actuals
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
num_steps = 801
with tf.Session(graph=graph) as session:
# This is a one-time operation which ensures the parameters get initialized as
# we described in the graph: random weights for the matrix, zeros for the
# biases.
tf.global_variables_initializer().run()
print('Initialized')
for step in range(num_steps):
# Run the computations. We tell .run() that we want to run the optimizer,
# and get the loss value and the training predictions returned as numpy
# arrays.
_, l, predictions = session.run([optimizer, loss, train_prediction])
if (step % 100 == 0):
print('Loss at step %d: %f' % (step, l))
print('Training accuracy: %.1f%%' % accuracy(
predictions, train_labels[:train_subset, :]))
# Calling .eval() on valid_prediction is basically like calling run(), but
# just to get that one numpy array. Note that it recomputes all its graph
# dependencies.
print('Validation accuracy: %.1f%%' % accuracy(
valid_prediction.eval(), valid_labels))
print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
Explanation: Let's run this computation and iterate:
End of explanation
batch_size = 128
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(
tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
Explanation: Let's now switch to stochastic gradient descent training instead, which is much faster.
The graph will be similar, except that instead of holding all the training data in a constant node, we create a Placeholder node which will be fed actual data at every call of session.run().
End of explanation
num_steps = 3001
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
Explanation: Let's run it:
End of explanation
batch_size = 128
num_hidden_nodes = 1024
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
weights = {
'hidden': tf.Variable(tf.truncated_normal([image_size * image_size, num_hidden_nodes])),
'output': tf.Variable(tf.truncated_normal([num_hidden_nodes, num_labels]))
}
biases = {
'hidden': tf.Variable(tf.zeros([num_hidden_nodes])),
'output': tf.Variable(tf.zeros([num_labels]))
}
# Training computation.
hidden_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights['hidden']) + biases['hidden'])
logits = tf.matmul(hidden_train, weights['output']) + biases['output']
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tf_train_labels))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
hidden_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, weights['hidden']) + biases['hidden'])
valid_prediction = tf.nn.softmax(tf.matmul(hidden_valid, weights['output']) + biases['output'])
hidden_test = tf.nn.relu(tf.matmul(tf_test_dataset, weights['hidden']) + biases['hidden'])
test_prediction = tf.nn.softmax(tf.matmul(hidden_test, weights['output']) + biases['output'])
num_steps = 3001
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
Explanation: Problem
Turn the logistic regression example with SGD into a 1-hidden layer neural network with rectified linear units nn.relu() and 1024 hidden nodes. This model should improve your validation / test accuracy.
End of explanation |
11,302 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Writing a LogPDF
Probability density functions in Pints can be defined via models and problems, but they can also be defined directly.
In this example, we implement the Rosenbrock function and run an optimisation using it.
The Rosenbrock function is a two-dimensional function defined as
f(x,y) = -((a - x)^2 + b(y - x^2)^2)
where a and b are constants and x and y are variables. In analogy with typical Pints models, x and y are our parameters.
First, take a look at the LogPDF interface. It tells us two things
Step1: We can test our class by creating an object and calling it with a few parameters
Step2: Wikipedia tells us that for a = 1 and b = 100 the minimum value should be at [1, 1]. We can test this by inspecting its value at that point
Step3: We get a warning here, because the notebook doesn't like the log of zero, but it returns the correct value!
Now let's try an optimisation
Step4: Finally, print the returned point. If it worked, we should be at [1, 1] | Python Code:
import numpy as np
import pints
class Rosenbrock(pints.LogPDF):
def __init__(self, a=1, b=100):
self._a = a
self._b = b
def __call__(self, x):
return - np.log((self._a - x[0])**2 + self._b * (x[1] - x[0]**2)**2)
def n_parameters(self):
return 2
Explanation: Writing a LogPDF
Probability density functions in Pints can be defined via models and problems, but they can also be defined directly.
In this example, we implement the Rosenbrock function and run an optimisation using it.
The Rosenbrock function is a two-dimensional function defined as
f(x,y) = -((a - x)^2 + b(y - x^2)^2)
where a and b are constants and x and y are variables. In analogy with typical Pints models, x and y are our parameters.
First, take a look at the LogPDF interface. It tells us two things:
We need to add a method n_parameters that tells pints the dimension of the parameter space.
Objects of our class should be callable. In Python, we can do this using the special method __call__.
The input to this method should be a vector, so we should rewrite it as
f(p) = -((a - p[0])^2 + b(p[1] - p[0]^2)^2)
The result of calling this method should be the logarithm of a (possibly unnormalised) likelihood or probability density. That means we should (1) take the logarithm of f instead of returning it directly, and (2) invert the method, so that it has a clearly defined maximum that we can search for.
So we should create an object that evaluates
-log(f(p))
We now have all we need to implement a Rosenbrock class:
End of explanation
r = Rosenbrock()
print(r([0, 0]))
print(r([0.1, 0.1]))
print(r([0.4, 0.2]))
Explanation: We can test our class by creating an object and calling it with a few parameters:
End of explanation
r([1, 1])
Explanation: Wikipedia tells us that for a = 1 and b = 100 the minimum value should be at [1, 1]. We can test this by inspecting its value at that point:
End of explanation
# Define some boundaries
boundaries = pints.RectangularBoundaries([-5, -5], [5, 5])
# Pick an initial point
x0 = [2, 2]
# And run!
xbest, fbest = pints.optimise(r, x0, boundaries=boundaries)
Explanation: We get a warning here, because the notebook doesn't like the log of zero, but it returns the correct value!
Now let's try an optimisation:
End of explanation
print(xbest)
Explanation: Finally, print the returned point. If it worked, we should be at [1, 1]:
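As an independent sanity check (a sketch that assumes SciPy is available; it is not part of the original example), we can also minimise the positive Rosenbrock function directly:
from scipy.optimize import minimize

def rosenbrock(p, a=1, b=100):
    # The (positive) Rosenbrock function; its minimum corresponds to the
    # maximum of the LogPDF defined above.
    return (a - p[0])**2 + b * (p[1] - p[0]**2)**2

res = minimize(rosenbrock, x0=[2, 2])
print(res.x)  # should be close to [1, 1]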
End of explanation |
11,303 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
2.1 Advanced Indexing
Indexing files
As was shown earlier, we can create an index of the data space using the index() method
Step1: We will use the Collection class to manage the index directly in-memory
Step2: This enables us for example, to quickly search for all indexes related to a specific state point
Step3: At this point the index contains information about the statepoint and all data stored in the job document.
If we want the V.txt text files that we used to store data in to be part of the index, we need to tell signac the filename pattern and optionally the file format.
Step4: The index contains basic information about the files within our data space, such as the path and the MD5 hash sum.
The format field currently says File, which is the default value.
We can specify that all files ending with .txt are to be defined to be of TextFile format
Step5: Generating a Master Index
A master index is compiled from multiple other indexes, which is useful when operating on data compiled from multiple sources, such as multiple signac projects.
To make a data space part of a master index, we need to create a signac_access.py module.
We use the access module to define how the index for the particular space is to be generated.
We can create a basic access module using the Project.create_access_module() function
Step6: When compiling a master index, signac will search for access modules named signac_access.py.
Whenever it finds a file with that name, it will import the module and compile all indexes yielded from a function called get_indexes() into the master index.
Let's try that!
Step8: Please note, that we executed the index() function without specifying the project directory.
The function crawled through all sub-directories below the root directory in an attempt to find acccess modules.
We can use the access module to control how exactly the index is generated, for example by adding filename and format definitions.
Usually we could edit the file directly, here we will just overwrite the old one
Step9: Now files will also be part of the master index!
Step10: We can use the signac.fetch() function to directly open files associated with a particular index document | Python Code:
import signac
project = signac.get_project(root='projects/tutorial')
index = list(project.index())
for doc in index[:3]:
print(doc)
Explanation: 2.1 Advanced Indexing
Indexing files
As was shown earlier, we can create an index of the data space using the index() method:
End of explanation
index = signac.Collection(project.index())
Explanation: We will use the Collection class to manage the index directly in-memory:
End of explanation
for doc in index.find({'statepoint.p': 0.1}):
print(doc)
Explanation: This enables us for example, to quickly search for all indexes related to a specific state point:
End of explanation
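Richer queries are possible as well; for example, a sketch using the MongoDB-style operator syntax (assuming the installed signac version supports it):
# Hypothetical range query: all index documents with pressure below 1.0.
for doc in index.find({'statepoint.p': {'$lt': 1.0}}):
    print(doc['statepoint']['p'])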
index = signac.Collection(project.index('.*\.txt'))
for doc in index.find(limit=2):
print(doc)
Explanation: At this point the index contains information about the statepoint and all data stored in the job document.
If we want the V.txt text files that we used to store data in to be part of the index, we need to tell signac the filename pattern and optionally the file format.
End of explanation
index = signac.Collection(project.index({'.*\.txt': 'TextFile'}))
print(index.find_one({'format': 'TextFile'}))
Explanation: The index contains basic information about the files within our data space, such as the path and the MD5 hash sum.
The format field currently says File, which is the default value.
We can specify that all files ending with .txt are to be defined to be of TextFile format:
End of explanation
# Let's make sure to remove any remnants from previous runs...
% rm -f projects/tutorial/signac_access.py
# This will generate a minimal access module:
project.create_access_module(master=False)
% cat projects/tutorial/signac_access.py
Explanation: Generating a Master Index
A master index is compiled from multiple other indexes, which is useful when operating on data compiled from multiple sources, such as multiple signac projects.
To make a data space part of a master index, we need to create a signac_access.py module.
We use the access module to define how the index for the particular space is to be generated.
We can create a basic access module using the Project.create_access_module() function:
End of explanation
master_index = signac.Collection(signac.index())
for doc in master_index.find(limit=2):
print(doc)
Explanation: When compiling a master index, signac will search for access modules named signac_access.py.
Whenever it finds a file with that name, it will import the module and compile all indexes yielded from a function called get_indexes() into the master index.
Let's try that!
End of explanation
access_module = """\
import signac

def get_indexes(root):
    yield signac.get_project(root).index({'.*\.txt': 'TextFile'})
"""

with open('projects/tutorial/signac_access.py', 'w') as file:
    file.write(access_module)
Explanation: Please note that we executed the index() function without specifying the project directory.
The function crawled through all sub-directories below the root directory in an attempt to find access modules.
We can use the access module to control how exactly the index is generated, for example by adding filename and format definitions.
Usually we could edit the file directly, here we will just overwrite the old one:
End of explanation
master_index = signac.Collection(signac.index())
print(master_index.find_one({'format': 'TextFile'}))
Explanation: Now files will also be part of the master index!
End of explanation
for doc in master_index.find({'format': 'TextFile'}, limit=3):
with signac.fetch(doc) as file:
p = doc['statepoint']['p']
V = [float(v) for v in file.read().strip().split(',')]
print(p, V)
Explanation: We can use the signac.fetch() function to directly open files associated with a particular index document:
End of explanation |
11,304 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
ES-DOC CMIP6 Model Properties - Atmoschem
MIP Era
Step1: Document Authors
Set document authors
Step2: Document Contributors
Specify document contributors
Step3: Document Publication
Specify document publication status
Step4: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Key Properties --> Timestep Framework
4. Key Properties --> Timestep Framework --> Split Operator Order
5. Key Properties --> Tuning Applied
6. Grid
7. Grid --> Resolution
8. Transport
9. Emissions Concentrations
10. Emissions Concentrations --> Surface Emissions
11. Emissions Concentrations --> Atmospheric Emissions
12. Emissions Concentrations --> Concentrations
13. Gas Phase Chemistry
14. Stratospheric Heterogeneous Chemistry
15. Tropospheric Heterogeneous Chemistry
16. Photo Chemistry
17. Photo Chemistry --> Photolysis
1. Key Properties
Key properties of the atmospheric chemistry
1.1. Model Overview
Is Required
Step5: 1.2. Model Name
Is Required
Step6: 1.3. Chemistry Scheme Scope
Is Required
Step7: 1.4. Basic Approximations
Is Required
Step8: 1.5. Prognostic Variables Form
Is Required
Step9: 1.6. Number Of Tracers
Is Required
Step10: 1.7. Family Approach
Is Required
Step11: 1.8. Coupling With Chemical Reactivity
Is Required
Step12: 2. Key Properties --> Software Properties
Software properties of aerosol code
2.1. Repository
Is Required
Step13: 2.2. Code Version
Is Required
Step14: 2.3. Code Languages
Is Required
Step15: 3. Key Properties --> Timestep Framework
Timestepping in the atmospheric chemistry model
3.1. Method
Is Required
Step16: 3.2. Split Operator Advection Timestep
Is Required
Step17: 3.3. Split Operator Physical Timestep
Is Required
Step18: 3.4. Split Operator Chemistry Timestep
Is Required
Step19: 3.5. Split Operator Alternate Order
Is Required
Step20: 3.6. Integrated Timestep
Is Required
Step21: 3.7. Integrated Scheme Type
Is Required
Step22: 4. Key Properties --> Timestep Framework --> Split Operator Order
**
4.1. Turbulence
Is Required
Step23: 4.2. Convection
Is Required
Step24: 4.3. Precipitation
Is Required
Step25: 4.4. Emissions
Is Required
Step26: 4.5. Deposition
Is Required
Step27: 4.6. Gas Phase Chemistry
Is Required
Step28: 4.7. Tropospheric Heterogeneous Phase Chemistry
Is Required
Step29: 4.8. Stratospheric Heterogeneous Phase Chemistry
Is Required
Step30: 4.9. Photo Chemistry
Is Required
Step31: 4.10. Aerosols
Is Required
Step32: 5. Key Properties --> Tuning Applied
Tuning methodology for atmospheric chemistry component
5.1. Description
Is Required
Step33: 5.2. Global Mean Metrics Used
Is Required
Step34: 5.3. Regional Metrics Used
Is Required
Step35: 5.4. Trend Metrics Used
Is Required
Step36: 6. Grid
Atmospheric chemistry grid
6.1. Overview
Is Required
Step37: 6.2. Matches Atmosphere Grid
Is Required
Step38: 7. Grid --> Resolution
Resolution in the atmospheric chemistry grid
7.1. Name
Is Required
Step39: 7.2. Canonical Horizontal Resolution
Is Required
Step40: 7.3. Number Of Horizontal Gridpoints
Is Required
Step41: 7.4. Number Of Vertical Levels
Is Required
Step42: 7.5. Is Adaptive Grid
Is Required
Step43: 8. Transport
Atmospheric chemistry transport
8.1. Overview
Is Required
Step44: 8.2. Use Atmospheric Transport
Is Required
Step45: 8.3. Transport Details
Is Required
Step46: 9. Emissions Concentrations
Atmospheric chemistry emissions
9.1. Overview
Is Required
Step47: 10. Emissions Concentrations --> Surface Emissions
**
10.1. Sources
Is Required
Step48: 10.2. Method
Is Required
Step49: 10.3. Prescribed Climatology Emitted Species
Is Required
Step50: 10.4. Prescribed Spatially Uniform Emitted Species
Is Required
Step51: 10.5. Interactive Emitted Species
Is Required
Step52: 10.6. Other Emitted Species
Is Required
Step53: 11. Emissions Concentrations --> Atmospheric Emissions
TO DO
11.1. Sources
Is Required
Step54: 11.2. Method
Is Required
Step55: 11.3. Prescribed Climatology Emitted Species
Is Required
Step56: 11.4. Prescribed Spatially Uniform Emitted Species
Is Required
Step57: 11.5. Interactive Emitted Species
Is Required
Step58: 11.6. Other Emitted Species
Is Required
Step59: 12. Emissions Concentrations --> Concentrations
TO DO
12.1. Prescribed Lower Boundary
Is Required
Step60: 12.2. Prescribed Upper Boundary
Is Required
Step61: 13. Gas Phase Chemistry
Atmospheric chemistry transport
13.1. Overview
Is Required
Step62: 13.2. Species
Is Required
Step63: 13.3. Number Of Bimolecular Reactions
Is Required
Step64: 13.4. Number Of Termolecular Reactions
Is Required
Step65: 13.5. Number Of Tropospheric Heterogenous Reactions
Is Required
Step66: 13.6. Number Of Stratospheric Heterogenous Reactions
Is Required
Step67: 13.7. Number Of Advected Species
Is Required
Step68: 13.8. Number Of Steady State Species
Is Required
Step69: 13.9. Interactive Dry Deposition
Is Required
Step70: 13.10. Wet Deposition
Is Required
Step71: 13.11. Wet Oxidation
Is Required
Step72: 14. Stratospheric Heterogeneous Chemistry
Atmospheric chemistry startospheric heterogeneous chemistry
14.1. Overview
Is Required
Step73: 14.2. Gas Phase Species
Is Required
Step74: 14.3. Aerosol Species
Is Required
Step75: 14.4. Number Of Steady State Species
Is Required
Step76: 14.5. Sedimentation
Is Required
Step77: 14.6. Coagulation
Is Required
Step78: 15. Tropospheric Heterogeneous Chemistry
Atmospheric chemistry tropospheric heterogeneous chemistry
15.1. Overview
Is Required
Step79: 15.2. Gas Phase Species
Is Required
Step80: 15.3. Aerosol Species
Is Required
Step81: 15.4. Number Of Steady State Species
Is Required
Step82: 15.5. Interactive Dry Deposition
Is Required
Step83: 15.6. Coagulation
Is Required
Step84: 16. Photo Chemistry
Atmospheric chemistry photo chemistry
16.1. Overview
Is Required
Step85: 16.2. Number Of Reactions
Is Required
Step86: 17. Photo Chemistry --> Photolysis
Photolysis scheme
17.1. Method
Is Required
Step87: 17.2. Environmental Conditions
Is Required | Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'noaa-gfdl', 'gfdl-esm4', 'atmoschem')
Explanation: ES-DOC CMIP6 Model Properties - Atmoschem
MIP Era: CMIP6
Institute: NOAA-GFDL
Source ID: GFDL-ESM4
Topic: Atmoschem
Sub-Topics: Transport, Emissions Concentrations, Gas Phase Chemistry, Stratospheric Heterogeneous Chemistry, Tropospheric Heterogeneous Chemistry, Photo Chemistry.
Properties: 84 (39 required)
Model descriptions: Model description details
Initialized From: CMIP5:GFDL-CM3
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-20 15:02:34
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
Explanation: Document Authors
Set document authors
End of explanation
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
Explanation: Document Contributors
Specify document contributors
End of explanation
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
Explanation: Document Publication
Specify document publication status
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Key Properties --> Timestep Framework
4. Key Properties --> Timestep Framework --> Split Operator Order
5. Key Properties --> Tuning Applied
6. Grid
7. Grid --> Resolution
8. Transport
9. Emissions Concentrations
10. Emissions Concentrations --> Surface Emissions
11. Emissions Concentrations --> Atmospheric Emissions
12. Emissions Concentrations --> Concentrations
13. Gas Phase Chemistry
14. Stratospheric Heterogeneous Chemistry
15. Tropospheric Heterogeneous Chemistry
16. Photo Chemistry
17. Photo Chemistry --> Photolysis
1. Key Properties
Key properties of the atmospheric chemistry
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of atmospheric chemistry model.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of atmospheric chemistry model code.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
DOC.set_value("Other: troposphere")
DOC.set_value("mesosphere")
DOC.set_value("stratosphere")
DOC.set_value("whole atmosphere")
Explanation: 1.3. Chemistry Scheme Scope
Is Required: TRUE Type: ENUM Cardinality: 1.N
Atmospheric domains covered by the atmospheric chemistry model
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("Lumped higher hydrocarbon species and oxidation products, parameterized source of Cly and Bry in stratosphere, short-lived species not advected")
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: STRING Cardinality: 1.1
Basic approximations made in the atmospheric chemistry model
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 1.5. Prognostic Variables Form
Is Required: TRUE Type: ENUM Cardinality: 1.N
Form of prognostic variables in the atmospheric chemistry component.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(82)
Explanation: 1.6. Number Of Tracers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of advected tracers in the atmospheric chemistry model
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 1.7. Family Approach
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Atmospheric chemistry calculations (not advection) generalized into families of species?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
DOC.set_value(True)
Explanation: 1.8. Coupling With Chemical Reactivity
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Atmospheric chemistry transport scheme turbulence is couple with chemical reactivity?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 2. Key Properties --> Software Properties
Software properties of aerosol code
2.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 2.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 2.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
DOC.set_value("Operator splitting")
Explanation: 3. Key Properties --> Timestep Framework
Timestepping in the atmospheric chemistry model
3.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Mathematical method deployed to solve the evolution of a given variable
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(30)
Explanation: 3.2. Split Operator Advection Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for chemical species advection (in seconds)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(30)
Explanation: 3.3. Split Operator Physical Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for physics (in seconds).
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 3.4. Split Operator Chemistry Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for chemistry (in seconds).
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 3.5. Split Operator Alternate Order
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 3.6. Integrated Timestep
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Timestep for the atmospheric chemistry model (in seconds)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 3.7. Integrated Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the type of timestep scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 4. Key Properties --> Timestep Framework --> Split Operator Order
**
4.1. Turbulence
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for turbulence scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 4.2. Convection
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for convection scheme This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 4.3. Precipitation
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for precipitation scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 4.4. Emissions
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for emissions scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 4.5. Deposition
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for deposition scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 4.6. Gas Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for gas phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 4.7. Tropospheric Heterogeneous Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for tropospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 4.8. Stratospheric Heterogeneous Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for stratospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 4.9. Photo Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for photo chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 4.10. Aerosols
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for aerosols scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 5. Key Properties --> Tuning Applied
Tuning methodology for atmospheric chemistry component
5.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 5.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics of the global mean state used in tuning model/component
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 5.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics of mean state used in tuning model/component
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 5.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics used in tuning model/component
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 6. Grid
Atmospheric chemistry grid
6.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the atmospheric chemistry grid
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 6.2. Matches Atmosphere Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
* Does the atmospheric chemistry grid match the atmosphere grid?*
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 7. Grid --> Resolution
Resolution in the atmospheric chemistry grid
7.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 7.2. Canonical Horizontal Resolution
Is Required: FALSE Type: STRING Cardinality: 0.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 7.3. Number Of Horizontal Gridpoints
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 7.4. Number Of Vertical Levels
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Number of vertical levels resolved on computational grid.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 7.5. Is Adaptive Grid
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Default is False. Set true if grid resolution changes during execution.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 8. Transport
Atmospheric chemistry transport
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview of transport implementation
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 8.2. Use Atmospheric Transport
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is transport handled by the atmosphere, rather than within atmospheric chemistry?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 8.3. Transport Details
Is Required: FALSE Type: STRING Cardinality: 0.1
If transport is handled within the atmospheric chemistry scheme, describe it.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 9. Emissions Concentrations
Atmospheric chemistry emissions
9.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview atmospheric chemistry emissions
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
DOC.set_value("Anthropogenic")
DOC.set_value("Other: bare ground")
DOC.set_value("Sea surface")
DOC.set_value("Vegetation")
Explanation: 10. Emissions Concentrations --> Surface Emissions
**
10.1. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of the chemical species emitted at the surface that are taken into account in the emissions scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 10.2. Method
Is Required: FALSE Type: ENUM Cardinality: 0.N
Methods used to define chemical species emitted directly into model layers above the surface (several methods allowed because the different species may not use the same method).
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("CO, CH2O, NO, C3H6, isoprene, C2H6, C2H4, C4H10, terpenes, C3H8, acetone, CH3OH, C2H5OH, H2, SO2, NH3")
Explanation: 10.3. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and prescribed via a climatology, and the nature of the climatology (E.g. CO (monthly), C2H6 (constant))
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 10.4. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and prescribed as spatially uniform
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("DMS")
Explanation: 10.5. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and specified via an interactive method
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 10.6. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and specified via any other method
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
DOC.set_value("Aircraft")
DOC.set_value("Biomass burning")
DOC.set_value("Lightning")
DOC.set_value("Other: volcanoes")
Explanation: 11. Emissions Concentrations --> Atmospheric Emissions
TO DO
11.1. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of chemical species emitted in the atmosphere that are taken into account in the emissions scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 11.2. Method
Is Required: FALSE Type: ENUM Cardinality: 0.N
Methods used to define the chemical species emitted in the atmosphere (several methods allowed because the different species may not use the same method).
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("CO, CH2O, NO, C3H6, isoprene, C2H6, C2H4, C4H10, terpenes, C3H8, acetone, CH3OH, C2H5OH, H2, SO2, NH3")
Explanation: 11.3. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and prescribed via a climatology (E.g. CO (monthly), C2H6 (constant))
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 11.4. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and prescribed as spatially uniform
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 11.5. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and specified via an interactive method
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 11.6. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and specified via an "other method"
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("CH4, N2O")
Explanation: 12. Emissions Concentrations --> Concentrations
TO DO
12.1. Prescribed Lower Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the lower boundary.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 12.2. Prescribed Upper Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the upper boundary.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 13. Gas Phase Chemistry
Atmospheric chemistry gas phase chemistry
13.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview gas phase atmospheric chemistry
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
DOC.set_value("Bry")
DOC.set_value("Cly")
DOC.set_value("H2O")
DOC.set_value("HOx")
DOC.set_value("NOy")
DOC.set_value("Other: sox")
DOC.set_value("Ox")
DOC.set_value("VOCs")
DOC.set_value("isoprene")
Explanation: 13.2. Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Species included in the gas phase chemistry scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(157)
Explanation: 13.3. Number Of Bimolecular Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of bi-molecular reactions in the gas phase chemistry scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(21)
Explanation: 13.4. Number Of Termolecular Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of ter-molecular reactions in the gas phase chemistry scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 13.5. Number Of Tropospheric Heterogenous Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the tropospheric heterogeneous chemistry scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 13.6. Number Of Stratospheric Heterogenous Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the stratospheric heterogeneous chemistry scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 13.7. Number Of Advected Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of advected species in the gas phase chemistry scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(19)
Explanation: 13.8. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of gas phase species for which the concentration is updated in the chemical solver assuming photochemical steady state
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 13.9. Interactive Dry Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
DOC.set_value(True)
Explanation: 13.10. Wet Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is wet deposition included? Wet deposition describes the moist processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
DOC.set_value(True)
Explanation: 13.11. Wet Oxidation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is wet oxidation included? Oxidation describes the loss of electrons or an increase in oxidation state by a molecule
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 14. Stratospheric Heterogeneous Chemistry
Atmospheric chemistry stratospheric heterogeneous chemistry
14.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview stratospheric heterogeneous atmospheric chemistry
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
DOC.set_value("Bry")
DOC.set_value("Cly")
DOC.set_value("NOy")
Explanation: 14.2. Gas Phase Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Gas phase species included in the stratospheric heterogeneous chemistry scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
DOC.set_value("NAT (Nitric acid trihydrate)")
DOC.set_value("Polar stratospheric ice")
Explanation: 14.3. Aerosol Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Aerosol species included in the stratospheric heterogeneous chemistry scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(3)
Explanation: 14.4. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of steady state species in the stratospheric heterogeneous chemistry scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
DOC.set_value(True)
Explanation: 14.5. Sedimentation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is sedimentation included in the stratospheric heterogeneous chemistry scheme or not?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
DOC.set_value(True)
Explanation: 14.6. Coagulation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is coagulation included in the stratospheric heterogeneous chemistry scheme or not?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 15. Tropospheric Heterogeneous Chemistry
Atmospheric chemistry tropospheric heterogeneous chemistry
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview tropospheric heterogeneous atmospheric chemistry
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("3")
Explanation: 15.2. Gas Phase Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of gas phase species included in the tropospheric heterogeneous chemistry scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
DOC.set_value("Sulphate")
Explanation: 15.3. Aerosol Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Aerosol species included in the tropospheric heterogeneous chemistry scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 15.4. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of steady state species in the tropospheric heterogeneous chemistry scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 15.5. Interactive Dry Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
DOC.set_value(True)
Explanation: 15.6. Coagulation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is coagulation included in the tropospheric heterogeneous chemistry scheme or not?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 16. Photo Chemistry
Atmospheric chemistry photo chemistry
16.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview atmospheric photo chemistry
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
DOC.set_value(39)
Explanation: 16.2. Number Of Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the photo-chemistry scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
DOC.set_value("Offline (with clouds)")
Explanation: 17. Photo Chemistry --> Photolysis
Photolysis scheme
17.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Photolysis scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 17.2. Environmental Conditions
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any environmental conditions taken into account by the photolysis scheme (e.g. whether pressure- and temperature-sensitive cross-sections and quantum yields in the photolysis calculations are modified to reflect the modelled conditions.)
End of explanation |
11,305 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Exercise 1
Work on this before the next lecture on 10 April. We will talk about questions, comments, and solutions during the exercise after the second lecture.
Please do form study groups! When you do, make sure you can explain everything in your own words, do not simply copy&paste from others.
The solutions to a lot of these problems can probably be found with Google. Please don't. You will not learn a lot by copy&pasting from the internet.
If you want to get credit/examination on this course please upload your work to your GitHub repository for this course before the next lecture starts and post a link to your repository in this thread. If you worked on things together with others please add their names to the notebook so we can see who formed groups.
Objective
There are two objectives for this set of exercises
Step1: Question 1
In the lecture we used the nearest neighbour classifier to classify points from a toy dataset into either "red" or "blue" classes. We investigated how the performance changes as a function of model complexity and what this means for the performance of our classifier on unseen data. Instead of using a linear model as in the lecture, use a k-nearest neighbour model.
plot your dataset
split your dataset into a training and testing set. Comment on how you decided to split your data.
evaluate the performance of the classifier on your training dataset.
evaluate the performance of the classifier on your testing dataset.
repeat the above two steps for varying splits (10-90, 20-80, 30-70, ...) and comment
on what you see. Is there a "best" way to split your data?
comment on why the two performance estimates agree or disagree.
plot the accuracy of the classifier as a function of n_neighbors.
comment on the similarities and differences between the performance on the testing and training dataset.
is a KNeighbors classifier with 4 or 10 neighbors more complicated?
find the best setting of n_neighbors for this dataset.
why is this the best setting?
Use make_blobs(n_samples=400, centers=23, random_state=42) to create a simple dataset and use the KNeighborsClassifier classifier to answer the above questions.
Step2: Simply plot the dataset
Step3: Splitting the data between a training set and test.
The split operation can be done using different ratios of train set and test set; I will comment on this later. For now, I decided to take half of the data as the training set and the other half as the test set. Intuitively, we can already argue that this choice may be the best tradeoff between taking a bigger portion of the data as the training set (thus facing the risk of overfitting) and the other way around, i.e. having a training set too small to accurately estimate parameters that can reproduce the trend observed for unseen data, i.e. for the test set.
For now, I fixed the number of neighbors ("n_neighbors") of the model to 5, so that the performance can be evaluated (as asked in the next steps) for this fixed hyperparameter.
Step4: In the boxplots, we summarize the distribution of the scores for the training and test set. The upper and lower bounds of the boxes mark the first and third quartiles, while the vertical lines outside the boxes represent the 5th and 95th percentiles. Points drawn as dots are outliers that fall outside the 5th-95th percentile range. Black horizontal lines represent the means, while dotted grey lines mark the medians.
As we can see, the means (and medians) of the training and test sets are quite far apart. Moreover, the 95th percentile of the test set is only slightly above the 5th percentile of the training set. Overall, this means that the scores on the training set are systematically higher than the ones on the test set, given their distributions. Of course, some test-set scores are higher than the lowest training-set scores, due to fluctuations in the score values.
Varying the splits between datasets (10-90, 20-80, 30-70, ...)
Step5: Here, the two scores start out with a big difference between their average values (thick lines). The train set is small, so we are not using much data to train the classifier. Increasing the train-size fraction provides a larger number of points, which improves the performance of the classifier on both the training and test sets, since we give it more statistics.
In particular, we see that the mean score (for both sets) as a function of the training-set fraction rises steeply at first, up to a fraction of about 0.4, after which it stays almost constant. This means that increasing the train-set size does not significantly improve the accuracy of the results (on average) beyond a given point. Note, however, that the standard deviation of the test-set score increases, especially at high values of the train-set size: in that regime we have a lot of data to fit the model but little left to estimate its accuracy, so we are more prone to overfitting the train set. The test-set curve seems to show a maximum at 0.5, which means that a roughly optimal split is attained by dividing the dataset in half. This is reasonable, as already commented in the part "Splitting the data between a training set and test."
Varying n_neighbors
Step6: Question 2
This is a regression problem. It mostly follows the setup of the classification problem so you should be able to reuse some of your work.
plot your dataset
fit a kNN regressor with varying number of n_neighbors and compare each regressor's predictions to the location of the training and testing points.
plot the mean squared error of the classifier as a function of n_neighbors for both training and testing datasets.
comment on the similarities and differences between the performance on the testing and training dataset.
find the best setting of n_neighbors for this dataset.
why is this the best setting?
can you explain why the mean square error on the training dataset plateaus between ~n_neighbors=5 to 15 at the value that it does?
Use make_regression() to create the dataset and use KNeighborsRegressor to answer the above questions. Take a look at scikit-learn's metrics module to compute the mean squared error.
Step7: Question 3
Logistic regression. Use a linear model to solve a two class classification problem.
What is the difference between a linear regression model and a logistic regression model?
plot your data and split it into a training and test set
draw your guess for where the decision boundary will be on the plot. Why did you pick this one?
use the LogisticRegression classifier to fit a model to your training data
extract the fitted coefficients from the model and draw the fitted decision boundary
create a function to draw the decision surface (the classifier's prediction for every point in space)
why is the boundary where it is?
(bonus) create new datasets with increasingly larger amounts of noise (increase the cluster_std argument) and plot the decision boundary for each case. What happens and why?
create 20 new datasets by changing the random_state parameter and fit a model to each. Visualise the variation in the fitted parameters and the decision boundaries you obtain. Is this a high or low variance model?
Use make_two_blobs() to create a simple dataset and use the LogisticRegression classifier to answer the above questions.
Step8: Question 4
Logistic regression. Use a more complex linear model to create a two class classifier for the "circle inside a circle" problem. Think about how you can increase the complexity of a logistic regression model. Visualise the classification accuracy as a function of the model complexity.
Use make_circles(n_samples=400, factor=.3, noise=.1) to create a simple dataset and use the LogisticRegression classifier to answer the above question. | Python Code:
%config InlineBackend.figure_format='retina'
%matplotlib inline
import numpy as np
np.random.seed(123)
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (8, 8)
plt.rcParams["font.size"] = 14
from sklearn.utils import check_random_state
Explanation: Exercise 1
Work on this before the next lecture on 10 April. We will talk about questions, comments, and solutions during the exercise after the second lecture.
Please do form study groups! When you do, make sure you can explain everything in your own words, do not simply copy&paste from others.
The solutions to a lot of these problems can probably be found with Google. Please don't. You will not learn a lot by copy&pasting from the internet.
If you want to get credit/examination on this course please upload your work to your GitHub repository for this course before the next lecture starts and post a link to your repository in this thread. If you worked on things together with others please add their names to the notebook so we can see who formed groups.
Objective
There are two objectives for this set of exercises:
get you started using python, scikit-learn, matplotlib, and GitHub. You will be using them a lot during the course, so make sure you get a good foundation to build on.
working through the steps of opening a new dataset, plotting the data, fitting a model to it, evaluating your model, and deciding on model complexity.
Question 0
Install python, scikit-learn (v0.18), matplotlib, jupyter and git.
Instructions for doing so: https://github.com/wildtreetech/advanced-comp-2017/blob/master/install.md
Documentation and guides for the various tools:
jupyter quickstart
try jupyter without installing anything
matplotlib homepage
matplotlib gallery
scikit-learn homepage
scikit-learn examples
scikit-learn documentation
try git online without installing anything
GitHub and git
Create a GitHub account for yourself or use one you already have.
Follow the guide on creating a new repository. Name the repository "advanced-comp-2017".
Read up on git clone, git pull, git push, git add and git commit. Once you master these five commands you should be good for this course. There is a whole universe of complex things that git can do for you, don't worry about them for now. Once you feel comfortable with the basics you can always step it up later.
These are some useful default imports for plotting and numpy
End of explanation
from sklearn.datasets import make_blobs
from sklearn.neighbors import KNeighborsClassifier
labels = ["b", "darkorange"]
X, y = make_blobs(n_samples=400, centers=23, random_state=42)
y = np.take(labels, (y < 10))
Explanation: Question 1
In the lecture we used the nearest neighbour classifier to classify points from a toy dataset into either "red" or "blue" classes. We investigated how the performance changes as a function of model complexity and what this means for the performance of our classifier on unseen data. Instead of using a linear model as in the lecture, use a k-nearest neighbour model.
plot your dataset
split your dataset into a training and testing set. Comment on how you decided to split your data.
evaluate the performance of the classifier on your training dataset.
evaluate the performance of the classifier on your testing dataset.
repeat the above two steps for varying splits (10-90, 20-80, 30-70, ...) and comment
on what you see. Is there a "best" way to split your data?
comment on why the two performance estimates agree or disagree.
plot the accuracy of the classifier as a function of n_neighbors.
comment on the similarities and differences between the performance on the testing and training dataset.
is a KNeighbors classifier with 4 or 10 neighbors more complicated?
find the best setting of n_neighbors for this dataset.
why is this the best setting?
Use make_blobs(n_samples=400, centers=23, random_state=42) to create a simple dataset and use the KNeighborsClassifier classifier to answer the above questions.
End of explanation
plt.scatter(X[:, 0], X[:, 1], facecolor=y, edgecolor="white", s=40 )
plt.xlabel("Feature 1")
plt.ylabel("Feature 2")
Explanation: Simply plot the dataset
End of explanation
from sklearn.model_selection import train_test_split
train_performance = []
test_performance = []
for i in range(500):
X_train, X_test, y_train,y_test = train_test_split(X, y, train_size=0.5)
kN_classifier = KNeighborsClassifier(n_neighbors=5)
kN_classifier.fit(X_train, y_train)
# store the performance of the kN_classifier on the train set
train_performance.append(kN_classifier.score(X_train, y_train))
# store the performance of the kN_classifier on the test set
test_performance.append(kN_classifier.score(X_test, y_test))
train_test_performances = [train_performance, test_performance]
fig = plt.figure()
ax = fig.add_subplot(111)
medianprops = dict(linestyle=':', linewidth=2.5, color='grey')
meanlineprops = dict(linestyle='-', linewidth=2.5, color='black')
bp = ax.boxplot(train_test_performances, medianprops=medianprops, whis=[5,95], meanprops=meanlineprops, meanline=True, showmeans=True)
bp['boxes'][0].set(color = 'g', lw = 2)
bp['boxes'][1].set(color = 'r', lw = 2)
ax.set_xticklabels(['Train', 'Test'])
ax.set_ylabel("Accuracy")
Explanation: Splitting the data between a training set and test.
The split operation can be done using different ratios of train set and test set; I will comment on this later. For now, I decided to take half of the data as the training set and the other half as the test set. Intuitively, we can already argue that this choice may be the best tradeoff between taking a bigger portion of the data as the training set (thus facing the risk of overfitting) and the other way around, i.e. having a training set too small to accurately estimate parameters that can reproduce the trend observed for unseen data, i.e. for the test set.
For now, I fixed the number of neighbors ("n_neighbors") of the model to 5, so that the performance can be evaluated (as asked in the next steps) for this fixed hyperparameter.
End of explanation
train_performance_splits = []
test_performance_splits = []
range_ft = np.linspace(0.1,0.9,9)
for i in range(500):
train_performance = []
test_performance = []
for ft in range_ft:
# choose each time a different random number to randomly split the dataset into train and test set
X_train, X_test, y_train,y_test = train_test_split(X, y, train_size=ft)
kN_classifier = KNeighborsClassifier(n_neighbors=5)
kN_classifier.fit(X_train, y_train)
# store the performance of the kN_classifier on the train set
train_performance.append(kN_classifier.score(X_train, y_train))
# store the performance of the kN_classifier on the test set
test_performance.append(kN_classifier.score(X_test, y_test))
#
train_performance_splits.append(train_performance)
test_performance_splits.append(test_performance)
train_performance_splits = np.array(train_performance_splits)
test_performance_splits = np.array(test_performance_splits)
fig = plt.figure()
ax = fig.add_subplot(111)
mean_train_performance_splits = np.mean(train_performance_splits, axis=0)
std_train_performance_splits = np.std(train_performance_splits, axis=0)
mean_test_performance_splits = np.mean(test_performance_splits, axis=0)
std_test_performance_splits = np.std(test_performance_splits, axis=0)
ax.plot(range_ft, mean_train_performance_splits, color='g', label='Train', lw=4)
ax.plot(range_ft, mean_train_performance_splits + std_train_performance_splits, range_ft, mean_train_performance_splits - std_train_performance_splits, color='g', alpha=0.5, ls="--")
ax.plot(range_ft, mean_test_performance_splits, color='r', label='Test', lw=4)
ax.plot(range_ft, mean_test_performance_splits + std_test_performance_splits, range_ft, mean_test_performance_splits - std_test_performance_splits, color='r', alpha=0.5, ls="--")
ax.set_xlabel("Train set fraction")
ax.set_ylabel("Accuracy")
plt.legend(loc='best')
# plt.plot(ks, np.array(accuracies_test).mean(axis=0), label='Test', c='r', lw=4)
# plt.plot(ks, np.array(accuracies_train).mean(axis=0), label='Train', c='b', lw=4)
# plt.xlabel('k or inverse model complexity')
# plt.ylabel('accuracy')
# plt.legend(loc='best')
# plt.xlim((0, max(ks)))
# plt.ylim((0.4, 1.));
Explanation: In the boxplots, we summarize the distribution of the scores for the training and test set. The upper and lower bounds of the boxes mark the first and third quartiles, while the vertical lines outside the boxes represent the 5th and 95th percentiles. Points drawn as dots are outliers that fall outside the 5th-95th percentile range. Black horizontal lines represent the means, while dotted grey lines mark the medians.
As we can see, the means (and medians) of the training and test sets are quite far apart. Moreover, the 95th percentile of the test set is only slightly above the 5th percentile of the training set. Overall, this means that the scores on the training set are systematically higher than the ones on the test set, given their distributions. Of course, some test-set scores are higher than the lowest training-set scores, due to fluctuations in the score values.
Varying the splits between datasets (10-90, 20-80, 30-70, ...)
End of explanation
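As an optional cross-check of the hand-rolled split experiment above, scikit-learn's learning_curve utility can produce the same accuracy-versus-train-size picture in a few lines. This is only a sketch: the ShuffleSplit settings and the fraction grid below are illustrative assumptions, not part of the original exercise.
from sklearn.model_selection import learning_curve, ShuffleSplit
# Cross-check sketch: learning_curve repeats the random split internally for each train size.
sizes, tr_scores, te_scores = learning_curve(
    KNeighborsClassifier(n_neighbors=5), X, y,
    train_sizes=np.linspace(0.1, 0.9, 9),
    cv=ShuffleSplit(n_splits=100, test_size=0.5, random_state=0))
plt.plot(sizes, tr_scores.mean(axis=1), color='g', label='Train', lw=4)
plt.plot(sizes, te_scores.mean(axis=1), color='r', label='Test', lw=4)
plt.xlabel("Number of training points")
plt.ylabel("Accuracy")
plt.legend(loc='best')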
train_performance_nn = []
test_performance_nn = []
# We fix the ratio between train and test size at 0.5
range_nn = range(1,26,1)
for i in range(500):
train_performance = []
test_performance = []
for nn in range_nn:
X_train, X_test, y_train,y_test = train_test_split(X, y, train_size=0.5)
kN_classifier = KNeighborsClassifier(n_neighbors=nn)
kN_classifier.fit(X_train, y_train)
# store the performance of the kN_classifier on the train set
train_performance.append(kN_classifier.score(X_train, y_train))
# store the performance of the kN_classifier on the test set
test_performance.append(kN_classifier.score(X_test, y_test))
#
train_performance_nn.append(train_performance)
test_performance_nn.append(test_performance)
train_performance_nn = np.array(train_performance_nn)
test_performance_nn = np.array(test_performance_nn)
fig = plt.figure()
ax = fig.add_subplot(111)
mean_train_performance_nn = np.mean(train_performance_nn, axis=0)
std_train_performance_nn = np.std(train_performance_nn, axis=0)
mean_test_performance_nn = np.mean(test_performance_nn, axis=0)
std_test_performance_nn = np.std(test_performance_nn, axis=0)
ax.plot(range_nn, mean_train_performance_nn, color='g', label='Train', lw=4)
ax.plot(range_nn, mean_train_performance_nn + std_train_performance_nn, range_nn, mean_train_performance_nn - std_train_performance_nn, color='g', alpha=0.5, ls="--")
ax.plot(range_nn, mean_test_performance_nn, color='r', label='Test', lw=4)
ax.plot(range_nn, mean_test_performance_nn + std_test_performance_nn, range_nn, mean_test_performance_nn - std_test_performance_nn, color='r', alpha=0.5, ls="--")
ax.set_xlabel("n_neighbors")
ax.set_ylabel("Accuracy")
plt.legend(loc='best')
# plt.plot(ks, np.array(accuracies_test).mean(axis=0), label='Test', c='r', lw=4)
# plt.plot(ks, np.array(accuracies_train).mean(axis=0), label='Train', c='b', lw=4)
# plt.xlabel('k or inverse model complexity')
# plt.ylabel('accuracy')
# plt.legend(loc='best')
# plt.xlim((0, max(ks)))
# plt.ylim((0.4, 1.));
Explanation: Here, the two scores start out with a big difference between their average values (thick lines). The train set is small, so we are not using much data to train the classifier. Increasing the train-size fraction provides a larger number of points, which improves the performance of the classifier on both the training and test sets, since we give it more statistics.
In particular, we see that the mean score (for both sets) as a function of the training-set fraction rises steeply at first, up to a fraction of about 0.4, after which it stays almost constant. This means that increasing the train-set size does not significantly improve the accuracy of the results (on average) beyond a given point. Note, however, that the standard deviation of the test-set score increases, especially at high values of the train-set size: in that regime we have a lot of data to fit the model but little left to estimate its accuracy, so we are more prone to overfitting the train set. The test-set curve seems to show a maximum at 0.5, which means that a roughly optimal split is attained by dividing the dataset in half. This is reasonable, as already commented in the part "Splitting the data between a training set and test."
Varying n_neighbors
End of explanation
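A compact way to confirm the best n_neighbors suggested by the curves above is a cross-validated grid search; this is only a sketch, and the 1-25 grid and 5-fold CV are assumptions mirroring the range used in the plot.
from sklearn.model_selection import GridSearchCV
# Sketch: let cross-validation pick n_neighbors instead of reading it off the plot.
search = GridSearchCV(KNeighborsClassifier(),
                      param_grid={'n_neighbors': list(range(1, 26))},
                      cv=5)
search.fit(X, y)
print(search.best_params_, search.best_score_)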
def make_regression(n_samples=100, noise_level=0.8, random_state=2):
rng = check_random_state(random_state)
X = np.linspace(-2, 2, n_samples)
y = 2 * X + np.sin(5 * X) + rng.randn(n_samples) * noise_level
return X.reshape(-1, 1), y
# Your solution
Explanation: Question 2
This is a regression problem. It mostly follows the setup of the classification problem so you should be able to reuse some of your work.
plot your dataset
fit a kNN regressor with varying number of n_neighbors and compare each regressor's predictions to the location of the training and testing points.
plot the mean squared error of the classifier as a function of n_neighbors for both training and testing datasets.
comment on the similarities and differences between the performance on the testing and training dataset.
find the best setting of n_neighbors for this dataset.
why is this the best setting?
can you explain why the mean square error on the training dataset plateaus between ~n_neighbors=5 to 15 at the value that it does?
Use make_regression() to create the dataset and use KNeighborsRegressor to answer the above questions. Take a look at scikit-learn's metrics module to compute the mean squared error.
End of explanation
from sklearn.linear_model import LogisticRegression
def make_two_blobs(n_samples=400, cluster_std=2., random_state=42):
rng = check_random_state(random_state)
X = rng.multivariate_normal([5,0], [[cluster_std**2, 0], [0., cluster_std**2]],
size=n_samples//2)
X2 = rng.multivariate_normal([0, 5.], [[cluster_std**2, 0], [0., cluster_std**2]],
size=n_samples//2)
X = np.vstack((X, X2))
return X, np.hstack((np.ones(n_samples//2), np.zeros(n_samples//2)))
X, y = make_two_blobs()
labels = ['b', 'r']
y = np.take(labels, (y < 0.5))
# Your answer
Explanation: Question 3
Logistic regression. Use a linear model to solve a two class classification problem.
What is the difference between a linear regression model and a logistic regression model?
plot your data and split it into a training and test set
draw your guess for where the decision boundary will be on the plot. Why did you pick this one?
use the LogisticRegression classifier to fit a model to your training data
extract the fitted coefficients from the model and draw the fitted decision boundary
create a function to draw the decision surface (the classifier's prediction for every point in space)
why is the boundary where it is?
(bonus) create new datasets with increasingly larger amounts of noise (increase the cluster_std argument) and plot the decision boundary for each case. What happens and why?
create 20 new datasets by changing the random_state parameter and fit a model to each. Visualise the variation in the fitted parameters and the decision boundaries you obtain. Is this a high or low variance model?
Use make_two_blobs() to create a simple dataset and use the LogisticRegression classifier to answer the above questions.
End of explanation
from sklearn.datasets import make_circles
X, y = make_circles(n_samples=400, factor=.3, noise=.1)
labels = ['b', 'r']
y = np.take(labels, (y < 0.5))
plt.scatter(X[:,0], X[:,1], c=y)
# Your answer
Explanation: Question 4
Logistic regression. Use a more complex linear model to create a two class classifier for the "circle inside a circle" problem. Think about how you can increase the complexity of a logistic regression model. Visualise the classification accuracy as a function of the model complexity.
Use make_circles(n_samples=400, factor=.3, noise=.1) to create a simple dataset and use the LogisticRegression classifier to answer the above question.
End of explanation |
11,306 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Step1: 2017 Hurricane Tracks
Demonstrates how to plot all the North American hurricane tracks in 2017, starting from the BigQuery public dataset.
Step2: Plot one of the hurricanes
Let's just plot the track of Hurricane MARIA
Step3: Plot all the hurricanes
Use line thickness based on the maximum category reached by the hurricane | Python Code:
%bash
apt-get update
apt-get -y install python-mpltoolkits.basemap
from mpl_toolkits.basemap import Basemap
import google.datalab.bigquery as bq
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
query = """
#standardSQL
SELECT
  name,
  latitude,
  longitude,
  iso_time,
  usa_sshs
FROM
  `bigquery-public-data.noaa_hurricanes.hurricanes`
WHERE
  basin = 'NA'
  AND season = '2017'
"""
df = bq.Query(query).execute().result().to_dataframe()
df.head()
Explanation: 2017 Hurricane Tracks
Demonstrates how to plot all the North American hurricane tracks in 2017, starting from the BigQuery public dataset.
End of explanation
maria = df[df['name'] == 'MARIA'].sort_values('iso_time')
m = Basemap(llcrnrlon=-100.,llcrnrlat=0.,urcrnrlon=-20.,urcrnrlat=57.,
projection='lcc',lat_1=20.,lat_2=40.,lon_0=-60.,
resolution ='l',area_thresh=1000.)
x, y = m(maria['longitude'].values,maria['latitude'].values)
m.plot(x,y,linewidth=5,color='r')
# draw coastlines, meridians and parallels.
m.drawcoastlines()
m.drawcountries()
m.drawmapboundary(fill_color='#99ffff')
m.fillcontinents(color='#cc9966',lake_color='#99ffff')
m.drawparallels(np.arange(10,70,20),labels=[1,1,0,0])
m.drawmeridians(np.arange(-100,0,20),labels=[0,0,0,1])
plt.title('Hurricane Maria (2017)');
Explanation: Plot one of the hurricanes
Let's just plot the track of Hurricane MARIA
End of explanation
names = df.name.unique()
names
m = Basemap(llcrnrlon=-100.,llcrnrlat=0.,urcrnrlon=-20.,urcrnrlat=57.,
projection='lcc',lat_1=20.,lat_2=40.,lon_0=-60.,
resolution ='l',area_thresh=1000.)
for name in names:
if name != 'NOT_NAMED':
named = df[df['name'] == name].sort_values('iso_time')
x, y = m(named['longitude'].values,named['latitude'].values)
maxcat = max(named['usa_sshs'])
m.plot(x,y,linewidth=maxcat,color='b')
# draw coastlines, meridians and parallels.
m.drawcoastlines()
m.drawcountries()
m.drawmapboundary(fill_color='#99ffff')
m.fillcontinents(color='#cc9966',lake_color='#99ffff')
m.drawparallels(np.arange(10,70,20),labels=[1,1,0,0])
m.drawmeridians(np.arange(-100,0,20),labels=[0,0,0,1])
plt.title('Named North-Atlantic hurricanes (2017)');
Explanation: Plot all the hurricanes
Use line thickness based on the maximum category reached by the hurricane
End of explanation |
11,307 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Spatiotemporal permutation F-test on full sensor data
Tests for differential evoked responses in at least
one condition using a permutation clustering test.
The FieldTrip neighbor templates will be used to determine
the adjacency between sensors. This serves as a spatial prior
to the clustering. Significant spatiotemporal clusters will then
be visualized using custom matplotlib code.
Step1: Set parameters
Step2: Read epochs for the channel of interest
Step3: Load FieldTrip neighbor definition to setup sensor connectivity
Step4: Compute permutation statistic
How does it work? We use clustering to bind together features which are
similar. Our features are the magnetic fields measured over our sensor
array at different times. This reduces the multiple comparison problem.
To compute the actual test-statistic, we first sum all F-values in all
clusters. We end up with one statistic for each cluster.
Then we generate a distribution from the data by shuffling our conditions
between our samples and recomputing our clusters and the test statistics.
We test for the significance of a given cluster by computing the probability
of observing a cluster of that size. For more background read
Step5: Note. The same functions work with source estimate. The only differences
are the origin of the data, the size, and the connectivity definition.
It can be used for single trials or for groups of subjects.
Visualize clusters | Python Code:
# Authors: Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mne.viz import plot_topomap
import mne
from mne.stats import spatio_temporal_cluster_test
from mne.datasets import sample
from mne.channels import read_ch_connectivity
print(__doc__)
Explanation: Spatiotemporal permutation F-test on full sensor data
Tests for differential evoked responses in at least
one condition using a permutation clustering test.
The FieldTrip neighbor templates will be used to determine
the adjacency between sensors. This serves as a spatial prior
to the clustering. Significant spatiotemporal clusters will then
be visualized using custom matplotlib code.
End of explanation
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id = {'Aud_L': 1, 'Aud_R': 2, 'Vis_L': 3, 'Vis_R': 4}
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 30, l_trans_bandwidth='auto', h_trans_bandwidth='auto',
filter_length='auto', phase='zero')
events = mne.read_events(event_fname)
Explanation: Set parameters
End of explanation
picks = mne.pick_types(raw.info, meg='mag', eog=True)
reject = dict(mag=4e-12, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=None, reject=reject, preload=True)
epochs.drop_channels(['EOG 061'])
epochs.equalize_event_counts(event_id, copy=False)
condition_names = 'Aud_L', 'Aud_R', 'Vis_L', 'Vis_R'
X = [epochs[k].get_data() for k in condition_names] # as 3D matrix
X = [np.transpose(x, (0, 2, 1)) for x in X] # transpose for clustering
Explanation: Read epochs for the channel of interest
End of explanation
connectivity, ch_names = read_ch_connectivity('neuromag306mag')
print(type(connectivity)) # it's a sparse matrix!
plt.imshow(connectivity.toarray(), cmap='gray', origin='lower',
interpolation='nearest')
plt.xlabel('{} Magnetometers'.format(len(ch_names)))
plt.ylabel('{} Magnetometers'.format(len(ch_names)))
plt.title('Between-sensor adjacency')
Explanation: Load FieldTrip neighbor definition to setup sensor connectivity
End of explanation
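If the FieldTrip template were not available, a similar adjacency could be derived directly from the measurement info. This uses a different MNE helper (added in newer MNE versions) than the template-based approach above, so treat it as an optional variant rather than part of the original example.
from mne.channels import find_ch_connectivity
# Optional variant (not used below): build the magnetometer adjacency from epochs.info.
connectivity_alt, ch_names_alt = find_ch_connectivity(epochs.info, ch_type='mag')
print(connectivity_alt.shape)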
# set cluster threshold
threshold = 50.0 # very high, but the test is quite sensitive on this data
# set family-wise p-value
p_accept = 0.001
cluster_stats = spatio_temporal_cluster_test(X, n_permutations=1000,
threshold=threshold, tail=1,
n_jobs=1,
connectivity=connectivity)
T_obs, clusters, p_values, _ = cluster_stats
good_cluster_inds = np.where(p_values < p_accept)[0]
Explanation: Compute permutation statistic
How does it work? We use clustering to bind together features which are
similar. Our features are the magnetic fields measured over our sensor
array at different times. This reduces the multiple comparison problem.
To compute the actual test-statistic, we first sum all F-values in all
clusters. We end up with one statistic for each cluster.
Then we generate a distribution from the data by shuffling our conditions
between our samples and recomputing our clusters and the test statistics.
We test for the significance of a given cluster by computing the probability
of observing a cluster of that size. For more background read:
Maris/Oostenveld (2007), "Nonparametric statistical testing of EEG- and
MEG-data" Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
doi:10.1016/j.jneumeth.2007.03.024
End of explanation
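To make the permutation logic above concrete, here is a toy numeric illustration with made-up numbers (not derived from the MEG data): the cluster-level p-value is simply the fraction of shuffled datasets whose largest cluster statistic is at least as big as the observed one.
rng = np.random.RandomState(0)
observed_cluster_stat = 820.0  # e.g. a sum of F-values inside one observed cluster
null_max_stats = rng.gamma(shape=5., scale=60., size=1000)  # stand-in for the shuffled maxima
p_toy = (null_max_stats >= observed_cluster_stat).mean()
print('toy p-value: %.3f' % p_toy)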
# configure variables for visualization
times = epochs.times * 1e3
colors = 'r', 'r', 'steelblue', 'steelblue'
linestyles = '-', '--', '-', '--'
# grand average as numpy arrray
grand_ave = np.array(X).mean(axis=1)
# get sensor positions via layout
pos = mne.find_layout(epochs.info).pos
# loop over significant clusters
for i_clu, clu_idx in enumerate(good_cluster_inds):
# unpack cluster information, get unique indices
time_inds, space_inds = np.squeeze(clusters[clu_idx])
ch_inds = np.unique(space_inds)
time_inds = np.unique(time_inds)
# get topography for F stat
f_map = T_obs[time_inds, ...].mean(axis=0)
# get signals at significant sensors
signals = grand_ave[..., ch_inds].mean(axis=-1)
sig_times = times[time_inds]
# create spatial mask
mask = np.zeros((f_map.shape[0], 1), dtype=bool)
mask[ch_inds, :] = True
# initialize figure
fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3))
title = 'Cluster #{0}'.format(i_clu + 1)
fig.suptitle(title, fontsize=14)
# plot average test statistic and mark significant sensors
image, _ = plot_topomap(f_map, pos, mask=mask, axes=ax_topo,
cmap='Reds', vmin=np.min, vmax=np.max)
# advanced matplotlib for showing image with figure and colorbar
# in one plot
divider = make_axes_locatable(ax_topo)
# add axes for colorbar
ax_colorbar = divider.append_axes('right', size='5%', pad=0.05)
plt.colorbar(image, cax=ax_colorbar)
ax_topo.set_xlabel('Averaged F-map ({:0.1f} - {:0.1f} ms)'.format(
*sig_times[[0, -1]]
))
# add new axis for time courses and plot time courses
ax_signals = divider.append_axes('right', size='300%', pad=1.2)
for signal, name, col, ls in zip(signals, condition_names, colors,
linestyles):
ax_signals.plot(times, signal, color=col, linestyle=ls, label=name)
# add information
ax_signals.axvline(0, color='k', linestyle=':', label='stimulus onset')
ax_signals.set_xlim([times[0], times[-1]])
ax_signals.set_xlabel('time [ms]')
ax_signals.set_ylabel('evoked magnetic fields [fT]')
# plot significant time range
ymin, ymax = ax_signals.get_ylim()
ax_signals.fill_betweenx((ymin, ymax), sig_times[0], sig_times[-1],
color='orange', alpha=0.3)
ax_signals.legend(loc='lower right')
ax_signals.set_ylim(ymin, ymax)
# clean up viz
mne.viz.tight_layout(fig=fig)
fig.subplots_adjust(bottom=.05)
plt.show()
Explanation: Note. The same functions work with source estimate. The only differences
are the origin of the data, the size, and the connectivity definition.
It can be used for single trials or for groups of subjects.
Visualize clusters
End of explanation |
11,308 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
First, here's the SPA power function
Step1: Here are two helper functions for computing the dot product over space, and for plotting the results | Python Code:
import numpy as np
# NOTE: `spa` (providing SemanticPointer below) and matplotlib's `plt` are assumed to be
# imported in an earlier notebook cell that is not shown here (e.g. `from nengo import spa`).
def power(s, e):
x = np.fft.ifft(np.fft.fft(s.v) ** e).real
return spa.SemanticPointer(data=x)
Explanation: First, here's the SPA power function:
End of explanation
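A quick sanity check one might run on power (assuming numpy and the spa module providing SemanticPointer are already imported in this notebook): raising a unitary pointer to the power 1 should return essentially the same vector.
# Sanity-check sketch (relies on the notebook's existing imports for `spa` and `np`).
_s = spa.SemanticPointer(64)
_s.make_unitary()
print(np.allclose(power(_s, 1).v, _s.v))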
def spatial_dot(v, X, Y, Z, xs, ys, transform=1):
vs = np.zeros((len(ys),len(xs)))
for i,x in enumerate(xs):
for j, y in enumerate(ys):
# convert from cartesian to hex axial coordinates
hx = 2/3 * y
hy = (np.sqrt(3)/3 * x - y/3 )
hz = -(np.sqrt(3)/3 * x + y/3 )
#hx = x
#hy=y
#hz = np.linalg.norm([x, y])
t = power(X, hx)*power(Y,hy)*power(Z, hz)*transform
vs[j,i] = np.dot(v.v, t.v)
return vs
def spatial_plot(vs, vmax=1, vmin=-1, colorbar=True):
vs = vs[::-1, :]
plt.imshow(vs, interpolation='none', extent=(xs[0],xs[-1],ys[0],ys[-1]), vmax=vmax, vmin=vmin, cmap='plasma')
if colorbar:
plt.colorbar()
D = 256
X = spa.SemanticPointer(D)
X.make_unitary()
Y = spa.SemanticPointer(D)
Y.make_unitary()
Z = spa.SemanticPointer(D)
Z.make_unitary()
W = 10
Q = 100
xs = np.linspace(-W, W, Q)
ys = np.linspace(-W, W, Q)
def relu(x):
return np.maximum(x, 0)
M = 3
plt.figure(figsize=(12,12))
for i in range(M):
for j in range(M):
plt.subplot(M, M, i*M+j+1)
spatial_plot(relu(spatial_dot(spa.SemanticPointer(D), X, Y, Z, xs, ys)), vmin=None, vmax=None, colorbar=False)
Explanation: Here are two helper functions for computing the dot product over space, and for plotting the results
End of explanation |
11,309 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Exercise 6.7
Step1: Create Sarsa Agent
Step2: Evaluate agents with different action set
Step3: Exercise 6.8 | Python Code:
import numpy as np
ACTION_TO_XY = {
'left': (-1, 0),
'right': (1, 0),
'up': (0, 1),
'down': (0, -1),
'up_left': (-1, 1),
'down_left': (-1, -1),
'up_right': (1, 1),
'down_right': (1, -1),
'stop': (0, 0)
}
# convert tuples to np so we can do math with states
ACTION_TO_XY = {a: np.array(xy) for a, xy in ACTION_TO_XY.items()}
WIND = [0, 0, 0, 1, 1, 1, 2, 2, 1, 0]
class WindyGridworld(object):
def __init__(self):
self._state = None
self._goal = np.array([7, 3]) # goal state, XY
self._start = np.array([0, 3]) # start state, XY
self.shape = [10, 7] # grid world shape, XY
self._wind_x = WIND
assert len(self._wind_x) == self.shape[0]
def reset(self):
self._state = self._start.copy()
return tuple(self._state)
def _clip_state(self):
self._state[0] = np.clip(self._state[0], 0, self.shape[0] - 1) # clip x
self._state[1] = np.clip(self._state[1], 0, self.shape[1] - 1) # clip y
def step(self, action):
a_xy = ACTION_TO_XY[action]
# apply wind shift
wind_shift = [0, self._wind_x[self._state[0]]]
self._state += np.array(wind_shift)
self._clip_state()
# apply action
self._state += a_xy
self._clip_state()
reward = -1
term = True if np.all(self._goal == self._state) else False
return tuple(self._state), reward, term, None
Explanation: Exercise 6.7: Windy Gridworld with King’s Moves
Exercise 6.7: Windy Gridworld with King’s Moves
Create Windy Grid World environment
End of explanation
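A quick optional smoke test of the environment above (not part of the original exercise): stepping right from the start state shows the wind pushing the agent upward once it reaches the windy columns.
# Smoke-test sketch: the wind column values (index 3 onwards) start lifting the agent.
env = WindyGridworld()
state = env.reset()
for _ in range(6):
    state, reward, term, _ = env.step('right')
    print(state, reward, term)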
from collections import defaultdict, namedtuple
import random
from tqdm import tqdm
Transition = namedtuple('Transition', ['state1',
'action',
'reward',
'state2'])
class SarsaAgent(object):
def __init__(self, env, actions, alpha=0.5, epsilon=0.1, gamma=1):
self._env = env
self._actions = actions
self._alpha = alpha
self._epsilon = epsilon
self._gamma = gamma
self.episodes = []
# init q table
self._q = {}
action_vals = {a: 0 for a in self._actions}
for x in range(self._env.shape[0]):
for y in range(self._env.shape[1]):
self._q[(x,y)] = dict(action_vals)
def random_policy(self, state):
return random.choice(self._actions)
def greedy_policy(self, state):
return max(self._q[state], key=self._q[state].get)
def e_greedy_policy(self, state):
if np.random.rand() > self._epsilon:
action = self.greedy_policy(state)
else:
action = self.random_policy(state)
return action
def play_episode(self):
s1 = self._env.reset()
a1 = self.e_greedy_policy(s1)
transitions = []
while True:
s2, r, term, _ = self._env.step(a1)
a2 = self.e_greedy_policy(s2)
target = r + self._gamma*self._q[s2][a2]
            if term:
                # no bootstrap from the terminal state: the target is just the final reward
                target = r
self._q[s1][a1] = self._q[s1][a1] + self._alpha*(target - self._q[s1][a1])
            transitions.append(Transition(s1, a1, r, s2))
            s1 = s2
            a1 = a2
if term:
break
return transitions
def learn(self, n_episodes=500):
for _ in tqdm(range(n_episodes)):
transitions = self.play_episode()
self.episodes.append(transitions)
Explanation: Create Sarsa Agent
End of explanation
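A short run (using only the classes defined above) shows how the agent is trained and how the learned Q-values and greedy action can be inspected before the full comparison below:
env = WindyGridworld()
agent = SarsaAgent(env, ['left', 'right', 'up', 'down'])
agent.learn(n_episodes=50)
print('greedy action at the start state:', agent.greedy_policy((0, 3)))
print('Q-values at the start state:', agent._q[(0, 3)])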
%matplotlib inline
import matplotlib.pyplot as plt
actions4 = ['left', 'right', 'up', 'down']
actions8 = ['left', 'right', 'up', 'down', 'up_left', 'down_left', 'up_right', 'down_right']
actions9 = ['left', 'right', 'up', 'down', 'up_left', 'down_left', 'up_right', 'down_right', 'stop']
ACTION_TO_ARROW = {
'left': '⇽',
'right': '→',
'up': '↑',
'down': '↓',
'up_left': '↖',
'down_left': '↙',
'up_right': '↗',
'down_right': '↘',
'stop': '○'
}
def evaluate(agent, title):
agent.learn()
total_rewards = []
episode_ids = []
for e_id, episode in enumerate(agent.episodes):
rewards = map(lambda e: e.reward, episode)
total_rewards.append(sum(rewards))
episode_ids.extend([e_id] * len(episode))
fig, axs = plt.subplots(1, 2, figsize=(16, 6))
# display total reward vs episodes
ax = axs[0]
ax.plot(total_rewards)
ax.grid()
ax.set_title(title)
ax.set_xlabel('episode')
ax.set_ylabel('Total rewards')
# display time steps vs episodes
ax = axs[1]
ax.plot(episode_ids)
ax.grid()
ax.set_xlabel('Time steps')
ax.set_ylabel('Episodes')
q = agent._q
for y in range(agent._env.shape[1] - 1, -1, -1):
row = []
for x in range(agent._env.shape[0]):
state = (x,y)
a = max(q[state], key=q[state].get)
row.append(ACTION_TO_ARROW[a])
# row.append(a)
print(row)
print([str(w) for w in WIND])
world = WindyGridworld()
agent4 = SarsaAgent(world, actions4)
evaluate(agent4, 'Agent with 4 actions')
agent8 = SarsaAgent(world, actions8)
evaluate(agent8, 'Agent with 8 actions')
agent9 = SarsaAgent(world, actions9)
evaluate(agent9, 'Agent with 9 actions')
Explanation: Evaluate agents with different action set
End of explanation
class StochasticWindyGridworld(WindyGridworld):
def step(self, action):
a_xy = ACTION_TO_XY[action]
# apply wind shift
wind = self._wind_x[self._state[0]]
if wind > 0:
wind = random.choice([wind - 1, wind, wind + 1])
wind_shift = [0, wind]
self._state += np.array(wind_shift)
self._clip_state()
# apply action
self._state += a_xy
self._clip_state()
reward = -1
term = True if np.all(self._goal == self._state) else False
return tuple(self._state), reward, term, None
stochastic_world = StochasticWindyGridworld()
agent4 = SarsaAgent(stochastic_world, actions4)
evaluate(agent4, 'Agent with 4 actions')
agent8 = SarsaAgent(stochastic_world, actions8)
evaluate(agent8, 'Agent with 8 actions')
agent9 = SarsaAgent(stochastic_world, actions9)
evaluate(agent9, 'Agent with 9 actions')
Explanation: Exercise 6.8: Stochastic Wind
End of explanation |
11,310 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Principal Component Analysis in Shogun
By Abhijeet Kislay (GitHub ID
Step1: Some Formal Background (Skip if you just want code examples)
PCA is a useful statistical technique that has found application in fields such as face recognition and image compression, and is a common technique for finding patterns in data of high dimension.
In machine learning problems data is often high dimensional - images, bag-of-word descriptions etc. In such cases we cannot expect the training data to densely populate the space, meaning that there will be large parts in which little is known about the data. Hence it is expected that only a small number of directions are relevant for describing the data to a reasonable accuracy.
The data vectors may be very high dimensional, they will therefore typically lie closer to a much lower dimensional 'manifold'.
Here we concentrate on linear dimensional reduction techniques. In this approach a high dimensional datapoint $\mathbf{x}$ is 'projected down' to a lower dimensional vector $\mathbf{y}$ by
Step2: Step 2
Step3: Step 3
Step4: Step 5
Step5: In the above figure, the blue line is a good fit of the data. It shows the most significant relationship between the data dimensions.
It turns out that the eigenvector with the $highest$ eigenvalue is the $principle$ $component$ of the data set.
Form the matrix $\mathbf{E}=[\mathbf{e}^1,...,\mathbf{e}^M].$
Here $\text{M}$ represents the target dimension of our final projection
Step6: Step 6
Step7: Step 5 and Step 6 can be applied directly with Shogun's PCA preprocessor (from next example). It has been done manually here to show the exhaustive nature of Principal Component Analysis.
Step 7
Step8: The new data is plotted below
Step9: PCA on a 3d data.
Step1
Step10: Step 2
Step11: Step 3 & Step 4
Step12: Steps 5
Step13: Step 7
Step15: PCA Performance
Uptill now, we were using the EigenValue Decomposition method to compute the transformation matrix$\text{(N>D)}$ but for the next example $\text{(N<D)}$ we will be using Singular Value Decomposition.
Practical Example
Step16: Lets have a look on the data
Step17: Represent every image $I_i$ as a vector $\Gamma_i$
Step18: Step 2
Step19: Step 3 & Step 4
Step20: These 20 eigenfaces are not sufficient for a good image reconstruction. Having more eigenvectors gives us the most flexibility in the number of faces we can reconstruct. Though we are adding vectors with low variance, they are in directions of change nonetheless, and an external image that is not in our database could in fact need these eigenvectors to get even relatively close to it. But at the same time we must also keep in mind that adding excessive eigenvectors results in addition of little or no variance, slowing down the process.
Clearly a tradeoff is required.
We here set for M=100.
Step 5
Step21: Step 7
Step22: Recognition part.
In our face recognition process using the Eigenfaces approach, in order to recognize an unseen image, we proceed with the same preprocessing steps as applied to the training images.
Test images are represented in terms of eigenface coefficients by projecting them into face space$\text{(eigenspace)}$ calculated during training. Test sample is recognized by measuring the similarity distance between the test sample and all samples in the training. The similarity measure is a metric of distance calculated between two vectors. Traditional Eigenface approach utilizes $\text{Euclidean distance}$.
Step23: Here we have to project our training image as well as the test image on the PCA subspace.
The Eigenfaces method then performs face recognition by
Step24: Shogun's way of doing things | Python Code:
%pylab inline
%matplotlib inline
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
# import all shogun classes
from shogun import *
import shogun as sg
Explanation: Principal Component Analysis in Shogun
By Abhijeet Kislay (GitHub ID: <a href='https://github.com/kislayabhi'>kislayabhi</a>)
This notebook is about finding Principal Components (<a href="http://en.wikipedia.org/wiki/Principal_component_analysis">PCA</a>) of data (<a href="http://en.wikipedia.org/wiki/Unsupervised_learning">unsupervised</a>) in Shogun. Its <a href="http://en.wikipedia.org/wiki/Dimensionality_reduction">dimensional reduction</a> capabilities are further utilised to show its application in <a href="http://en.wikipedia.org/wiki/Data_compression">data compression</a>, image processing and <a href="http://en.wikipedia.org/wiki/Facial_recognition_system">face recognition</a>.
End of explanation
#number of data points.
n=100
#generate a random 2d line(y1 = mx1 + c)
m = random.randint(1,10)
c = random.randint(1,10)
x1 = random.random_integers(-20,20,n)
y1=m*x1+c
#generate the noise.
noise=random.random_sample([n]) * random.random_integers(-35,35,n)
#make the noise orthogonal to the line y=mx+c and add it.
x=x1 + noise*m/sqrt(1+square(m))
y=y1 + noise/sqrt(1+square(m))
twoD_obsmatrix=array([x,y])
#to visualise the data we must plot it.
rcParams['figure.figsize'] = 7, 7
figure,axis=subplots(1,1)
xlim(-50,50)
ylim(-50,50)
axis.plot(twoD_obsmatrix[0,:],twoD_obsmatrix[1,:],'o',color='green',markersize=6)
#the line from which we generated the data is plotted in red
axis.plot(x1[:],y1[:],linewidth=0.3,color='red')
title('One-Dimensional sub-space with noise')
xlabel("x axis")
_=ylabel("y axis")
Explanation: Some Formal Background (Skip if you just want code examples)
PCA is a useful statistical technique that has found application in fields such as face recognition and image compression, and is a common technique for finding patterns in data of high dimension.
In machine learning problems data is often high dimensional - images, bag-of-word descriptions etc. In such cases we cannot expect the training data to densely populate the space, meaning that there will be large parts in which little is known about the data. Hence it is expected that only a small number of directions are relevant for describing the data to a reasonable accuracy.
The data vectors may be very high dimensional, they will therefore typically lie closer to a much lower dimensional 'manifold'.
Here we concentrate on linear dimensional reduction techniques. In this approach a high dimensional datapoint $\mathbf{x}$ is 'projected down' to a lower dimensional vector $\mathbf{y}$ by:
$$\mathbf{y}=\mathbf{F}\mathbf{x}+\text{const}.$$
where the matrix $\mathbf{F}\in\mathbb{R}^{\text{M}\times \text{D}}$, with $\text{M}<\text{D}$. Here $\text{M}=\dim(\mathbf{y})$ and $\text{D}=\dim(\mathbf{x})$.
From the above scenario, we assume that
The number of principal components to use is $\text{M}$.
The dimension of each data point is $\text{D}$.
The number of data points is $\text{N}$.
We express the approximation for datapoint $\mathbf{x}^n$ as:$$\mathbf{x}^n \approx \mathbf{c} + \sum\limits_{i=1}^{\text{M}}y_i^n \mathbf{b}^i \equiv \tilde{\mathbf{x}}^n.$$
* Here the vector $\mathbf{c}$ is a constant and defines a point in the lower dimensional space.
* The $\mathbf{b}^i$ define vectors in the lower dimensional space (also known as 'principal component coefficients' or 'loadings').
* The $y_i^n$ are the low dimensional co-ordinates of the data.
Our motive is to find the reconstruction $\tilde{\mathbf{x}}^n$ given the lower dimensional representation $\mathbf{y}^n$(which has components $y_i^n,i = 1,...,\text{M})$. For a data space of dimension $\dim(\mathbf{x})=\text{D}$, we hope to accurately describe the data using only a small number $(\text{M}\ll \text{D})$ of coordinates of $\mathbf{y}$.
To determine the best lower dimensional representation it is convenient to use the square distance error between $\mathbf{x}$ and its reconstruction $\tilde{\mathbf{x}}$:$$\text{E}(\mathbf{B},\mathbf{Y},\mathbf{c})=\sum\limits_{n=1}^{\text{N}}\sum\limits_{i=1}^{\text{D}}[x_i^n - \tilde{x}_i^n]^2.$$
* Here the basis vectors are defined as $\mathbf{B} = [\mathbf{b}^1,...,\mathbf{b}^\text{M}]$ (defining $[\text{B}]_{i,j} = b_i^j$).
* Corresponding low dimensional coordinates are defined as $\mathbf{Y} = [\mathbf{y}^1,...,\mathbf{y}^\text{N}].$
* Also, $x_i^n$ and $\tilde{x}_i^n$ represents the coordinates of the data points for the original and the reconstructed data respectively.
* The bias $\mathbf{c}$ is given by the mean of the data $\sum_n\mathbf{x}^n/\text{N}$.
Therefore, for simplification purposes we centre our data, so as to set $\mathbf{c}$ to zero. Now we concentrate on finding the optimal basis $\mathbf{B}$( which has the components $\mathbf{b}^i, i=1,...,\text{M} $).
Deriving the optimal linear reconstruction
To find the best basis vectors $\mathbf{B}$ and corresponding low dimensional coordinates $\mathbf{Y}$, we may minimize the sum of squared differences between each vector $\mathbf{x}$ and its reconstruction $\tilde{\mathbf{x}}$:
$\text{E}(\mathbf{B},\mathbf{Y}) = \sum\limits_{n=1}^{\text{N}}\sum\limits_{i=1}^{\text{D}}\left[x_i^n - \sum\limits_{j=1}^{\text{M}}y_j^nb_i^j\right]^2 = \text{trace} \left( (\mathbf{X}-\mathbf{B}\mathbf{Y})^T(\mathbf{X}-\mathbf{B}\mathbf{Y}) \right)$
where $\mathbf{X} = [\mathbf{x}^1,...,\mathbf{x}^\text{N}].$
Considering the above equation under the orthonormality constraint $\mathbf{B}^T\mathbf{B} = \mathbf{I}$ (i.e the basis vectors are mutually orthogonal and of unit length), we differentiate it w.r.t $y_k^n$. The squared error $\text{E}(\mathbf{B},\mathbf{Y})$ therefore has zero derivative when:
$y_k^n = \sum_i b_i^kx_i^n$
By substituting this solution in the above equation, the objective becomes
$\text{E}(\mathbf{B}) = (\text{N}-1)\left[\text{trace}(\mathbf{S}) - \text{trace}\left(\mathbf{S}\mathbf{B}\mathbf{B}^T\right)\right],$
where $\mathbf{S}$ is the sample covariance matrix of the data.
To minimise equation under the constraint $\mathbf{B}^T\mathbf{B} = \mathbf{I}$, we use a set of Lagrange Multipliers $\mathbf{L}$, so that the objective is to minimize:
$-\text{trace}\left(\mathbf{S}\mathbf{B}\mathbf{B}^T\right)+\text{trace}\left(\mathbf{L}\left(\mathbf{B}^T\mathbf{B} - \mathbf{I}\right)\right).$
Since the constraint is symmetric, we can assume that $\mathbf{L}$ is also symmetric. Differentiating with respect to $\mathbf{B}$ and equating to zero we obtain that at the optimum
$\mathbf{S}\mathbf{B} = \mathbf{B}\mathbf{L}$.
This is a form of eigen-equation so that a solution is given by taking $\mathbf{L}$ to be diagonal and $\mathbf{B}$ as the matrix whose columns are the corresponding eigenvectors of $\mathbf{S}$. In this case,
$\text{trace}\left(\mathbf{S}\mathbf{B}\mathbf{B}^T\right) =\text{trace}(\mathbf{L}),$
which is the sum of the eigenvalues corresponding to the eigenvectors forming $\mathbf{B}$. Since we wish to minimise $\text{E}(\mathbf{B})$, we take the eigenvectors with the largest corresponding eigenvalues.
Whilst the solution to this eigen-problem is unique, this only serves to define the solution subspace since one may rotate and scale $\mathbf{B}$ and $\mathbf{Y}$ such that the value of the squared loss is exactly the same. The justification for choosing the non-rotated eigen solution is given by the additional requirement that the principal components corresponds to directions of maximal variance.
Maximum variance criterion
We aim to find that single direction $\mathbf{b}$ such that, when the data is projected onto this direction, the variance of this projection is maximal amongst all possible such projections.
The projection of a datapoint onto a direction $\mathbf{b}$ is $\mathbf{b}^T\mathbf{x}^n$ for a unit length vector $\mathbf{b}$. Hence the sum of squared projections is: $$\sum\limits_{n}\left(\mathbf{b}^T\mathbf{x}^n\right)^2 = \mathbf{b}^T\left[\sum\limits_{n}\mathbf{x}^n(\mathbf{x}^n)^T\right]\mathbf{b} = (\text{N}-1)\mathbf{b}^T\mathbf{S}\mathbf{b} = \lambda(\text{N} - 1)$$
which ignoring constants, is simply the negative of the equation for a single retained eigenvector $\mathbf{b}$(with $\mathbf{S}\mathbf{b} = \lambda\mathbf{b}$). Hence the optimal single $\text{b}$ which maximises the projection variance is given by the eigenvector corresponding to the largest eigenvalues of $\mathbf{S}.$ The second largest eigenvector corresponds to the next orthogonal optimal direction and so on. This explains why, despite the squared loss equation being invariant with respect to arbitrary rotation of the basis vectors, the ones given by the eigen-decomposition have the additional property that they correspond to directions of maximal variance. These maximal variance directions found by PCA are called the $\text{principal} $ $\text{directions}.$
There are two eigenvalue methods through which shogun can perform PCA namely
* Eigenvalue Decomposition Method.
* Singular Value Decomposition.
EVD vs SVD
The EVD viewpoint requires that one compute the eigenvalues and eigenvectors of the covariance matrix, which is the product of $\mathbf{X}\mathbf{X}^\text{T}$, where $\mathbf{X}$ is the data matrix. Since the covariance matrix is symmetric, the matrix is diagonalizable, and the eigenvectors can be normalized such that they are orthonormal:
$\mathbf{S}=\frac{1}{\text{N}-1}\mathbf{X}\mathbf{X}^\text{T},$
where the $\text{D}\times\text{N}$ matrix $\mathbf{X}$ contains all the data vectors: $\mathbf{X}=[\mathbf{x}^1,...,\mathbf{x}^\text{N}].$
Writing the $\text{D}\times\text{N}$ matrix of eigenvectors as $\mathbf{E}$ and the eigenvalues as an $\text{N}\times\text{N}$ diagonal matrix $\mathbf{\Lambda}$, the eigen-decomposition of the covariance $\mathbf{S}$ is
$\mathbf{X}\mathbf{X}^\text{T}\mathbf{E}=\mathbf{E}\mathbf{\Lambda}\Longrightarrow\mathbf{X}^\text{T}\mathbf{X}\mathbf{X}^\text{T}\mathbf{E}=\mathbf{X}^\text{T}\mathbf{E}\mathbf{\Lambda}\Longrightarrow\mathbf{X}^\text{T}\mathbf{X}\tilde{\mathbf{E}}=\tilde{\mathbf{E}}\mathbf{\Lambda},$
where we defined $\tilde{\mathbf{E}}=\mathbf{X}^\text{T}\mathbf{E}$. The final expression above represents the eigenvector equation for $\mathbf{X}^\text{T}\mathbf{X}.$ This is a matrix of dimensions $\text{N}\times\text{N}$ so that calculating the eigen-decomposition takes $\mathcal{O}(\text{N}^3)$ operations, compared with $\mathcal{O}(\text{D}^3)$ operations in the original high-dimensional space. We then can therefore calculate the eigenvectors $\tilde{\mathbf{E}}$ and eigenvalues $\mathbf{\Lambda}$ of this matrix more easily. Once found, we use the fact that the eigenvalues of $\mathbf{S}$ are given by the diagonal entries of $\mathbf{\Lambda}$ and the eigenvectors by
$\mathbf{E}=\mathbf{X}\tilde{\mathbf{E}}\mathbf{\Lambda}^{-1}$
On the other hand, applying SVD to the data matrix $\mathbf{X}$ follows like:
$\mathbf{X}=\mathbf{U}\mathbf{\Sigma}\mathbf{V}^\text{T}$
where $\mathbf{U}^\text{T}\mathbf{U}=\mathbf{I}_\text{D}$ and $\mathbf{V}^\text{T}\mathbf{V}=\mathbf{I}_\text{N}$ and $\mathbf{\Sigma}$ is a diagonal matrix of the (positive) singular values. We assume that the decomposition has ordered the singular values so that the upper left diagonal element of $\mathbf{\Sigma}$ contains the largest singular value.
Attempting to construct the covariance matrix $(\mathbf{X}\mathbf{X}^\text{T})$from this decomposition gives:
$\mathbf{X}\mathbf{X}^\text{T} = \left(\mathbf{U}\mathbf{\Sigma}\mathbf{V}^\text{T}\right)\left(\mathbf{U}\mathbf{\Sigma}\mathbf{V}^\text{T}\right)^\text{T}$
$\mathbf{X}\mathbf{X}^\text{T} = \left(\mathbf{U}\mathbf{\Sigma}\mathbf{V}^\text{T}\right)\left(\mathbf{V}\mathbf{\Sigma}\mathbf{U}^\text{T}\right)$
and since $\mathbf{V}$ is an orthogonal matrix $\left(\mathbf{V}^\text{T}\mathbf{V}=\mathbf{I}\right),$
$\mathbf{X}\mathbf{X}^\text{T}=\left(\mathbf{U}\mathbf{\Sigma}^\mathbf{2}\mathbf{U}^\text{T}\right)$
Since this is in the form of an eigen-decomposition, the PCA solution can be obtained by performing the SVD decomposition of $\mathbf{X}$: the eigenvectors are then given by $\mathbf{U}$, and the corresponding eigenvalues by the squares of the singular values.
CPCA Class Reference (Shogun)
CPCA class of Shogun inherits from the Preprocessor class. Preprocessors are transformation functions that doesn't change the domain of the input features. Specifically, CPCA performs principal component analysis on the input vectors and keeps only the specified number of eigenvectors. On preprocessing, the stored covariance matrix is used to project vectors into eigenspace.
Performance of PCA depends on the algorithm used according to the situation in hand.
Our PCA preprocessor class provides 3 method options to compute the transformation matrix:
$\text{PCA(EVD)}$ sets $\text{PCAmethod == EVD}$ : Eigen Value Decomposition of Covariance Matrix $(\mathbf{XX^T}).$
The covariance matrix $\mathbf{XX^T}$ is first formed internally and then
its eigenvectors and eigenvalues are computed using QR decomposition of the matrix.
The time complexity of this method is $\mathcal{O}(D^3)$ and should be used when $\text{N > D.}$
$\text{PCA(SVD)}$ sets $\text{PCAmethod == SVD}$ : Singular Value Decomposition of feature matrix $\mathbf{X}$.
The transpose of feature matrix, $\mathbf{X^T}$, is decomposed using SVD. $\mathbf{X^T = UDV^T}.$
The matrix V in this decomposition contains the required eigenvectors and
the diagonal entries of the diagonal matrix D correspond to the non-negative
eigenvalues.The time complexity of this method is $\mathcal{O}(DN^2)$ and should be used when $\text{N < D.}$
$\text{PCA(AUTO)}$ sets $\text{PCAmethod == AUTO}$ : This mode automagically chooses one of the above modes for the user based on whether $\text{N>D}$ (chooses $\text{EVD}$) or $\text{N<D}$ (chooses $\text{SVD}$)
PCA on 2D data
Step 1: Get some data
We will generate the toy data by adding orthogonal noise to a set of points lying on an arbitrary 2d line. We expect PCA to recover this line, which is a one-dimensional linear sub-space.
End of explanation
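The EVD/SVD relationship described above is easy to verify numerically with plain NumPy (shown only as an illustration, independent of Shogun): the eigenvectors of the covariance matrix match the left singular vectors of the centred data matrix up to sign, and the eigenvalues are the squared singular values divided by $\text{N}-1$.
import numpy as np

rng = np.random.RandomState(0)
X_demo = rng.randn(3, 50)                                # D=3 dimensions, N=50 samples in columns
Xc = X_demo - X_demo.mean(axis=1, keepdims=True)         # centre the data

S_demo = Xc @ Xc.T / (Xc.shape[1] - 1)                   # sample covariance (EVD route)
evals, evecs = np.linalg.eigh(S_demo)                    # ascending eigenvalues
evecs = evecs[:, ::-1]                                   # reorder to descending

U, sing, Vt = np.linalg.svd(Xc, full_matrices=False)     # SVD route

print(np.allclose(np.abs(U), np.abs(evecs)))             # same directions up to sign
print(np.allclose(evals[::-1], sing**2 / (Xc.shape[1] - 1)))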
#convert the observation matrix into dense feature matrix.
train_features = features(twoD_obsmatrix)
#PCA(EVD) is choosen since N=100 and D=2 (N>D).
#However we can also use PCA(AUTO) as it will automagically choose the appropriate method.
preprocessor = sg.transformer('PCA', method='EVD')
#since we are projecting down the 2d data, the target dim is 1. But here the exhaustive method is detailed by
#setting the target dimension to 2 to visualize both the eigen vectors.
#However, in future examples we will get rid of this step by implementing it directly.
preprocessor.put('target_dim', 2)
#Centralise the data by subtracting its mean from it.
preprocessor.fit(train_features)
#get the mean for the respective dimensions.
mean_datapoints=preprocessor.get('mean_vector')
mean_x=mean_datapoints[0]
mean_y=mean_datapoints[1]
Explanation: Step 2: Subtract the mean.
For PCA to work properly, we must subtract the mean from each of the data dimensions. The mean subtracted is the average across each dimension. So, all the $x$ values have $\bar{x}$ subtracted, and all the $y$ values have $\bar{y}$ subtracted from them, where:$$\bar{\mathbf{x}} = \frac{\sum\limits_{i=1}^{n}x_i}{n}$$ $\bar{\mathbf{x}}$ denotes the mean of the $x_i^{'s}$
Shogun's way of doing things :
Preprocessor PCA performs principial component analysis on input feature vectors/matrices. It provides an interface to set the target dimension by $\text{put('target_dim', target_dim) method}.$ When the $\text{init()}$ method in $\text{PCA}$ is called with proper
feature matrix $\text{X}$ (with say $\text{N}$ number of vectors and $\text{D}$ feature dimension), a transformation matrix is computed and stored internally. It inherently also centralizes the data by subtracting the mean from it.
End of explanation
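As a small cross-check (not required for the analysis), the stored mean should simply be the per-dimension sample mean of the observation matrix:
# the preprocessor's mean vector should equal the per-dimension mean of the raw data
print(np.allclose(mean_datapoints, np.mean(twoD_obsmatrix, axis=1)))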
#Get the eigenvectors(We will get two of these since we set the target to 2).
E = preprocessor.get('transformation_matrix')
#Get all the eigenvalues returned by PCA.
eig_value=preprocessor.get('eigenvalues_vector')
e1 = E[:,0]
e2 = E[:,1]
eig_value1 = eig_value[0]
eig_value2 = eig_value[1]
Explanation: Step 3: Calculate the covariance matrix
To understand the relationship between 2 dimension we define $\text{covariance}$. It is a measure to find out how much the dimensions vary from the mean $with$ $respect$ $to$ $each$ $other.$$$cov(X,Y)=\frac{\sum\limits_{i=1}^{n}(X_i-\bar{X})(Y_i-\bar{Y})}{n-1}$$
A useful way to get all the possible covariance values between all the different dimensions is to calculate them all and put them in a matrix.
Example: For a 3d dataset with usual dimensions of $x,y$ and $z$, the covariance matrix has 3 rows and 3 columns, and the values are this:
$$\mathbf{S} = \quad\begin{pmatrix}cov(x,x)&cov(x,y)&cov(x,z)\\cov(y,x)&cov(y,y)&cov(y,z)\\cov(z,x)&cov(z,y)&cov(z,z)\end{pmatrix}$$
Step 4: Calculate the eigenvectors and eigenvalues of the covariance matrix
Find the eigenvectors $e^1,....e^M$ of the covariance matrix $\mathbf{S}$.
Shogun's way of doing things :
Step 3 and Step 4 are directly implemented by the PCA preprocessor of Shogun toolbar. The transformation matrix is essentially a $\text{D}$$\times$$\text{M}$ matrix, the columns of which correspond to the eigenvectors of the covariance matrix $(\text{X}\text{X}^\text{T})$ having top $\text{M}$ eigenvalues.
End of explanation
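For comparison, the covariance matrix and its eigen-decomposition can also be computed by hand with NumPy. This is a sketch for illustration only: the eigenvector directions should agree with the columns of the transformation matrix above up to sign, while eigenvalue scaling conventions may differ between implementations.
S_manual = np.cov(twoD_obsmatrix)          # np.cov treats each row as one variable
vals, vecs = np.linalg.eigh(S_manual)      # eigenvalues in ascending order
print('eigenvalues:', vals)
print('eigenvectors (columns):\n', vecs)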
#find out the M eigenvectors corresponding to top M number of eigenvalues and store it in E
#Here M=1
#slope of e1 & e2
m1=e1[1]/e1[0]
m2=e2[1]/e2[0]
#generate the two lines
x1=range(-50,50)
x2=x1
y1=multiply(m1,x1)
y2=multiply(m2,x2)
#plot the data along with those two eigenvectors
figure, axis = subplots(1,1)
xlim(-50, 50)
ylim(-50, 50)
axis.plot(x[:], y[:],'o',color='green', markersize=5, label="green")
axis.plot(x1[:], y1[:], linewidth=0.7, color='black')
axis.plot(x2[:], y2[:], linewidth=0.7, color='blue')
p1 = Rectangle((0, 0), 1, 1, fc="black")
p2 = Rectangle((0, 0), 1, 1, fc="blue")
legend([p1,p2],["1st eigenvector","2nd eigenvector"],loc='center left', bbox_to_anchor=(1, 0.5))
title('Eigenvectors selection')
xlabel("x axis")
_=ylabel("y axis")
Explanation: Step 5: Choosing components and forming a feature vector.
Let's visualize the eigenvectors and decide upon which to choose as the $principal$ $component$ of the data set.
End of explanation
#The eigenvector corresponding to higher eigenvalue(i.e eig_value2) is choosen (i.e e2).
#E is the feature vector.
E=e2
Explanation: In the above figure, the blue line is a good fit of the data. It shows the most significant relationship between the data dimensions.
It turns out that the eigenvector with the $highest$ eigenvalue is the $principle$ $component$ of the data set.
Form the matrix $\mathbf{E}=[\mathbf{e}^1,...,\mathbf{e}^M].$
Here $\text{M}$ represents the target dimension of our final projection
End of explanation
#transform all 2-dimensional feature matrices to target-dimensional approximations.
yn=preprocessor.transform(train_features).get('feature_matrix')
#Since, here we are manually trying to find the eigenvector corresponding to the top eigenvalue.
#The 2nd row of yn is choosen as it corresponds to the required eigenvector e2.
yn1=yn[1,:]
Explanation: Step 6: Projecting the data to its Principal Components.
This is the final step in PCA. Once we have chosen the components (eigenvectors) that we wish to keep in our data and formed a feature vector, we simply take that vector and multiply it on the left of the original dataset.
The lower dimensional representation of each data point $\mathbf{x}^n$ is given by
$\mathbf{y}^n=\mathbf{E}^T(\mathbf{x}^n-\mathbf{m})$
Here $\mathbf{E}^T$ is the matrix with the eigenvectors in rows, with the most significant eigenvector at the top. The mean-adjusted data, with data items in each column and each row holding a separate dimension, is multiplied by it.
Shogun's way of doing things :
Step 6 can be performed by shogun's PCA preprocessor as follows:
The transformation matrix that we got after $\text{init()}$ is used to transform all $\text{D-dim}$ feature matrices (with $\text{D}$ feature dimensions) supplied, via $\text{apply_to_feature_matrix methods}$.This transformation outputs the $\text{M-Dim}$ approximation of all these input vectors and matrices (where $\text{M}$ $\leq$ $\text{min(D,N)}$).
End of explanation
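The projection of Step 6 can also be written out in one line; with the single retained eigenvector stored in E, the result should match yn1 above up to numerical precision (a small cross-check, not part of the original workflow):
yn1_manual = np.dot(E, twoD_obsmatrix - mean_datapoints.reshape(-1, 1))  # y^n = E^T (x^n - m)
print(np.allclose(np.abs(yn1_manual), np.abs(yn1)))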
x_new=(yn1 * E[0]) + tile(mean_x,[n,1]).T[0]
y_new=(yn1 * E[1]) + tile(mean_y,[n,1]).T[0]
Explanation: Step 5 and Step 6 can be applied directly with Shogun's PCA preprocessor (from next example). It has been done manually here to show the exhaustive nature of Principal Component Analysis.
Step 7: Form the approximate reconstruction of the original data $\mathbf{x}^n$
The approximate reconstruction of the original datapoint $\mathbf{x}^n$ is given by : $\tilde{\mathbf{x}}^n\approx\text{m}+\mathbf{E}\mathbf{y}^n$
End of explanation
figure, axis = subplots(1,1)
xlim(-50, 50)
ylim(-50, 50)
axis.plot(x[:], y[:],'o',color='green', markersize=5, label="green")
axis.plot(x_new, y_new, 'o', color='blue', markersize=5, label="red")
title('PCA Projection of 2D data into 1D subspace')
xlabel("x axis")
ylabel("y axis")
#add some legend for information
p1 = Rectangle((0, 0), 1, 1, fc="r")
p2 = Rectangle((0, 0), 1, 1, fc="g")
p3 = Rectangle((0, 0), 1, 1, fc="b")
legend([p1,p2,p3],["normal projection","2d data","1d projection"],loc='center left', bbox_to_anchor=(1, 0.5))
#plot the projections in red:
for i in range(n):
axis.plot([x[i],x_new[i]],[y[i],y_new[i]] , color='red')
Explanation: The new data is plotted below
End of explanation
rcParams['figure.figsize'] = 8,8
#number of points
n=100
#generate the data
a=random.randint(1,20)
b=random.randint(1,20)
c=random.randint(1,20)
d=random.randint(1,20)
x1=random.random_integers(-20,20,n)
y1=random.random_integers(-20,20,n)
z1=-(a*x1+b*y1+d)/c
#generate the noise
noise=random.random_sample([n])*random.random_integers(-30,30,n)
#the normal unit vector is [a,b,c]/magnitude
magnitude=sqrt(square(a)+square(b)+square(c))
normal_vec=array([a,b,c]/magnitude)
#add the noise orthogonally
x=x1+noise*normal_vec[0]
y=y1+noise*normal_vec[1]
z=z1+noise*normal_vec[2]
threeD_obsmatrix=array([x,y,z])
#to visualize the data, we must plot it.
from mpl_toolkits.mplot3d import Axes3D
fig = pyplot.figure()
ax=fig.add_subplot(111, projection='3d')
#plot the noisy data generated by distorting a plane
ax.scatter(x, y, z,marker='o', color='g')
ax.set_xlabel('x label')
ax.set_ylabel('y label')
ax.set_zlabel('z label')
legend([p2],["3d data"],loc='center left', bbox_to_anchor=(1, 0.5))
title('Two dimensional subspace with noise')
xx, yy = meshgrid(range(-30,30), range(-30,30))
zz=-(a * xx + b * yy + d) / c
Explanation: PCA on 3D data.
Step1: Get some data
We generate points from a plane and then add random noise orthogonal to it. The general equation of a plane is: $$\text{a}\mathbf{x}+\text{b}\mathbf{y}+\text{c}\mathbf{z}+\text{d}=0$$
End of explanation
#convert the observation matrix into dense feature matrix.
train_features = features(threeD_obsmatrix)
#PCA(EVD) is choosen since N=100 and D=3 (N>D).
#However we can also use PCA(AUTO) as it will automagically choose the appropriate method.
preprocessor = sg.transformer('PCA', method='EVD')
#If we set the target dimension to 2, Shogun would automagically preserve the required 2 eigenvectors(out of 3) according to their
#eigenvalues.
preprocessor.put('target_dim', 2)
preprocessor.fit(train_features)
#get the mean for the respective dimensions.
mean_datapoints=preprocessor.get('mean_vector')
mean_x=mean_datapoints[0]
mean_y=mean_datapoints[1]
mean_z=mean_datapoints[2]
Explanation: Step 2: Subtract the mean.
End of explanation
#get the required eigenvectors corresponding to top 2 eigenvalues.
E = preprocessor.get('transformation_matrix')
Explanation: Step 3 & Step 4: Calculate the eigenvectors of the covariance matrix
End of explanation
#This can be performed by shogun's PCA preprocessor as follows:
yn=preprocessor.transform(train_features).get('feature_matrix')
Explanation: Steps 5: Choosing components and forming a feature vector.
Since we performed PCA for a target $\dim = 2$ for the $3 \dim$ data, we are directly given
the two required eigenvectors in $\mathbf{E}$
E is automagically filled by setting target dimension = M. This is different from the 2d data example where we implemented this step manually.
Step 6: Projecting the data to its Principal Components.
End of explanation
new_data=dot(E,yn)
x_new=new_data[0,:]+tile(mean_x,[n,1]).T[0]
y_new=new_data[1,:]+tile(mean_y,[n,1]).T[0]
z_new=new_data[2,:]+tile(mean_z,[n,1]).T[0]
#all the above points lie on the same plane. To make it more clear we will plot the projection also.
fig=pyplot.figure()
ax=fig.add_subplot(111, projection='3d')
ax.scatter(x, y, z,marker='o', color='g')
ax.set_xlabel('x label')
ax.set_ylabel('y label')
ax.set_zlabel('z label')
legend([p1,p2,p3],["normal projection","3d data","2d projection"],loc='center left', bbox_to_anchor=(1, 0.5))
title('PCA Projection of 3D data into 2D subspace')
for i in range(100):
ax.scatter(x_new[i], y_new[i], z_new[i],marker='o', color='b')
ax.plot([x[i],x_new[i]],[y[i],y_new[i]],[z[i],z_new[i]],color='r')
Explanation: Step 7: Form the approximate reconstruction of the original data $\mathbf{x}^n$
The approximate reconstruction of the original datapoint $\mathbf{x}^n$ is given by : $\tilde{\mathbf{x}}^n\approx\text{m}+\mathbf{E}\mathbf{y}^n$
End of explanation
rcParams['figure.figsize'] = 10, 10
import os
def get_imlist(path):
    """Return a list of filenames for all pgm images in a directory."""
    return [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.pgm')]
#set path of the training images
path_train=os.path.join(SHOGUN_DATA_DIR, 'att_dataset/training/')
#set no. of rows that the images will be resized.
k1=100
#set no. of columns that the images will be resized.
k2=100
filenames = get_imlist(path_train)
filenames = array(filenames)
#n is total number of images that has to be analysed.
n=len(filenames)
Explanation: PCA Performance
Until now, we were using the Eigenvalue Decomposition method to compute the transformation matrix $\text{(N>D)}$, but for the next example $\text{(N<D)}$ we will be using Singular Value Decomposition.
Practical Example : Eigenfaces
The problem with the image representation we are given is its high dimensionality. Two-dimensional $\text{p} \times \text{q}$ grayscale images span a $\text{m=pq}$ dimensional vector space, so an image with $\text{100}\times\text{100}$ pixels lies in a $\text{10,000}$ dimensional image space already.
The question is, are all dimensions really useful for us?
$\text{Eigenfaces}$ are based on the dimensional reduction approach of $\text{Principal Component Analysis(PCA)}$. The basic idea is to treat each image as a vector in a high dimensional space. Then, $\text{PCA}$ is applied to the set of images to produce a new reduced subspace that captures most of the variability between the input images. The $\text{Principal Component Vectors}$ (eigenvectors of the sample covariance matrix) are called the $\text{Eigenfaces}$. Every input image can be represented as a linear combination of these eigenfaces by projecting the image onto the new eigenfaces space. Thus, we can perform the identification process by matching in this reduced space. An input image is transformed into the $\text{eigenspace,}$ and the nearest face is identified using a $\text{Nearest Neighbour approach.}$
Step 1: Get some data.
Here data means those Images which will be used for training purposes.
End of explanation
# we will be using this often to visualize the images out there.
def showfig(image):
imgplot=imshow(image, cmap='gray')
imgplot.axes.get_xaxis().set_visible(False)
imgplot.axes.get_yaxis().set_visible(False)
from PIL import Image
from scipy import misc
# to get a hang of the data, lets see some part of the dataset images.
fig = pyplot.figure()
title('The Training Dataset')
for i in range(49):
fig.add_subplot(7,7,i+1)
train_img=array(Image.open(filenames[i]).convert('L'))
train_img=misc.imresize(train_img, [k1,k2])
showfig(train_img)
Explanation: Lets have a look on the data:
End of explanation
#To form the observation matrix obs_matrix.
#read the 1st image.
train_img = array(Image.open(filenames[0]).convert('L'))
#resize it to k1 rows and k2 columns
train_img=misc.imresize(train_img, [k1,k2])
#since features accepts only data of float64 datatype, we do a type conversion
train_img=array(train_img, dtype='double')
#flatten it to make it a row vector.
train_img=train_img.flatten()
# repeat the above for all images and stack all those vectors together in a matrix
for i in range(1,n):
temp=array(Image.open(filenames[i]).convert('L'))
temp=misc.imresize(temp, [k1,k2])
temp=array(temp, dtype='double')
temp=temp.flatten()
train_img=vstack([train_img,temp])
#form the observation matrix
obs_matrix=train_img.T
Explanation: Represent every image $I_i$ as a vector $\Gamma_i$
End of explanation
train_features = features(obs_matrix)
preprocessor= sg.transformer('PCA', method='AUTO')
preprocessor.put('target_dim', 100)
preprocessor.fit(train_features)
mean=preprocessor.get('mean_vector')
Explanation: Step 2: Subtract the mean
It is very important that the face images $I_1,I_2,...,I_M$ are $centered$ and of the $same$ size
We observe here that the no. of $\dim$ for each image is far greater than no. of training images. This calls for the use of $\text{SVD}$.
Setting the $\text{PCA}$ in the $\text{AUTO}$ mode does this automagically according to the situation.
End of explanation
#get the required eigenvectors corresponding to top 100 eigenvalues
E = preprocessor.get('transformation_matrix')
#lets see how these eigenfaces/eigenvectors look like:
fig1 = pyplot.figure()
title('Top 20 Eigenfaces')
for i in range(20):
a = fig1.add_subplot(5,4,i+1)
eigen_faces=E[:,i].reshape([k1,k2])
showfig(eigen_faces)
Explanation: Step 3 & Step 4: Calculate the eigenvectors and eigenvalues of the covariance matrix.
End of explanation
#we perform the required dot product.
yn=preprocessor.transform(train_features).get('feature_matrix')
Explanation: These 20 eigenfaces are not sufficient for a good image reconstruction. Having more eigenvectors gives us the most flexibility in the number of faces we can reconstruct. Though we are adding vectors with low variance, they are in directions of change nonetheless, and an external image that is not in our database could in fact need these eigenvectors to get even relatively close to it. But at the same time we must also keep in mind that adding excessive eigenvectors results in addition of little or no variance, slowing down the process.
Clearly a tradeoff is required.
We here set for M=100.
Step 5: Choosing components and forming a feature vector.
Since we set target $\dim = 100$ for this $n \dim$ data, we are directly given the $100$ required eigenvectors in $\mathbf{E}$
E is automagically filled. This is different from the 2d data example where we implemented this step manually.
Step 6: Projecting the data to its Principal Components.
The lower dimensional representation of each data point $\mathbf{x}^n$ is given by $$\mathbf{y}^n=\mathbf{E}^T(\mathbf{x}^n-\mathbf{m})$$
End of explanation
re=tile(mean,[n,1]).T[0] + dot(E,yn)
#lets plot the reconstructed images.
fig2 = pyplot.figure()
title('Reconstructed Images from 100 eigenfaces')
for i in range(1,50):
re1 = re[:,i].reshape([k1,k2])
fig2.add_subplot(7,7,i)
showfig(re1)
Explanation: Step 7: Form the approximate reconstruction of the original image $I_n$
The approximate reconstruction of the original datapoint $\mathbf{x}^n$ is given by : $\mathbf{x}^n\approx\text{m}+\mathbf{E}\mathbf{y}^n$
End of explanation
#set path of the training images
path_train=os.path.join(SHOGUN_DATA_DIR, 'att_dataset/testing/')
test_files=get_imlist(path_train)
test_img=array(Image.open(test_files[0]).convert('L'))
rcParams.update({'figure.figsize': (3, 3)})
#we plot the test image , for which we have to identify a good match from the training images we already have
fig = pyplot.figure()
title('The Test Image')
showfig(test_img)
#We flatten out our test image just the way we have done for the other images
test_img=misc.imresize(test_img, [k1,k2])
test_img=array(test_img, dtype='double')
test_img=test_img.flatten()
#We centralise the test image by subtracting the mean from it.
test_f=test_img-mean
Explanation: Recognition part.
In our face recognition process using the Eigenfaces approach, in order to recognize an unseen image, we proceed with the same preprocessing steps as applied to the training images.
Test images are represented in terms of eigenface coefficients by projecting them into face space$\text{(eigenspace)}$ calculated during training. Test sample is recognized by measuring the similarity distance between the test sample and all samples in the training. The similarity measure is a metric of distance calculated between two vectors. Traditional Eigenface approach utilizes $\text{Euclidean distance}$.
End of explanation
#We have already projected our training images into pca subspace as yn.
train_proj = yn
#Projecting our test image into pca subspace
test_proj = dot(E.T, test_f)
Explanation: Here we have to project our training image as well as the test image on the PCA subspace.
The Eigenfaces method then performs face recognition by:
1. Projecting all training samples into the PCA subspace.
2. Projecting the query image into the PCA subspace.
3. Finding the nearest neighbour between the projected training images and the projected query image.
End of explanation
#To get Eucledian Distance as the distance measure use EuclideanDistance.
workfeat = features(mat(train_proj))
testfeat = features(mat(test_proj).T)
RaRb = sg.distance('EuclideanDistance')
RaRb.init(testfeat, workfeat)
#The distance between one test image w.r.t all the training is stacked in matrix d.
d=empty([n,1])
for i in range(n):
d[i]= RaRb.distance(0,i)
#The one having the minimum distance is found out
min_distance_index = d.argmin()
iden=array(Image.open(filenames[min_distance_index]))
title('Identified Image')
showfig(iden)
Explanation: Shogun's way of doing things:
Shogun uses EuclideanDistance class to compute the familiar Euclidean distance for real valued features. It computes the square root of the sum of squared disparity between the corresponding feature dimensions of two data points.
$\mathbf{d(x,x')=}$$\sqrt{\mathbf{\sum\limits_{i=0}^{n}}|\mathbf{x_i}-\mathbf{x'_i}|^2}$
End of explanation |
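The same nearest-neighbour search can be reproduced with plain NumPy as a cross-check of the Shogun route above (a sketch; both should select the same training image):
d_np = np.linalg.norm(train_proj - test_proj.reshape(-1, 1), axis=0)
print(d_np.argmin() == min_distance_index)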
11,311 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Mousai
Step1: Overview
A wide array of contemporary problems can be represented by nonlinear ordinary differential equations with solutions that can be represented by Fourier Series
Step2: Mousai can easily recreate the near-continuous response
time, xc = ms.time_history(t, x)
Step3: Let's sweep through driving frequencies to find a frequency response function
Step4: Two degree of freedom system
$$\begin{bmatrix}1&0\\0&1\end{bmatrix}\begin{bmatrix}\ddot{x}_1\\ \ddot{x}_2\end{bmatrix}+\begin{bmatrix}2&-1 \\-1&2\end{bmatrix}\begin{bmatrix}{x}_1\\{x}_2\end{bmatrix}+\begin{bmatrix}\alpha x_{1}^{3}\\0\end{bmatrix}=\begin{bmatrix}0 \\A \sin(\omega t)\end{bmatrix}$$
Step5: Let's find a response.
Step6: Or a parametric study of response amplitude versus nonlinearity.
Step7: Two degree of freedom system with Coulomb Damping
$$\begin{bmatrix}1&0\\0&1\end{bmatrix}\begin{bmatrix}\ddot{x}_1\\ \ddot{x}_2\end{bmatrix}+\begin{bmatrix}2&-1 \\-1&2\end{bmatrix}\begin{bmatrix}{x}_1\\{x}_2\end{bmatrix}+\begin{bmatrix}\mu |\dot{x}_{1}|\\0\end{bmatrix}=\begin{bmatrix}0 \\A \sin(\omega t)\end{bmatrix}$$
Step8: Too much Coulomb friction can increase the response.
Did you know that?
This damping shifted resonance.
Step9: But can I solve an equation in one line? Yes!!!
Damped Duffing oscillator in one command. | Python Code:
%matplotlib inline
%load_ext autoreload
%autoreload 2
import scipy as sp
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import mousai as ms
from scipy import pi, sin
matplotlib.rcParams['figure.figsize'] = (11, 5)
from traitlets.config.manager import BaseJSONConfigManager
path = "/Users/jslater/anaconda3/etc/jupyter/nbconfig"
cm = BaseJSONConfigManager(config_dir=path)
cm.update("livereveal", {
"theme": "sky",
"transition": "zoom",
})
Explanation: Mousai: An Open-Source General Purpose Harmonic Balance Solver
ASME Dayton Engineering Sciences Symposium 2017
Joseph C. Slater, October 23, 2017
End of explanation
# Define our function (Python)
def duff_osc_ss(x, params):
omega = params['omega']
t = params['cur_time']
xd = np.array([[x[1]],
[-x[0] - 0.1 * x[0]**3 - 0.1 * x[1] + 1 * sin(omega * t)]])
return xd
# Arguments are name of derivative function, number of states, driving frequency,
# form of the equation, and number of harmonics
t, x, e, amps, phases = ms.hb_time(duff_osc_ss, num_variables=2, omega=.1,
eqform='first_order', num_harmonics=5)
print('Displacement amplitude is ', amps[0])
print('Velocity amplitude is ', amps[1])
Explanation: Overview
A wide array of contemporary problems can be represented by nonlinear ordinary differential equations with solutions that can be represented by Fourier Series:
Limit cycle oscillation of wings/blades
Flapping motion of birds/insects/ornithopters
Flagellum (threadlike cellular structures that enable bacteria etc. to swim)
Shaft rotation, especially including rubbing or nonlinear bearing contacts
Engines
Radio/sonar/radar electronics
Wireless power transmission
Power converters
Boat/ship motions and interactions
Cardio systems (heart/arteries/veins)
Ultrasonic systems transversing nonlinear media
Responses of composite materials or materials with cracks
Near buckling behavior of vibrating columns
Nonlinearities in power systems
Energy harvesting systems
Wind turbines
Radio Frequency Integrated Circuits
Any system with nonlinear coatings/friction damping, air damping, etc.
These can all be observed in a quick literature search on 'Harmonic Balance'.
Why (did I) write Mousai?
The ability to code harmonic balance seems to be publishable by itself
It's not research- it's just application of a known family of technique
A limited number of people have this knowledge and skill
Most cannot access this technique
"Research effort" is spent coding the technique, not doing research
Why write Mousai? (continued)
Matlab command eig unleashed power to the masses
Very few papers are published on eigensolutions- they have to be better than eig
eig only provides simple access to high-end eigensolvers written in C and Fortran
Undergraduates with no practical understanding of the algorithms easily solve problems
that were intractable a few decades ago.
Access and ease of use of such techniques enable greater science and greater research.
The real world is nonlinear, but linear analysis dominates because the tools are easier to use.
With Mousai, an undergraduate can solve a nonlinear harmonic response problem easier then a PhD can today.
Theory:
Linear Solution
Most dynamic systems can be modeled as a first order differential equation
\begin{equation}\dot{\mathbf{z}}(t)=\mathbf{f}(\mathbf{z}(t),\mathbf{u}(t))\end{equation}
- Use finite differences
- Use Galerkin methods (Finite Elements)
- Of course- discrete objects
This is the common State-Space form:
-solutions exceedingly well known if it is linear
Finding the oscillatory response, after dissipation of the transient response, requires long time marching.
Without damping, this may not even been feasible.
With damping, tens, hundreds, or thousands of cycles, therefore thousands of times steps at minimum.
For a linear system in the frequency domain this is
\begin{equation}j\omega\mathbf{Z}(\omega)=\mathbf{f}(\mathbf{Z}(\omega),\mathbf{U}(\omega))\end{equation}
\begin{equation}j\omega\mathbf{Z}(\omega)=A\mathbf{Z}(\omega)+B\mathbf{U}(\omega)\end{equation}
where
\begin{equation}A = \frac{\partial \mathbf{f}(\mathbf{Z}(\omega),\mathbf{U}(\omega))}{\partial\mathbf{Z}(\omega)},\qquad
B = \frac{\partial \mathbf{f}(\mathbf{Z}(\omega),\mathbf{U}(\omega))}{\partial\mathbf{U}(\omega)}\end{equation}
are constant matrices.
The solution is:
\begin{equation}\mathbf{Z}(\omega) = \left(Ij\omega-A\right)^{-1}B\mathbf{U}(\omega)\end{equation}
where the magnitudes and phases of the elements of $\mathbf{Z}$ provide the amplitudes and phases of the harmonic response of each state at the frequency $\omega$.
Nonlinear solution
For a nonlinear system in the frequency domain we assume a Fourier series solution
\begin{equation}\mathbf{z}(t)=\lim_{N\to\infty}\sum_{n=-N}^{N}\mathbf{Z}_n e^{j n \omega t}\end{equation}
$N=1$ for a single harmonic. $n=0$ is the constant term.
This can be substituted into the governing equation to find $\dot{\mathbf{z}}(t)$:
\begin{equation}\dot{\mathbf{z}}(t)=\mathbf{f}(\mathbf{z}(t),\mathbf{u}(t))\end{equation}
This is actually a function call to a Finite Element Package, CFD, Matlab function, - whatever your solver uses to get derivatives
We can also find $\dot{\mathbf{z}}(t)$ from the derivative of the Fourier Series:
\begin{equation}\dot{\mathbf{z}}(t)=\lim_{N\to\infty}\sum_{n=-N}^{N}j n \omega\mathbf{Z}_n e^{j n \omega t}\end{equation}
The difference between these methods is zero when $\mathbf{Z}_n$ are correct.
\begin{equation}\mathbf{0} \approx\sum_{n=-N}^{N}j n\omega \mathbf{Z}_n e^{j n \omega t}-\mathbf{f}\left(\sum_{n=-N}^{N}\mathbf{Z}_n e^{j n \omega t},\mathbf{u}(t)\right)\end{equation}
These operations are wrapped inside a function that returns this error
This function is used by a Newton-Krylov nonlinear algebraic solver.
Calls any solver in the SciPy family of solvers with the ability to easily pass through parameters to the solver and to the external derivative evaluator.
Examples:
Duffing Oscillator
\begin{equation}\ddot{x}+0.1\dot{x}+x+0.1 x^3=\sin(\omega t)\end{equation}
End of explanation
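To make the residual equation concrete, here is a minimal single-harmonic sketch in plain NumPy/SciPy (not Mousai's internals): assume $x(t)\approx a\sin(\omega t)+b\cos(\omega t)$, evaluate the time-domain residual of the Duffing equation over one period, and drive its first-harmonic content to zero with a root finder.
import numpy as np
from scipy.optimize import fsolve

def hb_residual(coeffs, omega=1.2, c=0.1, alpha=0.1, A=1.0, n_samp=16):
    a, b = coeffs
    t = np.linspace(0, 2 * np.pi / omega, n_samp, endpoint=False)
    x = a * np.sin(omega * t) + b * np.cos(omega * t)
    v = omega * (a * np.cos(omega * t) - b * np.sin(omega * t))
    acc = -omega**2 * x
    # residual of x'' + c x' + x + alpha x^3 - A sin(w t) at the sample points
    r = acc + c * v + x + alpha * x**3 - A * np.sin(omega * t)
    # project the residual onto the retained sin/cos components (harmonic balance)
    return [np.dot(r, np.sin(omega * t)), np.dot(r, np.cos(omega * t))]

a, b = fsolve(hb_residual, [0.1, 0.1])
print('single-harmonic displacement amplitude:', np.hypot(a, b))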
def pltcont():
time, xc = ms.time_history(t, x)
disp_plot, _ = plt.plot(time, xc.T[:, 0], t,
x.T[:, 0], '*b', label='Displacement')
vel_plot, _ = plt.plot(time, xc.T[:, 1], 'r',
t, x.T[:, 1], '*r', label='Velocity')
plt.legend(handles=[disp_plot, vel_plot])
plt.xlabel('Time (sec)')
plt.title('Response of Duffing Oscillator at 0.0159 rad/sec')
plt.ylabel('Response')
plt.legend
plt.grid(True)
fig=plt.figure()
ax=fig.add_subplot(111)
time, xc = ms.time_history(t, x)
disp_plot, _ = ax.plot(time, xc.T[:, 0], t,
x.T[:, 0], '*b', label='Displacement')
vel_plot, _ = ax.plot(time, xc.T[:, 1], 'r',
t, x.T[:, 1], '*r', label='Velocity')
ax.legend(handles=[disp_plot, vel_plot])
ax.set_xlabel('Time (sec)')
ax.set_title('Response of Duffing Oscillator at 0.0159 rad/sec')
ax.set_ylabel('Response')
ax.legend
ax.grid(True)
pltcont()# abbreviated plotting function
time, xc = ms.time_history(t, x)
disp_plot, _ = plt.plot(time, xc.T[:, 0], t,
x.T[:, 0], '*b', label='Displacement')
vel_plot, _ = plt.plot(time, xc.T[:, 1], 'r',
t, x.T[:, 1], '*r', label='Velocity')
plt.legend(handles=[disp_plot, vel_plot])
plt.xlabel('Time (sec)')
plt.title('Response of Duffing Oscillator at 0.0159 rad/sec')
plt.ylabel('Response')
plt.legend
plt.grid(True)
omega = np.arange(0, 3, 1 / 200) + 1 / 200
amp = sp.zeros_like(omega)
amp[:] = np.nan
t, x, e, amps, phases = ms.hb_time(duff_osc_ss, num_variables=2,
omega=1 / 200, eqform='first_order', num_harmonics=1)
for i, freq in enumerate(omega):
# Here we try to obtain solutions, but if they don't work,
# we ignore them by inserting `np.nan` values.
x = x - sp.average(x)
try:
        t, x, e, amps, phases = ms.hb_time(duff_osc_ss, x0=x, omega=freq,
                                           eqform='first_order', num_harmonics=1)
amp[i] = amps[0]
except:
amp[i] = np.nan
if np.isnan(amp[i]):
break
plt.plot(omega, amp)
Explanation: Mousai can easily recreate the near-continuous response
time, xc = ms.time_history(t, x)
End of explanation
omegal = np.arange(3, .03, -1 / 200) + 1 / 200
ampl = sp.zeros_like(omegal)
ampl[:] = np.nan
t, x, e, amps, phases = ms.hb_time(duff_osc_ss, num_variables=2,
omega=3, eqform='first_order', num_harmonics=1)
for i, freq in enumerate(omegal):
# Here we try to obtain solutions, but if they don't work,
# we ignore them by inserting `np.nan` values.
x = x - np.average(x)
try:
t, x, e, amps, phases = ms.hb_time(duff_osc_ss, x0=x,
omega=freq, eqform='first_order', num_harmonics=1)
ampl[i] = amps[0]
except:
ampl[i] = np.nan
if np.isnan(ampl[i]):
break
plt.plot(omega,amp, label='Up sweep')
plt.plot(omegal,ampl, label='Down sweep')
plt.legend()
plt.title('Amplitude versus frequency for Duffing Oscillator')
plt.xlabel('Driving frequency $\\omega$')
plt.ylabel('Amplitude')
plt.grid()
Explanation: Let's sweep through driving frequencies to find a frequency response function
End of explanation
def two_dof_demo(x, params):
omega = params['omega']
t = params['cur_time']
force_amplitude = params['force_amplitude']
alpha = params['alpha']
# The following could call an external code to obtain the state derivatives
xd = np.array([[x[1]],
[-2 * x[0] - alpha * x[0]**3 + x[2]],
[x[3]],
[-2 * x[2] + x[0]]] + force_amplitude * np.sin(omega * t))
return xd
Explanation: Two degree of freedom system
$$\begin{bmatrix}1&0\\0&1\end{bmatrix}\begin{bmatrix}\ddot{x}_1\\ \ddot{x}_2\end{bmatrix}+\begin{bmatrix}2&-1 \\-1&2\end{bmatrix}\begin{bmatrix}{x}_1\\{x}_2\end{bmatrix}+\begin{bmatrix}\alpha x_{1}^{3}\\0\end{bmatrix}=\begin{bmatrix}0 \\A \sin(\omega t)\end{bmatrix}$$
End of explanation
parameters = {'force_amplitude': 0.2}
parameters['alpha'] = 0.4
t, x, e, amps, phases = ms.hb_time(two_dof_demo, num_variables=4,
omega=1.2, eqform='first_order', params=parameters)
amps
Explanation: Let's find a response.
End of explanation
alpha = np.linspace(-1, .45, 2000)
amp = np.zeros_like(alpha)
for i, alphai in enumerate(alpha):
parameters['alpha'] = alphai
t, x, e, amps, phases = ms.hb_time(two_dof_demo, num_variables=4, omega=1.2,
eqform='first_order', params=parameters)
amp[i] = amps[0]
plt.plot(alpha,amp)
plt.title('Amplitude of $x_1$ versus $\\alpha$')
plt.ylabel('Amplitude of $x_1$')
plt.xlabel('$\\alpha$')
plt.grid()
Explanation: Or a parametric study of response amplitude versus nonlinearity.
End of explanation
def two_dof_coulomb(x, params):
omega = params['omega']
t = params['cur_time']
force_amplitude = params['force_amplitude']
mu = params['mu']
# The following could call an external code to obtain the state derivatives
xd = np.array([[x[1]],
[-2 * x[0] - mu * np.abs(x[1]) + x[2]],
[x[3]],
[-2 * x[2] + x[0]]] + force_amplitude * np.sin(omega * t))
return xd
parameters = {'force_amplitude': 0.2}
parameters['mu'] = 0.1
t, x, e, amps, phases = ms.hb_time(two_dof_coulomb, num_variables=4,
omega=1.2, eqform='first_order', params=parameters)
amps
mu = np.linspace(0, 1.0, 200)
amp = np.zeros_like(mu)
for i, mui in enumerate(mu):
parameters['mu'] = mui
t, x, e, amps, phases = ms.hb_time(two_dof_coulomb, num_variables=4, omega=1.2,
eqform='first_order', num_harmonics=3, params=parameters)
amp[i] = amps[0]
Explanation: Two degree of freedom system with Coulomb Damping
$$\begin{bmatrix}1&0\\0&1\end{bmatrix}\begin{bmatrix}\ddot{x}_1\\ \ddot{x}_2\end{bmatrix}+\begin{bmatrix}2&-1 \\-1&2\end{bmatrix}\begin{bmatrix}{x}_1\\{x}_2\end{bmatrix}+\begin{bmatrix}\mu |\dot{x}_{1}|\\0\end{bmatrix}=\begin{bmatrix}0 \\A \sin(\omega t)\end{bmatrix}$$
End of explanation
plt.plot(mu,amp)
plt.title('Amplitude of $x_1$ versus $\\mu$')
plt.ylabel('Amplitude of $x_1$')
plt.xlabel('$\\mu$')
plt.grid()
Explanation: Too much Coulomb friction can increase the response.
Did you know that?
This damping shifted resonance.
End of explanation
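The resonance shift can be seen directly by sweeping the driving frequency for two friction levels. This is a sketch reusing `two_dof_coulomb` and the same `ms.hb_time` call pattern as above; frequencies where the solver fails to converge are simply left out.
freqs = np.linspace(0.6, 1.6, 60)
for mui in (0.0, 0.3):
    parameters['mu'] = mui
    sweep_amp = np.full_like(freqs, np.nan)
    for i, freq in enumerate(freqs):
        try:
            _, _, _, amps, _ = ms.hb_time(two_dof_coulomb, num_variables=4, omega=freq,
                                          eqform='first_order', num_harmonics=3,
                                          params=parameters)
            sweep_amp[i] = amps[0]
        except Exception:
            pass
    plt.plot(freqs, sweep_amp, label='$\\mu = {}$'.format(mui))
plt.xlabel('Driving frequency $\\omega$')
plt.ylabel('Amplitude of $x_1$')
plt.legend()
plt.grid()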
out = ms.hb_time(lambda x, v,
params: np.array([[-x - .1 * x**3 - .1 * v + 1 *
sin(params['omega'] * params['cur_time'])]]),
num_variables=1, omega=.7, num_harmonics=1)
out[3][0]
Explanation: But can I solve an equation in one line? Yes!!!
Damped Duffing oscillator in one command.
End of explanation |
11,312 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<a href="https
Step1: MedMNIST
MedMNIST, a collection of 10 pre-processed medical open datasets. MedMNIST is standardized to perform classification tasks on lightweight 28 * 28 images, which requires no background knowledge. Covering the primary data modalities in medical image analysis, it is diverse on data scale (from 100 to 100,000) and tasks (binary/multi-class, ordinal regression and multi-label). MedMNIST could be used for educational purpose, rapid prototyping, multi-modal machine learning or AutoML in medical image analysis. Moreover, MedMNIST Classification Decathlon is designed to benchmark AutoML algorithms on all 10 datasets.
(Authors
Step2: Data exploration
Step3: Build the data pipeline
Step4: Model training
Step5: Model evaluation
Step6: Model interpretation
tf-keras-vis
tf-keras-vis is a visualization toolkit for debugging tf.keras models in Tensorflow2.0+.
GitHub link
grad-CAM
<img src="https | Python Code:
# import package
import matplotlib.pyplot as plt
import numpy as np
import os
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import (Input, Dense, Dropout, Activation, GlobalAveragePooling2D,
BatchNormalization, Flatten, Conv2D, MaxPooling2D)
# Requires the gdown package (Google Drive direct download of big files).
#pip install gdown
tf.__version__
# Check available CPU & GPU devices
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
Explanation: <a href="https://colab.research.google.com/github/stuser/temp/blob/master/pneumoniamnist_CNN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
End of explanation
# find the share link of the file/folder on Google Drive
file_share_link = "https://drive.google.com/file/d/1nebGwtoKTNegJ-fUYO-NEz0mzC1481hv/view?usp=sharing"
# extract the ID of the file
file_id = "1nebGwtoKTNegJ-fUYO-NEz0mzC1481hv"
# download file name
file_name = 'pneumoniamnist.npz'
!gdown --id "$file_id" --output "$file_name"
!ls -lh
Explanation: MedMNIST
MedMNIST, a collection of 10 pre-processed medical open datasets. MedMNIST is standardized to perform classification tasks on lightweight 28 * 28 images, which requires no background knowledge. Covering the primary data modalities in medical image analysis, it is diverse on data scale (from 100 to 100,000) and tasks (binary/multi-class, ordinal regression and multi-label). MedMNIST could be used for educational purpose, rapid prototyping, multi-modal machine learning or AutoML in medical image analysis. Moreover, MedMNIST Classification Decathlon is designed to benchmark AutoML algorithms on all 10 datasets.
(Authors: Jiancheng Yang, Rui Shi, Bingbing Ni, Bilian Ke, Shanghai Jiao Tong University)
GitHub Pages link
<img src="https://medmnist.github.io/assets/overview.jpg" alt="MedMNIST figure" width="700">
PneumoniaMNIST dataset download (Google Drive): https://drive.google.com/file/d/1nebGwtoKTNegJ-fUYO-NEz0mzC1481hv/view?usp=sharing
PneumoniaMNIST:
A dataset based on a prior dataset of 5,856 pediatric chest X-ray images. The task is binary-class classification of pneumonia and normal. We split the source training set with a ratio of 9:1 into training and validation set, and use its source validation set as the test set. The source images are single-channel, and their sizes range from (384-2,916) x (127-2,713). We center-crop the images and resize them into 1 x 28 x 28.
task: Binary-Class (2)
label:
0: normal, 1: pneumonia
n_channels: 1
n_samples:
train: 4708, val: 524, test: 624
End of explanation
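Before building the model it is worth confirming the split sizes and class balance quoted above. A minimal sketch, assuming pneumoniamnist.npz was downloaded by the gdown call above; the key names follow the listing in the description.
```
import numpy as np
data = np.load('pneumoniamnist.npz')
for split in ('train', 'val', 'test'):
    labels = data['{}_labels'.format(split)].ravel()
    counts = np.bincount(labels, minlength=2)
    print('{}: images {}, normal {}, pneumonia {}'.format(
        split, data['{}_images'.format(split)].shape, counts[0], counts[1]))
```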
import numpy as np
#load pneumoniamnist dataset
pneumoniamnist = np.load('pneumoniamnist.npz')
type(pneumoniamnist) #include files: train_images, val_images, test_images, train_labels, val_labels, test_labels
pneumoniamnist['train_images'].shape, pneumoniamnist['train_labels'].shape
(x_train, y_train), (x_test, y_test) = (pneumoniamnist['train_images'], pneumoniamnist['train_labels']), (pneumoniamnist['test_images'], pneumoniamnist['test_labels'])
(x_val, y_val) = (pneumoniamnist['val_images'], pneumoniamnist['val_labels'])
print(x_train.shape) # (4708, 28, 28)
print(y_train.shape) # (4708, 1)
print(y_train[40:50]) # class-label
print(x_test.shape) # (624, 28, 28)
print(y_test.shape) # (624, 1)
# Convert the arrays to 'float32'
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# rescale value to [0 - 1] from [0 - 255]
x_train /= 255 # rescaling
x_test /= 255 # rescaling
x_val = x_val.astype('float32')/255
# montage
# source: https://github.com/MedMNIST/MedMNIST/blob/main/getting_started.ipynb
from skimage.util import montage
def process(dataset, n_channels, length=20):
scale = length * length
image = np.zeros((scale, 28, 28, 3)) if n_channels == 3 else np.zeros((scale, 28, 28))
index = [i for i in range(scale)]
np.random.shuffle(index)
plt.figure(figsize=(6,6))
for idx in range(scale):
img = dataset[idx]
if n_channels == 3:
img = img.permute(1, 2, 0)
else:
img = img.reshape(28, 28)
image[index[idx]] = img
if n_channels == 1:
image = image.reshape(scale, 28, 28)
arr_out = montage(image)
plt.imshow(arr_out, cmap='gray')
else:
image = image.reshape(scale, 28, 28, 3)
arr_out = montage(image, multichannel=3)
plt.imshow(arr_out)
process( x_train, n_channels=1, length=5)
# visualization
import matplotlib.pylab as plt
sample_num = 99
img = x_train[sample_num].reshape(28, 28)
plt.imshow(img, cmap='gray')
template = "label:{label}"
_ = plt.title(template.format(label= str(y_train[sample_num])))
plt.grid(False)
Explanation: Data exploration
End of explanation
x_train.shape+(1,)
np.expand_dims(x_train, axis=3).shape
x_train = np.expand_dims(x_train, axis=3)
print('x_train shape:',x_train.shape)
x_test = np.expand_dims(x_test, axis=3)
print('x_test shape:',x_test.shape)
x_val = np.expand_dims(x_val, axis=3)
print('x_val shape:',x_val.shape)
# One-hot encode the training and test labels
num_classes = 2
from tensorflow.keras.utils import to_categorical
y_train_onehot = to_categorical(y_train)
y_test_onehot = to_categorical(y_test)
y_val_onehot = to_categorical(y_val)
print('y_train_onehot shape:', y_train_onehot.shape)
print('y_test_onehot shape:', y_test_onehot.shape)
print('y_val_onehot shape:', y_val_onehot.shape)
input = Input(shape=x_train.shape[1:])
x = Conv2D(32, (3, 3), activation='relu', padding='same')(input)
x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='last_conv_layer')(x)
x = GlobalAveragePooling2D(name='avg_pool')(x)
output = Dense(num_classes, activation='softmax', name='predictions')(x)
model = Model(inputs=[input], outputs=[output])
print(model.summary())
tf.keras.utils.plot_model(
model,
to_file='model_plot_CNN.png',
show_shapes=True,
show_layer_names=True,
rankdir='TB',
expand_nested=True,
dpi=96,
)
Explanation: Building the data pipeline and model
End of explanation
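The arrays above are small enough to hand to fit() directly, but the same tensors can be wrapped in a tf.data pipeline when on-the-fly shuffling and batching are preferred. A minimal sketch, assuming x_train, y_train_onehot, x_val and y_val_onehot from the cells above; buffer and batch sizes are arbitrary (tf.data.AUTOTUNE needs TF 2.4+, otherwise use tf.data.experimental.AUTOTUNE).
```
train_ds = (tf.data.Dataset.from_tensor_slices((x_train, y_train_onehot))
            .shuffle(1024)
            .batch(256)
            .prefetch(tf.data.AUTOTUNE))
val_ds = tf.data.Dataset.from_tensor_slices((x_val, y_val_onehot)).batch(256)
# model.fit(train_ds, validation_data=val_ds, epochs=20) would then replace the array-based call below
```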
# Compile the model
# Use Adam as the optimizer
from tensorflow.keras.optimizers import Adam
batch_size = 256
epochs = 20
init_lr = 0.001
opt = Adam(lr=init_lr)
model.compile(optimizer = opt, loss='categorical_crossentropy', metrics='accuracy')
cnn_history = model.fit(x_train, y_train_onehot,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_val, y_val_onehot),
verbose=2)
import plotly.graph_objects as go
plt.clf()
fig = go.Figure()
fig.add_trace(go.Scatter( y=cnn_history.history['accuracy'],
name='Train'))
fig.add_trace(go.Scatter( y=cnn_history.history['val_accuracy'],
name='Valid'))
fig.update_layout(height=500,width=700,
title='Training and validation accuracy',
xaxis_title='Epoch',
yaxis_title='Accuracy')
fig.show()
predictions = model.predict(x_test)
print(predictions.shape)
print(predictions[0:5])
print("**********************************************")
plt.hist(predictions)
plt.show()
y_pred = np.argmax(predictions, axis=1)
print(y_pred.shape)
print(y_pred[0:5])
print("**********************************************")
plt.hist(y_pred)
plt.show()
Explanation: Model training
End of explanation
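For longer runs than the fixed 20 epochs it is common to let callbacks keep the best weights and stop once the validation loss stalls. A minimal sketch, assuming the compiled model above; the patience value and the checkpoint filename are arbitrary.
```
callbacks = [
    keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True),
    keras.callbacks.ModelCheckpoint('pneumonia_cnn_best.h5', monitor='val_loss', save_best_only=True),
]
# cnn_history = model.fit(x_train, y_train_onehot, batch_size=batch_size, epochs=50,
#                         validation_data=(x_val, y_val_onehot),
#                         callbacks=callbacks, verbose=2)
```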
cnn_pred = model.evaluate(x_test, y_test_onehot, verbose=2)
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
import itertools
classes = ['normal','pneumonia']
print(classification_report(y_test, y_pred, target_names=classes))
print ("**************************************************************")
cm = confusion_matrix(y_test, y_pred)
plt.figure(figsize=(5,5))
plt.title('confusion matrix')
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = 'd' #'.2f'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
y_pred[0:10], y_pred.shape
_y_test = y_test.reshape(y_pred.shape)
_y_test[0:10], _y_test.shape
# visualization
import matplotlib.pylab as plt
sample_num = 1
img = x_test[sample_num].reshape(28, 28)
plt.imshow(img, cmap='gray')
template = "True:{true}, predicted:{predict}"
_ = plt.title(template.format(true= str(y_test[sample_num]),
predict= str(y_pred[sample_num])))
plt.grid(False)
Explanation: Model evaluation
End of explanation
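The accuracy and confusion matrix above assume the default 0.5 cut-off; for a binary medical task the ROC curve and AUC are also worth reporting. A minimal sketch, assuming predictions and y_test from the cells above.
```
from sklearn.metrics import roc_auc_score, roc_curve
probs = predictions[:, 1]  # predicted probability of the pneumonia class
auc = roc_auc_score(y_test.ravel(), probs)
fpr, tpr, _ = roc_curve(y_test.ravel(), probs)
plt.plot(fpr, tpr, label='AUC = {:.3f}'.format(auc))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.legend()
plt.show()
```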
# Install the required package first
!pip install tf-keras-vis
%%time
from matplotlib import cm
import matplotlib.pyplot as plt
from tf_keras_vis.gradcam import Gradcam,GradcamPlusPlus
from tensorflow.keras import backend as K
from tf_keras_vis.saliency import Saliency
from tf_keras_vis.utils import normalize
def Grad_CAM_savepictures(file_index,model,save_name):
def loss(output):
return (output[0][y_test[file_index][0]])
def model_modifier(m):
m.layers[-1].activation = tf.keras.activations.linear
return m
# Create Gradcam object
gradcam = Gradcam(model,model_modifier=model_modifier,clone=False)
originalimage=x_test[file_index]
originalimage=originalimage.reshape((1,originalimage.shape[0],originalimage.shape[1],1))
# Generate heatmap with GradCAM
cam = gradcam(loss,originalimage,penultimate_layer=-1)
cam = normalize(cam)
#overlap image
plt.figure(figsize=(12,8))
ax1=plt.subplot(1, 3, 1)
heatmap = np.uint8(cm.jet(cam)[..., :3] * 255)
ax1.imshow(x_test[file_index].reshape((x_test.shape[1],x_test.shape[2])),cmap="gray")
ax1.imshow(heatmap.reshape((x_test.shape[1],x_test.shape[2],3)), cmap='jet', alpha=0.4) # overlay
ax1.set_title("Grad-CAM")
gradcam = GradcamPlusPlus(model,model_modifier=model_modifier,clone=False)
cam = gradcam(loss,originalimage,penultimate_layer=-1)
cam = normalize(cam)
ax1=plt.subplot(1, 3, 2)
heatmap = np.uint8(cm.jet(cam)[..., :3] * 255)
ax1.imshow(x_test[file_index].reshape((x_test.shape[1],x_test.shape[2])),cmap="gray")
ax1.imshow(heatmap.reshape((x_test.shape[1],x_test.shape[2],3)), cmap='jet', alpha=0.4) # overlay
ax1.set_title("Grad-CAM++")
plt.savefig(save_name)
plt.show()
file_index = 0
Grad_CAM_savepictures( file_index, model, "Grad-CAM_{}.jpg".format(file_index))
print('saved file - Grad-CAM_{}.jpg'.format(file_index))
file_index = 1
Grad_CAM_savepictures( file_index, model, "Grad-CAM_{}.jpg".format(file_index))
print('saved file - Grad-CAM_{}.jpg'.format(file_index))
file_index = 2
Grad_CAM_savepictures( file_index, model, "Grad-CAM_{}.jpg".format(file_index))
print('saved file - Grad-CAM_{}.jpg'.format(file_index))
file_index = 10
Grad_CAM_savepictures( file_index, model, "Grad-CAM_{}.jpg".format(file_index))
print('saved file - Grad-CAM_{}.jpg'.format(file_index))
Explanation: Explaining the model
tf-keras-vis
tf-keras-vis is a visualization toolkit for debugging tf.keras models in Tensorflow2.0+.
GitHub link
grad-CAM
<img src="https://github.com/keisen/tf-keras-vis/raw/master/examples/images/gradcam_plus_plus.png" alt="gradcam figure" width="700">
End of explanation |
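The tf-keras-vis helpers above hide the mechanics; the same kind of heatmap can be computed by hand with tf.GradientTape, which makes the gradient-weighted-activation idea explicit. A rough sketch only, assuming the model defined above with its 'last_conv_layer'; it is not a drop-in replacement for the library calls.
```
def manual_grad_cam(model, image, class_index, conv_layer_name='last_conv_layer'):
    # model that returns both the last conv feature maps and the prediction
    grad_model = Model(inputs=model.inputs,
                       outputs=[model.get_layer(conv_layer_name).output, model.output])
    with tf.GradientTape() as tape:
        conv_out, preds = grad_model(image[np.newaxis, ...])
        score = preds[:, class_index]
    grads = tape.gradient(score, conv_out)
    weights = tf.reduce_mean(grads, axis=(1, 2))  # channel weights from pooled gradients
    cam = tf.reduce_sum(conv_out * weights[:, tf.newaxis, tf.newaxis, :], axis=-1)
    cam = tf.nn.relu(cam)[0]
    return (cam / (tf.reduce_max(cam) + 1e-8)).numpy()

heatmap = manual_grad_cam(model, x_test[0], int(y_test[0][0]))
plt.imshow(x_test[0].reshape(28, 28), cmap='gray')
plt.imshow(heatmap, cmap='jet', alpha=0.4, extent=(0, 28, 28, 0))  # coarse 14x14 map stretched over the image
plt.show()
```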
11,313 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
This section reviews the various methods for reading and modifying metadata within a ReproPhylo Project. Utilizing it will be discussed in later sections.
3.4.1 What is metadata in ReproPhylo?
Within a ReproPhylo Project, metadata is tied to sequences and sequence features, in the Biopython sense. Since sequence records in the Project are in fact Biopython SeqRecord objects, a quick review of the GenBank file format, based on which the SeqRecord class is structured, will help understand basic concepts.
Step1: When a data file is read, each sequence will be stored as a SeqRecord object in the Project.records list of SeqRecord objects. A SeqRecord object has an annotations attribute (SeqRecord.annotations) which is a Python dictionary containing information regarding the sequence as a whole. Additional SeqRecord attribute is the features Python list (SeqRecord.features). Features in the list are stored as SeqFeature Biopython objects, and they define the start and end of each locus in the sequence (SeqFeature.location attribute. The SeqFeature also has the SeqFeature.qualifiers dictionary, which holds any additional metadata about this sequence feature. For example, the gene name and product name. This information is used by ReproPhylo to sort loci into their respective bins, (eg, coi, 18S etc.). Note that the values in the SeqFeature.qualifiers dictionary are always stored as Python lists, even if they consist of a single value. For example, a SeqFeature.qualifiers dictionary might look like this
Step2: The first record looks like this
Step3: The next cell will get the organism name for each record, which is a qualifier in the source feature. It will then get the genus out of this name, and place it in a new qualifier, in all the record's features
Step4: The genus qualifier was added to all the features.
Step5: Important
Step6: This method iterates over all the records in the project and makes some changes where the rules provided to it apply. If the value 'Cinachyrella' is found in the qualifier 'genus' it will put the value 'yes' in the qualifier 'porocalices', which is a morphological trait of the sponge genus Cinachyrella. mode='part' means that the match can be partial. The default is mode='whole'.
Step7: This method will add a qualifier spam? with the value why not to specific features that have the feature IDs KC902343.1_f0 and JX177933.1_f0. Within each record, ReproPhylo assigns unique ID for each feature.
Step8: We may want to place the source qualifier 'country' explicitly in each of the other features. The add_qualifier_from_source method will take effect in all the records that have a country qualifier in their source feature. It will copy it to all the other features in the record, along with its value in that record.
Step9: Or vice-versa, we can make sure that a qualifier that is in only one of the features, is copied as a value of a source feature qualifier and thus apply it to the whole record (and all its features). In this case, the function copy_paste_from_features_to_source will take effect in records where at least one non-source record feature has the qualifier eggs?, and it will copy the value of eggs? to the source qualifier spam?.
Step10: Lastly, we may want to equate qualifiers that have different names in different records, but are essentially the same thing. For example, 'sample' and 'voucher'. This can be done by applying the qualifier name of one of them
to the other, using the method copy_paste_within_feature. In every record feature that has the qualifier GC_content, a new qualifier will be created, %GC, and it will contain the value of GC_content.
This is the FEATURES section of the above record, with the resulting changes to it. The method which is responsible for each qualifier is indicated next to it (the method names are not a part of the real output)
Step11: In the resulting file, each feature has its own line, and each record has as many lines as non-source features it contains. Source feature qualifiers are included in all the lines, as they apply to all the features in the record. They are indicated in the titles with the prefix source
Step12: The edited spreadsheet was saved as outputs/edited_metadata_example.tsv, and it can now be read back to the Project
Step13: If we print the first record again, this is how its FEATURES section looks now
Step14: 3.4.3 Quick reference | Python Code:
from IPython.display import Image
Image('images/genbank_terminology.jpg', width=400)
Explanation: This section reviews the various methods for reading and modifying metadata within a ReproPhylo Project. Utilizing it will be discussed in later sections.
3.4.1 What is metadata in ReproPhylo?
Within a ReproPhylo Project, metadata is tied to sequences and sequence features, in the Biopython sense. Since sequence records in the Project are in fact Biopython SeqRecord objects, a quick review of the GenBank file format, based on which the SeqRecord class is structured, will help understand basic concepts.
End of explanation
from reprophylo import *
pj = unpickle_pj('outputs/my_project.pkpj', git=False)
Explanation: When a data file is read, each sequence will be stored as a SeqRecord object in the Project.records list of SeqRecord objects. A SeqRecord object has an annotations attribute (SeqRecord.annotations) which is a Python dictionary containing information regarding the sequence as a whole. Additional SeqRecord attribute is the features Python list (SeqRecord.features). Features in the list are stored as SeqFeature Biopython objects, and they define the start and end of each locus in the sequence (SeqFeature.location attribute. The SeqFeature also has the SeqFeature.qualifiers dictionary, which holds any additional metadata about this sequence feature. For example, the gene name and product name. This information is used by ReproPhylo to sort loci into their respective bins, (eg, coi, 18S etc.). Note that the values in the SeqFeature.qualifiers dictionary are always stored as Python lists, even if they consist of a single value. For example, a SeqFeature.qualifiers dictionary might look like this:
<pre>
{'gene': ['cox1'],
'translation': ['AATRNLLK']}
</pre>
Another important SeqRecord attribute is type (SeqRecord.type), which is a string stating the feature type, whether it is 'gene', 'CDS', 'rRNA' or anything else.
A special SeqFeature is the source feature (SeqFeature.type == 'source'). It is the first feature in each SeqRecord.features list, and is generated automatically by ReproPhylo if you read a file that does not have such features (eg, fasta format). The qualifiers dictionary of this automatically generated source feature will then contain the original_id and original_desc (description) from the fasta headers. Technically, there is absolutely no difference between the source feature and all the other features. However, conceptually, metadata stored in the source feature applies to all the other features. ReproPhylo knows this and provides tools to access it accordingly (further down).
For a more detailed description of metadata in the SeqRecord Biopython object, refer to this section in the Biopython tutorial. Although ReproPhylo provides some Project methods for modifying the metadata, and also a method to edit the metadata in a spreadsheet, the most flexible way to do it is by utilizing Biopython code, and mastering it is helpful within ReproPhylo and in life in general!
3.4.2 Modifying the metadata
3.4.2.1 A Biopython example
With Biopython we can iterate over the records and their features in the pj.records list and make changes or additions to the qualifiers of each feature as follows. To get a working example going, first we load our project with its loci and data:
End of explanation
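As a rough, ReproPhylo-independent illustration of the structures described above, a SeqRecord with a source feature and one gene feature can be built directly with Biopython; the sequence, IDs and qualifier values below are made up.
```
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import SeqFeature, FeatureLocation

rec = SeqRecord(Seq('ATGGCACGTAA'), id='demo1')
source = SeqFeature(FeatureLocation(0, 11), type='source',
                    qualifiers={'organism': ['Cinachyrella sp.'],
                                'feature_id': ['demo1_source']})
gene = SeqFeature(FeatureLocation(0, 9), type='CDS',
                  qualifiers={'gene': ['cox1'], 'feature_id': ['demo1_f0']})
rec.features = [source, gene]
print rec.features[1].qualifiers['gene']
```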
print pj.records[0].format('genbank')
Explanation: The first record looks like this:
End of explanation
for record in pj.records:
# get the source qualifiers
source_feature = record.features[0]
source_qualifiers = source_feature.qualifiers
# get the species name
species = None
if 'organism' in source_qualifiers:
species = source_qualifiers['organism'][0] # qualifier values are lists
# place the genus as a qualifier in all the features
if species:
genus = species.split()[0]
for f in record.features:
f.qualifiers['genus'] = [genus]
Explanation: The next cell will get the organism name for each record, which is a qualifier in the source feature. It will then get the genus out of this name, and place it in a new qualifier, in all the record's features:
End of explanation
print pj.records[0].format('genbank')
Explanation: The genus qualifier was added to all the features.
End of explanation
pj.if_this_then_that('Cinachyrella', 'genus', 'yes', 'porocalices', mode='part')
Explanation: Important:
In addition to the record ID, ReproPhylo assigns a unique feature ID for each feature within the record. In a record with a record ID KC902343, the ID of the first feature will be KC902343_source, for the second and third features the IDs will be KC902343_f0 and KC902343_f1 and so on. For a record with a record ID denovo2, the features will get the feature IDs denovo2_source, denovo2_f0, denovo2_f1 and so on. This is important because it allows to access specific features directly (say, the cox1 features), using their feature ID.
3.4.2.2 Some ReproPhylo shortcuts
ReproPhylo adds some basic shortcuts for convenience. Here are some examples:
End of explanation
features_to_modify = ['KC902343.1_f0', 'JX177933.1_f0']
pj.add_qualifier(features_to_modify, 'spam?', 'why not')
Explanation: This method iterates over all the records in the project and makes some changes where the rules provided to it apply. If the value 'Cinachyrella' is found in the qualifier 'genus' it will put the value 'yes' in the qualifier 'porocalices', which is a morphological trait of the sponge genus Cinachyrella. mode='part' means that the match can be partial. The default is mode='whole'.
End of explanation
pj.add_qualifier_from_source('country')
Explanation: This method will add a qualifier spam? with the value why not to specific features that have the feature IDs KC902343.1_f0 and JX177933.1_f0. Within each record, ReproPhylo assigns unique ID for each feature.
End of explanation
pj.copy_paste_from_features_to_source('eggs?', 'spam?')
Explanation: We may want to place the source qualifier 'country' explicitly in each of the other features. The add_qualifier_from_source method will take effect in all the records that have a country qualifier in their source feature. It will copy it to all the other features in the record, along with its value in that record.
End of explanation
pj.copy_paste_within_feature('GC_content', '%GC')
Explanation: Or vice-versa, we can make sure that a qualifier that is in only one of the features, is copied as a value of a source feature qualifier and thus apply it to the whole record (and all its features). In this case, the function copy_paste_from_features_to_source will take effect in records where at least one non-source record feature has the qualifier eggs?, and it will copy the value of eggs? to the source qualifier spam?.
End of explanation
pj.write('outputs/metadata_example.tsv', format='csv')
Explanation: Lastly, we may want to equate qualifiers that have different names in different records, but are essentially the same thing. For example, 'sample' and 'voucher'. This can be done by applying the qualifier name of one of them
to the other, using the method copy_paste_within_feature. In every record feature that has the qualifier GC_content, a new qualifier will be created, %GC, and it will contain the value of GC_content.
This is the FEATURES section of the above record, with the resulting changes to it. The method which is responsible for each qualifier is indicated next to it (the method names are not a part of the real output):
<pre>
FEATURES Location/Qualifiers
source 1..1728
/feature_id="KC902343.1_source"
/mol_type="genomic DNA"
/country="Australia"
/eggs?="why not" #### copy_paste_from_features_to_source
/note="PorToL ID: NCI376"
/db_xref="taxon:1342549"
/specimen_voucher="0M9H2022-P"
/genus="Cinachyrella" #### Biopython script from section 3.4.2.1
/organism="Cinachyrella cf. paterifera 0M9H2022-P"
rRNA <1..>1728
/porocalices="yes" ####if_this_then_that
/product="small subunit 18S ribosomal RNA"
/country="Australia" #### add_qualifier_from_source
/nuc_degen_prop="0.0"
/feature_id="KC902343.1_f0"
/spam?="why not" #### add_qualifier
/%GC="52.0833333333" #### copy_paste_within_feature
/GC_content="52.0833333333"
/genus="Cinachyrella" #### Biopython script from section 3.4.2.1
</pre>
3.4.2.2 Using a spreadsheet
ReproPhylo provides an alternative route for metadata editing that goes through a spreadsheet. This way, the spreadsheet can be routinely edited and the changes read into the Project and propagated to its existing components (eg, trees). The best way to edit this spreadsheet probably goes through pandas, if you are familiar with it. Otherwise, it is possible to edit and save in excel, libreoffice and similar programmes, although beware of errors.
In this section I will give an example using a spreadsheet programme. This example will add the qualifier 'monty' and the value 'python' to each source feature, and the qualifier 'holy' with the value 'grail' to each non-source feature.
The first step is to write a csv file (the separators are actually tabs and not commas)
End of explanation
from IPython.display import Image
Image('images/spreadsheet.png', width=400)
Explanation: In the resulting file, each feature has its own line, and each record has as many lines as non-source features it contains. Source feature qualifiers are included in all the lines, as they apply to all the features in the record. They are indicated in the titles with the prefix source:_. To add a qualifier to the source feature, we will need to use this prefix in its title.
I have opened this file in a spreadsheet programme and added the qualifiers as follows:
End of explanation
pj.correct_metadata_from_file('outputs/edited_metadata_example.tsv')
# Propagate the changes so they are also updated in tree leaves.
pj.propagate_metadata()
Explanation: The edited spreadsheet was saved as outputs/edited_metadata_example.tsv, and it can now be read back to the Project
End of explanation
# Update the pickle file
pickle_pj(pj, 'outputs/my_project.pkpj')
Explanation: If we print the first record again, this is how its FEATURES section looks now:
<pre>
FEATURES Location/Qualifiers
source 1..1728
/note="PorToL ID: NCI376"
/mol_type="genomic DNA"
/country="Australia"
/organism="Cinachyrella cf. paterifera 0M9H2022-P"
/feature_id="KC902343.1_source"
/db_xref="taxon:1342549"
/specimen_voucher="0M9H2022-P"
/genus="Cinachyrella"
/eggs?="why not"
/monty="python" #### New source qualifier
rRNA <1..>1728
/porocalices="yes"
/product="small subunit 18S ribosomal RNA"
/holy="grail" #### New non-source qualifier
/country="Australia"
/nuc_degen_prop="0"
/feature_id="KC902343.1_f0"
/%GC="52.0833333333"
/spam?="why not"
/record_id="KC902343.1"
/GC_content="52.0833333333"
/genus="Cinachyrella"
</pre>
End of explanation
## A Biopython example
for record in pj.records:
# get the source qualifiers
source_feature = record.features[0]
source_qualifiers = source_feature.qualifiers
# get the species name
species = None
if 'organism' in source_qualifiers:
# qualifier values are lists
species = source_qualifiers['organism'][0]
# place the genus as a qualifier in all the features
if species:
genus = species.split()[0]
for f in record.features:
f.qualifiers['genus'] = [genus]
## Add qualifier based on condition
pj.if_this_then_that('Cinachyrella', 'genus', 'yes', 'porocalices',
mode='part')
## Modify qualifier of specific features
features_to_modify = ['KC902343.1_f0', 'JX177933.1_f0']
pj.add_qualifier(features_to_modify, 'spam?', 'why not')
## Copy qualifier from source to features
pj.add_qualifier_from_source('country')
# or vice-versa
pj.copy_paste_from_features_to_source('spam?', 'eggs?')
## Duplicate a qualifier with a new name
pj.copy_paste_within_feature('GC_content', '%GC')
## Write metadata spreadsheet
pj.write('outputs/metadata_example.tsv', format='csv')
# Read a corrected metadata spreadsheet
pj.correct_metadata_from_file('outputs/edited_metadata_example.tsv')
# Propagate the changes
pj.propagate_metadata()
Explanation: 3.4.3 Quick reference
End of explanation |
11,314 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<a href="http
Step1: <a ><img src = "https
Step2: The code in the indent is executed N times, each time the value of i is increased by 1 for every execution. The statement executed is to print out the value in the list at index i as shown here
Step3: Write a for loop the prints out all the element between -5 and 5 using the range function.
Step4: <div align="right">
<a href="#q2" class="btn btn-default" data-toggle="collapse">Click here for the solution</a>
</div>
<div id="q2" class="collapse">
```
for i in range(-5,6)
Step5: For each iteration, the value of the variable years behaves like the value of dates[i] in the first example
Step6: <div align="right">
<a href="#q3" class="btn btn-default" data-toggle="collapse">Click here for the solution</a>
</div>
<div id="q3" class="collapse">
```
Genres=[ 'rock', 'R&B', 'Soundtrack' 'R&B', 'soul', 'pop']
for Genre in Genres
Step7: Write a for loop that prints out the following list
Step8: <div align="right">
<a href="#q3" class="btn btn-default" data-toggle="collapse">Click here for the solution</a>
</div>
<div id="q3" class="collapse">
```
squares=['red','yellow','green','purple','blue ']
for square in squares
Step9: <a id="ref2"></a>
<center><h2>While Loops</h2></center>
As you can see, the for loop is used for a controlled flow of repetition. However, what if we don't know when we want to stop the loop? What if we want to keep executing a code block until a certain condition is met? The while loop exists as a tool for repeated execution based on a condition. The code block will keep being executed until the given logical condition returns a False boolean value.
Let’s say we would like to iterate through list dates and stop at the year 1973, then print out the number of iterations. This can be done with the following block of code
Step10: A while loop iterates merely until the condition in the argument is not met, as shown in the following figure
Step11: <div align="right">
<a href="#q8" class="btn btn-default" data-toggle="collapse">Click here for the solution</a>
</div>
<div id="q8" class="collapse">
```
PlayListRatings = [10,9.5,10, 8,7.5, 5,10, 10]
i = 0;
Rating = PlayListRatings[i]
while(Rating >= 6) | Python Code:
range(3)
Explanation: <a href="http://cocl.us/topNotebooksPython101Coursera"><img src = "https://ibm.box.com/shared/static/yfe6h4az47ktg2mm9h05wby2n7e8kei3.png" width = 750, align = "center"></a>
<a href="https://www.bigdatauniversity.com"><img src = "https://ibm.box.com/shared/static/ugcqz6ohbvff804xp84y4kqnvvk3bq1g.png" width = 300, align = "center"></a>
<h1 align=center><font size = 5>LOOPS IN PYTHON</font></h1>
Table of Contents
<div class="alert alert-block alert-info" style="margin-top: 20px">
<li><a href="#ref1">For Loops</a></p></li>
<li><a href="#ref2">While Loops </a></p></li>
<br>
<p></p>
Estimated Time Needed: <strong>15 min</strong>
</div>
<hr>
<a id="ref1"></a>
<center><h2>For Loops</h2></center>
Sometimes, you might want to repeat a given operation many times. Repeated executions like this are performed by loops. We will look at two types of loops, for loops and while loops.
Before we discuss loops lets discuss the range object. It is helpful to think of the range object as an ordered list. For now, let's look at the simplest case. If we would like to generate a sequence that contains three elements ordered from 0 to 2 we simply use the following command:
End of explanation
dates = [1982,1980,1973]
N=len(dates)
for i in range(N):
print(dates[i])
Explanation: <a ><img src = "https://ibm.box.com/shared/static/mxzjehamhqq5dljnxeh0vwqlju67j6z8.png" width = 300, align = "center"></a>
<h4 align=center>:Example of range function.
</h4>
The for loop
The for loop enables you to execute a code block multiple times. For example, you would use this if you would like to print out every element in a list.
Let's try to use a for loop to print all the years presented in the list dates:
This can be done as follows:
End of explanation
for i in range(0,8):
print(i)
Explanation: The code in the indent is executed N times, each time the value of i is increased by 1 for every execution. The statement executed is to print out the value in the list at index i as shown here:
<a ><img src = "https://ibm.box.com/shared/static/w021psh5dtxcl2qheyc5d19d8tik7vq3.gif" width = 1000, align = "center"></a>
<h4 align=center> Example of printing out the elements of a list.
</h4>
In this example we can print out a sequence of numbers from 0 to 7:
End of explanation
for i in range(-5, 6):
print(i)
Explanation: Write a for loop that prints out all the elements between -5 and 5 using the range function.
End of explanation
for year in dates:
print(year)
Explanation: <div align="right">
<a href="#q2" class="btn btn-default" data-toggle="collapse">Click here for the solution</a>
</div>
<div id="q2" class="collapse">
```
for i in range(-5,6):
print(i)
```
</div>
In Python we can directly access the elements in the list as follows:
End of explanation
Genres=[ 'rock', 'R&B', 'Soundtrack' 'R&B', 'soul', 'pop']
for genre in Genres:
print(genre)
Explanation: For each iteration, the value of the variable years behaves like the value of dates[i] in the first example:
<a ><img src = "https://ibm.box.com/shared/static/zljq7m9stw8znv7ca2it6vkekaudfuwf.gif" width = 1100, align = "center"></a>
<h4 align=center> Example of a for loop
</h4>
Print the elements of the following list:
Genres=[ 'rock', 'R&B', 'Soundtrack' 'R&B', 'soul', 'pop']
Make sure you follow Python conventions.
End of explanation
squares=['red','yellow','green','purple','blue ']
for i in range(0,5):
print("Before square ",i, 'is', squares[i])
squares[i]='white'
print("After square ",i, 'is', squares[i])
Explanation: <div align="right">
<a href="#q3" class="btn btn-default" data-toggle="collapse">Click here for the solution</a>
</div>
<div id="q3" class="collapse">
```
Genres=[ 'rock', 'R&B', 'Soundtrack' 'R&B', 'soul', 'pop']
for Genre in Genres:
print(Genre)
```
</div>
We can change the elements in a list:
End of explanation
squares=['red','yellow','green','purple','blue ']
for square in squares:
print(square)
Explanation: Write a for loop that prints out the following list: squares=['red','yellow','green','purple','blue ']:
End of explanation
squares=['red','yellow','green','purple','blue ']
for i,square in enumerate(squares):
print(i,square)
Explanation: <div align="right">
<a href="#q3" class="btn btn-default" data-toggle="collapse">Click here for the solution</a>
</div>
<div id="q3" class="collapse">
```
squares=['red','yellow','green','purple','blue ']
for square in squares:
print(square)
```
</div>
We can access the index and the elements of a list as follows:
End of explanation
dates = [1982,1980,1973,2000]
i=0;
year=0
while(year!=1973):
year=dates[i]
i=i+1
print(year)
print("it took ", i ,"repetitions to get out of loop")
Explanation: <a id="ref2"></a>
<center><h2>While Loops</h2></center>
As you can see, the for loop is used for a controlled flow of repetition. However, what if we don't know when we want to stop the loop? What if we want to keep executing a code block until a certain condition is met? The while loop exists as a tool for repeated execution based on a condition. The code block will keep being executed until the given logical condition returns a False boolean value.
Let’s say we would like to iterate through list dates and stop at the year 1973, then print out the number of iterations. This can be done with the following block of code:
End of explanation
PlayListRatings = [10,9.5,10,8,7.5,5,10,10]
i = 0
while(not (PlayListRatings[i] < 6)):
print(PlayListRatings[i])
i += 1
Explanation: A while loop iterates merely until the condition in the argument is not met, as shown in the following figure :
<a ><img src = "https://ibm.box.com/shared/static/hhe9tiskw1qqpycs4b8l2l2q58e2kn54.gif" width = 1000, align = "center"></a>
<h4 align=center> Example of a while loop
</h4>
Write a while loop to display the values of the Rating of an album playlist stored in the list “PlayListRatings”. If the score is less than 6, exit the loop. The list “PlayListRatings” is given by: PlayListRatings = [10,9.5,10, 8,7.5, 5,10, 10]:
End of explanation
squares=['orange','orange','purple','blue ','orange']
new_squares=[];
for square in squares:
if square != 'orange':
break
else:
new_squares.append(square)
print(new_squares)
Explanation: <div align="right">
<a href="#q8" class="btn btn-default" data-toggle="collapse">Click here for the solution</a>
</div>
<div id="q8" class="collapse">
```
PlayListRatings = [10,9.5,10, 8,7.5, 5,10, 10]
i = 0;
Rating = PlayListRatings[i]
while(Rating >= 6):
print(Rating)
i = i + 1
Rating = PlayListRatings[i]
```
</div>
<hr>
Write a while loop to copy the strings 'orange' of the list 'squares' to the list 'new_squares'. Stop and exit the loop if the value on the list is not 'orange':
End of explanation |
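The cell above solves the copying exercise with a for loop and break; a version that literally uses a while loop, as the exercise asks, could look like this (one possible solution):
```
squares = ['orange', 'orange', 'purple', 'blue ', 'orange']
new_squares = []
i = 0
while i < len(squares) and squares[i] == 'orange':
    new_squares.append(squares[i])
    i = i + 1
print(new_squares)
```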
11,315 | Given the following text description, write Python code to implement the functionality described.
Description:
Filter an input list of strings only for ones that contain given substring
This is how the function will work:
filter_by_substring([], 'a')
[]
This is how the function will work:
filter_by_substring(['abc', 'bacd', 'cde', 'array'], 'a')
['abc', 'bacd', 'array']
| Python Code:
from typing import List
def filter_by_substring(strings: List[str], substring: str) -> List[str]:
return [x for x in strings if substring in x] |
11,316 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Welcome to another PyLadies meetup!!
I know that after the holidays what you had already learned in Python may not be so fresh anymore. So let's briefly list (and explain) what we have been covering in Python.
Basic operations, or how to use Python as a calculator. Python can basically be used like any calculator, operating directly on objects such as whole numbers (integers), decimals (floats) and character strings (strings)
Variable assignment. If you want to store the results of operations, floats, integers or strings in Python's memory, what we have to do is assign them to variables. To do this you have to invent a name (starting with letters of the alphabet), put an equals sign and, after it, the value or operation you want to store, as in the following example
Step1: I think the point is clear... It is tedious to write the same thing 20 times. Now imagine you had to do this not 20 times but 10,000!!! Sounds like a lot of work, right? However, in Python there are several strategies to solve it. Today we will look at the for loop (or froot loop, as I call it hehehe).
The for loop is a kind of iteration to which you give a list or collection of objects to iterate over (called iterables), and for each element it executes the series of instructions you gave it until the list or iterable runs out. Let's see an example to make it clear... Let's do the same thing we wanted to do in the previous example.
Step2: Yay!!! You saw what can be done with loops. Now it's your turn.
Exercise 1
Create a program that converts all the elements of the following list to integers (using, of course, the froot loop)
Step3: Exercise 2
Create a program that gives you, as a result, a new list with the average of the previously created list.
Exercise 3
Create a program that prints "hola" the number of times the user chooses.
Example.
"Escoge un número del 1 al 100"
Step4: Observe what happens when we ask Python to print each element of the nested list
Step5: Y que pasa si queremos obtener cada elemento de todas las listas | Python Code:
# Get the square of 1
1**2
# Get the square of 2
2**2
# Get the square of 3
3**2
# Get the square of 4
4**2
# Get the square of 5
5**2
# Get the square of 6
6**2
# Get the square of 7
7**2
# Get the square of 8
8**2
# Get the square of 9
9**2
# Get the square of 10
10**2
Explanation: Welcome to another PyLadies meetup!!
I know that after the holidays what you had already learned in Python may not be so fresh anymore. So let's briefly list (and explain) what we have been covering in Python.
Basic operations, or how to use Python as a calculator. Python can basically be used like any calculator, operating directly on objects such as whole numbers (integers), decimals (floats) and character strings (strings)
Variable assignment. If you want to store the results of operations, floats, integers or strings in Python's memory, what we have to do is assign them to variables. To do this you have to invent a name (starting with letters of the alphabet), put an equals sign and, after it, the value or operation you want to store, as in the following example:
variable = 5 + 2.5
variable_string = "String"
Lists, Python's collector album. If what you want is a collection of elements in Python, one of the data structures that lets you do this is the list; for these you have to put the elements you want to store between square brackets (all data types, including lists!) separated by commas. Example:
lista = [variable, 5, 2.5, "Hola"]
Flow control. Decisions with "if" and "else". At some point you will have to write a program that must follow two different paths depending on a condition. For example, to decide whether or not to use an umbrella, a program can be: if it rains then I use an umbrella, otherwise it is not used. In Python this is represented as follows:
if lluvia == True:
paraguas = True
else:
paraguas = False
I hope this recap has helped you refresh your memory, but what we will see today is a very useful concept in programming, and that is iteration.
Iterations in Python
Iterations are the repetition of the same sequence of steps a certain number of times; this repetition is carried out until a condition is met. To make it clearer, imagine that you want to get the square of every number from 1 to 20: what you would have to do in Python (if there were no iterations) is write the same operation 20 times. As an exercise, get the squares manually
End of explanation
for numero in range(1,21):
cuadrado = numero**2
print(cuadrado)
Explanation: I think the point is clear... It is tedious to write the same thing 20 times. Now imagine you had to do this not 20 times but 10,000!!! Sounds like a lot of work, right? However, in Python there are several strategies to solve it. Today we will look at the for loop (or froot loop, as I call it hehehe).
The for loop is a kind of iteration to which you give a list or collection of objects to iterate over (called iterables), and for each element it executes the series of instructions you gave it until the list or iterable runs out. Let's see an example to make it clear... Let's do the same thing we wanted to do in the previous example.
End of explanation
lista = [5.9, 3.0, 2, 25.5, 14.2,3, 5]
integers=[]
for num in lista:
integers.append(int(num))
integers
range(20)
list(range(20))
for num in range(len(lista)):
print (int(lista[num]))
Explanation: Yay!!! You saw what can be done with loops. Now it's your turn.
Exercise 1
Create a program that converts all the elements of the following list to integers (using, of course, the froot loop)
End of explanation
lista_anidada = [['Perro', 'Gato'], ['Joven', 'Viejo'], [1, 2]]
Explanation: Exercise 2
Create a program that gives you, as a result, a new list with the average of the previously created list.
Exercise 3
Create a program that prints "hola" the number of times the user chooses.
Example.
"Escoge un número del 1 al 100":
3
"hola"
"hola"
"hola"
Nested loops
A curious thing in Python is that you can create a for loop inside another loop. INCEPTION...
Let's see an example
End of explanation
for elemento in lista_anidada:
print (elemento)
Explanation: Observe what happens when we ask Python to print each element of the nested list
End of explanation
for elemento in lista_anidada:
for objeto in elemento:
print(objeto)
Explanation: And what happens if we want to get each element of all the lists
End of explanation |
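The same flattening can also be written as a nested list comprehension, a compact equivalent of the two nested for loops above; a small sketch using the lista_anidada defined earlier.
```
flattened = [objeto for elemento in lista_anidada for objeto in elemento]
print(flattened)
```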
11,317 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Processing MOC maps
Multi-Order Coverage maps represent regions in a spheric surface defined by tree-like structures with the aim of producing maps through different spatial resolutions. In astronomy they are used to procude lightweight version of surveys' coverage.
MOCs build the maps through Healpix. Healpix/MOC maps split the sky surface in diamond-like figures covering the same area each. Initially, at level zero, the number of diamonds is 12, covering the sphere in diamonds sized ~58 degree. At each following level, the diamonds are sub-divide in 4, recursively. Until the maximum number of levels is reached, each level doubles the map resolution, splitting the sphere firstly in 48 diamond, at the second level in 192; third, 768 and so on. The maximum level -- or order -- is 29, covering the sphere with 393.2 microarcsecond size diamonds.
Step4: The libraries we can use to generate/manipulate Healpix/MOC maps are
Step5: Let's do the same with healpix_util now
Step6: MOCpy for visualizing and writing the maps
Step7: MOC catalogs now for LaMassa's XMM
Step8: Automating MOC generation from fits files
We need, to generate a coverage map, the following parameters | Python Code:
# Let's handle units
from astropy import units as u
# Structure to map healpix' levels to their angular sizes
#
healpix_levels = {
0 : 58.63 * u.deg,
1 : 29.32 * u.deg,
2 : 14.66 * u.deg,
3 : 7.329 * u.deg,
4 : 3.665 * u.deg,
5 : 1.832 * u.deg,
6 : 54.97 * u.arcmin,
7 : 27.48 * u.arcmin,
8 : 13.74 * u.arcmin,
9 : 6.871 * u.arcmin,
10 : 3.435 * u.arcmin,
11 : 1.718 * u.arcmin,
12 : 51.53 * u.arcsec,
13 : 25.77 * u.arcsec,
14 : 12.88 * u.arcsec,
15 : 6.442 * u.arcsec,
16 : 3.221 * u.arcsec,
17 : 1.61 * u.arcsec,
18 : 805.2 * u.milliarcsecond,
19 : 402.6 * u.milliarcsecond,
20 : 201.3 * u.milliarcsecond,
21 : 100.6 * u.milliarcsecond,
22 : 50.32 * u.milliarcsecond,
23 : 25.16 * u.milliarcsecond,
24 : 12.58 * u.milliarcsecond,
25 : 6.291 * u.milliarcsecond,
26 : 3.145 * u.milliarcsecond,
27 : 1.573 * u.milliarcsecond,
28 : 786.3 * u.microarcsecond,
29 : 393.2 * u.microarcsecond
}
Explanation: Processing MOC maps
Multi-Order Coverage maps represent regions on a spherical surface defined by tree-like structures, with the aim of producing maps at different spatial resolutions. In astronomy they are used to produce lightweight versions of surveys' coverage.
MOCs build the maps through Healpix. Healpix/MOC maps split the sky surface into diamond-like cells, each covering the same area. Initially, at level zero, the number of diamonds is 12, covering the sphere with diamonds sized ~58 degrees. At each following level the diamonds are subdivided into 4, recursively. Each level doubles the map resolution until the maximum number of levels is reached, splitting the sphere first into 48 diamonds, then 192 at the second level, 768 at the third, and so on. The maximum level -- or order -- is 29, covering the sphere with diamonds 393.2 microarcseconds in size.
End of explanation
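The level-to-size table above can be cross-checked against healpy itself, which reports the approximate pixel size for a given nside. A minimal sketch; healpy.nside2resol returns radians, so the value is converted with astropy units.
```
import healpy
from astropy import units as u

for level in (0, 6, 12, 29):
    nside = 2 ** level
    resol = (healpy.nside2resol(nside) * u.rad).to(u.arcmin)
    print 'level', level, 'nside', nside, 'resolution ~', resol
```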
# as usual, matplotlib
%matplotlib inline
import matplotlib.pyplot as plt
# Load Healpix
import healpy
# Erin Sheldon's healpix_util
import healpix_util as hu
# Thomas Boch's MOCpy
import mocpy
%ls
from astropy.io import fits
chandra = fits.open('Chandra_multiwavelength.fits')[1]
print chandra.columns
# we are interested here on columns 'RA','DEC' and 'RADEC_ERR'
_data = {'ra' : chandra.data['RA'] * u.degree,
'dec': chandra.data['DEC']* u.degree,
'pos_err' : chandra.data['RADEC_ERR']* u.arcsec}
from astropy.table import Table
_table = Table(_data)
import pandas as pd
df = _table.to_pandas()
del _table,_data
df.hist('pos_err',bins=100)
plt.show()
df.describe()
# A function to find out which healpix level corresponds a given (typical) size of coverage
def size2level(size):
Returns nearest Healpix level corresponding to a given diamond size
The 'nearest' Healpix level is here to be the nearest greater level,
right before the first level smaller than 'size'.
assert size.unit
ko = None
for k, v in sorted(healpix_levels.items()): # sort keys so levels are scanned in ascending order (plain dict order is not guaranteed in Python 2)
if v < 2 * size:
break
ko = k
return ko
level = size2level(df.pos_err.median()* u.arcsec)
nside = 2**level
print "Typical (median) position error: \n{}".format(df.pos_err.median())
print "\nCorrespondig healpix level: {} \n\t and nsize value: {}".format(level,nside)
# Let's convert from ra,dec to theta,phi
# This function comes from mocpy
def ra2phi(ra):
convert equatorial ra, dec in degrees
to polar theta, phi in radians
import math
return math.radians(ra)
def dec2theta(dec):
convert equatorial ra, dec in degrees
to polar theta, phi in radians
import math
return math.pi/2 - math.radians(dec)
def radec2thetaphi(ra,dec):
_phi = ra2phi(ra)
_theta = dec2theta(dec)
return _theta,_phi
import healpy
def healpix_radec2pix(nside, ra, dec, nest=True):
_theta,_phi = radec2thetaphi(ra, dec)
return healpy.ang2pix(nside, _theta, _phi, nest=nest)
df['phi'] = df.ra.apply(ra2phi)
df['theta'] = df.dec.apply(dec2theta)
df.describe()
hp_pix_eq = df.apply(lambda x:healpix_radec2pix(nside,x.ra,x.dec,nest=True), axis=1)
hp_pix_ang = df.apply(lambda x:healpy.ang2pix(nside,x.theta,x.phi,nest=True), axis=1)
import numpy
numpy.array_equal(hp_pix_ang,hp_pix_eq)
Explanation: The libraries we can use to generate/manipulate Healpix/MOC maps are:
* healpy
* mocpy
* healpix_util
End of explanation
hpix = hu.HealPix(scheme='nest',nside=nside)
hpix
hu_pix = hpix.eq2pix(ra=df.ra,dec=df.dec)
numpy.array_equal(hu_pix,hp_pix_ang) and numpy.array_equal(hu_pix,hp_pix_eq)
# Curiosity: which one is faster?
%timeit hpix.eq2pix(ra=df.ra,dec=df.dec)
%timeit df.apply(lambda x:healpix_radec2pix(nside,x.ra,x.dec,nest=True), axis=1)
# So...all results are equal \o/ and ES's is faster
# we can now go on and put it inside our DataFrame
df['hpix'] = hu_pix
df.describe()
Explanation: Let's do the same with healpix_util now
End of explanation
moc = mocpy.MOC()
moc.add_pix_list(level,df.hpix)
moc.plot()
moc.write('chandra_MOC_uniq.fits')
table = Table.from_pandas(df)
table.write('chandra_MOC_radec.fits',format='fits',overwrite=True)
del df,table,moc,chandra,hpix
%ls -lh
Explanation: MOCpy for visualizing and writing the maps
End of explanation
from astropy.io import fits
xmm = fits.open('XMM_multiwavelength_cat.fits')[1]
xmm.columns.names
# we are interested here on columns 'RA','DEC' and 'RADEC_ERR'
_data = {'ra' : xmm.data['RA'] * u.degree,
'dec': xmm.data['DEC']* u.degree,
'pos_err' : xmm.data['RADEC_ERR']* u.arcsec}
df = Table(_data).to_pandas()
df.hist('pos_err',bins=100)
plt.show()
df.describe()
level = size2level(df.pos_err.median()* u.arcsec)
nside = 2**level
print "Typical (median) position error: \n{}".format(df.pos_err.median())
print "\nCorrespondig healpix level: {} \n\t and nsize value: {}".format(level,nside)
hpix = hu.HealPix(scheme='nest',nside=nside)
hpix
df['hpix'] = hpix.eq2pix(ra=df.ra,dec=df.dec)
df.describe()
moc = mocpy.MOC()
moc.add_pix_list(level,df.hpix)
moc.plot()
moc.write('xmm_MOC_uniq.fits')
table = Table.from_pandas(df)
table.write('xmm_MOC_radec.fits',format='fits',overwrite=True)
%ls -lh
Explanation: MOC catalogs now for LaMassa's XMM
End of explanation
def radec_2_moc(filename,ra_column,dec_column,radius_column=None,radius_value=None):
import healpix_util
import mocpy
import time
start_all = time.clock()
tbhdu = open_fits(filename)
table = radec_table(tbhdu,ra_column,dec_column,radius_column)
start_convert = time.clock()
if not radius_column:
if radius_value != None and radius_value > 0:
radius = radius_value
else:
from astropy import units
radius = 1 * units.arcsec
else:
radius = radius_mean(tbhdu,radius_column)
assert hasattr(radius,'unit')
level = size2level(radius)
nside = 2**level
hpix = healpix_util.HealPix('nest',nside)
table['hpix'] = hpix.eq2pix(table['ra'],table['dec'])
stop_convert = time.clock()
fileroot = '.'.join(filename.split('.')[:-1])
start_write_normal = time.clock()
fileout = '_'.join([fileroot,'MOC_position.fit'])
table.write(fileout,format='fits',overwrite=True)
stop_write_normal = time.clock()
start_write_moc = time.clock()
# fileout = '_'.join([fileroot,'MOC_uniq.fit'])
# moc = mocpy.MOC()
# moc.add_pix_list(level,table['hpix'])
# moc.write(fileout)
stop_write_moc = time.clock()
stop_all = time.clock()
_msg = "Time elapsed converting pixels: {}\n".format(stop_convert-start_convert)
_msg += "Time elapsed on writing the table: {}\n".format(stop_write_normal-start_write_normal)
_msg += "Time elapsed on writing MOC: {}\n".format(stop_write_moc-start_write_moc)
_msg += "Total time: {}\n".format(stop_all-start_all)
_msg += "Number of points: {}\n".format(len(table))
return _msg
def open_fits(filename,hdu=1):
from astropy.io import fits
from astropy.units import Quantity
_tab = fits.open(filename,ignore_missing_end=True)[hdu]
return _tab
def radec_table(tbhdu,ra_column,dec_column,radius_column=None):
from astropy.table import Table
from astropy import units
import numpy
_data = {'ra':tbhdu.data.field(ra_column) * units.deg,
'dec':tbhdu.data.field(dec_column) * units.deg,
'id':numpy.arange(tbhdu.header['NAXIS2'])}
if radius_column:
try:
_d = tbhdu.data.field(radius_column)
_data.update({'radius':_d})
except:
pass
return Table(_data)
def radius_mean(tbhdu,radius_column):
from astropy.units import Quantity
radius = None
if radius_column:
_radius = Quantity(tbhdu.data.field(radius_column), u.arcsec)
radius = _radius.mean()
assert radius
return radius
res = radec_2_moc('Chandra_multiwavelength.fits','RA','DEC','RADEC_ERR')
print res
res = radec_2_moc('XMM_multiwavelength_cat.fits','RA','DEC','RADEC_ERR')
print res
%ls -lh
def print_fits_columns(fitsfile,hdu=1):
from astropy.io import fits
hdul = fits.open(fitsfile,ignore_missing_end=True)
tbhdu = hdul[1]
print "Number of objects: {}\n".format(tbhdu.header['NAXIS2'])
print "{} columns:\n".format(fitsfile)
ncols = len(tbhdu.columns)
i = 0
for c in tbhdu.columns:
if i<=5:
print "\t{}; ".format(c.name)
else:
print "\t... ({} columns)".format(ncols-i)
break
i += 1
hdul.close()
print_fits_columns('photometry/hers/hers_catalogue_3sig250_no_extended.fits')
res = radec_2_moc('photometry/hers/hers_catalogue_3sig250_no_extended.fits','RA','DEC')
print res
print_fits_columns('photometry/galex/S82_gmsc_chbrandt.fit')
res = radec_2_moc('photometry/galex/S82_gmsc_chbrandt.fit','ra','dec','poserr')
print res
print_fits_columns('photometry/sdss/Stripe82_photo_chbrandt.fit')
res = radec_2_moc('photometry/sdss/Stripe82_photo_chbrandt.fit','ra','dec')
print res
print_fits_columns('photometry/shela/shela_stripe82_v1.3_cat.fits')
res = radec_2_moc('photometry/shela/shela_stripe82_v1.3_cat.fits','SDSS_RA','SDSS_DEC')
print res
print_fits_columns('photometry/spies/SpIES_ch1ch2_allaor_5s_bothchan_final.fits')
res = radec_2_moc('photometry/spies/SpIES_ch1ch2_allaor_5s_bothchan_final.fits','RA','DEC')
print res
print_fits_columns('photometry/unwise/brandt.fits')
res = radec_2_moc('photometry/unwise/brandt.fits','ra','dec')
print res
print_fits_columns('photometry/vla/first_14dec17.fits')
res = radec_2_moc('photometry/vla/first_14dec17.fits','RA','DEC')
print res
Explanation: Automating MOC generation from fits files
We need, to generate a coverage map, the following parameters:
* R.A. vector
* Dec. vector
* size of the elements
Object::
given params:
_ra -> <vector>
_dec -> <vector>
_radius -> <vector>
computed params:
_healpix-level
_healpix-nside
output:
_moc-uniq <- <filename>
_moc-obj <- <filename>
End of explanation |
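Since every catalogue above goes through the same call, the per-survey runs can themselves be wrapped in a loop; a minimal sketch assuming the radec_2_moc function defined above (the file names and column names mirror the calls made earlier).
```
catalogues = [
    ('photometry/hers/hers_catalogue_3sig250_no_extended.fits', 'RA', 'DEC', None),
    ('photometry/galex/S82_gmsc_chbrandt.fit', 'ra', 'dec', 'poserr'),
    ('photometry/sdss/Stripe82_photo_chbrandt.fit', 'ra', 'dec', None),
    ('photometry/vla/first_14dec17.fits', 'RA', 'DEC', None),
]
for filename, ra_col, dec_col, radius_col in catalogues:
    print radec_2_moc(filename, ra_col, dec_col, radius_column=radius_col)
```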
11,318 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Chapter 13: Neural Networks
Author's original notebook: https://github.com/rasbt/python-machine-learning-book/blob/master/code/ch13/ch13.ipynb
Step1: 13.1.3 Configuring Theano
Step2: Settings can be changed via environment variables
export THEANO_FLAGS=floatX=float32
To run the computation on the CPU:
THEANO_FLAGS=device=cpu,floatX=float64 python <pythonスクリプト>
To use the GPU:
THEANO_FLAGS=device=gpu,floatX=float32 python <pythonスクリプト>
Default settings can also be written to the ~/.theanorc file
[global]
floatX=float32
device-gpu
13.1.4 Working with array structures
Step3: 13.1.5 A linear regression example
Step4: 13.2 フィードフォワードニューラルネットワークでの活性化関数の選択
13.2.1 ロジスティック関数のまとめ
Step5: 13.2.2 ソフトマックス関数を使って多クラス分類の確率を推定する
Step7: 双曲線正接関数を使って出力範囲を拡大する
双曲線正接関数(hyperbolic tangent
Step8: Mac でバックエンドを theano にするために ~/.keras/keras.json を書き換える
参考 | Python Code:
import theano
from theano import tensor as T
# 初期化: scalar メソッドではスカラー(単純な配列)を生成
x1 = T.scalar()
w1 = T.scalar()
w0 = T.scalar()
z1 = w1 * x1 + w0
# コンパイル
net_input = theano.function(inputs=[w1, x1, w0], outputs=z1)
# 実行
net_input(2.0, 1.0, 0.5)
Explanation: Chapter 13: Neural Networks
著者オリジナル: https://github.com/rasbt/python-machine-learning-book/blob/master/code/ch13/ch13.ipynb
Theano を使って最適化された機械学習コードを作成する
http://deeplearning.net/software/theano/
人工ニューラルネットワークの活性化関数を選択する
すばやく簡単に実験を行うためにディープラーニングライブラリ Keras を使用する
https://keras.io/ , https://keras.io/ja/
13.1 Theano を使った式の構築、コンパイル、実行
13.1.1 Theano とは何か
13.1.2 はじめてのTheano
$ pip install Theano
End of explanation
# 浮動小数点数の型のデフォルトを確認
print(theano.config.floatX)
# 浮動小数点数の型を float32 に設定
# GPU の場合は float32 にしないといけないらしい
theano.config.floatX = 'float32'
# CPU と GPU どっちを使うかの設定を確認
print(theano.config.device)
Explanation: 13.1.3 Configuring Theano
End of explanation
import numpy as np
# Initialize
# If Theano runs in 64-bit mode, dmatrix must be used instead of fmatrix
x = T.fmatrix(name='x')
x_sum = T.sum(x, axis=0)
# Compile
calc_sum = theano.function(inputs=[x], outputs=x_sum)
# Execute (Python list)
ary = [[1, 2, 3], [1, 2, 3]]
print('Column sum:', calc_sum(ary))
# Execute (NumPy array)
ary = np.array([[1, 2, 3], [1, 2, 3]], dtype=theano.config.floatX)
print('Column sum:', calc_sum(ary))
import theano
from theano import tensor as T
# Initialize
x = T.fmatrix('x')
w = theano.shared(np.asarray([[0.0, 0.0, 0.0]], dtype=theano.config.floatX))
z = x.dot(w.T)
update = [[w, w + 1.0]]
# Compile
net_input = theano.function(inputs=[x], updates=update, outputs=z)
# Execute
data = np.array([[1, 2, 3]], dtype=theano.config.floatX)
for i in range(5):
print('z{}:'.format(i), net_input(data))
import theano
from theano import tensor as T
# Initialize
data = np.array([[1, 2, 3]], dtype=theano.config.floatX)
x = T.fmatrix('x')
w = theano.shared(np.asarray([[0.0, 0.0, 0.0]], dtype=theano.config.floatX))
z = x.dot(w.T)
update = [[w, w + 1.0]]
# Compile
net_input = theano.function(inputs=[], updates=update, givens={x: data}, outputs=z)
# Execute
for i in range(5):
print('z{}:'.format(i), net_input())
Explanation: Settings can be changed via environment variables
export THEANO_FLAGS=floatX=float32
To run computations on the CPU:
THEANO_FLAGS=device=cpu,floatX=float64 python <python script>
To use the GPU:
THEANO_FLAGS=device=gpu,floatX=float32 python <python script>
Default settings can also be written to the ~/.theanorc file
[global]
floatX=float32
device=gpu
13.1.4 Working with array structures
End of explanation
import numpy as np
# Create a one-dimensional dataset that contains 10 training samples
X_train = np.asarray([[0.0], [1.0], [2.0], [3.0], [4.0],
[5.0], [6.0], [7.0], [8.0], [9.0]],
dtype=theano.config.floatX)
y_train = np.asarray([1.0, 1.3, 3.1, 2.0, 5.0,
6.3, 6.6, 7.4, 8.0, 9.0],
dtype=theano.config.floatX)
import theano
from theano import tensor as T
import numpy as np
def training_linreg(X_train, y_train, eta, epochs):
costs = []
    # Initialize the arrays
    eta0 = T.fscalar('eta0')  # a float32 scalar instance
    y = T.fvector(name='y')   # a float32 vector instance
    X = T.fmatrix(name='X')   # a float32 matrix instance
    # Create the weight vector w as a shared variable that can be referenced inside the function
    w = theano.shared(np.zeros(shape=(X_train.shape[1] + 1), dtype=theano.config.floatX), name='w')
    # Compute the cost
    net_input = T.dot(X, w[1:]) + w[0]   # net input computed from the weights
    errors = y - net_input               # difference between y and the net input
    cost = T.sum(T.pow(errors, 2))       # sum of squared errors
    # Update the weights
    gradient = T.grad(cost, wrt=w)       # gradient of the cost
    update = [(w, w - eta0 * gradient)]  # update w with the cost gradient scaled by the learning rate
    # Compile the model
train = theano.function(inputs=[eta0], outputs=cost, updates=update, givens={X: X_train, y: y_train})
for _ in range(epochs):
costs.append(train(eta))
return costs, w
import matplotlib.pyplot as plt
costs, w = training_linreg(X_train, y_train, eta=0.001, epochs=10)
plt.plot(range(1, len(costs) + 1), costs)
plt.xlabel('Epochs')
plt.ylabel('Cost')
plt.tight_layout()
plt.show()
# Predict based on the input features
def predict_linreg(X, w):
Xt = T.matrix(name='X')
net_input = T.dot(Xt, w[1:]) + w[0]
predict = theano.function(inputs=[Xt], givens={w: w}, outputs=net_input)
return predict(X)
import matplotlib.pyplot as plt
plt.scatter(X_train, y_train, marker='s', s=50)
plt.plot(range(X_train.shape[0]), predict_linreg(X_train, w), color='gray', marker='o', markersize=4, linewidth=3)
plt.xlabel('x')
plt.ylabel('y')
plt.show()
Explanation: 13.1.5 A linear regression example
End of explanation
import numpy as np
X = np.array([[1, 1.4, 1.5]])
w = np.array([0.0, 0.2, 0.4])
def net_input(X, w):
z = X.dot(w)
return z
def logistic(z):
return 1.0 / (1.0 + np.exp(-z))
def logistic_activation(X, w):
z = net_input(X, w)
return logistic(z)
print('P(y=1|x) = {:.3f}'.format(logistic_activation(X, w)[0]))
# W : array, shape = [n_output_units, n_hidden_units+1]
# weight matrix connecting the hidden layer to the output layer
# note that the first column (W[:][0]) holds the bias units
W = np.array([[1.1, 1.2, 1.3, 0.5],
[0.1, 0.2, 0.4, 0.1],
[0.2, 0.5, 2.1, 1.9]])
# A : array, shape = [n_hidden+1, n_samples]
# activations of each layer
# note that the first element (A[0][0] = 1) is the bias unit
A = np.array([[1.0],
[0.1],
[0.3],
[0.7]])
# Z : array, shape = [n_output_units, n_samples]
# net input of the output layer
Z = W.dot(A)
y_probas = logistic(Z)
print('Probabilities:\n', y_probas)
y_class = np.argmax(Z, axis=0)
print('predicted class label: %d' % y_class[0])
Explanation: 13.2 Choosing activation functions for feedforward neural networks
13.2.1 Logistic function recap
End of explanation
def softmax(z):
return np.exp(z) / np.sum(np.exp(z))
def softmax_activation(X, w):
z = net_input(X, w)
return softmax(z)
y_probas = softmax(Z)
print(y_probas)
print(y_probas.sum())
y_class = np.argmax(Z, axis=0)
y_class[0]
Explanation: 13.2.2 Estimating class probabilities in multiclass classification via the softmax function
End of explanation
import os
import struct
import numpy as np
def load_mnist(path, kind='train'):
    """Load MNIST data from `path`"""
labels_path = os.path.join(path,
'%s-labels-idx1-ubyte' % kind)
images_path = os.path.join(path,
'%s-images-idx3-ubyte' % kind)
with open(labels_path, 'rb') as lbpath:
magic, n = struct.unpack('>II',
lbpath.read(8))
labels = np.fromfile(lbpath,
dtype=np.uint8)
with open(images_path, 'rb') as imgpath:
magic, num, rows, cols = struct.unpack(">IIII",
imgpath.read(16))
images = np.fromfile(imgpath,
dtype=np.uint8).reshape(len(labels), 784)
return images, labels
X_train, y_train = load_mnist('mnist', kind='train')
print('Rows: %d, columns: %d' % (X_train.shape[0], X_train.shape[1]))
X_test, y_test = load_mnist('mnist', kind='t10k')
print('Rows: %d, columns: %d' % (X_test.shape[0], X_test.shape[1]))
import theano
theano.config.floatX = 'float32'
X_train = X_train.astype(theano.config.floatX)
X_test = X_test.astype(theano.config.floatX)
Explanation: Broadening the output spectrum using a hyperbolic tangent
The hyperbolic tangent (tanh)
13.3 Training neural networks efficiently with Keras
pip install Keras
MNIST dataset: http://yann.lecun.com/exdb/mnist/
train-images-idx3-ubyte.gz: training set images (9,912,422 bytes)
train-labels-idx1-ubyte.gz: training set labels (28,881 bytes)
t10k-images-idx3-ubyte.gz: test set images (1,648,877 bytes)
t10k-labels-idx1-ubyte.gz: test set labels (4,542 bytes)
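As a quick illustration of the tanh activation discussed here (a minimal sketch reusing the net_input helper defined earlier; this is not part of the original chapter code):
def tanh_activation(X, w):
    # hyperbolic tangent squashes the net input into the range (-1, 1)
    z = net_input(X, w)
    return np.tanh(z)
print('tanh activation: {:.3f}'.format(tanh_activation(X, w)[0]))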
End of explanation
from keras.utils import np_utils
print('First 3 labels: ', y_train[:3])
y_train_ohe = np_utils.to_categorical(y_train)
print('\nFirst 3 labels (one-hot):\n', y_train_ohe[:3])
from keras.models import Sequential
from keras.layers.core import Dense
from keras.optimizers import SGD
np.random.seed(1)
# Initialize the model
model = Sequential()
# Add the first hidden layer
model.add(Dense(input_dim=X_train.shape[1], # number of input units
                output_dim=50,              # number of output units
                init='uniform',             # initialize the weights from a uniform distribution
                activation='tanh'))         # activation function (hyperbolic tangent)
# Add the second hidden layer
model.add(Dense(input_dim=50,
                output_dim=50,
                init='uniform',
                activation='tanh'))
# Add the output layer
model.add(Dense(input_dim=50,
                output_dim=y_train_ohe.shape[1],
                init='uniform',
                activation='softmax'))
# Configure the optimizer used when compiling the model
# SGD: stochastic gradient descent
# arguments: learning rate, weight-decay constant, and momentum
sgd = SGD(lr=0.001, decay=1e-7, momentum=.9)
# Compile the model
model.compile(loss='categorical_crossentropy', # cost function
              optimizer=sgd,                   # optimizer
              metrics=['accuracy'])            # model evaluation metric
model.fit(X_train,              # training data
          y_train_ohe,          # target data
          nb_epoch=50,          # number of epochs
          batch_size=300,       # batch size
          verbose=1,            # print progress messages while training
          validation_split=0.1) # fraction of the data used for validation
y_train_pred = model.predict_classes(X_train, verbose=0)
print('First 3 predictions: ', y_train_pred[:3])
train_acc = np.sum(y_train == y_train_pred, axis=0) / X_train.shape[0]
print('Training accuracy: %.2f%%' % (train_acc * 100))
y_test_pred = model.predict_classes(X_test, verbose=0)
test_acc = np.sum(y_test == y_test_pred, axis=0) / X_test.shape[0]
print('Test accuracy: %.2f%%' % (test_acc * 100))
Explanation: On a Mac, edit ~/.keras/keras.json to switch the backend to theano
Reference: Backends - Keras Documentation
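A quick way to check which backend the config currently points to (a minimal sketch using only the standard library; it assumes the config file already exists at the default location):
import json, os
cfg_path = os.path.expanduser('~/.keras/keras.json')
with open(cfg_path) as f:
    print(json.load(f).get('backend'))  # should print 'theano' after the edit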
End of explanation |
11,319 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Spatiotemporal permutation F-test on full sensor data
Tests for differential evoked responses in at least
one condition using a permutation clustering test.
The FieldTrip neighbor templates will be used to determine
the adjacency between sensors. This serves as a spatial prior
to the clustering. Spatiotemporal clusters will then
be visualized using custom matplotlib code.
See the FieldTrip website_ for a caveat regarding
the possible interpretation of "significant" clusters.
Step1: Set parameters
Step2: Read epochs for the channel of interest
Step3: Find the FieldTrip neighbor definition to setup sensor adjacency
Step4: Compute permutation statistic
How does it work? We use clustering to "bind" together features which are
similar. Our features are the magnetic fields measured over our sensor
array at different times. This reduces the multiple comparison problem.
To compute the actual test-statistic, we first sum all F-values in all
clusters. We end up with one statistic for each cluster.
Then we generate a distribution from the data by shuffling our conditions
between our samples and recomputing our clusters and the test statistics.
We test for the significance of a given cluster by computing the probability
of observing a cluster of that size. For more background read
Step5: Note. The same functions work with source estimate. The only differences
are the origin of the data, the size, and the adjacency definition.
It can be used for single trials or for groups of subjects.
Visualize clusters | Python Code:
# Authors: Denis Engemann <[email protected]>
# Jona Sassenhagen <[email protected]>
#
# License: BSD-3-Clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import mne
from mne.stats import spatio_temporal_cluster_test
from mne.datasets import sample
from mne.channels import find_ch_adjacency
from mne.viz import plot_compare_evokeds
print(__doc__)
Explanation: Spatiotemporal permutation F-test on full sensor data
Tests for differential evoked responses in at least
one condition using a permutation clustering test.
The FieldTrip neighbor templates will be used to determine
the adjacency between sensors. This serves as a spatial prior
to the clustering. Spatiotemporal clusters will then
be visualized using custom matplotlib code.
See the FieldTrip website_ for a caveat regarding
the possible interpretation of "significant" clusters.
End of explanation
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id = {'Aud/L': 1, 'Aud/R': 2, 'Vis/L': 3, 'Vis/R': 4}
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 30, fir_design='firwin')
events = mne.read_events(event_fname)
Explanation: Set parameters
End of explanation
picks = mne.pick_types(raw.info, meg='mag', eog=True)
reject = dict(mag=4e-12, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=None, reject=reject, preload=True)
epochs.drop_channels(['EOG 061'])
epochs.equalize_event_counts(event_id)
X = [epochs[k].get_data() for k in event_id] # as 3D matrix
X = [np.transpose(x, (0, 2, 1)) for x in X] # transpose for clustering
Explanation: Read epochs for the channel of interest
End of explanation
adjacency, ch_names = find_ch_adjacency(epochs.info, ch_type='mag')
print(type(adjacency)) # it's a sparse matrix!
plt.imshow(adjacency.toarray(), cmap='gray', origin='lower',
interpolation='nearest')
plt.xlabel('{} Magnetometers'.format(len(ch_names)))
plt.ylabel('{} Magnetometers'.format(len(ch_names)))
plt.title('Between-sensor adjacency')
Explanation: Find the FieldTrip neighbor definition to setup sensor adjacency
End of explanation
# set cluster threshold
threshold = 50.0 # very high, but the test is quite sensitive on this data
# set family-wise p-value
p_accept = 0.01
cluster_stats = spatio_temporal_cluster_test(X, n_permutations=1000,
threshold=threshold, tail=1,
n_jobs=1, buffer_size=None,
adjacency=adjacency)
T_obs, clusters, p_values, _ = cluster_stats
good_cluster_inds = np.where(p_values < p_accept)[0]
Explanation: Compute permutation statistic
How does it work? We use clustering to "bind" together features which are
similar. Our features are the magnetic fields measured over our sensor
array at different times. This reduces the multiple comparison problem.
To compute the actual test-statistic, we first sum all F-values in all
clusters. We end up with one statistic for each cluster.
Then we generate a distribution from the data by shuffling our conditions
between our samples and recomputing our clusters and the test statistics.
We test for the significance of a given cluster by computing the probability
of observing a cluster of that size. For more background read:
Maris/Oostenveld (2007), "Nonparametric statistical testing of EEG- and
MEG-data" Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
doi:10.1016/j.jneumeth.2007.03.024
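A toy, numpy-only illustration of the permutation logic described above (not part of the MNE example itself; the data here are fake):
rng = np.random.RandomState(0)
a, b = rng.randn(20) + 0.5, rng.randn(20)      # two fake "conditions"
observed = a.mean() - b.mean()                 # observed test statistic
pooled = np.concatenate([a, b])
null = []
for _ in range(1000):
    rng.shuffle(pooled)                        # shuffle condition labels
    null.append(pooled[:20].mean() - pooled[20:].mean())
p_val = (np.sum(np.abs(null) >= np.abs(observed)) + 1) / (len(null) + 1)
print(p_val)                                   # permutation p-value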
End of explanation
# configure variables for visualization
colors = {"Aud": "crimson", "Vis": 'steelblue'}
linestyles = {"L": '-', "R": '--'}
# organize data for plotting
evokeds = {cond: epochs[cond].average() for cond in event_id}
# loop over clusters
for i_clu, clu_idx in enumerate(good_cluster_inds):
# unpack cluster information, get unique indices
time_inds, space_inds = np.squeeze(clusters[clu_idx])
ch_inds = np.unique(space_inds)
time_inds = np.unique(time_inds)
# get topography for F stat
f_map = T_obs[time_inds, ...].mean(axis=0)
# get signals at the sensors contributing to the cluster
sig_times = epochs.times[time_inds]
# create spatial mask
mask = np.zeros((f_map.shape[0], 1), dtype=bool)
mask[ch_inds, :] = True
# initialize figure
fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3))
# plot average test statistic and mark significant sensors
f_evoked = mne.EvokedArray(f_map[:, np.newaxis], epochs.info, tmin=0)
f_evoked.plot_topomap(times=0, mask=mask, axes=ax_topo, cmap='Reds',
vmin=np.min, vmax=np.max, show=False,
colorbar=False, mask_params=dict(markersize=10))
image = ax_topo.images[0]
# create additional axes (for ERF and colorbar)
divider = make_axes_locatable(ax_topo)
# add axes for colorbar
ax_colorbar = divider.append_axes('right', size='5%', pad=0.05)
plt.colorbar(image, cax=ax_colorbar)
ax_topo.set_xlabel(
'Averaged F-map ({:0.3f} - {:0.3f} s)'.format(*sig_times[[0, -1]]))
# add new axis for time courses and plot time courses
ax_signals = divider.append_axes('right', size='300%', pad=1.2)
title = 'Cluster #{0}, {1} sensor'.format(i_clu + 1, len(ch_inds))
if len(ch_inds) > 1:
title += "s (mean)"
plot_compare_evokeds(evokeds, title=title, picks=ch_inds, axes=ax_signals,
colors=colors, linestyles=linestyles, show=False,
split_legend=True, truncate_yaxis='auto')
# plot temporal cluster extent
ymin, ymax = ax_signals.get_ylim()
ax_signals.fill_betweenx((ymin, ymax), sig_times[0], sig_times[-1],
color='orange', alpha=0.3)
# clean up viz
mne.viz.tight_layout(fig=fig)
fig.subplots_adjust(bottom=.05)
plt.show()
Explanation: Note. The same functions work with source estimate. The only differences
are the origin of the data, the size, and the adjacency definition.
It can be used for single trials or for groups of subjects.
Visualize clusters
End of explanation |
11,320 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Coding the Bose-Hubbard Hamiltonian with QuSpin
The purpose of this tutorial is to teach the interested user to construct bosonic Hamiltonians using QuSpin. To this end, below we focus on the Bose-Hubbard model (BHM) of a 1d chain. The Hamiltonian is
$$ H = -J\sum_{j=0}^{L-1}(b^\dagger_{j+1}b_j + \mathrm{h.c.})-\mu\sum_{j=0}^{L-1} n_j + \frac{U}{2}\sum_{j=0}^{L-1}n_j(n_j-1)$$
where $J$ is the hopping matrix element, $\mu$ -- the chemical potential, and $U$ -- the interaction strength. We label the lattice sites by $j=0,\dots,L-1$, and use periodic boundary conditions.
First, we load the required packages
Step1: Next, we define the model parameters
Step2: In order to construct the Hamiltonian of the BHM, we need to construct the bosonic basis. This is done with the help of the constructor boson_basis_1d. The first required argument is the chain length L. As an optional argument one can also specify the number of bosons in the chain Nb. We print the basis using the print() function.
Step3: If needed, we can specify the on-site bosonic Hilbert space dimension, i.e. the number of states per site, using the flag sps=int. This can help study larger systems of they are dilute.
Step4: Often times, the model under consideration has underlying symmetries. For instance, translation invariance, parity (reflection symmetry), etc. QuSpin allows the user to construct Hamiltonians in symmetry-reduced subspaces. This is done using optional arguments (flags) passed to the basis constructor.
For instance, if we want to construct the basis in the $k=0$ many-body momentum sector, we do this using the flag kblock=int. This specifies the many-body momentum of the state via $k=2\pi/L\times\texttt{kblock}$.
Whenever symmetries are present, the print() function returns one representative from which one can obtain all 'missing' states by applying the corresponding symmetry operator. It is important to note that, physically, this representative state stands for the linear combination of vectors in the class, not the state that is displayed by print(basis).
Step5: Additionally, the BHM features reflection symmetry around the middle of the chain. This symmetry block-diagonalises the Hamiltonian into two blocks, corresponding to the negative and positive eigenvalue of the parity operator. The corresponding flag is pblock=+1,-1.
Step6: Now that we have constructed the basis in the symmetry-reduced Hilbert space, we can construct the Hamiltonian. It will be helpful to cast it in the following form
Step7: The site coupling lists specify the sites on which the operators act, yet we need to tell QuSpin which operators are to act on these (pairs of) sites. Thus, we need the following operator strings which enter the static and dynamic lists used to define the Hamiltonian. Since the BHM is time-independent, we use an empty dynamic list
Step8: Building the Hamiltonian with QuSpin is now a one-liner using the hamiltonian constructor
Step9: when the Hamiltonian is constructed, we see three messages saying that it passes three type of symmetries. QuSpin does checks under the hood on the static and dynamic lists to determine if they satisfy the requested symmetries in the basis. They can be disabled by parsing the following flags to the hamiltonian constructor | Python Code:
from quspin.operators import hamiltonian # Hamiltonians and operators
from quspin.basis import boson_basis_1d # Hilbert space boson basis
import numpy as np # generic math functions
Explanation: Coding the Bose-Hubbard Hamiltonian with QuSpin
The purpose of this tutorial is to teach the interested user to construct bosonic Hamiltonians using QuSpin. To this end, below we focus on the Bose-Hubbard model (BHM) of a 1d chain. The Hamiltonian is
$$ H = -J\sum_{j=0}^{L-1}(b^\dagger_{j+1}b_j + \mathrm{h.c.})-\mu\sum_{j=0}^{L-1} n_j + \frac{U}{2}\sum_{j=0}^{L-1}n_j(n_j-1)$$
where $J$ is the hopping matrix element, $\mu$ -- the chemical potential, and $U$ -- the interaction strength. We label the lattice sites by $j=0,\dots,L-1$, and use periodic boundary conditions.
First, we load the required packages:
End of explanation
##### define model parameters #####
L=6 # system size
J=1.0 # hopping
U=np.sqrt(2.0) # interaction
mu=2.71 # chemical potential
Explanation: Next, we define the model parameters:
End of explanation
##### construct Bose-Hubbard Hamiltonian #####
# define boson basis with 3 states per site L bosons in the lattice
basis = boson_basis_1d(L,Nb=L) # full boson basis
print(basis)
Explanation: In order to construct the Hamiltonian of the BHM, we need to construct the bosonic basis. This is done with the help of the constructor boson_basis_1d. The first required argument is the chain length L. As an optional argument one can also specify the number of bosons in the chain Nb. We print the basis using the print() function.
End of explanation
basis = boson_basis_1d(L,Nb=L,sps=3) # particle-conserving basis, 3 states per site
print(basis)
Explanation: If needed, we can specify the on-site bosonic Hilbert space dimension, i.e. the number of states per site, using the flag sps=int. This can help study larger systems of they are dilute.
End of explanation
basis = boson_basis_1d(L,Nb=L,sps=3,kblock=1) # ... and zero momentum sector
print(basis)
Explanation: Often times, the model under consideration has underlying symmetries. For instance, translation invariance, parity (reflection symmetry), etc. QuSpin allows the user to construct Hamiltonians in symmetry-reduced subspaces. This is done using optional arguments (flags) passed to the basis constructor.
For instance, if we want to construct the basis in the $k=0$ many-body momentum sector, we do this using the flag kblock=int. This specifies the many-body momentum of the state via $k=2\pi/L\times\texttt{kblock}$.
Whenever symmetries are present, the print() function returns one representative from which one can obtain all 'missing' states by applying the corresponding symmetry operator. It is important to note that, physically, this representative state stands for the linear combination of vectors in the class, not the state that is displayed by print(basis).
End of explanation
basis = boson_basis_1d(L,Nb=L,sps=3,kblock=0,pblock=1) # ... + zero momentum and positive parity
print(basis)
Explanation: Additionally, the BHM features reflection symmetry around the middle of the chain. This symmetry block-diagonalises the Hamiltonian into two blocks, corresponding to the negative and positive eigenvalue of the parity operator. The corresponding flag is pblock=+1,-1.
End of explanation
# define site-coupling lists
hop=[[-J,i,(i+1)%L] for i in range(L)] #PBC
interact=[[0.5*U,i,i] for i in range(L)] # U/2 \sum_j n_j n_j
pot=[[-mu-0.5*U,i] for i in range(L)] # -(\mu + U/2) \sum_j n_j
print(hop)
#print(interact)
#print(pot)
Explanation: Now that we have constructed the basis in the symmetry-reduced Hilbert space, we can construct the Hamiltonian. It will be helpful to cast it in the following form:
$$H= -J\sum_{j=0}^{L-1}(b^\dagger_{j+1}b_j + \mathrm{h.c.})-\left(\mu+\frac{U}{2}\right)\sum_{j=0}^{L-1} n_j + \frac{U}{2}\sum_{j=0}^{L-1}n_jn_j $$
We start by defining the site-coupling lists. Suppose we would like to define the operator $\sum_j \mu_j n_j$. To this end, we can focus on a single summand first, e.g. $2.71 n_{j=3}$. The information encoded in this operator can be summarised as follows:
the coupling strength is $\mu_{j=3}=2.71$ (site-coupling lists),
the operator acts on site $j=3$ (site-coupling lists),
the operator is the density $n$ (operator-string, static/dynamic lists)
In QuSpin, the first two points are grouped together, defining a list [mu_j,j]=[2.71,3], while the type of operator we specify a bit later (see parentheses). We call this a site-coupling list. Summing over multiple sites then results in a nested list of lists:
End of explanation
# define static and dynamic lists
static=[['+-',hop],['-+',hop],['n',pot],['nn',interact]]
dynamic=[]
print(static)
Explanation: The site coupling lists specify the sites on which the operators act, yet we need to tell QuSpin which operators are to act on these (pairs of) sites. Thus, we need the following operator strings which enter the static and dynamic lists used to define the Hamiltonian. Since the BHM is time-independent, we use an empty dynamic list
End of explanation
# build Hamiltonian
H=hamiltonian(static,dynamic,basis=basis,dtype=np.float64)
print(H.todense())
Explanation: Building the Hamiltonian with QuSpin is now a one-liner using the hamiltonian constructor
End of explanation
# calculate eigensystem
E,V=H.eigh()
E_GS,V_GS=H.eigsh(k=2,which='SA',maxiter=1E10) # only GS
print("eigenenergies:", E)
#print("GS energy is %0.3f" %(E_GS[0]))
# calculate entanglement entropy per site of GS
subsystem=[i for i in range(L//2)] # sites contained in subsystem
Sent=basis.ent_entropy(V[:,0],sub_sys_A=subsystem,density=True)['Sent_A']
print("GS entanglement per site is %0.3f" %(Sent))
psi_k=V[:,0]
psi_Fock=basis.get_vec(psi_k)
print(psi_k.shape, psi_Fock.shape)
Explanation: When the Hamiltonian is constructed, we see three messages saying that it passes three types of checks. QuSpin does checks under the hood on the static and dynamic lists to determine if they satisfy the requested symmetries in the basis. They can be disabled by passing the following flags to the hamiltonian constructor: check_pcon=False, check_symm=False and check_herm=False.
We can now diagonalise H, and e.g. calculate the entanglement entropy of the ground state.
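A one-line sketch of passing those flags, reusing the lists defined above (illustrative; the checks are useful and normally worth keeping on):
H_unchecked = hamiltonian(static, dynamic, basis=basis, dtype=np.float64,
                          check_pcon=False, check_symm=False, check_herm=False)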
End of explanation |
11,321 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Step1: Function to compute escape velocity given halo parameters
Step3: Functions to compute halo parameters given cosmology and Mvir
Step6: Use these basic relations to get rvir<->mir conversion
Step7: A function to put all that together and use just Mvir | Python Code:
import numpy as np
from astropy import units as u
from astropy import constants as cnst
from astropy import cosmology
import matplotlib.pyplot as plt

def NFW_escape_vel(r, Mvir, Rvir, CvirorRs, truncated=False):
    """NFW profile escape velocity

    Parameters
    ----------
    r : Quantity w/ length units
        Radial distance at which to compute the escape velocity
    Mvir : Quantity w/ mass units
        Virial Mass
    CvirorRs : Quantity w/ dimensionless or distance units
        (Virial) Concentration parameter (if dimensionless),
        or halo scale radius (if length units)
    Rvir : Quantity w/ length units
        Virial radius
    truncated : bool or float
        False for infinite-size NFW or a number to cut off the
        halo at this many times Rvir
    """
CvirorRs = u.Quantity(CvirorRs)
if CvirorRs.unit.is_equivalent(u.m):
Cvir = Rvir/CvirorRs
elif CvirorRs.unit.is_equivalent(u.one):
Cvir = CvirorRs
else:
raise TypeError('CvirorRs must be length or dimensionless')
a = Rvir / Cvir
#"f-function" from the NFW literature (e.g. klypin 02) evaluated at Cvir
fofC = np.log(1 + Cvir) - Cvir / (1 + Cvir)
# value of the NFW potential at that point
potential = (-cnst.G * Mvir / fofC) * np.log(1 + (r / a)) / r
if truncated:
rtrunc = Rvir * float(truncated)
Ctrunc = rtrunc / a
mtrunc = Mvir * (np.log(1 + Ctrunc) - Ctrunc / (1 + Ctrunc)) / fofC
outer = r >= rtrunc
        potential[outer] = - cnst.G * mtrunc / r[outer]
        potential[~outer] = potential[~outer] + (cnst.G * Mvir / a) / (Ctrunc + 1) / fofC
vesc = (2 * np.abs(potential)) ** 0.5
return vesc.to(u.km/u.s)
Explanation: Function to compute escape velocity given halo parameters
End of explanation
def Deltavir(cosmo, z=0):
    """Standard Delta-vir from Bryan & Norman 98 (*not* Delta-c)"""
x = cosmo.Om(z) - 1
return (18*np.pi**2 + 82*x - 39*x**2)/(x+1)
Explanation: Functions to compute halo parameters given cosmology and Mvir
End of explanation
def rvirmvir(rvirormvir, cosmo, z=0):
    """Convert between Rvir and Mvir

    Parameters
    ----------
    rvirormvir : Quantity w/ mass or length units
        Either Rvir or Mvir, depending on the input units
    cosmo : astropy cosmology
        The cosmology to assume
    z : float
        The redshift to assume for the conversion

    Returns
    -------
    mvirorrvir : Quantity w/ mass or length units
        Whichever ``rvirormvir`` is *not*
    """
rhs = Deltavir(cosmo=cosmo, z=z) * cosmo.Om(z)*cosmo.H(z)**2 / (2*cnst.G)
if rvirormvir.unit.is_equivalent(u.solMass):
mvir = rvirormvir
return ((mvir / rhs)**(1/3)).to(u.kpc)
elif rvirormvir.unit.is_equivalent(u.kpc):
rvir = rvirormvir
return (rhs * rvir**3).to(u.solMass)
else:
raise ValueError('invalid input unit {}'.format(rvirormvir))
def mvir_to_cvir(mvir, z=0):
    """Power-law fit to the c_vir-M_vir relation from
    Equations 12 & 13 of Dutton & Maccio 2014, arXiv:1402.7073.
    """
a = 0.537 + (1.025 - 0.537) * np.exp(-0.718 * z**1.08)
b = -0.097 + 0.024 * z
m0 = 1e12 * u.solMass
logc = a + b * np.log10(mvir / m0)
return 10**logc
Explanation: Use these basic relations to get the rvir<->mvir conversion:
$\bar{\rho}_{\rm halo} = \Delta_{\rm vir}\Omega_m \rho_c$
$\frac{3 M_{\rm vir}}{4 \pi r^3_{\rm vir}} =\Delta_{\rm vir}(z) \Omega_m(z) \frac{3 H^2(z)}{8 \pi G}$
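Solving this for the virial radius gives $r_{\rm vir} = \left(\frac{2 G M_{\rm vir}}{\Delta_{\rm vir}(z)\,\Omega_m(z)\,H^2(z)}\right)^{1/3}$, which is exactly what rvirmvir evaluates (its rhs variable is $\Delta_{\rm vir}\Omega_m H^2 / 2G$).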
End of explanation
def NFW_escape_vel_from_Mvir(r, Mvir, z=0,
cosmo=cosmology.Planck15,
truncated=False):
cvir = mvir_to_cvir(Mvir, z)
rvir = rvirmvir(Mvir, cosmo, z)
return NFW_escape_vel(r, Mvir=Mvir,
CvirorRs=cvir,
Rvir=rvir,
truncated=truncated)
r = np.linspace(0, 300,101)[1:]*u.kpc #0 has a singularity
vesc = NFW_escape_vel_from_Mvir(r, 1e12*u.solMass)
plt.plot(r, vesc, c='r', label=r'$V_{\rm esc}$')
plt.plot(r, -vesc, c='r')
plt.plot(r, 3**-0.5*vesc, c='r', ls=':', label=r'$V_{\rm esc}/\sqrt{3}$')
plt.plot(r, -3**-0.5*vesc, c='r', ls=':')
plt.legend(loc=0)
plt.xlabel('$r$ [kpc]', fontsize=18)
plt.ylabel('km/s', fontsize=18)
r = np.linspace(0, 300,101)[1:]*u.kpc #0 has a singularity
vesc0p5 = NFW_escape_vel_from_Mvir(r, 5e11*u.solMass)
vesc1 = NFW_escape_vel_from_Mvir(r, 1e12*u.solMass)
vesc2 = NFW_escape_vel_from_Mvir(r, 2e12*u.solMass)
plt.plot(r, vesc0p5, c='b', label=r'$M_{\rm vir}=5 \times 10^{11}$')
plt.plot(r, -vesc0p5, c='b')
plt.plot(r, vesc1, c='g', label=r'$M_{\rm vir}=1 \times 10^{12}$')
plt.plot(r, -vesc1, c='g')
plt.plot(r, vesc2, c='r', label=r'$M_{\rm vir}=2 \times 10^{12}$')
plt.plot(r, -vesc2, c='r')
plt.legend(loc=0)
plt.xlabel('$r$ [kpc]', fontsize=18)
plt.ylabel(r'$v_{\rm esc}$ [km/s]', fontsize=18)
Explanation: A function to put all that together and use just Mvir
End of explanation |
11,322 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
T81-558
Step1: Several Useful Functions
These are functions that I reuse often to encode the feature vector (FV).
Step2: Read in Raw KDD-99 Dataset
Step3: Encode the feature vector
Encode every row in the database. This is not instant!
Step4: Train the Neural Network | Python Code:
# Imports for this Notebook
# Imports
import pandas as pd
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
import tensorflow.contrib.learn as skflow
from sklearn import metrics
Explanation: T81-558: Applications of Deep Neural Networks
TensorFlow (SKFLOW) Meets KDD-99
Instructor: Jeff Heaton, School of Engineering and Applied Science, Washington University in St. Louis
For more information visit the class website.
This simple example shows how to load a non-trivial dataset from CSV and train a neural network. The dataset is the
KDD99 dataset. This dataset is used to distinguish between normal and malicious network activity.
End of explanation
# These are several handy functions that I use in my class:
# Encode a text field to dummy variables
def encode_text_dummy(df,name):
dummies = pd.get_dummies(df[name])
for x in dummies.columns:
dummy_name = "{}-{}".format(name,x)
df[dummy_name] = dummies[x]
df.drop(name, axis=1, inplace=True)
# Encode a text field to a single index value
def encode_text_index(df,name):
le = preprocessing.LabelEncoder()
df[name] = le.fit_transform(df[name])
return le.classes_
# Encode a numeric field to Z-Scores
def encode_numeric_zscore(df,name,mean=None,sd=None):
if mean is None:
mean = df[name].mean()
if sd is None:
sd = df[name].std()
df[name] = (df[name]-mean)/sd
# Encode a numeric field to fill missing values with the median.
def missing_median(df, name):
med = df[name].median()
df[name] = df[name].fillna(med)
# Convert a dataframe to x/y suitable for training.
def to_xy(df,target):
result = []
for x in df.columns:
if x != target:
result.append(x)
return df.as_matrix(result),df[target]
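# Quick sanity check of the encoders on a tiny toy frame (illustration only,
# not part of the original notebook; column names here are made up).
toy = pd.DataFrame({'proto': ['tcp', 'udp', 'tcp'], 'bytes': [10.0, 250.0, 40.0]})
encode_text_dummy(toy, 'proto')      # replaces 'proto' with proto-tcp / proto-udp dummies
encode_numeric_zscore(toy, 'bytes')  # rescales 'bytes' to z-scores
print(toy)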
Explanation: Several Useful Functions
These are functions that I reuse often to encode the feature vector (FV).
End of explanation
# This file is a CSV, just no CSV extension or headers
# Download from: http://kdd.ics.uci.edu/databases/kddcup99/kddcup99.html
df = pd.read_csv("/Users/jeff/Downloads/data/kddcup.data_10_percent", header=None)
print("Read {} rows.".format(len(df)))
# df = df.sample(frac=0.1, replace=False) # Uncomment this line to sample only 10% of the dataset
df.dropna(inplace=True,axis=1) # For now, just drop NA's (rows with missing values)
# The CSV file has no column heads, so add them
df.columns = [
'duration',
'protocol_type',
'service',
'flag',
'src_bytes',
'dst_bytes',
'land',
'wrong_fragment',
'urgent',
'hot',
'num_failed_logins',
'logged_in',
'num_compromised',
'root_shell',
'su_attempted',
'num_root',
'num_file_creations',
'num_shells',
'num_access_files',
'num_outbound_cmds',
'is_host_login',
'is_guest_login',
'count',
'srv_count',
'serror_rate',
'srv_serror_rate',
'rerror_rate',
'srv_rerror_rate',
'same_srv_rate',
'diff_srv_rate',
'srv_diff_host_rate',
'dst_host_count',
'dst_host_srv_count',
'dst_host_same_srv_rate',
'dst_host_diff_srv_rate',
'dst_host_same_src_port_rate',
'dst_host_srv_diff_host_rate',
'dst_host_serror_rate',
'dst_host_srv_serror_rate',
'dst_host_rerror_rate',
'dst_host_srv_rerror_rate',
'outcome'
]
# display 5 rows
df[0:5]
Explanation: Read in Raw KDD-99 Dataset
End of explanation
# Now encode the feature vector
encode_numeric_zscore(df, 'duration')
encode_text_dummy(df, 'protocol_type')
encode_text_dummy(df, 'service')
encode_text_dummy(df, 'flag')
encode_numeric_zscore(df, 'src_bytes')
encode_numeric_zscore(df, 'dst_bytes')
encode_text_dummy(df, 'land')
encode_numeric_zscore(df, 'wrong_fragment')
encode_numeric_zscore(df, 'urgent')
encode_numeric_zscore(df, 'hot')
encode_numeric_zscore(df, 'num_failed_logins')
encode_text_dummy(df, 'logged_in')
encode_numeric_zscore(df, 'num_compromised')
encode_numeric_zscore(df, 'root_shell')
encode_numeric_zscore(df, 'su_attempted')
encode_numeric_zscore(df, 'num_root')
encode_numeric_zscore(df, 'num_file_creations')
encode_numeric_zscore(df, 'num_shells')
encode_numeric_zscore(df, 'num_access_files')
encode_numeric_zscore(df, 'num_outbound_cmds')
encode_text_dummy(df, 'is_host_login')
encode_text_dummy(df, 'is_guest_login')
encode_numeric_zscore(df, 'count')
encode_numeric_zscore(df, 'srv_count')
encode_numeric_zscore(df, 'serror_rate')
encode_numeric_zscore(df, 'srv_serror_rate')
encode_numeric_zscore(df, 'rerror_rate')
encode_numeric_zscore(df, 'srv_rerror_rate')
encode_numeric_zscore(df, 'same_srv_rate')
encode_numeric_zscore(df, 'diff_srv_rate')
encode_numeric_zscore(df, 'srv_diff_host_rate')
encode_numeric_zscore(df, 'dst_host_count')
encode_numeric_zscore(df, 'dst_host_srv_count')
encode_numeric_zscore(df, 'dst_host_same_srv_rate')
encode_numeric_zscore(df, 'dst_host_diff_srv_rate')
encode_numeric_zscore(df, 'dst_host_same_src_port_rate')
encode_numeric_zscore(df, 'dst_host_srv_diff_host_rate')
encode_numeric_zscore(df, 'dst_host_serror_rate')
encode_numeric_zscore(df, 'dst_host_srv_serror_rate')
encode_numeric_zscore(df, 'dst_host_rerror_rate')
encode_numeric_zscore(df, 'dst_host_srv_rerror_rate')
outcomes = encode_text_index(df, 'outcome')
num_classes = len(outcomes)
# display 5 rows
df.dropna(inplace=True,axis=1)
df[0:5]
# This is the numeric feature vector, as it goes to the neural net
Explanation: Encode the feature vector
Encode every row in the database. This is not instant!
End of explanation
# Break into X (predictors) & y (prediction)
x, y = to_xy(df,'outcome')
# Create a test/train split. 25% test
# Split into train/test
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.25, random_state=42)
# Create a deep neural network with 3 hidden layers of 10, 20, 10
classifier = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=num_classes, steps=500)
# Early stopping
early_stop = skflow.monitors.ValidationMonitor(x_test, y_test,
early_stopping_rounds=200,
n_classes=num_classes,
print_steps=50)
# Fit/train neural network
classifier.fit(x, y, early_stop)
# Measure accuracy
pred = classifier.predict(x_test)
score = metrics.accuracy_score(y_test, pred)
print("Validation score: {}".format(score))
Explanation: Train the Neural Network
End of explanation |
11,323 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https
Step1: Model with convolution and batch norm layer definition
Step2: Initialize all model weights with random numbers
Step3: Dims of conv layer weights
Step4: Dims of batch norm layer weights
Step5: Fuse conv and batch norm weights
Step6: Model with folded/fused convolution and batch norm layers
Step7: Initialize model_fused with fused weights
Step8: Validate that model and model_fused produce the same outputs | Python Code:
import tensorflow as tf
import numpy as np
np.random.seed(123)
Explanation: Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Example of folding keras conv layer with batch norm
Imports
End of explanation
epsilon=0.001
inputs = tf.keras.Input(shape=(50, 32, 5), batch_size=4)
net = inputs
net = tf.keras.layers.Conv2D(filters=2, kernel_size=(3,3))(net)
net = tf.keras.layers.BatchNormalization(epsilon=epsilon)(net)
net = tf.keras.layers.ReLU()(net)
net = tf.keras.layers.Flatten()(net)
model = tf.keras.Model(inputs, net)
model.summary()
model.layers
Explanation: Model with convolution and batch norm layer definition
End of explanation
# we will set all weights to random numbers so that even the bias will be non-zero
# it will help to validate the numerical correctness of the conv and batch norm fusion
all_weights = model.get_weights()
for i in range(len(all_weights)):
all_weights[i] = np.random.random(all_weights[i].shape)
model.set_weights(all_weights)
Explanation: Initialize all model weights with random numbers
End of explanation
ind_conv_layer = 1
assert(isinstance(model.layers[ind_conv_layer], tf.keras.layers.Conv2D))
conv_weights = model.layers[ind_conv_layer].get_weights()
print("conv weights shape " + str(conv_weights[0].shape))
print("conv bias shape " + str(conv_weights[1].shape))
Explanation: Dims of conv layer weights
End of explanation
ind_batch_norm_layer = 2
assert(isinstance(model.layers[ind_batch_norm_layer], tf.keras.layers.BatchNormalization))
bn_weights = model.layers[ind_batch_norm_layer].get_weights()
gamma = bn_weights[0]
print("gamma shape " + str(gamma.shape))
betta = bn_weights[1]
print("betta shape " + str(gamma.shape))
mean = bn_weights[2]
print("mean shape " + str(gamma.shape))
variance = bn_weights[3]
print("variance shape " + str(gamma.shape))
Explanation: Dims of batch norm layer weights
End of explanation
new_conv_weights = np.multiply(conv_weights[0], gamma) / np.sqrt(variance + epsilon)
new_bias = betta + np.multiply((conv_weights[1] - mean), gamma) / np.sqrt(variance + epsilon)
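# Added note (not in the original notebook): batch norm applied to the conv output
# computes gamma*(W*x + b - mu)/sqrt(var + eps) + beta, so the fused layer can use
# W' = W*gamma/sqrt(var + eps) and b' = beta + (b - mu)*gamma/sqrt(var + eps)
# per output channel, which is exactly what new_conv_weights and new_bias hold.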
Explanation: Fuse conv and batch norm weights
End of explanation
inputs = tf.keras.Input(shape=(50, 32, 5), batch_size=4)
net = inputs
net = tf.keras.layers.Conv2D(filters=2, kernel_size=(3,3))(net)
net = tf.keras.layers.Flatten()(net)
model_fused = tf.keras.Model(inputs, net)
model_fused.summary()
Explanation: Model with folded/fused convolution and batch norm layers
End of explanation
all_weights_fused = model_fused.get_weights()
all_weights_fused[0] = new_conv_weights
all_weights_fused[1] = new_bias
model_fused.set_weights(all_weights_fused)
Explanation: Initialize model_fused with fused weights
End of explanation
input_data = np.random.random(inputs.shape)
outputs = model.predict(input_data)
outputs_fused = model_fused.predict(input_data)
np.allclose(outputs, outputs_fused)
Explanation: Validate that model and model_fused produce the same outputs
End of explanation |
11,324 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<center>
<img src="../../img/ods_stickers.jpg">
Open Machine Learning Course
</center>
Author of the material
Step1: Checking stationarity and STL decomposition of the series
Step2: Stationarity
The Dickey-Fuller test does not reject the non-stationarity hypothesis, but a slight trend remains. Let's try seasonal differencing; we will run an STL decomposition on the differenced series and check stationarity
Step3: The Dickey-Fuller test rejects the non-stationarity hypothesis, but we could not get rid of the trend completely. Let's also add ordinary first-order differencing
Step4: The non-stationarity hypothesis is confidently rejected, and the series looks better visually: the trend is gone.
Model selection
Let's look at the ACF and PACF of the resulting series
Step5: Начальные приближения
Step6: If the previous cell raises an error, make sure you have updated statsmodels to at least version 0.8.0rc1.
Step7: The best model
Step8: Its residuals
Step9: The residuals are unbiased (confirmed by Student's t-test), stationary (confirmed by the Dickey-Fuller test and visually), and not autocorrelated (confirmed by the Ljung-Box test and the correlogram).
Let's see how well the model describes the data
Step10: Forecast | Python Code:
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
from matplotlib import pyplot as plt
plt.rcParams['figure.figsize'] = 12, 10
import pandas as pd
from scipy import stats
import statsmodels.api as sm
import matplotlib.pyplot as plt
from itertools import product
def invboxcox(y,lmbda):
if lmbda == 0:
return(np.exp(y))
else:
return(np.exp(np.log(lmbda*y+1)/lmbda))
deaths = pd.read_csv('../../data/accidental-deaths-in-usa-monthly.csv',
index_col=['Month'], parse_dates=['Month'])
deaths.rename(columns={'Accidental deaths in USA: monthly, 1973 ? 1978': 'num_deaths'}, inplace=True)
deaths['num_deaths'].plot()
plt.ylabel('Accidental deaths');
Explanation: <center>
<img src="../../img/ods_stickers.jpg">
Open Machine Learning Course
</center>
Author of the material: Yury Kashnitsky, research programmer at Mail.ru Group and senior lecturer at the Faculty of Computer Science, Higher School of Economics. Based on the course "Drawing Conclusions from Data" of the "Machine Learning and Data Analysis" specialization by Yandex and MIPT
<center>Topic 9. Time series analysis in Python</center>
<center>Part 2. Accidental deaths in the USA</center>
The monthly number of accidental deaths in the USA is known for January 1973 through December 1978; we need to build a forecast for the next 2 years.
End of explanation
sm.tsa.seasonal_decompose(deaths['num_deaths']).plot()
print("Критерий Дики-Фуллера: p=%f" % sm.tsa.stattools.adfuller(deaths['num_deaths'])[1])
Explanation: Checking stationarity and STL decomposition of the series:
End of explanation
deaths['num_deaths_diff'] = deaths['num_deaths'] - deaths['num_deaths'].shift(12)
sm.tsa.seasonal_decompose(deaths['num_deaths_diff'][12:]).plot()
print("Критерий Дики-Фуллера: p=%f" % sm.tsa.stattools.adfuller(deaths['num_deaths_diff'][12:])[1])
Explanation: Stationarity
The Dickey-Fuller test does not reject the non-stationarity hypothesis, but a slight trend remains. Let's try seasonal differencing; we will run an STL decomposition on the differenced series and check stationarity:
End of explanation
deaths['num_deaths_diff2'] = deaths['num_deaths_diff'] - deaths['num_deaths_diff'].shift(1)
sm.tsa.seasonal_decompose(deaths['num_deaths_diff2'][13:]).plot()
print("Критерий Дики-Фуллера: p=%f" % sm.tsa.stattools.adfuller(deaths['num_deaths_diff2'][13:])[1])
Explanation: The Dickey-Fuller test rejects the non-stationarity hypothesis, but we could not get rid of the trend completely. Let's also add ordinary first-order differencing:
End of explanation
ax = plt.subplot(211)
sm.graphics.tsa.plot_acf(deaths['num_deaths_diff2'][13:].values.squeeze(), lags=58, ax=ax)
ax = plt.subplot(212)
sm.graphics.tsa.plot_pacf(deaths['num_deaths_diff2'][13:].values.squeeze(), lags=58, ax=ax);
Explanation: The non-stationarity hypothesis is confidently rejected, and the series looks better visually: the trend is gone.
Model selection
Let's look at the ACF and PACF of the resulting series:
End of explanation
ps = range(0, 3)
d=1
qs = range(0, 1)
Ps = range(0, 3)
D=1
Qs = range(0, 3)
parameters = product(ps, qs, Ps, Qs)
parameters_list = list(parameters)
len(parameters_list)
%%time
results = []
best_aic = float("inf")
for param in parameters_list:
    # try/except is needed because the model fails to fit for some parameter sets
try:
model=sm.tsa.statespace.SARIMAX(deaths['num_deaths'], order=(param[0], d, param[1]),
seasonal_order=(param[2], D, param[3], 12)).fit(disp=-1)
    # print the parameter sets on which the model fails to fit and move on to the next one
except ValueError:
print('wrong parameters:', param)
continue
aic = model.aic
    # save the best model, its AIC and its parameters
if aic < best_aic:
best_model = model
best_aic = aic
best_param = param
results.append([param, model.aic])
warnings.filterwarnings('default')
Explanation: Начальные приближения: Q=2, q=1, P=2, p=2
End of explanation
result_table = pd.DataFrame(results)
result_table.columns = ['parameters', 'aic']
print(result_table.sort_values(by = 'aic', ascending=True).head())
Explanation: If the previous cell raises an error, make sure you have updated statsmodels to at least version 0.8.0rc1.
End of explanation
print(best_model.summary())
Explanation: The best model:
End of explanation
plt.subplot(211)
best_model.resid[13:].plot()
plt.ylabel(u'Residuals')
ax = plt.subplot(212)
sm.graphics.tsa.plot_acf(best_model.resid[13:].values.squeeze(), lags=48, ax=ax)
print("Критерий Стьюдента: p=%f" % stats.ttest_1samp(best_model.resid[13:], 0)[1])
print("Критерий Дики-Фуллера: p=%f" % sm.tsa.stattools.adfuller(best_model.resid[13:])[1])
Explanation: Its residuals:
End of explanation
deaths['model'] = best_model.fittedvalues
deaths['num_deaths'].plot()
deaths['model'][13:].plot(color='r')
plt.ylabel('Accidental deaths');
Explanation: The residuals are unbiased (confirmed by Student's t-test), stationary (confirmed by the Dickey-Fuller test and visually), and not autocorrelated (confirmed by the Ljung-Box test and the correlogram).
Let's see how well the model describes the data:
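The Ljung-Box test mentioned above is not actually run in this notebook; a minimal sketch of how it could be checked (the exact return format depends on the statsmodels version):
print(sm.stats.acorr_ljungbox(best_model.resid[13:], lags=10))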
End of explanation
from dateutil.relativedelta import relativedelta
deaths2 = deaths[['num_deaths']]
date_list = [pd.datetime.strptime("1979-01-01", "%Y-%m-%d") + relativedelta(months=x) for x in range(0,24)]
future = pd.DataFrame(index=date_list, columns=deaths2.columns)
deaths2 = pd.concat([deaths2, future])
deaths2['forecast'] = best_model.predict(start=72, end=100)
deaths2['num_deaths'].plot(color='b')
deaths2['forecast'].plot(color='r')
plt.ylabel('Accidental deaths');
Explanation: Forecast
End of explanation |
11,325 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Read in semi-structured data with pandas
When analyzing software systems in a Software Analytics style with pandas, you might face data that isn't yet in a tabular format you can easily read. In this notebook, I'll show you how you can read in semi-structured data. It's a set of tips and tricks how you can
transform a list of data into separate columns
split information in one entry into multiple columns
merge information across multiple rows into one row
So let's dive in!
Dataset
In our case, we want to analyze data from a version control system. The dataset was generated from the Git repository JavaOnAutobahn/spring-petclinic with the command git log --stat > git_log_stat.log.
This exports the history of the Git repository, including some information about the file changes per commit. Here is an excerpt from this created dataset
Step1: Information Extraction Adventure
Now we have to extract each bit of information accordingly. It's a thankless job. But it works quite well in most cases.
Extract a row to a separate column
We start with the rows that contain information that can be put into separate columns relatively easily. For this, we look for markers, e.g., at the beginning of a row. We can then use these markers to find the rows we want to extract and apply custom string splitting. This approach works for the information about the commit id, the author's name, and the commit date.
Step2: Extract further rows to one column
Next, we want to handle the multiline commit messages. These are also kind of marked by four consecutive whitespaces at the beginning. We can also extract them with the same approach as above (ugly, but it works!).
Step3: Note
Step4: In the next step, we need to signal which file statistics information belongs to which commit. Luckily, there is a marker from this that we've already extracted
Step5: OK, we see that this is getting somewhat complicated. So let's create a separate DataFrame for this called sha_files, where we just treat the file statistics. This DataFrame now contains, for each commit, the change information for each changed file.
Step6: We are now able to focus on the file statistics. We can split the raw entries on the tab character and throw away the raw data. This gives us a nicely formatted DataFrame with the file statistics' information.
Step7: Next, we want to join this data with the other, bigger log DataFrame that contains all the other information about the commits. This means we have to arrange the other DataFrame so that we can join our newly created sha_files DataFrame. We can accomplish this by groupby by the sha columns. We also try to reduce complexity by just preserving the meta information with the author and the timestamp for now.
Step8: With both DataFrames having the same index column sha, we can now join DataFrames. We set the join method to right because we have multiple file statistics entries for each commit. This expands the meta_data DataFrame, i.e., duplicates each meta data entry for a file statistics entry.
Step9: Alright, we are almost done. Hang in there!
Combine multiple rows to one entry in a column
We still have to treat the commit messages that span across multiple lines. So back to the message information. Thanks to the sha column, we can concatenate all the messages that belong to one commit and join the messages' parts in one single row.
Step10: Combining commit messages and change information
Finally, we also join this separate Series with the main DataFrame. Done! | Python Code:
!cp ../../joa_spring-petclinic/git_log_numstat.log datasets/git_log_raw_stats_spring_petclinic.log
import pandas as pd
log = pd.read_csv(
"datasets/git_log_raw_stats_spring_petclinic.log",
sep="\n",
names=['raw'])
log.head()
Explanation: Read in semi-structured data with pandas
When analyzing software systems in a Software Analytics style with pandas, you might face data that isn't yet in a tabular format you can easily read. In this notebook, I'll show you how you can read in semi-structured data. It's a set of tips and tricks how you can
transform a list of data into separate columns
split information in one entry into multiple columns
merge information across multiple rows into one row
So let's dive in!
Dataset
In our case, we want to analyze data from a version control system. The dataset was generated from the Git repository JavaOnAutobahn/spring-petclinic with the command git log --stat > git_log_stat.log.
This exports the history of the Git repository, including some information about the file changes per commit. Here is an excerpt from this created dataset:
```
commit 4d3d9de655faa813781027d8b1baed819c6a56fe
Author: Markus Harrer feststelltaste@googlemail.com
Date: Tue Mar 5 22:32:20 2019 +0100
add virtual bounded contexts
20 1 jqassistant/business.adoc
```
For each commit, we have this text fragment. The dataset isn't structured data in a tabular way but a more row-based style of data. Each row contains a different kind of information, e.g., the commit id, the author's name, the commit date, the commit message (in the worst case: spread across multiple lines!), as well as the changed files with the number of added and deleted lines of code.
The question is: Can we get this kind of data into a pandas DataFrame?
Let's see!
Note: You can also export data from Git with the --format options to create a tabular output. Use this to save you some headaches. But there might be data sources that don't have this option. So it's a good idea to be prepared!
Feedback: This notebook shows my brute force approach for handling semi-structured data with pandas. I would be very happy if you have some suggestions on how to improve this in a more simple way!
Read in the data
We first load this semi-structured data into a DataFrame. We use a little trick for doing this. Using the newline symbol as separator reads that data in line by line.
End of explanation
log['sha'] = log.loc[log['raw'].str.startswith("commit ")]['raw'].str.split("commit ").str[1]
log['author'] = log.loc[log['raw'].str.startswith("Author: ")]['raw'].str.split("Author: ").str[1]
log['timestamp'] = log.loc[log['raw'].str.startswith("Date: ")]['raw'].str.split("Date: ").str[1]
log.head()
Explanation: Information Extraction Adventure
Now we have to extract each bit of information accordingly. It's a thankless job. But it works quite well in most cases.
Extract a row to a separate column
We start with the rows contain information that can be put into separate columns relatively easily. For this, we look for markers e.g., at the beginning of a row. We can then use these markers to find the rows we like to extract and apply custom string splittings. This approach works for the information about the commit id, the author's name, and the commit date.
End of explanation
log['message'] = log.loc[log['raw'].str.startswith(" "*4)]['raw'].str[4:]
log.head()
Explanation: Extract further rows to one column
Next, we want to handle the multiline commit messages. These are also kind of marked by four consecutive whitespaces at the beginning. We can also extract them with the same approach as above (ugly, but it works!).
End of explanation
log['no_entry'] = \
log['sha'].isna() & \
log['author'].isna() & \
log['timestamp'].isna() & \
log['message'].isna()
log.head()
Explanation: Note: We still have to treat commit messages that span across multiple rows. We have to care about that later on.
Extract multiple columns from multiple row
Now for the remaining rows: The information about the additions and deletions per filename. This is a little bit tricky in three ways:
There is no dedicated marker for the file statistics
There are multiple information about the modified file in one row (added & deleted lines as well as the filename)
There are multiple rows for all the changed files within one commit
We can handle this step by step. First, we mark the rows that haven't been extracted yet into separate columns by creating a new column no_entry with True entries for those.
End of explanation
log['sha'] = log['sha'].fillna(method="ffill")
log.head()
Explanation: In the next step, we need to signal which file statistics information belongs to which commit. Luckily, there is a marker from this that we've already extracted: the sha column. This information is also the start of a commit entry. So we can use this entry to mark all the follow up entries of a commit to signal that these rows belong together.
End of explanation
sha_files = log[log['no_entry']][['sha', 'raw']]
sha_files = sha_files.set_index('sha')
sha_files.head()
Explanation: OK, we see, this seems to get somehow complicated. So let's create a separate DataFrame for this called sha_files, were we just treat the file statistics. This DataFrame contains now for each commit all the change information for each changed file.
End of explanation
sha_files[['additions', 'deletions', 'filename']] = sha_files['raw'].str.split("\t", expand=True)
del(sha_files['raw'])
sha_files.head()
Explanation: We are now able to focus on the files statistics. We can split the raw entries with the tabular symbol and throw away the raw data. This fives as us nicely formatted DataFrame with the files statistics' information.
End of explanation
meta_data = log.groupby('sha')[['author', 'timestamp']].first()
meta_data.head()
Explanation: Next, we want to join this data with the other, bigger log DataFrame that contains all the other information about the commits. This means we have to arrange the other DataFrame so that we can join our newly created sha_files DataFrame. We can accomplish this by groupby by the sha columns. We also try to reduce complexity by just preserving the meta information with the author and the timestamp for now.
End of explanation
changes = meta_data.join(sha_files, how='right')
changes.head()
Explanation: With both DataFrames having the same index column sha, we can now join DataFrames. We set the join method to right because we have multiple file statistics entries for each commit. This expands the meta_data DataFrame, i.e., duplicates each meta data entry for a file statistics entry.
End of explanation
sha_msg = log.dropna(subset=['message']).groupby('sha')['message'].apply(' '.join)
sha_msg.head()
Explanation: Alright, we are almost done. Hang in there!
Combine multiple rows to one entry in a column
We still have to treat the commit messages that span across multiple lines. So back to the message information. Thanks to the sha column, we can concatenate all the messages that belong to one commit and join the messages' parts in one single row.
End of explanation
changes = changes.join(sha_msg)
changes.head()
Explanation: Combining commit messages and change information
Finally, we also join this separate Series with the main DataFrame. Done!
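As a quick illustration of what this combined DataFrame enables (a sketch, assuming pandas was imported as pd earlier; the additions and deletions columns are still strings at this point, and binary files show up as '-', so we coerce them to numbers first):
changes['additions'] = pd.to_numeric(changes['additions'], errors='coerce')
changes['deletions'] = pd.to_numeric(changes['deletions'], errors='coerce')
# e.g., total added/deleted lines per author
changes.groupby('author')[['additions', 'deletions']].sum().head()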
End of explanation |
11,326 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Librosa demo
This notebook demonstrates some of the basic functionality of librosa version 0.4.
Following through this example, you'll learn how to
Step1: By default, librosa will resample the signal to 22050Hz.
You can change this behavior by saying
Step2: Harmonic-percussive source separation
Before doing any signal analysis, let's pull apart the harmonic and percussive components of the audio. This is pretty easy to do with the effects module.
Step3: Chromagram
Next, we'll extract Chroma features to represent pitch class information.
Step4: MFCC
Mel-frequency cepstral coefficients are commonly used to represent texture or timbre of sound.
Step5: Beat tracking
The beat tracker returns an estimate of the tempo (in beats per minute) and frame indices of beat events.
The input can be either an audio time series (as we do below), or an onset strength envelope as calculated by librosa.onset.onset_strength().
Step6: By default, the beat tracker will trim away any leading or trailing beats that don't appear strong enough.
To disable this behavior, call beat_track() with trim=False.
Step7: Beat-synchronous feature aggregation
Once we've located the beat events, we can use them to summarize the feature content of each beat.
This can be useful for reducing data dimensionality, and removing transient noise from the features. | Python Code:
from __future__ import print_function
# We'll need numpy for some mathematical operations
import numpy as np
# matplotlib for displaying the output
import matplotlib.pyplot as plt
import matplotlib.style as ms
ms.use('seaborn-muted')
%matplotlib inline
# and IPython.display for audio output
import IPython.display
# Librosa for audio
import librosa
# And the display module for visualization
import librosa.display
audio_path = librosa.util.example_audio_file()
# or uncomment the line below and point it at your favorite song:
#
# audio_path = '/path/to/your/favorite/song.mp3'
y, sr = librosa.load(audio_path)
Explanation: Librosa demo
This notebook demonstrates some of the basic functionality of librosa version 0.4.
Following through this example, you'll learn how to:
Load audio input
Compute mel spectrogram, MFCC, delta features, chroma
Locate beat events
Compute beat-synchronous features
Display features
Save beat tracker output to a CSV file
End of explanation
# Let's make and display a mel-scaled power (energy-squared) spectrogram
S = librosa.feature.melspectrogram(y, sr=sr, n_mels=128)
# Convert to log scale (dB). We'll use the peak power as reference.
log_S = librosa.logamplitude(S, ref_power=np.max)
# Make a new figure
plt.figure(figsize=(12,4))
# Display the spectrogram on a mel scale
# sample rate and hop length parameters are used to render the time axis
librosa.display.specshow(log_S, sr=sr, x_axis='time', y_axis='mel')
# Put a descriptive title on the plot
plt.title('mel power spectrogram')
# draw a color bar
plt.colorbar(format='%+02.0f dB')
# Make the figure layout compact
plt.tight_layout()
Explanation: By default, librosa will resample the signal to 22050Hz.
You can change this behavior by saying:
librosa.load(audio_path, sr=44100)
to resample at 44.1KHz, or
librosa.load(audio_path, sr=None)
to disable resampling.
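For instance, a minimal sketch (not one of the demo's own cells; y_native and sr_native are just illustrative names):
# Load at the file's native sampling rate instead of the default 22050 Hz
y_native, sr_native = librosa.load(audio_path, sr=None)
print('native sampling rate:', sr_native)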
Mel spectrogram
This first step will show how to compute a Mel spectrogram from an audio waveform.
End of explanation
y_harmonic, y_percussive = librosa.effects.hpss(y)
# What do the spectrograms look like?
# Let's make and display a mel-scaled power (energy-squared) spectrogram
S_harmonic = librosa.feature.melspectrogram(y_harmonic, sr=sr)
S_percussive = librosa.feature.melspectrogram(y_percussive, sr=sr)
# Convert to log scale (dB). We'll use the peak power as reference.
log_Sh = librosa.logamplitude(S_harmonic, ref_power=np.max)
log_Sp = librosa.logamplitude(S_percussive, ref_power=np.max)
# Make a new figure
plt.figure(figsize=(12,6))
plt.subplot(2,1,1)
# Display the spectrogram on a mel scale
librosa.display.specshow(log_Sh, sr=sr, y_axis='mel')
# Put a descriptive title on the plot
plt.title('mel power spectrogram (Harmonic)')
# draw a color bar
plt.colorbar(format='%+02.0f dB')
plt.subplot(2,1,2)
librosa.display.specshow(log_Sp, sr=sr, x_axis='time', y_axis='mel')
# Put a descriptive title on the plot
plt.title('mel power spectrogram (Percussive)')
# draw a color bar
plt.colorbar(format='%+02.0f dB')
# Make the figure layout compact
plt.tight_layout()
Explanation: Harmonic-percussive source separation
Before doing any signal analysis, let's pull apart the harmonic and percussive components of the audio. This is pretty easy to do with the effects module.
End of explanation
# We'll use a CQT-based chromagram here. An STFT-based implementation also exists in chroma_cqt()
# We'll use the harmonic component to avoid pollution from transients
C = librosa.feature.chroma_cqt(y=y_harmonic, sr=sr)
# Make a new figure
plt.figure(figsize=(12,4))
# Display the chromagram: the energy in each chromatic pitch class as a function of time
# To make sure that the colors span the full range of chroma values, set vmin and vmax
librosa.display.specshow(C, sr=sr, x_axis='time', y_axis='chroma', vmin=0, vmax=1)
plt.title('Chromagram')
plt.colorbar()
plt.tight_layout()
Explanation: Chromagram
Next, we'll extract Chroma features to represent pitch class information.
End of explanation
# Next, we'll extract the top 13 Mel-frequency cepstral coefficients (MFCCs)
mfcc = librosa.feature.mfcc(S=log_S, n_mfcc=13)
# Let's pad on the first and second deltas while we're at it
delta_mfcc = librosa.feature.delta(mfcc)
delta2_mfcc = librosa.feature.delta(mfcc, order=2)
# How do they look? We'll show each in its own subplot
plt.figure(figsize=(12, 6))
plt.subplot(3,1,1)
librosa.display.specshow(mfcc)
plt.ylabel('MFCC')
plt.colorbar()
plt.subplot(3,1,2)
librosa.display.specshow(delta_mfcc)
plt.ylabel('MFCC-$\Delta$')
plt.colorbar()
plt.subplot(3,1,3)
librosa.display.specshow(delta2_mfcc, sr=sr, x_axis='time')
plt.ylabel('MFCC-$\Delta^2$')
plt.colorbar()
plt.tight_layout()
# For future use, we'll stack these together into one matrix
M = np.vstack([mfcc, delta_mfcc, delta2_mfcc])
Explanation: MFCC
Mel-frequency cepstral coefficients are commonly used to represent texture or timbre of sound.
End of explanation
# Now, let's run the beat tracker.
# We'll use the percussive component for this part
plt.figure(figsize=(12, 6))
tempo, beats = librosa.beat.beat_track(y=y_percussive, sr=sr)
# Let's re-draw the spectrogram, but this time, overlay the detected beats
plt.figure(figsize=(12,4))
librosa.display.specshow(log_S, sr=sr, x_axis='time', y_axis='mel')
# Let's draw transparent lines over the beat frames
plt.vlines(librosa.frames_to_time(beats),
1, 0.5 * sr,
colors='w', linestyles='-', linewidth=2, alpha=0.5)
plt.axis('tight')
plt.colorbar(format='%+02.0f dB')
plt.tight_layout()
Explanation: Beat tracking
The beat tracker returns an estimate of the tempo (in beats per minute) and frame indices of beat events.
The input can be either an audio time series (as we do below), or an onset strength envelope as calculated by librosa.onset.onset_strength().
End of explanation
print('Estimated tempo: %.2f BPM' % tempo)
print('First 5 beat frames: ', beats[:5])
# Frame numbers are great and all, but when do those beats occur?
print('First 5 beat times: ', librosa.frames_to_time(beats[:5], sr=sr))
# We could also get frame numbers from times by librosa.time_to_frames()
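# One of the goals listed at the top was saving the beat tracker output to a CSV file.
# A minimal sketch of that (one beat time in seconds per row), using plain numpy:
beat_times = librosa.frames_to_time(beats, sr=sr)
np.savetxt('beat_times.csv', beat_times, delimiter=',', fmt='%.3f')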
Explanation: By default, the beat tracker will trim away any leading or trailing beats that don't appear strong enough.
To disable this behavior, call beat_track() with trim=False.
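For example, a minimal sketch reusing the percussive signal from above (tempo_all and beats_all are just illustrative names):
tempo_all, beats_all = librosa.beat.beat_track(y=y_percussive, sr=sr, trim=False)
print('beats kept without trimming:', len(beats_all))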
End of explanation
# feature.sync will summarize each beat event by the mean feature vector within that beat
M_sync = librosa.util.sync(M, beats)
plt.figure(figsize=(12,6))
# Let's plot the original and beat-synchronous features against each other
plt.subplot(2,1,1)
librosa.display.specshow(M)
plt.title('MFCC-$\Delta$-$\Delta^2$')
# We can also use pyplot *ticks directly
# Let's mark off the raw MFCC and the delta features
plt.yticks(np.arange(0, M.shape[0], 13), ['MFCC', '$\Delta$', '$\Delta^2$'])
plt.colorbar()
plt.subplot(2,1,2)
# librosa can generate axis ticks from arbitrary timestamps and beat events also
librosa.display.specshow(M_sync, x_axis='time',
x_coords=librosa.frames_to_time(librosa.util.fix_frames(beats)))
plt.yticks(np.arange(0, M_sync.shape[0], 13), ['MFCC', '$\Delta$', '$\Delta^2$'])
plt.title('Beat-synchronous MFCC-$\Delta$-$\Delta^2$')
plt.colorbar()
plt.tight_layout()
# Beat synchronization is flexible.
# Instead of computing the mean delta-MFCC within each beat, let's do beat-synchronous chroma
# We can replace the mean with any statistical aggregation function, such as min, max, or median.
C_sync = librosa.util.sync(C, beats, aggregate=np.median)
plt.figure(figsize=(12,6))
plt.subplot(2, 1, 1)
librosa.display.specshow(C, sr=sr, y_axis='chroma', vmin=0.0, vmax=1.0, x_axis='time')
plt.title('Chroma')
plt.colorbar()
plt.subplot(2, 1, 2)
librosa.display.specshow(C_sync, y_axis='chroma', vmin=0.0, vmax=1.0, x_axis='time',
x_coords=librosa.frames_to_time(librosa.util.fix_frames(beats)))
plt.title('Beat-synchronous Chroma (median aggregation)')
plt.colorbar()
plt.tight_layout()
Explanation: Beat-synchronous feature aggregation
Once we've located the beat events, we can use them to summarize the feature content of each beat.
This can be useful for reducing data dimensionality, and removing transient noise from the features.
End of explanation |
11,327 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Open Context Zooarchaeology Measurements
This code gets measurement data from Open Context to hopefully do some interesting things.
In the example given here, we're retrieving zooarchaeological measurements of fused metatarsal III/IV bones classified as "Artiodactyla" (including more specific taxonomic categories). The specific query used to select these bone data is
Step3: Below I define two little utility functions to make scatter plots from the data contained in a dataframe that was populated by the OpenContextAPI() class. The first function make_group_markers_colors_for_df makes dicts that associate markers and colors for different values in the group_col. The second function make_scatter_plot_from_oc_df makes scatter plots.
Step4: Making some Plots
Because we're going to make multiple plots, let's make some consistent markers and colors for the different taxa in our dataset. This will make it easier to compare results in different plots.
Step5: Observing an outlier
The plot below will illustrate an outlier in our data.
Step6: Excluding the outlier
We can make a more reasonable plot by throwing out the outlier record (perhaps a recording error?) from the plot. To make this easier to read, we pass the same set of colors and markers for the different taxonomic values into the function that makes this plot.
Step7: A more interesting plot, using proximal end measurements
Now to try another scatter plot, looking at measurements from the proximal end of the bone rather than the distal. In this plot the different animal taxa are much more clearly grouped.
Step8: Excluding suspect taxa
While the chart above is helpful, pigs have metatarsal III and metatarsal IV as separate bones. So we should not see pigs in a query for metatarsal III/IV bone specimens. We should revise this plot to remove Sus scrofa.
Step9: To further explore the data, we include this plot that illustrates some taxonomic patterning of distal end measurements. | Python Code:
# This imports the OpenContextAPI from the api.py file in the
# opencontext directory.
%run '../opencontext/api.py'
Explanation: Open Context Zooarchaeology Measurements
This code gets measurement data from Open Context to hopefully do some interesting things.
In the example given here, we're retrieving zooarchaeological measurements of fused metatarsal III/IV bones classified as "Artiodactyla" (including more specific taxonomic categories). The specific query used to select these bone data is:
https://opencontext.org/subjects-search/?prop=obo-foodon-00001303---gbif-1---gbif-44---gbif-359---gbif-731&prop=oc-zoo-anatomical-meas---oc-zoo-von-den-driesch-bone-meas&prop=oc-zoo-has-anat-id---obo-uberon-0013588#4/46.07/16.17/8/any/Google-Satellite
The OpenContextAPI() class has methods to get records from the query above by making multiple API requests to fetch records from Open Context. It also does some cosmetic processing of these records to populate a Pandas dataframe. This notebook also has some functions defined to generate scatter plots from the records retrieved via the OpenContextAPI() class.
End of explanation
import matplotlib.pyplot as plt
import matplotlib.cm as cm
def make_group_markers_colors_for_df(df, group_col):
    """Makes group markers and colors for consistency in multiple plots."""
# Make a list of markers that we will associate with different
# grouping values for our scatter plots.
markers = [
'o',
'x',
'v',
'D',
'p',
'^',
's',
'*',
]
group_vals = df[group_col].unique().tolist()
group_vals.sort()
# Each value from the grouping column will get a color
# assigned.
colors = cm.rainbow(np.linspace(0, 1, len(group_vals)))
group_markers = {}
group_colors = {}
m_i = 0
for i, group_val in enumerate(group_vals):
group_markers[group_val] = markers[m_i]
group_colors[group_val] = colors[i].reshape(1,-1)
m_i += 1
if m_i >= len(markers):
# We ran out of markers, so restart
# the marker index.
m_i = 0
# Return a tuple of group markers and color dicts.
return (
group_markers,
group_colors,
)
def make_scatter_plot_from_oc_df(
df,
group_col,
x_col,
y_col,
group_markers=None,
group_colors=None,
):
    """Make a scatter plot from an Open Context dataframe."""
if not set([group_col, x_col, y_col]).issubset(set(df.columns.tolist())):
        raise ValueError('Check for missing columns')
if not group_markers or not group_colors:
# These were't passed as arguments so make them.
group_markers, group_colors = make_group_markers_colors_for_df(
df,
group_col
)
group_vals = df[group_col].unique().tolist()
group_vals.sort()
ax = None
for group_val in group_vals:
act_index = (
(df[group_col] == group_val)
& ~df[x_col].isnull()
& ~df[y_col].isnull()
)
if df[act_index].empty:
# No data for this taxon
continue
label = '{} [n={}]'.format(group_val, len(df[act_index].index))
if not ax:
ax = df[act_index].plot.scatter(
x=x_col,
y=y_col,
marker=group_markers[group_val],
label=label,
color=group_colors[group_val],
)
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
else:
plot = df[act_index].plot.scatter(
x=x_col,
y=y_col,
marker=group_markers[group_val],
label=label,
ax=ax,
color=group_colors[group_val],
)
plot.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
import numpy as np
import pandas as pd
oc_api = OpenContextAPI()
# The cache file prefix defaults to today's date. This means that, by default,
# the cache expires after a day. To keep cached files indefinately, we can
# change the cache file prefix to something else that won't change from day
# to day.
oc_api.set_cache_file_prefix('plot-demo')
# Clear old cached records.
oc_api.clear_api_cache()
# This is a search/query url to Open Context.
url = 'https://opencontext.org/subjects-search/?prop=obo-foodon-00001303---gbif-1---gbif-44---gbif-359---gbif-731&prop=oc-zoo-anatomical-meas---oc-zoo-von-den-driesch-bone-meas&prop=oc-zoo-has-anat-id---obo-uberon-0013588#4/46.07/16.17/8/any/Google-Satellite'
# Fetch the 'standard' (linked data identified) attributes in use with
# data at the url.
stnd_attribs_tuples = oc_api.get_standard_attributes(
url,
# The optional argument below gets popular standard
# zooarchaeological (bone) measurements.
add_von_den_driesch_bone_measures=True
)
# Make a list of only the slugs from the list of slug, label tuples.
stnd_attribs = [slug for slug, _ in stnd_attribs_tuples]
# Make a dataframe by fetching result records from Open Context.
# This will be slow until we finish improvements to Open Context's API.
# However, the results get cached by saving as files locally. That
# makes iterating on this notebook much less painful.
df = oc_api.url_to_dataframe(url, stnd_attribs)
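# Quick sanity check on what came back: one row per bone specimen,
# one column per requested attribute or measurement.
print(df.shape)
df.head()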
Explanation: Below I define two little utility functions to make scatter plots from the data contained in a dataframe that was populated by the OpenContextAPI() class. The first function make_group_markers_colors_for_df makes dicts that associate markers and colors for different values in the group_col. The second function make_scatter_plot_from_oc_df makes scatter plots.
End of explanation
group_markers, group_colors = make_group_markers_colors_for_df(
df,
group_col='Has taxonomic identifier'
)
Explanation: Making some Plots
Because we're going to make multiple plots, let's make some consistent markers and colors for the different taxa in our dataset. This will make it easier to compare results in different plots.
End of explanation
# Make a plot of Bd versus DD for different taxa
make_scatter_plot_from_oc_df(
df,
group_col='Has taxonomic identifier',
x_col='Bd',
y_col='DD',
group_markers=group_markers,
group_colors=group_colors,
)
Explanation: Observing an outlier
The plot below will illustrate an outlier in our data.
End of explanation
# Make a plot of Bd versus DD for different taxa, limiting DD to reasonable values.
make_scatter_plot_from_oc_df(
df[(df['DD'] < 80)],
group_col='Has taxonomic identifier',
x_col='Bd',
y_col='DD',
group_markers=group_markers,
group_colors=group_colors,
)
Explanation: Excluding the outlier
We can make a more reasonable plot by throwing out the outlier record (perhaps a recording error?) from the plot. To make this easier to read, we pass the same set of colors and markers for the different taxonomic values into the function that makes this plot.
End of explanation
# Make a plot of Bp versus Dp for different taxa
make_scatter_plot_from_oc_df(
df,
group_col='Has taxonomic identifier',
x_col='Bp',
y_col='Dp',
group_markers=group_markers,
group_colors=group_colors,
)
Explanation: A more interesting plot, using proximal end measurements
Now to try another scatter plot, looking at measurements from the proximal end of the bone rather than the distal. In this plot the different animal taxa are much more clearly grouped.
End of explanation
# Make a plot of Bp versus Dp for different taxa, excluding pigs
make_scatter_plot_from_oc_df(
df[~df['Has taxonomic identifier'].str.startswith('Sus')],
group_col='Has taxonomic identifier',
x_col='Bp',
y_col='Dp',
group_markers=group_markers,
group_colors=group_colors,
)
Explanation: Excluding suspect taxa
While the chart above is helpful, pigs have metatarsal III and metatarsal IV as separate bones. So we should not see pigs in a query for metatarsal III/IV bone specimens. We should revise this plot to remove Sus scrofa.
End of explanation
# Check some relationships in distal end measurements, also excluding pigs
make_scatter_plot_from_oc_df(
df[~df['Has taxonomic identifier'].str.startswith('Sus')],
group_col='Has taxonomic identifier',
x_col='Bd',
y_col='Dd',
group_markers=group_markers,
group_colors=group_colors,
)
Explanation: To further explore the data, we include this plot that illustrates some taxonomic patterning of distal end measurements.
End of explanation |
11,328 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
https
Step1: Step 0 - hyperparams
vocab_size and max sequence length are the SAME thing
decoder RNN hidden units are usually the same size as the encoder RNN hidden units in translation, but in our case there does not really seem to be such a relationship; we can experiment and find out later, it is not a priority right now
Step2: Step 1 - collect data (and/or generate them)
Step3: Step 2 - Build model
Step4: Step 3 training the network
RECALL
Step5: Conclusion
there is a good fit
GRU cell - with EOS = 1000
Step6: Basic RNN cell - without EOS
Step7: Conclusion
Now there is no shifting, but this only works for the dummy case where the decoder inputs are provided from the targets (which is incorrect, because the targets should be unknown in general)
GRU cell - without EOS | Python Code:
from __future__ import division
import tensorflow as tf
from os import path
import numpy as np
import pandas as pd
import csv
from sklearn.model_selection import StratifiedShuffleSplit
from time import time
from matplotlib import pyplot as plt
import seaborn as sns
from mylibs.jupyter_notebook_helper import show_graph
from tensorflow.contrib import rnn
from tensorflow.contrib import learn
import shutil
from tensorflow.contrib.learn.python.learn import learn_runner
from mylibs.tf_helper import getDefaultGPUconfig
from sklearn.metrics import r2_score
from mylibs.py_helper import factors
from fastdtw import fastdtw
from scipy.spatial.distance import euclidean
from statsmodels.tsa.stattools import coint
from common import get_or_run_nn
from data_providers.price_history_dummy_seq2seq_data_provider \
import PriceHistoryDummySeq2SeqDataProvider
from models.price_history_dummy_seq2seq import PriceHistoryDummySeq2Seq
dtype = tf.float32
seed = 16011984
random_state = np.random.RandomState(seed=seed)
get_init_rng = lambda: np.random.RandomState(seed=seed)
config = getDefaultGPUconfig()
%matplotlib inline
Explanation: https://www.youtube.com/watch?v=ElmBrKyMXxs
https://github.com/hans/ipython-notebooks/blob/master/tf/TF%20tutorial.ipynb
https://github.com/ematvey/tensorflow-seq2seq-tutorials
End of explanation
num_epochs = 10
num_features = 1
num_units = 400 #state size
input_len = 60
target_len = 30
batch_size = 47
#trunc_backprop_len = ??
Explanation: Step 0 - hyperparams
vocab_size and max sequence length are the SAME thing
decoder RNN hidden units are usually the same size as the encoder RNN hidden units in translation, but in our case there does not really seem to be such a relationship; we can experiment and find out later, it is not a priority right now
End of explanation
npz_path = '../price_history_03_dp_60to30_from_fixed_len.npz'
dp = PriceHistoryDummySeq2SeqDataProvider(npz_path=npz_path, batch_size=batch_size,
)
dp.inputs.shape, dp.targets.shape
aa, bb, cc = dp.next()
aa.shape, bb.shape, cc.shape
Explanation: Step 1 - collect data (and/or generate them)
End of explanation
model = PriceHistoryDummySeq2Seq(rng=get_init_rng(), dtype=dtype, config=config, with_EOS=False)
graph = model.getGraph(batch_size=batch_size,
num_units=num_units,
input_len=input_len,
target_len=target_len)
#show_graph(graph)
Explanation: Step 2 - Build model
End of explanation
rnn_cell = PriceHistoryDummySeq2Seq.RNN_CELLS.BASIC_RNN
num_epochs = 30
num_epochs, num_units, batch_size
def experiment():
return model.run(
npz_path=npz_path,
epochs=num_epochs,
batch_size=batch_size,
num_units=num_units,
input_len = input_len,
target_len = target_len,
rnn_cell=rnn_cell,
)
dyn_stats, preds_dict = get_or_run_nn(experiment, filename='005_basic_rnn_dummy_seq2seq_EOS1000_60to30')
dyn_stats.plotStats()
plt.show()
r2_scores = [r2_score(y_true=dp.targets[ind], y_pred=preds_dict[ind])
for ind in range(len(dp.targets))]
ind = np.argmin(r2_scores)
ind
reals = dp.targets[ind]
preds = preds_dict[ind]
r2_score(y_true=reals, y_pred=preds)
sns.tsplot(data=dp.inputs[ind].flatten())
fig = plt.figure(figsize=(15,6))
plt.plot(reals, 'b')
plt.plot(preds, 'g')
plt.legend(['reals','preds'])
plt.show()
%%time
dtw_scores = [fastdtw(dp.targets[ind], preds_dict[ind])[0]
for ind in range(len(dp.targets))]
np.mean(dtw_scores)
coint(preds, reals)
cur_ind = np.random.randint(len(dp.targets))
reals = dp.targets[cur_ind]
preds = preds_dict[cur_ind]
fig = plt.figure(figsize=(15,6))
plt.plot(reals, 'b')
plt.plot(preds, 'g')
plt.legend(['reals','preds'])
plt.show()
Explanation: Step 3 training the network
RECALL: baseline is around 4 for huber loss for current problem, anything above 4 should be considered as major errors
Basic RNN cell - with EOS 1000
End of explanation
rnn_cell = PriceHistoryDummySeq2Seq.RNN_CELLS.GRU
num_epochs = 30
num_epochs, num_units, batch_size
def experiment():
return model.run(
npz_path=npz_path,
epochs=num_epochs,
batch_size=batch_size,
num_units=num_units,
input_len = input_len,
target_len = target_len,
rnn_cell=rnn_cell,
)
#dyn_stats = experiment()
dyn_stats, preds_dict = get_or_run_nn(experiment, filename='005_gru_dummy_seq2seq_EOS1000_60to30')
dyn_stats.plotStats()
plt.show()
r2_scores = [r2_score(y_true=dp.targets[ind], y_pred=preds_dict[ind])
for ind in range(len(dp.targets))]
ind = np.argmin(r2_scores)
ind
reals = dp.targets[ind]
preds = preds_dict[ind]
r2_score(y_true=reals, y_pred=preds)
sns.tsplot(data=dp.inputs[ind].flatten())
fig = plt.figure(figsize=(15,6))
plt.plot(reals, 'b')
plt.plot(preds, 'g')
plt.legend(['reals','preds'])
plt.show()
%%time
dtw_scores = [fastdtw(dp.targets[ind], preds_dict[ind])[0]
for ind in range(len(dp.targets))]
np.mean(dtw_scores)
coint(preds, reals)
cur_ind = np.random.randint(len(dp.targets))
reals = dp.targets[cur_ind]
preds = preds_dict[cur_ind]
fig = plt.figure(figsize=(15,6))
plt.plot(reals, 'b')
plt.plot(preds, 'g')
plt.legend(['reals','preds'])
plt.show()
Explanation: Conclusion
there is a good fit
GRU cell - with EOS = 1000
End of explanation
model = PriceHistoryDummySeq2Seq(rng=random_state, dtype=dtype, config=config, with_EOS=False)
rnn_cell = PriceHistoryDummySeq2Seq.RNN_CELLS.BASIC_RNN
num_epochs = 10
num_epochs, num_units, batch_size
def experiment():
return model.run(
npz_path=npz_path,
epochs=num_epochs,
batch_size=batch_size,
num_units=num_units,
input_len = input_len,
target_len = target_len,
rnn_cell=rnn_cell,
)
dyn_stats, preds_dict = get_or_run_nn(experiment, filename='005_basic_rnn_dummy_seq2seq_noEOS_60to30')
dyn_stats.plotStats()
plt.show()
r2_scores = [r2_score(y_true=dp.targets[ind], y_pred=preds_dict[ind])
for ind in range(len(dp.targets))]
ind = np.argmin(r2_scores)
ind
reals = dp.targets[ind]
preds = preds_dict[ind]
r2_score(y_true=reals, y_pred=preds)
sns.tsplot(data=dp.inputs[ind].flatten())
fig = plt.figure(figsize=(15,6))
plt.plot(reals, 'b')
plt.plot(preds, 'g')
plt.legend(['reals','preds'])
plt.show()
%%time
dtw_scores = [fastdtw(dp.targets[ind], preds_dict[ind])[0]
for ind in range(len(dp.targets))]
np.mean(dtw_scores)
coint(preds, reals)
cur_ind = np.random.randint(len(dp.targets))
reals = dp.targets[cur_ind]
preds = preds_dict[cur_ind]
fig = plt.figure(figsize=(15,6))
plt.plot(reals, 'b')
plt.plot(preds, 'g')
plt.legend(['reals','preds'])
plt.show()
Explanation: Basic RNN cell - without EOS
End of explanation
rnn_cell = PriceHistoryDummySeq2Seq.RNN_CELLS.GRU
num_epochs = 10
num_epochs, num_units, batch_size
def experiment():
return model.run(
npz_path=npz_path,
epochs=num_epochs,
batch_size=batch_size,
num_units=num_units,
input_len = input_len,
target_len = target_len,
rnn_cell=rnn_cell,
)
#dyn_stats = experiment()
dyn_stats, preds_dict = get_or_run_nn(experiment, filename='005_gru_dummy_seq2seq_noEOS_60to30')
dyn_stats.plotStats()
plt.show()
r2_scores = [r2_score(y_true=dp.targets[ind], y_pred=preds_dict[ind])
for ind in range(len(dp.targets))]
ind = np.argmin(r2_scores)
ind
reals = dp.targets[ind]
preds = preds_dict[ind]
r2_score(y_true=reals, y_pred=preds)
sns.tsplot(data=dp.inputs[ind].flatten())
fig = plt.figure(figsize=(15,6))
plt.plot(reals, 'b')
plt.plot(preds, 'g')
plt.legend(['reals','preds'])
plt.show()
%%time
dtw_scores = [fastdtw(dp.targets[ind], preds_dict[ind])[0]
for ind in range(len(dp.targets))]
np.mean(dtw_scores)
coint(preds, reals)
cur_ind = np.random.randint(len(dp.targets))
reals = dp.targets[cur_ind]
preds = preds_dict[cur_ind]
fig = plt.figure(figsize=(15,6))
plt.plot(reals, 'b')
plt.plot(preds, 'g')
plt.legend(['reals','preds'])
plt.show()
Explanation: Conclusion
Now there is no shifting, but this only works for the dummy case where the decoder inputs are provided from the targets (which is incorrect, because the targets should be unknown in general)
GRU cell - without EOS
End of explanation |
11,329 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Basics of Machine Learning
Tutorial held at University of Zurich, 23-24 March 2016
(c) 2016 Jan Šnajder (jan.snajder@fer.hr), FER, University of Zagreb
<i>Version
Step1: Outline
Typical steps in applying an ML algorithm
Instance space
Hypothesis
Empirical error
Training a model
Model complexity
Inductive bias
The three ingredients of every ML algorithm
Algo 1
Step2: Hypothesis
Hypothesis
Step3: A linear classifier divides the input space into two half-spaces
$h(\mathbf{x}) \geq 0$ - instances labeled as $y=1$
$h(\mathbf{x})=0$ - the boundary
$h(\mathbf{x}) \leq 0$ - instances labeled as $y=0$
Step4: Model
Model $\mathcal{H}$
Step5: A couple of different hypotheses from this model
Step6: Empirical error
Given a hypothesis $h$, what can we say about its accuracy (or error)?
True accuracy depends on the distribution of instances. We don't know the true distribution, so we have to assume that it is similar to the distribution in our dataset
Empirical error is the observed error of our hypothesis on the dataset that we have available
Tells us how accurate a hypothesis is on our labeled dataset
Misclassification error
Step7: Training a model
Training (=learning) a model amounts to searching a hypothesis space $\mathcal{H}$ for the best hypothesis $h\in \mathcal{H}$
The best hypothesis
Step8: To get binary decisions, we simply use a threshold of 0.5
(2) Error function (cross-entropy error)
Step9: Feature mapping
Logistic regression is essentially a linear model. What if our problem is not linearly separable?
In logistic regression (and many other algorithms), we can map our instances into a higher dimensional space, where they hopefully will become linearly separable
We do this using a feature mapping function
$$\phi
Step10: Hyperparameters
We know that we have to choose a model $\mathcal{H}$, otherwise learning is futile
Often times we choose a model from within a family of models
Step11: Which model is the best?
The problem of noise
Noise is an unwanted anomaly in the data
Possible causes
Step12: Regularization
Instead of trying out different feature mappings, a nice trick is simply to map to a rather high dimension, regardless of whether we really need it, but then give preference to simpler models
That is, we allow the model to become complex if the data calls for it, but we put some pressure on it to stay as simple as possible
We do this by tweaking the error function so that it penalizes models that become shamelessly non-linear
You can think of this as a spandex suit
Step13: Kernel trick
Similarly to logistic regression, SVM maps instances to high-dimensional spaces to achieve non-linearity
However, unlike in logistic regression, the mapping need not really take place
Step14: SVM+RBF is a powerful model, but be aware of overfitting!
If you use SVM+RBF, then there are two hyperparameters
Step15: Algo 3
Step16: Algo 4 | Python Code:
import scipy as sp
import scipy.stats as stats
import matplotlib.pyplot as plt
from numpy.random import normal
from SU import *
%pylab inline
Explanation: Basics of Machine Learning
Tutorial held at University of Zurich, 23-24 March 2016
(c) 2016 Jan Šnajder (jan.snajder@fer.hr), FER, University of Zagreb
<i>Version: 0.4 (2018-12-24)</i>
End of explanation
from sklearn.datasets import make_classification
X, y = make_classification(n_samples=20, n_features=2, n_classes=2, n_redundant=0, n_clusters_per_class=1, random_state=42)
plot_problem(X, y)
X
y
Explanation: Outline
Typical steps in applying an ML algorithm
Instance space
Hypothesis
Empirical error
Training a model
Model complexity
Inductive bias
The three ingredients of every ML algorithm
Algo 1: Logistic regression
Feature mapping
Hyperparameters
The problem of noise
Model selection
Cross-validation
Regularization
Algo 2: Support vector machine (SVM)
Kernel trick
Algo 3: Decision tree
Algo 4: Naive Bayes classifier
Scipy stack
<img src="http://ww2.sinaimg.cn/large/5396ee05jw1etyjkwzuo3j20jn0estco.jpg" width="70%" align="left">
An example: Titanic survivors
Typical steps in applying an ML algorithm
Data preparation (cleansing and wrangling)
Data annotation
Feature engineering
Dimensionality reduction / feature selection
Model selection
Model training
Model evaluation
Diagnostics and debugging
Deployment
1-3 and 8 are task-specific
ML focuses on 4-8
We will focus on 5-7
Instance space
Instance space (input space): $\mathcal{X}$
Input space dimensionality: $n$
An instance is a vector living in the input space: $\mathbf{x} = (x_1, x_2, \dots, x_n)^T \in \mathcal{X}$
Label: $y$ (a discrete value or a number)
Class labels: $\mathcal{Y} = {0, \dots, K}$
Number of classes: $K$
Binary classification: $K=2$, $\mathcal{Y} = {0,1}$ or $\mathcal{Y} = {-1,+1}$
Number of instances: $N$
Labeled dataset: $\mathcal{D} = \big{(x^{(i)}, y^{(i)})\big}_{i=1}^N \subseteq \mathcal{X}\times\mathcal{Y}$
In matrix form:
$$
\begin{array}{lllll|l}
&x_1 & x_2 & \cdots & x_n & \mathbf{y}\\
\hline
\mathbf{x}^{(1)} = & x_1^{(1)} & x_2^{(1)} & \cdots & x_n^{(1)} & y^{(1)}\\
\mathbf{x}^{(2)} = & x_1^{(2)} & x_2^{(2)} & \cdots & x_n^{(2)} & y^{(2)}\\
& \vdots\\
\mathbf{x}^{(N)} = & x_1^{(N)} & x_2^{(N)} & \cdots & x_n^{(N)} & y^{(N)}\\
\end{array}
$$
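The toy dataset generated in the code cell above is exactly such a matrix; a quick sketch to see $N$ and $n$:
print(X.shape)   # (N, n) = (20, 2): 20 instances in a 2-dimensional input space
print(y.shape)   # (N,): one label per instance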
End of explanation
def h(x, w) : return 1 if w[0] + w[1] * x[0] + w[2] * x[1] >= 0 else 0
def h0(x) : return h(x, [0, 0.5, -0.5])
print h0([0, -11])
h0([-2, 2])
Explanation: Hypothesis
Hypothesis: $h : \mathcal{X} \to \mathcal{Y}$
Assigns class labels to each instance from the instance space
Binary classification: $h : \mathcal{X} \to {0, 1}$ or $h : \mathcal{X} \to {-1, +1}$
More generally, a hypothesis is a function $h(\mathbf{x} | \boldsymbol\theta)$, defined up to a vector of parameters $\boldsymbol\theta$
For example, a linear classifier:
$$\boldsymbol\theta = (w_0, w_1, w_2)$$
$$h(x_1,x_2|w_0,w_1,w_2) = \mathbf{1}{w_0 + w_1 x_1 + w_2 x_2 \geq 0}$$
End of explanation
plot_problem(X, y, h0)
Explanation: A linear classifier divides the input space into two half-spaces
$h(\mathbf{x}) \geq 0$ - instances labeled as $y=1$
$h(\mathbf{x})=0$ - the boundary
$h(\mathbf{x}) \leq 0$ - instances labeled as $y=0$
End of explanation
def h(x, w) : return 1 if w[0] + w[1] * x[0] + w[2] * x[1] > 0 else 0
Explanation: Model
Model $\mathcal{H}$: a set of hypotheses $h$
Often referred to as a "hypothesis space" or "parameter space"
Formally: $\mathcal{H} = \big{ h(\mathbf{x} | \boldsymbol\theta)\big}_{\boldsymbol\theta}$
A family of functions parametrized with $\boldsymbol\theta$
E.g., a linear classification model (here $\boldsymbol\theta = \mathbf{w}$):
End of explanation
def h0(x) : return h(x, [0, 0.5, -0.5])
def h1(x) : return h(x, [1, 1, 2.0])
def h2(x) : return h(x, [-1, 2, 1])
for hx in [h0, h1, h2] : plot_problem(X, y, hx, surfaces=False)
Explanation: A couple of different hypotheses from this model:
End of explanation
from sklearn.metrics import zero_one_loss
from sklearn.metrics import accuracy_score
def misclassification_error(h, X, y) :
error = 0
for xi, yi in zip(X, y):
if h(xi) != yi : error += 1
return float(error) / len(X)
misclassification_error(h0, X, y)
misclassification_error(h1, X, y)
misclassification_error(h2, X, y)
Explanation: Empirical error
Given a hypothesis $h$, what can we say about its accuracy (or error)?
True accuracy depends on the distribution of instances. We don't know the true distribution, so we have to assume that it is similar to the distribution in our dataset
Empirical error is the observed error of our hypothesis on the dataset that we have available
Tells us how accurate a hypothesis is on our labeled dataset
Misclassification error:
$$
E(h|\mathcal{D})
= \frac{1}{N} \sum_{i=1}^N \mathbf{1}{h(\mathbf{x})^{(i)} \neq y^{(i)}}
$$
[Example]
Error inflicted on a single instance is called the loss function
$L\big(y^{(i)}, h(\mathbf{x}^{(i)})\big) = \mathbf{1}{h(\mathbf{x})^{(i)} \neq y^{(i)}}$ is called a zero-one loss
End of explanation
def sigm(x): return 1 / (1 + sp.exp(-x))
xs = sp.linspace(-10, 10)
plt.plot(xs, sigm(xs));
Explanation: Training a model
Training (=learning) a model amounts to searching a hypothesis space $\mathcal{H}$ for the best hypothesis $h\in \mathcal{H}$
The best hypothesis: one that classifies the instances most accurately
This is an optimization problem!
[Example: Input space + hypothesis space]
$\mathcal{H}$ is typically very large, hence we need smart search methods
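As a toy illustration of this search, a sketch that brute-forces a small grid of weight vectors with the helpers defined above (the grid values are arbitrary):
# Exhaustive 'training': evaluate every hypothesis on the grid and keep the best one
best_err, best_w = 1.0, None
for w0 in [-1, 0, 1]:
    for w1 in [-1, -0.5, 0.5, 1]:
        for w2 in [-1, -0.5, 0.5, 1]:
            err = misclassification_error(lambda x: h(x, [w0, w1, w2]), X, y)
            if err < best_err: best_err, best_w = err, [w0, w1, w2]
print(best_w)
print(best_err)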
Model complexity
Ideally, in model $\mathcal{H}$ there is a hypothesis $h$ that is perfectly accurate, i.e., $E(h|\mathcal{D}) = 0$, and ideally we will be able to find it
More often than not, no such $h$ exists in the model, i.e., $\forall h\in\mathcal{H}. E(h|\mathcal{D}) > 0$
We then say that the model $\mathcal{H}$ is not complex enough (or does not have sufficient capacity)
[Example]
[Exercise: six instances]
Some more theory: Inductive bias
Learning a hypothesis is an ill-defined problem: $h$ is not logically entailed from $\mathcal{D}$
Example 1: Learning a Boolean function
\begin{array}{ccc|c}
x_1 & x_2 & x_3 & y\\
\hline
0&0&0&\color{red}{\textbf{?}}\\
0&0&1&\color{red}{\textbf{?}}\\
0&1&0&1\\
0&1&1&0\\
1&0&0&1\\
1&0&1&0\\
1&1&0&\color{red}{\textbf{?}}\\
1&1&1&1\\
\end{array}
Generalization - the ability to classify previously unseen instances
Learning and generalization are not possible without additional assumptions
Futility of bias-free learning
Inductive bias - the set of additional assumptions that make $h(x)$ follow deductively from the set of labeled instances $\mathcal{D}$
"A set of assumptions that turn induction into deduction"
Two flavors:
Language bias: the model $\mathcal{H}$ limits the set of hypotheses from which we can choose $\Rightarrow$ defines where we search
Preference bias: we prefer one hypothesis over the other $\Rightarrow$ defines how we search
Most ML algorithms combine both types of bias
[Example 2: Input space + parameter space]
Exercise:
Learning a Boolean function in $\mathcal{X}={0,1}$, $\mathcal{H}$ is a set of lines
Q: What kind of bias do we have here?
Q: How many different hypotheses does the model have?
Q: Is the model complex enough to perfectly classify all possible labelings?
Three ingredients of every ML algorithm
(1) The model $\mathcal{H}$
$\mathcal{H} = \big{ h(\mathbf{x} | \boldsymbol\theta)\big}_{\boldsymbol{\theta}}$
(2) Error function
The expected value (= average) of the loss function on all instances
Because the true distribution $P(\mathbf{x}, y)$ is unknown to us, we compute instead the empirical error on the set $\mathcal{D}$ (= the average loss on the dataset):
$$E(h|\mathcal{D}) = E(\boldsymbol\theta|\mathcal{D}) = \frac{1}{N} \sum_{i=1}^N L\big(y^{(i)}, h(\mathbf{x}^{(i)})\big)$$
NB: Instead of working with an error function $E(\boldsymbol\theta|\mathcal{D})$, the generative models (e.g., the Bayes classifier) work with a likelihood function $\mathcal{L}(\boldsymbol\theta|\mathcal{D})$ , which they try to maximize, but conceptually this is the same thing
(3) Optimization procedure
Searches for $h$ in $\mathcal{H}$ that minimizes the empirical error
$$
h^* = \mathrm{argmin}_{h\in\mathcal{H}} E(h|\mathcal{D})
$$
that is
$$
\boldsymbol\theta^* = \mathrm{argmin}_{\boldsymbol\theta} E(\boldsymbol\theta|\mathcal{D})
$$
This optimization can be analytical (=math derivation) or heuristic (=smart search)
Discussion:
The three components from above determine the inductive bias of an algorithm
Which type of inductive bias is associated with which component?
Algo 1: Logistic regression
A popular and quite efficient linear classifier
(1) The model:
$$
h(\mathbf{x}|\mathbf{w}) = \sigma\big(\mathbf{w}^\intercal\mathbf{x}\big) =
\frac{1}{1+\exp(-\mathbf{w}^\intercal\mathbf{x})}
$$
$\mathbf{w}^\intercal\mathbf{x}$ is the scalar product of the weight vector $\mathbf{w}$ and the instance vector $\mathbf{x}$
$\mathbf{w}^\intercal\mathbf{x} = \sum_{i=1}^n w_i x_i$
As per convention, $\mathbf{x}$ is extended with a dummy feature $x_0=1$, so that $\mathbf{w}$ and $\mathbf{x}$ match in dimensions
The sigma function ($\sigma$) squashes the output to a $[0,1]$ interval, so we get probabilities
End of explanation
from sklearn.linear_model import LogisticRegression
h = LogisticRegression()
h.fit(X, y)
plot_problem(X, y, h.predict)
h.coef_
h.intercept_
misclassification_error(h.predict, X, y)
Explanation: To get binary decisions, we simply use a threshold of 0.5
(2) Error function (cross-entropy error):
$$
E(\mathbf{w}|\mathcal{D}) = \frac{1}{N} \sum_{i=1}^N\Big( - y^{(i)} \ln h(\mathbf{x}^{(i)}|\mathbf{w})- (1-y^{(i)})\ln \big(1-h(\mathbf{x}^{(i)}|\mathbf{w})\big)\Big)
$$
Seems convoluted at first, but the idea is actually very simple:
if $y=1$, we want the probability $h(x)$ to be close to 1
if $y=0$, we want the probability $h(x)$ to be close to 0
the more we depart from this, the more we penalize the model
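Written out directly in numpy, this error is just the following (a sketch; y_true and h_probs are hypothetical names for the 0/1 label vector and the predicted probabilities $h(\mathbf{x}|\mathbf{w})$):
# Cross-entropy error, transcribed from the formula above
def cross_entropy_error(y_true, h_probs):
    return np.mean(-y_true * np.log(h_probs) - (1 - y_true) * np.log(1 - h_probs))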
(3) Optimization:
Stochastic gradient descent (SGD)
An iterative fine tuning of the weights $\mathbf{w}$ in search of the point of minimal error
Very efficient (fast) and guaranteed to find the best weights (best hypothesis within the model)
In sklearn, all of the above is packed into a single class:
End of explanation
from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures(4)
X2 = poly.fit_transform(X)
h = LogisticRegression()
h.fit(X2, y)
plot_problem(X, y, lambda x: h.predict(poly.transform(x)))
Explanation: Feature mapping
Logistic regression is essentially a linear model. What if our problem is not linearly separable?
In logistic regression (and many other algorithms), we can map our instances into a higher dimensional space, where they hopefully will become linearly separable
We do this using a feature mapping function
$$\phi:\mathbb{R}^n\to\mathbb{R}^m$$
For example, mapping from a 2-dimensional input space to a 3-dimensional feature space (excluding the dummy feature $x_0$):
$$\phi(\mathbf{x}) = (1,x_1,x_2,x_1 x_2)$$
[Example: 2-d XOR problem]
A linear boundary in high-dimensional space gives us a non-linear boundary in the original, low-dimensional space
We get non-linearity without having to change the model!
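As a tiny sketch of the idea on the XOR problem mentioned above (X_xor, y_xor and phi are illustrative names):
# XOR is not linearly separable in (x1, x2), but it becomes separable once we add the product feature x1*x2
X_xor = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y_xor = np.array([0, 1, 1, 0])
phi = lambda x: np.array([1, x[0], x[1], x[0] * x[1]])   # the mapping from the formula above
print(np.array([phi(x) for x in X_xor]))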
End of explanation
from sklearn.preprocessing import PolynomialFeatures
for m in [1, 2, 3, 4, 5]:
poly = PolynomialFeatures(m)
X2 = poly.fit_transform(X)
h = LogisticRegression()
h.fit(X2, y)
plot_problem(X, y, lambda x: h.predict(poly.transform(x)))
error = misclassification_error(lambda x : h.predict(poly.transform(x)), X, y)
plt.title('$m = %d, E(h|\mathcal{D})=%.2f$' % (m, error))
plt.show()
Explanation: Hyperparameters
We know that we have to choose a model $\mathcal{H}$, otherwise learning is futile
Often times we choose a model from within a family of models:
$$
{\mathcal{H}_1, \mathcal{H}_2, \dots, \mathcal{H}_k}
$$
E.g., in logistic regression, we can choose whether we want to use a feature mapping function, and of what kind
What mapping to use defines the "amount of non-linearity", and it can be considered a hyperparameter of the model
The "amount of non-linearity" can be considered a hyperparameter of the model
Let's call this parameter $C$. The larger $C$, the more non-linearity we get
$C$ is a hyperparameter of the model, whereas $w_i$ are just "ordinary" parameters
Example: Logistic regression
End of explanation
X, y = make_classification(n_samples=100, n_features=2, n_classes=2, n_redundant=0, n_clusters_per_class=2, random_state=53)
plot_problem(X, y)
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3)
print sp.shape(X_train), sp.shape(y_train)
print sp.shape(X_test), sp.shape(y_test)
plot_problem(X_train, y_train)
plot_problem(X_test, y_test)
from sklearn.metrics import zero_one_loss
train_errs, test_errs = [], []
for m in range(1, 6) :
poly=PolynomialFeatures(m)
X2 = poly.fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X2, y, test_size=0.3, random_state=42)
h = LogisticRegression()
h.fit(X_train,y_train)
train_err = zero_one_loss(y_train,h.predict(X_train))
test_err = zero_one_loss(y_test,h.predict(X_test))
train_errs = append(train_errs, train_err)
test_errs = append(test_errs, test_err)
plot(test_errs,'b',label="test");
plot(train_errs,'r',label="train")
legend();
Explanation: Which model is the best?
The problem of noise
Noise is an unwanted anomaly in the data
Possible causes:
Imprecision
Mislabeling ("teacher noise")
Latent variables
Vague class boundaries (subjectivity)
Noise makes the boundary between two classes more complex than it really should be!
[Example 1: credit risk assessment according to age and income]
Simple models cannot reach $E(h|\mathcal{D})=0$
On the other hand, models that are too complex learn noise rather than the true classification!
[Example 2]
In principle, we cannot separate the noise from genuine data (we can do this only for notorious outliers)
Model selection
Obviously, the larger the capacity of $\mathcal{H}$, the lower $E(h|\mathcal{D})$, for the best $h\in\mathcal{H}$
However, if we choose a model that is too complex, it will fit the noise and won't be able to generalize
Generalization: The ability to correctly predict the labels of previously unseen instances
We prefer simpler models because they:
generalize better
are easier to train and use
are easier to interpret
On the other hand, if the model is too simple, it won't be able to capture the regularities in the data
Thus, we need a model that is neither too complex nor too simple, a model that is just right for our problem!
Occam's razor
<img src="http://muslimsi.com/wp-content/uploads/2014/12/quote-occam-s-razor-no-more-things-should-be-presumed-to-exist-than-are-absolutely-necessary-i-e-the-william-of-occam-372636-846x398.jpg" width="70%" align="left">
Two extremes:
Underfitting - $\mathcal{H}$ is too simple for our problem $\Rightarrow$ works badly on existing as well as unseen data
Overfitting - $\mathcal{H}$ is too complex for our problem $\Rightarrow$ works excellently on existing data but fails miserably on unseen data
[Example: under/overfitting]
Our task is to choose a model of the right complexity: neither underfitted nor overfitted
This is called model selection
Other names: model optimization, hyperparameter optimization, bias-variance tradeoff
IMPORTANT: Model selection is your job, not the job of an ML algorithm. An ML algorithm is responsible for optimizing the "ordinary" parameters $\boldsymbol\theta$. You are responsible for optimizing the hyperparameters of the model (if there are any). Don't blame the algorithm if you've chosen a bad model.
The assumption of inductive learning
If (1) the error of the hypothesis on a sufficiently large dataset is small and (2) the model is not too complex, the hypothesis will also classify well all the previously unseen, but (3) similar instances.
Cross-validation
A method to estimate how well the model generalizes
Generalization is the ability to work well on unseen data. Although we don't have the unseen data at our disposal, we can simulate this setup using the data that we have
We divide the dataset into a training set and a test set:
$$
\mathcal{D} = \mathcal{D}_{\mathrm{train}} \cup \mathcal{D}_{\mathrm{test}}
$$
We train the model on the first set and test on the second set
Because the instances from the test set are not used for training, the model won't see them until it is too late (testing time), so we get a fair estimate of how well the model generalized
We calculate two errors of $h\in\mathcal{H}$:
Train error: the empirical error on the train set, $E(h|\mathcal{D}_{\mathrm{train}})$
Test error: the empirical error on the test set, $E(h|\mathcal{D}_{\mathrm{test}})$
Discussion:
Imagine you have a family of models
The complexity of the model is governed by the hyperparameter $C$: the larger $C$, the more complex the model
What do you think: as $C$ increases, will the train error increase?
What about the test error?
[Graph: Train/test error as a function of model complexity]
$E(h|\mathcal{D}_{\mathrm{train}})$ drops as the complexity increases, while $E(h|\mathcal{D}_{\mathrm{test}})$ typically decreases at first, then increases
An optimal model is the one that minimizes $E(h|\mathcal{D}_{\mathrm{test}})$
Discussion:
You've trained your model and then tested it on the train set and on the test set
You get a very low train error but a high test error. What can you conclude?
How would you go about fixing this?
Example: Logistic regression
End of explanation
from sklearn.svm import SVC
X, y = make_classification(n_samples=20, n_features=2, n_classes=2, n_redundant=0, n_clusters_per_class=1, random_state=41)
plot_problem(X, y)
h = SVC(kernel='linear')
h.fit(X, y)
plot_problem(X, y, h.predict)
h = LogisticRegression()
h.fit(X, y)
plot_problem(X, y, h.predict)
Explanation: Regularization
Instead of trying out different feature mappings, a nice trick is simply to map to a rather high dimension, regardless of whether we really need it, but then give preference to simpler models
That is, we allow the model to become complex if the data calls for it, but we put some pressure on it to stay as simple as possible
We do this by tweaking the error function so that it penalizes models that become shamelessly non-linear
You can think of this as a spandex suit: it does stretch as required, but it gets uncomfortable the more it stretches
This is called regularization
The error function trades off between empirical error and model complexity
The trade-off is regulated by a regularization factor $C$
If we care more about simplicity, we set $C$ to a small value, otherwise we set it to a large value and get a complex model
Note that $C$ is again a hyperparameter. We are responsible for defining its value
This simplifies things a bit, because we don't have to care about feature mapping, but still we have to choose $C$, which means we still have to do model selection
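In sklearn, this regularization factor is exposed directly as the C argument of the classifiers used here; a minimal sketch (smaller C = stronger pressure towards simplicity):
h_simple = LogisticRegression(C=0.01)    # strong regularization, simpler model
h_complex = LogisticRegression(C=100.0)  # weak regularization, more complex model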
Cross-validation + model selection
Cross-validation becomes a bit more complex when we also wish to optimize the hyperparameters of the model (which is what we want to do in most cases)
We are not allowed to optimize the hyperparameters on the test set. If we do this, we ruin our experiment, as we cannot claim that test instances were not seen before by the model
We need one additional set: validation set
We partition our dataset into three disjoint sets:
$$
\mathcal{D} = \mathcal{D}_{\mathrm{train}} \cup \mathcal{D}_{\mathrm{val}} \cup \mathcal{D}_{\mathrm{test}}
$$
$$
\mathcal{D}_{\mathrm{train}} \cap \mathcal{D}_{\mathrm{val}} =
\mathcal{D}_{\mathrm{train}} \cap \mathcal{D}_{\mathrm{test}} =
\mathcal{D}_{\mathrm{val}} \cap \mathcal{D}_{\mathrm{test}} = \emptyset
$$
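One simple way to get such a three-way split with the train_test_split helper used earlier (a sketch; the 0.2/0.25 values give roughly a 60/20/20 partition):
X_tmp, X_test, y_tmp, y_test = train_test_split(X, y, test_size=0.2)
X_train, X_val, y_train, y_val = train_test_split(X_tmp, y_tmp, test_size=0.25)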
Folded cross-validation
The problem with train-test split is that it is a single and arbitrary split. This makes the error estimate quite unreliable
The alternative is to repeat the splits a couple of times
We could do that at random, but then we lose control of how many times each instance is used for testing
Instead, we divide the dataset into $k$ parts (folds) and use each part for testing exactly once (the rest we use for training)
We report the overall error as the average of the error across the folds
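In sklearn the whole folded procedure is a one-liner; a sketch with the logistic regression model from above (cv=5 gives 5 folds, and the reported score is accuracy rather than error):
from sklearn.cross_validation import cross_val_score
scores = cross_val_score(LogisticRegression(), X, y, cv=5)   # one accuracy score per fold
print(scores.mean())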
Algo 2: Support vector machine (SVM)
Another powerful linear model
In many respects similar to logistic regression (also a linear model)
Differences: doesn't output probabilities, uses different error function, and a different optimization procedure
(1) The model:
$$
h(\mathbf{x}|\mathbf{w}) = \mathbf{1}{\mathbf{w}^\intercal\mathbf{x} \geq 0}
$$
(2) Error function:
Uses the concept of a margin: the distance between the boundary and the nearest instance on either side
Idea: generalization is best when the margin is the largest $\Rightarrow$ maximum margin
Error function is designed to maximize the margin and minimize intrusion into the margin (trying to maintain a "hard margin")
Regularization factor $C$ (which is a hyperparameter) tells the algorithm whether we prefer a large margin or a hard margin
(3) Optimization procedure:
Quadratic programming or SGD
End of explanation
X, y = make_classification(n_samples=20, n_features=2, n_classes=2, n_redundant=0, n_clusters_per_class=1, random_state=42)
plot_problem(X, y)
h = SVC(kernel='rbf', C=1)
h.fit(X, y)
plot_problem(X, y, h.predict)
h = SVC(kernel='rbf', C=10**5)
h.fit(X, y)
plot_problem(X, y, h.predict)
Explanation: Kernel trick
Similarly to logistic regression, SVM maps instances to high-dimensional spaces to achieve non-linearity
However, unlike in logistic regression, the mapping need not really take place: SVM can compute the boundary without actually doing the mapping, using a kernel function
Different kernel functions give different non-linearities
Two extremes: linear kernel (equivalent to not using a mapping at all) and a radial basis function (RBF) kernel
RBF kernel effectively maps to an infinite-dimension space, in which each problem becomes linearly separable
End of explanation
from sklearn.grid_search import GridSearchCV
param_grid = [{'C': [2**x for x in range(-5,5)],
'gamma': [2**x for x in range(-5,5)]}]
h = GridSearchCV(SVC(), param_grid)
h.fit(X, y)
plot_problem(X, y, h.predict)
Explanation: SVM+RBF is a powerful model, but be aware of overfitting!
If you use SVM+RBF, then there are two hyperparameters: $C$ and $\gamma$
You need to cross-validate the model across a range of $(C,\gamma)$ values. Use grid search for that
End of explanation
from sklearn import tree
h = tree.DecisionTreeClassifier()
h.fit(X, y)
plot_problem(X, y, h.predict)
from sklearn.externals.six import StringIO
import pyparsing
import pydot
from IPython.display import Image
dot_data = StringIO()
tree.export_graphviz(h, out_file=dot_data)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
img = Image(graph.create_png())
img.width=300; img
X, y = make_classification(n_samples=100, n_features=2, n_classes=2, n_redundant=0, n_clusters_per_class=2, random_state=54)
plot_problem(X, y)
h = tree.DecisionTreeClassifier()
h.fit(X, y)
plot_problem(X, y, h.predict)
misclassification_error(h.predict, X, y)
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3)
h.fit(X_train, y_train)
plot_problem(X_train, y_train, h.predict)
misclassification_error(h.predict, X_train, y_train)
misclassification_error(h.predict, X_test, y_test)
dot_data = StringIO()
tree.export_graphviz(h, out_file=dot_data)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
img = Image(graph.create_png())
img.width=500; img
h = tree.DecisionTreeClassifier(max_depth=3)
h.fit(X_train, y_train);
misclassification_error(h.predict, X_train, y_train)
misclassification_error(h.predict, X_test, y_test)
Explanation: Algo 3: Decision tree
End of explanation
from sklearn.naive_bayes import GaussianNB
h = GaussianNB()
h.fit(X_train, y_train)
plot_problem(X_train, y_train, h.predict)
Explanation: Algo 4: Naive Bayes classifier
The above three models are discriminative
Naive Bayes (NB) is a generative model. This means that it not only computes the boundary between the classes, but also the joint probability distribution of instances and their labels, $P(\mathbf{x},y)$
Uses the Bayes' rule:
$$
P(y=j|\mathbf{x}) =
\frac{P(\mathbf{x},y=j)}{P(\mathbf{x})} =
\frac{p(\mathbf{x}|y=j) P(y=j)}{p(\mathbf{x})} =
\frac{p(\mathbf{x}|y=j)P(y=j)}{\sum_{k=1}^K p(\mathbf{x}|y=k)P(y=k)}
$$
(1) The model:
$$
h(\mathbf{x}) = \mathrm{argmax}_{j}\ p(\mathbf{x}|y=j) P(y=j)
$$
Or, if we want probabilities for each class:
$$h_j(\mathbf{x})=P(y=j|\mathbf{x})$$
We introduce the "naive assumption" that, given a class, all features are mutually independent. The model simplifies to:
$$
h(\mathbf{x}) = \mathrm{argmax}j\ P(y=j)\prod{k=1}^n P(x_k|y=j)
$$
(2) Error function:
NB seeks to maximize the likelihood of the training data under the model (parameters):
$$
\mathcal{L}(\boldsymbol{\theta} | \mathcal{D}) \equiv
p(\mathcal{D} | \boldsymbol{\theta}) =
p\big((\mathbf{x}^{(1)},y^{(1)}),\dots,(\mathbf{x}^{(N),y^{(N)}}) | \boldsymbol{\theta}\big) =
\prod_{i=1}^N p(\mathbf{x}^{(i)}, y^{(i)} | \boldsymbol{\theta})\
$$
In other words: it chooses the parameters $\boldsymbol\theta$ that make our data most probable
(3) Optimization procedure:
Parameter optimization in this case amounts to parameter estimation from the dataset
We need to estimate $P(\mathbf{x}|y)$ (likelihood) and $P(y)$ (prior) for each class $y$
We can use a maximum likelihood estimator (MLE), a maximum a posteriori estimator (MAP) or a Bayesian estimator
The latter two account for data sparsity and avoid overfitting
MLE is the simplest: simply compute probabilities as the relative frequencies
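For instance, the MLE of the class prior is just the relative frequency of each label (a tiny added sketch, assuming y is the array of integer class labels used above):
import numpy as np
priors = np.bincount(y) / len(y)   # relative frequencies = MLE of P(y=j)
print(priors)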
End of explanation |
11,330 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Exercises Electric Machinery Fundamentals
Chapter 6
Problem 6-5
Step1: Description
A 208-V four-pole 60-Hz Y-connected wound-rotor induction motor is rated at 30 hp. Its equivalent circuit components are
Step2: For a slip of 0.05, find
(a)
The line current $I_L$
(b)
The stator copper losses $P_\text{SCL}$
(c)
The air-gap power $P_\text{AG}$
(d)
The power converted from electrical to mechanical form $P_\text{conv}$
(e)
The induced torque $\tau_\text{ind}$
(f)
The load torque $\tau_\text{load}$
(g)
The overall machine efficiency
(h)
The motor speed in revolutions per minute and radians per second
SOLUTION
The equivalent circuit of this induction motor is shown below
Step3: The phase voltage is
Step4: so line current $I_L$ is
Step5: (b)
The stator copper losses are
$$P_\text{SCL} = 3I_A^2R_1$$
Step6: (c)
The equivalent circuit did not take into account the core losses by way of $R_m$; instead they were explicitly given.
Now the air gap power is
Step7: (d)
The power converted from electrical to mechanical form is
Step8: (e)
The induced torque in the motor is
Step9: (f)
The output power of this motor is
Step10: The output speed is
Step11: Therefore the load torque is
Step12: (g)
The overall efficiency is
Step13: (h)
The motor speed in revolutions per minute is $n_m$. The motor speed in radians per second is | Python Code:
%pylab notebook
Explanation: Exercises Electric Machinery Fundamentals
Chapter 6
Problem 6-5
End of explanation
R1 = 0.10 # [Ohm]
R2 = 0.07 # [Ohm]
Xm = 10.0 # [Ohm]
X1 = 0.21 # [Ohm]
X2 = 0.21 # [Ohm]
Pfw = 500 # [W]
Pmisc = 0 # [W]
Pcore = 400 # [W]
V = 208 # [V]
Explanation: Description
A 208-V four-pole 60-Hz Y-connected wound-rotor induction motor is rated at 30 hp. Its equivalent circuit components are:
| Stator | Rotor | Power
|----------------------|----------------------|--------------------------|
| $R_1 = 0.10\,\Omega$ | $R_2 = 0.07\,\Omega$ | $P_\text{core} = 400\,W$ |
| $X_1 = 0.21\,\Omega$ | $X_2 = 0.21\,\Omega$ | $P_\text{f\&w} = 500\,W$ |
| $X_M = 10.0\,\Omega$ | | $P_\text{misc}\approx 0$ |
End of explanation
s = 0.05
Z2 = R2 + R2*(1-s)/s + X2*1j
Zf = 1/(1/(Xm*1j) + 1/Z2)
Zf_angle = arctan(Zf.imag / Zf.real)
print('Zf = ({:.3f}) Ω = {:.3f} Ω ∠{:.1f}°'.format(Zf, abs(Zf), Zf_angle/pi*180))
Explanation: For a slip of 0.05, find
(a)
The line current $I_L$
(b)
The stator copper losses $P_\text{SCL}$
(c)
The air-gap power $P_\text{AG}$
(d)
The power converted from electrical to mechanical form $P_\text{conv}$
(e)
The induced torque $\tau_\text{ind}$
(f)
The load torque $\tau_\text{load}$
(g)
The overall machine efficiency
(h)
The motor speed in revolutions per minute and radians per second
SOLUTION
The equivalent circuit of this induction motor is shown below:
<img src="figs/Problem_6-05_a.jpg" width="70%">
(a)
The easiest way to find the line current (or armature current) is to get the equivalent impedance $Z_F$ of the rotor circuit in parallel with $jX_M$ , and then calculate the current as the phase voltage divided by the sum of the series impedances, as shown below.
<img src="figs/Problem_6-05_b.jpg" width="70%">
The equivalent impedance of the rotor circuit in parallel with $jX_M$ is:
$$Z_F = \frac{1}{\frac{1}{jX_M}+\frac{1}{Z_2}}$$
End of explanation
Vphi = V / sqrt(3)
print('Vphi = {:.0f} V'.format(Vphi))
Explanation: The phase voltage is
End of explanation
Rf = Zf.real
Xf = Zf.imag
Ia = Vphi / (R1 + X1*1j + Rf + Xf*1j)
Ia_angle = arctan(Ia.imag/Ia.real)
Il = Ia
print('''
Il = Ia = {:.1f} A ∠{:.1f}°
========================''' .format(abs(Il), Ia_angle/pi *180))
Explanation: so line current $I_L$ is:
$$I_L = I_A = \frac{V_\phi}{R_1+jX_1+R_F+jX_F}$$
End of explanation
Pscl = 3 * abs(Ia)**2 * R1
print('''
Pscl = {:.0f} W
============='''.format(Pscl))
Explanation: (b)
The stator copper losses are
$$P_\text{SCL} = 3I_A^2R_1$$
End of explanation
Pag = 3 * abs(Ia)**2 * Rf - Pcore
print('''
Pag = {:.1f} kW
============='''.format(Pag/1000))
Explanation: (c)
The equivalent circuit did not take into account the core losses by way of $R_m$; instead they were explicitly given.
Now the air gap power is:
$$P_\text{AG} = P_\text{in} - P_\text{SCL} - P_\text{core}$$
$$P_\text{AG} = 3I_2^2\frac{R_2}{s} = 3I_A^2R_F - P_\text{core}$$
<hr>
Note that $3I_A^2R_F - P_\text{core}$ is equal to $3I_2^2\frac{R_2}{s}$, since the only resistance in the original rotor circuit was $\frac{R_2}{s}$, and the resistance in the Thevenin equivalent circuit is $R_F$. The power consumed by the Thevenin equivalent circuit should be the same as the power consumed by the original circuit. But the Thevenin circuit we use here is missing the $R_m$ part. As an exercise you could calculate $R_m$ from $P_\text{core}$ and then determine the Thevenin impedance as follows:
$$Z_F = \frac{1}{\frac{1}{R_m} + \frac{1}{jX_M}+\frac{1}{Z_2}}$$
This, however, is not done here and we simply subtract the core losses directly.
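A rough sketch of that exercise (my addition, not part of the textbook solution), approximating the voltage across the magnetizing branch by the phase voltage so that $P_\text{core} \approx 3V_\phi^2/R_m$:
Rm = 3 * Vphi**2 / Pcore               # rough estimate of the core-loss resistance
Zf_alt = 1/(1/Rm + 1/(Xm*1j) + 1/Z2)   # Thevenin impedance including Rm
print('Rm = {:.0f} Ω, Zf_alt = ({:.3f}) Ω'.format(Rm, Zf_alt))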
<hr>
End of explanation
Pconv = (1 - s) * Pag
print('''
Pconv = {:.1f} kW
==============='''.format(Pconv/1000))
Explanation: (d)
The power converted from electrical to mechanical form is:
$$P_\text{conv} = (1-s)P_\text{AG}$$
End of explanation
n_sync = 1800 # [r/min]
w_sync = n_sync * (2.0*pi /1.0) * (1.0 / 60.0)
tau_ind = Pag / w_sync
print('''
tau_ind = {:.1f} Nm
=================='''.format(tau_ind))
Explanation: (e)
The induced torque in the motor is:
$$\tau_\text{ind} = \frac{P_\text{AG}}{\omega_\text{sync}}$$
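(Side check added here: the synchronous speed used in the code above follows from the standard relation $n_\text{sync} = 120f/P$ for a $P$-pole machine.)
f = 60.0   # [Hz]
poles = 4
print(120*f/poles)   # = 1800 r/min, the value of n_sync used above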
End of explanation
Pout = Pconv - Pfw - Pcore - Pmisc
print('Pout = {:.1f} kW'.format(Pout/1000))
Explanation: (f)
The output power of this motor is:
$$P_\text{OUT} = P_\text{conv} - P_\text{f\&w} - P_\text{core} - P_\text{misc}$$
End of explanation
n_m = (1 - s) * n_sync
print('n_m = {:.0f} r/min'.format(n_m))
Explanation: The output speed is:
$$n_m = (1-s)n_\text{sync}$$
End of explanation
w_m = n_m * (2.0*pi /1.0) * (1.0 / 60.0)
tau_load = Pout / w_m
print('''
tau_load = {:.1f} Nm
==================='''.format(tau_load))
Explanation: Therefore the load torque is:
$$\tau_\text{load} = \frac{P_\text{OUT}}{\omega_m}$$
End of explanation
eta = Pout / (3 * Vphi * abs(Ia) * cos(Ia_angle)) * 100
print('''
η = {:.1f} %
=========='''.format(eta))
Explanation: (g)
The overall efficiency is:
$$\eta = \frac{P_\text{OUT}}{P_\text{IN}} \cdot 100\% = \frac{P_\text{OUT}}{3V_\phi I_A\cos\theta}$$
End of explanation
w_m = n_m * (2*pi / 60.0)
print('''
w_m = {:.0f} rad/s
==============='''.format(w_m))
Explanation: (h)
The motor speed in revolutions per minute is $n_m$. The motor speed in radians per second is:
End of explanation |
11,331 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Minimum attractive rate of return (MARR)
Juan David Velásquez Henao
[email protected]
Universidad Nacional de Colombia, Sede Medellín
Facultad de Minas
Medellín, Colombia
Click here to access the latest online version
Click here to see the latest online version on nbviewer.
Preparation
Step1: The cost-of-capital problem
As more capital is invested, the returns obtained decrease (it becomes harder to find investments with high returns).
As more capital is borrowed, interest rates rise (it becomes harder to find cheap credit).
If a project is funded by the partners' contributions and by several different financing schemes, how is the cost of those funds computed?
<img src="images/wacc-explain.png" width="750">
Practical case
An electric utility has the following sources of financing
Step2: When modelling loans with cashflows, two kinds of costs are considered | Python Code:
# Import the financial library.
# The import only needs to be run once.
import cashflows as cf
Explanation: Minimum attractive rate of return (MARR)
Juan David Velásquez Henao
[email protected]
Universidad Nacional de Colombia, Sede Medellín
Facultad de Minas
Medellín, Colombia
Click here to access the latest online version
Click here to see the latest online version on nbviewer.
Preparation
End of explanation
import cashflows as cf
##
## There are four sources of capital with different costs;
## their data will be stored in the following lists:
##
monto = [0] * 4
interes = [0] * 4
## stock issue
## --------------------------------------
monto[0] = 4000
interes[0] = 25.0 / 1.0 # discount rate of the stock (dividend / share price)
## loan 1.
## -------------------------------------------------------
##
nrate = cf.interest_rate(const_value=[20]*5, start=2018)
credito1 = cf.fixed_ppal_loan(amount = 2000, # amount
nrate = nrate, # interest rate
orgpoints = 50/2000) # origination costs
credito1
Explanation: The cost-of-capital problem
As more capital is invested, the returns obtained decrease (it becomes harder to find investments with high returns).
As more capital is borrowed, interest rates rise (it becomes harder to find cheap credit).
If a project is funded by the partners' contributions and by several different financing schemes, how is the cost of those funds computed?
<img src="images/wacc-explain.png" width="750">
Practical case
An electric utility has the following sources of financing:
A total of $ 4,000 from the issue of 4,000 shares. A dividend of $ 0.25 per share is expected for the coming years.
A bank loan (Loan 1) of $ 2,000. The loan is repaid in 4 equal principal installments plus interest on the outstanding debt balance, at an effective interest rate of 20%. A bank fee of $ 50 was charged at disbursement.
A bank loan (Loan 2) of $ 1,000 with a 24-point discount. The loan is repaid in 4 equal total installments that include interest plus principal. The interest rate is 20%.
The sale of a bond with a principal payment of $ 5,000, which was sold for $ 4,000. The principal will be redeemed in 4 periods and interest will be paid at a rate of 7%. The bond has a selling cost of $ 50.
The income tax rate is 30%.
Solution
End of explanation
## cash flow of the loan before taxes
tax_rate = cf.interest_rate(const_value=[30]*5, start=2018)
credito1.tocashflow(tax_rate=tax_rate)
## the effective rate paid on the loan is the one
## that makes the present value of the previous
## cash flow equal to zero (before or after
## taxes)
credito1.true_rate(tax_rate = tax_rate)
## store the data for this loan
monto[1] = 2000
interes[1] = credito1.true_rate(tax_rate = tax_rate)
## loan 2.
## -------------------------------------------------------
##
credito2 = cf.fixed_rate_loan(amount = 1000, # amount
nrate = 20, # interest rate
start = 2018,
grace = 0,
life = 4, # number of installments
dispoints = 0.24) # discount points
credito2
credito2.tocashflow(tax_rate = tax_rate)
credito2.true_rate(tax_rate = tax_rate)
## store the data for this loan
monto[2] = 1000
interes[2] = credito2.true_rate(tax_rate = tax_rate)
## loan 3 (the bond).
## -------------------------------------------------------
##
nrate = cf.interest_rate(const_value=[7]*5, start=2018)
credito3 = cf.bullet_loan(amount = 5000, # amount
nrate = nrate, # interest rate
orgpoints = 0.01, # origination costs
dispoints = 0.20) # discount points
credito3
credito3.tocashflow(tax_rate=tax_rate)
credito3.true_rate(tax_rate=tax_rate)
## store the data for this loan (the bond)
monto[3] = 5000
interes[3] = credito3.true_rate(tax_rate=tax_rate)
## amounts
monto
## rates
interes
## Weighted average cost of capital (WACC)
## -------------------------------------------------------------
## it is the weighted average of the rates, weighted by
## the fraction of total capital coming from each source
##
s = sum(monto) # total capital
wacc = sum([x*r/s for x, r in zip(monto, interes)])
wacc
Explanation: When modelling loans with cashflows, two kinds of costs are considered:
Discount points (dispoints), as a percentage of the loan amount. They are a form of interest paid in advance in order to lower the interest rate of the loan.
Origination points (orgpoints), as a percentage of the loan amount. They are the setup costs of the loan and are not treated as interest.
Since loan interest can be deducted as a financial cost, it reduces the income tax paid. Therefore, the analysis of the loans must take into account the benefit from interest payments, which equals the interest paid each period multiplied by the income tax rate. Because discount points are interest, they are included in the calculation of this benefit.
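As a quick numerical illustration of that tax benefit (an added sketch, not part of the original notebook): for a plain loan without points, the after-tax cost of debt is roughly the nominal rate times one minus the tax rate.
## illustrative only: after-tax cost of a plain 20% loan with a 30% income tax rate
nominal_rate = 0.20
tax = 0.30
nominal_rate * (1 - tax)   # about 0.14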
End of explanation |
11,332 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Step2: Machine Learning Engineer Nanodegree
Deep Learning
Project
Step3: Preprocess data
Step6: Find path names of files and prepare one-hot encoder
Step13: Load images and labels into arrays, save to disk
We first normalize the pixel-values to lie between 0 and 1. We also reshape each image to be a 3-dimensional array
Step16: Load and display an image
Step20: TensorFlow Neural Network
This code significantly overlaps with the code from Project 5
Step22: Convolution and Max Pooling
Step24: Flatten Layer
Step26: Fully-Connected Layer
Step28: Output Layer
Step30: Build Convolutional Neural Network
Step31: Train the neural network
Hyperparameters
Step33: Print cost and accuracy
Step34: Choose which of the training or testing cells below to use
Train on a single batch
In order to pick the best hyperparameters, we begin by training on a single batch. This will tell us when to stop the learning and will help in choosing a learning rate.
Step35: Fully train the model
Step36: Fine-tune a trained model
Step37: Test model | Python Code:
from urllib.request import urlretrieve
import tarfile
from os.path import isdir, isfile
from os import remove
def folder_file_name(urlpath):
Takes a URL and returns the characters after the final '/' as
the filename. In the filename, everything up until the first
period is declared to be the foldername, i.e. the folder-name
to which a tar file would be unpackaged.
# We first find the index where the name of the file begins
indexname = urlpath.rfind("/")
# Now we get the file name
filename = urlpath[indexname+1:]
# We will do the same for the folder name
folderindex = filename.find(".")
foldername = filename[:folderindex]
return filename, foldername
def download_and_unpackage(urlpath):
Downloads a file given by the url address and unpackages it
into the same directory. It then removes the compressed file.
filename, foldername = folder_file_name(urlpath)
# We only want to download and unpackage if we haven't already done it
if isdir(foldername) != True:
urlretrieve(urlpath, filename)
with tarfile.open(filename, mode='r:gz') as compressed_file:
compressed_file.extractall()
compressed_file.close()
remove(filename)
# Now the data for notMNIST
download_and_unpackage("http://commondatastorage.googleapis.com/books1000/notMNIST_small.tar.gz")
download_and_unpackage("http://commondatastorage.googleapis.com/books1000/notMNIST_large.tar.gz")
Explanation: Machine Learning Engineer Nanodegree
Deep Learning
Project: Build a Digit Recognition Program
In this notebook, a template is provided for you to implement your functionality in stages which is required to successfully complete this project. If additional code is required that cannot be included in the notebook, be sure that the Python code is successfully imported and included in your submission, if necessary. Sections that begin with 'Implementation' in the header indicate where you should begin your implementation for your project. Note that some sections of implementation are optional, and will be marked with 'Optional' in the header.
In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a 'Question' header. Carefully read each question and provide thorough answers in the following text boxes that begin with 'Answer:'. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.
Note: Code and Markdown cells can be executed using the Shift + Enter keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.
Step 1: Design and Test a Model Architecture
Design and implement a deep learning model that learns to recognize sequences of digits. Train the model using synthetic data generated by concatenating character images from notMNIST or MNIST. To produce a synthetic sequence of digits for testing, you can for example limit yourself to sequences up to five digits, and use five classifiers on top of your deep network. You would have to incorporate an additional ‘blank’ character to account for shorter number sequences.
There are various aspects to consider when thinking about this problem:
- Your model can be derived from a deep neural net or a convolutional network.
- You could experiment with sharing or not sharing the weights between the softmax classifiers.
- You can also use a recurrent network in your deep neural net to replace the classification layers and directly emit the sequence of digits one-at-a-time.
You can use Keras to implement your model. Read more at keras.io.
Here is an example of a published baseline model on this problem. (video). You are not expected to model your architecture precisely using this model nor get the same performance levels, but this is more to show an example of an approach used to solve this particular problem. We encourage you to try out different architectures for yourself and see what works best for you. Here is a useful forum post discussing the architecture as described in the paper and here is another one discussing the loss function.
Implementation
Use the code cell (or multiple code cells, if necessary) to implement the first step of your project. Once you have completed your implementation and are satisfied with the results, be sure to thoroughly answer the questions that follow.
Get the data for notMNIST
End of explanation
import scipy.ndimage
import os
import numpy as np
from sklearn.preprocessing import LabelBinarizer
from tqdm import tqdm
import matplotlib.pyplot as plt
%matplotlib inline
Explanation: Preprocess data
End of explanation
# We first get all path-names for the training and testing images
training_pathnames = [[folderandfiles[0]+"/"+imname for imname in folderandfiles[2]]
for folderandfiles in os.walk("./notMNIST_large") if folderandfiles[2]!=[]]
testing_pathnames = [[folderandfiles[0]+"/"+imname for imname in folderandfiles[2]]
for folderandfiles in os.walk("./notMNIST_small") if folderandfiles[2]!=[]]
# training_pathnames has the structure:
#[[list of path-names in first folder], [list of paths in second folder], ...]
def get_letter(filepath):
Returns the letter corresponding to an image found in filepath
# The letter is given by the name of the folder in which we find the image
indexname = filepath.rfind("/")
letter = filepath[indexname-1:indexname]
return letter
# In each folder all images depict the same letter
all_letters = np.sort([get_letter(pathname[0]) for pathname in training_pathnames])
# We may now make the function that one-hot-encodes letters into arrays
enc = LabelBinarizer()
enc.fit(all_letters)
def one_hot_encode(list_of_letters):
One hot encode a list of letters. Returns a one-hot encoded vector for each letter.
return enc.transform(list_of_letters)
# We now flatten the lists of path names
training_pathnames = np.array(sum(training_pathnames, []))
testing_pathnames = np.array(sum(testing_pathnames, []))
# When training, we don't want the images to be ordered. Therefore, we take a
# random permutation of their order.
np.random.seed(42)
training_pathnames = np.random.permutation(training_pathnames)
Explanation: Find path names of files and prepare one-hot encoder
End of explanation
def load_normalize_image(path):
Takes the directory path of an image and returns a normalized
3-dimensional array representing that image.
# First we load the image
try:
imagearray = scipy.ndimage.imread(path)
# Now we normalize it
imagearray = imagearray / 255
# We reshape it to be 3-dimensional: x-dim, y-dim, num_colors
imagearray = imagearray.reshape(imagearray.shape + (1,))
return imagearray
except:
# Some images are broken in the database; these will raise errors.
pass
def array_all_images(list_of_path_names):
Takes a list of directory paths to images and returns a 4-dimensional array
containing the pixel-data of those images. The shape is:
(num_images, x_dim, y_dim, num_colors)
all_images = [load_normalize_image(path) for path in list_of_path_names]
# Some of these might be None since the function load_normalize_image
# does not load broken images. We now remove these Nones.
all_images = np.array(list(filter(None.__ne__, all_images)))
return all_images
def load_letter(path):
Takes the directory path of an image and returns the letter-label of the image.
# First we see if it is possible to load the image
try:
imagearray = scipy.ndimage.imread(path)
# If this didn't give an error, we may get the letter
return get_letter(path)
except:
# Some images are broken in the database; these will raise errors.
pass
def array_all_labels(list_of_path_names):
Takes a list of directory paths to images and returns a 2-dimensional array
containing the one-hot-encoded labels of those images
the_letters = [load_letter(path) for path in list_of_path_names]
the_letters = list(filter(None.__ne__, the_letters))
all_labels = one_hot_encode(the_letters)
return all_labels
def batch_list(inputlist, batch_size):
Returns the inputlist split into batches of maximal length batch_size.
Each element in the returned list (i.e. each batch) is itself a list.
list_of_batches = [inputlist[ii: ii+batch_size] for ii in range(0, len(inputlist), batch_size)]
return list_of_batches
# We store all the data in a training and testing folder
if not os.path.exists("training_data"):
os.makedirs("training_data")
if not os.path.exists("testing_data"):
os.makedirs("testing_data")
# Make the input data and labels for the testing set
if isfile("./testing_data/testing_images.npy") == False:
testing_images = array_all_images(testing_pathnames)
np.save("./testing_data/testing_images.npy", testing_images)
if isfile("./testing_data/testing_labels.npy") == False:
testing_labels = array_all_labels(testing_pathnames)
np.save("./testing_data/testing_labels.npy", testing_labels)
# The trainining examples need to be turned into batches
def batch_list(inputlist, batch_size):
Returns the inputlist split into batches of maximal length batch_size.
Each element in the returned list (i.e. each batch) is itself a list.
list_of_batches = [inputlist[ii: ii+batch_size] for ii in range(0, len(inputlist), batch_size)]
return list_of_batches
# Here we specify the size of each batch
batch_size = 2**12
# Now we save the batch-data, unless it already exists
training_pathnames_batches = batch_list(training_pathnames, batch_size)
num_saved_batches = sum(["training_images_batch" in filename
for filename in list(os.walk("./training_data"))[0][2]])
# If we have a different number of batches saved comapred to what we want,
# the batches are wrong and need recomputing.
if num_saved_batches != len(training_pathnames_batches):
# We could delete the old files, but this is dangerous, since a
# typo could remove all files on the computer. We simply overwrite the files we have
for ii, batch in enumerate(tqdm(training_pathnames_batches)):
training_images_batch = array_all_images(batch)
np.save("./training_data/training_images_batch" + str(ii) + ".npy", training_images_batch)
training_labels_batch = array_all_labels(batch)
np.save("./training_data/training_labels_batch" + str(ii) + ".npy", training_labels_batch)
Explanation: Load images and labels into arrays, save to disk
We first normalize the pixel-values to lie between 0 and 1. We also reshape each image to be a 3-dimensional array: (x_length, y_length, color_channels).
Finally, we save the arrays to disk.
End of explanation
def load_training_data(batch_number, image_numbers=[]):
Loads the training data from files. It is possible to specify an interval
of images to load, or by default load the entire batch.
if image_numbers == []:
return np.load("./training_data/training_images_batch" + str(batch_number) + ".npy")
else:
return np.load("./training_data/training_images_batch" + str(batch_number) + ".npy")[image_numbers]
def load_training_labels(batch_number, image_numbers=[]):
Loads the training data from files. It is possible to specify an interval
of images to load, or by default load the entire batch.
if image_numbers == []:
return np.load("./training_data/training_labels_batch" + str(batch_number) + ".npy")
else:
return np.load("./training_data/training_labels_batch" + str(batch_number) + ".npy")[image_numbers]
def display_image(imagearray):
array_to_plot = imagearray.reshape((imagearray.shape[0], imagearray.shape[1]))
print("Image shape: {}".format(imagearray.shape))
plt.imshow(array_to_plot, cmap="gray")
plt.axis("off")
plt.show()
display_image(load_training_data(0)[0])
Explanation: Load and display an image
End of explanation
import tensorflow as tf
def neural_net_image_input(image_shape):
Return a Tensor for a batch of image input
: image_shape: Shape of the images
: return: Tensor for image input.
return tf.placeholder(tf.float32, [None, image_shape[0], image_shape[1], image_shape[2]], name="x")
def neural_net_label_input(n_classes):
Return a Tensor for a batch of label input
: n_classes: Number of classes
: return: Tensor for label input.
return tf.placeholder(tf.float32, [None, n_classes], name="y")
def neural_net_keep_prob_input():
Return a Tensor for keep probability
return tf.placeholder(tf.float32, name="keep_prob")
Explanation: TensorFlow Neural Network
This code significantly overlaps with the code from Project 5: Image Classification, in my GitHub folder.
Set up functions necessary to build the neural network
Input
End of explanation
def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides):
Apply convolution then max pooling to x_tensor
:param x_tensor: TensorFlow Tensor
:param conv_num_outputs: Number of outputs for the convolutional layer
:param conv_ksize: kernal size 2-D Tuple for the convolutional layer
:param conv_strides: Stride 2-D Tuple for convolution
:param pool_ksize: kernal size 2-D Tuple for pool
:param pool_strides: Stride 2-D Tuple for pool
: return: A tensor that represents convolution and max pooling of x_tensor
# Number of input colors
num_inputcolors = x_tensor.shape.as_list()[3]
# Convolutional filter
W_conv= tf.Variable(tf.truncated_normal([conv_ksize[0], conv_ksize[1], num_inputcolors, conv_num_outputs], stddev=0.1))
b_conv = tf.Variable(tf.constant(0.1, shape=[conv_num_outputs]))
convolution = tf.nn.conv2d(x_tensor, W_conv, strides=[1, conv_strides[0], conv_strides[1], 1], padding='SAME')
h_conv = tf.nn.relu(convolution + b_conv)
h_pool = tf.nn.max_pool(h_conv, ksize=[1, pool_ksize[0], pool_ksize[1], 1],
strides=[1, pool_strides[0], pool_strides[1], 1], padding='SAME')
return h_pool
Explanation: Convolution and Max Pooling
End of explanation
def flatten(x_tensor):
Flatten x_tensor to (Batch Size, Flattened Image Size)
: x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions.
: return: A tensor of size (Batch Size, Flattened Image Size).
flat_dimension = np.prod(x_tensor.shape.as_list()[1:])
x_flat = tf.reshape(x_tensor, [-1, flat_dimension])
return x_flat
Explanation: Flatten Layer
End of explanation
def fully_conn(x_tensor, num_outputs):
Apply a fully connected layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of output that the new tensor should be.
: return: A 2-D tensor where the second dimension is num_outputs.
input_dimensions = x_tensor.shape.as_list()[1]
W = tf.Variable(tf.truncated_normal([input_dimensions, num_outputs], stddev=0.1))
b = tf.Variable(tf.constant(0.1, shape=[num_outputs]))
h_connected = tf.nn.relu(tf.matmul(x_tensor, W) + b)
return h_connected
Explanation: Fully-Connected Layer
End of explanation
def output(x_tensor, num_outputs):
Apply a output layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of output that the new tensor should be.
: return: A 2-D tensor where the second dimension is num_outputs.
input_dimensions = x_tensor.shape.as_list()[1]
W = tf.Variable(tf.truncated_normal([input_dimensions, num_outputs], stddev=0.1))
b = tf.Variable(tf.constant(0.1, shape=[num_outputs]))
h_output = tf.matmul(x_tensor, W) + b
return h_output
Explanation: Output Layer
End of explanation
def conv_net(x, keep_prob):
Create a convolutional neural network model
: x: Placeholder tensor that holds image data.
: keep_prob: Placeholder tensor that hold dropout keep probability.
: return: Tensor that represents logits
# Function Definition from Above:
# conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides)
convlayer_1 = tf.nn.dropout(conv2d_maxpool(x, 20, (4, 4), (1, 1), (2, 2), (2, 2)), keep_prob)
#convlayer_1b = tf.nn.dropout(conv2d_maxpool(convlayer_1, 10, (4, 4), (1, 1), (2, 2), (1, 1)), keep_prob)
convlayer_2 = tf.nn.dropout(conv2d_maxpool(convlayer_1, 30, (4, 4), (1, 1), (2, 2), (2, 2)), keep_prob)
#convlayer_2b = conv2d_maxpool(convlayer_2, 20, (1, 1), (1, 1), (1, 1), (1, 1))
convlayer_3 = tf.nn.dropout(conv2d_maxpool(convlayer_2, 60, (4, 4), (1, 1), (2, 2), (2, 2)), keep_prob)
#convlayer_3b = conv2d_maxpool(convlayer_3, 50, (1, 1), (1, 1), (1, 1), (1, 1))
# Function Definition from Above:
# flatten(x_tensor)
flattened_tensor = flatten(convlayer_3)
# Function Definition from Above:
# fully_conn(x_tensor, num_outputs)
connlayer_1 = tf.nn.dropout(fully_conn(flattened_tensor, 200), keep_prob)
connlayer_2 = tf.nn.dropout(fully_conn(connlayer_1, 100), keep_prob)
connlayer_3 = tf.nn.dropout(fully_conn(connlayer_2, 30), keep_prob)
# Function Definition from Above:
# output(x_tensor, num_outputs)
outputlayer = output(connlayer_3, 10)
return outputlayer
#=============================
# Build the Neural Network
#=============================
# Remove previous weights, bias, inputs, etc..
tf.reset_default_graph()
# Inputs
x = neural_net_image_input((28, 28, 1))
y = neural_net_label_input(10)
keep_prob = neural_net_keep_prob_input()
# Model
#logits = conv_net(x, keep_prob)
logits = conv_net(x, keep_prob)
# Name logits Tensor, so that is can be loaded from disk after training
logits = tf.identity(logits, name='logits')
Explanation: Build Convolutional Neural Network
End of explanation
epochs = 30
keep_probability = 0.5
learning_rate = 0.0001 # default is 0.001 N.B. it is also possible to make this a placeholder object!
size_of_minibatch = 2**7
#===============================
# Don't need to edit below this
#===============================
# Loss and Optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
Explanation: Train the neural network
Hyperparameters
End of explanation
# The final batch will be our validation set
validation_inputarray = load_training_data(num_saved_batches - 1)
validation_labels = load_training_labels(num_saved_batches - 1)
def get_stats(session, feature_batch, label_batch, cost, accuracy, printout=True):
Obtain information about loss and validation accuracy
: session: Current TensorFlow session
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
: cost: TensorFlow cost function
: accuracy: TensorFlow accuracy function
cost_value = session.run(cost, feed_dict={x: feature_batch, y: label_batch, keep_prob:1.0})
accuracy_value = session.run(accuracy, feed_dict={x: validation_inputarray,
y: validation_labels, keep_prob:1.0})
if printout:
print("\nLoss: {}".format(cost_value))
print("Accuracy (validation): {}".format(accuracy_value))
return cost_value, accuracy_value
Explanation: Print cost and accuracy
End of explanation
batch_i = 0
print('Checking the Training on a Single Batch, i.e. number {}'.format(batch_i))
accuracy_list = []
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
for batch_inputarrays, batch_labels in zip(batch_list(load_training_data(batch_i), size_of_minibatch),
batch_list(load_training_labels(batch_i), size_of_minibatch)):
sess.run(optimizer, feed_dict={x: batch_inputarrays, y: batch_labels, keep_prob: keep_probability})
cost_value, accuracy_value = get_stats(sess, batch_inputarrays, batch_labels, cost, accuracy,
printout=False)
#print('\nEpoch {:>2}, Batch {}: {} '.format(epoch + 1, batch_i, accuracy_value), end='')
accuracy_list.append(accuracy_value)
# Save the model
#saver = tf.train.Saver()
#save_path = saver.save(sess, "./trained_model", global_step=epoch)
plt.plot(accuracy_list)
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
Explanation: Choose which of the training or testing cells below to use
Train on a single batch
In order to pick the best hyperparameters, we begin by training on a single batch. This will tell us when to stop the learning and will help in choosing a learning rate.
End of explanation
print('Training...')
epochs = 30
accuracy_list = []
with tf.Session() as sess:
# It is very important the saver is defined INSIDE the block "with tf.Session() as sess"
# otherwise it will be very difficult to load the graph (unless we name all the variables etc)
saver = tf.train.Saver()
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
for batch_i in range(num_saved_batches - 1):
for batch_inputarrays, batch_labels in zip(batch_list(load_training_data(batch_i),
size_of_minibatch),
batch_list(load_training_labels(batch_i),
size_of_minibatch)
):
sess.run(optimizer, feed_dict={x: batch_inputarrays, y: batch_labels, keep_prob: keep_probability})
cost_value, accuracy_value = get_stats(sess, batch_inputarrays, batch_labels, cost, accuracy,
printout=False)
print('Epoch {:>2}, Batch {}: {}'.format(epoch + 1, batch_i, accuracy_value))
accuracy_list.append(accuracy_value)
if epoch % 10 == 0:
# Save the intermediate model
save_path = saver.save(sess, "./trained_model", global_step=epoch)
# Save the final model
save_path = saver.save(sess, "./trained_model", global_step=epoch)
plt.plot(accuracy_list)
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
Explanation: Fully train the model
End of explanation
# Decrease the learning rate for the final part!
epochs = 30
load_model = "./trained_model-29"
# read off the epoch from the number in load_model
next_epoch = int(load_model[load_model.rfind("-")+1:]) + 1
new_accuracy_list = []
with tf.Session() as sess:
saver = tf.train.Saver()
saver.restore(sess, load_model)
print(sess.run(accuracy, feed_dict={x: validation_inputarray,
y: validation_labels, keep_prob:1.0}))
# Training cycle
for epoch in range(next_epoch, next_epoch + epochs):
for batch_i in range(num_saved_batches - 1):
for batch_inputarrays, batch_labels in zip(batch_list(load_training_data(batch_i),
size_of_minibatch),
batch_list(load_training_labels(batch_i),
size_of_minibatch)
):
sess.run(optimizer, feed_dict={x: batch_inputarrays, y: batch_labels,
keep_prob: keep_probability})
cost_value, accuracy_value = get_stats(sess, batch_inputarrays, batch_labels, cost, accuracy,
printout=False)
print('Epoch {:>2}, Batch {}: {}'.format(epoch + 1, batch_i, accuracy_value))
new_accuracy_list.append(accuracy_value)
if epoch % 10 == 0:
# Save the intermediate model
save_path = saver.save(sess, "./trained_model", global_step=epoch)
# Save the final model
save_path = saver.save(sess, "./trained_model", global_step=epoch)
plt.plot(new_accuracy_list)
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
Explanation: Fine-tune a trained model
End of explanation
load_model = "./trained_model-59"
testing_inputarray = np.load("./testing_data/testing_images.npy")
testing_labels = np.load("./testing_data/testing_labels.npy")
with tf.Session() as sess:
saver = tf.train.Saver()
saver.restore(sess, load_model)
print("The model's test-set accuracty is {}%".format(np.round(sess.run(accuracy,
feed_dict={x: testing_inputarray,
y: testing_labels, keep_prob:1.0})*100, decimals=2)))
Explanation: Test model
End of explanation |
11,333 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Shifty Lines
Step1: Let's first load our first example spectrum
Step2: Next, we're going to need the lines we're interested in. Let's use the Silicon lines. Note that these are all in electron volt. However, the data are in Angstrom, which means I need to convert them.
I'm going to use astropy.units to do that
Step3: Now I can do the actual conversion
Step4: Let's plot the lines onto the spectrum
Step5: We currently don't have the positions of the longer-wavelength lines, so we're going to cut the spectrum at 7.1 Angstrom
Step6: We are going to save the spectrum as well as the line centers
Step7: Adding some more lines for later
We have some more lines that are going to become important/interesting later, because
they'll be shifted at a different redshift
Step8: The lines need to be converted to Angstroms
Step9: What does the spectrum with the line centroids look like?
Step10: Let's save the extended list of lines to a file
Step11: Simulating Data
In order to test any methods we are creating, we are first going to produce some simulated data.
Set the seed so that the output simulations will always be the same
Step13: The spectral lines are modelled as simple Gaussians with an amplitude $A$, a width $\sigma$ and a position $\lambda_0$.
Because energy data comes naturally binned (the original channels detect photons between a certain minimum and maximum energy), we integrate over energy bins to get an accurate estimate of the flux in each energy bin. This also allows the use of uneven binning.
In order to integrate over the bins correctly, I also define the cumulative distribution function (CDF) of a Gaussian below, which is, in fact, the integral of the Gaussian function.
This also means that the amplitude is defined as the integrated area under the Gaussian rather than the height of the Gaussian, but this is closer to the physical quantities astronomers might be interested in (equivalent width) anyway.
Step14: A simple test
Step16: Simulating Spectra
In order to test our algorithm, we'd like to simulate some test data where we know the "ground truth" (i.e. the input parameters that made the spectrum).
Below is a (admittedly complicated) function that will simulate data for various test cases.
We'll address these test cases one by one below and simulate a spectrum to test.
Step17: Test 1
Step18: Let's plot the spectrum
Step19: We're going to save the data and the parameters in a pickle file for later use. We'll also save the fake data itself in a way that I can easily input it into ShiftyLines.
Step20: Sampling the Model
If DNest4 and ShiftyLines are installed and compiled, you should now be able to run this model (from the ShiftyLines/code/ directory) with
>>> ./main -d ../data/test_noshift1.txt -t 8
The last option sets the number of threads to run; this will depend on your computer and how many CPUs you can keep busy with this.
In this run, we set the number of levels in the OPTIONS file to $100$, based on running the sampler until the likelihood change between two levels fell below $1$ or so.
Results
Here are the results of the initial simulation. For this run, we set the number of Doppler shifts to exactly $1$ and do not sample over redshifts. This means the model is equivalent to simple template fitting, but it makes a fairly good test.
First, we need to move the posterior run to a new directory so it doesn't get overwritten by subsequent runs. We'll write a function for that
Step21: We'll need to load the data and the posterior samples for plotting
Step22: First plot
Step23: What's the posterior distribution over the constant background?
Step24: What does the OU process modelling the variable background look like?
Step25: Now we can look at the hyperparameters for the log-amplitude and log-q priors
Step26: Let's do the same for the width
Step27: The next parameter (pp) in the model is the threshold determining the sign of the amplitude (i.e. whether a line is an absorption or an emission line). The sign is sampled as a random variable between $0$ and $1$; the threshold sets the boundary below which a line will become an absorption line. Above the threshold, the sign will be flipped to return an emission line.
For a spectrum with mostly absorption lines, pp should be quite high, close to $1$. For a spectrum with mostly emission lines, pp should be close to $0$.
Step28: Hmmm, the model doesn't seem to care much about that? Funny!
The Doppler shift is next!
Step29: What is the posterior on all the line amplitudes and widths? Let's try overplotting them all
Step30: Looks like it samples amplitudes correctly! Let's make the same Figure for log-q
Step31: Final thing, just to be sure
Step43: A General Plotting function for the Posterior
We'll make some individual plotting functions that we can then combine to plot useful Figures on the whole posterior sample!
Step44: Let's run this function on the data we just made individual plots from to see whether it worked.
The model had $8$ lines and $1$ redshift
Step46: A general function for simulating data sets
Based on what we just did, we'll write a function that takes parameters as an input and spits out the files we need
Step47: A Spectrum with Weak Absorption Lines
The model should still work if the lines are very weak. We will simulate a spectrum with weak lines to test how the strength of the lines will affect the inferences drawn from the model
Step48: This time, we run DNest4 with $100$ levels.
Results
Let's first move the samples into the right directory and give it the right filename
Step49: It looks like for this spectrum the amplitude really is too weak to constrain anything, so the Doppler shift does whatever the hell it wants.
I'm not sure I like this behaviour; I might need to ask Brendon about it!
A Spectrum With Emission+Absorption Lines
We'll do the same test as the first, but with varying strong emission and absorption lines
Step50: Now run the new function
Step51: Run the model the same as with test_noshift2.txt, but with $150$ levels.
Results
Step52: A Spectrum With Variable Absorption Lines
In this test, we'll see how the model deals with variable absorption lines
Step53: Results
I set the number of levels to $200$.
Step54: A Spectrum with Lines Turned Off
What does the model do if lines are just not there? This is an important question,
so we will now make a spectrum with three lines having amplitudes $A=0$
Step55: Results
Step56: A Doppler-shifted Spectrum with Absorption lines
We are now going to look how well the model constrains the Doppler shift.
Again, we build a simple model where all lines have the sample amplitude,
Step57: Results
Step58: A Shifted Spectrum with Emission/Absorption Lines with Variable Amplitudes and Signs
More complicated
Step59: Results
Adding a Noise Process
At this point, I should be adding an OU process to the data generation process to simulate the effect of a variable background in the spectrum.
THIS IS STILL TO BE DONE!
Testing Multiple Doppler Shift Components
For all of the above simulations, we also ought to test how well the model works if I add additional Doppler shift components to sample over.
For this, you'll need to change the line
Step60: A Spectrum with Two Doppler Shifts
What if the lines are shifted with respect to each other?
Let's simulate a spectrum where the silicon lines are Doppler shifted by one value, but the other lines are shifted by a different Doppler shift.
Step61: Let's save all the output files as we did before
Step68: Results
A simple model for Doppler-shifted Spectra
Below we define a basic toy model which samples over all the line amplitudes, widths as well as a Doppler shift. We'll later extend this to work in DNest4.
Step69: Now we can use emcee to sample.
We're going to use one of our example data sets, one without Doppler shift and strong lines | Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context("notebook", font_scale=2.5, rc={"axes.labelsize": 26})
sns.set_style("darkgrid")
plt.rc("font", size=24, family="serif", serif="Computer Sans")
plt.rc("text", usetex=True)
import cPickle as pickle
import numpy as np
import scipy.special
import shutil
from astropy import units
import astropy.constants as const
import emcee
import corner
import dnest4
Explanation: Shifty Lines: Line Detection and Doppler Shift Estimation
We have some X-ray spectra that have absorption and emission lines in them. The original spectrum is seen through a stellar wind, which moves either toward or away from us, Doppler-shifting the absorbed lines. Not all lines will be absorbed; some may be at their original position. There may also be more than one redshift in the same spectrum.
There are various other complications: for example, we rarely have the complete spectrum, but separate intervals of it (due to issues with calibration). In principle, however, the other segments may give valuable information about any other given segment.
Simplest problem: estimating Doppler shift and line presence at the same time
Second-simplest problem: some lines are Doppler shifted, some are not
Full problem: estimating the number of redshifts and the lines belonging to each in the same model
Note: we are going to need to add some kind of OU process or spline to model the background.
End of explanation
datadir = "../data/"
datafile = "8525_nodip_0.dat"
data = np.loadtxt(datadir+datafile)
wavelength_left = data[:,0]
wavelength_right = data[:,1]
wavelength_mid = data[:,0] + (data[:,1]-data[:,0])/2.
counts = data[:,2]
counts_err = data[:,3]
plt.figure(figsize=(16,4))
plt.plot(wavelength_mid, counts)
plt.gca().invert_xaxis()
plt.xlim(wavelength_mid[-1], wavelength_mid[0])
Explanation: Let's first load our first example spectrum:
End of explanation
siXIV = 1864.9995 * units.eV
siXIII = 2005.494 * units.eV
siXII = 1845.02 * units.eV
siXII_err = 0.07 * units.eV
siXI = 1827.51 * units.eV
siXI_err = 0.06 * units.eV
siX = 1808.39 * units.eV
siX_err = 0.05 * units.eV
siIX = 1789.57 * units.eV
siIX_err = 0.07 * units.eV
siVIII = 1772.01 * units.eV
siVIII_err = 0.09 * units.eV
siVII = 1756.68 * units.eV
siVII_err = 0.08 * units.eV
si_all = [siXIV, siXIII, siXII, siXI, siX, siIX, siVIII, siVII]
si_err_all = [0.0*units.eV, 0.0*units.eV, siXII_err, siXI_err,
siX_err, siIX_err, siVIII_err, siVII_err]
Explanation: Next, we're going to need the lines we're interested in. Let's use the Silicon lines. Note that these are all in electron volt. However, the data are in Angstrom, which means I need to convert them.
I'm going to use astropy.units to do that:
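As a sanity check on the conversion (added here), the same numbers follow from the standard shortcut $\lambda[\mathrm{\AA}] \approx 12398.4 / E[\mathrm{eV}]$:
hc_eV_angstrom = 12398.4            # h*c in eV * Angstrom
print(hc_eV_angstrom / 1864.9995)   # Si XIV: ~6.65 Angstrom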
End of explanation
si_all_angstrom = [(const.h*const.c/s.to(units.Joule)).to(units.Angstrom)
for s in si_all]
si_err_all_angstrom = [(const.h*const.c/s.to(units.Joule)).to(units.Angstrom)
for s in si_err_all]
si_err_all_angstrom[0] = 0.0*units.Angstrom
si_err_all_angstrom[1] = 0.0*units.Angstrom
for s in si_all_angstrom:
print(s)
Explanation: Now I can do the actual conversion:
End of explanation
plt.figure(figsize=(16,4))
plt.errorbar(wavelength_mid, counts, yerr=counts_err, fmt="o")
plt.gca().invert_xaxis()
plt.xlim(wavelength_mid[-1], wavelength_mid[0])
for s in si_all_angstrom:
plt.vlines(s.value, np.min(counts), np.max(counts), lw=2)
Explanation: Let's plot the lines onto the spectrum:
End of explanation
maxind = wavelength_mid.searchsorted(7.1)
wnew_mid = wavelength_mid[:maxind]
wnew_left = wavelength_left[:maxind]
wnew_right = wavelength_right[:maxind]
cnew = counts[:maxind]
enew = counts_err[:maxind]
plt.figure(figsize=(16,4))
plt.errorbar(wnew_mid, cnew, yerr=enew, fmt="o")
plt.gca().invert_xaxis()
plt.xlim(wnew_mid[-1], wnew_mid[0])
for s in si_all_angstrom:
plt.vlines(s.value, np.min(counts), np.max(counts), lw=2)
Explanation: We currently don't have the positions of the longer-wavelength lines, so we're going to cut the spectrum at 7.1 Angstrom:
End of explanation
# the full spectrum in a format usable by ShiftyLines
np.savetxt(datadir+"8525_nodip_full.txt", np.array([wavelength_left, wavelength_right,
counts, counts_err]).T)
# the cut spectrum with the Si lines only
np.savetxt(datadir+"8525_nodip_cut.txt", np.array([wnew_left, wnew_right, cnew, enew]).T)
## convert from astropy.units to float
si_all_val = np.array([s.value for s in si_all_angstrom])
si_err_all_val = np.array([s.value for s in si_err_all_angstrom])
np.savetxt(datadir+"si_lines.txt", np.array(si_all_val))
Explanation: We are going to save the spectrum as well as the line centers:
End of explanation
# line energies in keV
al_ly_alpha = 1.72855 * 1000 * units.eV
mg_he_gamma = 1.65910 * 1000 * units.eV
mg_he_delta = 1.69606 * 1000 * units.eV
mg_ly_beta = 1.74474 * 1000 * units.eV
mg_ly_gamma = 1.84010 * 1000 * units.eV
mg_ly_delta = 1.88423 * 1000 * units.eV
fe_xxiv = 1.88494 * 1000 * units.eV
Explanation: Adding some more lines for later
We have some more lines that are going to become important/interesting later, because
they'll be shifted at a different redshift:
End of explanation
other_lines_all = [al_ly_alpha, mg_he_gamma, mg_ly_beta, mg_ly_gamma, mg_ly_delta, fe_xxiv]
other_lines_all_angstrom = [(const.h*const.c/s.to(units.Joule)).to(units.Angstrom)
for s in other_lines_all]
other_lines_all_val = np.array([s.value for s in other_lines_all_angstrom])
for l in other_lines_all_angstrom:
print(str(l.value) + " " + str(l.unit))
Explanation: The lines need to be converted to Angstroms
End of explanation
plt.figure(figsize=(16,4))
plt.errorbar(wavelength_mid, counts, yerr=counts_err, fmt="o")
plt.gca().invert_xaxis()
plt.xlim(wavelength_mid[-1], wavelength_mid[0])
for s in si_all_angstrom:
plt.vlines(s.value, np.min(counts), np.max(counts), lw=2)
for l in other_lines_all_angstrom:
plt.vlines(l.value, np.min(counts), np.max(counts), lw=2)
Explanation: What does the spectrum with the line centroids look like?
End of explanation
# make extended array of lines
lines_extended = np.hstack([si_all_val, other_lines_all_val])
# save the lines
np.savetxt(datadir+"lines_extended.txt", np.array(lines_extended))
Explanation: Let's save the extended list of lines to a file:
End of explanation
np.random.seed(20160216)
Explanation: Simulating Data
In order to test any methods we are creating, we are first going to produce some simulated data.
Set the seed so that the output simulations will always be the same:
End of explanation
def gaussian_cdf(x, w0, width):
return 0.5*(1. + scipy.special.erf((x-w0)/(width*np.sqrt(2.))))
def spectral_line(wleft, wright, w0, amplitude, width):
Use the CDF of a Gaussian distribution to define spectral
lines. We use the CDF to integrate over the energy bins,
rather than taking the mid-bin energy.
Parameters
----------
wleft: array
Left edges of the energy bins
wright: array
Right edges of the energy bins
w0: float
The centroid of the line
amplitude: float
The amplitude of the line
width: float
The width of the line
Returns
-------
line_flux: array
The array of line fluxes integrated over each bin
line_flux = amplitude*(gaussian_cdf(wright, w0, width)-
gaussian_cdf(wleft, w0, width))
return line_flux
Explanation: The spectral lines are modelled as simple Gaussians with an amplitude $A$, a width $\sigma$ and a position $\lambda_0$.
Because energy data comes naturally binned (the original channels detect photons between a certain minimum and maximum energy), we integrate over energy bins to get an accurate estimate of the flux in each energy bin. This also allows the use of uneven binning.
In order to integrate over the bins correctly, I also define the cumulative distribution function (CDF) of a Gaussian below, which is, in fact, the integral of the Gaussian function.
This also means that the amplitude is defined as the integrated area under the Gaussian rather than the height of the Gaussian, but this is closer to the physical quantities astronomers might be interested in (equivalent width) anyway.
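A quick numerical check of that statement (added here; it just reuses spectral_line from above): summed over a fine grid of bins covering the whole line, the integrated bin fluxes add up to the amplitude.
edges = np.linspace(6.0, 7.2, 1201)
check_flux = spectral_line(edges[:-1], edges[1:], 6.6, 0.01, 0.01)
print(check_flux.sum())   # ~0.01, i.e. the amplitude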
End of explanation
w0 = 6.6
amp = 0.01
width = 0.01
line_flux = spectral_line(wnew_left, wnew_right, w0, amp, width)
plt.plot(wnew_mid, line_flux)
Explanation: A simple test:
End of explanation
def fake_spectrum(wleft, wright, line_pos, logbkg=np.log(0.09), err=0.007, dshift=0.0,
sample_logamp=False, sample_logq=False, sample_signs=False,
logamp_hypermean=None, logamp_hypersigma=np.log(0.08), nzero=0,
logq_hypermean=np.log(500), logq_hypersigma=np.log(50)):
Make a fake spectrum with emission/absorption lines.
The background is constant, though that should later become an OU process or
something similar.
NOTE: The amplitude *must not* fall below zero! I'm not entirely sure how to deal
with that yet!
Parameters
----------
wleft: np.ndarray
Left edges of the energy bins
wright: np.ndarray
Right edges of the energy bins
line_pos: np.ndarray
The positions of the line centroids
bkg: float
The value of the constant background
err: float
The width of the Gaussian error distribution
dshift: float, default 0.0
The Doppler shift of the spectral lines.
sample_amp: bool, default False
Sample all amplitudes? If not, whatever value is set in
`amp_hypermean` will be set as collective amplitude for all
lines
sample_width: bool, default False
Sample all line widths? If not, whatever value is set in
`width_hypersigma` will be set as collective amplitude for all
lines
sample_signs: bool, default False
Sample the sign of the line amplitude (i.e. whether the line is an
absorption or emission line)? If False, all lines will be absorption
lines
logamp_hypermean: {float | None}, default None
The mean of the Gaussian prior distribution on the line amplitude. If None,
it is set to the same value as `bkg`.
logamp_hypersigma: float, default 0.08
The width of the Gaussian prior distribution on the line amplitude.
nzero: int, default 0
The number of lines to set to zero amplitude
logq_hypermean: float, default 0.01
The mean of the Gaussian prior distribution on the
q-factor, q=(line centroid wavelength)/(line width)
logq_hypersigma: float, default 0.01
The width of the Gaussian prior distribution on the
q-factor, q=(line centroid wavelength)/(line width)
Returns
-------
model_flux: np.ndarray
The array of model line fluxes for each bin
fake_flux: np.ndarray
The array of fake fluxes (with errors) for each bin
# number of lines
nlines = line_pos.shape[0]
# shift spectral lines
line_pos_shifted = line_pos*(1. + dshift)
# if sampling the amplitudes
if sample_logamp:
# sample line amplitudes
logamps = np.random.normal(logamp_hypermean, logamp_hypersigma, size=nlines)
else:
logamps = np.zeros(nlines)+logamp_hypermean
amps = np.exp(logamps)
if nzero > 0:
zero_ind = np.random.choice(np.arange(nlines), size=nzero)
for z in zero_ind:
amps[int(z)] = 0.0
if sample_signs:
# sample sign of the amplitudes
signs = np.random.choice([-1., 1.], p=[0.5, 0.5], size=nlines)
else:
# all lines are absorption lines
signs = -1.*np.ones(nlines)
# include signs in the amplitudes
amps *= signs
if sample_logq:
# widths of the lines
logq = np.random.normal(logq_hypermean, logq_hypersigma, size=nlines)
else:
logq = np.ones(nlines)*logq_hypermean
widths = line_pos_shifted/np.exp(logq)
model_flux = np.zeros_like(wleft) + np.exp(logbkg)
for si, a, w in zip(line_pos_shifted, amps, widths):
model_flux += spectral_line(wleft, wright, si, a, w)
    fake_flux = model_flux + np.random.normal(0.0, err, size=model_flux.shape[0])
pars = {"wavelength_left": wleft, "wavelength_right": wright, "err":err,
"model_flux": model_flux, "fake_flux": fake_flux, "logbkg":logbkg,
"dshift": dshift, "line_pos": line_pos_shifted, "logamp": logamps,
"signs": signs, "logq": logq }
return pars
Explanation: Simulating Spectra
In order to test our algorithm, we'd like to simulate some test data where we know the "ground truth" (i.e. the input parameters that made the spectrum).
Below is an (admittedly complicated) function that will simulate data for the various test cases.
We'll address these test cases one by one below and simulate a spectrum for each.
End of explanation
froot = "test_noshift1"
## set amplitude and q
logamp_mean = np.log(0.3)
logq_mean = np.log(600.)
# set Doppler shift
dshift = 0.0
# set background
logbkg = np.log(0.09)
# do not sample amplitudes or q-factors(all are the same!)
sample_logamp = False
sample_logq = False
# all lines are absorption lines
sample_signs = False
# error on the data points (will sample from a Gaussian distribution)
err = 0.007
# do not set any lines to zero!
nzero = 0
pars = fake_spectrum(wnew_left, wnew_right, si_all_val, logbkg=logbkg, err=err,
dshift=dshift, sample_logamp=sample_logamp, sample_logq=sample_logq,
logamp_hypermean=logamp_mean, logq_hypermean=logq_mean,
sample_signs=sample_signs, nzero=nzero)
model_flux = pars["model_flux"]
fake_flux = pars["fake_flux"]
fake_err = np.zeros_like(fake_flux) + pars["err"]
Explanation: Test 1: A spectrum with no redshift and strong lines
As a first simple check, we simulate a spectrum with strong absorption lines at all available line positions. The amplitudes and widths ($q$-values) are the same for all lines. There is no Doppler shift.
We use the wavelength bins from the real data for generating the simulation:
End of explanation
plt.figure(figsize=(14,6))
plt.errorbar(wnew_mid, fake_flux, yerr=fake_err, fmt="o", label="simulated flux", alpha=0.7)
plt.plot(wnew_mid, model_flux, label="simulated model", lw=3)
plt.xlim(wnew_mid[0], wnew_mid[-1])
plt.gca().invert_xaxis()
plt.legend(prop={"size":18})
plt.xlabel("Wavelength [Angstrom]")
plt.ylabel("Normalized Flux")
plt.savefig(datadir+froot+"_lc.png", format="png")
Explanation: Let's plot the spectrum:
End of explanation
# save the whole dictionary in a pickle file
f = open(datadir+froot+"_data.pkl", "w")
pickle.dump(pars, f)
f.close()
# save the fake data in an ASCII file for input into ShiftyLines
np.savetxt(datadir+froot+".txt", np.array([wnew_left, wnew_right, fake_flux, fake_err]).T)
Explanation: We're going to save the data and the parameters in a pickle file for later use. We'll also save the fake data itself in a way that I can easily input it into ShiftyLines.
End of explanation
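As a quick added sanity check (not part of the original workflow), we can read the ASCII file straight back in and confirm it contains the four columns ShiftyLines expects: left bin edges, right bin edges, flux and flux error.
check = np.loadtxt(datadir+froot+".txt")
print("bins: %i, columns: %i"%(check.shape[0], check.shape[1]))
assert np.all(check[:,1] > check[:,0]), "right bin edges should always exceed left bin edges"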
import shutil
def move_dnest_output(froot, dnest_dir="./"):
shutil.move(dnest_dir+"posterior_sample.txt", froot+"_posterior_sample.txt")
shutil.move(dnest_dir+"sample.txt", froot+"_sample.txt")
shutil.move(dnest_dir+"sample_info.txt", froot+"_sample_info.txt")
shutil.move(dnest_dir+"weights.txt", froot+"_weights.txt")
shutil.move(dnest_dir+"levels.txt", froot+"_levels.txt")
return
move_dnest_output("../data/%s"%froot, "../code/")
Explanation: Sampling the Model
If DNest4 and ShiftyLines are installed and compiled, you should now be able to run this model (from the ShiftyLines/code/ directory) with
>>> ./main -d ../data/test_noshift1.txt -t 8
The last option sets the number of threads to run; this will depend on your computer and how many CPUs you can keep busy with this.
In this run, we set the number of levels in the OPTIONS file to $100$, based on running the sampler until the likelihood change between two levels fell below $1$ or so.
Results
Here are the results of the initial simulation. For this run, we set the number of Doppler shift components to exactly $1$ and do not sample over the number of components. This means the model is equivalent to simple template fitting, but it makes a fairly good test.
First, we need to move the posterior run to a new directory so it doesn't get overwritten by subsequent runs. We'll write a function for that
End of explanation
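If you prefer to launch the sampler from the notebook rather than a terminal, something like the (commented-out) subprocess call below should work; the executable name, data path and thread count are taken directly from the command quoted above.
import subprocess
# Commented out so the notebook doesn't accidentally start a long DNest4 run:
# subprocess.call(["./main", "-d", "../data/test_noshift1.txt", "-t", "8"], cwd="../code/")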
# the pickle file with the data + parameters:
f = open(datadir+froot+"_data.pkl")
data = pickle.load(f)
f.close()
print("Keys in data dictionary: " + str(data.keys()))
# the posterior samples
sample = np.atleast_2d(np.loadtxt(datadir+froot+"_posterior_sample.txt"))
nsamples = sample.shape[0]
print("We have %i samples from the posterior."%nsamples)
Explanation: We'll need to load the data and the posterior samples for plotting:
End of explanation
# randomly pick some samples from the posterior to plot
s_ind = np.random.choice(np.arange(nsamples, dtype=int), size=20)
# the middle of the wavelength bins for plotting
wmid = data["wavelength_left"] + (data["wavelength_right"] - data["wavelength_left"])/2.
# the error on the data
yerr = np.zeros_like(wmid) + data['err']
plt.figure(figsize=(14,6))
plt.errorbar(wmid, data["fake_flux"], yerr=yerr, fmt="o")
for i in s_ind:
plt.plot(wmid, sample[i,-wmid.shape[0]:], lw=2, alpha=0.7)
plt.xlim(wmid[0], wmid[-1])
plt.gca().invert_xaxis()
plt.xlabel("Wavelength [Angstrom]")
plt.ylabel("Normalized Flux")
plt.tight_layout()
plt.savefig(datadir+froot+"_samples.png", format="png")
Explanation: First plot: a random set of realizations from the posterior overplotted on the data:
End of explanation
fig = plt.figure(figsize=(12,9))
# Plot a histogram and kernel density estimate
ax = fig.add_subplot(111)
sns.distplot(sample[:,0], hist_kws={"histtype":"stepfilled"}, ax=ax)
_, ymax = ax.get_ylim()
ax.vlines(np.exp(data["logbkg"]), 0, ymax, lw=3, color="black")
ax.set_xlabel("Normalized Background Flux")
ax.set_ylabel("$N_{\mathrm{samples}}$")
plt.tight_layout()
plt.savefig(datadir+froot+"_bkg.png", format="png")
Explanation: What's the posterior distribution over the constant background?
End of explanation
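A compact numerical summary to go along with the histogram (added sketch; the background lives in column $0$ of the posterior sample, as used in the plot above):
bkg_samples = sample[:,0]
print("background: %.4f +/- %.4f (true value: %.4f)"%(np.mean(bkg_samples),
                                                      np.std(bkg_samples),
                                                      np.exp(data["logbkg"])))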
fig, (ax1, ax2) = plt.subplots(1,2,figsize=(14,6))
sns.distplot(sample[:,1], hist_kws={"histtype":"stepfilled"}, ax=ax1)
#_, ymax = ax.get_ylim()
#ax.vlines(np.exp(data["logbkg"]), 0, ymax, lw=3, color="black")
ax1.set_xlabel(r"OU time scale $\tau$")
ax1.set_ylabel(r"$N_{\mathrm{samples}}$")
sns.distplot(sample[:,2], hist_kws={"histtype":"stepfilled"}, ax=ax2)
#_, ymax = ax.get_ylim()
#ax.vlines(np.exp(data["logbkg"]), 0, ymax, lw=3, color="black")
ax2.set_xlabel(r"OU amplitude")
ax2.set_ylabel(r"$N_{\mathrm{samples}}$")
fig.tight_layout()
plt.savefig(datadir+froot+"_ou.png", format="png")
Explanation: What does the OU process modelling the variable background look like?
End of explanation
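For intuition, here is a minimal sketch of what an Ornstein-Uhlenbeck (OU) background with time scale tau and amplitude sigma looks like on the wavelength grid; the discretization and the parameter values below are assumptions for illustration only, not the exact process used inside ShiftyLines.
def simulate_ou_sketch(wmid, tau, sigma, seed=None):
    # exact discretization of a stationary OU process on an (irregular) grid
    rng = np.random.RandomState(seed)
    y = np.zeros_like(wmid)
    y[0] = sigma*rng.randn()
    for i in range(1, len(wmid)):
        a = np.exp(-np.abs(wmid[i] - wmid[i-1])/tau)
        y[i] = a*y[i-1] + sigma*np.sqrt(1. - a**2)*rng.randn()
    return y

plt.figure(figsize=(14,4))
plt.plot(wnew_mid, simulate_ou_sketch(wnew_mid, tau=0.5, sigma=0.05, seed=42))
plt.xlabel("Wavelength [Angstrom]")
plt.ylabel("OU background fluctuation")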
fig, (ax1, ax2) = plt.subplots(1,2,figsize=(14,6))
sns.distplot(np.log(sample[:,5]), hist_kws={"histtype":"stepfilled"}, ax=ax1)
_, ymax = ax1.get_ylim()
ax1.vlines(data["logamp"], 0, ymax, lw=3, color="black")
ax1.set_xlabel(r"$\mu_{\mathrm{\log{A}}}$")
ax1.set_ylabel(r"$N_{\mathrm{samples}}$")
ax1.set_title("Location parameter of the amplitude prior")
sns.distplot(sample[:,6], hist_kws={"histtype":"stepfilled"}, ax=ax2)
#_, ymax = ax.get_ylim()
#ax.vlines(np.exp(data["logbkg"]), 0, ymax, lw=3, color="black")
ax2.set_xlabel(r"$\sigma_{\mathrm{\log{A}}}$")
ax2.set_ylabel(r"$N_{\mathrm{samples}}$")
ax2.set_title("Scale parameter of the amplitude prior")
fig.tight_layout()
plt.savefig(datadir+froot+"_logamp_prior.png", format="png")
Explanation: Now we can look at the hyperparameters for the log-amplitude and log-q priors:
End of explanation
fig, (ax1, ax2) = plt.subplots(1,2,figsize=(14,6))
sns.distplot(sample[:,7], hist_kws={"histtype":"stepfilled"}, ax=ax1)
_, ymax = ax1.get_ylim()
ax1.vlines(data["logq"], 0, ymax, lw=3, color="black")
ax1.set_xlabel(r"$\mu_{\mathrm{\log{q}}}$")
ax1.set_ylabel(r"$N_{\mathrm{samples}}$")
ax1.set_title(r"Location parameter of the $q$ prior")
sns.distplot(sample[:,8], hist_kws={"histtype":"stepfilled"}, ax=ax2)
#_, ymax = ax.get_ylim()
#ax.vlines(np.exp(data["logbkg"]), 0, ymax, lw=3, color="black")
ax2.set_xlabel(r"$\sigma_{\mathrm{\log{q}}}$")
ax2.set_ylabel(r"$N_{\mathrm{samples}}$")
ax2.set_title(r"Scale parameter of the $q$ prior")
fig.tight_layout()
plt.savefig(datadir+froot+"_logq_prior.png", format="png")
Explanation: Let's do the same for the width:
End of explanation
fig = plt.figure(figsize=(12,9))
# Plot a histogram and kernel density estimate
ax = fig.add_subplot(111)
sns.distplot(sample[:,9], hist_kws={"histtype":"stepfilled"}, ax=ax)
ax.set_xlabel("Threshold parameter")
ax.set_ylabel("$N_{\mathrm{samples}}$")
plt.tight_layout()
plt.savefig(datadir+froot+"_pp.png", format="png")
Explanation: The next parameter (pp) in the model is the threshold determining the sign of the amplitude (i.e. whether a line is an absorption or an emission line). The sign is sampled as a random variable between $0$ and $1$; the threshold sets the boundary below which a line will become an absorption line. Above the threshold, the sign will be flipped to return an emission line.
For a spectrum with mostly absorption lines, pp should be quite high, close to $1$. For a spectrum with mostly emission lines, pp should be close to $0$.
End of explanation
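Schematically, and assuming the C++ model implements the thresholding exactly as described above, the sign assignment looks something like the snippet below (the threshold value and the number of lines are made up for illustration).
pp_illustration = 0.9                      # hypothetical threshold value
u = np.random.uniform(0.0, 1.0, size=8)    # one sign variable per line
signs_illustration = np.where(u < pp_illustration, -1.0, 1.0)
print(signs_illustration)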
fig = plt.figure(figsize=(12,9))
plt.locator_params(axis = 'x', nbins = 6)
# Plot a histogram and kernel density estimate
ax = fig.add_subplot(111)
sns.distplot(sample[:,11], hist_kws={"histtype":"stepfilled"}, ax=ax)
plt.xticks(rotation=45)
_, ymax = ax.get_ylim()
ax.vlines(data["dshift"], 0, ymax, lw=3, color="black")
ax.set_xlabel(r"Doppler shift $d=v/c$")
ax.set_ylabel("$N_{\mathrm{samples}}$")
plt.tight_layout()
plt.savefig(datadir+froot+"_dshift.png", format="png")
Explanation: Hmmm, the model doesn't seem to care much about that? Funny!
The Doppler shift is next!
End of explanation
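Before moving on, a quick numerical summary of the Doppler shift posterior (added sketch; column $11$ holds the Doppler shift, as in the plot above):
dshift_samples = sample[:,11]
print("Doppler shift: %.3e +/- %.3e (true value: %.3e)"%(np.mean(dshift_samples),
                                                         np.std(dshift_samples),
                                                         data["dshift"]))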
nlines = 8
ncolumns = 3
nrows = int(nlines/ncolumns)+1
fig = plt.figure(figsize=(nrows*4,ncolumns*4))
plt.locator_params(axis = 'x', nbins = 6)
# log-amplitudes
for i in range(8):
ax = plt.subplot(nrows, ncolumns, i+1)
sns.distplot(sample[:,12+i], hist_kws={"histtype":"stepfilled"}, ax=ax)
#ax.hist(sample[:,12+i], histtype="stepfilled", alpha=0.7)
plt.locator_params(axis = 'x', nbins = 6)
xlabels = ax.get_xticklabels()
for l in xlabels:
l.set_rotation(45)
_, ymax = ax.get_ylim()
ax.vlines(data["logamp"][i], 0, ymax, lw=3, color="black")
ax.set_xlabel(r"$\log{A}$")
ax.set_ylabel("$N_{\mathrm{samples}}$")
plt.tight_layout()
plt.savefig(datadir+froot+"_logamp.png", format="png")
Explanation: What is the posterior on all the line amplitudes and widths? Let's try overplotting them all:
End of explanation
fig = plt.figure(figsize=(ncolumns*4,nrows*4))
plt.locator_params(axis = 'x', nbins = 6)
# log-amplitudes
for i in range(8):
ax = plt.subplot(nrows, ncolumns, i+1)
sns.distplot(sample[:,20+i], hist_kws={"histtype":"stepfilled"}, ax=ax)
#ax.hist(sample[:,20+i], histtype="stepfilled", alpha=0.7)
plt.locator_params(axis = 'x', nbins = 6)
xlabels = ax.get_xticklabels()
for l in xlabels:
l.set_rotation(45)
_, ymax = ax.get_ylim()
ax.vlines(data["logq"][i], 0, ymax, lw=3, color="black")
ax.set_xlabel(r"$\log{q}$")
ax.set_ylabel("$N_{\mathrm{samples}}$")
plt.tight_layout()
plt.savefig(datadir+froot+"_logq.png", format="png")
Explanation: Looks like it samples amplitudes correctly! Let's make the same Figure for log-q:
End of explanation
fig = plt.figure(figsize=(12,9))
plt.locator_params(axis = 'x', nbins = 6)
# Plot a histogram and kernel density estimate
ax = fig.add_subplot(111)
for i in range(8):
sns.distplot(sample[:,28+i], hist_kws={"histtype":"stepfilled"}, ax=ax, alpha=0.6)
plt.xticks(rotation=45)
# (no ground-truth vertical line here: the sampled sign variables have no single true value to overplot)
ax.set_xlabel(r"Emission/absorption line sign")
ax.set_ylabel("$N_{\mathrm{samples}}$")
plt.tight_layout()
plt.savefig(datadir+froot+"_signs.png", format="png")
Explanation: Final thing, just to be sure: the signs of the amplitudes!
End of explanation
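As an added sketch, and assuming the thresholding described earlier (a sign variable below the threshold in column $9$ makes a line an absorption line), we can translate the sampled sign variables into the fraction of posterior samples in which each line comes out as an absorption line:
pp_samples = sample[:,9]
for i in range(8):
    frac_abs = np.mean(sample[:,28+i] < pp_samples)
    print("line %i: absorption line in %.1f%% of posterior samples"%(i, 100.*frac_abs))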
def plot_samples(data, sample, fout, close=True):
FIRST PLOT: SPECTRUM + SAMPLES FROM THE POSTERIOR
# number of posterior samples
nsamples = sample.shape[0]
# randomly pick some samples from the posterior to plot
s_ind = np.random.choice(np.arange(nsamples, dtype=int), size=20)
# the middle of the wavelength bins for plotting
wmid = data["wavelength_left"] + (data["wavelength_right"] - data["wavelength_left"])/2.
# the error on the data
yerr = np.zeros_like(wmid) + data['err']
plt.figure(figsize=(14,6))
plt.errorbar(wmid, data["fake_flux"], yerr=yerr, fmt="o")
for i in s_ind:
plt.plot(wmid, sample[i,-wmid.shape[0]:], lw=2, alpha=0.7)
plt.xlim(wmid[0], wmid[-1])
plt.gca().invert_xaxis()
plt.xlabel("Wavelength [Angstrom]")
plt.ylabel("Normalized Flux")
plt.tight_layout()
plt.savefig(fout+"_samples.png", format="png")
if close:
plt.close()
return
def plot_bkg(data, sample, fout, close=True):
PLOT THE BACKGROUND POSTERIOR
fig = plt.figure(figsize=(12,9))
# Plot a histogram and kernel density estimate
ax = fig.add_subplot(111)
sns.distplot(sample[:,0], hist_kws={"histtype":"stepfilled"}, ax=ax)
plt.xticks(rotation=45)
_, ymax = ax.get_ylim()
ax.vlines(np.exp(data["logbkg"]), 0, ymax, lw=3, color="black")
ax.set_xlabel("Normalized Background Flux")
ax.set_ylabel("$N_{\mathrm{samples}}$")
plt.tight_layout()
plt.savefig(fout+"_bkg.png", format="png")
if close:
plt.close()
return
def plot_ou_bkg(sample, fout, close=True):
PLOT THE POSTERIOR FOR THE OU PROCESS
fig, (ax1, ax2) = plt.subplots(1,2,figsize=(14,6))
sns.distplot(sample[:,1], hist_kws={"histtype":"stepfilled"}, ax=ax1)
#_, ymax = ax.get_ylim()
#ax.vlines(np.exp(data["logbkg"]), 0, ymax, lw=3, color="black")
ax1.set_xlabel(r"OU time scale $\tau$")
ax1.set_ylabel(r"$N_{\mathrm{samples}}$")
sns.distplot(sample[:,2], hist_kws={"histtype":"stepfilled"}, ax=ax2)
plt.xticks(rotation=45)
#_, ymax = ax.get_ylim()
#ax.vlines(np.exp(data["logbkg"]), 0, ymax, lw=3, color="black")
ax2.set_xlabel(r"OU amplitude")
ax2.set_ylabel(r"$N_{\mathrm{samples}}$")
fig.tight_layout()
plt.savefig(fout+"_ou.png", format="png")
if close:
plt.close()
return
def plot_logamp_hyper(data, sample, fout, close=True):
PLOT THE POSTERIOR FOR THE LOG-AMP HYPERPARAMETERS
fig, (ax1, ax2) = plt.subplots(1,2,figsize=(14,6))
sns.distplot(np.log(sample[:,5]), hist_kws={"histtype":"stepfilled"}, ax=ax1)
_, ymax = ax1.get_ylim()
ax1.vlines(data["logamp"], 0, ymax, lw=3, color="black")
ax1.set_xlabel(r"$\mu_{\mathrm{\log{A}}}$")
ax1.set_ylabel(r"$N_{\mathrm{samples}}$")
ax1.set_title("Location parameter of the amplitude prior")
sns.distplot(sample[:,6], hist_kws={"histtype":"stepfilled"}, ax=ax2)
plt.xticks(rotation=45)
#_, ymax = ax.get_ylim()
#ax.vlines(np.exp(data["logbkg"]), 0, ymax, lw=3, color="black")
ax2.set_xlabel(r"$\sigma_{\mathrm{\log{A}}}$")
ax2.set_ylabel(r"$N_{\mathrm{samples}}$")
ax2.set_title("Scale parameter of the amplitude prior")
fig.tight_layout()
plt.savefig(fout+"_logamp_prior.png", format="png")
if close:
plt.close()
return
def plot_logq_hyper(data, sample, fout, close=True):
PLOT THE POSTERIOR FOR THE LOG-Q HYPERPARAMETERS
fig, (ax1, ax2) = plt.subplots(1,2,figsize=(14,6))
sns.distplot(sample[:,7], hist_kws={"histtype":"stepfilled"}, ax=ax1)
_, ymax = ax1.get_ylim()
ax1.vlines(data["logq"], 0, ymax, lw=3, color="black")
ax1.set_xlabel(r"$\mu_{\mathrm{\log{q}}}$")
ax1.set_ylabel(r"$N_{\mathrm{samples}}$")
ax1.set_title(r"Location parameter of the $q$ prior")
sns.distplot(sample[:,8], hist_kws={"histtype":"stepfilled"}, ax=ax2)
plt.xticks(rotation=45)
#_, ymax = ax.get_ylim()
#ax.vlines(np.exp(data["logbkg"]), 0, ymax, lw=3, color="black")
ax2.set_xlabel(r"$\sigma_{\mathrm{\log{q}}}$")
ax2.set_ylabel(r"$N_{\mathrm{samples}}$")
ax2.set_title(r"Scale parameter of the $q$ prior")
fig.tight_layout()
plt.savefig(fout+"_logq_prior.png", format="png")
if close:
plt.close()
return
def plot_threshold(sample, fout, close=True):
PLOT THE POSTERIOR FOR THE THRESHOLD PARAMETER
fig = plt.figure(figsize=(12,9))
    # Plot a histogram and kernel density estimate
ax = fig.add_subplot(111)
sns.distplot(sample[:,9], hist_kws={"histtype":"stepfilled"}, ax=ax)
plt.xticks(rotation=45)
ax.set_xlabel("Threshold parameter")
ax.set_ylabel("$N_{\mathrm{samples}}$")
plt.tight_layout()
plt.savefig(fout+"_pp.png", format="png")
if close:
plt.close()
return
def plot_dshift(data, sample, fout, dshift_ind=0, close=True):
PLOT THE POSTERIOR FOR THE DOPPLER SHIFT
fig = plt.figure(figsize=(12,9))
plt.locator_params(axis = 'x', nbins = 6)
    # Plot a histogram and kernel density estimate
ax = fig.add_subplot(111)
sns.distplot(sample[:,11+dshift_ind], hist_kws={"histtype":"stepfilled"}, ax=ax)
plt.xticks(rotation=45)
_, ymax = ax.get_ylim()
ax.vlines(data["dshift"], 0, ymax, lw=3, color="black")
ax.set_xlabel(r"Doppler shift $d=v/c$")
ax.set_ylabel("$N_{\mathrm{samples}}$")
plt.tight_layout()
plt.savefig(fout+"_dshift%i.png"%dshift_ind, format="png")
if close:
plt.close()
return
def plot_logamp(data, sample, fout, ndshift, nlines, ncolumns=3,
dshift_ind=0, close=True):
PLOT THE POSTERIOR FOR THE LINE LOG-AMPLITUDES
nrows = int(nlines/ncolumns)+1
fig = plt.figure(figsize=(nrows*4,ncolumns*4))
plt.locator_params(axis = 'x', nbins = 6)
# index of column where the log-amplitudes start:
start_ind = 11 + ndshift + dshift_ind*nlines
# log-amplitudes
for i in range(nlines):
ax = plt.subplot(nrows, ncolumns, i+1)
# ax.hist(sample[:,start_ind+i], histtype="stepfilled", alpha=0.7)
sns.distplot(sample[:,start_ind+i], hist_kws={"histtype":"stepfilled"}, ax=ax)
plt.locator_params(axis = 'x', nbins = 6)
xlabels = ax.get_xticklabels()
for l in xlabels:
l.set_rotation(45)
_, ymax = ax.get_ylim()
ax.vlines(data["logamp"][i], 0, ymax, lw=3, color="black")
ax.set_xlabel(r"$\log{A}$")
ax.set_ylabel("$N_{\mathrm{samples}}$")
plt.tight_layout()
if dshift_ind == 0:
plt.savefig(fout+"_logamp.png", format="png")
else:
plt.savefig(fout+"_logamp%i.png"%dshift_ind, format="png")
if close:
plt.close()
return
def plot_logq(data, sample, fout, ndshift, nlines, ncolumns=3,
dshift_ind=0, close=True):
PLOT THE POSTERIOR FOR THE LINE LOG-Q
nrows = int(nlines/ncolumns)+1
fig = plt.figure(figsize=(nrows*4,ncolumns*4))
plt.locator_params(axis = 'x', nbins = 6)
# set starting index for the logq values:
start_ind = 11 + ndshift + nlines*(dshift_ind + 1)
# log-amplitudes
for i in range(nlines):
ax = plt.subplot(nrows, ncolumns, i+1)
#ax.hist(sample[:,start_ind+i], histtype="stepfilled", alpha=0.7)
sns.distplot(sample[:,start_ind+i], hist_kws={"histtype":"stepfilled"}, ax=ax)
plt.locator_params(axis = 'x', nbins = 6)
xlabels = ax.get_xticklabels()
for l in xlabels:
l.set_rotation(45)
_, ymax = ax.get_ylim()
ax.vlines(data["logq"][i], 0, ymax, lw=3, color="black")
ax.set_xlabel(r"$\log{q}$")
ax.set_ylabel("$N_{\mathrm{samples}}$")
plt.tight_layout()
if dshift_ind == 0:
plt.savefig(fout+"_logq.png", format="png")
else:
plt.savefig(fout+"_logq%i.png"%dshift_ind, format="png")
if close:
plt.close()
return
def plot_signs(data, sample, fout, ndshift, nlines, ncolumns=3,
dshift_ind=0, close=True):
PLOT THE POSTERIOR FOR THE LINE AMPLITUDE SIGNS
nrows = int(nlines/ncolumns)+1
fig = plt.figure(figsize=(nrows*4,ncolumns*4))
plt.locator_params(axis = 'x', nbins = 6)
# set starting index for the logq values:
start_ind = 11 + ndshift + nlines*(dshift_ind + 2)
# log-amplitudes
for i in range(nlines):
ax = plt.subplot(nrows, ncolumns, i+1)
# ax.hist(sample[:,start_ind+i], histtype="stepfilled", alpha=0.7)
sns.distplot(sample[:,start_ind+i], hist_kws={"histtype":"stepfilled"}, ax=ax)
plt.locator_params(axis = 'x', nbins = 6)
xlabels = ax.get_xticklabels()
for l in xlabels:
l.set_rotation(45)
ax.set_xlabel(r"Emission/absorption line sign")
ax.set_ylabel("$N_{\mathrm{samples}}$")
plt.tight_layout()
if dshift_ind == 0:
plt.savefig(fout+"_signs.png", format="png")
else:
plt.savefig(fout+"_signs%i.png"%dshift_ind, format="png")
if close:
plt.close()
return
def plot_posterior_summary(froot, datadir="../data/", nlines=8,
ndshift=1, ncolumns=3, close=True):
    Plot summaries of the posterior distribution. Mostly histograms.
Plots a bunch of Figures to png files.
Parameters
----------
froot: str
The root string of the data file and file with posterior samples to be loaded.
datadir: str
The directory with the data.
Default: "../data/"
nlines: int
The number of lines in the model.
Default: 8
ndshift: int
The number of (possible) Doppler shifts in the model.
Default: 1
ncolumns: int
The number of columns in multi-panel plots. Default: 3
close: bool
Close plots at the end of the plotting? Default: True
# the pickle file with the data + parameters:
f = open(datadir+froot+"_data.pkl")
data = pickle.load(f)
f.close()
print("Keys in data dictionary: " + str(data.keys()))
# the posterior samples
sample = np.atleast_2d(np.loadtxt(datadir+froot+"_posterior_sample.txt"))
nsamples = sample.shape[0]
print("We have %i samples from the posterior."%nsamples)
# set the directory path and file name for output files:
fout = datadir+froot
# plot the spectrum with some draws from the posterior
plot_samples(data, sample, fout, close=close)
# plot a histogram of the background parameter
plot_bkg(data, sample, fout, close=close)
# plot histograms of the OU parameters
plot_ou_bkg(sample, fout, close=close)
# plot the hyper parameters of the log-amp prior
plot_logamp_hyper(data, sample, fout, close=close)
# plot the hyper parameters of the log-q prior
plot_logq_hyper(data, sample, fout, close=close)
# plot the threshold for the amplitude sign
plot_threshold(sample, fout, close=close)
# for the varying number of Doppler shifts, plot their posterior
if ndshift == 1:
plot_dshift(data, sample, fout, dshift_ind=0, close=close)
plot_logamp(data, sample, fout, ndshift, nlines,
ncolumns=ncolumns, dshift_ind=0, close=close)
plot_logq(data, sample, fout, ndshift, nlines,
ncolumns=ncolumns, dshift_ind=0, close=close)
plot_signs(data, sample, fout, ndshift, nlines,
ncolumns=ncolumns, dshift_ind=0, close=close)
else:
for i in range(ndshift):
plot_dshift(data, sample, fout, dshift_ind=i, close=close)
plot_logamp(data, sample, fout, ndshift, nlines,
ncolumns=ncolumns, dshift_ind=i, close=close)
plot_logq(data, sample, fout, ndshift, nlines,
ncolumns=ncolumns, dshift_ind=i, close=close)
plot_signs(data, sample, fout, ndshift, nlines,
ncolumns=ncolumns, dshift_ind=i, close=close)
return
Explanation: A General Plotting function for the Posterior
We'll make some individual plotting functions that we can then combine to plot useful Figures on the whole posterior sample!
End of explanation
plot_posterior_summary(froot, datadir="../data/", nlines=8, ndshift=1, ncolumns=3,
close=True)
Explanation: Let's run this function on the data we just made individual plots from to see whether it worked.
The model had $8$ lines and $1$ redshift:
End of explanation
def fake_data(wleft, wright, line_pos, input_pars, froot):
Simulate spectra, including a (constant) background and a set of
(Gaussian) lines with given positions.
The model integrates over wavelength/frequency/energy bins, hence
it requires the left and right bin edges rather than the centre of
the bin.
Produces (1) a pickle file with the simulated data and parameters used
to produce the simulation; (2) an ASCII file that can be fed straight into
ShiftyLines; (3) a Figure of the simulated data and the model that
produced it.
Parameters
----------
wleft: numpy.ndarray
The left bin edges of the wavelength/frequency/energy bins
wright: numpy.ndarray
The right bin edges of the wavelength/frequency/energy bins
line_pos: numpy.ndarray
The positions of the spectral lines in the same units as
`wleft` and `wright`; will be translated into centroid
wavelength/frequency/energy of the Gaussian modelling the line
input_pars: dict
A dictionary containing the following keywords (for detailed
descriptions see the docstring for `fake_spectrum`):
logbkg: the log-background
err: the error on the data points
dshift: the Doppler shift (just one!)
sample_logamp: sample the log-amplitudes?
sample_logq: sample the log-q values?
sample_signs: sample the amplitude signs (if no, all lines
are absorption lines!)
logamp_mean: location of normal sampling distribution for log-amplitudes
logq_mean: location of normal sampling distribution for log-q
logamp_sigma: scale of normal sampling distribution for log-amplitudes
logq_sigma: scale of normal sampling distribution for log-q
nzero: Number of lines to set to zero
froot: str
A string describing the directory and file name of the output files
# read out all the parameters
logbkg = input_pars["logbkg"]
err = input_pars["err"]
dshift = input_pars["dshift"]
sample_logamp = input_pars["sample_logamp"]
sample_logq = input_pars["sample_logq"]
sample_signs = input_pars["sample_signs"]
logamp_mean = input_pars["logamp_mean"]
logq_mean = input_pars["logq_mean"]
logamp_sigma = input_pars["logamp_sigma"]
logq_sigma = input_pars["logq_sigma"]
nzero = input_pars["nzero"]
# simulate fake spectrum
pars = fake_spectrum(wleft, wright, line_pos, logbkg=logbkg, err=err, dshift=dshift,
sample_logamp=sample_logamp, sample_logq=sample_logq,
logamp_hypermean=logamp_mean, logq_hypermean=logq_mean,
logamp_hypersigma=logamp_sigma, logq_hypersigma=logq_sigma,
sample_signs=sample_signs, nzero=nzero)
# read out model and fake flux, construct error
model_flux = pars["model_flux"]
fake_flux = pars["fake_flux"]
fake_err = np.zeros_like(fake_flux) + pars["err"]
# get the middle of each bin
wmid = wleft + (wright-wleft)/2.
# plot the resulting data and model to a file
plt.figure(figsize=(14,6))
plt.errorbar(wmid, fake_flux, yerr=fake_err, fmt="o", label="simulated flux", alpha=0.7)
plt.plot(wmid, model_flux, label="simulated model", lw=3)
plt.xlim(wmid[0], wmid[-1])
plt.gca().invert_xaxis()
plt.legend(prop={"size":18})
plt.xlabel("Wavelength [Angstrom]")
plt.ylabel("Normalized Flux")
plt.savefig(froot+"_lc.png", format="png")
# save the whole dictionary in a pickle file
f = open(froot+"_data.pkl", "w")
pickle.dump(pars, f)
f.close()
# save the fake data in an ASCII file for input into ShiftyLines
np.savetxt(froot+".txt", np.array([wleft, wright, fake_flux, fake_err]).T)
return
Explanation: A general function for simulating data sets
Based on what we just did, we'll write a function that takes parameters as an input and spits out the files we need:
End of explanation
froot = "../data/test_noshift2"
input_pars = {}
# set amplitude and q
input_pars["logamp_mean"] = np.log(0.1)
input_pars["logq_mean"] = np.log(600.)
# set the width of the amplitude and q distribution (not used here)
input_pars["logamp_sigma"] =np.log(0.08)
input_pars["logq_sigma"] = np.log(50)
# set Doppler shift
input_pars["dshift"] = 0.0
# set background
input_pars["logbkg"] = np.log(0.09)
# do not sample amplitudes or q-factors(all are the same!)
input_pars["sample_logamp"] = False
input_pars["sample_logq"] = False
# lines are either absorption or emission lines this time!
input_pars["sample_signs"] = True
# error on the data points (will sample from a Gaussian distribution)
input_pars["err"] = 0.007
# do not set any lines to zero!
input_pars["nzero"] = 0
fake_data(wnew_left, wnew_right, si_all_val, input_pars, froot)
Explanation: A Spectrum with Weak Absorption Lines
The model should still work if the lines are very weak. We will simulate a spectrum with weak lines to test how the strength of the lines will affect the inferences drawn from the model:
End of explanation
move_dnest_output("../data/%s"%froot, "../code/")
plot_posterior_summary(froot, datadir="../data/", nlines=8, ndshift=1,
ncolumns=3, close=False)
Explanation: This time, we run DNest4 with $100$ levels.
Results
Let's first move the samples into the right directory and give it the right filename
End of explanation
froot = "../data/test_noshift3"
input_pars = {}
# set amplitude and q
input_pars["logamp_mean"] = np.log(0.3)
input_pars["logq_mean"] = np.log(600.)
# set the width of the amplitude and q distribution (not used here)
input_pars["logamp_sigma"] = np.log(0.08)
input_pars["logq_sigma"] = np.log(50)
# set Doppler shift
input_pars["dshift"] = 0.0
# set background
input_pars["logbkg"] = np.log(0.09)
# do not sample amplitudes or q-factors(all are the same!)
input_pars["sample_logamp"] = False
input_pars["sample_logq"] = False
# lines are either absorption or emission lines this time!
input_pars["sample_signs"] = True
# error on the data points (will sample from a Gaussian distribution)
input_pars["err"] = 0.007
# do not set any lines to zero!
input_pars["nzero"] = 0
Explanation: It looks like for this spectrum the amplitude really is too weak to constrain anything, so the Doppler shift does whatever the hell it wants.
I'm not sure I like this behaviour; I might need to ask Brendon about it!
A Spectrum With Emission+Absorption Lines
We'll do the same test as the first, but with varying strong emission and absorption lines:
End of explanation
fake_data(wnew_left, wnew_right, si_all_val, input_pars, froot)
Explanation: Now run the new function:
End of explanation
move_dnest_output(froot, dnest_dir="../code/")
plot_posterior_summary(froot, datadir="../data/", nlines=8, ndshift=1, ncolumns=3,
close=False)
Explanation: Run the model the same as with test_noshift2.txt, but with $150$ levels.
Results
End of explanation
froot = "../data/test_noshift4"
input_pars = {}
# set amplitude and q
input_pars["logamp_mean"] = np.log(0.3)
input_pars["logq_mean"] = np.log(600.)
# set the width of the amplitude and q distribution (only the amplitude width is used here)
input_pars["logamp_sigma"] = 0.5
input_pars["logq_sigma"] = np.log(50)
# set Doppler shift
input_pars["dshift"] = 0.0
# set background
input_pars["logbkg"] = np.log(0.09)
# sample amplitudes, but not q-factors(all are the same!)
input_pars["sample_logamp"] = True
input_pars["sample_logq"] = False
# all lines are absorption lines this time (the signs are not sampled)!
input_pars["sample_signs"] = False
# error on the data points (will sample from a Gaussian distribution)
input_pars["err"] = 0.007
# do not set any lines to zero!
input_pars["nzero"] = 0
fake_data(wnew_left, wnew_right, si_all_val, input_pars, froot)
Explanation: A Spectrum With Variable Absorption Lines
In this test, we'll see how the model deals with variable absorption lines:
End of explanation
move_dnest_output(froot, dnest_dir="../code/")
plot_posterior_summary(froot, datadir="../data/", nlines=8, ndshift=1, ncolumns=3,
close=False)
Explanation: Results
I set the number of levels to $200$.
End of explanation
froot = "../data/test_noshift5"
input_pars = {}
# set amplitude and q
input_pars["logamp_mean"] = np.log(0.3)
input_pars["logq_mean"] = np.log(600.)
# set the width of the amplitude and q distribution (not used here)
input_pars["logamp_sigma"] = 0.5
input_pars["logq_sigma"] = np.log(50)
# set Doppler shift
input_pars["dshift"] = 0.0
# set background
input_pars["logbkg"] = np.log(0.09)
# do not sample amplitudes or q-factors(all are the same!)
input_pars["sample_logamp"] = False
input_pars["sample_logq"] = False
# all lines are absorption lines this time (the signs are not sampled)!
input_pars["sample_signs"] = False
# error on the data points (will sample from a Gaussian distribution)
input_pars["err"] = 0.007
# Set three lines straight to zero!
input_pars["nzero"] = 3
np.random.seed(20160221)
fake_data(wnew_left, wnew_right, si_all_val, input_pars, froot)
Explanation: A Spectrum with Lines Turned Off
What does the model do if lines are just not there? This is an important question,
so we will now make a spectrum with three lines having amplitudes $A=0$:
End of explanation
plot_posterior_summary(froot, datadir="../data/", nlines=8, ndshift=1, ncolumns=3,
close=False)
Explanation: Results
End of explanation
froot = "../data/test_shift1"
input_pars = {}
# set amplitude and q
input_pars["logamp_mean"] = np.log(0.3)
input_pars["logq_mean"] = np.log(600.)
# set the width of the amplitude and q distribution (not used here)
input_pars["logamp_sigma"] = 0.5
input_pars["logq_sigma"] = np.log(50)
# set Doppler shift
input_pars["dshift"] = 0.01
# set background
input_pars["logbkg"] = np.log(0.09)
# do not sample amplitudes or q-factors(all are the same!)
input_pars["sample_logamp"] = False
input_pars["sample_logq"] = False
# lines are only absorption lines this time!
input_pars["sample_signs"] = False
# error on the data points (will sample from a Gaussian distribution)
input_pars["err"] = 0.007
# do not set any lines to zero!
input_pars["nzero"] = 0
np.random.seed(20160220)
fake_data(wnew_left, wnew_right, si_all_val, input_pars, froot)
Explanation: A Doppler-shifted Spectrum with Absorption lines
We are now going to look at how well the model constrains the Doppler shift.
Again, we build a simple model where all lines have the same amplitude.
End of explanation
move_dnest_output(froot, "../code/")
plot_posterior_summary(froot, datadir="../data/", nlines=8, ndshift=1, ncolumns=3,
close=False)
Explanation: Results
End of explanation
froot = "../data/test_shift2"
input_pars = {}
# set amplitude and q
input_pars["logamp_mean"] = np.log(0.3)
input_pars["logq_mean"] = np.log(600.)
# set the width of the amplitude and q distribution (only the amplitude width is used here)
input_pars["logamp_sigma"] = 0.5
input_pars["logq_sigma"] = np.log(50)
# set Doppler shift
input_pars["dshift"] = 0.01
# set background
input_pars["logbkg"] = np.log(0.09)
# sample amplitudes, but not q-factors (the q-factors are all the same!)
input_pars["sample_logamp"] = True
input_pars["sample_logq"] = False
# lines can be either absorption or emission lines this time!
input_pars["sample_signs"] = True
# error on the data points (will sample from a Gaussian distribution)
input_pars["err"] = 0.007
# do not set any lines to zero!
input_pars["nzero"] = 0
np.random.seed(20160221)
fake_data(wnew_left, wnew_right, si_all_val, input_pars, froot)
Explanation: A Shifted Spectrum with Emission/Absorption Lines with Variable Amplitudes and Signs
More complicated: a spectrum with a single Doppler shift and variable line amplitudes of both emission and absorption lines:
End of explanation
froot = "../data/test_extended_shift1"
input_pars = {}
# set amplitude and q
input_pars["logamp_mean"] = np.log(0.2)
input_pars["logq_mean"] = np.log(600.)
# set the width of the amplitude and q distribution (only the amplitude width is used here)
input_pars["logamp_sigma"] = 0.4
input_pars["logq_sigma"] = np.log(50)
# set Doppler shift
input_pars["dshift"] = 0.01
# set background
input_pars["logbkg"] = np.log(0.09)
# sample amplitudes, but not q-factors (the q-factors are all the same!)
input_pars["sample_logamp"] = True
input_pars["sample_logq"] = False
# lines can be either absorption or emission lines this time!
input_pars["sample_signs"] = True
# error on the data points (will sample from a Gaussian distribution)
input_pars["err"] = 0.007
# do not set any lines to zero!
input_pars["nzero"] = 0
np.random.seed(20162210)
fake_data(wavelength_left, wavelength_right, lines_extended, input_pars, froot)
Explanation: Results
Adding a Noise Process
At this point, I should be adding an OU process to the data generation process to simulate the effect of a variable background in the spectrum.
THIS IS STILL TO BE DONE!
Testing Multiple Doppler Shift Components
For all of the above simulations, we also ought to test how well the model works if I add additional Doppler shift components to sample over.
For this, you'll need to change the line
:dopplershift(3*nlines+1, 1, true, MyConditionalPrior())
in MyModel.cpp to read
:dopplershift(3*nlines+1, 3, false, MyConditionalPrior())
and recompile.
This will sample over up to three different Doppler shifts at the same time. In theory, we expect the posterior on the number of Doppler shift components to have a strong mode either at zero (for the non-Doppler-shifted data) or at $1$ for all types of data set (where, for the unshifted data, the Doppler shift value itself is strongly constrained to $0$).
Results
A place holder for the results from this experiment.
The extended Spectrum: More lines!
Let's also make some simulations for the extended spectrum with more lines. This will require the lines_extended.txt file. The file name for the file with the line centroids is currently hard-coded in the main.cpp file.
Change the line
Data::get_instance().load_lines("../data/si_lines.txt");
to
Data::get_instance().load_lines("../data/lines_extended.txt");
and also change the Doppler shifts in MyModel.cpp back to
:dopplershift(3*nlines+1, 1, true, MyConditionalPrior())
before recompiling. We will change the last line again in a little while, but first we'll test the general performance of the model on the extended data set.
An extended spectrum with variable line amplitudes and a single Doppler shift
End of explanation
froot = "test_extended_shift2"
# set amplitude and q
logamp_mean = np.log(0.2)
logq_mean = np.log(600.)
# set the width of the amplitude and q distribution (only the amplitude width is used here)
logamp_sigma = 0.4
logq_sigma = np.log(50)
# set Doppler shift
dshift1 = 0.01
dshift2 = 0.02
# set background
logbkg1 = np.log(0.09)
logbkg2 = -15.
# sample amplitudes, but not q-factors (the q-factors are all the same!)
sample_logamp = True
sample_logq = False
# lines can be either absorption or emission lines this time!
sample_signs = True
# error on the data points (will sample from a Gaussian distribution)
err = 0.007
# do not set any lines to zero!
nzero = 0
np.random.seed(20160201)
pars1 = fake_spectrum(wavelength_left, wavelength_right, si_all_val, logbkg=logbkg1,
dshift=dshift1, err=err, sample_logamp=sample_logamp,
sample_logq=sample_logq, logamp_hypermean=logamp_mean,
logamp_hypersigma=logamp_sigma, logq_hypermean=logq_mean,
logq_hypersigma=logq_sigma, sample_signs=sample_signs, nzero=nzero)
pars2 = fake_spectrum(wavelength_left, wavelength_right, other_lines_all_val, logbkg=logbkg2,
dshift=dshift2, err=err, sample_logamp=sample_logamp,
sample_logq=sample_logq, logamp_hypermean=logamp_mean,
logamp_hypersigma=logamp_sigma, logq_hypermean=logq_mean,
logq_hypersigma=logq_sigma, sample_signs=sample_signs, nzero=nzero)
model_flux_c = pars1["model_flux"]+pars2["model_flux"]
fake_flux_c = model_flux_c + np.random.normal(0.0, err, size=model_flux_c.shape[0])
fake_err_c = np.zeros_like(fake_flux_c) + pars1["err"]
plt.figure(figsize=(14,6))
plt.errorbar(wnew_mid, fake_flux, yerr=fake_err, fmt="o", label="simulated flux", alpha=0.7)
plt.plot(wnew_mid, model_flux, label="simulated model", lw=3)
plt.xlim(wnew_mid[0], wnew_mid[-1])
plt.gca().invert_xaxis()
plt.legend(prop={"size":18})
plt.xlabel("Wavelength [Angstrom]")
plt.ylabel("Normalized Flux")
plt.savefig(datadir+froot+"_lc.png", format="png")
Explanation: A Spectrum with Two Doppler Shifts
What if the lines are shifted with respect to each other?
Let's simulate a spectrum where the silicon lines are Doppler shifted by one value, but the other lines are shifted by a different Doppler shift.
End of explanation
pars = {"wavelength_left": wavelength_left, "wavelength_right": wavelength_right, "err":err,
"model_flux": model_flux_c, "fake_flux": fake_flux_c,
"dshift": [dshift1, dshift2],
"line_pos": np.hstack([pars1["line_pos"], pars2["line_pos"]]),
"logamp": np.hstack([pars1["logamp"], pars2["logamp"]]),
"signs": np.hstack([pars1["signs"], pars2["signs"]]),
"logq": np.hstack([pars1["logq"], pars2["logq"]]) }
# save the whole dictionary in a pickle file
f = open(froot+"_data.pkl", "w")
pickle.dump(pars, f)
f.close()
# save the fake data in an ASCII file for input into ShiftyLines
np.savetxt(froot+".txt", np.array([wavelength_left, wavelength_right,
fake_flux_c, fake_err_c]).T)
Explanation: Let's save all the output files as we did before:
End of explanation
logmin = -1.e16
def gaussian_cdf(x, w0, width):
return 0.5*(1. + scipy.special.erf((x-w0)/(width*np.sqrt(2.))))
def spectral_line(wleft, wright, w0, amplitude, width):
Use the CDF of a Gaussian distribution to define spectral
lines. We use the CDF to integrate over the energy bins,
rather than taking the mid-bin energy.
Parameters
----------
wleft: array
Left edges of the energy bins
wright: array
Right edges of the energy bins
w0: float
The centroid of the line
amplitude: float
The amplitude of the line
width: float
The width of the line
Returns
-------
line_flux: array
The array of line fluxes integrated over each bin
line_flux = amplitude*(gaussian_cdf(wright, w0, width)-
gaussian_cdf(wleft, w0, width))
return line_flux
class LinePosterior(object):
def __init__(self, x_left, x_right, y, yerr, line_pos):
A class to compute the posterior of all the lines in
a spectrum.
Parameters
----------
x_left: np.ndarray
The left edges of the independent variable (wavelength bins)
x_right: np.ndarray
The right edges of the independent variable (wavelength bins)
y: np.ndarray
The dependent variable (flux)
yerr: np.ndarray
The uncertainty on the dependent variable (flux)
line_pos: np.ndarray
The rest-frame positions of the spectral lines
Attributes
----------
x_left: np.ndarray
The left edges of the independent variable (wavelength bins)
x_right: np.ndarray
The right edges of the independent variable (wavelength bins)
x_mid: np.ndarray
The mid-bin positions
y: np.ndarray
The dependent variable (flux)
yerr: np.ndarray
The uncertainty on the dependent variable (flux)
line_pos: np.ndarray
The rest-frame positions of the spectral lines
nlines: int
The number of lines in the model
self.x_left = x_left
self.x_right = x_right
self.x_mid = x_left + (x_right-x_left)/2.
self.y = y
assert np.size(yerr) == 1, "Multiple errors are not supported!"
self.yerr = yerr
self.line_pos = line_pos
self.nlines = len(line_pos)
def logprior(self, t0):
The prior of the model. Currently there are Gaussian priors on the
line width as well as the amplitude and the redshift.
Parameters
----------
t0: iterable
The list or array with the parameters of the model
Contains:
* Doppler shift
* a background parameter
* all line amplitudes
* all line widths
Returns
-------
logp: float
The log-prior of the model
# t0 must have twice the number of lines (amplitude, width for each) plus a
# background plus the redshift
assert len(t0) == 2*self.nlines+2, "Wrong number of parameters!"
# get out the individual parameters
dshift = t0[0] # Doppler shift
log_bkg = t0[1]
amps = t0[2:2+self.nlines]
log_widths = t0[2+self.nlines:]
# prior on the Doppler shift is Gaussian
dshift_hypermean = 0.0
dshift_hypersigma = 0.01
dshift_prior = scipy.stats.norm(dshift_hypermean, dshift_hypersigma)
p_dshift = np.log(dshift_prior.pdf(dshift))
#print("p_dshift: " + str(p_dshift))
# Prior on the background is uniform
logbkg_min = -10.0
logbkg_max = 10.0
p_bkg = (log_bkg >= logbkg_min and log_bkg <= logbkg_max)/(logbkg_max-logbkg_min)
if p_bkg == 0:
p_logbkg = logmin
else:
p_logbkg = 0.0
#print("p_logbkg: " + str(p_logbkg))
# prior on the amplitude is Gaussian
amp_hypermean = 0.0
amp_hypersigma = 0.1
amp_prior = scipy.stats.norm(amp_hypermean, amp_hypersigma)
p_amp = 0.0
for a in amps:
p_amp += np.log(amp_prior.pdf(a))
#print("p_amp: " + str(p_amp))
# prior on the log-widths is uniform:
logwidth_min = -5.
logwidth_max = 3.
p_logwidths = 0.0
for w in log_widths:
#print("w: " + str(w))
p_width = (w >= logwidth_min and w <= logwidth_max)/(logwidth_max-logwidth_min)
if p_width == 0.0:
p_logwidths += logmin
else:
continue
#print("p_logwidths: " + str(p_logwidths))
logp = p_dshift + p_logbkg + p_amp + p_logwidths
return logp
@staticmethod
def _spectral_model(x_left, x_right, dshift, logbkg, line_pos, amplitudes, logwidths):
The spectral model underlying the data. It uses the object
attributes `x_left` and `x_right` to integrate over the bins
correctly.
Parameters
----------
x_left: np.ndarray
The left bin edges of the ordinate (wavelength bins)
x_right: np.ndarray
The right bin edges of the ordinate (wavelength bins)
dshift: float
The Doppler shift
logbkg: float
Logarithm of the constant background level
line_pos: iterable
The rest frame positions of the line centroids in the same
units as `x_left` and `x_right`
amplitudes: iterable
The list of all line amplitudes
logwidths: iterable
The list of the logarithm of all line widths
Returns
-------
flux: np.ndarray
The integrated flux in the bins defined by `x_left` and `x_right`
assert len(line_pos) == len(amplitudes), "Line positions and amplitudes must have same length"
assert len(line_pos) == len(logwidths), "Line positions and widths must have same length"
# shift the line position by the redshift
line_pos_shifted = line_pos + dshift
#print(line_pos_shifted)
# exponentiate logarithmic quantities
bkg = np.exp(logbkg)
widths = np.exp(logwidths)
#print(widths)
# background flux
flux = np.zeros_like(x_left) + bkg
#print(amplitudes)
# add all the line fluxes
for x0, a, w in zip(line_pos_shifted, amplitudes, widths):
flux += spectral_line(x_left, x_right, x0, a, w)
return flux
def loglikelihood(self, t0):
The Gaussian likelihood of the model.
Parameters
----------
t0: iterable
The list or array with the parameters of the model
Contains:
* Doppler shift
* a background parameter
* all line amplitudes
* all line widths
Returns
-------
loglike: float
The log-likelihood of the model
assert len(t0) == 2*self.nlines+2, "Wrong number of parameters!"
# get out the individual parameters
dshift = t0[0] # Doppler shift
logbkg = t0[1]
amplitudes = t0[2:2+self.nlines]
logwidths = t0[2+self.nlines:]
model_flux = self._spectral_model(self.x_left, self.x_right,
dshift, logbkg, self.line_pos,
amplitudes, logwidths)
loglike = -(len(self.y)/2.)*np.log(2.*np.pi*self.yerr**2.) - \
np.sum((self.y-model_flux)**2./(2.*self.yerr**2.))
return loglike
def logposterior(self, t0):
The Gaussian likelihood of the model.
Parameters
----------
t0: iterable
The list or array with the parameters of the model
Contains:
* Doppler shift
* a background parameter
* all line amplitudes
* all line widths
Returns
-------
logpost: float
The log-likelihood of the model
# assert the number of input parameters is correct:
assert len(t0) == 2*self.nlines+2, "Wrong number of parameters!"
logpost = self.logprior(t0) + self.loglikelihood(t0)
#print("prior: " + str(self.logprior(t0)))
#print("likelihood: " + str(self.loglikelihood(t0)))
#print("posterior: " + str(self.logposterior(t0)))
if np.isfinite(logpost):
return logpost
else:
return logmin
def __call__(self, t0):
return self.logposterior(t0)
Explanation: Results
A simple model for Doppler-shifted Spectra
Below we define a basic toy model which samples over all the line amplitudes and widths, as well as a single Doppler shift. We'll later extend this to work in DNest4.
End of explanation
data = np.loadtxt(datadir+"test_spectra_noshift_sameamp_samewidth3.txt")
x_left = data[:,0]
x_right = data[:,1]
flux = data[:,3]
f_err = 0.007
lpost = LinePosterior(x_left, x_right, flux, f_err, si_all_val)
plt.errorbar(lpost.x_mid, flux, yerr=f_err, fmt="o", label="Fake data")
plt.plot(lpost.x_mid, data[:,4], label="Underlying model")
d_test = 0.0
bkg_test = np.log(0.09)
amp_test = np.zeros_like(si_all_val) - 0.5
logwidth_test = np.log(np.zeros_like(si_all_val) + 0.01)
p_test = np.hstack([d_test, bkg_test, amp_test, logwidth_test])
lpost.logprior(p_test)
d_test = 0.0
bkg_test = np.log(0.09)
amp_test = np.zeros_like(si_all_val) - 0.5
logwidth_test = np.log(np.zeros_like(si_all_val) + 0.01)
p_test = np.hstack([d_test, bkg_test, amp_test, logwidth_test])
lpost.loglikelihood(p_test)
nwalkers = 1000
niter = 300
burnin = 300
# starting positions for all parameters, from the prior
# the values are taken from the `logprior` method in `LinePosterior`.
# If the hyperparameters of the prior change in there, they'd better
# change here, too!
dshift_start = np.random.normal(0.0, 0.01, size=nwalkers)
logbkg_start = np.random.uniform(-10., 10., size=nwalkers)
amp_start = np.random.normal(0.0, 0.1, size=(lpost.nlines, nwalkers))
logwidth_start = np.random.uniform(-5., 3., size=(lpost.nlines, nwalkers))
p0 = np.vstack([dshift_start, logbkg_start, amp_start, logwidth_start]).T
ndim = p0.shape[1]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lpost)
pos, prob, state = sampler.run_mcmc(p0, burnin)
sampler.reset()
## do the actual MCMC run
pos, prob, state = sampler.run_mcmc(pos, niter, rstate0=state)
mcall = sampler.flatchain
mcall.shape
for i in range(mcall.shape[1]):
pmean = np.mean(mcall[:,i])
pstd = np.std(mcall[:,i])
print("Parameter %i: %.4f +/- %.4f"%(i, pmean, pstd))
mcall.shape
corner.corner(mcall);
lpost = LinePosterior(x_left, x_right, flux, f_err, si_all_val)
plt.errorbar(lpost.x_mid, flux, yerr=f_err, fmt="o", label="Fake data")
plt.plot(lpost.x_mid, data[:,4], label="Underlying model")
randind = np.random.choice(np.arange(mcall.shape[0]), replace=False, size=20)
for ri in randind:
ri = int(ri)
p = mcall[ri]
dshift = p[0]
logbkg = p[1]
line_pos = lpost.line_pos
amplitudes = p[2:2+lpost.nlines]
logwidths = p[2+lpost.nlines:]
plt.plot(lpost.x_mid, lpost._spectral_model(x_left, x_right, dshift, logbkg, line_pos,
amplitudes, logwidths))
Explanation: Now we can use emcee to sample.
We're going to use one of our example data sets, one with no Doppler shift but with strong lines:
End of explanation |
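One quick diagnostic worth adding after the run (sketch): emcee's mean acceptance fraction, which for a healthy ensemble typically ends up somewhere around 0.2-0.5.
print("Mean acceptance fraction: %.3f"%np.mean(sampler.acceptance_fraction))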
11,334 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Chapter 1
Step1: Essential Libraries and Tools
NumPy
Step2: SciPy
Step3: Usually it isn't possible to create dense representations of sparse data (they won't fit in memory), so we need to create sparse representations directly.
Here is a way to create the same sparse matrix as before using the COO format
Step4: More details on SciPy sparse matrices can be found in the SciPy Lecture Notes.
matplotlib
Step5: pandas
Here is a small example of creating a pandas DataFrame using a Python dictionary.
Step6: There are several possible ways to query this table.
Here is one example
Step7: mglearn
The mglearn package is a library of utility functions written specifically for this book, so that the code listings don't become too cluttered with details of plotting and data loading.
The mglearn library can be found at the author's Github repository, and can be installed with the command pip install mglearn.
Note
All of the code in this book will assume the following imports
Step8: A First Application
Step9: The iris object that is returned by load_iris is a Bunch object, which is very similar to a dictionary.
It contains keys and values
Step10: The value of the key DESCR is a short description of the dataset.
Step11: The value of the key target_names is an array of strings, containing the species of flower that we want to predict.
Step12: The value of feature_names is a list of strings, giving the description of each feature
Step13: The data itself is contained in the target and data fields.
data contains the numeric measurements of sepal length, sepal width, petal length, and petal width in a NumPy array
Step14: The rows in the data array correspond to flowers, while the columns represent the four measurements that were taken for each flower.
Step15: The shape of the data array is the number of samples (flowers) multiplied by the number of features (properties, e.g. sepal width).
Here are the feature values for the first five samples
Step16: The data tells us that all of the first five flowers have a petal width of 0.2 cm and that the first flower has the longest sepal (5.1 cm)
The target array contains the species of each of the flowers that were measured, also as a NumPy array
Step17: target is a one-dimensional array, with one entry per flower
Step18: The species are encoded as integers from 0 to 2
Step19: The meanings of the numbers are given by the iris['target_names'] array
Step20: The output of the train_test_split function is X_train, X_test, y_train, and y_test, which are all NumPy arrays.
X_train contains 75% of the rows in the dataset, and X_test contains the remaining 25%.
Step21: First Things First
Step22: From the plots, we can see that the three classes seem to be relatively well separated using the sepal and petal measurements.
This means that a machine learning model will likely be able to learn to separate them.
Building Your First Model
Step23: The knn object encapsulates the algorithm that will be used to build the model from the training data, as well as the algorithm to make predictions on new data points.
It will also hold the information that the algorithm has extracted from the training data.
In the case of KNeighborsClassifier, it will just store the training set.
To build the model on the training set, we call the fit method of the knn object, which takes as arguments the NumPy array X_train containing the training data and the NumPy array y_train of the corresponding training labels
Step24: The fit method returns the knn object itself (and modifies it in place), so we get a string representation of our classifier.
The representation shows us which parameters were used in creating the model.
Nearly all of them are the default values, but you can also find n_neighbors=1, which is the parameter that we passed.
Most models in scikit-learn have many parameters, but the majority of them are either speed optimizations or for very special use cases.
The important parameters will be covered in Chapter 2.
Making Predictions
Now we can make predictions using this model on new data which isn't labeled.
Let's use an example iris with a sepal length of 5cm, sepal width of 2.9cm, petal length of 1cm, and petal width of 0.2cm.
We can put this data into a NumPy array by calculating the shape, which is the number of samples(1) multiplied by the number of features(4)
Step25: Note that we made the measurements of this single flower into a row in a two-dimensional NumPy array.
scikit-learn always expects two-dimensional arrays for the data.
Now, to make a prediction, we call the predict method of the knn object
Step26: Our model predicts that this new iris belongs to the class 0, meaning its species is setosa.
How do we know whether we can trust our model?
We don't know the correct species of this sample, which is the whole point of building the model.
Evaluating the Model
This is where the test set that we created earlier comes into play.
The test data wasn't used to build the model, but we do know what the correct species is for each iris in the test set.
Therefore, thus, hence, ergo, we can make a prediction for each iris in the test data and compare it against its label (the known species).
We can measure how well the model works by computing the accuracy, which is the fraction of flowers for which the correct species was predicted
Step27: We can also use the score method of the knn object, which will compute the test set accuracy for us
Step28: For this model, the test set accuracy is about 0.97, which means that we made the correct prediction for 97% of the irises in the test set.
In later chapters we will discuss how we can improve performance, and what caveats there are in tuning a model.
Summary and Outlook
Here is a summary of the code needed for the whole training and evaluation procedure | Python Code:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import mglearn
from IPython.display import display
%matplotlib inline
Explanation: Chapter 1: Introduction
End of explanation
import numpy as np
x = np.array([[1,2,3],[4,5,6]])
print("x:\n{}".format(x))
Explanation: Essential Libraries and Tools
NumPy
End of explanation
from scipy import sparse
# Create a 2D NumPy array with a diagonal of ones, and zeros everywhere else (aka an identity matrix).
eye = np.eye(4)
print("NumPy array:\n{}".format(eye))
# Convert the NumPy array to a SciPy sparse matrix in CSR format.
# The CSR format stores a sparse m × n matrix M in row form using three (one-dimensional) arrays (A, IA, JA).
# Only the nonzero entries are stored.
# http://www.scipy-lectures.org/advanced/scipy_sparse/csr_matrix.html
sparse_matrix = sparse.csr_matrix(eye)
print("\nSciPy sparse CSR matrix:\n{}".format(sparse_matrix))
Explanation: SciPy
End of explanation
data = np.ones(4)
row_indices = np.arange(4)
col_indices = np.arange(4)
eye_coo = sparse.coo_matrix((data, (row_indices, col_indices)))
print("COO representation:\n{}".format(eye_coo))
Explanation: Usually it isn't possible to create dense representations of sparse data (they won't fit in memory), so we need to create sparse representations directly.
Here is a way to create the same sparse matrix as before using the COO format:
End of explanation
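To confirm that the COO matrix really encodes the same identity matrix, it can be converted to CSR format or back to a dense array for inspection:
print("COO converted to CSR:\n{}".format(eye_coo.tocsr()))
print("COO converted back to a dense array:\n{}".format(eye_coo.toarray()))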
# %matplotlib inline -- the default, just displays the plot in the browser.
# %matplotlib notebook -- provides an interactive environment for the plot.
import matplotlib.pyplot as plt
# Generate a sequnce of numbers from -10 to 10 with 100 steps (points) in between.
x = np.linspace(-10, 10, 100)
# Create a second array using sine.
y = np.sin(x)
# The plot function makes a line chart of one array against another.
plt.plot(x, y, marker="x")
plt.title("Simple line plot of a sine function using matplotlib")
plt.show()
Explanation: More details on SciPy sparse matrices can be found in the SciPy Lecture Notes.
matplotlib
End of explanation
import pandas as pd
from IPython.display import display
# Create a simple dataset of people
data = {'Name': ["John", "Anna", "Peter", "Linda"],
'Location' : ["New York", "Paris", "Berlin", "London"],
'Age' : [24, 13, 53, 33]
}
data_pandas = pd.DataFrame(data)
# IPython.display allows for "pretty printing" of dataframes in the Jupyter notebooks.
display(data_pandas)
Explanation: pandas
Here is a small example of creating a pandas DataFrame using a Python dictionary.
End of explanation
# Select all rows that have an age column greater than 30:
display(data_pandas[data_pandas.Age > 30])
Explanation: There are several possible ways to query this table.
Here is one example:
End of explanation
# Make sure your dependencies are similar to the ones in the book.
import sys
print("Python version: {}".format(sys.version))
import pandas as pd
print("pandas version: {}".format(pd.__version__))
import matplotlib
print("matplotlib version: {}".format(matplotlib.__version__))
import numpy as np
print("NumPy version: {}".format(np.__version__))
import scipy as sp
print("SciPy version: {}".format(sp.__version__))
import IPython
print("IPython version: {}".format(IPython.__version__))
import sklearn
print("scikit-learn version: {}".format(sklearn.__version__))
Explanation: mglearn
The mglearn package is a library of utility functions written specifically for this book, so that the code listings don't become too cluttered with details of plotting and data loading.
The mglearn library can be found at the author's Github repository, and can be installed with the command pip install mglearn.
Note
All of the code in this book will assume the following imports:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import mglearn
from IPython.display import display
We also assume that you will run the code in a Jupyter Notebook with the %matplotlib notebook or %matplotlib inline magic enabled to show plots.
If you are not using the notebook or these magic commands, you will have to call plt.show to actually show any of the figures.
End of explanation
from sklearn.datasets import load_iris
iris_dataset = load_iris()
iris_dataset
Explanation: A First Application: Classifying Iris Species
Meet the Data
The data we will use for this example is the Iris dataset, which is a commonly used dataset in machine learning and statistics tutorials.
The Iris dataset is included in scikit-learn in the datasets module.
We can load it by calling the load_iris function.
End of explanation
print("Keys of iris_dataset: \n{}".format(iris_dataset.keys()))
Explanation: The iris object that is returned by load_iris is a Bunch object, which is very similar to a dictionary.
It contains keys and values:
End of explanation
print(iris_dataset['DESCR'][:193] + "\n...")
Explanation: The value of the key DESCR is a short description of the dataset.
End of explanation
print("Target names: {}".format(iris_dataset['target_names']))
Explanation: The value of the key target_names is an array of strings, containing the species of flower that we want to predict.
End of explanation
print("Feature names: \n{}".format(iris_dataset['feature_names']))
Explanation: The value of feature_names is a list of strings, giving the description of each feature:
End of explanation
print("Type of data: {}".format(type(iris_dataset['data'])))
Explanation: The data itself is contained in the target and data fields.
data contains the numeric measurements of sepal length, sepal width, petal length, and petal width in a NumPy array:
End of explanation
print("Shape of data: {}".format(iris_dataset['data'].shape))
Explanation: The rows in the data array correspond to flowers, while the columns represent the four measurements that were taken for each flower.
End of explanation
print("First five rows of data:\n{}".format(iris_dataset['data'][:5]))
Explanation: The shape of the data array is the number of samples (flowers) multiplied by the number of features (properties, e.g. sepal width).
Here are the feature values for the first five samples:
End of explanation
print("Type of target: {}".format(type(iris_dataset['target'])))
Explanation: The data tells us that all of the first five flowers have a petal width of 0.2 cm and that the first flower has the longest sepal (5.1 cm)
The target array contains the species of each of the flowers that were measured, also as a NumPy array:
End of explanation
print("Shape of target: {}".format(iris_dataset['target'].shape))
Explanation: target is a one-dimensional array, with one entry per flower:
End of explanation
print("Target:\n{}".format(iris_dataset['target']))
Explanation: The species are encoded as integers from 0 to 2:
End of explanation
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
iris_dataset['data'], iris_dataset['target'], random_state=0)
# The random_state parameter gives the pseudorandom number generator a fixed (set) seed.
# Setting the seed allows us to obtain reproducible results from randomized procedures.
print("X_train: \n{}".format(X_train))
print("X_test: \n{}".format(X_test))
print("y_train: \n{}".format(y_train))
print("y_test: \n{}".format(y_test))
Explanation: The meanings of the numbers are given by the iris['target_names'] array:
0 means setosa, 1 means versicolor, and 2 means virginica.
Measuring Success: Training and Testing Data
We want to build a machine learning model from this data that can predict the species of iris for a new set of measurements.
To assess the model's performance, we show it new data for which we have labels.
This is usually done by splitting the labeled data into training data and test data.
scikit-learn contains a function called train_test_split that shuffles the data and splits it for you (the default is 75% train and 25% test).
In scikit-learn, data is usually denoted with a capital X, while labels are denoted by a lowercase y.
This is inspired by the standard formulation f(x)=y in mathematics, where x is the input to a function and y is the output.
Following more conventions from mathematics, we use a capital X because the data is a two-dimensional array (a matrix) and a lowercase y because the target is a one-dimensional array (a vector).
Let's call train_test_split on our data and assign the outputs using this nomenclature:
End of explanation
print("X_train shape: \n{}".format(X_train.shape))
print("y_train shape: \n{}".format(y_train.shape))
print("X_test shape: \n{}".format(X_test.shape))
print("y_test shape: \n{}".format(y_test.shape))
Explanation: The output of the train_test_split function is X_train, X_test, y_train, and y_test, which are all NumPy arrays.
X_train contains 75% of the rows in the dataset, and X_test contains the remaining 25%.
End of explanation
# Create dataframe from data in X_train.
# Label the columns using the strings in iris_dataset.feature_names.
iris_dataframe = pd.DataFrame(X_train, columns=iris_dataset.feature_names)
# Create a scatter matrix from the dataframe, color by y_train.
pd.plotting.scatter_matrix(iris_dataframe, c=y_train, figsize=(15, 15), marker='o',
hist_kwds={'bins': 20}, s=60, alpha=.8, cmap=mglearn.cm3)
Explanation: First Things First: Look at Your Data
Before building a machine learning model, it is often a good idea to inspect the data for several reasons:
- so you can see if the task can be solved without machine learning.
- so you can see if the desired information is contained in the data or not.
- so you can detect abnormalities or peculiarities in the data (inconsistent measurements, etc).
One of the best ways to inspect data is to visualize it.
In the example below we will be building a type of scatter plot known as a pair plot.
The data points are colored according to the species the iris belongs to.
To create the plot, we first convert the NumPy array into a pandas DataFrame.
pandas has a function to create pair plots called scatter_matrix.
The diagonal of this matrix is filled with histograms of each feature.
End of explanation
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=1)
Explanation: From the plots, we can see that the three classes seem to be relatively well separated using the sepal and petal measurements.
This means that a machine learning model will likely be able to learn to separate them.
Building Your First Model: k-Nearest Neighbors
There are many classification algorithms in scikit-learn that we can use; here we're going to implement the k-nearest neighbors classifier.
The k in k-nearest neighbors refers to the number of nearest neighbors that will be used to predict the new data point.
We can consider any fixed number k of neighbors; the default for sklearn.neighbors.KNeighborsClassifier is 5, but we're going to keep things simple and use 1 for k.
All machine learning models in scikit-learn are implemented in their own classes, which are called Estimator classes.
The k-nearest neighbors classification algorithm is implemented in the KNeighborsClassifier class in the neighbors module.
More information about the Nearest Neighbors Classification can be found here, and an example can be found here.
Before we can use the model, we need to instantiate the class into an object.
This is when we will set any parameters of the model, the most important of which is the number of neighbors, which we will set to 1:
End of explanation
knn.fit(X_train, y_train)
Explanation: The knn object encapsulates the algorithm that will be used to build the model from the training data, as well as the algorithm to make predictions on new data points.
It will also hold the information that the algorithm has extracted from the training data.
In the case of KNeighborsClassifier, it will just store the training set.
To build the model on the training set, we call the fit method of the knn object, which takes as arguments the NumPy array X_train containing the training data and the NumPy array y_train of the corresponding training labels:
End of explanation
X_new = np.array([[5, 2.9, 1, 0.2]])
print("X_new.shape: \n{}".format(X_new.shape))
Explanation: The fit method returns the knn object itself (and modifies it in place), so we get a string representation of our classifier.
The representation shows us which parameters were used in creating the model.
Nearly all of them are the default values, but you can also find n_neighbors=1, which is the parameter that we passed.
Most models in scikit-learn have many parameters, but the majority of them are either speed optimizations or for very special use cases.
The important parameters will be covered in Chapter 2.
Making Predictions
Now we can make predictions using this model on new data which isn't labeled.
Let's use an example iris with a sepal length of 5cm, sepal width of 2.9cm, petal length of 1cm, and petal width of 0.2cm.
We can put this data into a NumPy array by calculating the shape, which is the number of samples(1) multiplied by the number of features(4):
End of explanation
prediction = knn.predict(X_new)
print("Prediction: \n{}".format(prediction))
print("Predicted target name: \n{}".format(
iris_dataset['target_names'][prediction]))
Explanation: Note that we made the measurements of this single flower into a row in a two-dimensional NumPy array.
scikit-learn always expects two-dimensional arrays for the data.
Now, to make a prediction, we call the predict method of the knn object:
End of explanation
y_pred = knn.predict(X_test)
print("Test set predictions: \n{}".format(y_pred))
print("Test set score: \n{:.2f}".format(np.mean(y_pred == y_test)))
Explanation: Our model predicts that this new iris belongs to the class 0, meaning its species is setosa.
How do we know whether we can trust our model?
We don't know the correct species of this sample, which is the whole point of building the model.
Evaluating the Model
This is where the test set that we created earlier comes into play.
The test data wasn't used to build the model, but we do know what the correct species is for each iris in the test set.
Therefore, we can make a prediction for each iris in the test data and compare it against its label (the known species).
We can measure how well the model works by computing the accuracy, which is the fraction of flowers for which the correct species was predicted:
End of explanation
print("Test set score: \n{:.2f}".format(knn.score(X_test, y_test)))
Explanation: We can also use the score method of the knn object, which will compute the test set accuracy for us:
End of explanation
X_train, X_test, y_train, y_test = train_test_split(
iris_dataset['data'], iris_dataset['target'], random_state=0)
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train, y_train)
print("Test set score: \n{:.2f}".format(knn.score(X_test, y_test)))
Explanation: For this model, the test set accuracy is about 0.97, which means that we made the correct prediction for 97% of the irises in the test set.
In later chapters we will discuss how we can improve performance, and what caveats there are in tuning a model.
Summary and Outlook
Here is a summary of the code needed for the whole training and evaluation procedure:
End of explanation |
11,335 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Compare the RG-Drude and Mie scattering cross-sections
Step1: Set up grain size distributions and materials
Step2: Set up the three grain scattering models
The <code>ss.makeScatModel()</code> function is a short cut for producing a scattering model superclass.
This class contains a model from the <code>extinction.scatmodels</code> module and a complex index of refraction from the <code>distlib.composition.cmindex</code> module.
Step3: Compute the scattering cross-sections
$$\kappa = \frac{1}{M_d}\ \int \sigma\ \frac{dn}{da}\ da $$
The <code>extinction.sigma_scat.KappaScat</code> object is a container for all of the previous objects.
In future iterations of the code, I'm going to change this to a function. There is also a long history as to why the dust mass is used; both of these things will be simplified then.
Step4: Plot it
Multiply through by the dust mass to get the total opacity. | Python Code:
import numpy as np
import matplotlib.pyplot as plt

from astrodust import distlib
from astrodust.extinction import sigma_scat as ss
import astrodust.constants as c
NH, d2g = 1.e21, 0.009
MDUST = NH * c.m_p * d2g
ERANGE = np.logspace(-0.6,1.0,20)
Explanation: Compare the RG-Drude and Mie scattering cross-sections
End of explanation
RHO_SIL, RHO_GRA, RHO_AVG = 3.8, 2.2, 3.0 # g cm^-3; see Draine's ISM book
AMIN, AMAX = 0.005, 0.25 # micron (limits on grain size distribution)
PMRN = 3.5
MRN_sil = distlib.MRN_dist(AMIN, AMAX, p=PMRN, rho=RHO_SIL, md=MDUST)
MRN_gra = distlib.MRN_dist(AMIN, AMAX, p=PMRN, rho=RHO_GRA, md=MDUST)
MRN_avg = distlib.MRN_dist(AMIN, AMAX, p=PMRN, rho=RHO_AVG, md=MDUST)
Explanation: Set up grain size distributions and materials
End of explanation
RGdrude = ss.makeScatModel('RG','Drude')
Mie_Sil = ss.makeScatModel('Mie','Silicate')
Mie_Gra = ss.makeScatModel('Mie','Graphite')
print(RGdrude.__dict__.keys())
print(type(RGdrude.smodel))
print(type(RGdrude.cmodel))
Explanation: Set up the three grain scattering models
The <code>ss.makeScatModel()</code> function is a short cut for producing a scattering model superclass.
This class contains a model from the <code>extinction.scatmodels</code> module and a complex index of refraction from the <code>distlib.composition.cmindex</code> module.
End of explanation
%%time
RGD_kappa = ss.KappaScat(E=ERANGE, dist=MRN_avg, scatm=RGdrude)
%%time
Sil_kappa = ss.KappaScat(E=ERANGE, dist=MRN_sil, scatm=Mie_Sil)
%%time
Gra_kappa = ss.KappaScat(E=ERANGE, dist=MRN_gra, scatm=Mie_Gra)
Explanation: Compute the scattering cross-sections
$$\kappa = \frac{1}{M_d}\ \int \sigma\ \frac{dn}{da}\ da $$
The <code>extinction.sigma_scat.KappaScat</code> object is a container for all of the previous objects.
In future iterations of the code, I'm going to change this to a function. There is also a long history as to why the dust mass is used; both of these things will be simplified then.
End of explanation
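As a minimal sketch of the integral above written as a discrete sum, assuming hypothetical stand-in arrays for the grain radii, the size distribution dn/da and the per-grain cross-section (these are not the values astrodust uses internally):
import numpy as np

a_grid = np.logspace(np.log10(AMIN), np.log10(AMAX), 100)  # grain radii (micron)
dnda = a_grid ** (-PMRN)                                    # un-normalised MRN size distribution (placeholder)
sigma = a_grid ** 2                                         # placeholder per-grain cross-section
kappa_approx = np.trapz(sigma * dnda, a_grid) / MDUST       # kappa = (1/M_d) * integral of sigma dn/da da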
def plot_kappa(ax, kappa_obj, **kwargs):
ax.plot(kappa_obj.E, kappa_obj.kappa * kappa_obj.dist.md, **kwargs)
ax.tick_params(labelsize=12)
ax.set_xlabel('Energy (keV)', size=14)
ax.set_ylabel('Scattering Opacity ($\tau$ per 10$^{21}$ H cm$^{-2}$)', size=14)
return
ax = plt.subplot(111)
plot_kappa(ax, RGD_kappa, color='k', lw=2, alpha=0.8, label='RG-Drude')
plot_kappa(ax, Sil_kappa, marker='o', color='g', lw=2, alpha=0.5, label='Mie-Silicate')
plot_kappa(ax, Gra_kappa, marker='o', color='b', lw=2, alpha=0.5, label='Mie-Graphite')
plt.legend(loc='upper right', frameon=False)
plt.loglog()
plt.xlim(0.2, 10.0)
plt.ylim(1.e-4, 1.0)
Explanation: Plot it
Multiply through by the dust mass to get the total opacity.
End of explanation |
11,336 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Step1: Implementing a Neural Network
In this exercise we will develop a neural network with fully-connected layers to perform classification, and test it out on the CIFAR-10 dataset.
Step2: We will use the class TwoLayerNet in the file cs231n/classifiers/neural_net.py to represent instances of our network. The network parameters are stored in the instance variable self.params where keys are string parameter names and values are numpy arrays. Below, we initialize toy data and a toy model that we will use to develop your implementation.
Step3: Forward pass
Step4: Forward pass
Step5: Backward pass
Implement the rest of the function. This will compute the gradient of the loss with respect to the variables W1, b1, W2, and b2. Now that you (hopefully!) have a correctly implemented forward pass, you can debug your backward pass using a numeric gradient check
Step6: Train the network
To train the network we will use stochastic gradient descent (SGD), similar to the SVM and Softmax classifiers. Look at the function TwoLayerNet.train and fill in the missing sections to implement the training procedure. This should be very similar to the training procedure you used for the SVM and Softmax classifiers. You will also have to implement TwoLayerNet.predict, as the training process periodically performs prediction to keep track of accuracy over time while the network trains.
Once you have implemented the method, run the code below to train a two-layer network on toy data. You should achieve a training loss less than 0.2.
Step8: Load the data
Now that you have implemented a two-layer network that passes gradient checks and works on toy data, it's time to load up our favorite CIFAR-10 data so we can use it to train a classifier on a real dataset.
Step9: Train a network
To train our network we will use SGD with momentum. In addition, we will adjust the learning rate with an exponential learning rate schedule as optimization proceeds; after each epoch, we will reduce the learning rate by multiplying it by a decay rate.
Step10: Debug the training
With the default parameters we provided above, you should get a validation accuracy of about 0.29 on the validation set. This isn't very good.
One strategy for getting insight into what's wrong is to plot the loss function and the accuracies on the training and validation sets during optimization.
Another strategy is to visualize the weights that were learned in the first layer of the network. In most neural networks trained on visual data, the first layer weights typically show some visible structure when visualized.
Step11: Tune your hyperparameters
What's wrong? Looking at the visualizations above, we see that the loss is decreasing more or less linearly, which seems to suggest that the learning rate may be too low. Moreover, there is no gap between the training and validation accuracy, suggesting that the model we used has low capacity, and that we should increase its size. On the other hand, with a very large model we would expect to see more overfitting, which would manifest itself as a very large gap between the training and validation accuracy.
Tuning. Tuning the hyperparameters and developing intuition for how they affect the final performance is a large part of using Neural Networks, so we want you to get a lot of practice. Below, you should experiment with different values of the various hyperparameters, including hidden layer size, learning rate, number of training epochs, and regularization strength. You might also consider tuning the learning rate decay, but you should be able to get good performance using the default value.
Approximate results. You should aim to achieve a classification accuracy of greater than 48% on the validation set. Our best network gets over 52% on the validation set.
Experiment
Step12: Run on the test set
When you are done experimenting, you should evaluate your final trained network on the test set; you should get above 48%.
We will give you an extra bonus point for every 1% of accuracy above 52%. | Python Code:
# A bit of setup
import numpy as np
import matplotlib.pyplot as plt
from cs231n.classifiers.neural_net import TwoLayerNet
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
    """ returns relative error """
    return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
Explanation: Implementing a Neural Network
In this exercise we will develop a neural network with fully-connected layers to perform classification, and test it out on the CIFAR-10 dataset.
End of explanation
# Create a small net and some toy data to check your implementations.
# Note that we set the random seed for repeatable experiments.
input_size = 4
hidden_size = 10
num_classes = 3
num_inputs = 5
def init_toy_model():
np.random.seed(0)
return TwoLayerNet(input_size, hidden_size, num_classes, std=1e-1)
def init_toy_data():
np.random.seed(1)
X = 10 * np.random.randn(num_inputs, input_size)
y = np.array([0, 1, 2, 2, 1])
return X, y
net = init_toy_model()
X, y = init_toy_data()
Explanation: We will use the class TwoLayerNet in the file cs231n/classifiers/neural_net.py to represent instances of our network. The network parameters are stored in the instance variable self.params where keys are string parameter names and values are numpy arrays. Below, we initialize toy data and a toy model that we will use to develop your implementation.
End of explanation
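To see the parameter dictionary described above, you can loop over net.params and print each array's shape (W1, b1, W2 and b2 for this two-layer net):
for param_name in sorted(net.params):
    print param_name, net.params[param_name].shape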
scores = net.loss(X)
print 'Your scores:'
print scores
print
print 'correct scores:'
correct_scores = np.asarray([
[-0.81233741, -1.27654624, -0.70335995],
[-0.17129677, -1.18803311, -0.47310444],
[-0.51590475, -1.01354314, -0.8504215 ],
[-0.15419291, -0.48629638, -0.52901952],
[-0.00618733, -0.12435261, -0.15226949]])
print correct_scores
# The difference should be very small. We get < 1e-7
print 'Difference between your scores and correct scores:'
print np.sum(np.abs(scores - correct_scores))
Explanation: Forward pass: compute scores
Open the file cs231n/classifiers/neural_net.py and look at the method TwoLayerNet.loss. This function is very similar to the loss functions you have written for the SVM and Softmax exercises: It takes the data and weights and computes the class scores, the loss, and the gradients on the parameters.
Implement the first part of the forward pass which uses the weights and biases to compute the scores for all inputs.
End of explanation
loss, _ = net.loss(X, y, reg=0.1)
correct_loss = 1.30378789133
# should be very small, we get < 1e-12
print 'Difference between your loss and correct loss:'
print np.sum(np.abs(loss - correct_loss))
Explanation: Forward pass: compute loss
In the same function, implement the second part that computes the data and regularizaion loss.
End of explanation
from cs231n.gradient_check import eval_numerical_gradient
# Use numeric gradient checking to check your implementation of the backward pass.
# If your implementation is correct, the difference between the numeric and
# analytic gradients should be less than 1e-8 for each of W1, W2, b1, and b2.
loss, grads = net.loss(X, y, reg=0.1)
# these should all be less than 1e-8 or so
for param_name in grads:
f = lambda W: net.loss(X, y, reg=0.1)[0]
param_grad_num = eval_numerical_gradient(f, net.params[param_name], verbose=False)
print '%s max relative error: %e' % (param_name, rel_error(param_grad_num, grads[param_name]))
Explanation: Backward pass
Implement the rest of the function. This will compute the gradient of the loss with respect to the variables W1, b1, W2, and b2. Now that you (hopefully!) have a correctly implemented forward pass, you can debug your backward pass using a numeric gradient check:
End of explanation
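Conceptually, the numeric check below approximates each partial derivative with a centered difference, (f(x + h) - f(x - h)) / (2h). Here is a minimal sketch of that idea; it is not the course's eval_numerical_gradient utility, just an illustration:
def numerical_gradient_sketch(f, w, h=1e-5):
    grad = np.zeros_like(w)
    it = np.nditer(w, flags=['multi_index'])
    while not it.finished:
        ix = it.multi_index
        old_value = w[ix]
        w[ix] = old_value + h
        fxph = f(w)                  # f(x + h)
        w[ix] = old_value - h
        fxmh = f(w)                  # f(x - h)
        w[ix] = old_value            # restore the original value
        grad[ix] = (fxph - fxmh) / (2 * h)
        it.iternext()
    return grad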
net = init_toy_model()
stats = net.train(X, y, X, y,
learning_rate=1e-1, reg=1e-5,
num_iters=100, verbose=False)
print 'Final training loss: ', stats['loss_history'][-1]
# plot the loss history
plt.plot(stats['loss_history'])
plt.xlabel('iteration')
plt.ylabel('training loss')
plt.title('Training Loss history')
plt.show()
Explanation: Train the network
To train the network we will use stochastic gradient descent (SGD), similar to the SVM and Softmax classifiers. Look at the function TwoLayerNet.train and fill in the missing sections to implement the training procedure. This should be very similar to the training procedure you used for the SVM and Softmax classifiers. You will also have to implement TwoLayerNet.predict, as the training process periodically performs prediction to keep track of accuracy over time while the network trains.
Once you have implemented the method, run the code below to train a two-layer network on toy data. You should achieve a training loss less than 0.2.
End of explanation
from cs231n.data_utils import load_CIFAR10
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
    """
    Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
    it for the two-layer neural net classifier. These are the same steps as
    we used for the SVM, but condensed to a single function.
    """
# Load the raw CIFAR-10 data
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# Subsample the data
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
# Normalize the data: subtract the mean image
mean_image = np.mean(X_train, axis=0)
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
# Reshape data to rows
X_train = X_train.reshape(num_training, -1)
X_val = X_val.reshape(num_validation, -1)
X_test = X_test.reshape(num_test, -1)
return X_train, y_train, X_val, y_val, X_test, y_test
# Invoke the above function to get our data.
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
print 'Train data shape: ', X_train.shape
print 'Train labels shape: ', y_train.shape
print 'Validation data shape: ', X_val.shape
print 'Validation labels shape: ', y_val.shape
print 'Test data shape: ', X_test.shape
print 'Test labels shape: ', y_test.shape
Explanation: Load the data
Now that you have implemented a two-layer network that passes gradient checks and works on toy data, it's time to load up our favorite CIFAR-10 data so we can use it to train a classifier on a real dataset.
End of explanation
input_size = 32 * 32 * 3
hidden_size = 50
num_classes = 10
net = TwoLayerNet(input_size, hidden_size, num_classes)
# Train the network
stats = net.train(X_train, y_train, X_val, y_val,
num_iters=1000, batch_size=200,
learning_rate=1e-4, learning_rate_decay=0.95,
reg=0.5, verbose=True)
# Predict on the validation set
val_acc = (net.predict(X_val) == y_val).mean()
print 'Validation accuracy: ', val_acc
Explanation: Train a network
To train our network we will use SGD with momentum. In addition, we will adjust the learning rate with an exponential learning rate schedule as optimization proceeds; after each epoch, we will reduce the learning rate by multiplying it by a decay rate.
End of explanation
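A minimal, self-contained sketch of the update rule described above (SGD with momentum plus a per-epoch exponential learning-rate decay). The quadratic stand-in loss and all values here are hypothetical, not the network's actual loss or hyperparameters:
w = np.zeros(5)                        # hypothetical parameter vector
velocity = np.zeros_like(w)
learning_rate, momentum, decay = 1e-2, 0.9, 0.95
for epoch in range(3):
    for it in range(10):               # pretend each iteration is one minibatch
        grad = 2 * (w - 1.0)           # gradient of the stand-in loss ||w - 1||^2
        velocity = momentum * velocity - learning_rate * grad
        w += velocity
    learning_rate *= decay             # exponential learning rate schedule
print 'final w:', w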
# Plot the loss function and train / validation accuracies
plt.subplot(2, 1, 1)
plt.plot(stats['loss_history'])
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.subplot(2, 1, 2)
plt.plot(stats['train_acc_history'], label='train')
plt.plot(stats['val_acc_history'], label='val')
plt.title('Classification accuracy history')
plt.xlabel('Epoch')
plt.ylabel('Classification accuracy')
plt.show()
from cs231n.vis_utils import visualize_grid
# Visualize the weights of the network
def show_net_weights(net):
W1 = net.params['W1']
W1 = W1.reshape(32, 32, 3, -1).transpose(3, 0, 1, 2)
plt.imshow(visualize_grid(W1, padding=3).astype('uint8'))
plt.gca().axis('off')
plt.show()
show_net_weights(net)
Explanation: Debug the training
With the default parameters we provided above, you should get a validation accuracy of about 0.29 on the validation set. This isn't very good.
One strategy for getting insight into what's wrong is to plot the loss function and the accuracies on the training and validation sets during optimization.
Another strategy is to visualize the weights that were learned in the first layer of the network. In most neural networks trained on visual data, the first layer weights typically show some visible structure when visualized.
End of explanation
best_net = None # store the best model into this
#################################################################################
# TODO: Tune hyperparameters using the validation set. Store your best trained #
# model in best_net. #
# #
# To help debug your network, it may help to use visualizations similar to the #
# ones we used above; these visualizations will have significant qualitative #
# differences from the ones we saw above for the poorly tuned network. #
# #
# Tweaking hyperparameters by hand can be fun, but you might find it useful to #
# write code to sweep through possible combinations of hyperparameters #
# automatically like we did on the previous exercises. #
#################################################################################
best_val = -1
for hidden_size in [200,500,700]:
for learning_rate in [5e-4,1e-3,5e-3]:
net = TwoLayerNet(input_size, hidden_size, num_classes)
# Train the network
stats = net.train(X_train, y_train, X_val, y_val,
num_iters=1000, batch_size=200,
learning_rate=learning_rate, learning_rate_decay=0.95,
reg=0.5, verbose=False)
print "."
# Predict on the validation set
val_acc = (net.predict(X_val) == y_val).mean()
if best_val < val_acc:
best_val = val_acc
best_net = net
print "best till now ",best_val
#################################################################################
# END OF YOUR CODE #
#################################################################################
# visualize the weights of the best network
show_net_weights(best_net)
Explanation: Tune your hyperparameters
What's wrong? Looking at the visualizations above, we see that the loss is decreasing more or less linearly, which seems to suggest that the learning rate may be too low. Moreover, there is no gap between the training and validation accuracy, suggesting that the model we used has low capacity, and that we should increase its size. On the other hand, with a very large model we would expect to see more overfitting, which would manifest itself as a very large gap between the training and validation accuracy.
Tuning. Tuning the hyperparameters and developing intuition for how they affect the final performance is a large part of using Neural Networks, so we want you to get a lot of practice. Below, you should experiment with different values of the various hyperparameters, including hidden layer size, learning rate, number of training epochs, and regularization strength. You might also consider tuning the learning rate decay, but you should be able to get good performance using the default value.
Approximate results. You should aim to achieve a classification accuracy of greater than 48% on the validation set. Our best network gets over 52% on the validation set.
Experiment: Your goal in this exercise is to get as good of a result on CIFAR-10 as you can, with a fully-connected Neural Network. For every 1% above 52% on the Test set we will award you with one extra bonus point. Feel free to implement your own techniques (e.g. PCA to reduce dimensionality, or adding dropout, or adding features to the solver, etc.).
End of explanation
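As one possible way to automate the sweep mentioned above, here is a hedged sketch of random search: sample the learning rate log-uniformly, train each candidate briefly, and keep the best validation score. The rs_-prefixed names are new here so they do not overwrite best_net above:
rs_best_val, rs_best_net = -1, None
for trial in range(5):
    rs_lr = 10 ** np.random.uniform(-4, -2)
    rs_hidden = int(np.random.choice([100, 200, 500]))
    rs_net = TwoLayerNet(input_size, rs_hidden, num_classes)
    rs_net.train(X_train, y_train, X_val, y_val,
                 num_iters=500, batch_size=200,
                 learning_rate=rs_lr, learning_rate_decay=0.95,
                 reg=0.5, verbose=False)
    rs_val_acc = (rs_net.predict(X_val) == y_val).mean()
    if rs_val_acc > rs_best_val:
        rs_best_val, rs_best_net = rs_val_acc, rs_net
print 'best random-search validation accuracy: ', rs_best_val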
test_acc = (best_net.predict(X_test) == y_test).mean()
print 'Test accuracy: ', test_acc
Explanation: Run on the test set
When you are done experimenting, you should evaluate your final trained network on the test set; you should get above 48%.
We will give you an extra bonus point for every 1% of accuracy above 52%.
End of explanation |
11,337 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Import the Python API module and Instantiate the GIS object
Import the Python API
Step1: Create an GIS object instance using the account currently logged in through ArcGIS Pro
Step2: Get a Feature Set, data to work with, from the Web GIS Item ID
Create a Web GIS Item instance using the Item ID
Step3: Since the item only contains one feature layer, get the first layer in the item, the Feature Layer we need to work with.
Step4: Now, for this initial analysis, query to return just the attributes for the eight minute trade areas as a Feature Set.
Step5: Convert the Data into a Pandas Data Frame
Take advantage of the df function on the Feature set object returned from the query to convert the data to a Pandas Data Frame.
Step6: Save dependent and independent variable names as Python variables
Use a quick list comprehension to create a list of field names to be used as independent variables.
Step7: Also, save the name of the dependent variable field as well. | Python Code:
import arcgis
Explanation: Import the Python API module and Instantiate the GIS object
Import the Python API
End of explanation
gis_retail = arcgis.gis.GIS('Pro')
Explanation: Create an GIS object instance using the account currently logged in through ArcGIS Pro
End of explanation
trade_area_itemid = 'bf361f9081fd43a7ba57357e74ccc373'
item = arcgis.gis.Item(gis=gis_retail, itemid=trade_area_itemid)
item
Explanation: Get a Feature Set, data to work with, from the Web GIS Item ID
Create a Web GIS Item instance using the Item ID
End of explanation
feature_layer = item.layers[0]
feature_layer
Explanation: Since the item only contains one feature layer, get the first layer in the item, the Feature Layer we need to work with.
End of explanation
feature_set = feature_layer.query(where="AREA_DESC = '0 - 8 minutes'", returnGeometry=False)
Explanation: Now, for this initial analysis, query to return just the attributes for the eight minute trade areas as a Feature Set.
End of explanation
data_frame = feature_set.df
data_frame.head()
Explanation: Convert the Data into a Pandas Data Frame
Take advantage of the df function on the Feature set object returned from the query to convert the data to a Pandas Data Frame.
End of explanation
field_name_independent_list = [field['name'] for field in feature_set.fields if
field['type'] != 'esriFieldTypeOID' and # we don't need the Esri object identifier field
field['name'].startswith('Shape_') == False and # exclude the Esri shape fields
                              field['type'] == 'esriFieldTypeDouble' and # ensure numeric, quantitative fields are the only fields used
                              field['name'] != 'STORE_LAT' and # while numeric, the fields describing the location are not independent variables
                              field['name'] != 'STORE_LONG' and # while numeric, the fields describing the location are not independent variables
field['name'] != 'SALESVOL' # exclude the dependent variable
]
print(field_name_independent_list)
Explanation: Save dependent and independent variable names as Python variables
Use a quick list comprehension to create a list of field names to be used as independent variables.
End of explanation
field_name_dependent = 'SALESVOL'
Explanation: Also, save the name of the dependent variable field as well.
End of explanation |
11,338 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Disaggregation - Hart Active and Reactive data
Customary imports
Step1: Show versions for any diagnostics
Step2: Load dataset
Step3: Period of interest 4 days during holiday
No human activity so all readings should be due to periodic automatic running of appliances such as
fridge, freezer, central heating pump, shower pump (due to pressure loss)
Step4: Training
We'll now do the training from the aggregate data. The algorithm segments the time series data into steady and transient states. Thus, we'll first figure out the transient and the steady states. Next, we'll try and pair the on and the off transitions based on their proximity in time and value.
Step5: Set two days for Disaggregation period of interest
Inspect the data during a quiet period when we were on holiday; there should only be autonomous
appliances such as the fridge, freezer and water heating, plus any standby devices not unplugged.
Step6: Disaggregate using Hart (Active data only) | Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
from os.path import join
from pylab import rcParams
import matplotlib.pyplot as plt
rcParams['figure.figsize'] = (13, 6)
plt.style.use('ggplot')
#import nilmtk
from nilmtk import DataSet, TimeFrame, MeterGroup, HDFDataStore
from nilmtk.disaggregate.hart_85 import Hart85
from nilmtk.disaggregate import CombinatorialOptimisation
from nilmtk.utils import print_dict, show_versions
from nilmtk.metrics import f1_score
#import seaborn as sns
#sns.set_palette("Set3", n_colors=12)
import warnings
warnings.filterwarnings("ignore") #suppress warnings, comment out if warnings required
Explanation: Disaggregation - Hart Active and Reactive data
Customary imports
End of explanation
#uncomment if required
#show_versions()
Explanation: Show versions for any diagnostics
End of explanation
data_dir = '/Users/GJWood/nilm_gjw_data/HDF5/'
gjw = DataSet(join(data_dir, 'nilm_gjw_data.hdf5'))
print('loaded ' + str(len(gjw.buildings)) + ' buildings')
building_number=1
Explanation: Load dataset
End of explanation
gjw.set_window('2015-07-12 00:00:00', '2015-07-16 00:00:00')
elec = gjw.buildings[building_number].elec
mains = elec.mains()
house = elec['fridge'] #only one meter so any selection will do
df = house.load().next() #load the first chunk of data into a dataframe
df.info() #check that the data is what we want (optional)
#note the data has two columns and a time index
plotdata = df.ix['2015-07-12 00:00:00': '2015-07-16 00:00:00']
plotdata.plot()
plt.title("Raw Mains Usage")
plt.ylabel("Power (W)")
plt.xlabel("Time");
plt.scatter(plotdata[('power','active')],plotdata[('power','reactive')])
plt.title("Raw Mains Usage Signature Space")
plt.ylabel("Reactive Power (VAR)")
plt.xlabel("Active Power (W)");
Explanation: Period of interest 4 days during holiday
No human activity so all readings should be due to periodic automatic running of appliances such as
fridge, freezer, central heating pump, shower pump (due to pressure loss)
End of explanation
h = Hart85()
h.train(mains,cols=[('power','active'),('power','reactive')],min_tolerance=100,noise_level=70,buffer_size=20,state_threshold=15)
plt.scatter(h.steady_states[('active average')],h.steady_states[('reactive average')])
plt.scatter(h.centroids[('power','active')],h.centroids[('power','reactive')],marker='x',c=(1.0, 0.0, 0.0))
plt.legend(['Steady states','Centroids'],loc=4)
plt.title("Training steady states Signature space")
plt.ylabel("Reactive average (VAR)")
plt.xlabel("Active average (W)");
h.steady_states.head()
h.steady_states.tail()
h.centroids
h.model
ax = mains.plot()
h.steady_states['active average'].plot(style='o', ax = ax);
plt.ylabel("Power (W)")
plt.xlabel("Time");
#plt.show()
h.pair_df.head()
pair_shape_df = pd.DataFrame(columns=['Height','Duration'])
pair_shape_df['Height']= (h.pair_df['T1 Active'].abs()+h.pair_df['T2 Active'].abs())/2
pair_shape_df['Duration']= pd.to_timedelta(h.pair_df['T2 Time']-h.pair_df['T1 Time'],unit='s').dt.seconds
pair_shape_df.head()
fig = plt.figure(figsize=(13,6))
ax = fig.add_subplot(1, 1, 1)
ax.set_yscale('log')
ax.scatter(pair_shape_df['Height'],pair_shape_df['Duration'])
#plt.plot((x1, x2), (y1, y2), 'k-')
ax.plot((h.centroids[('power','active')],
h.centroids[('power','active')]),
(h.centroids[('power','active')]*0,
h.centroids[('power','active')]*0+10000)
,marker='x',c=(0.0, 0.0, 0.0))
#ax.axvline(h.centroids[('power','active')], color='k', linestyle='--')
plt.legend(['Transitions','Centroids'],loc=1)
plt.title("Paired event - Signature Space")
plt.ylabel("Log Duration (sec)")
plt.xlabel("Transition (W)");
Explanation: Training
We'll now do the training from the aggregate data. The algorithm segments the time series data into steady and transient states. Thus, we'll first figure out the transient and the steady states. Next, we'll try and pair the on and the off transitions based on their proximity in time and value.
End of explanation
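To make the pairing idea above concrete, here is a minimal, self-contained illustration of matching each positive ('on') transition with a later negative ('off') transition of similar magnitude. It is only a sketch of the idea, not NILMTK's Hart85 implementation:
def pair_transitions(transitions, tolerance=100):
    pairs, unmatched_on = [], []
    for t, delta in transitions:
        if delta > 0:
            unmatched_on.append((t, delta))
        else:
            # search the most recent unmatched "on" whose size roughly cancels this "off"
            for i in range(len(unmatched_on) - 1, -1, -1):
                t_on, d_on = unmatched_on[i]
                if abs(d_on + delta) <= tolerance:
                    pairs.append((t_on, t, d_on))
                    unmatched_on.pop(i)
                    break
    return pairs

example = [(0, 1500), (30, 90), (45, -80), (120, -1480)]  # (time in s, step change in W)
print(pair_transitions(example))  # [(30, 45, 90), (0, 120, 1500)]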
gjw.set_window('2015-07-13 00:00:00','2015-07-14 00:00:00')
elec = gjw.buildings[building_number].elec
mains = elec.mains()
mains.plot()
Explanation: Set two days for Disaggregation period of interest
Inspect the data during a quiet period when we were on holiday; there should only be autonomous
appliances such as the fridge, freezer and water heating, plus any standby devices not unplugged.
End of explanation
ax = mains.plot()
h.steady_states['active average'].plot(style='o', ax = ax);
plt.ylabel("Power (W)")
plt.xlabel("Time");
disag_filename = join(data_dir, 'disag_gjw_hart.hdf5')
output = HDFDataStore(disag_filename, 'w')
h.disaggregate(mains,output,sample_period=1)
output.close()
ax = mains.plot()
h.steady_states['active average'].plot(style='o', ax = ax);
plt.ylabel("Power (W)")
plt.xlabel("Time");
disag_hart = DataSet(disag_filename)
disag_hart
disag_hart_elec = disag_hart.buildings[building_number].elec
disag_hart_elec
disag_hart_elec.mains()
h.centroids
h.model
h.steady_states
from nilmtk.metrics import f1_score
f1_hart= f1_score(disag_hart_elec, test_elec)
f1_hart.index = disag_hart_elec.get_labels(f1_hart.index)
f1_hart.plot(kind='barh')
plt.ylabel('appliance');
plt.xlabel('f-score');
plt.title("Hart");
Explanation: Disaggregate using Hart (Active data only)
End of explanation |
11,339 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
CycleGAN
Author
Step1: Prepare the dataset
In this example, we will be using the
horse to zebra
dataset.
Step2: Create Dataset objects
Step3: Visualize some samples
Step5: Building blocks used in the CycleGAN generators and discriminators
Step6: Build the generators
The generator consists of downsampling blocks
Step7: Build the discriminators
The discriminators implement the following architecture
Step8: Build the CycleGAN model
We will override the train_step() method of the Model class
for training via fit().
Step10: Create a callback that periodically saves generated images
Step11: Train the end-to-end model
Step12: Test the performance of the model.
You can use the trained model hosted on Hugging Face Hub and try the demo on Hugging Face Spaces. | Python Code:
import os
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_addons as tfa
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
autotune = tf.data.AUTOTUNE
Explanation: CycleGAN
Author: A_K_Nain<br>
Date created: 2020/08/12<br>
Last modified: 2020/08/12<br>
Description: Implementation of CycleGAN.
CycleGAN
CycleGAN is a model that aims to solve the image-to-image translation
problem. The goal of the image-to-image translation problem is to learn the
mapping between an input image and an output image using a training set of
aligned image pairs. However, obtaining paired examples isn't always feasible.
CycleGAN tries to learn this mapping without requiring paired input-output images,
using cycle-consistent adversarial networks.
Paper
Original implementation
Setup
End of explanation
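For reference, a sketch of the objective described above in the notation of the CycleGAN paper: an adversarial loss for each mapping, a cycle-consistency loss, and an optional identity loss. The weights correspond to lambda_cycle and lambda_identity in the CycleGan class defined later in this example.
$$\mathcal{L}_{cyc}(G, F) = \mathbb{E}_{x}\big[\lVert F(G(x)) - x \rVert_1\big] + \mathbb{E}_{y}\big[\lVert G(F(y)) - y \rVert_1\big]$$
$$\mathcal{L}(G, F, D_X, D_Y) = \mathcal{L}_{GAN}(G, D_Y) + \mathcal{L}_{GAN}(F, D_X) + \lambda_{cyc}\,\mathcal{L}_{cyc}(G, F) + \lambda_{id}\,\mathcal{L}_{id}(G, F)$$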
# Load the horse-zebra dataset using tensorflow-datasets.
dataset, _ = tfds.load("cycle_gan/horse2zebra", with_info=True, as_supervised=True)
train_horses, train_zebras = dataset["trainA"], dataset["trainB"]
test_horses, test_zebras = dataset["testA"], dataset["testB"]
# Define the standard image size.
orig_img_size = (286, 286)
# Size of the random crops to be used during training.
input_img_size = (256, 256, 3)
# Weights initializer for the layers.
kernel_init = keras.initializers.RandomNormal(mean=0.0, stddev=0.02)
# Gamma initializer for instance normalization.
gamma_init = keras.initializers.RandomNormal(mean=0.0, stddev=0.02)
buffer_size = 256
batch_size = 1
def normalize_img(img):
img = tf.cast(img, dtype=tf.float32)
# Map values in the range [-1, 1]
return (img / 127.5) - 1.0
def preprocess_train_image(img, label):
# Random flip
img = tf.image.random_flip_left_right(img)
# Resize to the original size first
img = tf.image.resize(img, [*orig_img_size])
# Random crop to 256X256
img = tf.image.random_crop(img, size=[*input_img_size])
# Normalize the pixel values in the range [-1, 1]
img = normalize_img(img)
return img
def preprocess_test_image(img, label):
# Only resizing and normalization for the test images.
img = tf.image.resize(img, [input_img_size[0], input_img_size[1]])
img = normalize_img(img)
return img
Explanation: Prepare the dataset
In this example, we will be using the
horse to zebra
dataset.
End of explanation
# Apply the preprocessing operations to the training data
train_horses = (
train_horses.map(preprocess_train_image, num_parallel_calls=autotune)
.cache()
.shuffle(buffer_size)
.batch(batch_size)
)
train_zebras = (
train_zebras.map(preprocess_train_image, num_parallel_calls=autotune)
.cache()
.shuffle(buffer_size)
.batch(batch_size)
)
# Apply the preprocessing operations to the test data
test_horses = (
test_horses.map(preprocess_test_image, num_parallel_calls=autotune)
.cache()
.shuffle(buffer_size)
.batch(batch_size)
)
test_zebras = (
test_zebras.map(preprocess_test_image, num_parallel_calls=autotune)
.cache()
.shuffle(buffer_size)
.batch(batch_size)
)
Explanation: Create Dataset objects
End of explanation
_, ax = plt.subplots(4, 2, figsize=(10, 15))
for i, samples in enumerate(zip(train_horses.take(4), train_zebras.take(4))):
horse = (((samples[0][0] * 127.5) + 127.5).numpy()).astype(np.uint8)
zebra = (((samples[1][0] * 127.5) + 127.5).numpy()).astype(np.uint8)
ax[i, 0].imshow(horse)
ax[i, 1].imshow(zebra)
plt.show()
Explanation: Visualize some samples
End of explanation
class ReflectionPadding2D(layers.Layer):
    """Implements Reflection Padding as a layer.

    Args:
        padding(tuple): Amount of padding for the
            spatial dimensions.

    Returns:
        A padded tensor with the same type as the input tensor.
    """
def __init__(self, padding=(1, 1), **kwargs):
self.padding = tuple(padding)
super(ReflectionPadding2D, self).__init__(**kwargs)
def call(self, input_tensor, mask=None):
padding_width, padding_height = self.padding
padding_tensor = [
[0, 0],
[padding_height, padding_height],
[padding_width, padding_width],
[0, 0],
]
return tf.pad(input_tensor, padding_tensor, mode="REFLECT")
def residual_block(
x,
activation,
kernel_initializer=kernel_init,
kernel_size=(3, 3),
strides=(1, 1),
padding="valid",
gamma_initializer=gamma_init,
use_bias=False,
):
dim = x.shape[-1]
input_tensor = x
x = ReflectionPadding2D()(input_tensor)
x = layers.Conv2D(
dim,
kernel_size,
strides=strides,
kernel_initializer=kernel_initializer,
padding=padding,
use_bias=use_bias,
)(x)
x = tfa.layers.InstanceNormalization(gamma_initializer=gamma_initializer)(x)
x = activation(x)
x = ReflectionPadding2D()(x)
x = layers.Conv2D(
dim,
kernel_size,
strides=strides,
kernel_initializer=kernel_initializer,
padding=padding,
use_bias=use_bias,
)(x)
x = tfa.layers.InstanceNormalization(gamma_initializer=gamma_initializer)(x)
x = layers.add([input_tensor, x])
return x
def downsample(
x,
filters,
activation,
kernel_initializer=kernel_init,
kernel_size=(3, 3),
strides=(2, 2),
padding="same",
gamma_initializer=gamma_init,
use_bias=False,
):
x = layers.Conv2D(
filters,
kernel_size,
strides=strides,
kernel_initializer=kernel_initializer,
padding=padding,
use_bias=use_bias,
)(x)
x = tfa.layers.InstanceNormalization(gamma_initializer=gamma_initializer)(x)
if activation:
x = activation(x)
return x
def upsample(
x,
filters,
activation,
kernel_size=(3, 3),
strides=(2, 2),
padding="same",
kernel_initializer=kernel_init,
gamma_initializer=gamma_init,
use_bias=False,
):
x = layers.Conv2DTranspose(
filters,
kernel_size,
strides=strides,
padding=padding,
kernel_initializer=kernel_initializer,
use_bias=use_bias,
)(x)
x = tfa.layers.InstanceNormalization(gamma_initializer=gamma_initializer)(x)
if activation:
x = activation(x)
return x
Explanation: Building blocks used in the CycleGAN generators and discriminators
End of explanation
def get_resnet_generator(
filters=64,
num_downsampling_blocks=2,
num_residual_blocks=9,
num_upsample_blocks=2,
gamma_initializer=gamma_init,
name=None,
):
img_input = layers.Input(shape=input_img_size, name=name + "_img_input")
x = ReflectionPadding2D(padding=(3, 3))(img_input)
x = layers.Conv2D(filters, (7, 7), kernel_initializer=kernel_init, use_bias=False)(
x
)
x = tfa.layers.InstanceNormalization(gamma_initializer=gamma_initializer)(x)
x = layers.Activation("relu")(x)
# Downsampling
for _ in range(num_downsampling_blocks):
filters *= 2
x = downsample(x, filters=filters, activation=layers.Activation("relu"))
# Residual blocks
for _ in range(num_residual_blocks):
x = residual_block(x, activation=layers.Activation("relu"))
# Upsampling
for _ in range(num_upsample_blocks):
filters //= 2
x = upsample(x, filters, activation=layers.Activation("relu"))
# Final block
x = ReflectionPadding2D(padding=(3, 3))(x)
x = layers.Conv2D(3, (7, 7), padding="valid")(x)
x = layers.Activation("tanh")(x)
model = keras.models.Model(img_input, x, name=name)
return model
Explanation: Build the generators
The generator consists of downsampling blocks, nine residual blocks,
and upsampling blocks. The structure of the generator is the following:
c7s1-64 ==> Conv block with `relu` activation, filter size of 7
d128 ====|
|-> 2 downsampling blocks
d256 ====|
R256 ====|
R256 |
R256 |
R256 |
R256 |-> 9 residual blocks
R256 |
R256 |
R256 |
R256 ====|
u128 ====|
|-> 2 upsampling blocks
u64 ====|
c7s1-3 => Last conv block with `tanh` activation, filter size of 7.
End of explanation
def get_discriminator(
filters=64, kernel_initializer=kernel_init, num_downsampling=3, name=None
):
img_input = layers.Input(shape=input_img_size, name=name + "_img_input")
x = layers.Conv2D(
filters,
(4, 4),
strides=(2, 2),
padding="same",
kernel_initializer=kernel_initializer,
)(img_input)
x = layers.LeakyReLU(0.2)(x)
num_filters = filters
for num_downsample_block in range(3):
num_filters *= 2
if num_downsample_block < 2:
x = downsample(
x,
filters=num_filters,
activation=layers.LeakyReLU(0.2),
kernel_size=(4, 4),
strides=(2, 2),
)
else:
x = downsample(
x,
filters=num_filters,
activation=layers.LeakyReLU(0.2),
kernel_size=(4, 4),
strides=(1, 1),
)
x = layers.Conv2D(
1, (4, 4), strides=(1, 1), padding="same", kernel_initializer=kernel_initializer
)(x)
model = keras.models.Model(inputs=img_input, outputs=x, name=name)
return model
# Get the generators
gen_G = get_resnet_generator(name="generator_G")
gen_F = get_resnet_generator(name="generator_F")
# Get the discriminators
disc_X = get_discriminator(name="discriminator_X")
disc_Y = get_discriminator(name="discriminator_Y")
Explanation: Build the discriminators
The discriminators implement the following architecture:
C64->C128->C256->C512
End of explanation
class CycleGan(keras.Model):
def __init__(
self,
generator_G,
generator_F,
discriminator_X,
discriminator_Y,
lambda_cycle=10.0,
lambda_identity=0.5,
):
super(CycleGan, self).__init__()
self.gen_G = generator_G
self.gen_F = generator_F
self.disc_X = discriminator_X
self.disc_Y = discriminator_Y
self.lambda_cycle = lambda_cycle
self.lambda_identity = lambda_identity
def compile(
self,
gen_G_optimizer,
gen_F_optimizer,
disc_X_optimizer,
disc_Y_optimizer,
gen_loss_fn,
disc_loss_fn,
):
super(CycleGan, self).compile()
self.gen_G_optimizer = gen_G_optimizer
self.gen_F_optimizer = gen_F_optimizer
self.disc_X_optimizer = disc_X_optimizer
self.disc_Y_optimizer = disc_Y_optimizer
self.generator_loss_fn = gen_loss_fn
self.discriminator_loss_fn = disc_loss_fn
self.cycle_loss_fn = keras.losses.MeanAbsoluteError()
self.identity_loss_fn = keras.losses.MeanAbsoluteError()
def train_step(self, batch_data):
# x is Horse and y is zebra
real_x, real_y = batch_data
# For CycleGAN, we need to calculate different
# kinds of losses for the generators and discriminators.
# We will perform the following steps here:
#
# 1. Pass real images through the generators and get the generated images
# 2. Pass the generated images back to the generators to check if we
# we can predict the original image from the generated image.
# 3. Do an identity mapping of the real images using the generators.
# 4. Pass the generated images in 1) to the corresponding discriminators.
        # 5. Calculate the generators' total loss (adversarial + cycle + identity)
# 6. Calculate the discriminators loss
# 7. Update the weights of the generators
# 8. Update the weights of the discriminators
# 9. Return the losses in a dictionary
with tf.GradientTape(persistent=True) as tape:
# Horse to fake zebra
fake_y = self.gen_G(real_x, training=True)
# Zebra to fake horse -> y2x
fake_x = self.gen_F(real_y, training=True)
# Cycle (Horse to fake zebra to fake horse): x -> y -> x
cycled_x = self.gen_F(fake_y, training=True)
# Cycle (Zebra to fake horse to fake zebra) y -> x -> y
cycled_y = self.gen_G(fake_x, training=True)
# Identity mapping
same_x = self.gen_F(real_x, training=True)
same_y = self.gen_G(real_y, training=True)
# Discriminator output
disc_real_x = self.disc_X(real_x, training=True)
disc_fake_x = self.disc_X(fake_x, training=True)
disc_real_y = self.disc_Y(real_y, training=True)
disc_fake_y = self.disc_Y(fake_y, training=True)
            # Generator adversarial loss
gen_G_loss = self.generator_loss_fn(disc_fake_y)
gen_F_loss = self.generator_loss_fn(disc_fake_x)
# Generator cycle loss
cycle_loss_G = self.cycle_loss_fn(real_y, cycled_y) * self.lambda_cycle
cycle_loss_F = self.cycle_loss_fn(real_x, cycled_x) * self.lambda_cycle
# Generator identity loss
id_loss_G = (
self.identity_loss_fn(real_y, same_y)
* self.lambda_cycle
* self.lambda_identity
)
id_loss_F = (
self.identity_loss_fn(real_x, same_x)
* self.lambda_cycle
* self.lambda_identity
)
# Total generator loss
total_loss_G = gen_G_loss + cycle_loss_G + id_loss_G
total_loss_F = gen_F_loss + cycle_loss_F + id_loss_F
# Discriminator loss
disc_X_loss = self.discriminator_loss_fn(disc_real_x, disc_fake_x)
disc_Y_loss = self.discriminator_loss_fn(disc_real_y, disc_fake_y)
# Get the gradients for the generators
grads_G = tape.gradient(total_loss_G, self.gen_G.trainable_variables)
grads_F = tape.gradient(total_loss_F, self.gen_F.trainable_variables)
# Get the gradients for the discriminators
disc_X_grads = tape.gradient(disc_X_loss, self.disc_X.trainable_variables)
disc_Y_grads = tape.gradient(disc_Y_loss, self.disc_Y.trainable_variables)
# Update the weights of the generators
self.gen_G_optimizer.apply_gradients(
zip(grads_G, self.gen_G.trainable_variables)
)
self.gen_F_optimizer.apply_gradients(
zip(grads_F, self.gen_F.trainable_variables)
)
# Update the weights of the discriminators
self.disc_X_optimizer.apply_gradients(
zip(disc_X_grads, self.disc_X.trainable_variables)
)
self.disc_Y_optimizer.apply_gradients(
zip(disc_Y_grads, self.disc_Y.trainable_variables)
)
return {
"G_loss": total_loss_G,
"F_loss": total_loss_F,
"D_X_loss": disc_X_loss,
"D_Y_loss": disc_Y_loss,
}
Explanation: Build the CycleGAN model
We will override the train_step() method of the Model class
for training via fit().
End of explanation
class GANMonitor(keras.callbacks.Callback):
    """A callback to generate and save images after each epoch."""
def __init__(self, num_img=4):
self.num_img = num_img
def on_epoch_end(self, epoch, logs=None):
_, ax = plt.subplots(4, 2, figsize=(12, 12))
for i, img in enumerate(test_horses.take(self.num_img)):
prediction = self.model.gen_G(img)[0].numpy()
prediction = (prediction * 127.5 + 127.5).astype(np.uint8)
img = (img[0] * 127.5 + 127.5).numpy().astype(np.uint8)
ax[i, 0].imshow(img)
ax[i, 1].imshow(prediction)
ax[i, 0].set_title("Input image")
ax[i, 1].set_title("Translated image")
ax[i, 0].axis("off")
ax[i, 1].axis("off")
prediction = keras.preprocessing.image.array_to_img(prediction)
prediction.save(
"generated_img_{i}_{epoch}.png".format(i=i, epoch=epoch + 1)
)
plt.show()
plt.close()
Explanation: Create a callback that periodically saves generated images
End of explanation
# Loss function for evaluating adversarial loss
adv_loss_fn = keras.losses.MeanSquaredError()
# Define the loss function for the generators
def generator_loss_fn(fake):
fake_loss = adv_loss_fn(tf.ones_like(fake), fake)
return fake_loss
# Define the loss function for the discriminators
def discriminator_loss_fn(real, fake):
real_loss = adv_loss_fn(tf.ones_like(real), real)
fake_loss = adv_loss_fn(tf.zeros_like(fake), fake)
return (real_loss + fake_loss) * 0.5
# Create cycle gan model
cycle_gan_model = CycleGan(
generator_G=gen_G, generator_F=gen_F, discriminator_X=disc_X, discriminator_Y=disc_Y
)
# Compile the model
cycle_gan_model.compile(
gen_G_optimizer=keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5),
gen_F_optimizer=keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5),
disc_X_optimizer=keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5),
disc_Y_optimizer=keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5),
gen_loss_fn=generator_loss_fn,
disc_loss_fn=discriminator_loss_fn,
)
# Callbacks
plotter = GANMonitor()
checkpoint_filepath = "./model_checkpoints/cyclegan_checkpoints.{epoch:03d}"
model_checkpoint_callback = keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath
)
# Here we will train the model for just one epoch as each epoch takes around
# 7 minutes on a single P100 backed machine.
cycle_gan_model.fit(
tf.data.Dataset.zip((train_horses, train_zebras)),
epochs=1,
callbacks=[plotter, model_checkpoint_callback],
)
Explanation: Train the end-to-end model
End of explanation
# This model was trained for 90 epochs. We will be loading those weights
# here. Once the weights are loaded, we will take a few samples from the test
# data and check the model's performance.
!curl -LO https://github.com/AakashKumarNain/CycleGAN_TF2/releases/download/v1.0/saved_checkpoints.zip
!unzip -qq saved_checkpoints.zip
# Load the checkpoints
weight_file = "./saved_checkpoints/cyclegan_checkpoints.090"
cycle_gan_model.load_weights(weight_file).expect_partial()
print("Weights loaded successfully")
_, ax = plt.subplots(4, 2, figsize=(10, 15))
for i, img in enumerate(test_horses.take(4)):
prediction = cycle_gan_model.gen_G(img, training=False)[0].numpy()
prediction = (prediction * 127.5 + 127.5).astype(np.uint8)
img = (img[0] * 127.5 + 127.5).numpy().astype(np.uint8)
ax[i, 0].imshow(img)
ax[i, 1].imshow(prediction)
ax[i, 0].set_title("Input image")
ax[i, 1].set_title("Translated image")
ax[i, 0].axis("off")
ax[i, 1].axis("off")
prediction = keras.preprocessing.image.array_to_img(prediction)
prediction.save("predicted_img_{i}.png".format(i=i))
plt.tight_layout()
plt.show()
Explanation: Test the performance of the model.
You can use the trained model hosted on the Hugging Face Hub and try the demo on Hugging Face Spaces.
End of explanation |
11,340 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Annotated PMI Keywords with Categories
In this notebook we evaluate the differences in PMI keywords for each gender. We load the data from the previous notebooks; recall that we load PMI data with annotated categories. We test differences in those categories using a chi-square test.
Code by Eduardo Graells-Garrido.
Annotation by Claudia Wagner and Eduardo Graells-Garrido
Step1: Load Data
Some bigrams include apostrophes at the end. Here we recover their original form.
Step2: Here we load the DataFrame from the previous notebook. Note that there is an additional column cat.
Step3: Test Proportions and Effect Size
We test proportions of categories for each gender and then we estimate Conhen's w as effect size.
Step4: Word clouds
To visualize and explore the distributions of words per category we use word clouds. In particular we use the matta library. | Python Code:
import pandas as pd
import re
import numpy as np
import dbpedia_config
from scipy.stats import chisquare
target_folder = dbpedia_config.TARGET_FOLDER
Explanation: Annotated PMI Keywords with Categories
In this notebook we evaluate the differences in PMI keywords for each gender. We load the data from the previous notebooks; recall that we load PMI data with annotated categories. We test differences in those categories using a chi-square test.
Code by Eduardo Graells-Garrido.
Annotation by Claudia Wagner and Eduardo Graells-Garrido
End of explanation
apost = re.compile('_s$')
Explanation: Load Data
Some bigrams include apostrophes at the end. Here we recover their original form.
End of explanation
female_pmi = pd.read_csv('{0}/top-200-pmi-female.csv'.format(target_folder), encoding='utf-8')
female_pmi.word = female_pmi.word.map(lambda x: apost.sub('\'s', x))
female_pmi.head()
female_pmi.cat.value_counts() / female_pmi.shape[0] * 100.0
male_pmi = pd.read_csv('{0}/top-200-pmi-male.csv'.format(target_folder), encoding='utf-8')
male_pmi.word = male_pmi.word.map(lambda x: apost.sub('\'s', x))
male_pmi.head()
male_pmi.cat.value_counts() / male_pmi.shape[0] * 100.0
Explanation: Here we load the DataFrame from the previous notebook. Note that there is an additional column cat.
End of explanation
m_proportions = []
f_proportions = []
m_count = male_pmi.cat.value_counts() / male_pmi.shape[0] * 100.0
f_count = female_pmi.cat.value_counts() / female_pmi.shape[0] * 100.0
for c in ('F', 'G', 'O', 'R'):
m_proportions.append(m_count[c] if c in m_count.index else 0.0)
f_proportions.append(f_count[c] if c in f_count.index else 0.0)
m_proportions, f_proportions
chisquare(m_proportions, f_proportions)
p0 = np.array(m_proportions)
p1 = np.array(f_proportions)
np.sqrt(np.sum(np.power(p1 - p0, 2) / p1))
Explanation: Test Proportions and Effect Size
We test the proportions of categories for each gender and then we estimate Cohen's w as the effect size.
End of explanation
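For reference, the effect size computed in the last code line above corresponds to
$$w = \sqrt{\sum_{i}\frac{(p_{1i} - p_{0i})^{2}}{p_{1i}}}$$
with $p_0$ the male and $p_1$ the female category proportions; that is, the snippet above takes the female distribution as the reference in the denominator.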
import matta
matta.init_javascript(path='https://rawgit.com/carnby/matta/master/matta/libs')
matta.wordcloud(dataframe=female_pmi.loc[:, ('word', 'pmi_female', 'cat')], text='word',
typeface='Lato', font_weight='bold',
font_size={'value': 'pmi_female'},
font_color={'palette': 'Set2', 'n_colors': 4, 'value': 'cat', 'scale': 'ordinal'})
matta.wordcloud(dataframe=male_pmi.loc[:, ('word', 'pmi_male', 'cat')], text='word',
typeface='Lato', font_weight='bold',
font_size={'value': 'pmi_male'},
font_color={'palette': 'Set2', 'n_colors': 4, 'value': 'cat', 'scale': 'ordinal'})
Explanation: Word clouds
To visualize and explore the distributions of words per category we use word clouds. In particular we use the matta library.
End of explanation |
11,341 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Analysis on the Movie Lens dataset using pandas
I am creating the notebook for the mini project for course DSE200x - Python for Data Science on edX. The project requires each participant to complete the following steps
Step1: Exploring the dataset
Identifying the questions that can be answered using the dataset
Step2: Based on the above exploratory commands, I believe that the following questions can be answered using the dataset
Step3: Now that we have a data frame of information about each genre and the corresponding mean rating, we will visualize the data using matplotlib
Step4: Reporting findings/analyses
Now that we have a couple plots, let us revisit the question we want to answer using the dataset.
Again, the question is - Do science fiction movies tend to be rated more highly than other movie genres?
The scatter plot shows the mean rating value for each genre. Each genre has a value on the scatter plot for the mean rating value for that genre. Let us now see if the plot is able to help us answer the question above.
The mean rating for Sci-Fi genre is about 3.45. When looking at the plot, we see that there are only three other genres out of 18 genres in total, that have lesser mean ratings than Sci-Fi - Horror, Children and Comedy. The remaining 10 genres have mean ratings higher than Science Fiction.
This gives us enough information to answer the question. Sci-Fi movies do not tend to be rated higher than other genres.
The second plot, a bar plot, shows how much each genre's ratings deviate from the overall mean of ratings. Science Fiction is around -0.13 lower than the mean rating of 3.58, showing lesser deviation than Horror at the lower end and Film-Noir at the higher end.
To conclude - no, science fiction movies are not rated higher than other movie genres. The ratings for science fiction movies hover around the mean ratings for all movies.
I have submitted my work to the mini project section of the course. Now, we will explore the dataset further and try to answer the remaining questions I have listed at the beginning of the notebook.
- Is there a correlation or a trend between the year of release of a movie and the genre?
- Which genres were more dominant in each decade of the range available in the dataset?
Step5: Now that we have a move year column, let us list the data types of the columns in the movies data frame.
movie_year is of float64 datat type. We must convert the data type of the movie_year column to int64. Before we go ahead and do that, we must replace all NULL and inifinite entries in the column with zero. If we do not perform this step, we will get the following errror message.
Step6: The above plot provides some interesting insight | Python Code:
# The first step is to import the dataset into a pandas dataframe.
import pandas as pd
#path = 'C:/Users/hrao/Documents/Personal/HK/Python/ml-20m/ml-20m/'
path = '/Users/Harish/Documents/HK_Work/Python/ml-20m/'
movies = pd.read_csv(path+'movies.csv')
movies.shape
tags = pd.read_csv(path+'tags.csv')
tags.shape
ratings = pd.read_csv(path+'ratings.csv')
ratings.shape
links = pd.read_csv(path+'links.csv')
links.shape
Explanation: Analysis on the Movie Lens dataset using pandas
I am creating the notebook for the mini project for course DSE200x - Python for Data Science on edX. The project requires each participant to complete the following steps:
Selecting a dataset
Exploring the dataset to identify what kinds of questions can be answered using the dataset
Identifying one research question
Using pandas methods to explore the dataset - this also involves using visualization techniques using matplotlib
Reporting findings/analyses
Presenting the work in the given presentation template
Selecting a dataset
The mini project requires us to choose from among three datasets that have been explored through the course previously. I have selected the MovieLens dataset, also known as the IMDB Movie Dataset.
The dataset is available for download here - https://grouplens.org/datasets/movielens/20m/
Description about the dataset, as shown on the website is below:
This dataset (ml-20m) describes 5-star rating and free-text tagging activity from MovieLens, a movie recommendation service. It contains 20000263 ratings and 465564 tag applications across 27278 movies. These data were created by 138493 users between January 09, 1995 and March 31, 2015. This dataset was generated on October 17, 2016.
Users were selected at random for inclusion. All selected users had rated at least 20 movies. No demographic information is included. Each user is represented by an id, and no other information is provided.
The data are contained in six files, genome-scores.csv, genome-tags.csv, links.csv, movies.csv, ratings.csv and tags.csv. More details about the contents and use of all these files follows.
This and other GroupLens data sets are publicly available for download at http://grouplens.org/datasets/.
End of explanation
movies.head()
tags.head()
ratings.head()
links.head()
Explanation: Exploring the dataset
Identifying the questions that can be answered using the dataset
End of explanation
# List of genres as a Python list
genres = ['Action','Adventure','Animation','Children','Comedy','Crime','Documentary','Drama','Fantasy','Film-Noir','Horror','Musical','Mystery','Romance','Sci-Fi','Thriller','War','Western']
genres_rating_list = []
# The loop reads each element of the above list
# For each iteration, one genre is selected from the movies data frame
# This selection of the data frame is then merged with the rating data frame to get the rating for that genre
# Once the new merged data frame is created, we use the mean function to get the mean rating for the genre
# The genre and the corresponding mean rating are then appended to the genres_rating Data Frame
# The entire looping takes long - can certainly be optimized for performance
for i in range(len(genres)):
fil = genres[i]+'_filter'
mov = genres[i]+'_movies'
rat = genres[i]+'_ratings'
rat_mean = rat+'_mean'
fil = movies['genres'].str.contains(genres[i])
mov = movies[fil]
rat = mov.merge(ratings, on='movieId', how='inner')
rat_mean = round(rat['rating'].mean(), 2)
#print(genres[i], round(rat_mean,2))
genres_rating_list.append(rat_mean)
df = {'Genre':genres, 'Genres Mean Rating':genres_rating_list}
genres_rating = pd.DataFrame(df)
genres_rating
genres_rating['Genres Standard Deviation'] = genres_rating['Genres Mean Rating'].std()
genres_rating['Mean'] = genres_rating['Genres Mean Rating'].mean()
genres_rating['Zero'] = 0
genres_rating
overall_mean = round(genres_rating['Genres Mean Rating'].mean(), 2)
overall_std = round(genres_rating['Genres Mean Rating'].std(),2)
scifi_rating = genres_rating[genres_rating['Genre'] == 'Sci-Fi']['Genres Mean Rating']
print(overall_mean)
print(overall_std)
print(scifi_rating)
genres_rating['Diff from Mean'] = genres_rating['Genres Mean Rating'] - overall_mean
genres_rating
Explanation: Based on the above exploratory commands, I believe that the following questions can be answered using the dataset:
Is there a correlation or a trend between the year of release of a movie and the genre?
Which genres were more dominant in each decade of the range available in the dataset?
Do science fiction movies tend to be rated more highly than other movie genres?
For the mini-project, I have chosen question 3 for further analysis.
Using pandas methods to explore the dataset
Includes matplotlib visualization
End of explanation
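As the comment in the genre loop above notes, the per-genre iteration is slow. A hedged sketch of a vectorized alternative (assuming a pandas version that provides DataFrame.explode) is shown here before moving on to the plots.
# Sketch: split the pipe-separated genres, explode to one row per (movie, genre),
# join the ratings once, then aggregate -- instead of merging per genre in a loop.
movie_genres = movies.assign(genre=movies['genres'].str.split('|')).explode('genre')
genre_mean_ratings = (movie_genres.merge(ratings, on='movieId', how='inner')
                      .groupby('genre')['rating'].mean().round(2))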
genre_list = list(genres_rating['Genre'])
genres_rating_list = list(genres_rating['Genres Mean Rating'])
genres_diff_list = list(genres_rating['Diff from Mean'])
%matplotlib inline
import matplotlib.pyplot as plt
plt.figure(figsize=(20, 10))
ax1 = plt.subplot(2,1,1)
x = [x for x in range(0, 18)]
xticks_genre_list = genre_list
y = genres_rating_list
plt.xticks(range(len(x)), xticks_genre_list)
plt.scatter(x,y, color='g')
plt.plot(x, genres_rating['Mean'], color="red")
plt.autoscale(tight=True)
#plt.rcParams["figure.figsize"] = (10,2)
plt.title('Movie ratings by genre')
plt.xlabel('Genre')
plt.ylabel('Rating')
plt.ylim(ymax = 4, ymin = 3)
plt.grid(True)
plt.savefig(r'movie-ratings-by-genre.png')
plt.annotate("Sci-Fi Rating",
xy=(14.25,3.5), xycoords='data',
xytext=(14.20, 3.7), textcoords='data',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
)
for i,j in enumerate( y ):
ax1.annotate( j, ( x[i] + 0.03, y[i] + 0.02))
ax2 = plt.subplot(2,1,2)
x = [x for x in range(0, 18)]
xticks_genre_list = genre_list
y = genres_rating['Diff from Mean']
plt.xticks(range(len(x)), xticks_genre_list)
plt.plot(x,y)
plt.plot(x, genres_rating['Zero'])
plt.autoscale(tight=True)
#plt.rcParams["figure.figsize"] = (10,2)
plt.title('Deviation of each genre\'s rating from the overall mean rating')
plt.xlabel('Genre')
plt.ylabel('Deviation from mean rating')
plt.grid(True)
plt.savefig(r'deviation-from-mean-rating.png')
plt.annotate("Sci-Fi Rating",
xy=(14,-0.13), xycoords='data',
xytext=(14.00, 0.0), textcoords='data',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
)
plt.show()
Explanation: Now that we have a data frame of information about each genre and the corresponding mean rating, we will visualize the data using matplotlib
End of explanation
# extract year of release of each movie from the title column
# convert the data type of the movie_year column to numeric (from str)
import numpy as np
import re
movies['movie_year'] = movies['title']
movies['movie_year'] = movies['movie_year'].str.extract(r"\(([0-9]+)\)", expand=False)
# creating a new column with just the movie titles
movies['title_only'] = movies['title']
movies['title_only'] = movies['title_only'].str.extract('(.*?)\s*\(', expand=False)
movies['movie_year'].fillna(0, inplace=True)
#Drop all rows containing incorrect year values - such as 0, 6, 69, 500 and -2147483648
movies.drop(movies[movies.movie_year == '0'].index, inplace=True)
movies.drop(movies[movies.movie_year == '6'].index, inplace=True)
movies.drop(movies[movies.movie_year == '06'].index, inplace=True)
movies.drop(movies[movies.movie_year == '69'].index, inplace=True)
movies.drop(movies[movies.movie_year == '500'].index, inplace=True)
movies.drop(movies[movies.movie_year == '-2147483648'].index, inplace=True)
movies.drop(movies[movies.movie_year == 0].index, inplace=True)
movies.drop(movies[movies.movie_year == 6].index, inplace=True)
movies.drop(movies[movies.movie_year == 69].index, inplace=True)
movies.drop(movies[movies.movie_year == 500].index, inplace=True)
movies.drop(movies[movies.movie_year == -2147483648].index, inplace=True)
#convert the string values to numeric
movies['movie_year'] = pd.to_datetime(movies['movie_year'], format='%Y')
Explanation: Reporting findings/analyses
Now that we have a couple plots, let us revisit the question we want to answer using the dataset.
Again, the question is - Do science fiction movies tend to be rated more highly than other movie genres?
The scatter plot shows the mean rating value for each genre. Let us now see if the plot is able to help us answer the question above.
The mean rating for the Sci-Fi genre is about 3.45. Looking at the plot, we see that there are only three other genres out of the 18 genres in total that have lower mean ratings than Sci-Fi - Horror, Children and Comedy. The remaining 14 genres have mean ratings higher than Science Fiction.
This gives us enough information to answer the question. Sci-Fi movies do not tend to be rated higher than other genres.
The second plot, a bar plot, shows how much each genre's ratings deviate from the overall mean of ratings. Science Fiction is around 0.13 lower than the mean rating of 3.58, showing less deviation than Horror at the lower end and Film-Noir at the higher end.
To conclude - no, science fiction movies are not rated higher than other movie genres. The ratings for science fiction movies hover around the mean ratings for all movies.
I have submitted my work to the mini project section of the course. Now, we will explore the dataset further and try to answer the remaining questions I have listed at the beginning of the notebook.
- Is there a correlation or a trend between the year of release of a movie and the genre?
- Which genres were more dominant in each decade of the range available in the dataset?
End of explanation
movie_year = pd.DataFrame(movies['title_only'].groupby(movies['movie_year']).count())
movie_year.reset_index(inplace=True)
X=movie_year['movie_year']
Y=movie_year['title_only']
plt.plot_date(X,Y,'bo-')
plt.grid(True)
plt.rcParams["figure.figsize"] = (15,5)
plt.title('Number of movies per year')
plt.xlabel('Years')
plt.ylabel('Number of movies')
plt.xlim('1885-01-01','2020-01-01')
plt.show()
Explanation: Now that we have a movie_year column, let us list the data types of the columns in the movies data frame.
movie_year is of float64 data type. We must convert the data type of the movie_year column to int64. Before we go ahead and do that, we must replace all NULL and infinite entries in the column with zero. If we do not perform this step, we will get the following error message.
End of explanation
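For reference, the int64 route described above would look roughly like the sketch below. It is hypothetical, applied to the raw extracted year column, since this notebook ultimately keeps the datetime conversion applied earlier instead.
# Hypothetical sketch of the int64 conversion described above (not run here).
movies['movie_year'] = (movies['movie_year']
                        .replace([np.inf, -np.inf], np.nan)
                        .fillna(0)
                        .astype('int64'))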
movies.head()
list(movies)
a = pd.Series(movies.iloc[0])
a
def flat(str1):
c = pd.DataFrame(columns=list(movies))
for i in range(len(str1)):
#print(str1[i])
if i == 2:
a = str1[i].split('|')
for j in range(len(a)):
c.loc[j] = [str1[0], str1[1], a[j], str1[3], str1[4]]
return c
c = flat(a)
c
Explanation: The above plot provides some interesting insight:
* There was a steady increase in the number of movies after 1930 and till 2008.
* In this dataset, 2009 was the year when the highest number of movies were produced - 1112 in all.
* The decades between 1970 and 2000 saw the highest year-on-year increase in the number of movies produced.
* 2014 saw a sharp drop in the number of movies produced, from 1011 in 2013 to only 740 movies.
* The movie count of 2015 is only 120. This could possibly be due to the lack of information available for the entire year of 2015.
End of explanation |
11,342 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<a id='top'> </a>
Author
Step1: Formatting for PyUnfold use
Table of contents
Define analysis free parameters
Data preprocessing
Fitting random forest
Fraction correctly identified
Spectrum
Unfolding
Feature importance
Step2: Define analysis free parameters
[ back to top ]
Whether or not to train on 'light' and 'heavy' composition classes, or the individual compositions
Step3: Get composition classifier pipeline
Define energy binning for this analysis
Step4: Data preprocessing
[ back to top ]
1. Load simulation/data dataframe and apply specified quality cuts
2. Extract desired features from dataframe
3. Get separate testing and training datasets
4. Feature transformation
Step5: Load fitted effective area
Step6: Format for PyUnfold response matrix use
Step7: Spectrum
[ back to top ]
Response matrix
Step8: Normalize response matrix column-wise (i.e. $P(E|C)$)
Step9: Priors array
Step10: Formatting for PyUnfold use
Step11: Save formatted DataFrame to disk | Python Code:
%load_ext watermark
%watermark -u -d -v -p numpy,matplotlib,scipy,pandas,sklearn,mlxtend
Explanation: <a id='top'> </a>
Author: James Bourbeau
End of explanation
from __future__ import division, print_function
import os
from collections import defaultdict
import numpy as np
from scipy.sparse import block_diag
import pandas as pd
import matplotlib.pyplot as plt
import seaborn.apionly as sns
import json
from scipy.interpolate import UnivariateSpline
from sklearn.metrics import accuracy_score, confusion_matrix, roc_curve, auc, classification_report
from sklearn.model_selection import cross_val_score, StratifiedShuffleSplit, KFold, StratifiedKFold
import comptools as comp
import comptools.analysis.plotting as plotting
color_dict = comp.analysis.get_color_dict()
%matplotlib inline
Explanation: Formatting for PyUnfold use
Table of contents
Define analysis free parameters
Data preprocessing
Fitting random forest
Fraction correctly identified
Spectrum
Unfolding
Feature importance
End of explanation
# config = 'IC79.2010'
config = 'IC86.2012'
num_groups = 4
comp_list = comp.get_comp_list(num_groups=num_groups)
comp_list
Explanation: Define analysis free parameters
[ back to top ]
Whether or not to train on 'light' and 'heavy' composition classes, or the individual compositions
End of explanation
energybins = comp.analysis.get_energybins(config=config)
Explanation: Get composition classifier pipeline
Define energy binning for this analysis
End of explanation
log_energy_min = energybins.log_energy_min
log_energy_max = energybins.log_energy_max
df_sim_train, df_sim_test = comp.load_sim(config=config, log_energy_min=log_energy_min, log_energy_max=log_energy_max)
df_sim_train.reco_log_energy.min(), df_sim_train.reco_log_energy.max()
log_reco_energy_sim_test = df_sim_test['reco_log_energy']
log_true_energy_sim_test = df_sim_test['MC_log_energy']
feature_list, feature_labels = comp.analysis.get_training_features()
pipeline_str = 'BDT_comp_{}_{}-groups'.format(config, num_groups)
pipeline = comp.get_pipeline(pipeline_str)
pipeline = pipeline.fit(df_sim_train[feature_list], df_sim_train['comp_target_{}'.format(num_groups)])
Explanation: Data preprocessing
[ back to top ]
1. Load simulation/data dataframe and apply specified quality cuts
2. Extract desired features from dataframe
3. Get separate testing and training datasets
4. Feature transformation
End of explanation
eff_path = os.path.join(comp.paths.comp_data_dir, config, 'efficiencies',
'efficiency_fit_num_groups_{}.hdf'.format(num_groups))
df_eff = pd.read_hdf(eff_path)
df_eff.head()
fig, ax = plt.subplots()
for composition in comp_list:
ax.errorbar(energybins.log_energy_midpoints, df_eff['eff_median_{}'.format(composition)],
yerr=[df_eff['eff_err_low_{}'.format(composition)],
df_eff['eff_err_high_{}'.format(composition)]],
color=color_dict[composition], label=composition, marker='.')
ax.axvline(6.4, marker='None', ls='-.', color='k')
ax.axvline(7.8, marker='None', ls='-.', color='k')
ax.set_xlabel('$\mathrm{\log_{10}(E_{true}/GeV)}$')
ax.set_ylabel('Detection efficienies')
ax.grid()
ax.legend()
ax.ticklabel_format(style='sci',axis='y')
ax.yaxis.major.formatter.set_powerlimits((0,0))
plt.show()
Explanation: Load fitted effective area
End of explanation
# efficiencies, efficiencies_err = [], []
# for idx, row in df_efficiency.iterrows():
# for composition in comp_list:
# efficiencies.append(row['eff_median_{}'.format(composition)])
# efficiencies_err.append(row['eff_err_low_{}'.format(composition)])
# efficiencies = np.asarray(efficiencies)
# efficiencies_err = np.asarray(efficiencies_err)
efficiencies, efficiencies_err = [], []
for idx, row in df_eff.iterrows():
for composition in comp_list:
efficiencies.append(row['eff_median_{}'.format(composition)])
efficiencies_err.append(row['eff_err_low_{}'.format(composition)])
efficiencies = np.asarray(efficiencies)
efficiencies_err = np.asarray(efficiencies_err)
efficiencies
df_data = comp.load_data(config=config, columns=feature_list,
log_energy_min=log_energy_min, log_energy_max=log_energy_max,
n_jobs=20, verbose=True)
df_data.shape
X_data = comp.dataframe_functions.dataframe_to_array(df_data, feature_list + ['reco_log_energy'])
log_energy_data = X_data[:, -1]
X_data = X_data[:, :-1]
log_energy_data.min(), log_energy_data.max()
data_predictions = pipeline.predict(X_data)
# Get composition masks
data_labels = np.array(comp.composition_encoding.decode_composition_groups(data_predictions, num_groups=num_groups))
# Get number of identified comp in each energy bin
unfolding_df = pd.DataFrame()
for composition in comp_list:
comp_mask = data_labels == composition
unfolding_df['counts_' + composition] = np.histogram(log_energy_data[comp_mask],
bins=energybins.log_energy_bins)[0]
unfolding_df['counts_' + composition + '_err'] = np.sqrt(unfolding_df['counts_' + composition])
unfolding_df['counts_total'] = np.histogram(log_energy_data, bins=energybins.log_energy_bins)[0]
unfolding_df['counts_total_err'] = np.sqrt(unfolding_df['counts_total'])
unfolding_df.index.rename('log_energy_bin_idx', inplace=True)
unfolding_df.head()
fig, ax = plt.subplots()
for composition in comp_list:
ax.plot(unfolding_df['counts_{}'.format(composition)], color=color_dict[composition])
ax.set_yscale("log", nonposy='clip')
ax.grid()
plt.show()
Explanation: Format for PyUnfold response matrix use
End of explanation
test_predictions = pipeline.predict(df_sim_test[feature_list])
true_comp = df_sim_test['comp_group_{}'.format(num_groups)].values
pred_comp = np.array(comp.composition_encoding.decode_composition_groups(test_predictions,
num_groups=num_groups))
true_comp
true_ebin_idxs = np.digitize(log_true_energy_sim_test, energybins.log_energy_bins) - 1
reco_ebin_idxs = np.digitize(log_reco_energy_sim_test, energybins.log_energy_bins) - 1
energy_bin_idx = np.unique(true_ebin_idxs)
print(range(-1, len(energybins.log_energy_midpoints)+1))
hstack_list = []
# for true_ebin_idx in energy_bin_idx:
for true_ebin_idx in range(-1, len(energybins.log_energy_midpoints)+1):
if (true_ebin_idx == -1) or (true_ebin_idx == energybins.energy_midpoints.shape[0]):
continue
true_ebin_mask = true_ebin_idxs == true_ebin_idx
vstack_list = []
# for reco_ebin_idx in energy_bin_idx:
for reco_ebin_idx in range(-1, len(energybins.log_energy_midpoints)+1):
if (reco_ebin_idx == -1) or (reco_ebin_idx == energybins.energy_midpoints.shape[0]):
continue
reco_ebin_mask = reco_ebin_idxs == reco_ebin_idx
combined_mask = true_ebin_mask & reco_ebin_mask
if combined_mask.sum() == 0:
response_mat = np.zeros((num_groups, num_groups), dtype=int)
else:
response_mat = confusion_matrix(true_comp[true_ebin_mask & reco_ebin_mask],
pred_comp[true_ebin_mask & reco_ebin_mask],
labels=comp_list)
# Transpose response matrix to get MC comp on x-axis and reco comp on y-axis
response_mat = response_mat.T
vstack_list.append(response_mat)
hstack_list.append(np.vstack(vstack_list))
res = np.hstack(hstack_list)
res_err = np.sqrt(res)
res.shape
plt.imshow(res, origin='lower')
from itertools import product
num_groups = len(comp_list)
num_ebins = len(energybins.log_energy_midpoints)
e_bin_iter = product(range(num_ebins), range(num_ebins))
res2 = np.zeros((num_ebins * num_groups, num_ebins * num_groups), dtype=int)
for true_ebin_idx, reco_ebin_idx in e_bin_iter:
# print(true_ebin_idx, reco_ebin_idx)
true_ebin_mask = true_ebin_idxs == true_ebin_idx
reco_ebin_mask = reco_ebin_idxs == reco_ebin_idx
ebin_mask = true_ebin_mask & reco_ebin_mask
if ebin_mask.sum() == 0:
continue
else:
response_mat = confusion_matrix(true_comp[ebin_mask],
pred_comp[ebin_mask],
labels=comp_list)
# Transpose response matrix to get MC comp on x-axis
# and reco comp on y-axis
# response_mat = np.flipud(response_mat)
response_mat = response_mat.T
res2[num_groups * reco_ebin_idx : num_groups * (reco_ebin_idx + 1),
num_groups * true_ebin_idx : num_groups * (true_ebin_idx + 1)] = response_mat
plt.imshow(res2, origin='lower')
np.testing.assert_array_equal(res2, res)
reco_ebin_idx = 4
true_ebin_idx = 4
plt.imshow(res2[num_groups * reco_ebin_idx : num_groups * (reco_ebin_idx + 1),
num_groups * true_ebin_idx : num_groups * (true_ebin_idx + 1)],
origin='lower')
Explanation: Spectrum
[ back to top ]
Response matrix
End of explanation
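A short note on the indexing used when building the block matrix above: each axis interleaves the composition groups within each energy bin, so an (energy bin, composition) pair maps to a flat row or column index as in this small sketch (rows run over reconstructed bins, columns over true bins).
# Flat index convention used by the block response matrix above.
def flat_index(ebin_idx, group_idx, n_groups=num_groups):
    return n_groups * ebin_idx + group_idx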
res_col_sum = res.sum(axis=0)
res_col_sum_err = np.array([np.sqrt(np.nansum(res_err[:, i]**2)) for i in range(res_err.shape[1])])
normalizations, normalizations_err = comp.analysis.ratio_error(res_col_sum, res_col_sum_err,
efficiencies, efficiencies_err,
nan_to_num=True)
res_normalized, res_normalized_err = comp.analysis.ratio_error(res, res_err,
normalizations, normalizations_err,
nan_to_num=True)
res_normalized = np.nan_to_num(res_normalized)
res_normalized_err = np.nan_to_num(res_normalized_err)
np.testing.assert_allclose(res_normalized.sum(axis=0), efficiencies)
res
fig, ax = plt.subplots()
# h = np.flipud(block_response)
idx = 4*num_groups
sns.heatmap(res[idx:idx+num_groups, idx:idx+num_groups], annot=True, fmt='d', ax=ax, square=True,
xticklabels=comp_list, yticklabels=comp_list,
cbar_kws={'label': 'Counts'}, vmin=0, cmap='viridis')
ax.invert_yaxis()
plt.xlabel('True composition')
plt.ylabel('Pred composition')
plt.title('$\mathrm{7.6 < \log_{10}(E_{true}/GeV) < 7.7}$' + '\n$\mathrm{7.6 < \log_{10}(E_{reco}/GeV) < 7.7}$')
# res_mat_outfile = os.path.join(comp.paths.figures_dir, 'unfolding', 'response-matrix-single-energy-bin.png')
# comp.check_output_dir(res_mat_outfile)
# plt.savefig(res_mat_outfile)
plt.show()
plt.imshow(res, origin='lower', cmap='viridis')
plt.plot([0, res.shape[0]-1], [0, res.shape[1]-1], marker='None', ls=':', color='C1')
# ax = sns.heatmap(res, square=True, xticklabels=2, yticklabels=2,
# ax = sns.heatmap(res, square=True, mask=res==0, xticklabels=2, yticklabels=2,
# cbar_kws={'label': 'Counts'})
ax.plot([0, res.shape[0]-1], [0, res.shape[1]-1], marker='None', ls=':', color='C1')
# ax.invert_yaxis()
for i in np.arange(0, res.shape[0], 2):
plt.axvline(i-0.5, marker='None', ls='-', lw=0.5, color='gray')
# for i in np.arange(0, res.shape[0], 2):
# plt.axvline(i+0.5, marker='None', ls=':', color='gray')
for i in np.arange(0, res.shape[0], 2):
plt.axhline(i-0.5, marker='None', ls='-', lw=0.5, color='gray')
# for i in np.arange(0, res.shape[0], 2):
# plt.axhline(i+0.5, marker='None', ls=':', color='gray')
plt.xlabel('True bin')
plt.ylabel('Reconstructed bin')
# plt.grid()
# plt.xticks(np.arange(0.5, res.shape[0], 2),
# ['{}'.format(i+1) for i in range(res.shape[0])],
# rotation='vertical')
# plt.yticks(np.arange(0.5, res.shape[0], 2),
# ['{}'.format(i+1) for i in range(res.shape[0])])
plt.colorbar(label='Counts')
res_mat_outfile = os.path.join(comp.paths.figures_dir, 'unfolding', 'response-statistics.png')
comp.check_output_dir(res_mat_outfile)
# plt.savefig(res_mat_outfile)
plt.show()
plt.imshow(np.sqrt(res), origin='lower', cmap='viridis')
plt.plot([0, res.shape[0]-1], [0, res.shape[1]-1], marker='None', ls=':', color='C1')
for i in np.arange(0, res.shape[0], 2):
plt.axvline(i-0.5, marker='None', ls='-', lw=0.5, color='gray')
for i in np.arange(0, res.shape[0], 2):
plt.axhline(i-0.5, marker='None', ls='-', lw=0.5, color='gray')
plt.xlabel('True bin')
plt.ylabel('Reconstructed bin')
plt.colorbar(label='Count errors', format='%d')
res_mat_outfile = os.path.join(comp.paths.figures_dir, 'unfolding', 'response-statistics-err.png')
comp.check_output_dir(res_mat_outfile)
# plt.savefig(res_mat_outfile)
plt.show()
plt.imshow(res_normalized, origin='lower', cmap='viridis')
plt.plot([0, res.shape[0]-1], [0, res.shape[1]-1], marker='None', ls=':', color='C1')
# for i in np.arange(0, res.shape[0], 2):
# plt.axvline(i-0.5, marker='None', ls='-', lw=0.5, color='gray')
# for i in np.arange(0, res.shape[0], 2):
# plt.axhline(i-0.5, marker='None', ls='-', lw=0.5, color='gray')
plt.xlabel('True bin')
plt.ylabel('Reconstructed bin')
plt.title('Response matrix')
# plt.colorbar(label='A.U.')
plt.colorbar(label='$\mathrm{P(E_i|C_{\mu})}$')
res_mat_outfile = os.path.join(comp.paths.figures_dir, 'unfolding', config, 'response_matrix',
'response-matrix_{}-groups.png'.format(num_groups))
comp.check_output_dir(res_mat_outfile)
plt.savefig(res_mat_outfile)
plt.show()
plt.imshow(res_normalized_err, origin='lower', cmap='viridis')
plt.plot([0, res.shape[0]-1], [0, res.shape[1]-1], marker='None', ls=':', color='C1')
# for i in np.arange(0, res.shape[0], 2):
# plt.axvline(i-0.5, marker='None', ls='-', lw=0.5, color='gray')
# for i in np.arange(0, res.shape[0], 2):
# plt.axhline(i-0.5, marker='None', ls='-', lw=0.5, color='gray')
plt.xlabel('True bin')
plt.ylabel('Reconstructed bin')
plt.title('Response matrix error')
plt.colorbar(label='$\mathrm{\delta P(E_i|C_{\mu})}$')
res_mat_outfile = os.path.join(comp.paths.figures_dir, 'unfolding', 'response-matrix-err.png')
comp.check_output_dir(res_mat_outfile)
# plt.savefig(res_mat_outfile)
plt.show()
res_mat_outfile = os.path.join(comp.paths.comp_data_dir, config, 'unfolding',
'response_{}-groups.txt'.format(num_groups))
res_mat_err_outfile = os.path.join(comp.paths.comp_data_dir, config, 'unfolding',
'response_err_{}-groups.txt'.format(num_groups))
comp.check_output_dir(res_mat_outfile)
comp.check_output_dir(res_mat_err_outfile)
np.savetxt(res_mat_outfile, res_normalized)
np.savetxt(res_mat_err_outfile, res_normalized_err)
Explanation: Normalize response matrix column-wise (i.e. $P(E|C)$)
End of explanation
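In other words, the normalization above scales each column of raw counts $R_{i\mu}$ so that it sums to the detection efficiency of that (true energy bin, composition) column:
$$P(E_i\,|\,C_{\mu}) \approx \epsilon_{\mu}\,\frac{R_{i\mu}}{\sum_{j} R_{j\mu}}$$
which is exactly what the assertion on res_normalized.sum(axis=0) checks.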
from icecube.weighting.weighting import from_simprod, PDGCode, ParticleType
from icecube.weighting.fluxes import GaisserH3a, GaisserH4a, Hoerandel5, Hoerandel_IT, CompiledFlux
df_sim = comp.load_sim(config=config, test_size=0, log_energy_min=6.0, log_energy_max=8.3)
df_sim.head()
p = PDGCode().values
pdg_codes = np.array([2212, 1000020040, 1000080160, 1000260560])
particle_names = [p[pdg_code].name for pdg_code in pdg_codes]
particle_names
group_names = np.array(comp.composition_encoding.composition_group_labels(particle_names, num_groups=num_groups))
group_names
comp_to_pdg_list = {composition: pdg_codes[group_names == composition] for composition in comp_list}
comp_to_pdg_list
# Replace O16Nucleus with N14Nucleus + Al27Nucleus
for composition, pdg_list in comp_to_pdg_list.iteritems():
if 1000080160 in pdg_list:
pdg_list = pdg_list[pdg_list != 1000080160]
comp_to_pdg_list[composition] = np.append(pdg_list, [1000070140, 1000130270])
else:
continue
comp_to_pdg_list
priors_list = ['H3a', 'H4a', 'Polygonato']
# priors_list = ['h3a', 'h4a', 'antih3a', 'Hoerandel5', 'antiHoerandel5']
# # priors_list = ['h3a', 'h4a', 'antih3a', 'Hoerandel5', 'antiHoerandel5', 'uniform', 'alllight', 'allheavy']
# model_ptypes = {}
# model_ptypes['h3a'] = {'light': [2212, 1000020040], 'heavy': [1000070140, 1000130270, 1000260560]}
# model_ptypes['h4a'] = {'light': [2212, 1000020040], 'heavy': [1000070140, 1000130270, 1000260560]}
# model_ptypes['Hoerandel5'] = {'light': [2212, 1000020040], 'heavy': [1000070140, 1000130270, 1000260560]}
fig, ax = plt.subplots()
for flux, name, marker in zip([GaisserH3a(), GaisserH4a(), Hoerandel5()],
priors_list,
'.^*o'):
for composition in comp_list:
comp_flux = []
for energy_mid in energybins.energy_midpoints:
flux_energy_mid = flux(energy_mid, comp_to_pdg_list[composition]).sum()
comp_flux.append(flux_energy_mid)
# Normalize flux in each energy bin to a probability
comp_flux = np.asarray(comp_flux)
prior_key = '{}_flux_{}'.format(name, composition)
unfolding_df[prior_key] = comp_flux
# Plot result
ax.plot(energybins.log_energy_midpoints, energybins.energy_midpoints**2.7*comp_flux,
color=color_dict[composition], alpha=0.75, marker=marker, ls=':',
label='{} ({})'.format(name, composition))
ax.set_yscale("log", nonposy='clip')
ax.set_xlabel('$\mathrm{\log_{10}(E/GeV)}$')
ax.set_ylabel('$\mathrm{ E^{2.7} \ J(E) \ [GeV^{1.7} m^{-2} sr^{-1} s^{-1}]}$')
ax.grid()
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), frameon=False)
priors_outfile = os.path.join(comp.paths.figures_dir, 'unfolding',
'priors_flux_{}-groups.png'.format(num_groups))
comp.check_output_dir(priors_outfile)
plt.savefig(priors_outfile)
plt.show()
unfolding_df.head()
# unfolding_df_outfile = os.path.join(comp.paths.comp_data_dir, config, 'unfolding',
# 'unfolding_{}-groups.hdf'.format(num_groups))
# comp.check_output_dir(unfolding_df_outfile)
# unfolding_df.to_hdf(unfolding_df_outfile, 'dataframe', format='table')
Explanation: Priors array
End of explanation
formatted_df = pd.DataFrame()
counts_formatted = []
priors_formatted = defaultdict(list)
for index, row in unfolding_df.iterrows():
for composition in comp_list:
counts_formatted.append(row['counts_{}'.format(composition)])
for priors_name in priors_list:
priors_formatted[priors_name].append(row['{}_flux_{}'.format(priors_name, composition)])
formatted_df['counts'] = counts_formatted
formatted_df['counts_err'] = np.sqrt(counts_formatted)
formatted_df['efficiencies'] = efficiencies
formatted_df['efficiencies_err'] = efficiencies_err
for key, value in priors_formatted.iteritems():
formatted_df[key+'_flux'] = value
formatted_df[key+'_prior'] = formatted_df[key+'_flux'] / formatted_df[key+'_flux'].sum()
formatted_df.index.rename('log_energy_bin_idx', inplace=True)
formatted_df.head()
prior_sums = formatted_df[[col for col in formatted_df.columns if 'prior' in col]].sum()
np.testing.assert_allclose(prior_sums, np.ones_like(prior_sums))
Explanation: Formatting for PyUnfold use
End of explanation
formatted_df_outfile = os.path.join(comp.paths.comp_data_dir, config, 'unfolding',
'unfolding-df_{}-groups.hdf'.format(num_groups))
comp.check_output_dir(formatted_df_outfile)
formatted_df.to_hdf(formatted_df_outfile, 'dataframe', format='table')
Explanation: Save formatted DataFrame to disk
End of explanation |
11,343 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Linear Regression
In this note, I am going to train a linear regression model with gradient decent estimation.
It look pretty easy but it's helpful to increase the understanding modeling while learning Machine Learning.
Overview
This note will covere
Step1: Model, Loss function, chain rule and its derivative
Model can be visualized as below
Step2: As you can see the $w$ is moving toward 2 with decreasing cost/loss.
check gradient decent process | Python Code:
import numpy
import matplotlib.pyplot as plt
%matplotlib inline
numpy.random.seed(seed=1)
x = numpy.random.uniform(0, 1, 20)
# real model
def f(x): return x * 2
noise_variance = 0.2 # Variance of the gaussian noise
# Gaussian noise error for each sample in x
noise = numpy.random.randn(x.shape[0]) * noise_variance
# Create targets t
t = f(x) + noise
# Plot the target t versus the input x
plt.plot(x, t, 'o', label='t')
# Plot the initial line
plt.plot([0, 1], [f(0), f(1)], 'r--', label='f(x)')
plt.xlabel('$x$', fontsize=15)
plt.ylabel('$t$', fontsize=15)
plt.ylim([0,2])
plt.title('inputs (x) vs targets (t)')
plt.grid()
plt.legend(loc=2)
plt.show()
Explanation: Linear Regression
In this note, I am going to train a linear regression model with gradient descent estimation.
It looks pretty easy, but it's helpful for building an understanding of modeling while learning Machine Learning.
Overview
This note will cover:
* Prepare the data
* Loss function, chain rule and its derivative
* Code implementation
Prepare the data
Here we are generating 20 data points from a uniform distribution of (0, 1). True model is y = 2x. We added random noise with variance of 0.2.
End of explanation
# Define the neural network function y = x * w
def nn(x, w): return x * w
# Define the cost function
def cost(y, t): return ((t - y)**2).sum()
# define the gradient function. Remember that y = nn(x, w) = x * w
def gradient(w, x, t):
return 2 * x * (nn(x, w) - t)
# define the update function delta w
def delta_w(w_k, x, t, learning_rate):
return learning_rate * gradient(w_k, x, t).sum()
# Set the initial weight parameter
w = 0.1
# Set the learning rate
learning_rate = 0.1
# Start performing the gradient descent updates, and print the weights and cost:
nb_of_iterations = 4 # number of gradient descent updates
w_cost = [(w, cost(nn(x, w), t))] # List to store the weight,costs values
for i in range(nb_of_iterations):
dw = delta_w(w, x, t, learning_rate) # Get the delta w update
w = w - dw # Update the current weight parameter
w_cost.append((w, cost(nn(x, w), t))) # Add weight,cost to list
# Print the final w, and cost
for i in range(0, len(w_cost)):
print('w({}): {:.4f} \t cost: {:.4f}'.format(i, w_cost[i][0], w_cost[i][1]))
Explanation: Model, Loss function, chain rule and its derivative
Model can be visualized as below:
<p align="center">
<img src="https://raw.githubusercontent.com/weichetaru/weichetaru.github.com/master/notebook/machine-learning/img/SimpleANN01.png"></p>
Model can be described as $y=x*w$. Note $t$ is our target data generated with noise.
We can then define squared error cost as below:
$$\xi = \sum_{i=1}^{N}\left \| t_{i} - y_{i}\right \|^{2} $$
and our goal is to minimize the loss:
$$\underset{w}{argmin}\sum_{i=1}^{N}\left \| t_{i} - y_{i}\right \|^{2}$$
The gradient descent update can be defined as:
$$w(k+1) = w(k) - \Delta w(k)$$
$$\Delta w(k) = \mu\frac{\partial \xi}{\partial w}, \quad \text{where } \mu \text{ is the learning rate}$$
Simply apply the chain rule here:
$$\frac{\partial \xi }{\partial w} = \frac{\partial y}{\partial w} \frac{\partial \xi }{\partial y} $$
we know
$$\frac{\partial \xi_{i} }{\partial y_{i}} = \frac{\partial (t_{i}-y_{i})^{2}}{\partial y_{i}}=2(y_{i}-t_{i})$$
and because $y=x*w$:
$$\frac{\partial y_{i}}{\partial w_{i}}=x_{i}$$
Combining everything, we get:
$$\Delta w = \mu * 2 * \sum_{i=1}^{N}x_{i}(y_{i}-t_{i})$$
Code implementation
End of explanation
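As a quick sanity check of the analytic gradient derived above, here is a finite-difference comparison; it is a sketch that reuses the x, t, nn, cost and gradient defined earlier in this note.
# Numerical check of the analytic gradient at an arbitrary weight value.
eps = 1e-6
w_check = 0.3
numeric_grad = (cost(nn(x, w_check + eps), t) - cost(nn(x, w_check - eps), t)) / (2 * eps)
analytic_grad = gradient(w_check, x, t).sum()
print('numeric: {:.6f}  analytic: {:.6f}'.format(numeric_grad, analytic_grad))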
# Plot the cost vs the given weight w
# Define a vector of weights for which we want to plot the cost
ws = numpy.linspace(0, 4, num=100) # weight values
cost_ws = numpy.vectorize(lambda w: cost(nn(x, w) , t))(ws) # cost for each weight in ws
# Plot the first 2 gradient descent updates
plt.plot(ws, cost_ws, 'r--') # Plot the error curve
# Plot the updates
for i in range(0, len(w_cost)-2):
w1, c1 = w_cost[i]
w2, c2 = w_cost[i+1]
plt.plot(w1, c1, 'bo')
plt.plot([w1, w2],[c1, c2], 'b-')
plt.text(w1, c1+0.5, '$w({})$'.format(i))
# Show figure
plt.xlabel('$w$', fontsize=15)
plt.ylabel('$\\xi$', fontsize=15)
plt.title('Gradient descent updates plotted on cost function')
plt.grid()
plt.show()
Explanation: As you can see, $w$ is moving toward 2 with decreasing cost/loss.
Check the gradient descent process.
End of explanation |
11,344 |
Given the following text problem statement, write Python code to implement the functionality described below in problem statement
Problem:
Calculating r2 score of machine learning model
| Python Code::
model.score(x_test, y_test)
|
11,345 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<a href="https
Step1: Transfer Learning
In the field of deep learning, transfer learning is defined as the conveyance of knowledge from one pretrained model to a new model. This simply means that transfer learning uses a pretrained model to train a new model. Typically the new model will have a more specific application than the pre-trained model.
Note that this lab is largely based on an excellent transfer learning lab from TensorFlow.
Exploratory Data Analysis
The tensorflow_datasets package has a catalog of datasets that are easy to load into your TensorFlow environment for experimentation.
In this lab we'll work with the cats_vs_dogs dataset. This dataset contains thousands of images of cats and dogs. Looking at the documentation for the dataset, we can see there are 23,262 examples in the 'train' split of data. There are no test and validation splits.
We could just load this one split directly and then split the data once we download it. Another option is to tell tfds.load() to split the data for us. To do that we must specify the splits.
There is a specific notation we can use that tells the function how much of the data we want in each split. For instance 'train[
Step2: The metadata returned from our dataset contains useful information about the data. For instance, it includes the number of classes
Step3: And the class names
Step4: It even comes with some handy functions for converting between class names and numbers
Step5: Let's store the int2str into a more conveniently named function for later use.
Step6: Let's take a quick look at our dataset. First we'll peek at the shape of the data.
Step7: (None, None, 3) lets us know that we have three channel images, but we aren't sure of the lengths and widths. They are likely different depending on the image. We also don't know how many images we have.
Let's do some deeper analysis.
It turns out that you can iterate over a DatasetV1Adapter with a standard for loop. The items returned at each iteration are the image and the label.
We'll create a helper function to analyze a split of our data.
Step8: We'll train on 18,610 examples, validating on 2,326, and performing our final testing on 2,326. Our classes look pretty evenly spread across all of the splits. The classes also seem to have a similar number of total examples.
Let's now see what our images look like. We'll display one dog and one cat.
Step9: These are color images with noisy backgrounds. Also, the images aren't the same size, so we'll need to eventually resize them to feed our model.
Let's find the range of color values and image sizes.
Step10: It looks like we are dealing with color values from 0 through 255, which is pretty standard.
We have a huge number of different resolutions. There are over 6,000 different image sizes in this dataset, some as small as 4x4x3! It is difficult to imagine that an image that small would be meaningful. Let's see how many tiny images we are dealing with.
Step11: There is only one truly tiny image. Let's take a look at it.
Step12: That's definitely bad data. Let's go ahead and sample some of the other small images.
Step13: Though some are difficult to interpret, you can probably tell that each image contains either cats or dogs.
In order to not process the tiny image, we can write a filter function. We know the shape is (4, 4, 3), so we can filter for that exact shape. To make the filter a little more generic, we'll instead filter out any image that is shorter or narrower than 6 pixels.
Step14: It looks like our problematic image was a cat in the holdout test set.
The Pretrained Model
To build our cat/dog classifier, we'll use the learnings of a pre-trained model. Specifically MobileNetV2. We'll use tf.keras.applications.MobileNetV2 to load the data.
Model-Specific Preprocessing
Researching MobileNetV2, you'll find that the neural network by default takes an input of image of size (224, 224, 3). Though the model can be configured to take other inputs, all of our images are different sizes. So we might as well resize them to fit the default.
Step15: We also need to normalize our data, but what should our input values be scaled to? Ideally our input data should look like the input data that the MobileNetV2 was trained on. Unfortunately, this isn't published.
MobileNetV2 internally uses relu6, which limits activation outputs to the range of 0 through 6. This hints that we might want to normalize our values between [0, 1] or even [0, 6].
It also performs batch normalization throughout the network. This is the process of dividing input values by the mean and subtracting the standard deviation of each batch of data processed. So "batch normalization" is really "batch standardization".
Standardizing our data by batch is possible. We could also calculate the mean and standard deviation of all of the data and standardize the entire dataset in one pass. Or we could approximate standardization and simply divide our input data by 127.5 (the midpoint of our [0, 255] range) and then subtract 1 (a guessed standard deviation).
Step16: Did it work? Let's check it out.
Step17: Looks great! Now it is time to load our pretrained model.
Loading MobileNetV2
Loading MobileNetV2 is pretty straight-forward.
We need to pass in the input shape, which is (224, 224, 3) for each image.
We also include pre-trained weights based on ImageNet. This is where the transfer learning comes in. ImageNet has over a million images that map to a thousand labels. MobileNetV2 has been trained on ImageNet. We'll use those learnings and then add a few more layers of our own model to build a cat/dog classifier.
The final argument is include_top. Typically when building a classification model, toward the end of the model, high-dimensional layers are flattened down into two-dimensional tensors. This is considered the top of the model since diagrams often show the final layers at the top. For transfer learning we'll leave this dimensionality reduction off.
If you do include the top of the model, the following extra layers will be shown
Step18: It is often a good idea to "freeze" the trained model. This prevents its weights from being updated when we train our new model.
It is really only recommended to update the weights of the pretrained model when you are about to train on a large and similar dataset, as compared to the one that was originally trained on. This is not the case in our example. ImageNet has a thousand classes and over a million images. We have two classes and a few thousand images.
Step19: Batching
We will want to train our model in batches. In our case we'll use a batch size of 32. You might want to experiment with other sizes.
Step20: You can see that we now have a well-defined input shape for each training batch.
Step21: If we apply our model to our first batch, you can see that we get a (32, 7, 7, 1280) block of features. These will be the input to our cat/dog model.
Step22: Extending the Model
Now we can perform the actual transfer learning. We'll build a new model that classifies images as containing dogs or cats. In order to do that, we can use a Sequential model with the pretrained model as the first layer.
Note that the output layer of our pretrained model is
Step23: We now compile the model, training for accuracy with binary cross entropy used to calculate loss.
Step24: Training will take a few minutes. Be sure to use GPU or it will take a really long time.
Step25: We got a training accuracy of over 99% and a validation accuracy close to 99%! Let's graph the accuracy and loss per epoch.
Step26: The graph makes it look like we might be overfitting, but if you look at the range on the y-axis, we actually aren't doing too badly. We should, however, perform a final test to see if we can generalize well.
Step27: We got an accuracy of just over 99%, which can give us some confidence that this model generalizes well.
Making Predictions
We can use the model to make predictions by using the predict() function.
Step28: Remember the predictions can range from 0.0 to 1.0. We can round them and cast them to integers to get class mappings.
Step29: And we can now print the predicted class alongside the original image.
Step30: You can also make predictions by calling the model directly and passing it a single batch.
Step31: Exercises
Exercise 1 | Python Code:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Explanation: <a href="https://colab.research.google.com/github/google/applied-machine-learning-intensive/blob/master/content/05_deep_learning/04_transfer_learning/colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Copyright 2020 Google LLC.
End of explanation
import tensorflow_datasets as tfds
(raw_train, raw_validation, raw_test), metadata = tfds.load(
'cats_vs_dogs',
split=['train[:80%]', 'train[80%:90%]', 'train[90%:]'],
with_info=True,
as_supervised=True,
)
Explanation: Transfer Learning
In the field of deep learning, transfer learning is defined as the conveyance of knowledge from one pretrained model to a new model. This simply means that transfer learning uses a pretrained model to train a new model. Typically the new model will have a more specific application than the pre-trained model.
Note that this lab is largely based on an excellent transfer learning lab from TensorFlow.
Exploratory Data Analysis
The tensorflow_datasets package has a catalog of datasets that are easy to load into your TensorFlow environment for experimentation.
In this lab we'll work with the cats_vs_dogs dataset. This dataset contains thousands of images of cats and dogs. Looking at the documentation for the dataset, we can see there are 23,262 examples in the 'train' split of data. There are no test and validation splits.
We could just load this one split directly and then split the data once we download it. Another option is to tell tfds.load() to split the data for us. To do that we must specify the splits.
There is a specific notation we can use that tells the function how much of the data we want in each split. For instance 'train[:80%]' indicates that we want the first 80% of the train split in one tranche. 'train[80%:90%]' indicates that we want the next 10% of the data in another tranche, and so on. You can see this at work in our split example below.
End of explanation
metadata.features['label'].num_classes
Explanation: The metadata returned from our dataset contains useful information about the data. For instance, it includes the number of classes:
End of explanation
metadata.features['label'].names
Explanation: And the class names:
End of explanation
print(metadata.features['label'].int2str(1))
print(metadata.features['label'].str2int('cat'))
Explanation: It even comes with some handy functions for converting between class names and numbers:
End of explanation
get_class_name = metadata.features['label'].int2str
get_class_name(0), get_class_name(1)
Explanation: Let's store the int2str into a more conveniently named function for later use.
End of explanation
raw_train
Explanation: Let's take a quick look at our dataset. First we'll peek at the shape of the data.
End of explanation
import collections
def split_details(split):
counts = collections.defaultdict(int)
for image, label in split:
counts[label.numpy()]+=1
total = 0
for cls, cnt in counts.items():
print(f"Class {get_class_name(cls)}: {cnt}")
total += cnt
print(f"Total: {total}")
for s in (
("Train", raw_train),
("Validation", raw_validation),
("Test", raw_test)):
print(s[0])
split_details(s[1])
print()
Explanation: (None, None, 3) lets us know that we have three channel images, but we aren't sure of the lengths and widths. They are likely different depending on the image. We also don't know how many images we have.
Let's do some deeper analysis.
It turns out that you can iterate over a DatasetV1Adapter with a standard for loop. The items returned at each iteration are the image and the label.
We'll create a helper function to analyze a split of our data.
End of explanation
import matplotlib.pyplot as plt
for cls in (0, 1):
for image, label in raw_train:
if label == cls:
plt.figure()
plt.imshow(image)
break
Explanation: We'll train on 18,610 examples, validating on 2,326, and performing our final testing on 2,326. Our classes look pretty evenly spread across all of the splits. The classes also seem to have a similar number of total examples.
Let's now see what our images look like. We'll display one dog and one cat.
End of explanation
import sys
global_min = sys.maxsize
global_max = -sys.maxsize-1
sizes = collections.defaultdict(int)
for split in (raw_train, raw_validation, raw_test):
for image, _ in split:
local_max = image.numpy().max()
local_min = image.numpy().min()
sizes[image.numpy().shape] += 1
if local_max > global_max:
global_max = local_max
if local_min < global_min:
global_min = local_min
print(f"Color values range from {global_min} to {global_max}")
resolutions = [x[0] for x in sorted(sizes.items(), key=lambda r: r[0])]
print(f"There are {len(resolutions)} resolutions ranging from ",
f"{resolutions[0]} to {resolutions[-1]}")
Explanation: These are color images with noisy backgrounds. Also, the images aren't the same size, so we'll need to eventually resize them to feed our model.
Let's find the range of color values and image sizes.
End of explanation
for resolution in sorted(sizes.items(), key=lambda r: r[0])[:10]:
print(resolution[0], ': ', resolution[1])
Explanation: It looks like we are dealing with color values from 0 through 255, which is pretty standard.
We have a huge number of different resolutions. There are over 6,000 different image sizes in this dataset, some as small as 4x4x3! It is difficult to imagine that an image that small would be meaningful. Let's see how many tiny images we are dealing with.
End of explanation
shown = False
for split in (raw_train, raw_validation, raw_test):
if shown:
break
for image, _ in split:
if image.numpy().shape == (4, 4, 3):
plt.figure()
plt.imshow(image)
shown = True
break
Explanation: There is only one truly tiny image. Let's take a look at it.
End of explanation
for split in (raw_train, raw_validation, raw_test):
for image, _ in split:
if image.numpy().shape[0] < 50 and image.numpy().shape[0] > 4:
plt.figure()
plt.imshow(image)
Explanation: That's definitely bad data. Let's go ahead and sample some of the other small images.
End of explanation
import tensorflow as tf
def filter_out_small(image, _):
  return tf.math.logical_and(tf.shape(image)[0] > 5, tf.shape(image)[1] > 5)
for s in (
("Train", raw_train.filter(filter_out_small)),
("Validation", raw_validation.filter(filter_out_small)),
("Test", raw_test.filter(filter_out_small))):
print(s[0])
split_details(s[1])
print()
Explanation: Though some are difficult to interpret, you can probably tell that each image contains either cats or dogs.
In order to not process the tiny image, we can write a filter function. We know the shape is (4, 4, 3), so we can filter for that exact shape. To make the filter a little more generic, we'll instead filter out any image that is shorter or narrower than 6 pixels.
End of explanation
IMG_SIZE = 224
def resize_images(image, label):
image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))
return image, label
train_resized = raw_train.map(resize_images)
validation_resized = raw_validation.map(resize_images)
test_resized = raw_test.map(resize_images)
Explanation: It looks like our problematic image was a cat in the holdout test set.
The Pretrained Model
To build our cat/dog classifier, we'll use the learnings of a pre-trained model, specifically MobileNetV2. We'll use tf.keras.applications.MobileNetV2 to load the model.
Model-Specific Preprocessing
Researching MobileNetV2, you'll find that the neural network by default takes input images of size (224, 224, 3). Though the model can be configured to take other inputs, all of our images are different sizes. So we might as well resize them to fit the default.
End of explanation
def standardize_images(image, label):
image = tf.cast(image, tf.float32)
image = (image/127.5) - 1
image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))
return image, label
train_standardized = train_resized.map(standardize_images)
validation_standardized = validation_resized.map(standardize_images)
test_standardized = test_resized.map(standardize_images)
Explanation: We also need to normalize our data, but what should our input values be scaled to? Ideally our input data should look like the input data that the MobileNetV2 was trained on. Unfortunately, this isn't published.
MobileNetV2 internally uses relu6, which limits activation outputs to the range of 0 through 6. This hints that we might want to normalize our values between [0, 1] or even [0, 6].
It also performs batch normalization throughout the network. This is the process of subtracting the mean of each batch of data processed and dividing by its standard deviation. So "batch normalization" is really "batch standardization".
Standardizing our data by batch is possible. We could also calculate the mean and standard deviation of all of the data and standardize the entire dataset in one pass. Or we could approximate standardization and simply divide our input data by 127.5 (half of the maximum value of 255) and then subtract 1, which maps the pixel values into the range [-1, 1].
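If you prefer true per-image standardization over the fixed 127.5 approximation, TensorFlow has a built-in helper; a minimal sketch of that alternative (we stick with the approximation below):
```python
def standardize_images_exact(image, label):
  image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))
  # Rescales each image to zero mean and unit variance.
  return tf.image.per_image_standardization(image), label
```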
End of explanation
import sys
global_min = sys.maxsize
global_max = -sys.maxsize-1
sizes = collections.defaultdict(int)
for split in (train_standardized, validation_standardized, test_standardized):
for image, _ in split:
local_max = image.numpy().max()
local_min = image.numpy().min()
sizes[image.numpy().shape] += 1
if local_max > global_max:
global_max = local_max
if local_min < global_min:
global_min = local_min
print(f"Color values range from {global_min} to {global_max}")
resolutions = [x[0] for x in sorted(sizes.items(), key=lambda r: r[0])]
print(f"There are {len(resolutions)} resolutions ranging from ",
f"{resolutions[0]} to {resolutions[-1]}")
Explanation: Did it work? Let's check it out.
End of explanation
IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)
mnv2 = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,
weights='imagenet',
include_top=False)
mnv2.summary()
Explanation: Looks great! Now it is time to load our pretrained model.
Loading MobileNetV2
Loading MobileNetV2 is pretty straight-forward.
We need to pass in the input shape, which is (224, 224, 3) for each image.
We also include pre-trained weights based on ImageNet. This is where the transfer learning comes in. ImageNet has over a million images that map to a thousand labels. MobileNetV2 has been trained on ImageNet. We'll use those learnings and then add a few more layers of our own model to build a cat/dog classifier.
The final argument is include_top. Typically when building a classification model, toward the end of the model, high-dimensional layers are flattened down into two-dimensional tensors. This is considered the top of the model since diagrams often show the final layers at the top. For transfer learning we'll leave this dimensionality reduction off.
If you do include the top of the model, the following extra layers will be shown:
```text
global_average_pooling2d_1 (Glo (None, 1280) 0 out_relu[0][0]
predictions (Dense) (None, 1000) 1281000 global_average_pooling2d_1[0][0]
```
End of explanation
mnv2.trainable = False
Explanation: It is often a good idea to "freeze" the trained model. This prevents its weights from being updated when we train our new model.
It is really only recommended to update the weights of the pretrained model when you are about to train on a large and similar dataset, as compared to the one that was originally trained on. This is not the case in our example. ImageNet has a thousand classes and over a million images. We have two classes and a few thousand images.
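If you did want to fine-tune later (for example, after the new classifier head has converged), a common pattern is to unfreeze only the last few layers and recompile with a much smaller learning rate; a sketch of that idea, which we do not use in this lab:
```python
mnv2.trainable = True
for layer in mnv2.layers[:-20]:  # keep everything except the last ~20 layers frozen
  layer.trainable = False
```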
End of explanation
BATCH_SIZE = 32
SHUFFLE_BUFFER_SIZE = 1000
train_batches = train_standardized.shuffle(SHUFFLE_BUFFER_SIZE).batch(BATCH_SIZE)
validation_batches = validation_standardized.batch(BATCH_SIZE)
test_batches = test_standardized.filter(filter_out_small).batch(BATCH_SIZE)
Explanation: Batching
We will want to train our model in batches. In our case we'll use a batch size of 32. You might want to experiment with other sizes.
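As an optional input-pipeline tweak, tf.data can also prefetch batches while the model is busy training; a small sketch (on older TensorFlow versions the constant is tf.data.experimental.AUTOTUNE):
```python
train_batches = train_batches.prefetch(tf.data.AUTOTUNE)
validation_batches = validation_batches.prefetch(tf.data.AUTOTUNE)
```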
End of explanation
image_batch, label_batch = next(iter(train_batches.take(1)))
image_batch.shape
Explanation: You can see that we now have a well-defined input shape for each training batch.
End of explanation
feature_batch = mnv2(image_batch)
print(feature_batch.shape)
Explanation: If we apply our model to our first batch, you can see that we get a (32, 7, 7, 1280) block of features. These will be the input to our cat/dog model.
End of explanation
model = tf.keras.Sequential([
mnv2,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(1, activation='sigmoid'),
])
model.summary()
Explanation: Extending the Model
Now we can perform the actual transfer learning. We'll build a new model that classifies images as containing dogs or cats. In order to do that, we can use a Sequential model with the pretrained model as the first layer.
Note that the output layer of our pretrained model is:
```text
out_relu (ReLU) (None, 7, 7, 1280) 0 Conv_1_bn[0][0]
```
Since the activation function is relu6, we know that the data that we'll be receiving is in the range of [0, 6]. We apply a pooling layer to reduce our inputs. In our output layer, we distill the inputs down to a single number that indicates if the image is of a cat or dog. We chose the sigmoid function, which will cause the output to be in a range of [0, 1]. This represents the confidence in an image being a dog, since dog is encoded as 1.
End of explanation
model.compile(
loss=tf.keras.losses.BinaryCrossentropy(),
metrics=['accuracy']
)
model.summary()
Explanation: We now compile the model, training for accuracy with binary cross entropy used to calculate loss.
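Since no optimizer is passed, Keras falls back to its default ('rmsprop'). If you want explicit control over the optimizer and learning rate, an equivalent call might look like this sketch (the learning rate here is just an illustrative choice):
```python
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
    loss=tf.keras.losses.BinaryCrossentropy(),
    metrics=['accuracy'])
```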
End of explanation
history = model.fit(
train_batches,
epochs=10,
validation_data=validation_batches
)
Explanation: Training will take a few minutes. Be sure to use GPU or it will take a really long time.
End of explanation
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()),1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0,1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
Explanation: We got a training accuracy of over 99% and a validation accuracy close to 99%! Let's graph the accuracy and loss per epoch.
End of explanation
model.evaluate(test_batches)
Explanation: The graph makes it look like we might be overfitting, but if you look at the range on the y-axis, we actually aren't doing too badly. We should, however, perform a final test to see if we can generalize well.
End of explanation
predictions = model.predict(test_batches)
predictions.min(), predictions.max()
Explanation: We got an accuracy of just over 99%, which can give us some confidence that this model generalizes well.
Making Predictions
We can use the model to make predictions by using the predict() function.
End of explanation
import numpy as np
predictions = np.round(predictions.flatten(), 0).astype(int)
predictions
Explanation: Remember the predictions can range from 0.0 to 1.0. We can round them and cast them to integers to get class mappings.
End of explanation
print(get_class_name(predictions[0]))
_ = plt.imshow(next(iter(raw_test.take(1)))[0].numpy())
Explanation: And we can now print the predicted class alongside the original image.
End of explanation
predictions = model(next(iter(test_batches)))
predictions = np.round(predictions, 0).astype(int).flatten()
print(get_class_name(predictions[0]))
_ = plt.imshow(next(iter(raw_test.take(1)))[0].numpy())
Explanation: You can also make predictions by calling the model directly and passing it a single batch.
End of explanation
# Your Solution Goes Here
Explanation: Exercises
Exercise 1: Food 101
In this exercise you'll build a classifier for the Food 101 dataset. The classifier will transfer learnings from DenseNet201.
In order to complete the exercise, you will need to:
* Load the Food 101 dataset. Be sure to pay attention to the splits!
* Perform exploratory data analysis on the dataset.
* Ensure every class is represented in your train, test, and validation splits of the dataset.
* Normalize or standardize your data in the way that the model was trained. You can find this information in the paper introducing the model.
* Extend DenseNet201 with a new model, and have it classify the 101 food types. Note that one_hot and Dataset.map can help you manipulate the targets to make the model train faster.
* Graph training accuracy and loss.
* Calculate accuracy and loss for your holdout test set.*
* Make predictions and print out one predicted label and original image.
*Don't sweat too much about your model's performance. We were only able to get about 75% training accuracy (with obvious overfitting) in our naive model after 10 training epochs. This model is trying to classify 101 different things with messy images. Don't expect it to perform anywhere close to our binary model above.
Use as many code and text cells as you need to complete this task. Explain your work.
Student Solution
End of explanation |
11,346 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<img src="http
Step1: Single Risk Factor
The example is based on a single risk factor, a geometric_brownian_motion object.
Step2: American Put Option
We also model only a single derivative instrument.
Step3: Large Portfolio
However, the derivatives_portfolio object we compose consists of 100 derivatives positions. Each option differs with respect to the strike.
Step4: Sequential Valuation
First, the derivatives portfolio with sequential valuation.
Step5: The call of the get_values method to value all instruments ...
Step6: ... and the results visualized.
Step7: Parallel Valuation
Second, the derivatives portfolio with parallel valuation.
Step8: The call of the get_values method for the parallel valuation case.
Step9: Again, the results visualized (and compared to the sequential results).
Step10: Speed-up
The realized speed-up is of course dependent on the hardware used, and in particular the number of cores (threads) available. | Python Code:
from dx import *
import time
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
%matplotlib inline
Explanation: <img src="http://hilpisch.com/tpq_logo.png" alt="The Python Quants" width="45%" align="right" border="4">
Parallel Valuation of Large Portfolios
Derivatives (portfolio) valuation by Monte Carlo simulation is a computationally demanding task. For practical applications, when valuation speed plays an important role, parallelization of both simulation and valuation tasks might prove a useful strategy. DX Analytics has built in a basic parallelization option which allows the use of the Python multiprocessing module. Depending on the tasks at hand this can already lead to significant speed-ups.
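Stripped of all DX Analytics specifics, the underlying idea is simply to spread independent valuations over worker processes. A rough, self-contained sketch of that pattern (a toy put valuation, not the library's actual implementation):
```python
from multiprocessing import Pool, cpu_count
import numpy as np

def value_put(strike, paths=10000):
    # toy one-year Monte Carlo put valuation under GBM (S0=100, r=2%, sigma=20%)
    s_t = 100. * np.exp((0.02 - 0.5 * 0.2 ** 2) + 0.2 * np.random.standard_normal(paths))
    return np.exp(-0.02) * np.maximum(strike - s_t, 0).mean()

pool = Pool(cpu_count())
values = pool.map(value_put, np.linspace(80, 120, 100))
pool.close(); pool.join()
```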
End of explanation
# constant short rate
r = constant_short_rate('r', 0.02)
# market environments
me_gbm = market_environment('gbm', dt.datetime(2015, 1, 1))
# geometric Brownian motion
me_gbm.add_constant('initial_value', 100.)
me_gbm.add_constant('volatility', 0.2)
me_gbm.add_constant('currency', 'EUR')
me_gbm.add_constant('model', 'gbm')
# valuation environment
val_env = market_environment('val_env', dt.datetime(2015, 1, 1))
val_env.add_constant('paths', 25000)
val_env.add_constant('frequency', 'M')
val_env.add_curve('discount_curve', r)
val_env.add_constant('starting_date', dt.datetime(2015, 1, 1))
val_env.add_constant('final_date', dt.datetime(2015, 12, 31))
# add valuation environment to market environments
me_gbm.add_environment(val_env)
risk_factors = {'gbm' : me_gbm}
Explanation: Single Risk Factor
The example is based on a single risk factor, a geometric_brownian_motion object.
End of explanation
gbm = geometric_brownian_motion('gbm_obj', me_gbm)
me_put = market_environment('put', dt.datetime(2015, 1, 1))
me_put.add_constant('maturity', dt.datetime(2015, 12, 31))
me_put.add_constant('strike', 40.)
me_put.add_constant('currency', 'EUR')
me_put.add_environment(val_env)
am_put = valuation_mcs_american_single(
'am_put', mar_env=me_put, underlying=gbm,
payoff_func='np.maximum(strike - instrument_values, 0)')
Explanation: American Put Option
We also model only a single derivative instrument.
End of explanation
positions = {}
strikes = np.linspace(80, 120, 100)
for i, strike in enumerate(strikes):
positions[i] = derivatives_position(
name='am_put_pos_%s' % strike,
quantity=1,
underlyings=['gbm'],
mar_env=me_put,
otype='American single',
payoff_func='np.maximum(%5.3f - instrument_values, 0)' % strike)
Explanation: Large Portfolio
However, the derivatives_portfolio object we compose consists of 100 derivatives positions. Each option differs with respect to the strike.
End of explanation
port_sequ = derivatives_portfolio(
name='portfolio',
positions=positions,
val_env=val_env,
risk_factors=risk_factors,
correlations=None,
parallel=False) # sequential calculation
Explanation: Sequential Valuation
First, the derivatives portfolio with sequential valuation.
End of explanation
t0 = time.time()
ress = port_sequ.get_values()
ts = time.time() - t0
print "Time in sec %.2f" % ts
Explanation: The call of the get_values method to value all instruments ...
End of explanation
ress['strike'] = strikes
ress.set_index('strike')['value'].plot(figsize=(10, 6))
plt.ylabel('option value estimates')
Explanation: ... and the results visualized.
End of explanation
port_para = derivatives_portfolio(
'portfolio',
positions,
val_env,
risk_factors,
correlations=None,
parallel=True) # parallel valuation
Explanation: Parallel Valuation
Second, the derivatives portfolio with parallel valuation.
End of explanation
t0 = time.time()
resp = port_para.get_values()
# parallel valuation with as many cores as available
tp = time.time() - t0
print "Time in sec %.2f" % tp
Explanation: The call of the get_values method for the parallel valuation case.
End of explanation
plt.figure(figsize=(10, 6))
plt.plot(strikes, resp['value'].values, 'r.', label='parallel')
plt.plot(strikes, ress['value'].values, 'b', label='sequential')
plt.legend(loc=0)
plt.ylabel('option value estimates')
Explanation: Again, the results visualized (and compared to the sequential results).
End of explanation
ts / tp
# speed-up factor
# of course hardware-dependent
wi = 0.4
plt.figure(figsize=(10, 6))
plt.bar((1.5 - wi/2, 2.5 - wi/2), (ts/ts, tp/ts), width=wi)
plt.xticks((1.5, 2.5), ('sequential', 'parallel'))
plt.ylim(0, 1.1), plt.xlim(0.75, 3.25)
plt.ylabel('relative performance (lower = better)')
plt.title('DX Analytics Portfolio Valuation')
Explanation: Speed-up
The realized speed-up is of course dependent on the hardware used, and in particular the number of cores (threads) available.
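A quick way to check how many cores your machine exposes (illustrative, not part of the original notebook):
```python
import multiprocessing
multiprocessing.cpu_count()
```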
End of explanation |
11,347 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Step1: Play with some basic functions adapted from tide data functions
Query Builder
Step3: Offset Generator
Step5: Query Generator
TODO
refactor with a decorator
make key an attribute that can be hidden
Step9: Extract Results
Step13: CSV Generator | Python Code:
def query_builder(start_dt, end_dt, station, offset= 1):
    Function accepts: a start and end date string in the form 'YYYY-MM-DD'
which are <= 1 year apart, a station ID, and an offset.
Function assembles a query parameters/arguments dict and returns an API query and the
query dictionary (query_dict). The relevant base URL is the NCDC endpoint
'http://www.ncdc.noaa.gov/cdo-web/api/v2/data?'.
import urllib
# API endpoint
base_url= 'http://www.ncdc.noaa.gov/cdo-web/api/v2/data?'
# dict of NOAA query parameters/arguments
query_dict = dict(startdate= start_dt, enddate= end_dt, stationid= station,
offset= offset, datasetid= 'GHCND', limit= 1000)
# encode arguments
encoded_args = urllib.urlencode(query_dict)
# query
query = base_url + encoded_args
# decode url % (reconvert reserved characters to utf8 string)
query= urllib.unquote(query)
# create and return query from base url and encoded arguments
return query, query_dict
query_1, query_dict= query_builder('2014-01-01', '2015-01-01', station= 'GHCND:USW00023174')
print(query_1)
query_2, query_dict= query_builder('2014-01-01', '2015-01-01', station= 'GHCND:USW00023174', offset= 1001)
print(query_2)
Explanation: Play with some basic functions adapted from tide data functions
Query Builder
End of explanation
def offsetter(response):
Function accepts a restful query response (JSON)
Function returns a dictionary of offsets to pull the entire query set
where the set is limited to 1000 records per query. Function also
returns a record count for use in validation.
# get repeats and repeat range
import math
count= response['metadata']['resultset']['count']
repeats= math.ceil(count/1000.)
repeat_range= range(int(repeats))
# get offsets dictionary
offset= 1
offsets= [1]
for item in repeat_range[1:]:
offset += 1000
offsets.append(offset)
# zip up the results and convert to dictionary
offset_dict= dict(zip(repeat_range[1:], offsets[1:])) # the first call has been done already to get meta
return offset_dict, count # for quality control
Explanation: Offset Generator
End of explanation
def execute_query(query):
    Function accepts an NOAA query for daily summaries for a specific location
and executes the query.
Function returns a response (JSON)
    import requests
    url = query
# replace token with token provided by NOAA. Enter token as string
headers = {'token': NOAA_Token_Here} # https://www.ncdc.noaa.gov/cdo-web/token
response = requests.get(url, headers = headers)
response = response.json()
return response
working_1= execute_query(query_1)['results']
working_2 = execute_query(query_2)['results']
Explanation: Query Generator
TODO
refactor with a decorator
make key an attribute that can be hidden
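One possible way to address the "hide the key" TODO (sketched here as an option, not the approach actually taken) is to read the token from an environment variable instead of hard-coding it:
```python
import os
# Hypothetical: export NOAA_TOKEN=... in your shell before launching the notebook.
NOAA_Token_Here = os.environ.get('NOAA_TOKEN')
```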
End of explanation
def extract_results(response):
    Function accepts a NOAA query response (JSON) and returns the results
    key values as well as the number of records (for use in validation).
data= response['results']
# for quality control to verify retrieval of all rows
length= len(data)
return data, length
def collator(results):
    Function accepts the results key of an NOAA query response (JSON)
and returns a tidy data set in PANDAS, where each record is an
observation about a day.
    import pandas as pd
    df= pd.DataFrame(results)
df= df.drop(['attributes','station'], axis=1)
df= df.pivot(index= 'date',columns= 'datatype', values= 'value').reset_index()
return df
def get_ncdc(start_dt, end_dt, station):
    Function accepts a start date ('YYYY-MM-DD'), an end date ('YYYY-MM-DD'),
    and a NOAA station ID. Date limit is 1 year.
Function returns a tidy dataset in a PANDAS DataFrame where
each row represents an observation about a day, a record count
and a query parameters dictionary.
# count for verifying retrieval of all rows
record_count= 0
# initial query
query, query_dict= query_builder(start_dt, end_dt, station)
response= execute_query(query)
# extract results and count
results, length= extract_results(response)
record_count += length
# get offsets for remaining queries
off_d, count= offsetter(response)
# execute remaining queries and operations
for offset in off_d:
query, _= query_builder(start_dt, end_dt, station, off_d[offset])
print(query)
response= execute_query(query)
next_results, next_length= extract_results(response)
record_count += next_length
# concat results lists
results += next_results
assert record_count == count, 'record count != count'
collated_data= collator(results)
return collated_data, record_count, query_dict
test, qc, params = get_ncdc('2014-01-01', '2014-12-31', station= 'GHCND:USW00023174')
test.date.head()
test.date.tail()
test.info()
test[test.date.isnull()]
y1, qc, params = get_ncdc('2014-05-03', '2015-05-02', station= 'GHCND:USW00023174')
y2, qc, params = get_ncdc('2015-05-03', '2016-05-02', station= 'GHCND:USW00023174')
y3, qc, params = get_ncdc('2016-05-03', '2017-05-02', station= 'GHCND:USW00023174')
y1.info()
years= pd.concat([y1, y2, y3])
years.date.head()
years.date.tail()
years.to_csv('LAX_3years.csv', index= False)
Explanation: Extract Results
End of explanation
def gen_csv(df, query_dict):
Arguments: PANDAS DataFrame, a query parameters dictionary
Returns: A CSV of the df with dropped index and named by dict params
    import os
    # extract params
station= query_dict['stationid']
start= query_dict['startdate']
end= query_dict['enddate']
# using os.path in case of future expansion to other directories
path= os.path.join(station + '_' + start + '_' + end + '.' + 'csv')
# remove problem characters (will add more in future)
exclude_chars= ':'
path= path.replace(exclude_chars, "_")
# export to csv
my_csv= df.to_csv(path, index= False)
return my_csv, path
stuff, path= gen_csv(test, query_dict)
path
ls *csv
#!/usr/bin/env python
# coding: utf-8
Python code for querying NOAA daily summary weather and returning a CSV per year
for a specific station. Code is intended to be executed from CLI.
import sys
# set path to tools library and import
sys.path.append(r'noaa_weather_tools')
import noaa_weather_tools
NOAA_Token_Here= 'enter token as string'
print("Check dt format('DD-MM-YYYY', and whether dates span <= 1 year from a current or past date")
print("If dates exceed one year, NCDC query returns a null object")
print("Need a token take a token, have a token, keep it to yourself @ https://www.ncdc.noaa.gov/cdo-web/token")
print('start_dt: {}\n end_dt: {}'.format(sys.argv[1], sys.argv[2]))
def noaa_dailysum_weather_processor(start_dt, end_dt, station):
    Function accepts a station ID, and beginning/end datetime as strings with date format as
    'YYYY-MM-DD' which span <= 1 year from a current or past date, passing them to the query_builder function.
Function creates a .csv file of NOAA (NCDC) Daily Summary data for a specific station.
print(15 * '.' + "reticulating splines" + 5* '.' + "getting records")
df, record_count, query_parameters= noaa_weather_tools.get_ncdc(start_dt, end_dt, station)
print(15* '.' + "exporting to csv")
my_csv, my_path= noaa_weather_tools.gen_csv(df, query_parameters)
print("spines reticulated")
return my_csv
noaa_dailysum_weather_processor('2014-05-03', '2015-05-02', station= 'GHCND:USW00023174')
ls *csv
Explanation: CSV Generator
End of explanation |
11,348 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Numpy Exercise 4
Imports
Step1: Complete graph Laplacian
In discrete mathematics a Graph is a set of vertices or nodes that are connected to each other by edges or lines. If those edges don't have directionality, the graph is said to be undirected. Graphs are used to model social and communications networks (Twitter, Facebook, Internet) as well as natural systems such as molecules.
A Complete Graph, $K_n$ on $n$ nodes has an edge that connects each node to every other node.
Here is $K_5$
Step2: The Laplacian Matrix is a matrix that is extremely important in graph theory and numerical analysis. It is defined as $L=D-A$, where $D$ is the degree matrix and $A$ is the adjacency matrix. For the purpose of this problem you don't need to understand the details of these matrices, although their definitions are relatively simple.
The degree matrix for $K_n$ is an $n \times n$ diagonal matrix with the value $n-1$ along the diagonal and zeros everywhere else. Write a function to compute the degree matrix for $K_n$ using NumPy.
Step3: The adjacency matrix for $K_n$ is an $n \times n$ matrix with zeros along the diagonal and ones everywhere else. Write a function to compute the adjacency matrix for $K_n$ using NumPy.
Step4: Use NumPy to explore the eigenvalues or spectrum of the Laplacian L of $K_n$. What patterns do you notice as $n$ changes? Create a conjecture about the general Laplace spectrum of $K_n$. | Python Code:
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
Explanation: Numpy Exercise 4
Imports
End of explanation
import networkx as nx
K_5=nx.complete_graph(5)
nx.draw(K_5)
Explanation: Complete graph Laplacian
In discrete mathematics a Graph is a set of vertices or nodes that are connected to each other by edges or lines. If those edges don't have directionality, the graph is said to be undirected. Graphs are used to model social and communications networks (Twitter, Facebook, Internet) as well as natural systems such as molecules.
A Complete Graph, $K_n$ on $n$ nodes has an edge that connects each node to every other node.
Here is $K_5$:
End of explanation
def complete_deg(n):
k_n = np.identity((n)) * (n-1)
answer = k_n.astype(dtype=np.int)
return answer
print(complete_deg(4))
D = complete_deg(5)
assert D.shape==(5,5)
assert D.dtype==np.dtype(int)
assert np.all(D.diagonal()==4*np.ones(5))
assert np.all(D-np.diag(D.diagonal())==np.zeros((5,5),dtype=int))
Explanation: The Laplacian Matrix is a matrix that is extremely important in graph theory and numerical analysis. It is defined as $L=D-A$, where $D$ is the degree matrix and $A$ is the adjacency matrix. For the purpose of this problem you don't need to understand the details of these matrices, although their definitions are relatively simple.
The degree matrix for $K_n$ is an $n \times n$ diagonal matrix with the value $n-1$ along the diagonal and zeros everywhere else. Write a function to compute the degree matrix for $K_n$ using NumPy.
End of explanation
def complete_adj(n):
ones = np.ones((n,n))
diag = np.identity(n)
adj = ones-diag
adj = adj.astype(dtype=np.int)
return adj
print complete_adj(4)
A = complete_adj(5)
assert A.shape==(5,5)
assert A.dtype==np.dtype(int)
assert np.all(A+np.eye(5,dtype=int)==np.ones((5,5),dtype=int))
Explanation: The adjacency matrix for $K_n$ is an $n \times n$ matrix with zeros along the diagonal and ones everywhere else. Write a function to compute the adjacency matrix for $K_n$ using NumPy.
End of explanation
print(np.linalg.eigvals(complete_deg(4)))
print(np.linalg.eigvals(complete_adj(4)))
L = (np.linalg.eigvals(complete_deg(5) - complete_adj(5)))
J = L.astype(dtype=np.int)
print L
print J
Explanation: Use NumPy to explore the eigenvalues or spectrum of the Laplacian L of $K_n$. What patterns do you notice as $n$ changes? Create a conjecture about the general Laplace spectrum of $K_n$.
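For reference, the pattern that shows up numerically (and one way to check a conjecture, sketched below) is a single eigenvalue of 0 plus the eigenvalue $n$ repeated $n-1$ times:
```python
for n in range(2, 8):
    evals = np.sort(np.linalg.eigvals(complete_deg(n) - complete_adj(n)).real)
    print(evals)  # expect one 0 and (n-1) copies of n
```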
End of explanation |
11,349 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Build names mapping
To make it a little easier to check that I'm using the correct guids, construct a mapping from names back to guid.
Note
Step1: Pikov sprite editor classes
These classes are the core resources used in defining a "Pikov" file.
Note
Step2: Gamekitty
Create instances of the Pikov classes to define a concrete Pikov graph, based on my "gamekitty" animations.
Step3: Chop image into 8x8 sprites
By creating an array of 8x8 images, we make it easier to map from PICO-8 sprite number (from my original gamekitty cart) to a Bitmap. | Python Code:
names = {}
for node in graph:
for edge in node:
if edge.guid == "169a81aefca74e92b45e3fa03c7021df":
value = node[edge].value
if value in names:
raise ValueError('name: "{}" defined twice'.format(value))
names[value] = node
names["ctor"]
# {name: names[name].guid for name in sorted(names) if '[' not in name}
def name_to_guid(name):
if name not in names:
return None
node = names[name]
if not hasattr(node, "guid"):
return None
return node.guid
Explanation: Build names mapping
To make it a little easier to check that I'm using the correct guids, construct a mapping from names back to guid.
Note: this adds a constraint that no two nodes have the same name, which should not be enforced for general semantic graphs.
End of explanation
from pikov.sprite import Resource, Rectangle, Point, Bitmap
Explanation: Pikov sprite editor classes
These classes are the core resources used in defining a "Pikov" file.
Note: ideally these classes could be derived from the graph itself, but I
don't (yet) encode type or field information in the pikov.json semantic
graph.
End of explanation
resource = Resource(graph, guid=name_to_guid("spritesheet"))
resource.name = "spritesheet"
resource.relative_path = "./gamekitty.png"
resource
resource.image
Explanation: Gamekitty
Create instances of the Pikov classes to define a concrete Pikov graph, based on my "gamekitty" animations.
End of explanation
spritesheet = []
for row in range(16):
for column in range(16):
sprite_number = row * 16 + column
crop_name = "crop[{}]".format(sprite_number)
crop = Rectangle(graph, guid=name_to_guid(crop_name))
crop.name = crop_name
anchor_name = "anchor[{}]".format(sprite_number)
anchor = Point(graph, guid=name_to_guid(anchor_name))
anchor.name = anchor_name
anchor.x = column * 8
anchor.y = row * 8
crop.anchor = anchor
crop.width = 8
crop.height = 8
bitmap_name = "bitmap[{}]".format(sprite_number)
bitmap = Bitmap(graph, guid=name_to_guid(bitmap_name))
bitmap.name = bitmap_name
bitmap.resource = resource
bitmap.crop = crop
spritesheet.append(bitmap)
list(graph.get_labels(spritesheet[4]))
spritesheet[1].image
# Save our work!
graph.save()
Explanation: Chop image into 8x8 sprites
By creating an array of 8x8 images, we make it easier to map from PICO-8 sprite number (from my original gamekitty cart) to a Bitmap.
End of explanation |
11,350 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Copyright 2020 The TensorFlow Authors.
Step1: BERT Question Answer with TensorFlow Lite Model Maker
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https
Step2: Import the required packages.
Step3: The "End-to-End Overview" demonstrates a simple end-to-end example. The following sections walk through the example step by step to show more detail.
Choose a model_spec that represents a model for question answer
Each model_spec object represents a specific model for question answer. The Model Maker currently supports MobileBERT and BERT-Base models.
Supported Model | Name of model_spec | Model Description
--- | --- | ---
MobileBERT | 'mobilebert_qa' | 4.3x smaller and 5.5x faster than BERT-Base while achieving competitive results, suitable for on-device scenario.
MobileBERT-SQuAD | 'mobilebert_qa_squad' | Same model architecture as MobileBERT model and the initial model is already retrained on SQuAD1.1.
BERT-Base | 'bert_qa' | Standard BERT model that is widely used in NLP tasks.
In this tutorial, MobileBERT-SQuAD is used as an example. Since the model is already retrained on SQuAD1.1, it can converge faster for the question answer task.
Step4: Load Input Data Specific to an On-device ML App and Preprocess the Data
The TriviaQA is a reading comprehension dataset containing over 650K question-answer-evidence triples. In this tutorial, you will use a subset of this dataset to learn how to use the Model Maker library.
To load the data, convert the TriviaQA dataset to the SQuAD1.1 format by running the converter Python script with --sample_size=8000 and a set of web data. Modify the conversion code a little bit by
Step5: You can also train the MobileBERT model with your own dataset. If you are running this notebook on Colab, upload your data by using the left sidebar.
<img src="https
Step6: Customize the TensorFlow Model
Create a custom question answer model based on the loaded data. The create function comprises the following steps
Step7: Have a look at the detailed model structure.
Step8: Evaluate the Customized Model
Evaluate the model on the validation data and get a dict of metrics including f1 score and exact match etc. Note that metrics are different for SQuAD1.1 and SQuAD2.0.
Step9: Export to TensorFlow Lite Model
Convert the trained model to TensorFlow Lite model format with metadata so that you can later use it in an on-device ML application. The vocab file is embedded in the metadata. The default TFLite filename is model.tflite.
In many on-device ML applications, the model size is an important factor. Therefore, it is recommended that you quantize the model to make it smaller and potentially run faster.
The default post-training quantization technique is dynamic range quantization for the BERT and MobileBERT models.
Step10: You can use the TensorFlow Lite model file in the bert_qa reference app using BertQuestionAnswerer API in TensorFlow Lite Task Library by downloading it from the left sidebar on Colab.
The allowed export formats can be one or a list of the following
Step11: You can also evaluate the tflite model with the evaluate_tflite method. This step is expected to take a long time.
Step12: Advanced Usage
The create function is the critical part of this library in which the model_spec parameter defines the model specification. The BertQASpec class is currently supported. There are 2 models | Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Explanation: Copyright 2020 The TensorFlow Authors.
End of explanation
!sudo apt -y install libportaudio2
!pip install -q tflite-model-maker-nightly
Explanation: BERT Question Answer with TensorFlow Lite Model Maker
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/lite/models/modify/model_maker/question_answer"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/models/modify/model_maker/question_answer.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/models/modify/model_maker/question_answer.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/tensorflow/tensorflow/lite/g3doc/models/modify/model_maker/question_answer.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
The TensorFlow Lite Model Maker library simplifies the process of adapting and converting a TensorFlow model to particular input data when deploying this model for on-device ML applications.
This notebook shows an end-to-end example that utilizes the Model Maker library to illustrate the adaptation and conversion of a commonly-used question answer model for question answer task.
Introduction to BERT Question Answer Task
The supported task in this library is extractive question answer task, which means given a passage and a question, the answer is the span in the passage. The image below shows an example for question answer.
<p align="center"><img src="https://storage.googleapis.com/download.tensorflow.org/models/tflite/screenshots/model_maker_squad_showcase.png" width="500"></p>
<p align="center">
<em>Answers are spans in the passage (image credit: <a href="https://rajpurkar.github.io/mlx/qa-and-squad/">SQuAD blog</a>) </em>
</p>
As for the model of the question answer task, the inputs should be the passage and question pair that are already preprocessed, and the outputs should be the start logits and end logits for each token in the passage.
The size of input could be set and adjusted according to the length of passage and question.
End-to-End Overview
The following code snippet demonstrates how to get the model within a few lines of code. The overall process includes 5 steps: (1) choose a model, (2) load data, (3) retrain the model, (4) evaluate, and (5) export it to TensorFlow Lite format.
```python
Chooses a model specification that represents the model.
spec = model_spec.get('mobilebert_qa')
Gets the training data and validation data.
train_data = DataLoader.from_squad(train_data_path, spec, is_training=True)
validation_data = DataLoader.from_squad(validation_data_path, spec, is_training=False)
Fine-tunes the model.
model = question_answer.create(train_data, model_spec=spec)
Gets the evaluation result.
metric = model.evaluate(validation_data)
Exports the model to the TensorFlow Lite format with metadata in the export directory.
model.export(export_dir)
```
The following sections explain the code in more detail.
Prerequisites
To run this example, install the required packages, including the Model Maker package from the GitHub repo.
End of explanation
import numpy as np
import os
import tensorflow as tf
assert tf.__version__.startswith('2')
from tflite_model_maker import model_spec
from tflite_model_maker import question_answer
from tflite_model_maker.config import ExportFormat
from tflite_model_maker.question_answer import DataLoader
Explanation: Import the required packages.
End of explanation
spec = model_spec.get('mobilebert_qa_squad')
Explanation: The "End-to-End Overview" demonstrates a simple end-to-end example. The following sections walk through the example step by step to show more detail.
Choose a model_spec that represents a model for question answer
Each model_spec object represents a specific model for question answer. The Model Maker currently supports MobileBERT and BERT-Base models.
Supported Model | Name of model_spec | Model Description
--- | --- | ---
MobileBERT | 'mobilebert_qa' | 4.3x smaller and 5.5x faster than BERT-Base while achieving competitive results, suitable for on-device scenario.
MobileBERT-SQuAD | 'mobilebert_qa_squad' | Same model architecture as MobileBERT model and the initial model is already retrained on SQuAD1.1.
BERT-Base | 'bert_qa' | Standard BERT model that is widely used in NLP tasks.
In this tutorial, MobileBERT-SQuAD is used as an example. Since the model is already retrained on SQuAD1.1, it can converge faster for the question answer task.
End of explanation
train_data_path = tf.keras.utils.get_file(
fname='triviaqa-web-train-8000.json',
origin='https://storage.googleapis.com/download.tensorflow.org/models/tflite/dataset/triviaqa-web-train-8000.json')
validation_data_path = tf.keras.utils.get_file(
fname='triviaqa-verified-web-dev.json',
origin='https://storage.googleapis.com/download.tensorflow.org/models/tflite/dataset/triviaqa-verified-web-dev.json')
Explanation: Load Input Data Specific to an On-device ML App and Preprocess the Data
The TriviaQA is a reading comprehension dataset containing over 650K question-answer-evidence triples. In this tutorial, you will use a subset of this dataset to learn how to use the Model Maker library.
To load the data, convert the TriviaQA dataset to the SQuAD1.1 format by running the converter Python script with --sample_size=8000 and a set of web data. Modify the conversion code a little bit by:
* Skipping the samples that couldn't find any answer in the context document;
* Getting the original answer in the context without uppercase or lowercase.
Download the archived version of the already converted dataset.
End of explanation
train_data = DataLoader.from_squad(train_data_path, spec, is_training=True)
validation_data = DataLoader.from_squad(validation_data_path, spec, is_training=False)
Explanation: You can also train the MobileBERT model with your own dataset. If you are running this notebook on Colab, upload your data by using the left sidebar.
<img src="https://storage.googleapis.com/download.tensorflow.org/models/tflite/screenshots/model_maker_question_answer.png" alt="Upload File" width="800" hspace="100">
If you prefer not to upload your data to the cloud, you can also run the library offline by following the guide.
Use the DataLoader.from_squad method to load and preprocess the SQuAD format data according to a specific model_spec. You can use either the SQuAD2.0 or SQuAD1.1 format. Setting the parameter version_2_with_negative to True means the format is SQuAD2.0. Otherwise, the format is SQuAD1.1. By default, version_2_with_negative is False.
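For example, if your own file were in SQuAD2.0 format (i.e. it also contains unanswerable questions), the call would look like this sketch, where the file name is hypothetical:
```python
train_data_v2 = DataLoader.from_squad(
    'my_squad_v2.json', spec, is_training=True, version_2_with_negative=True)
```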
End of explanation
model = question_answer.create(train_data, model_spec=spec)
Explanation: Customize the TensorFlow Model
Create a custom question answer model based on the loaded data. The create function comprises the following steps:
Creates the model for question answer according to model_spec.
Train the question answer model. The default epochs and the default batch size are set according to two variables default_training_epochs and default_batch_size in the model_spec object.
End of explanation
model.summary()
Explanation: Have a look at the detailed model structure.
End of explanation
model.evaluate(validation_data)
Explanation: Evaluate the Customized Model
Evaluate the model on the validation data and get a dict of metrics including f1 score and exact match etc. Note that metrics are different for SQuAD1.1 and SQuAD2.0.
End of explanation
model.export(export_dir='.')
Explanation: Export to TensorFlow Lite Model
Convert the trained model to TensorFlow Lite model format with metadata so that you can later use it in an on-device ML application. The vocab file is embedded in the metadata. The default TFLite filename is model.tflite.
In many on-device ML applications, the model size is an important factor. Therefore, it is recommended that you quantize the model to make it smaller and potentially run faster.
The default post-training quantization technique is dynamic range quantization for the BERT and MobileBERT models.
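If you want a different post-training quantization scheme, you can pass a quantization config to export; for instance, a float16 variant might look like this sketch (API names as provided by the Model Maker config module):
```python
from tflite_model_maker.config import QuantizationConfig

config = QuantizationConfig.for_float16()
model.export(export_dir='.', tflite_filename='model_fp16.tflite',
             quantization_config=config)
```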
End of explanation
model.export(export_dir='.', export_format=ExportFormat.VOCAB)
Explanation: You can use the TensorFlow Lite model file in the bert_qa reference app using BertQuestionAnswerer API in TensorFlow Lite Task Library by downloading it from the left sidebar on Colab.
The allowed export formats can be one or a list of the following:
ExportFormat.TFLITE
ExportFormat.VOCAB
ExportFormat.SAVED_MODEL
By default, it just exports TensorFlow Lite model with metadata. You can also selectively export different files. For instance, exporting only the vocab file as follows:
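Because export_format also accepts a list, several artifacts can be written in one call; for example (a sketch based on the list option above):
```python
model.export(export_dir='.',
             export_format=[ExportFormat.TFLITE, ExportFormat.SAVED_MODEL])
```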
End of explanation
model.evaluate_tflite('model.tflite', validation_data)
Explanation: You can also evaluate the tflite model with the evaluate_tflite method. This step is expected to take a long time.
End of explanation
new_spec = model_spec.get('mobilebert_qa')
new_spec.seq_len = 512
Explanation: Advanced Usage
The create function is the critical part of this library in which the model_spec parameter defines the model specification. The BertQASpec class is currently supported. There are 2 models: MobileBERT model, BERT-Base model. The create function comprises the following steps:
Creates the model for question answer according to model_spec.
Train the question answer model.
This section describes several advanced topics, including adjusting the model, tuning the training hyperparameters etc.
Adjust the model
You can adjust the model infrastructure like parameters seq_len and query_len in the BertQASpec class.
Adjustable parameters for model:
seq_len: Length of the passage to feed into the model.
query_len: Length of the question to feed into the model.
doc_stride: The stride when doing a sliding window approach to take chunks of the documents.
initializer_range: The stdev of the truncated_normal_initializer for initializing all weight matrices.
trainable: Boolean, whether pre-trained layer is trainable.
Adjustable parameters for training pipeline:
model_dir: The location of the model checkpoint files. If not set, temporary directory will be used.
dropout_rate: The rate for dropout.
learning_rate: The initial learning rate for Adam.
predict_batch_size: Batch size for prediction.
tpu: TPU address to connect to. Only used if using tpu.
For example, you can train the model with a longer sequence length. If you change the model, you must first construct a new model_spec.
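Continuing the sketch, the new spec is then used just like the original one: reload the data with it and call create again, optionally overriding training hyperparameters (the epochs and batch_size arguments below follow the create API and are shown for illustration):
```python
new_train_data = DataLoader.from_squad(train_data_path, new_spec, is_training=True)
new_model = question_answer.create(new_train_data, model_spec=new_spec,
                                    epochs=2, batch_size=16)
```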
End of explanation |
11,351 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Word Count Lab
Step2: (1b) Pluralize and test
Let's use a map() transformation to add the letter 's' to each string in the base RDD we just created. We'll define a Python function that returns the word with an 's' at the end of the word. Please replace <FILL IN> with your solution. If you have trouble, the next cell has the solution. After you have defined makePlural you can run the third cell which contains a test. If your implementation is correct it will print 1 test passed.
This is the general form that exercises will take, except that no example solution will be provided. Exercises will include an explanation of what is expected, followed by code cells where one cell will have one or more <FILL IN> sections. The cell that needs to be modified will have # TODO
Step3: (1c) Apply makePlural to the base RDD
Now pass each item in the base RDD into a map() transformation that applies the makePlural() function to each element. And then call the collect() action to see the transformed RDD.
Step4: (1d) Pass a lambda function to map
Let's create the same RDD using a lambda function.
Step5: (1e) Length of each word
Now use map() and a lambda function to return the number of characters in each word. We'll collect this result directly into a variable.
Step6: (1f) Pair RDDs
The next step in writing our word counting program is to create a new type of RDD, called a pair RDD. A pair RDD is an RDD where each element is a pair tuple (k, v) where k is the key and v is the value. In this example, we will create a pair consisting of ('<word>', 1) for each word element in the RDD.
We can create the pair RDD using the map() transformation with a lambda() function to create a new RDD.
Step7: Part 2
Step8: (2b) Use groupByKey() to obtain the counts
Using the groupByKey() transformation creates an RDD containing 3 elements, each of which is a pair of a word and a Python iterator.
Now sum the iterator using a map() transformation. The result should be a pair RDD consisting of (word, count) pairs.
Step9: (2c) Counting using reduceByKey
A better approach is to start from the pair RDD and then use the reduceByKey() transformation to create a new pair RDD. The reduceByKey() transformation gathers together pairs that have the same key and applies the function provided to two values at a time, iteratively reducing all of the values to a single value. reduceByKey() operates by applying the function first within each partition on a per-key basis and then across the partitions, allowing it to scale efficiently to large datasets.
Step10: (2d) All together
The expert version of the code performs the map() to pair RDD, reduceByKey() transformation, and collect in one statement.
Step11: Part 3
Step12: (3b) Mean using reduce
Find the mean number of words per unique word in wordCounts.
Use a reduce() action to sum the counts in wordCounts and then divide by the number of unique words. First map() the pair RDD wordCounts, which consists of (key, value) pairs, to an RDD of values.
Step14: Part 4
Step16: (4b) Capitalization and punctuation
Real world files are more complicated than the data we have been using in this lab. Some of the issues we have to address are
Step17: (4c) Load a text file
For the next part of this lab, we will use the Complete Works of William Shakespeare from Project Gutenberg. To convert a text file into an RDD, we use the SparkContext.textFile() method. We also apply the recently defined removePunctuation() function using a map() transformation to strip out the punctuation and change all text to lowercase. Since the file is large we use take(15), so that we only print 15 lines.
Step18: (4d) Words from lines
Before we can use the wordcount() function, we have to address two issues with the format of the RDD
Step19: (4e) Remove empty elements
The next step is to filter out the empty elements. Remove all entries where the word is ''.
Step20: (4f) Count the words
We now have an RDD that is only words. Next, let's apply the wordCount() function to produce a list of word counts. We can view the top 15 words by using the takeOrdered() action; however, since the elements of the RDD are pairs, we need a custom sort function that sorts using the value part of the pair.
You'll notice that many of the words are common English words. These are called stopwords. In a later lab, we will see how to eliminate them from the results.
Use the wordCount() function and takeOrdered() to obtain the fifteen most common words and their counts. | Python Code:
wordsList = ['cat', 'elephant', 'rat', 'rat', 'cat']
wordsRDD = sc.parallelize(wordsList, 4)
# Print out the type of wordsRDD
print type(wordsRDD)
Explanation: Word Count Lab: Building a word count application
This lab will build on the techniques covered in the Spark tutorial to develop a simple word count application. The volume of unstructured text in existence is growing dramatically, and Spark is an excellent tool for analyzing this type of data. In this lab, we will write code that calculates the most common words in the Complete Works of William Shakespeare retrieved from Project Gutenberg. This could also be scaled to find the most common words on the Internet.
During this lab we will cover:
Part 1: Creating a base RDD and pair RDDs
Part 2: Counting with pair RDDs
Part 3: Finding unique words and a mean value
Part 4: Apply word count to a file
Note that, for reference, you can look up the details of the relevant methods in Spark's Python API
Part 1: Creating a base RDD and pair RDDs
In this part of the lab, we will explore creating a base RDD with parallelize and using pair RDDs to count words.
(1a) Create a base RDD
We'll start by generating a base RDD by using a Python list and the sc.parallelize method. Then we'll print out the type of the base RDD.
End of explanation
# TODO: Replace <FILL IN> with appropriate code
def makePlural(word):
Adds an 's' to `word`.
Note:
This is a simple function that only adds an 's'. No attempt is made to follow proper
pluralization rules.
Args:
word (str): A string.
Returns:
str: A string with 's' added to it.
return word + 's'
print makePlural('cat')
# One way of completing the function
def makePlural(word):
return word + 's'
print makePlural('cat')
# Load in the testing code and check to see if your answer is correct
# If incorrect it will report back '1 test failed' for each failed test
# Make sure to rerun any cell you change before trying the test again
from test_helper import Test
# TEST Pluralize and test (1b)
Test.assertEquals(makePlural('rat'), 'rats', 'incorrect result: makePlural does not add an s')
Explanation: (1b) Pluralize and test
Let's use a map() transformation to add the letter 's' to each string in the base RDD we just created. We'll define a Python function that returns the word with an 's' at the end of the word. Please replace <FILL IN> with your solution. If you have trouble, the next cell has the solution. After you have defined makePlural you can run the third cell which contains a test. If your implementation is correct it will print 1 test passed.
This is the general form that exercises will take, except that no example solution will be provided. Exercises will include an explanation of what is expected, followed by code cells where one cell will have one or more <FILL IN> sections. The cell that needs to be modified will have # TODO: Replace <FILL IN> with appropriate code on its first line. Once the <FILL IN> sections are updated and the code is run, the test cell can then be run to verify the correctness of your solution. The last code cell before the next markdown section will contain the tests.
End of explanation
# TODO: Replace <FILL IN> with appropriate code
pluralRDD = wordsRDD.map(makePlural)
print pluralRDD.collect()
# TEST Apply makePlural to the base RDD(1c)
Test.assertEquals(pluralRDD.collect(), ['cats', 'elephants', 'rats', 'rats', 'cats'],
'incorrect values for pluralRDD')
Explanation: (1c) Apply makePlural to the base RDD
Now pass each item in the base RDD into a map() transformation that applies the makePlural() function to each element. And then call the collect() action to see the transformed RDD.
End of explanation
# TODO: Replace <FILL IN> with appropriate code
pluralLambdaRDD = wordsRDD.map(lambda x : x + 's')
print pluralLambdaRDD.collect()
# TEST Pass a lambda function to map (1d)
Test.assertEquals(pluralLambdaRDD.collect(), ['cats', 'elephants', 'rats', 'rats', 'cats'],
'incorrect values for pluralLambdaRDD (1d)')
Explanation: (1d) Pass a lambda function to map
Let's create the same RDD using a lambda function.
End of explanation
# TODO: Replace <FILL IN> with appropriate code
pluralLengths = (pluralRDD
.map(lambda x : len(x))
.collect())
print pluralLengths
# TEST Length of each word (1e)
Test.assertEquals(pluralLengths, [4, 9, 4, 4, 4],
'incorrect values for pluralLengths')
Explanation: (1e) Length of each word
Now use map() and a lambda function to return the number of characters in each word. We'll collect this result directly into a variable.
End of explanation
# TODO: Replace <FILL IN> with appropriate code
wordPairs = wordsRDD.map(lambda x : (x, 1))
print wordPairs.collect()
# TEST Pair RDDs (1f)
Test.assertEquals(wordPairs.collect(),
[('cat', 1), ('elephant', 1), ('rat', 1), ('rat', 1), ('cat', 1)],
'incorrect value for wordPairs')
Explanation: (1f) Pair RDDs
The next step in writing our word counting program is to create a new type of RDD, called a pair RDD. A pair RDD is an RDD where each element is a pair tuple (k, v) where k is the key and v is the value. In this example, we will create a pair consisting of ('<word>', 1) for each word element in the RDD.
We can create the pair RDD using the map() transformation with a lambda() function to create a new RDD.
End of explanation
# TODO: Replace <FILL IN> with appropriate code
# Note that groupByKey requires no parameters
wordsGrouped = wordPairs.groupByKey()
for key, value in wordsGrouped.collect():
print '{0}: {1}'.format(key, list(value))
# TEST groupByKey() approach (2a)
Test.assertEquals(sorted(wordsGrouped.mapValues(lambda x: list(x)).collect()),
[('cat', [1, 1]), ('elephant', [1]), ('rat', [1, 1])],
'incorrect value for wordsGrouped')
Explanation: Part 2: Counting with pair RDDs
Now, let's count the number of times a particular word appears in the RDD. There are multiple ways to perform the counting, but some are much less efficient than others.
A naive approach would be to collect() all of the elements and count them in the driver program. While this approach could work for small datasets, we want an approach that will work for any size dataset including terabyte- or petabyte-sized datasets. In addition, performing all of the work in the driver program is slower than performing it in parallel in the workers. For these reasons, we will use data parallel operations.
(2a) groupByKey() approach
An approach you might first consider (we'll see shortly that there are better ways) is based on using the groupByKey() transformation. As the name implies, the groupByKey() transformation groups all the elements of the RDD with the same key into a single list in one of the partitions. There are two problems with using groupByKey():
The operation requires a lot of data movement to move all the values into the appropriate partitions.
The lists can be very large. Consider a word count of English Wikipedia: the lists for common words (e.g., the, a, etc.) would be huge and could exhaust the available memory in a worker.
Use groupByKey() to generate a pair RDD of type ('word', iterator).
End of explanation
# TODO: Replace <FILL IN> with appropriate code
wordCountsGrouped = wordsGrouped.map(lambda (k, v): (k, sum(v)))
print wordCountsGrouped.collect()
# TEST Use groupByKey() to obtain the counts (2b)
Test.assertEquals(sorted(wordCountsGrouped.collect()),
[('cat', 2), ('elephant', 1), ('rat', 2)],
'incorrect value for wordCountsGrouped')
Explanation: (2b) Use groupByKey() to obtain the counts
Using the groupByKey() transformation creates an RDD containing 3 elements, each of which is a pair of a word and a Python iterator.
Now sum the iterator using a map() transformation. The result should be a pair RDD consisting of (word, count) pairs.
End of explanation
# TODO: Replace <FILL IN> with appropriate code
# Note that reduceByKey takes in a function that accepts two values and returns a single value
wordCounts = wordPairs.reduceByKey(lambda a, b: a + b)
print wordCounts.collect()
# TEST Counting using reduceByKey (2c)
Test.assertEquals(sorted(wordCounts.collect()), [('cat', 2), ('elephant', 1), ('rat', 2)],
'incorrect value for wordCounts')
Explanation: (2c) Counting using reduceByKey
A better approach is to start from the pair RDD and then use the reduceByKey() transformation to create a new pair RDD. The reduceByKey() transformation gathers together pairs that have the same key and applies the function provided to two values at a time, iteratively reducing all of the values to a single value. reduceByKey() operates by applying the function first within each partition on a per-key basis and then across the partitions, allowing it to scale efficiently to large datasets.
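As a tiny aside (a sketch, not part of the graded lab, and assuming the same sc SparkContext used throughout), the per-key combining is easy to see on a throwaway pair RDD:
somePairs = sc.parallelize([('cat', 1), ('cat', 1), ('rat', 1)], 2)
# Each partition combines its own ('cat', 1) pairs first, then the partial sums are merged
print somePairs.reduceByKey(lambda a, b: a + b).collect()  # e.g. [('cat', 2), ('rat', 1)] (order may vary)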
End of explanation
# TODO: Replace <FILL IN> with appropriate code
wordCountsCollected = wordsRDD.map(lambda x : (x, 1)).reduceByKey(lambda a, b: a + b).collect()
print wordCountsCollected
# TEST All together (2d)
Test.assertEquals(sorted(wordCountsCollected), [('cat', 2), ('elephant', 1), ('rat', 2)],
'incorrect value for wordCountsCollected')
Explanation: (2d) All together
The expert version of the code performs the map() to pair RDD, reduceByKey() transformation, and collect in one statement.
End of explanation
# TODO: Replace <FILL IN> with appropriate code
uniqueWords = wordCounts.count()
print uniqueWords
# TEST Unique words (3a)
Test.assertEquals(uniqueWords, 3, 'incorrect count of uniqueWords')
Explanation: Part 3: Finding unique words and a mean value
(3a) Unique words
Calculate the number of unique words in wordsRDD. You can use other RDDs that you have already created to make this easier.
End of explanation
# TODO: Replace <FILL IN> with appropriate code
from operator import add
totalCount = (wordCounts
.map(lambda (k, v) : v)
.reduce(add))
average = totalCount / float(uniqueWords)
print totalCount
print round(average, 2)
# TEST Mean using reduce (3b)
Test.assertEquals(round(average, 2), 1.67, 'incorrect value of average')
Explanation: (3b) Mean using reduce
Find the mean number of words per unique word in wordCounts.
Use a reduce() action to sum the counts in wordCounts and then divide by the number of unique words. First map() the pair RDD wordCounts, which consists of (key, value) pairs, to an RDD of values.
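As a hedged aside, numeric RDDs also expose a mean() action, so a one-line sketch of the same calculation is shown below; the reduce() version is used in the exercise to practice the action itself.
print wordCounts.map(lambda (k, v): v).mean()  # should also print the average count per unique word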
End of explanation
# TODO: Replace <FILL IN> with appropriate code
def wordCount(wordListRDD):
    """Creates a pair RDD with word counts from an RDD of words.

    Args:
        wordListRDD (RDD of str): An RDD consisting of words.

    Returns:
        RDD of (str, int): An RDD consisting of (word, count) tuples.
    """
    return wordListRDD.map(lambda x : (x, 1)).reduceByKey(lambda a, b: a + b)
print wordCount(wordsRDD).collect()
# TEST wordCount function (4a)
Test.assertEquals(sorted(wordCount(wordsRDD).collect()),
[('cat', 2), ('elephant', 1), ('rat', 2)],
'incorrect definition for wordCount function')
Explanation: Part 4: Apply word count to a file
In this section we will finish developing our word count application. We'll have to build the wordCount function, deal with real world problems like capitalization and punctuation, load in our data source, and compute the word count on the new data.
(4a) wordCount function
First, define a function for word counting. You should reuse the techniques that have been covered in earlier parts of this lab. This function should take in an RDD that is a list of words like wordsRDD and return a pair RDD that has all of the words and their associated counts.
End of explanation
# TODO: Replace <FILL IN> with appropriate code
import re
def removePunctuation(text):
    """Removes punctuation, changes to lower case, and strips leading and trailing spaces.

    Note:
        Only spaces, letters, and numbers should be retained. Other characters should be
        eliminated (e.g. it's becomes its). Leading and trailing spaces should be removed after
        punctuation is removed.

    Args:
        text (str): A string.

    Returns:
        str: The cleaned up string.
    """
    import string
    pattern = re.compile('[%s]' % string.punctuation)
    return re.sub(pattern, '', text.lower().strip(" "))
print removePunctuation('Hi, you!')
print removePunctuation(' No under_score!')
# TEST Capitalization and punctuation (4b)
Test.assertEquals(removePunctuation(" The Elephant's 4 cats. "),
'the elephants 4 cats',
'incorrect definition for removePunctuation function')
Explanation: (4b) Capitalization and punctuation
Real world files are more complicated than the data we have been using in this lab. Some of the issues we have to address are:
Words should be counted independent of their capitalization (e.g., Spark and spark should be counted as the same word).
All punctuation should be removed.
Any leading or trailing spaces on a line should be removed.
Define the function removePunctuation that converts all text to lower case, removes any punctuation, and removes leading and trailing spaces. Use the Python re module to remove any text that is not a letter, number, or space. Reading help(re.sub) might be useful.
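One possible sketch of that "keep only letters, numbers, and spaces" reading (the helper name below is ours, purely for illustration; it is not the required solution):
def removePunctuationAlt(text):
    # Drop every character that is not a lowercase letter, digit, or space, then trim
    return re.sub(r'[^a-z0-9 ]', '', text.lower()).strip()
print removePunctuationAlt(" The Elephant's 4 cats. ")  # 'the elephants 4 cats'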
End of explanation
# Just run this code
import os.path
baseDir = os.path.join('data')
inputPath = os.path.join('cs100', 'lab1', 'shakespeare.txt')
fileName = os.path.join(baseDir, inputPath)
shakespeareRDD = (sc
.textFile(fileName, 8)
.map(removePunctuation))
print '\n'.join(shakespeareRDD
.zipWithIndex() # to (line, lineNum)
.map(lambda (l, num): '{0}: {1}'.format(num, l)) # to 'lineNum: line'
.take(15))
Explanation: (4c) Load a text file
For the next part of this lab, we will use the Complete Works of William Shakespeare from Project Gutenberg. To convert a text file into an RDD, we use the SparkContext.textFile() method. We also apply the recently defined removePunctuation() function using a map() transformation to strip out the punctuation and change all text to lowercase. Since the file is large we use take(15), so that we only print 15 lines.
End of explanation
# TODO: Replace <FILL IN> with appropriate code
shakespeareWordsRDD = shakespeareRDD.flatMap(lambda x : x.split(" "))
shakespeareWordCount = shakespeareWordsRDD.count()
print shakespeareWordsRDD.top(5)
print shakespeareWordCount
# TEST Words from lines (4d)
# This test allows for leading spaces to be removed either before or after
# punctuation is removed.
Test.assertTrue(shakespeareWordCount == 927631 or shakespeareWordCount == 928908,
'incorrect value for shakespeareWordCount')
Test.assertEquals(shakespeareWordsRDD.top(5),
[u'zwaggerd', u'zounds', u'zounds', u'zounds', u'zounds'],
'incorrect value for shakespeareWordsRDD')
Explanation: (4d) Words from lines
Before we can use the wordCount() function, we have to address two issues with the format of the RDD:
The first issue is that we need to split each line by its spaces.
The second issue is we need to filter out empty lines.
Apply a transformation that will split each element of the RDD by its spaces. For each element of the RDD, you should apply Python's string split() function. You might think that a map() transformation is the way to do this, but think about what the result of the split() function will be.
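A quick sketch of the difference (using a throwaway RDD and the same sc as above):
demoLines = sc.parallelize(['hello world', 'goodbye'])
print demoLines.map(lambda l: l.split(' ')).collect()      # [['hello', 'world'], ['goodbye']] -- an RDD of lists
print demoLines.flatMap(lambda l: l.split(' ')).collect()  # ['hello', 'world', 'goodbye'] -- an RDD of words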
End of explanation
# TODO: Replace <FILL IN> with appropriate code
shakeWordsRDD = shakespeareWordsRDD.filter(lambda x : x!='')
shakeWordCount = shakeWordsRDD.count()
print shakeWordCount
# TEST Remove empty elements (4e)
Test.assertEquals(shakeWordCount, 882996, 'incorrect value for shakeWordCount')
Explanation: (4e) Remove empty elements
The next step is to filter out the empty elements. Remove all entries where the word is ''.
End of explanation
# TODO: Replace <FILL IN> with appropriate code
top15WordsAndCounts = wordCount(shakeWordsRDD).takeOrdered(15,lambda (k, v) : -v)
print '\n'.join(map(lambda (w, c): '{0}: {1}'.format(w, c), top15WordsAndCounts))
# TEST Count the words (4f)
Test.assertEquals(top15WordsAndCounts,
[(u'the', 27361), (u'and', 26028), (u'i', 20681), (u'to', 19150), (u'of', 17463),
(u'a', 14593), (u'you', 13615), (u'my', 12481), (u'in', 10956), (u'that', 10890),
(u'is', 9134), (u'not', 8497), (u'with', 7771), (u'me', 7769), (u'it', 7678)],
'incorrect value for top15WordsAndCounts')
Explanation: (4f) Count the words
We now have an RDD that is only words. Next, let's apply the wordCount() function to produce a list of word counts. We can view the top 15 words by using the takeOrdered() action; however, since the elements of the RDD are pairs, we need a custom sort function that sorts using the value part of the pair.
You'll notice that many of the words are common English words. These are called stopwords. In a later lab, we will see how to eliminate them from the results.
Use the wordCount() function and takeOrdered() to obtain the fifteen most common words and their counts.
End of explanation |
11,352 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
====================================================
How to convert 3D electrode positions to a 2D image.
====================================================
Sometimes we want to convert a 3D representation of electrodes into a 2D
image. For example, if we are using electrocorticography it is common to
create scatterplots on top of a brain, with each point representing an
electrode.
In this example, we'll show two ways of doing this in MNE-Python. First,
if we have the 3D locations of each electrode then we can use Mayavi to
take a snapshot of a view of the brain. If we do not have these 3D locations,
and only have a 2D image of the electrodes on the brain, we can use the
Step1: Load data
First we'll load a sample ECoG dataset which we'll use for generating
a 2D snapshot.
Step2: Project 3D electrodes to a 2D snapshot
Because we have the 3D location of each electrode, we can use the
Step3: Manually creating 2D electrode positions
If we don't have the 3D electrode positions then we can still create a
2D representation of the electrodes. Assuming that you can see the electrodes
on the 2D image, we can use | Python Code:
# Authors: Christopher Holdgraf <[email protected]>
#
# License: BSD (3-clause)
from scipy.io import loadmat
import numpy as np
from matplotlib import pyplot as plt
from os import path as op
import mne
from mne.viz import ClickableImage # noqa
from mne.viz import (plot_alignment, snapshot_brain_montage,
set_3d_view)
print(__doc__)
subjects_dir = mne.datasets.sample.data_path() + '/subjects'
path_data = mne.datasets.misc.data_path() + '/ecog/sample_ecog.mat'
# We've already clicked and exported
layout_path = op.join(op.dirname(mne.__file__), 'data', 'image')
layout_name = 'custom_layout.lout'
Explanation: ====================================================
How to convert 3D electrode positions to a 2D image.
====================================================
Sometimes we want to convert a 3D representation of electrodes into a 2D
image. For example, if we are using electrocorticography it is common to
create scatterplots on top of a brain, with each point representing an
electrode.
In this example, we'll show two ways of doing this in MNE-Python. First,
if we have the 3D locations of each electrode then we can use Mayavi to
take a snapshot of a view of the brain. If we do not have these 3D locations,
and only have a 2D image of the electrodes on the brain, we can use the
:class:mne.viz.ClickableImage class to choose our own electrode positions
on the image.
End of explanation
mat = loadmat(path_data)
ch_names = mat['ch_names'].tolist()
elec = mat['elec'] # electrode coordinates in meters
# Now we make a montage stating that the sEEG contacts are in head
# coordinate system (although they are in MRI). This is compensated
# by the fact that below we do not specicty a trans file so the Head<->MRI
# transform is the identity.
montage = mne.channels.make_dig_montage(ch_pos=dict(zip(ch_names, elec)),
coord_frame='head')
info = mne.create_info(ch_names, 1000., 'ecog', montage=montage)
print('Created %s channel positions' % len(ch_names))
Explanation: Load data
First we'll load a sample ECoG dataset which we'll use for generating
a 2D snapshot.
End of explanation
fig = plot_alignment(info, subject='sample', subjects_dir=subjects_dir,
surfaces=['pial'], meg=False)
set_3d_view(figure=fig, azimuth=200, elevation=70)
xy, im = snapshot_brain_montage(fig, montage)
# Convert from a dictionary to array to plot
xy_pts = np.vstack([xy[ch] for ch in info['ch_names']])
# Define an arbitrary "activity" pattern for viz
activity = np.linspace(100, 200, xy_pts.shape[0])
# This allows us to use matplotlib to create arbitrary 2d scatterplots
fig2, ax = plt.subplots(figsize=(10, 10))
ax.imshow(im)
ax.scatter(*xy_pts.T, c=activity, s=200, cmap='coolwarm')
ax.set_axis_off()
# fig2.savefig('./brain.png', bbox_inches='tight') # For ClickableImage
Explanation: Project 3D electrodes to a 2D snapshot
Because we have the 3D location of each electrode, we can use the
:func:mne.viz.snapshot_brain_montage function to return a 2D image along
with the electrode positions on that image. We use this in conjunction with
:func:mne.viz.plot_alignment, which visualizes electrode positions.
End of explanation
# This code opens the image so you can click on it. Commented out
# because we've stored the clicks as a layout file already.
# # The click coordinates are stored as a list of tuples
# im = plt.imread('./brain.png')
# click = ClickableImage(im)
# click.plot_clicks()
# # Generate a layout from our clicks and normalize by the image
# print('Generating and saving layout...')
# lt = click.to_layout()
# lt.save(op.join(layout_path, layout_name)) # To save if we want
# # We've already got the layout, load it
lt = mne.channels.read_layout(layout_name, path=layout_path, scale=False)
x = lt.pos[:, 0] * float(im.shape[1])
y = (1 - lt.pos[:, 1]) * float(im.shape[0]) # Flip the y-position
fig, ax = plt.subplots()
ax.imshow(im)
ax.scatter(x, y, s=120, color='r')
plt.autoscale(tight=True)
ax.set_axis_off()
plt.show()
Explanation: Manually creating 2D electrode positions
If we don't have the 3D electrode positions then we can still create a
2D representation of the electrodes. Assuming that you can see the electrodes
on the 2D image, we can use :class:mne.viz.ClickableImage to open the image
interactively. You can click points on the image and the x/y coordinate will
be stored.
We'll open an image file, then use ClickableImage to
return 2D locations of mouse clicks (or load a file already created).
Then, we'll return these xy positions as a layout for use with plotting topo
maps.
End of explanation |
11,353 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Pandas 入門
Pythonを使ったデータ解析入門 3idea | OpenBook を見ながら、Pandasの基本的な操作を写経してなれる
Pandas 基本操作
https
Step1: Series
ドキュメント
Step2: DataFrame
ドキュメント
Step3: 第3章 Pandas | Python Code:
# numpy と pandas を import する。np, pd と書くのは慣習っぽい
import numpy as np
import pandas as pd
Explanation: Pandas 入門
Pythonを使ったデータ解析入門 3idea | OpenBook を見ながら、Pandasの基本的な操作を写経してなれる
Pandas 基本操作
https://openbook4.me/projects/183/sections/777
End of explanation
# Series
# 軸にラベルを付けた1次元の配列
print(pd.Series([1,2,4]))
# 値とインデックスを設定
s = pd.Series([1,2,4], index=['a','b','c'])
print(s)
print(s.index)
print('最大値:', s.max())
print('最小値:', s.min())
print('平均値:', s.mean())
print('中央値:', s.median())
print('分散:', s.var()) # variance
print('合計値:', s.sum())
print('剰余:')
print(s.mod(2)) # modulo, 剰余
print('\n累積:')
print(s.cumsum()) # cumulative, 累積
print('\n関数を適用:')
print(s.apply(lambda x: x*3)) # 特定の関数を各値に対して適応
print('\n値を変換:')
print(s.map({1: 10, 2: 200})) # 引数で与えた値に対応する値を変換
print('最大値のindex:', s.argmax())
print('最小値のindex:', s.argmin())
print('listに変換:', s.tolist())
print('dictに変換:', s.to_dict())
print('jsonに変換:', s.to_json())
Explanation: Series
ドキュメント: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.html
ソース: https://github.com/pandas-dev/pandas/blob/master/pandas/core/series.py
End of explanation
# DataFrameの作成
df = pd.DataFrame([[1, 4, 7], [2, 5, 8], [3, 6, 9]],
index = ['i1', 'i2', 'i3'],
columns = list("abc"))
df
# 範囲を指定して取り出す
# 指定した行の取り出し。index名かindex番号で与えられる
print('i1行目のデータ:')
print(df.ix['i1'])
print('\n1行目のデータ:')
print(df.ix[1])
print('\ni1行目のa列のデータ:')
print(df.ix['i1', 'a'])
# : は全指定
print('\na列のデータ:')
print(df.ix[:, 'a'])
# 複数の指定も配列で可能。
print(df.ix[[1, 2], ['b','c']])
print(df.ix[[1, 2], [1, 2]])
# 列の指定
print('a列の全データ:')
print(df['a'])
print ('\na列の全データをarrayで取得')
print(df['a'].values)
print ('\na列とindex(i3)を指定して値を取得')
print(df['a']['i3'])
# iloc(integer-location) だと位置で指定できる
print(df.iloc[0]) # 0行目
print(df.iloc[0, 1]) # 0行目1列目
print(df.iloc[:,0]) # 0列目
# 条件を満たす要素に値を代入
df = pd.DataFrame(np.random.randint(3, size=(5,3)))
print(df)
# 条件を満たす要素だけを抽出する
print(df[df>1])
print(df[df==1])
# 条件を満たすものに値を代入
df[df>1] = -1
print(df)
# NaN の削除(dropna)
df = pd.DataFrame([[1, 2, None], [3, None, 4], [5, 6, 7]])
print(df)
# なんでintとfloatまざってるんだろう...
print(df.dropna()) # NaN を含む行を削除
print(df.dropna(axis=1)) # NaN を含む列を削除
print(df.dropna(subset=[1])) # 特定の列を指定することも可能
# NaN を埋める(fillna)
print(df)
print(df.fillna(-1)) # 指定した値で埋める
print(df.fillna(method='pad')) # 直前の値で埋める
print(df.fillna(method='bfill')) # 直後の値で埋める
# misssing valueの前後の線形の値で埋める
print(df)
print(df.apply(pd.Series.interpolate))
# 重複した値の処理(dupulicated)
df = pd.DataFrame([['a', 1], ['a', 1], ['a', 2], ['b', 3], ['b', 4]])
print(df)
print(df.duplicated()) # 重複しているデータを調べる
print(df.duplicated(0)) # 0列の重複しているデータを調べる
print(df.duplicated(1))
# 重複したデータの削除
print(df.drop_duplicates())
print(df.drop_duplicates(0)) # 0列目が重複しているデータの削除
print(df.drop_duplicates(0, keep='last')) # 重複している時に一番最後のデータを残す
Explanation: DataFrame
Documentation: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html
Source: https://github.com/pandas-dev/pandas/blob/master/pandas/core/frame.py
End of explanation
df = pd.DataFrame(np.reshape(np.arange(9), (3, 3)),
columns=['a', 'b', 'c'])
df
# DataFrameの一部を取り出す
print(df.head(2)) # 先頭2行
print(df.tail(2)) # 後ろ2行
print(df.index) # インデックス(行の情報を取得)
print(df.columns) # 列の情報を取得
# DataFrameの形式を変える
print(df.T) # indexとcolumnsを入れ替える
df.sort_index(axis=1, ascending=False) # columnsを逆順にする
df.sort_values(by='b', ascending=False) # 列の値を使ってソートする
df.sort_values(by='b') # 列の値を使ってソートする
# マスクする(ほしい条件のもとで、dfから選びとる)
print(df.a)
print(df.a > 2)
df[df.a > 2] # a列の値が2より大きいデータだけを取り出す
df[df > 3] # 3より大きい値だけを取り出す
new_df = df.copy()
new_df[new_df > 3] = new_df * 2 # 3より大きい値だけ2倍する
new_df
# 列の追加
new_df = df.copy()
new_df['e'] = ['one','two','three']
new_df
# NaNのデータを除去する
# http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.dropna.html
df_with_nan = df[df > 3]
df_with_nan
df_with_nan.dropna() # NaNがある行を削除
df_with_nan.dropna(how='all') # 全てがNaNの行を削除
# http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.fillna.html
df_with_nan.fillna('Ice') # NaNを別な値で埋める
# DataFrameをくっつける
# http://pandas.pydata.org/pandas-docs/stable/generated/pandas.concat.html#pandas.concat
df2 = pd.DataFrame(np.reshape(np.arange(6), (3,2)),
columns=['e', 'f'])
df2
# 列でくっつける
new_df = pd.concat([df, df2], axis=1)
new_df
# 行でくっつける
new_df = pd.concat([df, df2], axis=0)
new_df
df3 = pd.DataFrame(np.reshape(np.arange(6), (3,2)),
columns=['a', 'b'])
df3
new_df = pd.concat([df, df3], axis=0)
new_df
# 行に対してデータを追加する
# http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.append.html#pandas.DataFrame.append
new_df = df.append(df3)
new_df
Explanation: Chapter 3 Pandas: Reshaping DataFrames
https://openbook4.me/projects/183/sections/1369
End of explanation |
11,354 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<a name="top"></a>
<div style="width
Step1: <a name="multipanel"></a>
Multi-panel Plots
Often we wish to create figures with multiple panels of data. It's common to separate variables of different types into these panels. We also don't want to create each panel as an individual figure and combine them in a tool like Illustrator - imagine having to do that for hundreds of plots!
Previously we specified subplots individually with plt.subplot(). We can instead use the subplots method to specify a number of rows and columns of plots in our figure, which returns the figure and all of the axes (subplots) we ask for in a single call
Step2: So even with the sharing of axis information, there's still a lot of repeated code. This current version with just two parameters might still be ok, but
Step3: Using the zip function we can even iterate over multiple lists at the same time with ease
Step4: That's really handy, but needing to access each part of each item with an index like item[0] isn't very flexible, requires us to remember the layout of the item, and isn't best practice. Instead we can use Python's unpacking syntax to make things nice and intuitive.
Step5: <div class="alert alert-success">
<b>EXERCISE</b>
Step6: Solution
Step7: zip can also be used to "unzip" items.
Step8: Let's break down what happened there. Zip pairs elements from all of the input arguements and hands those back to us. So effectively out zip(*zipped_list) is zip((1, 2), (3, 4), (5, 6)), so the first element from each input is paired (1, 3, 5), etc. You can think of it like unzipping or transposing.
We can use the enumerate function to 'count through' an iterable object as well. This can be useful when placing figures in certain rows/columns or when a counter is needed.
Step9: <div class="alert alert-success">
<b>EXERCISE</b>
Step10: Solution
Step11: <a name="functions"></a>
Functions
You're probably already familiar with Python functions, but here's a quick refresher. Functions are used to house blocks of code that we can run repeatedly. Paramters are given as inputs, and values are returned from the function to where it was called. In the world of programming you can think of functions like paragraphs, they encapsulate a complete idea/process.
Let's define a simple function that returns a value
Step12: We've re-implemented add which isn't incredibly exiciting, but that could be hundreds of lines of a numerical method, making a plot, or some other task. Using the function is simple
Step13: <div class="alert alert-success">
<b>EXERCISE</b>
Step14: Solution
Step15: Reading buoy data with a function
Let's create a function to read in buoy data and trim it down to the last 7 days by only providing the buoy number to the function.
Step16: <a name="argskwargs"></a>
Args and Kwargs
Within a function call, we can also set optional arguments and keyword arguments (abbreviated args and kwargs in Python). Args are used to pass a variable length list of non-keyword arguments. This means that args don't have a specific keyword they are attached to, and are used in the order provided. Kwargs are arguments that are attached to specific keywords, and therefore have a specific use within a function.
Args Example
Step17: Kwargs Example
Step18: Kwargs are commonly used in MetPy, matplotlib, pandas, and many other Python libraries (in fact we've used them in almost every notebook so far!).
<a name="plottingiteration"></a>
Plotting with Iteration
Now let's bring what we've learned about iteration to bear on the problem of plotting. We'll start with a basic example and roll into a more involved system at the end.
To begin, let's make an arbitrary number of plots in a single row
Step19: It's a step forward, but we've lost a lot of formatting information. The lines are both blue, the labels as less ideal, and the title just uses the variable name. We can use some of Python's features like dictionaries, functions, and string manipulation to help improve the versatility of the plotter.
To start out, let's get the line color functionality back by using a Python dictionary to hold that information. Dictionaries can hold any data type and allow you to access that value with a key (hence the name key-value pair). We'll use the variable name for the key and the value will be the color of line to plot.
Step20: To access the value, just access that element of the dictionary with the key.
Step21: Now let's apply that to our plot. We'll use the same code from the previous example, but now look up the line color in the dictionary.
Step22: That's already much better. We need to be able to plot multiple variables on the wind speed/gust plot though. In this case, we'll allow a list of variables for each plot to be given and iterate over them. We'll store this in a list of lists. Each plot has its own list of variables!
Step23: <div class="alert alert-success">
<b>EXERCISE</b>
Step24: Solution
Step25: We're almost back to where to started, but in a much more versatile form! We just need to make the labels and titles look nice. To do that, let's write a function that uses some string manipulation to clean up the variable names and give us an axis/plot title and legend label.
Step26: <a href="#top">Top</a>
<hr style="height | Python Code:
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter, DayLocator
from siphon.simplewebservice.ndbc import NDBC
%matplotlib inline
# Read in some data
df = NDBC.realtime_observations('42039')
# Trim to the last 7 days
df = df[df['time'] > (pd.Timestamp.utcnow() - pd.Timedelta(days=7))]
Explanation: <a name="top"></a>
<div style="width:1000 px">
<div style="float:right; width:98 px; height:98px;">
<img src="https://raw.githubusercontent.com/Unidata/MetPy/master/metpy/plots/_static/unidata_150x150.png" alt="Unidata Logo" style="height: 98px;">
</div>
<h1>Pythonic Data Analysis</h1>
<h3>Unidata Python Workshop</h3>
<div style="clear:both"></div>
</div>
<hr style="height:2px;">
<div style="float:right; width:250 px"><img src="http://matplotlib.org/_images/date_demo.png" alt="METAR" style="height: 300px;"></div>
Overview:
Teaching: 40 minutes
Exercises: 40 minutes
Questions
How can we employ Python language features to make complicated analysis require less code?
How can we make multi panel plots?
What can be done to eliminate repeated code that operates on sequences of objects?
How can functions be used to encapsulate calculations and behaviors?
Objectives
<a href="#basicfunctionality">From the Time Series Plotting Episode</a>
<a href="#multipanel">Multi-panel plots</a>
<a href="#iteration">Iteration and Enumeration</a>
<a href="#functions">Functions</a>
<a href="#argskwargs">Args and Kwargs</a>
<a href="#plottingiteration">Plotting with Iteration</a>
<a href="#multifile">Plotting Multiple Files</a>
<a name="basicfunctionality"></a>
From Time Series Plotting Episode
Here's the basic set of imports and data reading functionality that we established in the Basic Time Series Plotting notebook.
End of explanation
# ShareX means that the axes will share range, ticking, etc. for the x axis
fig, (ax1, ax2) = plt.subplots(1, 2, sharex=True, figsize=(18, 6))
# Panel 1
ax1.plot(df.time, df.wind_speed, color='tab:orange', label='Windspeed')
ax1.set_xlabel('Time')
ax1.set_ylabel('Speed')
ax1.set_title('Measured Winds')
ax1.legend(loc='upper left')
ax1.grid(True)
# Not repeated only by sharing x
ax1.xaxis.set_major_formatter(DateFormatter('%m/%d'))
ax1.xaxis.set_major_locator(DayLocator())
# Panel 2
ax2.plot(df.time, df.pressure, color='black', label='Pressure')
ax2.set_xlabel('Time')
ax2.set_ylabel('hPa')
ax2.set_title('Atmospheric Pressure')
ax2.legend(loc='upper left')
ax2.grid(True)
plt.suptitle('Buoy 42039 Data', fontsize=24)
Explanation: <a name="multipanel"></a>
Multi-panel Plots
Often we wish to create figures with multiple panels of data. It's common to separate variables of different types into these panels. We also don't want to create each panel as an individual figure and combine them in a tool like Illustrator - imagine having to do that for hundreds of plots!
Previously we specified subplots individually with plt.subplot(). We can instead use the subplots method to specify a number of rows and columns of plots in our figure, which returns the figure and all of the axes (subplots) we ask for in a single call:
End of explanation
my_list = ['2001 A Space Odyssey',
'The Princess Bride',
'Monty Python and the Holy Grail']
for item in my_list:
print(item)
Explanation: So even with the sharing of axis information, there's still a lot of repeated code. This current version with just two parameters might still be ok, but:
What if we had more data being plotted on each axes?
What if we had many subplots?
What if we wanted to change one of the parameters?
What if we wanted to plot data from different files on the same plot?
<a name="iteration"></a>
Iteration and Enumeration
Iterating over lists is a very useful tool to reduce the amount of repeated code you write. We're going to start out by iterating over a single list with a for loop. Unlike C or other common scientific languages, Python 'knows' how to iterate over certain objects without you needing to specify an index variable and do the book keeping on that.
End of explanation
my_other_list = ['I\'m sorry, Dave. I\'m afraid I can\'t do that.',
'My name is Inigo Montoya.',
'It\'s only a flesh wound.']
for item in zip(my_list, my_other_list):
print(item)
Explanation: Using the zip function we can even iterate over multiple lists at the same time with ease:
End of explanation
for reference, quote in zip(my_list, my_other_list):
print(reference, '-', quote)
Explanation: That's really handy, but needing to access each part of each item with an index like item[0] isn't very flexible, requires us to remember the layout of the item, and isn't best practice. Instead we can use Python's unpacking syntax to make things nice and intuitive.
End of explanation
# Your code goes here
Explanation: <div class="alert alert-success">
<b>EXERCISE</b>:
<ul>
<li>Make two new lists named <code>plot_variables</code> and <code>plot_names</code>. Populate them
with the variable name and plot label string for windspeed and pressure.</li>
<li>Using the unpacking syntax, write a for loop that prints a sentence describing the action
that would be taken (i.e. Plotting variable wind_speed as Windspeed</li>
</ul>
</div>
End of explanation
# %load solutions/zip.py
Explanation: Solution
End of explanation
zipped_list = [(1, 2),
(3, 4),
(5, 6)]
unzipped = zip(*zipped_list)
print(list(unzipped))
Explanation: zip can also be used to "unzip" items.
End of explanation
for i, quote in enumerate(my_other_list):
print(i, ' - ', quote)
Explanation: Let's break down what happened there. Zip pairs elements from all of the input arguments and hands those back to us. So effectively our zip(*zipped_list) is zip((1, 2), (3, 4), (5, 6)), so the first element from each input is paired (1, 3, 5), etc. You can think of it like unzipping or transposing.
We can use the enumerate function to 'count through' an iterable object as well. This can be useful when placing figures in certain rows/columns or when a counter is needed.
End of explanation
# Your code goes here
Explanation: <div class="alert alert-success">
<b>EXERCISE</b>:
<ul>
<li>Combine what you've learned about enumeration and iteration to produce the following output:</li>
</ul>
<code>0 - 2001 A Space Odyssey - I'm sorry, Dave. I'm afraid I can't do that.
1 - The Princess Bride - My name is Inigo Montoya.
2 - Monty Python and the Holy Grail - It's only a flesh wound.</code>
</div>
End of explanation
# %load solutions/enumerate.py
Explanation: Solution
End of explanation
def silly_add(a, b):
return a + b
Explanation: <a name="functions"></a>
Functions
You're probably already familiar with Python functions, but here's a quick refresher. Functions are used to house blocks of code that we can run repeatedly. Parameters are given as inputs, and values are returned from the function to where it was called. In the world of programming you can think of functions like paragraphs: they encapsulate a complete idea/process.
Let's define a simple function that returns a value:
End of explanation
result = silly_add(3, 4)
print(result)
Explanation: We've re-implemented add which isn't incredibly exciting, but that could be hundreds of lines of a numerical method, making a plot, or some other task. Using the function is simple:
End of explanation
# Your code goes here
Explanation: <div class="alert alert-success">
<b>EXERCISE</b>:
<ul>
<li>Write a function that returns powers of 2. (i.e. calling <code>myfunc(4)</code> returns 2^4)</li>
<li>**Bonus**: Using for loop iteration, print all powers of 2 from 0 to 24.</li>
</ul>
</div>
End of explanation
# %load solutions/functions.py
Explanation: Solution
End of explanation
def read_buoy_data(buoy, days=7):
# Read in some data
df = NDBC.realtime_observations(buoy)
# Trim to the last 7 days
df = df[df['time'] > (pd.Timestamp.utcnow() - pd.Timedelta(days=days))]
return df
df = read_buoy_data('42039')
df
Explanation: Reading buoy data with a function
Let's create a function to read in buoy data and trim it down to the last 7 days by only providing the buoy number to the function.
End of explanation
def arg_func(*argv):
for arg in argv:
print (arg)
arg_func('Welcome', 'to', 'the', 'Python', 'Workshop')
Explanation: <a name="argskwargs"></a>
Args and Kwargs
Within a function call, we can also set optional arguments and keyword arguments (abbreviated args and kwargs in Python). Args are used to pass a variable length list of non-keyword arguments. This means that args don't have a specific keyword they are attached to, and are used in the order provided. Kwargs are arguments that are attached to specific keywords, and therefore have a specific use within a function.
Args Example
End of explanation
# Create a function to conduct all basic math operations, using a kwarg
def silly_function(a, b, operation=None):
if operation == 'add':
return a + b
elif operation == 'subtract':
return a - b
elif operation == 'multiply':
return a * b
elif operation == 'division':
return a / b
else:
raise ValueError('Incorrect value for "operation" provided.')
print(silly_function(3, 4, operation='add'))
print(silly_function(3, 4, operation='multiply'))
Explanation: Kwargs Example
End of explanation
# A list of names of variables we want to plot
plot_variables = ['wind_speed', 'pressure']
# Make our figure, now choosing number of subplots based on length of variable name list
fig, axes = plt.subplots(1, len(plot_variables), sharex=True, figsize=(18, 6))
# Loop over the list of subplots and names together
for ax, var_name in zip(axes, plot_variables):
ax.plot(df.time, df[var_name])
# Set label/title based on variable name--no longer hard-coded
ax.set_ylabel(var_name)
ax.set_title(f'Buoy {var_name}')
# Set up our formatting--note lack of repetition
ax.grid(True)
ax.set_xlabel('Time')
ax.xaxis.set_major_formatter(DateFormatter('%m/%d'))
ax.xaxis.set_major_locator(DayLocator())
Explanation: Kwargs are commonly used in MetPy, matplotlib, pandas, and many other Python libraries (in fact we've used them in almost every notebook so far!).
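A small sketch of the usual pattern (plot_series is our own throwaway helper, not a library function): accept **kwargs and forward them on, so callers can pass any plot() styling they like.
def plot_series(ax, times, values, **kwargs):
    # Anything extra (color=, linestyle=, label=, ...) is forwarded untouched to plot()
    return ax.plot(times, values, **kwargs)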
<a name="plottingiteration"></a>
Plotting with Iteration
Now let's bring what we've learned about iteration to bear on the problem of plotting. We'll start with a basic example and roll into a more involved system at the end.
To begin, let's make an arbitrary number of plots in a single row:
End of explanation
colors = {'wind_speed': 'tab:orange', 'wind_gust': 'tab:olive', 'pressure': 'black'}
Explanation: It's a step forward, but we've lost a lot of formatting information. The lines are both blue, the labels as less ideal, and the title just uses the variable name. We can use some of Python's features like dictionaries, functions, and string manipulation to help improve the versatility of the plotter.
To start out, let's get the line color functionality back by using a Python dictionary to hold that information. Dictionaries can hold any data type and allow you to access that value with a key (hence the name key-value pair). We'll use the variable name for the key and the value will be the color of line to plot.
End of explanation
colors['pressure']
Explanation: To access the value, just access that element of the dictionary with the key.
End of explanation
fig, axes = plt.subplots(1, len(plot_variables), sharex=True, figsize=(18, 6))
for ax, var_name in zip(axes, plot_variables):
# Grab the color from our dictionary and pass it to plot()
color = colors[var_name]
ax.plot(df.time, df[var_name], color)
ax.set_ylabel(var_name)
ax.set_title(f'Buoy {var_name}')
ax.grid(True)
ax.set_xlabel('Time')
ax.xaxis.set_major_formatter(DateFormatter('%m/%d'))
ax.xaxis.set_major_locator(DayLocator())
Explanation: Now let's apply that to our plot. We'll use the same code from the previous example, but now look up the line color in the dictionary.
End of explanation
plot_variables = [['wind_speed', 'wind_gust'], ['pressure']]
fig, axes = plt.subplots(1, len(plot_variables), sharex=True, figsize=(18, 6))
for ax, var_names in zip(axes, plot_variables):
for var_name in var_names:
# Grab the color from our dictionary and pass it to plot()
color = colors[var_name]
ax.plot(df.time, df[var_name], color)
ax.set_ylabel(var_name)
ax.set_title(f'Buoy {var_name}')
ax.grid(True)
ax.set_xlabel('Time')
ax.xaxis.set_major_formatter(DateFormatter('%m/%d'))
ax.xaxis.set_major_locator(DayLocator())
Explanation: That's already much better. We need to be able to plot multiple variables on the wind speed/gust plot though. In this case, we'll allow a list of variables for each plot to be given and iterate over them. We'll store this in a list of lists. Each plot has its own list of variables!
End of explanation
# Create your linestyles dictionary and modify the code below
fig, axes = plt.subplots(1, len(plot_variables), sharex=True, figsize=(18, 6))
for ax, var_names in zip(axes, plot_variables):
for var_name in var_names:
# Grab the color from our dictionary and pass it to plot()
color = colors[var_name]
ax.plot(df.time, df[var_name], color)
ax.set_ylabel(var_name)
ax.set_title(f'Buoy {var_name}')
ax.grid(True)
ax.set_xlabel('Time')
ax.xaxis.set_major_formatter(DateFormatter('%m/%d'))
ax.xaxis.set_major_locator(DayLocator())
Explanation: <div class="alert alert-success">
<b>EXERCISE</b>:
<ul>
<li>Create a dictionary of linestyles in which the variable name is the key and the linestyle is the value.</li>
<li>Use that dictionary to modify the code below to plot the lines with the styles you specified.</li>
</ul>
</div>
End of explanation
# %load solutions/looping1.py
Explanation: Solution
End of explanation
def format_varname(varname):
parts = varname.split('_')
title = parts[0].title()
label = varname.replace('_', ' ').title()
return title, label
fig, axes = plt.subplots(1, len(plot_variables), sharex=True, figsize=(18, 6))
linestyles = {'wind_speed': '-', 'wind_gust': '--', 'pressure': '-'}
for ax, var_names in zip(axes, plot_variables):
for var_name in var_names:
title, label = format_varname(var_name)
color = colors[var_name]
linestyle = linestyles[var_name]
ax.plot(df.time, df[var_name], color, linestyle=linestyle, label=label)
ax.set_ylabel(title)
ax.set_title(f'Buoy {title}')
ax.grid(True)
ax.set_xlabel('Time')
ax.xaxis.set_major_formatter(DateFormatter('%m/%d'))
ax.xaxis.set_major_locator(DayLocator())
ax.legend(loc='upper left')
Explanation: We're almost back to where we started, but in a much more versatile form! We just need to make the labels and titles look nice. To do that, let's write a function that uses some string manipulation to clean up the variable names and give us an axis/plot title and legend label.
End of explanation
buoys = ['42039', '42022']
fig, axes = plt.subplots(len(buoys), len(plot_variables), sharex=True, figsize=(14, 10))
for row, buoy in enumerate(buoys):
df = read_buoy_data(buoy)
for col, var_names in enumerate(plot_variables):
ax = axes[row,col]
for var_name in var_names:
title, label = format_varname(var_name)
color = colors[var_name]
linestyle = linestyles[var_name]
ax.plot(df.time, df[var_name], color, linestyle=linestyle, label=label)
ax.set_ylabel(title)
ax.set_title(f'Buoy {buoy} {title}')
ax.grid(True)
ax.set_xlabel('Time')
ax.xaxis.set_major_formatter(DateFormatter('%m/%d'))
ax.xaxis.set_major_locator(DayLocator())
Explanation: <a href="#top">Top</a>
<hr style="height:2px;">
<a name="multifile"></a>
Plotting Multiple Files
Finally, let's plot data for two buoys on the same figure by iterating over a list of file names. We can use enumerate to plot each file on a new row of the figure. We will also create a function to read in the buoy data and avoid all of that repeated code.
End of explanation |
11,355 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Generative Adversarial Networks
Generative Adversarial Networks are invented by Ian Goodfellow (https
Step1: MNIST database
The MNIST database (Modified National Institute of Standards and Technology database) is a large database of handwritten digits that is commonly used for training various image processing systems. The database is also widely used for training and testing in the field of machine learning.
The MNIST database contains 60,000 training images and 10,000 testing images. Half of the training set and half of the test set were taken from NIST's training dataset, while the other half of the training set and the other half of the test set were taken from NIST's testing dataset.
Step2: Description of discriminator
As the discriminator network needs to differentiate between real and fake images the discriminator takes in [1,28,28] image vectors. For this purpose several convolutional layers are used.
Step3: Description of generator
For the generator we generate 100 random inputs and eventually map them down to a [1,28,28] pixel so that the they have the same shape as the MNIST data.
In Keras, for Deconvolution there is the command "Conv2DTranspose"
Step4: When training the GAN we are searching for an equilibrium point, which is the optimal point in a minimax game
Step5: The algorithm for training a GAN is the following
Step6: Based on the trained model we want to check whether the generator has learnt the correct images. | Python Code:
import numpy as np
from keras.datasets import mnist
import keras
from keras.layers import Input, UpSampling2D, Conv2DTranspose, Conv2D, LeakyReLU
from keras.layers.core import Reshape,Dense,Dropout,Activation,Flatten
from keras.models import Sequential
from keras.optimizers import RMSprop, Adam
from tensorflow.examples.tutorials.mnist import input_data
from keras.layers.normalization import *
import matplotlib.pyplot as plt
import cv2
%matplotlib inline
Explanation: Generative Adversarial Networks
Generative Adversarial Networks are invented by Ian Goodfellow (https://arxiv.org/abs/1406.2661).
"There are many interesting recent development in deep learning…The most important one, in my opinion, is adversarial training (also called GAN for Generative Adversarial Networks). This, and the variations that are now being proposed is the most interesting idea in the last 10 years in ML, in my opinion." – Yann LeCun
One network generates candidates and one evaluates them, i.e. we have two models, a generative model and a discriminative model. Before looking at GANs, let’s briefly review the difference between generative and discriminative models:
- A discriminative model learns a function that maps the input data (x) to some desired output class label (y). In probabilistic terms, they directly learn the conditional distribution P(y|x).
- A generative model tries to learn the joint probability of the input data and labels simultaneously, i.e. P(x,y). This can be converted to P(y|x) for classification via Bayes rule, but the generative ability could be used for something else as well, such as creating likely new (x, y) samples.
The discriminative model has the task of determining whether a given image looks natural (an image from the dataset) or looks like it has been artificially created. The task of the generator is to create images so that the discriminator gets trained to produce the correct outputs. This can be thought of as a zero-sum or minimax two player game. Or, as Goodfellow describes it:
"the generative model is pitted against an adversary: a discriminative model that learns to determine whether a sample is from the model distribution or the data distribution. The generative model can be thought of as analogous to a team of counterfeiters, trying to produce fake currency and use it without detection, while the discriminative model is analogous to the police, trying to detect the counterfeit currency. Competition in this game drives both teams to improve their methods until the counterfeits are indistinguishable from the genuine articles."
The generator is typically a deconvolutional neural network, and the discriminator is a convolutional neural network. Convolutional networks are a bottom-up approach where the input signal is subjected to multiple layers of convolutions, non-linearities and sub-sampling. By contrast, each layer in our Deconvolutional Network is top-down; it seeks to generate the input signal by a sum over convolutions of the feature maps (as opposed to the input) with learned filters. Given an input and a set of filters, inferring the feature map activations requires solving a multi-component deconvolution problem that is computationally challenging.
Here is a short overview of the process:
<img src="images/GAN.png">
What are the pros and cons of Generative Adversarial Networks?
- https://www.quora.com/What-are-the-pros-and-cons-of-using-generative-adversarial-networks-a-type-of-neural-network
Why are they important?
The discriminator now is aware of the “internal representation of the data” because it has been trained to understand the differences between real images from the dataset and artificially created ones. Thus, it can be used as a feature extractor that you can use in a CNN.
End of explanation
(X_train, y_train), (X_test, y_test) = mnist.load_data()
x_train = input_data.read_data_sets("mnist",one_hot=True).train.images
x_train = x_train.reshape(-1, 28,28, 1).astype(np.float32)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
pixels = x_train[0]
pixels = pixels.reshape((28, 28))
# Plot
plt.imshow(pixels, cmap='gray')
plt.show()
Explanation: MNIST database
The MNIST database (Modified National Institute of Standards and Technology database) is a large database of handwritten digits that is commonly used for training various image processing systems. The database is also widely used for training and testing in the field of machine learning.
The MNIST database contains 60,000 training images and 10,000 testing images. Half of the training set and half of the test set were taken from NIST's training dataset, while the other half of the training set and the other half of the test set were taken from NIST's testing dataset.
End of explanation
# Build discriminator
Dis = Sequential()
input_shape = (28,28,1)
#output 14 x 14 x 64
Dis.add(Conv2D(64, 5, strides = 2, input_shape = input_shape, padding='same'))
Dis.add(LeakyReLU(0.2))
Dis.add(Dropout(0.2))
#output 7 x 7 x 128
Dis.add(Conv2D(128, 5, strides = 2, input_shape = input_shape, padding='same'))
Dis.add(LeakyReLU(0.2))
Dis.add(Dropout(0.2))
#output 4 x 4 x 256
Dis.add(Conv2D(256, 5, strides = 2, input_shape = input_shape, padding='same'))
Dis.add(LeakyReLU(0.2))
Dis.add(Dropout(0.2))
#output 4 x 4 x 512
Dis.add(Conv2D(512, 5, strides = 1, input_shape = input_shape, padding='same'))
Dis.add(LeakyReLU(0.2))
Dis.add(Dropout(0.2))
# Out: 1-dim probability
Dis.add(Flatten())
Dis.add(Dense(1))
Dis.add(Activation('sigmoid'))
Dis.summary()
Explanation: Description of discriminator
As the discriminator network needs to differentiate between real and fake images, it takes in 28x28x1 image tensors. For this purpose several convolutional layers are used.
End of explanation
#Build generator
g_input = Input(shape=[100])
Gen = Sequential()
Gen.add(Dense(7*7*256, input_dim=100,kernel_initializer="glorot_normal"))
Gen.add(BatchNormalization(momentum=0.9))
Gen.add(Activation('relu'))
Gen.add(Reshape((7, 7,256)))
#G.add(Dropout(0.2))
# Input 7 x 7 x 256
# Output 14 x 14 x 128
Gen.add(UpSampling2D())
Gen.add(Conv2DTranspose(int(128), 5, padding='same',kernel_initializer="glorot_normal"))
Gen.add(BatchNormalization(momentum=0.9))
Gen.add(Activation('relu'))
# Input 14 x 14 x 128
# Output 28 x 28 x 64
Gen.add(UpSampling2D())
Gen.add(Conv2DTranspose(int(64), 5, padding='same',kernel_initializer="glorot_normal"))
Gen.add(BatchNormalization(momentum=0.9))
Gen.add(Activation('relu'))
# Input 28 x 28 x 64
# Output 28 x 28 x 32
Gen.add(Conv2DTranspose(int(32), 5, padding='same',kernel_initializer="glorot_normal"))
Gen.add(BatchNormalization(momentum=0.9))
Gen.add(Activation('relu'))
# Out: 28 x 28 x 1
Gen.add( Conv2DTranspose(1, 5, padding='same',kernel_initializer="glorot_normal"))
Gen.add( Activation('sigmoid'))
Gen.summary()
# Discriminator model
optimizer = Adam(lr=0.0002, beta_1=0.5)
DM = Sequential()
DM.add(Dis)
DM.compile(loss='binary_crossentropy', optimizer=optimizer,metrics=['accuracy'])
DM.summary()
Explanation: Description of generator
For the generator we draw 100 random inputs and eventually map them to a 28x28x1 image so that they have the same shape as the MNIST data.
In Keras, deconvolution is available as the "Conv2DTranspose" layer: a transposed convolution layer (sometimes called deconvolution). The need for transposed convolutions generally arises from the desire to use a transformation going in the opposite direction of a normal convolution, i.e., from something that has the shape of the output of some convolution to something that has the shape of its input while maintaining a connectivity pattern that is compatible with said convolution.
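A tiny standalone sketch (separate from the generator above, reusing the Sequential and Conv2DTranspose imports from the top of this notebook) of how a strided transposed convolution grows the spatial size, e.g. 7x7 to 14x14:
demo = Sequential()
demo.add(Conv2DTranspose(128, 5, strides=2, padding='same', input_shape=(7, 7, 256)))
demo.summary()  # output shape should be (None, 14, 14, 128)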
End of explanation
# Adversarial model
optimizer = Adam(lr=0.0002, beta_1=0.5)
AM = Sequential()
AM.add(Gen)
AM.add(Dis)
AM.compile(loss='binary_crossentropy', optimizer=optimizer,metrics=['accuracy'])
AM.summary()
# Freeze weights in discriminator D for stacked training
def make_trainable(net, val):
net.trainable = val
for l in net.layers:
l.trainable = val
make_trainable(Dis, False)
Explanation: When training the GAN we are searching for an equilibrium point, which is the optimal point in a minimax game:
- The generator will model the real data,
- and the discriminator will output a probability of 0.5, because it can no longer tell the generator's output apart from real data.
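For reference, the value function of this minimax game from the original paper (Goodfellow et al., 2014) is
$$\min_G \max_D V(D, G) = \mathbb{E}_{x \sim p_{data}(x)}\big[\log D(x)\big] + \mathbb{E}_{z \sim p_z(z)}\big[\log\big(1 - D(G(z))\big)\big]$$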
End of explanation
train_steps=50000
batch_size=256
noise_input = None
for i in range(train_steps):
images_train = x_train[np.random.randint(0,x_train.shape[0], size=batch_size),:,:,:]
noise = np.random.normal(0.0, 1.0, size=[batch_size, 100])
images_fake = Gen.predict(noise)
make_trainable(Dis, True)
x = np.concatenate((images_train, images_fake))
y = np.ones([2*batch_size, 1])
y[batch_size:, :] = 0
d_loss = DM.train_on_batch(x, y)
make_trainable(Dis, False)
y = np.ones([batch_size, 1])
noise = np.random.normal(0.0, 1.0, size=[batch_size, 100])
a_loss = AM.train_on_batch(noise, y)
Gen.save('Generator_model.h5')
Explanation: The algorithm for training a GAN is the following:
1. Generate images using G and random noise (G predicts random images)
2. Perform a Batch update of weights in D (the discriminator) given generated images, real images, and labels.
3. Perform a Batch update of weights in G given noise and forced “real” labels in the full GAN.
4. Repeat
End of explanation
noise = np.random.normal(0.0, 1.0,size=[256,100])
generated_images = Gen.predict(noise)
for i in range(10):
pixels =generated_images[i]
pixels = pixels.reshape((28, 28))
# Plot
plt.imshow(pixels, cmap='gray')
plt.show()
Explanation: Based on the trained model we want to check whether the generator has learnt the correct images.
End of explanation |
11,356 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Module 10: Logscale
Step1: Ratio and logarithm
If you use linear scale to visualize ratios, it can be quite misleading.
Let's first create some ratios.
Step2: Q
Step3: Q
Step4: Log-binning
Let's first see what happens if we do not use the log scale for a dataset with a heavy tail.
Q
Step5: If you simply call the hist() method on a dataframe object, it identifies all the numeric columns and draws a histogram for each.
Q
Step6: As we can see, a majority of the columns are not normally distributed. In particular, if you look at the worldwide gross variable, you only see a couple of meaningful data points in the histogram. Is this a problem of resolution? How about increasing the number of bins?
Step7: Maybe a bit more useful, but it doesn't tell us anything about the data distribution above a certain point. How about changing the vertical scale to a logarithmic scale?
Step8: Now, let's try log-bin. Recall that when plotting histograms we can specify the edges of the bins through the bins parameter. For example, we can specify the edges of the bins as [1, 2, 3, ... , 10] as follows.
Step9: Here, we can specify the edges of bins in a similar way. Instead of specifying on the linear scale, we do it on the log space. Some useful resources
Step10: Because there seems to be movie(s) that made $0, and because log(0) is undefined & log(1) = 0, let's add 1 to the variable.
Step11: Now we can plot a histogram with the log-bins. Set both axes to be log-scale.
Step12: What is going on? Is this the right plot?
Q
Step13: Q
Step14: We can also try in semilog scale (only one axis is in a log-scale), where the horizontal axis is linear.
Step15: A straight line in semilog scale means exponential decay (cf. a straight line in log-log scale means power-law decay). So it seems like the amount of money a movie makes across the world follows roughly an exponential distribution, while there are some outliers that make an insane amount of money.
Q | Python Code:
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import scipy.stats as ss
import vega_datasets
Explanation: Module 10: Logscale
End of explanation
x = np.array([1, 1, 1, 1, 10, 100, 1000])
y = np.array([1000, 100, 10, 1, 1, 1, 1 ])
ratio = x/y
print(ratio)
Explanation: Ratio and logarithm
If you use linear scale to visualize ratios, it can be quite misleading.
Let's first create some ratios.
End of explanation
X = np.arange(len(ratio))
# Implement
Explanation: Q: Plot on the linear scale using the scatter() function. Also draw a horizontal line at ratio=1 for a reference.
End of explanation
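One possible solution to the question above (a sketch, not the notebook's official answer; it reuses the X and ratio arrays defined earlier):
plt.scatter(X, ratio)
plt.axhline(y=1, color='gray', linestyle='--')  # reference line at ratio = 1
plt.xlabel('index')
plt.ylabel('ratio')
plt.show()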
# Implement
Explanation: Q: Is this a good visualization of the ratio data? Why? Why not? Explain.
Q: Can you fix it?
End of explanation
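One way to fix it (a sketch): put the y-axis on a log scale so that x/y and y/x are shown symmetrically around 1.
plt.scatter(X, ratio)
plt.axhline(y=1, color='gray', linestyle='--')
plt.yscale('log')  # ratios of 10 and 1/10 now sit at equal distances from the reference line
plt.xlabel('index')
plt.ylabel('ratio (log scale)')
plt.show()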
# TODO: Implement the functionality mentioned above
# The following code is just a dummy. You should load the correct dataset from vega_datasets package.
movies = pd.DataFrame({"Worldwide_Gross": np.random.sample(200), "IMDB_Rating": np.random.sample(200)})
Explanation: Log-binning
Let's first see what happens if we do not use the log scale for a dataset with a heavy tail.
Q: Load the movie dataset from vega_datasets and remove the NaN rows based on the following three columns: IMDB_Rating, IMDB_Votes, Rotten_Tomatoes_Rating.
End of explanation
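A possible implementation (a sketch; it assumes the movies dataset is exposed by the vega_datasets package as data.movies() and contains the three columns named above):
from vega_datasets import data
movies = data.movies()
movies = movies.dropna(subset=['IMDB_Rating', 'IMDB_Votes', 'Rotten_Tomatoes_Rating'])
movies.shape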
# Implement
Explanation: If you simply call the hist() method on a dataframe object, it identifies all the numeric columns and draws a histogram for each.
Q: draw all possible histograms of the movie dataframe. Adjust the size of the plots if needed.
End of explanation
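For instance (a sketch):
movies.hist(figsize=(12, 10), bins=20)
plt.tight_layout()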
ax = movies["Worldwide_Gross"].hist(bins=200)
ax.set_xlabel("World wide gross")
ax.set_ylabel("Frequency")
Explanation: As we can see, a majority of the columns are not normally distributed. In particular, if you look at the worldwide gross variable, you only see a couple of meaningful data points in the histogram. Is this a problem of resolution? How about increasing the number of bins?
End of explanation
ax = movies["Worldwide_Gross"].hist(bins=200)
ax.set_yscale('log')
ax.set_xlabel("World wide gross")
ax.set_ylabel("Frequency")
Explanation: Maybe a bit more useful, but it doesn't tell us anything about the data distribution above a certain point. How about changing the vertical scale to a logarithmic scale?
End of explanation
movies["IMDB_Rating"].hist(bins=range(0,11))
Explanation: Now, let's try log-bin. Recall that when plotting histograms we can specify the edges of the bins through the bins parameter. For example, we can specify the edges of the bins as [1, 2, 3, ... , 10] as follows.
End of explanation
min(movies["Worldwide_Gross"])
Explanation: Here, we can specify the edges of bins in a similar way. Instead of specifying on the linear scale, we do it on the log space. Some useful resources:
Google query: python log-bin
numpy.logspace
numpy.linspace vs numpy.logspace
Hint: since $10^{\text{start}} = \text{min(Worldwide_Gross)}$, $\text{start} = \log_{10}(\text{min(Worldwide_Gross)})$
End of explanation
movies["Worldwide_Gross"] = movies["Worldwide_Gross"]+1.0
# TODO: Replace the dummy value of bins using np.logspace.
# Create 20 bins that cover the whole range of the dataset.
bins = [1.0, 2.0, 4.0]
bins
Explanation: Because there seems to be movie(s) that made $0, and because log(0) is undefined & log(1) = 0, let's add 1 to the variable.
End of explanation
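One way to fill in the TODO above (a sketch, not the notebook's official solution): take log10 of the minimum and maximum of the shifted variable and let np.logspace generate 20 edges between them.
bins = np.logspace(np.log10(movies['Worldwide_Gross'].min()),
                   np.log10(movies['Worldwide_Gross'].max()),
                   20)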
ax = (movies["Worldwide_Gross"]+1.0).hist(bins=bins)
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlabel("World wide gross")
ax.set_ylabel("Frequency")
Explanation: Now we can plot a histogram with the log-bins. Set both axes to be log-scale.
End of explanation
# Implement
Explanation: What is going on? Is this the right plot?
Q: explain and fix
End of explanation
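A likely fix (a sketch): the column was already shifted by +1 above, so plot it directly with the log-spaced bins instead of shifting it a second time.
ax = movies['Worldwide_Gross'].hist(bins=bins)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('World wide gross')
ax.set_ylabel('Frequency')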
# TODO: Implement functionality mentioned above
# You must replace the dummy values with the correct code.
worldgross_sorted = np.random.sample(200)
Y = np.random.sample(200)
Explanation: Q: Can you explain the plot? Why are there gaps?
CCDF
CCDF is a nice alternative for examining distributions with heavy tails. The idea is the same as the CDF, but the direction of aggregation is opposite: for a given value x, CCDF(x) is the number (fraction) of data points that are equal to or larger than x. To write code to draw a CCDF, it'll be helpful to first draw it by hand using a very small, toy dataset. Draw it by hand and then think about how each point in the CCDF plot can be computed.
Q: Draw a CCDF of worldwide gross data in log-log scale
End of explanation
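One way to replace the dummy values above (a sketch): sort the values and compute, for each value, the fraction of movies that gross at least that much.
worldgross_sorted = np.sort(movies['Worldwide_Gross'].values)
Y = 1.0 - np.arange(len(worldgross_sorted)) / len(worldgross_sorted)  # fraction of data points >= x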
plt.xlabel("World wide gross")
plt.ylabel("CCDF")
plt.plot(worldgross_sorted,Y)
plt.yscale('log')
Explanation: We can also try in semilog scale (only one axis is in a log-scale), where the horizontal axis is linear.
End of explanation
# Implement
Explanation: A straight line in semilog scale means exponential decay (cf. a straight line in log-log scale means power-law decay). So it seems like the amount of money a movie makes across the world follows roughly an exponential distribution, while there are some outliers that make an insane amount of money.
Q: Which is the most successful movie in our dataset?
You can use the following
idxmax(): https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.idxmax.html
loc: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.loc.html
End of explanation |
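Combining the two hints above, one possible answer is (a sketch):
movies.loc[movies['Worldwide_Gross'].idxmax()]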
11,357 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Data Manipulation with Numpy and Pandas
Handling large data is easy in Python; the simplest way is to use plain arrays, but they are pretty slow. Numpy and Pandas are two great libraries for dealing with datasets. Numpy is used for homogeneous n-dimensional data (matrices). Pandas is used for heterogeneous tables (CSV, MS Excel tables). Pandas is internally based on Numpy, too. See http://scipy-lectures.github.io/ for a more detailed lesson.
Step1: Accessing elements
Step2: Operations along an axis
Step3: A quick-ish introduction to Pandas
based on http://pandas.pydata.org/pandas-docs/stable/10min.html
Step4: Selection
Note While many of the NumPy access methods work on DataFrames, use the pandas-specific data access methods, .at, .iat, .loc, .iloc and .ix.
See the Indexing section and below.
Step5: Missing Data
Step6: Statistics
besides .describe() there are plenty of other statistical measures and aggregation methods in Pandas/Numpy
Step7: TASKS
Now there are a series of simple tasks | Python Code:
import numpy as np
# Generating a random array
X = np.random.random((3, 5)) # a 3 x 5 array
print(X)
Explanation: Data Manipulation with Numpy and Pandas
Handling large data is easy in Python; the simplest way is to use plain arrays, but they are pretty slow. Numpy and Pandas are two great libraries for dealing with datasets. Numpy is used for homogeneous n-dimensional data (matrices). Pandas is used for heterogeneous tables (CSV, MS Excel tables). Pandas is internally based on Numpy, too. See http://scipy-lectures.github.io/ for a more detailed lesson.
End of explanation
# get a single element
X[0, 0]
# get a row
X[1]
# get a column
X[:, 1]
# Transposing an array
X.T
print(X.shape)
print(X.reshape(5, 3)) #change the layout of the matrix
# indexing by an array of integers (fancy indexing)
indices = np.array([3, 1, 0])
print(indices)
X[:, indices]
Explanation: Accessing elements
End of explanation
X
X.shape
np.sum(X, axis=1) # 1...columns
np.max(X, axis=0) # 0...rows
Explanation: Operations along an axis
End of explanation
import numpy as np
import pandas as pd
#use a standard dataset of heterogenous data
cars = pd.read_csv('data/mtcars.csv')
cars.head()
#list all columns
cars.columns
#we want to use the car as the "primary key" of a row
cars.index = cars.pop('car')
cars.head()
#describe our dataset
cars.describe()
cars.sort_index(inplace=True)
cars.head()
cars.sort_values('mpg').head(15)
cars.sort_values('hp', ascending=False).head()
Explanation: A quick-ish introduction to Pandas
based on http://pandas.pydata.org/pandas-docs/stable/10min.html
End of explanation
#single column
cars['mpg']
#depending on the name also cars.mpg works
#or a slice of rows
cars[2:5]
#by label = primary key
cars.loc['Fiat 128':'Lotus Europa']
#selection by position
cars.iloc[3]
cars.iloc[3:5, 0:2]
cars[cars.cyl > 6] # more than 6 cylinders
Explanation: Selection
Note While many of the NumPy access methods work on DataFrames, use the pandas-specific data access methods, .at, .iat, .loc, .iloc and .ix.
See the Indexing section and below.
End of explanation
cars_na = pd.read_csv('data/mtcars_with_nas.csv')
cars_na.isnull().head(4)
#fill with a default value
cars_na.fillna(0).head(4)
#or drop the rows
print(cars_na.shape)
#drop rows with na values
print(cars_na.dropna().shape)
#drop columns with na values
print(cars_na.dropna(axis=1).shape)
#see also http://pandas.pydata.org/pandas-docs/stable/missing_data.html
Explanation: Missing Data
End of explanation
#stats
cars.mean()
cars.mean(axis=1)
#grouping
cars.groupby('cyl').mean()
#grouping different aggregation methods
cars.groupby('cyl').agg({ 'mpg': 'mean', 'qsec': 'min'})
Explanation: Statistics
besides .describe() there are plenty of other statistical measures and aggregation methods in Pandas/Numpy
End of explanation
#loading gapminder data (taken from https://github.com/jennybc/gapminder)
# file located at 'data/gapminder-unfiltered.tsv' it uses tabular character as separator
# use the first column as index
#what are the columns of this dataset?
#what is the maximal year contained?
#just select all data of the year 2007
#locate Austria and print it
#list the top 10 countries by life expectancy (lifeExp)
#what is the total population (pop) per continent
Explanation: TASKS
Now there are a series of simple tasks
End of explanation |
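A possible start for the first few tasks (a sketch; the file path and separator come from the comments above, while the column names year, lifeExp, pop and continent are the usual gapminder columns and are assumed here):
gapminder = pd.read_csv('data/gapminder-unfiltered.tsv', sep='\t', index_col=0)
print(gapminder.columns)
print(gapminder.year.max())
gapminder_2007 = gapminder[gapminder.year == 2007]
print(gapminder.loc['Austria'])
print(gapminder_2007.sort_values('lifeExp', ascending=False).head(10))
print(gapminder_2007.groupby('continent')['pop'].sum())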
11,358 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Star catalogue analysis
Thanks to UCF Physics undergrad Tyler Townsend for contributing to the development of this notebook.
Step1: Getting the data
Step2: Star map
Step3: Let's Graph a Constellation!
Step4: Let's Go Back in Time!
Step5: Let's Go Into the Future!
Step6: Now you try one of your own! | Python Code:
# Import modules that contain functions we need
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
Explanation: Star catalogue analysis
Thanks to UCF Physics undergrad Tyler Townsend for contributing to the development of this notebook.
End of explanation
# Read in data that will be used for the calculations.
# Using pandas read_csv method, we can create a data frame
data = pd.read_csv("https://raw.githubusercontent.com/merryjman/astronomy/master/stars.csv")
datatwo = pd.read_csv("https://raw.githubusercontent.com/astronexus/HYG-Database/master/hygdata_v3.csv")
# We wish to look at the first 12 rows of our data set
data.head(12)
Explanation: Getting the data
End of explanation
fig = plt.figure(figsize=(15, 4))
plt.scatter(data.ra,data.dec, s=0.01)
plt.xlim(24, 0)
plt.title("All the Stars in the Catalogue")
plt.xlabel('right ascension (hours)')
plt.ylabel('declination (degrees)')
Explanation: Star map
End of explanation
# These are the abbreviations for all the constellations
datatwo.sort_values('con').con.unique()
# This shows just one constellation.
datatwo_con = datatwo.query('con == "UMa"')
#Define a variable called "name" so I don't have to keep renaming the plot title!
name = "Ursa Major"
# This plots where the brightest 15 stars are in the sky
datatwo_con = datatwo_con.sort_values('mag').head(15)
plt.scatter(datatwo_con.ra,datatwo_con.dec)
plt.gca().invert_xaxis()
# I graphed first without the line below, to see what it looks like, then
# I added the plt.xlim(25,20) to make it look nicer.
plt.xlim(15,8)
plt.ylim(30,70)
plt.title('%s In the Sky'%(name))
plt.xlabel('right ascension (hours)')
plt.ylabel('declination (degrees)')
Explanation: Let's Graph a Constellation!
End of explanation
# What did this constellation look like 50,000 years ago??
plt.scatter(datatwo_con.ra-datatwo_con.pmra/1000/3600/15*50000,datatwo_con.dec-datatwo_con.pmdec/1000/3600*50000)
plt.xlim(15,8)
plt.ylim(30,70)
plt.title('%s Fifty Thousand Years Ago!'%(name))
plt.xlabel('right ascension (hours)')
plt.ylabel('declination (degrees)')
Explanation: Let's Go Back in Time!
End of explanation
# Now, let's try looking at what this same constellation will look like in 50,000 years!
plt.scatter(datatwo_con.ra+datatwo_con.pmra/1000/3600/15*50000,datatwo_con.dec+datatwo_con.pmdec/1000/3600*50000)
plt.xlim(15,8)
plt.ylim(30,70)
plt.title('%s Fifty Thousand Years From Now!'%(name))
plt.xlabel('right ascension (hours)')
plt.ylabel('declination (degrees)')
Explanation: Let's Go Into the Future!
End of explanation
# Make a Hertzsprung-Russell Diagram!
Explanation: Now you try one of your own!
End of explanation |
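One possible approach (a sketch; it assumes the HYG catalogue's ci column, the B-V colour index, and its absmag column, neither of which is used elsewhere in this notebook):
hr = datatwo.dropna(subset=['ci'])
plt.scatter(hr.ci, hr.absmag, s=0.5)
plt.gca().invert_yaxis()   # brighter stars (smaller absolute magnitude) plotted towards the top
plt.xlabel('colour index (B-V)')
plt.ylabel('absolute magnitude')
plt.title('Hertzsprung-Russell Diagram')
plt.show()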
11,359 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Introduction
Run this cell to set everything up!
Step1: One advantage linear regression has over more complicated algorithms is that the models it creates are explainable -- it's easy to interpret what contribution each feature makes to the predictions. In the model target = weight * feature + bias, the weight tells you by how much the target changes on average for each unit of change in the feature.
Run the next cell to see a linear regression on Hardcover Sales.
Step2: 1) Interpret linear regression with the time dummy
The linear regression line has an equation of (approximately) Hardcover = 3.33 * Time + 150.5. Over 6 days how much on average would you expect hardcover sales to change? After you've thought about it, run the next cell.
Step3: Interpreting the regression coefficients can help us recognize serial dependence in a time plot. Consider the model target = weight * lag_1 + error, where error is random noise and weight is a number between -1 and 1. The weight in this case tells you how likely the next time step will have the same sign as the previous time step
Step4: One of these series has the equation target = 0.95 * lag_1 + error and the other has the equation target = -0.95 * lag_1 + error, differing only by the sign on the lag feature. Can you tell which equation goes with each series?
Step5: Now we'll get started with the Store Sales - Time Series Forecasting competition data. The entire dataset comprises almost 1800 series recording store sales across a variety of product families from 2013 into 2017. For this lesson, we'll just work with a single series (average_sales) of the average sales each day.
3) Fit a time-step feature
Complete the code below to create a linear regression model with a time-step feature on the series of average product sales. The target is in a column called 'sales'.
Step6: Run this cell if you'd like to see a plot of the result.
Step7: 4) Fit a lag feature to Store Sales
Complete the code below to create a linear regression model with a lag feature on the series of average product sales. The target is in a column of df called 'sales'.
Step8: Run the next cell if you'd like to see the result. | Python Code:
# Setup feedback system
from learntools.core import binder
binder.bind(globals())
from learntools.time_series.ex1 import *
# Setup notebook
from pathlib import Path
from learntools.time_series.style import * # plot style settings
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.linear_model import LinearRegression
data_dir = Path('../input/ts-course-data/')
comp_dir = Path('../input/store-sales-time-series-forecasting')
book_sales = pd.read_csv(
data_dir / 'book_sales.csv',
index_col='Date',
parse_dates=['Date'],
).drop('Paperback', axis=1)
book_sales['Time'] = np.arange(len(book_sales.index))
book_sales['Lag_1'] = book_sales['Hardcover'].shift(1)
book_sales = book_sales.reindex(columns=['Hardcover', 'Time', 'Lag_1'])
ar = pd.read_csv(data_dir / 'ar.csv')
dtype = {
'store_nbr': 'category',
'family': 'category',
'sales': 'float32',
'onpromotion': 'uint64',
}
store_sales = pd.read_csv(
comp_dir / 'train.csv',
dtype=dtype,
parse_dates=['date'],
infer_datetime_format=True,
)
store_sales = store_sales.set_index('date').to_period('D')
store_sales = store_sales.set_index(['store_nbr', 'family'], append=True)
average_sales = store_sales.groupby('date').mean()['sales']
Explanation: Introduction
Run this cell to set everything up!
End of explanation
fig, ax = plt.subplots()
ax.plot('Time', 'Hardcover', data=book_sales, color='0.75')
ax = sns.regplot(x='Time', y='Hardcover', data=book_sales, ci=None, scatter_kws=dict(color='0.25'))
ax.set_title('Time Plot of Hardcover Sales');
Explanation: One advantage linear regression has over more complicated algorithms is that the models it creates are explainable -- it's easy to interpret what contribution each feature makes to the predictions. In the model target = weight * feature + bias, the weight tells you by how much the target changes on average for each unit of change in the feature.
Run the next cell to see a linear regression on Hardcover Sales.
End of explanation
# View the solution (Run this line to receive credit!)
q_1.check()
# Uncomment the next line for a hint
#_COMMENT_IF(PROD)_
q_1.hint()
Explanation: 1) Interpret linear regression with the time dummy
The linear regression line has an equation of (approximately) Hardcover = 3.33 * Time + 150.5. Over 6 days how much on average would you expect hardcover sales to change? After you've thought about it, run the next cell.
End of explanation
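As a rough back-of-the-envelope check (not the course's official answer): the time dummy advances by 1 each day, so the fitted slope implies roughly 6 * 3.33, or about 20, additional hardcover sales on average over 6 days.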
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(11, 5.5), sharex=True)
ax1.plot(ar['ar1'])
ax1.set_title('Series 1')
ax2.plot(ar['ar2'])
ax2.set_title('Series 2');
Explanation: Interpreting the regression coefficients can help us recognize serial dependence in a time plot. Consider the model target = weight * lag_1 + error, where error is random noise and weight is a number between -1 and 1. The weight in this case tells you how likely the next time step will have the same sign as the previous time step: a weight close to 1 means target will likely have the same sign as the previous step, while a weight close to -1 means target will likely have the opposite sign.
2) Interpret linear regression with a lag feature
Run the following cell to see two series generated according to the model just described.
End of explanation
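To see where such series come from, here is a minimal simulation of the stated model (a sketch, not part of the course data):
series = np.zeros(200)
for t in range(1, 200):
    series[t] = 0.95 * series[t - 1] + np.random.randn()   # target = 0.95 * lag_1 + error
pd.Series(series).plot(title='Simulated series, weight = 0.95');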
# View the solution (Run this cell to receive credit!)
q_2.check()
# Uncomment the next line for a hint
#_COMMENT_IF(PROD)_
q_2.hint()
Explanation: One of these series has the equation target = 0.95 * lag_1 + error and the other has the equation target = -0.95 * lag_1 + error, differing only by the sign on the lag feature. Can you tell which equation goes with each series?
End of explanation
from sklearn.linear_model import LinearRegression
df = average_sales.to_frame()
# YOUR CODE HERE: Create a time dummy
time = ____
#_UNCOMMENT_IF(PROD)_
#df['time'] = time
# YOUR CODE HERE: Create training data
X = ____ # features
y = ____ # target
# Train the model
#_UNCOMMENT_IF(PROD)_
#model = LinearRegression()
#_UNCOMMENT_IF(PROD)_
#model.fit(X, y)
# Store the fitted values as a time series with the same time index as
# the training data
#_UNCOMMENT_IF(PROD)_
#y_pred = pd.Series(model.predict(X), index=X.index)
# Check your answer
q_3.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
q_3.hint()
#_COMMENT_IF(PROD)_
q_3.solution()
#%%RM_IF(PROD)%%
from sklearn.linear_model import LinearRegression
df = average_sales.to_frame()
time = np.ones_like(df.index)
df['time'] = time
X = df.loc[:, ['time']]
y = df.loc[:, 'sales']
model = LinearRegression()
model.fit(X, y)
y_pred = pd.Series(model.predict(X), index=X.index)
q_3.assert_check_failed()
#%%RM_IF(PROD)%%
from sklearn.linear_model import LinearRegression
df = average_sales.to_frame()
time = np.arange(len(df.index))
df['time'] = time
X = df.loc[:, ['sales']]
y = df.loc[:, 'time']
model = LinearRegression()
model.fit(X, y)
y_pred = pd.Series(model.predict(X), index=X.index)
q_3.assert_check_failed()
#%%RM_IF(PROD)%%
from sklearn.linear_model import LinearRegression
df = average_sales.to_frame()
time = np.arange(len(df.index))
df['time'] = time
X = df.loc[:, ['time']]
y = df.loc[:, 'sales']
model = LinearRegression()
model.fit(X, y)
y_pred = pd.Series(model.predict(X), index=X.index)
q_3.assert_check_passed()
Explanation: Now we'll get started with the Store Sales - Time Series Forecasting competition data. The entire dataset comprises almost 1800 series recording store sales across a variety of product families from 2013 into 2017. For this lesson, we'll just work with a single series (average_sales) of the average sales each day.
3) Fit a time-step feature
Complete the code below to create a linear regression model with a time-step feature on the series of average product sales. The target is in a column called 'sales'.
End of explanation
ax = y.plot(**plot_params, alpha=0.5)
ax = y_pred.plot(ax=ax, linewidth=3)
ax.set_title('Time Plot of Total Store Sales');
Explanation: Run this cell if you'd like to see a plot of the result.
End of explanation
df = average_sales.to_frame()
# YOUR CODE HERE: Create a lag feature from the target 'sales'
lag_1 = ____
#_UNCOMMENT_IF(PROD)_
#df['lag_1'] = lag_1 # add to dataframe
#_UNCOMMENT_IF(PROD)_
#X = df.loc[:, ['lag_1']].dropna() # features
#_UNCOMMENT_IF(PROD)_
#y = df.loc[:, 'sales'] # target
#_UNCOMMENT_IF(PROD)_
#y, X = y.align(X, join='inner') # drop corresponding values in target
# YOUR CODE HERE: Create a LinearRegression instance and fit it to X and y.
model = ____
# YOUR CODE HERE: Create Store the fitted values as a time series with
# the same time index as the training data
y_pred = ____
# Check your answer
q_4.check()
# Lines below will give you a hint or solution code
q_4.hint()
q_4.solution()
#%%RM_IF(PROD)%%
df = average_sales.to_frame()
lag_1 = df['sales']
df['lag_1'] = lag_1
X = df.loc[:, ['lag_1']]
X.dropna(inplace=True) # drop missing values in the feature set
y = df.loc[:, 'sales'] # create the target
y, X = y.align(X, join='inner') # drop corresponding values in target
model = LinearRegression()
model.fit(X, y)
y_pred = pd.Series(model.predict(X), index=X.index)
q_4.assert_check_failed()
#%%RM_IF(PROD)%%
df = average_sales.to_frame()
lag_1 = df['sales'].shift(-1)
df['lag_1'] = lag_1
X = df.loc[:, ['lag_1']]
X.dropna(inplace=True) # drop missing values in the feature set
y = df.loc[:, 'sales'] # create the target
y, X = y.align(X, join='inner') # drop corresponding values in target
model = LinearRegression()
model.fit(X, y)
y_pred = pd.Series(model.predict(X), index=X.index)
q_4.assert_check_failed()
#%%RM_IF(PROD)%%
df = average_sales.to_frame()
lag_1 = df['sales'].shift(1)
df['lag_1'] = lag_1
X = df.loc[:, ['sales']]
X.dropna(inplace=True) # drop missing values in the feature set
y = df.loc[:, 'lag_1'] # create the target
y.dropna(inplace=True)
y, X = y.align(X, join='inner') # drop corresponding values in target
model = LinearRegression()
model.fit(X, y)
y_pred = pd.Series(model.predict(X), index=X.index)
q_4.assert_check_failed()
#%%RM_IF(PROD)%%
df = average_sales.to_frame()
lag_1 = df['sales'].shift(1)
df['lag_1'] = lag_1
X = df.loc[:, ['lag_1']]
X.dropna(inplace=True) # drop missing values in the feature set
y = df.loc[:, 'sales'] # create the target
y, X = y.align(X, join='inner') # drop corresponding values in target
model = LinearRegression()
model.fit(X, y)
y_pred = pd.Series(model.predict(X), index=X.index)
q_4.assert_check_passed()
Explanation: 4) Fit a lag feature to Store Sales
Complete the code below to create a linear regression model with a lag feature on the series of average product sales. The target is in a column of df called 'sales'.
End of explanation
fig, ax = plt.subplots()
ax.plot(X['lag_1'], y, '.', color='0.25')
ax.plot(X['lag_1'], y_pred)
ax.set(aspect='equal', ylabel='sales', xlabel='lag_1', title='Lag Plot of Average Sales');
Explanation: Run the next cell if you'd like to see the result.
End of explanation |
11,360 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Sebastian Raschka, 2015
https://github.com/rasbt/python-machine-learning-book
Step1: <br>
<br>
Overview
Streamlining workflows with pipelines
Loading the Breast Cancer Wisconsin dataset
Combining transformers and estimators in a pipeline
Using k-fold cross-validation to assess model performance
The holdout method
K-fold cross-validation
Debugging algorithms with learning and validation curves
Diagnosing bias and variance problems with learning curves
Addressing overfitting and underfitting with validation curves
Fine-tuning machine learning models via grid search
Tuning hyperparameters via grid search
Algorithm selection with nested cross-validation
Looking at different performance evaluation metrics
Reading a confusion matrix
Optimizing the precision and recall of a classification model
Plotting a receiver operating characteristic
The scoring metrics for multiclass classification
Summary
<br>
<br>
Step2: Streamlining workflows with pipelines
...
Loading the Breast Cancer Wisconsin dataset
Step3: <hr>
Note
Step4: <hr>
Step5: <br>
<br>
Combining transformers and estimators in a pipeline
Step6: <br>
<br>
Using k-fold cross validation to assess model performance
...
The holdout method
Step7: <br>
<br>
K-fold cross-validation
Step8: <br>
<br>
Debugging algorithms with learning curves
<br>
<br>
Diagnosing bias and variance problems with learning curves
Step9: <br>
<br>
Addressing over- and underfitting with validation curves
Step10: <br>
<br>
Fine-tuning machine learning models via grid search
<br>
<br>
Tuning hyperparameters via grid search
Step11: <br>
<br>
Algorithm selection with nested cross-validation
Step12: <br>
<br>
Looking at different performance evaluation metrics
...
Reading a confusion matrix
Step13: Additional Note
Remember that we previously encoded the class labels so that malignant samples are the "postive" class (1), and benign samples are the "negative" class (0)
Step14: Next, we printed the confusion matrix like so
Step15: Note that the (true) class 0 samples that are correctly predicted as class 0 (true negatives) are now in the upper left corner of the matrix (index 0, 0). In order to change the ordering so that the true negatives are in the lower right corner (index 1,1) and the true positves are in the upper left, we can use the labels argument like shown below
Step16: We conclude
Step17: <br>
<br>
Plotting a receiver operating characteristic
Step18: <br>
<br>
The scoring metrics for multiclass classification | Python Code:
%load_ext watermark
%watermark -a 'Sebastian Raschka' -u -d -v -p numpy,pandas,matplotlib,scikit-learn
# to install watermark just uncomment the following line:
#%install_ext https://raw.githubusercontent.com/rasbt/watermark/master/watermark.py
Explanation: Sebastian Raschka, 2015
https://github.com/rasbt/python-machine-learning-book
Python Machine Learning - Code Examples
Chapter 6 - Learning Best Practices for Model Evaluation and Hyperparameter Tuning
Note that the optional watermark extension is a small IPython notebook plugin that I developed to make the code reproducible. You can just skip the following line(s).
End of explanation
from IPython.display import Image
%matplotlib inline
Explanation: <br>
<br>
Overview
Streamlining workflows with pipelines
Loading the Breast Cancer Wisconsin dataset
Combining transformers and estimators in a pipeline
Using k-fold cross-validation to assess model performance
The holdout method
K-fold cross-validation
Debugging algorithms with learning and validation curves
Diagnosing bias and variance problems with learning curves
Addressing overfitting and underfitting with validation curves
Fine-tuning machine learning models via grid search
Tuning hyperparameters via grid search
Algorithm selection with nested cross-validation
Looking at different performance evaluation metrics
Reading a confusion matrix
Optimizing the precision and recall of a classification model
Plotting a receiver operating characteristic
The scoring metrics for multiclass classification
Summary
<br>
<br>
End of explanation
import pandas as pd
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases'
'/breast-cancer-wisconsin/wdbc.data', header=None)
df.shape
Explanation: Streamlining workflows with pipelines
...
Loading the Breast Cancer Wisconsin dataset
End of explanation
df = pd.read_csv('https://raw.githubusercontent.com/rasbt/python-machine-learning-book/master/code/datasets/wdbc/wdbc.data', header=None)
df.head()
Explanation: <hr>
Note:
If the link to the Breast Cancer Wisconsin dataset dataset provided above does not work for you, you can find a local copy in this repository at ./../datasets/wdbc/wdbc.data.
Or you could fetch it via
End of explanation
from sklearn.preprocessing import LabelEncoder
X = df.loc[:, 2:].values
y = df.loc[:, 1].values
le = LabelEncoder()
y = le.fit_transform(y)
le.transform(['M', 'B'])
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=0.20, random_state=1)
Explanation: <hr>
End of explanation
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
pipe_lr = Pipeline([('scl', StandardScaler()),
('pca', PCA(n_components=2)),
('clf', LogisticRegression(random_state=1))])
pipe_lr.fit(X_train, y_train)
print('Test Accuracy: %.3f' % pipe_lr.score(X_test, y_test))
y_pred = pipe_lr.predict(X_test)
Image(filename='./images/06_01.png', width=500)
Explanation: <br>
<br>
Combining transformers and estimators in a pipeline
End of explanation
Image(filename='./images/06_02.png', width=500)
Explanation: <br>
<br>
Using k-fold cross validation to assess model performance
...
The holdout method
End of explanation
Image(filename='./images/06_03.png', width=500)
import numpy as np
from sklearn.cross_validation import StratifiedKFold
kfold = StratifiedKFold(y=y_train,
n_folds=10,
random_state=1)
scores = []
for k, (train, test) in enumerate(kfold):
pipe_lr.fit(X_train[train], y_train[train])
score = pipe_lr.score(X_train[test], y_train[test])
scores.append(score)
print('Fold: %s, Class dist.: %s, Acc: %.3f' % (k+1,
np.bincount(y_train[train]), score))
print('\nCV accuracy: %.3f +/- %.3f' % (np.mean(scores), np.std(scores)))
from sklearn.cross_validation import cross_val_score
scores = cross_val_score(estimator=pipe_lr,
X=X_train,
y=y_train,
cv=10,
n_jobs=1)
print('CV accuracy scores: %s' % scores)
print('CV accuracy: %.3f +/- %.3f' % (np.mean(scores), np.std(scores)))
Explanation: <br>
<br>
K-fold cross-validation
End of explanation
Image(filename='./images/06_04.png', width=600)
import matplotlib.pyplot as plt
from sklearn.learning_curve import learning_curve
pipe_lr = Pipeline([('scl', StandardScaler()),
('clf', LogisticRegression(penalty='l2', random_state=0))])
train_sizes, train_scores, test_scores =\
learning_curve(estimator=pipe_lr,
X=X_train,
y=y_train,
train_sizes=np.linspace(0.1, 1.0, 10),
cv=10,
n_jobs=1)
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
plt.plot(train_sizes, train_mean,
color='blue', marker='o',
markersize=5, label='training accuracy')
plt.fill_between(train_sizes,
train_mean + train_std,
train_mean - train_std,
alpha=0.15, color='blue')
plt.plot(train_sizes, test_mean,
color='green', linestyle='--',
marker='s', markersize=5,
label='validation accuracy')
plt.fill_between(train_sizes,
test_mean + test_std,
test_mean - test_std,
alpha=0.15, color='green')
plt.grid()
plt.xlabel('Number of training samples')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.ylim([0.8, 1.0])
plt.tight_layout()
# plt.savefig('./figures/learning_curve.png', dpi=300)
plt.show()
Explanation: <br>
<br>
Debugging algorithms with learning curves
<br>
<br>
Diagnosing bias and variance problems with learning curves
End of explanation
from sklearn.learning_curve import validation_curve
param_range = [0.001, 0.01, 0.1, 1.0, 10.0, 100.0]
train_scores, test_scores = validation_curve(
estimator=pipe_lr,
X=X_train,
y=y_train,
param_name='clf__C',
param_range=param_range,
cv=10)
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
plt.plot(param_range, train_mean,
color='blue', marker='o',
markersize=5, label='training accuracy')
plt.fill_between(param_range, train_mean + train_std,
train_mean - train_std, alpha=0.15,
color='blue')
plt.plot(param_range, test_mean,
color='green', linestyle='--',
marker='s', markersize=5,
label='validation accuracy')
plt.fill_between(param_range,
test_mean + test_std,
test_mean - test_std,
alpha=0.15, color='green')
plt.grid()
plt.xscale('log')
plt.legend(loc='lower right')
plt.xlabel('Parameter C')
plt.ylabel('Accuracy')
plt.ylim([0.8, 1.0])
plt.tight_layout()
# plt.savefig('./figures/validation_curve.png', dpi=300)
plt.show()
Explanation: <br>
<br>
Addressing over- and underfitting with validation curves
End of explanation
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
pipe_svc = Pipeline([('scl', StandardScaler()),
('clf', SVC(random_state=1))])
param_range = [0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0]
param_grid = [{'clf__C': param_range,
'clf__kernel': ['linear']},
{'clf__C': param_range,
'clf__gamma': param_range,
'clf__kernel': ['rbf']}]
gs = GridSearchCV(estimator=pipe_svc,
param_grid=param_grid,
scoring='accuracy',
cv=10,
n_jobs=-1)
gs = gs.fit(X_train, y_train)
print(gs.best_score_)
print(gs.best_params_)
clf = gs.best_estimator_
clf.fit(X_train, y_train)
print('Test accuracy: %.3f' % clf.score(X_test, y_test))
Explanation: <br>
<br>
Fine-tuning machine learning models via grid search
<br>
<br>
Tuning hyperparameters via grid search
End of explanation
Image(filename='./images/06_07.png', width=500)
gs = GridSearchCV(estimator=pipe_svc,
param_grid=param_grid,
scoring='accuracy',
cv=2)
# Note: Optionally, you could use cv=2
# in the GridSearchCV above to produce
# the 5 x 2 nested CV that is shown in the figure.
scores = cross_val_score(gs, X_train, y_train, scoring='accuracy', cv=5)
print('CV accuracy: %.3f +/- %.3f' % (np.mean(scores), np.std(scores)))
from sklearn.tree import DecisionTreeClassifier
gs = GridSearchCV(estimator=DecisionTreeClassifier(random_state=0),
param_grid=[{'max_depth': [1, 2, 3, 4, 5, 6, 7, None]}],
scoring='accuracy',
cv=2)
scores = cross_val_score(gs, X_train, y_train, scoring='accuracy', cv=5)
print('CV accuracy: %.3f +/- %.3f' % (np.mean(scores), np.std(scores)))
Explanation: <br>
<br>
Algorithm selection with nested cross-validation
End of explanation
Image(filename='./images/06_08.png', width=300)
from sklearn.metrics import confusion_matrix
pipe_svc.fit(X_train, y_train)
y_pred = pipe_svc.predict(X_test)
confmat = confusion_matrix(y_true=y_test, y_pred=y_pred)
print(confmat)
fig, ax = plt.subplots(figsize=(2.5, 2.5))
ax.matshow(confmat, cmap=plt.cm.Blues, alpha=0.3)
for i in range(confmat.shape[0]):
for j in range(confmat.shape[1]):
ax.text(x=j, y=i, s=confmat[i, j], va='center', ha='center')
plt.xlabel('predicted label')
plt.ylabel('true label')
plt.tight_layout()
# plt.savefig('./figures/confusion_matrix.png', dpi=300)
plt.show()
Explanation: <br>
<br>
Looking at different performance evaluation metrics
...
Reading a confusion matrix
End of explanation
le.transform(['M', 'B'])
confmat = confusion_matrix(y_true=y_test, y_pred=y_pred)
print(confmat)
Explanation: Additional Note
Remember that we previously encoded the class labels so that malignant samples are the "postive" class (1), and benign samples are the "negative" class (0):
End of explanation
confmat = confusion_matrix(y_true=y_test, y_pred=y_pred)
print(confmat)
Explanation: Next, we printed the confusion matrix like so:
End of explanation
confmat = confusion_matrix(y_true=y_test, y_pred=y_pred, labels=[1, 0])
print(confmat)
Explanation: Note that the (true) class 0 samples that are correctly predicted as class 0 (true negatives) are now in the upper left corner of the matrix (index 0, 0). In order to change the ordering so that the true negatives are in the lower right corner (index 1,1) and the true positves are in the upper left, we can use the labels argument like shown below:
End of explanation
from sklearn.metrics import precision_score, recall_score, f1_score
print('Precision: %.3f' % precision_score(y_true=y_test, y_pred=y_pred))
print('Recall: %.3f' % recall_score(y_true=y_test, y_pred=y_pred))
print('F1: %.3f' % f1_score(y_true=y_test, y_pred=y_pred))
from sklearn.metrics import make_scorer
scorer = make_scorer(f1_score, pos_label=0)
c_gamma_range = [0.01, 0.1, 1.0, 10.0]
param_grid = [{'clf__C': c_gamma_range,
'clf__kernel': ['linear']},
{'clf__C': c_gamma_range,
'clf__gamma': c_gamma_range,
'clf__kernel': ['rbf']}]
gs = GridSearchCV(estimator=pipe_svc,
param_grid=param_grid,
scoring=scorer,
cv=10,
n_jobs=-1)
gs = gs.fit(X_train, y_train)
print(gs.best_score_)
print(gs.best_params_)
Explanation: We conclude:
Assuming that class 1 (malignant) is the positive class in this example, our model correctly classified 71 of the samples that belong to class 0 (true negatives) and 40 samples that belong to class 1 (true positives), respectively. However, our model also incorrectly misclassified 1 sample from class 0 as class 1 (false positive), and it predicted that 2 samples are benign although it is a malignant tumor (false negatives).
<br>
<br>
Optimizing the precision and recall of a classification model
End of explanation
from sklearn.metrics import roc_curve, auc
from scipy import interp
pipe_lr = Pipeline([('scl', StandardScaler()),
('pca', PCA(n_components=2)),
('clf', LogisticRegression(penalty='l2',
random_state=0,
C=100.0))])
X_train2 = X_train[:, [4, 14]]
cv = StratifiedKFold(y_train, n_folds=3, random_state=1)
fig = plt.figure(figsize=(7, 5))
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
probas = pipe_lr.fit(X_train2[train],
y_train[train]).predict_proba(X_train2[test])
fpr, tpr, thresholds = roc_curve(y_train[test],
probas[:, 1],
pos_label=1)
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr,
tpr,
lw=1,
label='ROC fold %d (area = %0.2f)'
% (i+1, roc_auc))
plt.plot([0, 1],
[0, 1],
linestyle='--',
color=(0.6, 0.6, 0.6),
label='random guessing')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--',
label='mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.plot([0, 0, 1],
[0, 1, 1],
lw=2,
linestyle=':',
color='black',
label='perfect performance')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('false positive rate')
plt.ylabel('true positive rate')
plt.title('Receiver Operator Characteristic')
plt.legend(loc="lower right")
plt.tight_layout()
# plt.savefig('./figures/roc.png', dpi=300)
plt.show()
pipe_lr = pipe_lr.fit(X_train2, y_train)
y_pred2 = pipe_lr.predict(X_test[:, [4, 14]])
from sklearn.metrics import roc_auc_score, accuracy_score
print('ROC AUC: %.3f' % roc_auc_score(y_true=y_test, y_score=y_pred2))
print('Accuracy: %.3f' % accuracy_score(y_true=y_test, y_pred=y_pred2))
Explanation: <br>
<br>
Plotting a receiver operating characteristic
End of explanation
pre_scorer = make_scorer(score_func=precision_score,
pos_label=1,
greater_is_better=True,
average='micro')
Explanation: <br>
<br>
The scoring metrics for multiclass classification
End of explanation |
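For multiclass problems the choice of averaging matters; a quick toy comparison (a sketch, not from the book):
y_true = [0, 1, 2, 2, 1, 0]
y_pred = [0, 2, 2, 2, 1, 0]
print('micro: %.3f' % precision_score(y_true, y_pred, average='micro'))
print('macro: %.3f' % precision_score(y_true, y_pred, average='macro'))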
11,361 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Read in catalog information from a text file and plot some parameters
Authors
Adrian Price-Whelan, Kelle Cruz, Stephanie T. Douglas
Learning Goals
Read an ASCII file using astropy.io
Convert between representations of coordinate components using astropy.coordinates (hours to degrees)
Make a spherical projection sky plot using matplotlib
Keywords
file input/output, coordinates, tables, units, scatter plots, matplotlib
Summary
This tutorial demonstrates the use of astropy.io.ascii for reading ASCII data, astropy.coordinates and astropy.units for converting RA (as a sexagesimal angle) to decimal degrees, and matplotlib for making a color-magnitude diagram and on-sky locations in a Mollweide projection.
Step1: Astropy provides functionality for reading in and manipulating tabular
data through the astropy.table subpackage. An additional set of
tools for reading and writing ASCII data are provided with the
astropy.io.ascii subpackage, but fundamentally use the classes and
methods implemented in astropy.table.
We'll start by importing the ascii subpackage
Step2: For many cases, it is sufficient to use the ascii.read('filename')
function as a black box for reading data from table-formatted text
files. By default, this function will try to figure out how your
data is formatted/delimited (by default, guess=True). For example,
if your data are
Step3: The header names are automatically parsed from the top of the file,
and the delimiter is inferred from the rest of the file -- awesome!
We can access the columns directly from their names as 'keys' of the
table object
Step4: If we want to then convert the first RA (as a sexagesimal angle) to
decimal degrees, for example, we can pluck out the first (0th) item in
the column and use the coordinates subpackage to parse the string
Step5: Now let's look at a case where this breaks, and we have to specify some
more options to the read() function. Our data may look a bit messier
Step6: What happened? The column names are just col1, col2, etc., the
default names if ascii.read() is unable to parse out column
names. We know it failed to read the column names, but also notice
that the first row of data are strings -- something else went wrong!
Step7: A few things are causing problems here. First, there are two header
lines in the file and the header lines are not denoted by comment
characters. The first line is actually some meta data that we don't
care about, so we want to skip it. We can get around this problem by
specifying the header_start keyword to the ascii.read() function.
This keyword argument specifies the index of the row in the text file
to read the column names from
Step8: Great! Now the columns have the correct names, but there is still a
problem
Step9: Some of the columns have missing data, for example, some of the RA values are missing (denoted by -- when printed)
Step10: This is called a Masked column because some missing values are
masked out upon display. If we want to use this numeric data, we have
to tell astropy what to fill the missing values with. We can do this
with the .filled() method. For example, to fill all of the missing
values with NaN's
Step11: Let's recap what we've done so far, then make some plots with the
data. Our data file has an extra line above the column names, so we
use the header_start keyword to tell it to start from line 1 instead
of line 0 (remember Python is 0-indexed!). We then used had to specify
that the data starts on line 2 using the data_start
keyword. Finally, we note some columns have missing values.
Step12: Now that we have our data loaded, let's plot a color-magnitude diagram.
Here we simply make a scatter plot of the J-K color on the x-axis
against the J magnitude on the y-axis. We use a trick to flip the
y-axis plt.ylim(reversed(plt.ylim())). Called with no arguments,
plt.ylim() will return a tuple with the axis bounds,
e.g. (0,10). Calling the function with arguments will set the limits
of the axis, so we simply set the limits to be the reverse of whatever they
were before. Using this pylab-style plotting is convenient for
making quick plots and interactive use, but is not great if you need
more control over your figures.
Step13: As a final example, we will plot the angular positions from the
catalog on a 2D projection of the sky. Instead of using pylab-style
plotting, we'll take a more object-oriented approach. We'll start by
creating a Figure object and adding a single subplot to the
figure. We can specify a projection with the projection keyword; in
this example we will use a Mollweide projection. Unfortunately, it is
highly non-trivial to make the matplotlib projection defined this way
follow the celestial convention of longitude/RA increasing to the left.
The axis object, ax, knows to expect angular coordinate
values. An important fact is that it expects the values to be in
radians, and it expects the azimuthal angle values to be between
(-180º,180º). This is (currently) not customizable, so we have to
coerce our RA data to conform to these rules! astropy provides a
coordinate class for handling angular values, astropy.coordinates.Angle.
We can convert our column of RA values to radians, and wrap the
angle bounds using this class.
Step14: By default, matplotlib will add degree tick labels, so let's change the
horizontal (x) tick labels to be in units of hours, and display a grid
Step15: We can save this figure as a PDF using the savefig function | Python Code:
import numpy as np
# Set up matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
Explanation: Read in catalog information from a text file and plot some parameters
Authors
Adrian Price-Whelan, Kelle Cruz, Stephanie T. Douglas
Learning Goals
Read an ASCII file using astropy.io
Convert between representations of coordinate components using astropy.coordinates (hours to degrees)
Make a spherical projection sky plot using matplotlib
Keywords
file input/output, coordinates, tables, units, scatter plots, matplotlib
Summary
This tutorial demonstrates the use of astropy.io.ascii for reading ASCII data, astropy.coordinates and astropy.units for converting RA (as a sexagesimal angle) to decimal degrees, and matplotlib for making a color-magnitude diagram and on-sky locations in a Mollweide projection.
End of explanation
from astropy.io import ascii
Explanation: Astropy provides functionality for reading in and manipulating tabular
data through the astropy.table subpackage. An additional set of
tools for reading and writing ASCII data are provided with the
astropy.io.ascii subpackage, but fundamentally use the classes and
methods implemented in astropy.table.
We'll start by importing the ascii subpackage:
End of explanation
tbl = ascii.read("simple_table.csv")
tbl
Explanation: For many cases, it is sufficient to use the ascii.read('filename')
function as a black box for reading data from table-formatted text
files. By default, this function will try to figure out how your
data is formatted/delimited (by default, guess=True). For example,
if your data are:
# name,ra,dec
BLG100,17:51:00.0,-29:59:48
BLG101,17:53:40.2,-29:49:52
BLG102,17:56:20.2,-29:30:51
BLG103,17:56:20.2,-30:06:22
...
(see simple_table.csv)
ascii.read() will return a Table object:
End of explanation
tbl["ra"]
Explanation: The header names are automatically parsed from the top of the file,
and the delimiter is inferred from the rest of the file -- awesome!
We can access the columns directly from their names as 'keys' of the
table object:
End of explanation
import astropy.coordinates as coord
import astropy.units as u
first_row = tbl[0] # get the first (0th) row
ra = coord.Angle(first_row["ra"], unit=u.hour) # create an Angle object
ra.degree # convert to degrees
Explanation: If we want to then convert the first RA (as a sexagesimal angle) to
decimal degrees, for example, we can pluck out the first (0th) item in
the column and use the coordinates subpackage to parse the string:
End of explanation
tbl = ascii.read("Young-Objects-Compilation.csv")
tbl.colnames
Explanation: Now let's look at a case where this breaks, and we have to specify some
more options to the read() function. Our data may look a bit messier::
,,,,2MASS Photometry,,,,,,WISE Photometry,,,,,,,,Spectra,,,,Astrometry,,,,,,,,,,,
Name,Designation,RA,Dec,Jmag,J_unc,Hmag,H_unc,Kmag,K_unc,W1,W1_unc,W2,W2_unc,W3,W3_unc,W4,W4_unc,Spectral Type,Spectra (FITS),Opt Spec Refs,NIR Spec Refs,pm_ra (mas),pm_ra_unc,pm_dec (mas),pm_dec_unc,pi (mas),pi_unc,radial velocity (km/s),rv_unc,Astrometry Refs,Discovery Refs,Group/Age,Note
,00 04 02.84 -64 10 35.6,1.01201,-64.18,15.79,0.07,14.83,0.07,14.01,0.05,13.37,0.03,12.94,0.03,12.18,0.24,9.16,null,L1γ,,Kirkpatrick et al. 2010,,,,,,,,,,,Kirkpatrick et al. 2010,,
PC 0025+04,00 27 41.97 +05 03 41.7,6.92489,5.06,16.19,0.09,15.29,0.10,14.96,0.12,14.62,0.04,14.14,0.05,12.24,null,8.89,null,M9.5β,,Mould et al. 1994,,0.0105,0.0004,-0.0008,0.0003,,,,,Faherty et al. 2009,Schneider et al. 1991,,,00 32 55.84 -44 05 05.8,8.23267,-44.08,14.78,0.04,13.86,0.03,13.27,0.04,12.82,0.03,12.49,0.03,11.73,0.19,9.29,null,L0γ,,Cruz et al. 2009,,0.1178,0.0043,-0.0916,0.0043,38.4,4.8,,,Faherty et al. 2012,Reid et al. 2008,,
...
(see Young-Objects-Compilation.csv)
If we try to just use ascii.read() on this data, it fails to parse the names out and the column names become col followed by the number of the column:
End of explanation
tbl[0]
Explanation: What happened? The column names are just col1, col2, etc., the
default names if ascii.read() is unable to parse out column
names. We know it failed to read the column names, but also notice
that the first row of data are strings -- something else went wrong!
End of explanation
tbl = ascii.read("Young-Objects-Compilation.csv", header_start=1)
tbl.colnames
Explanation: A few things are causing problems here. First, there are two header
lines in the file and the header lines are not denoted by comment
characters. The first line is actually some meta data that we don't
care about, so we want to skip it. We can get around this problem by
specifying the header_start keyword to the ascii.read() function.
This keyword argument specifies the index of the row in the text file
to read the column names from:
End of explanation
tbl = ascii.read("Young-Objects-Compilation.csv", header_start=1, data_start=2)
Explanation: Great! Now the columns have the correct names, but there is still a
problem: all of the columns have string data types, and the column
names are still included as a row in the table. This is because by
default the data are assumed to start on the second row (index=1).
We can specify data_start=2 to tell the reader that the data in
this file actually start on the 3rd (index=2) row:
End of explanation
print(tbl['RA'])
Explanation: Some of the columns have missing data, for example, some of the RA values are missing (denoted by -- when printed):
End of explanation
tbl['RA'].filled(np.nan)
Explanation: This is called a Masked column because some missing values are
masked out upon display. If we want to use this numeric data, we have
to tell astropy what to fill the missing values with. We can do this
with the .filled() method. For example, to fill all of the missing
values with NaN's:
End of explanation
data = ascii.read("Young-Objects-Compilation.csv", header_start=1, data_start=2)
Explanation: Let's recap what we've done so far, then make some plots with the
data. Our data file has an extra line above the column names, so we
use the header_start keyword to tell it to start from line 1 instead
of line 0 (remember Python is 0-indexed!). We then used had to specify
that the data starts on line 2 using the data_start
keyword. Finally, we note some columns have missing values.
End of explanation
plt.scatter(data["Jmag"] - data["Kmag"], data["Jmag"]) # plot J-K vs. J
plt.ylim(reversed(plt.ylim())) # flip the y-axis
plt.xlabel("$J-K_s$", fontsize=20)
plt.ylabel("$J$", fontsize=20)
Explanation: Now that we have our data loaded, let's plot a color-magnitude diagram.
Here we simply make a scatter plot of the J-K color on the x-axis
against the J magnitude on the y-axis. We use a trick to flip the
y-axis plt.ylim(reversed(plt.ylim())). Called with no arguments,
plt.ylim() will return a tuple with the axis bounds,
e.g. (0,10). Calling the function with arguments will set the limits
of the axis, so we simply set the limits to be the reverse of whatever they
were before. Using this pylab-style plotting is convenient for
making quick plots and interactive use, but is not great if you need
more control over your figures.
End of explanation
ra = coord.Angle(data['RA'].filled(np.nan)*u.degree)
ra = ra.wrap_at(180*u.degree)
dec = coord.Angle(data['Dec'].filled(np.nan)*u.degree)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111, projection="mollweide")
ax.scatter(ra.radian, dec.radian)
Explanation: As a final example, we will plot the angular positions from the
catalog on a 2D projection of the sky. Instead of using pylab-style
plotting, we'll take a more object-oriented approach. We'll start by
creating a Figure object and adding a single subplot to the
figure. We can specify a projection with the projection keyword; in
this example we will use a Mollweide projection. Unfortunately, it is
highly non-trivial to make the matplotlib projection defined this way
follow the celestial convention of longitude/RA increasing to the left.
The axis object, ax, knows to expect angular coordinate
values. An important fact is that it expects the values to be in
radians, and it expects the azimuthal angle values to be between
(-180º,180º). This is (currently) not customizable, so we have to
coerce our RA data to conform to these rules! astropy provides a
coordinate class for handling angular values, astropy.coordinates.Angle.
We can convert our column of RA values to radians, and wrap the
angle bounds using this class.
End of explanation
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111, projection="mollweide")
ax.scatter(ra.radian, dec.radian)
ax.set_xticklabels(['14h','16h','18h','20h','22h','0h','2h','4h','6h','8h','10h'])
ax.grid(True)
Explanation: By default, matplotlib will add degree tick labels, so let's change the
horizontal (x) tick labels to be in units of hours, and display a grid:
End of explanation
fig.savefig("map.pdf")
Explanation: We can save this figure as a PDF using the savefig function:
End of explanation |
11,362 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Description of Melbourne Dataset
Load Data
Compute POI Statistics DataFrame
Compute POI Visit Statistics
Visualise & Save POIs
POI vs Photo
POIs with NO Visits
Photo Clusters without Corresponding POIs
Compute Trajectories
Recommendation via POI Ranking
POI Features for Ranking
Ranking POIs using rankSVM
Factorised Transition Probabilities
POI Features for Factorisation
Transition Matrix between POIs
Visualise Transition Matrix
Visualise Transitions of Specific POIs
Recommendation Results Comparison & Visualisation
Choose an Example Trajectory
Recommendation by POI Popularity
Recommendation by POI Rankings
Recommendation by Transition Probabilities
Disclaimer
Problems of Trajectory Construction
Example of Terrible Trajectories
Limitations of Google Maps and Nationalmaps
<a id='sec1'></a>
Step1: 1. Load Data
Load POI/Trajectory data from file.
1.1 Load POI Data
Step2: 1.2 Load Trajectory Data
Step3: 2. Compute POI Statistics DataFrame
2.1 Compute POI Visit Statistics
Compute the number of photos associated with each POI.
Step4: Compute the visit duration at each POI.
Step5: Filtering out zero visit duration at POI, otherwise many medians of duration will be zero.
Step6: Compute the median and summation of POI visit duration.
Step7: Compute the number of visits at each POI by all users and by distinct users.
NOTE
Step8: Copy visit statistics to POI dataframe.
Step9: 2.2 Visualise & Save POIs
Visualise POI on map
Step10: 3. POI vs Photo
3.1 POIs with NO Visits
Step11: 3.2 Photo Clusters without Corresponding POIs
TODO
Step13: 4.2 Utility Functions
Extract trajectory, i.e., a list of POIs, considering loops/subtours.
Step14: Extract trajectory, i.e., a list of POIs, assuming NO loops/subtours exist.
Step15: Counting the number of trajectories with loops/subtours.
Step16: Compute POI properties, e.g., popularity, total number of visits, average visit duration.
Step17: Compute the F1 score for recommended trajectory, assuming NO loops/subtours in trajectories.
Step19: Compute distance between two POIs using Haversine formula.
Step20: Distance between POIs.
Step21: Dump POI popularity.
Step22: ~~Filtering out POI visits with 0 duration.~~
Step23: Dictionary maps every trajectory ID to the actual trajectory.
Step24: Define a query (in IR terminology) using tuple (start POI, end POI, #POI) ~~user ID.~~
Step25: 5. Recommendation via POI Ranking
5.1 POI Features for Ranking
POI Features used for ranking
Step26: 5.2 Training DataFrame
Training data are generated as follows
Step27: Sanity check
Step28: Sanity check
Step29: 5.4 Ranking POIs using rankSVM
RankSVM implementation in libsvm.zip or liblinear.zip, please read README.ranksvm in the zip file for installation instructions.
Use softmax function to convert ranking scores to a probability distribution.
Step30: Below is a python wrapper of the svm-train or train and svm-predict or predict commands of rankSVM with ranking probabilities $P(p_i \lvert (p_s, p_e, len))$ computed using softmax function.
Step31: 6. Factorised Transition Probabilities
6.1 POI Features for Factorisation
POI features used to factorise the transition matrix of a Markov chain with POI features (vectors) as states
Step32: POIs in training set.
Step33: 6.2 Transition Matrix between POI Categories
Step34: 6.3 Transition Matrix between POI Popularity Classes
Step35: Discretize POI popularity with uniform log-scale bins.
Step36: 6.4 Transition Matrix between the Number of POI Visit Classes
Step37: Discretize the number of POI visits with uniform log-scale bins.
Step38: 6.5 Transition Matrix between POI Average Visit Duration Classes
Step39: 6.6 Transition Matrix between POI Neighborhood Classes
KMeans in scikit-learn does not support a custom distance metric and provides no implementation of the Haversine formula, so Euclidean distance on (longitude, latitude) is used as an approximation.
Step40: Scatter plot of POI coordinates with clustering results.
Step41: 6.7 Transition Matrix between POIs
Approximate the transition probabilities (matrix) between different POI features (vectors) using the Kronecker product of the individual transition matrices corresponding to each feature, i.e., POI category, POI popularity (discretized), the number of POI visits (discretized), POI average visit duration (discretized) and POI neighborhood (clusters).
Deal with features without corresponding POIs and features with more than one corresponding POI. (Before Normalisation)
- For features without corresponding POIs, simply remove the corresponding rows and columns from the matrix obtained by the Kronecker product.
- For different POIs with the exact same feature,
  - Treat POIs with the same feature as a POI group,
  - The incoming transition value (i.e., unnormalised transition probability) of this POI group
    should be divided uniformly among the group members,
    which corresponds to choosing a group member uniformly at random in the incoming case.
  - The outgoing transition value should be duplicated (i.e., kept the same) among all group members,
    as we were already in that group in the outgoing case.
  - For each POI in the group, allocating the transition value of the self-loop of the POI group is similar to
    the outgoing case: as we were already in that group, duplicate it and then divide it uniformly among
    the transitions from this POI to the other POIs in the same group,
    which corresponds to choosing an outgoing transition uniformly at random from all outgoing transitions
    excluding the self-loop of this POI.
  - Concretely, for a POI group with $n$ POIs,
    1. If the incoming transition value of the POI group is $m_1$,
       then the corresponding incoming transition value for each group member is $\frac{m_1}{n}$.
    1. If the outgoing transition value of the POI group is $m_2$,
       then the corresponding outgoing transition value for each group member is also $m_2$.
    1. If the transition value of the self-loop of the POI group is $m_3$,
       then the transition value of the self-loop of each individual POI should be $0$,
       and each of the other in-group transitions gets value $\frac{m_3}{n-1}$,
       as the total number of outgoing transitions to other POIs in the same group is $n-1$ (excluding the self-loop),
       i.e. $n-1$ choose $1$.
NOTE
Step42: 6.8 Visualise Transition Matrix
Plot transition matrix heatmap.
Step43: 6.9 Visualise Transitions of Specific POIs
Generate KML file to visualise the transitions from a specific POI using edge width and edge transparency to distinguish different transition probabilities.
Step44: 6.9.1 The Most Popular POI
Define the popularity of POI as the number of distinct users that visited the POI.
Step45: Example on Google maps.
6.9.2 The Queen Victoria Market
Step46: Example on Google maps.
6.9.3 The University of Melbourne
Step47: Example on Google maps.
Step48: Example on Google maps.
6.9.4 The Margaret Court Arena
Step49: Example on Google maps.
6.9.5 RMIT City
Step50: Example on Google maps.
Step51: Example on Google maps.
6.10 Visualise Trajectories that Pass through Specific POIs
Generate KML file for a set of trajectories.
Step52: 6.10.1 The Melbourne Cricket Ground (MCG)
Trajectories (with more than $1$ POI) that pass through the Melbourne Cricket Ground (MCG).
Step53: Example on Google maps.
6.10.2 The Government House
Trajectories (with more than $1$ POI) that pass through the Government House.
Step54: Example on Google maps.
7. Recommendation Results Comparison & Visualisation
Examples of recommendation results
Step55: After looking at many of these trajectories on map, we choose trajectory 680 to illustrate.
Step56: 7.2 Recommendation by POI Popularity
Recommend trajectory based on POI popularity only.
Step57: 7.3 Recommendation by POI Rankings
Recommend trajectory based on the ranking of POIs using rankSVM.
Step58: 7.4 Recommendation by Transition Probabilities
Use dynamic programming to find a possibly non-simple path, i.e., walk.
Step59: Use integer linear programming (ILP) to find a simple path.
Step60: Example on Google maps,
- the light blue edges represent the real trajectory,
- green edges represent the recommended trajectories based on POI popularity and POI rankings (the recommendations are the same),
- the purple edges represent the recommended trajectories based on POI transition probabilities using Viterbi algorithm and ILP.
8. Disclaimer
8.1 Problems of Trajectory Construction
Problems of mapping photos to POIs according to their distance, i.e., $200$ meters.
Problems of splitting consecutive POI visits by $8$ hours.
Problems of extracting trajectories from a sequence of POI visits such that no loops/subtours exist.
8.2 Example of Terrible Trajectories
Choose the trajectory with the maximum number of POIs.
Step61: Extract the sequence of photos associated with this trajectory.
Step62: Save photos dataframe to CSV file.
Step63: Generate KML file with edges between consecutive photos.
Step64: And the trajectory extracted. | Python Code:
% matplotlib inline
import os, sys, time, pickle, tempfile
import math, random, itertools
import pandas as pd
import numpy as np
from joblib import Parallel, delayed
from scipy.special import logsumexp  # moved here from scipy.misc in newer SciPy releases
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from scipy.linalg import kron
from fastkml import kml, styles
from shapely.geometry import Point, LineString
import pulp
RANK_C = 10 # regularisation parameter for rankSVM
BIN_CLUSTER = 5 # Number of bins/clusters for discritizing POI features
ranksvm_dir = '$HOME/work/ranksvm'
data_dir = '../data'
fpoi = os.path.join(data_dir, 'poi-Melb-all.csv')
fvisits = os.path.join(data_dir, 'userVisits-Melb.csv')
fphotos = os.path.join(data_dir, 'Melb_photos_bigbox.csv')
Explanation: Description of Melbourne Dataset
Load Data
Compute POI Statistics DataFrame
Compute POI Visit Statistics
Visualise & Save POIs
POI vs Photo
POIs with NO Visits
Photo Clusters without Corresponding POIs
Compute Trajectories
Recommendation via POI Ranking
POI Features for Ranking
Ranking POIs using rankSVM
Factorised Transition Probabilities
POI Features for Factorisation
Transition Matrix between POIs
Visualise Transition Matrix
Visualise Transitions of Specific POIs
Recommendation Results Comparison & Visualisation
Choose an Example Trajectory
Recommendation by POI Popularity
Recommendation by POI Rankings
Recommendation by Transition Probabilities
Disclaimer
Problems of Trajectory Construction
Example of Terrible Trajectories
Limitations of Google Maps and Nationalmaps
<a id='sec1'></a>
End of explanation
poi_all = pd.read_csv(fpoi)
poi_all.set_index('poiID', inplace=True)
#poi_all.head()
poi_df = poi_all.copy()
poi_df.drop('poiURL', axis=1, inplace=True)
poi_df.rename(columns={'poiName':'Name', 'poiTheme':'Category', 'poiLat':'Latitude', 'poiLon':'Longitude'}, \
inplace=True)
poi_df.head()
Explanation: 1. Load Data
Load POI/Trajectory data from file.
1.1 Load POI Data
End of explanation
visits = pd.read_csv(fvisits, sep=';')
#visits.head()
visits.drop(['poiTheme', 'poiFreq'], axis=1, inplace=True)
visits.rename(columns={'seqID':'trajID'}, inplace=True)
visits.head()
Explanation: 1.2 Load Trajectory Data
End of explanation
poi_photo = visits[['photoID', 'poiID']].copy().groupby('poiID').agg(np.size)
poi_photo.rename(columns={'photoID':'#photos'}, inplace=True)
poi_photo.head()
Explanation: 2. Compute POI Statistics DataFrame
2.1 Compute POI Visit Statistics
Compute the number of photos associated with each POI.
End of explanation
poi_duration = visits[['dateTaken', 'poiID', 'trajID']].copy().groupby(['trajID', 'poiID']).agg([np.min, np.max])
poi_duration.columns = poi_duration.columns.droplevel()
poi_duration.rename(columns={'amin':'arrivalTime', 'amax':'departureTime'}, inplace=True)
poi_duration.reset_index(inplace=True)
poi_duration['poiDuration'] = poi_duration['departureTime'] - poi_duration['arrivalTime']
poi_duration.head()
Explanation: Compute the visit duration at each POI.
End of explanation
poi_duration = poi_duration[poi_duration['poiDuration'] > 0]
Explanation: Filtering out zero visit duration at POI, otherwise many medians of duration will be zero.
End of explanation
poi_duration_stats = poi_duration[['poiID', 'poiDuration']].copy().groupby('poiID').agg([np.median, np.sum])
poi_duration_stats.columns = poi_duration_stats.columns.droplevel()
poi_duration_stats.rename(columns={'median':'medianDuration(sec)', 'sum':'totalDuration(sec)'}, inplace=True)
poi_duration_stats.head()
Explanation: Compute the median and summation of POI visit duration.
End of explanation
poi_visits = visits[['userID', 'trajID', 'poiID', 'photoID']].copy().groupby(['userID','trajID','poiID']).agg(np.size)
poi_visits.reset_index(inplace=True)
poi_visits.rename(columns={'photoID':'#photosAtPOIInTraj'}, inplace=True)
poi_visits.head()
poi_visits_stats = poi_visits[['userID', 'poiID']].copy().groupby('poiID').agg([pd.Series.nunique, np.size])
poi_visits_stats.columns = poi_visits_stats.columns.droplevel()
poi_visits_stats.rename(columns={'nunique':'#distinctUsers', 'size':'#visits'}, inplace=True)
poi_visits_stats.head()
Explanation: Compute the number of visits at each POI by all users and by distinct users.
NOTE: we assume NO loops/subtours appear in trajectories,
so a specific user would visit a certain POI in a specific trajectory at most once.
End of explanation
poi_df['#photos'] = 0
poi_df['#visits'] = 0
poi_df['#distinctUsers'] = 0
poi_df['medianDuration(sec)'] = 0.0
poi_df['totalDuration(sec)'] = 0.0
poi_df.loc[poi_photo.index, '#photos'] = poi_photo['#photos']
poi_df.loc[poi_visits_stats.index, '#visits'] = poi_visits_stats['#visits']
poi_df.loc[poi_visits_stats.index, '#distinctUsers'] = poi_visits_stats['#distinctUsers']
poi_df.loc[poi_duration_stats.index, 'medianDuration(sec)'] = poi_duration_stats['medianDuration(sec)']
poi_df.loc[poi_duration_stats.index, 'totalDuration(sec)'] = poi_duration_stats['totalDuration(sec)']
poi_df.head()
Explanation: Copy visit statistics to POI dataframe.
End of explanation
#poi_file = os.path.join(data_dir, 'poi_df.csv')
#poi_df.to_csv(poi_file, index=True)
Explanation: 2.2 Visualise & Save POIs
Visualise POI on map: Simply import the above CSV file in Google Maps (Google Drive -> NEW -> More -> Google My Maps), an example of this POI dataframe shown on map is available here.
To sort POIs according to one attribute (e.g. #photos) in Google Maps, click the option icon at the upper right corner of the layer, then click "Open data table", a data table will pop-up, click the column of interest (e.g. #photos), then click "Sort A->Z" to sort POIs according to that attribute (e.g. #photos) in ascending order.
Save POI dataframe to CSV file.
End of explanation
poi_df[poi_df['#visits'] < 1]
Explanation: 3. POI vs Photo
3.1 POIs with NO Visits
End of explanation
traj_all = visits[['userID', 'trajID', 'poiID', 'dateTaken']].copy().groupby(['userID', 'trajID', 'poiID'])\
.agg([np.min, np.max, np.size])
traj_all.columns = traj_all.columns.droplevel()
traj_all.reset_index(inplace=True)
traj_all.rename(columns={'amin':'startTime', 'amax':'endTime', 'size':'#photo'}, inplace=True)
traj_len = traj_all[['userID', 'trajID', 'poiID']].copy().groupby(['userID', 'trajID']).agg(np.size)
traj_len.reset_index(inplace=True)
traj_len.rename(columns={'poiID':'trajLen'}, inplace=True)
traj_all = pd.merge(traj_all, traj_len, on=['userID', 'trajID'])
traj_all['poiDuration'] = traj_all['endTime'] - traj_all['startTime']
traj_all.head(10)
Explanation: 3.2 Photo Clusters without Corresponding POIs
TODO: A map with a cluster of photos at some place in Melbourne, given that NO geo-coordinates were provided in its Wikipedia page.
A popular place needs to be provided!
4. Compute Trajectories
4.1 Trajectories Data
Compute trajectories using POI visiting records, assuming NO loops/subtours in trajectories.
End of explanation
def extract_traj_withloop(tid, visits):
    """Compute trajectories info, taking care of trajectories that contain sub-tours"""
traj_df = visits[visits['trajID'] == tid].copy()
traj_df.sort_values(by='dateTaken', ascending=True, inplace=True)
traj = []
for ix in traj_df.index:
poi = traj_df.loc[ix, 'poiID']
if len(traj) == 0:
traj.append(poi)
else:
if poi != traj[-1]:
traj.append(poi)
return traj
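# For example (illustrative values, not from the dataset): a photo sequence mapped to POIs
# 1, 1, 2, 2, 3, 2 yields [1, 2, 3, 2] -- consecutive duplicates are merged,
# but revisits (loops/sub-tours) such as the second visit to POI 2 are kept.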
Explanation: 4.2 Utility Functions
Extract trajectory, i.e., a list of POIs, considering loops/subtours.
End of explanation
def extract_traj(tid, traj_all):
traj = traj_all[traj_all['trajID'] == tid].copy()
traj.sort_values(by=['startTime'], ascending=True, inplace=True)
return traj['poiID'].tolist()
Explanation: Extract trajectory, i.e., a list of POIs, assuming NO loops/subtours exist.
End of explanation
loopcnt = 0
for tid_ in visits['trajID'].unique():
traj_ = extract_traj_withloop(tid_, visits)
if len(traj_) != len(set(traj_)):
#print(traj_)
loopcnt += 1
print('Number of trajectories with loops/subtours:', loopcnt)
Explanation: Counting the number of trajectories with loops/subtours.
End of explanation
def calc_poi_info(trajid_list, traj_all, poi_all):
assert(len(trajid_list) > 0)
# to allow duplicated trajid
poi_info = traj_all[traj_all['trajID'] == trajid_list[0]][['poiID', 'poiDuration']].copy()
for i in range(1, len(trajid_list)):
traj = traj_all[traj_all['trajID'] == trajid_list[i]][['poiID', 'poiDuration']]
poi_info = poi_info.append(traj, ignore_index=True)
poi_info = poi_info.groupby('poiID').agg([np.mean, np.size])
poi_info.columns = poi_info.columns.droplevel()
poi_info.reset_index(inplace=True)
poi_info.rename(columns={'mean':'avgDuration', 'size':'nVisit'}, inplace=True)
poi_info.set_index('poiID', inplace=True)
poi_info['poiCat'] = poi_all.loc[poi_info.index, 'poiTheme']
poi_info['poiLon'] = poi_all.loc[poi_info.index, 'poiLon']
poi_info['poiLat'] = poi_all.loc[poi_info.index, 'poiLat']
# POI popularity: the number of distinct users that visited the POI
pop_df = traj_all[traj_all['trajID'].isin(trajid_list)][['poiID', 'userID']].copy()
pop_df = pop_df.groupby('poiID').agg(pd.Series.nunique)
pop_df.rename(columns={'userID':'nunique'}, inplace=True)
poi_info['popularity'] = pop_df.loc[poi_info.index, 'nunique']
return poi_info.copy()
Explanation: Compute POI properties, e.g., popularity, total number of visits, average visit duration.
End of explanation
def calc_F1(traj_act, traj_rec):
'''Compute recall, precision and F1 for recommended trajectories'''
assert(len(traj_act) > 0)
assert(len(traj_rec) > 0)
intersize = len(set(traj_act) & set(traj_rec))
recall = intersize / len(traj_act)
precision = intersize / len(traj_rec)
F1 = 2 * precision * recall / (precision + recall)
return F1
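# Quick illustration (toy POI lists, not from the dataset): comparing an actual
# trajectory [1, 2, 3, 4] with a recommendation [1, 5, 3, 4] gives precision =
# recall = 3/4, hence F1 = 0.75.
print(calc_F1([1, 2, 3, 4], [1, 5, 3, 4]))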
Explanation: Compute the F1 score for recommended trajectory, assuming NO loops/subtours in trajectories.
End of explanation
def calc_dist_vec(longitudes1, latitudes1, longitudes2, latitudes2):
    """Calculate the distance (unit: km) between two places on earth, vectorised"""
# convert degrees to radians
lng1 = np.radians(longitudes1)
lat1 = np.radians(latitudes1)
lng2 = np.radians(longitudes2)
lat2 = np.radians(latitudes2)
radius = 6371.0088 # mean earth radius, en.wikipedia.org/wiki/Earth_radius#Mean_radius
# The haversine formula, en.wikipedia.org/wiki/Great-circle_distance
dlng = np.fabs(lng1 - lng2)
dlat = np.fabs(lat1 - lat2)
dist = 2 * radius * np.arcsin( np.sqrt(
(np.sin(0.5*dlat))**2 + np.cos(lat1) * np.cos(lat2) * (np.sin(0.5*dlng))**2 ))
return dist
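# Sanity check with approximate coordinates (illustrative values, not taken from the POI file):
# Federation Square (-37.8180, 144.9691) to the MCG (-37.8200, 144.9834) is roughly 1.3 km.
print(calc_dist_vec(144.9691, -37.8180, 144.9834, -37.8200))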
Explanation: Compute distance between two POIs using Haversine formula.
End of explanation
POI_DISTMAT = pd.DataFrame(data=np.zeros((poi_all.shape[0], poi_all.shape[0]), dtype=np.float), \
index=poi_all.index, columns=poi_all.index)
for ix in poi_all.index:
POI_DISTMAT.loc[ix] = calc_dist_vec(poi_all.loc[ix, 'poiLon'], \
poi_all.loc[ix, 'poiLat'], \
poi_all['poiLon'], \
poi_all['poiLat'])
trajid_set_all = sorted(traj_all['trajID'].unique().tolist())
poi_info_all = calc_poi_info(trajid_set_all, traj_all, poi_all)
poi_info_all.head()
Explanation: Distance between POIs.
End of explanation
poi_all['poiPopularity'] = 0
poi_all.loc[poi_info_all.index, 'poiPopularity'] = poi_info_all.loc[poi_info_all.index, 'popularity']
poi_all.head()
#poi_all.to_csv('poi_all.csv', index=True)
Explanation: Dump POI popularity.
End of explanation
#zero_duration = poi_info_all[poi_info_all['avgDuration'] < 1]
#zero_duration
#print(traj_all.shape)
#traj_all = traj_all[traj_all['poiID'].isin(set(poi_info_all.index) - set(zero_duration.index))]
#print(traj_all.shape)
Explanation: ~~Filtering out POI visits with 0 duration.~~
End of explanation
traj_dict = dict()
for trajid in trajid_set_all:
traj = extract_traj(trajid, traj_all)
assert(trajid not in traj_dict)
traj_dict[trajid] = traj
Explanation: Dictionary maps every trajectory ID to the actual trajectory.
End of explanation
QUERY_ID_DICT = dict() # (start, end, length) --> qid
keys = [(traj_dict[x][0], traj_dict[x][-1], len(traj_dict[x])) \
for x in sorted(traj_dict.keys()) if len(traj_dict[x]) > 2]
cnt = 0
for key in keys:
if key not in QUERY_ID_DICT: # (start, end, length) --> qid
QUERY_ID_DICT[key] = cnt
cnt += 1
print('#traj in total:', len(trajid_set_all))
print('#traj (length > 2):', traj_all[traj_all['trajLen'] > 2]['trajID'].unique().shape[0])
print('#query tuple:', len(QUERY_ID_DICT))
Explanation: Define a query (in IR terminology) using tuple (start POI, end POI, #POI) ~~user ID.~~
End of explanation
DF_COLUMNS = ['poiID', 'label', 'queryID', 'popularity', 'nVisit', 'avgDuration', \
'sameCatStart', 'sameCatEnd', 'distStart', 'distEnd', 'trajLen', 'diffPopStart', \
'diffPopEnd', 'diffNVisitStart', 'diffNVisitEnd', 'diffDurationStart', 'diffDurationEnd']
Explanation: 5. Recommendation via POI Ranking
5.1 POI Features for Ranking
POI Features used for ranking:
1. popularity: POI popularity, i.e., the number of distinct users that visited the POI
1. nVisit: the total number of visits by all users
1. avgDuration: average POI visit duration
1. sameCatStart: 1 if POI category is the same as that of startPOI, -1 otherwise
1. sameCatEnd: 1 if POI category is the same as that of endPOI, -1 otherwise
1. distStart: distance (haversine formula) from startPOI
1. distEnd: distance from endPOI
1. trajLen: trajectory length (copied from the query)
1. diffPopStart: difference in POI popularity from startPOI
1. diffPopEnd: difference in POI popularity from endPOI
1. diffNVisitStart: difference in the total number of visits from startPOI
1. diffNVisitEnd: difference in the total number of visits from endPOI
1. diffDurationStart: difference in average POI visit duration from the actual duration spent at startPOI
1. diffDurationEnd: difference in average POI visit duration from the actual duration spent at endPOI
End of explanation
def gen_train_subdf(poi_id, query_id_set, poi_info, query_id_rdict):
columns = DF_COLUMNS
poi_distmat = POI_DISTMAT
df_ = pd.DataFrame(data=np.zeros((len(query_id_set), len(columns)), dtype=np.float), columns=columns)
pop = poi_info.loc[poi_id, 'popularity']; nvisit = poi_info.loc[poi_id, 'nVisit']
cat = poi_info.loc[poi_id, 'poiCat']; duration = poi_info.loc[poi_id, 'avgDuration']
for j in range(len(query_id_set)):
qid = query_id_set[j]
assert(qid in query_id_rdict) # qid --> (start, end, length)
(p0, pN, trajLen) = query_id_rdict[qid]
idx = df_.index[j]
df_.loc[idx, 'poiID'] = poi_id
df_.loc[idx, 'queryID'] = qid
df_.loc[idx, 'popularity'] = pop
df_.loc[idx, 'nVisit'] = nvisit
df_.loc[idx, 'avgDuration'] = duration
df_.loc[idx, 'sameCatStart'] = 1 if cat == poi_info.loc[p0, 'poiCat'] else -1
df_.loc[idx, 'sameCatEnd'] = 1 if cat == poi_info.loc[pN, 'poiCat'] else -1
df_.loc[idx, 'distStart'] = poi_distmat.loc[poi_id, p0]
df_.loc[idx, 'distEnd'] = poi_distmat.loc[poi_id, pN]
df_.loc[idx, 'trajLen'] = trajLen
df_.loc[idx, 'diffPopStart'] = pop - poi_info.loc[p0, 'popularity']
df_.loc[idx, 'diffPopEnd'] = pop - poi_info.loc[pN, 'popularity']
df_.loc[idx, 'diffNVisitStart'] = nvisit - poi_info.loc[p0, 'nVisit']
df_.loc[idx, 'diffNVisitEnd'] = nvisit - poi_info.loc[pN, 'nVisit']
df_.loc[idx, 'diffDurationStart'] = duration - poi_info.loc[p0, 'avgDuration']
df_.loc[idx, 'diffDurationEnd'] = duration - poi_info.loc[pN, 'avgDuration']
return df_
def gen_train_df(trajid_list, traj_dict, poi_info, n_jobs=-1):
columns = DF_COLUMNS
poi_distmat = POI_DISTMAT
query_id_dict = QUERY_ID_DICT
train_trajs = [traj_dict[x] for x in trajid_list if len(traj_dict[x]) > 2]
qid_set = sorted(set([query_id_dict[(t[0], t[-1], len(t))] for t in train_trajs]))
poi_set = set()
for tr in train_trajs:
poi_set = poi_set | set(tr)
#qid_poi_pair = list(itertools.product(qid_set, poi_set)) # Cartesian product of qid_set and poi_set
#df_ = pd.DataFrame(data=np.zeros((len(qid_poi_pair), len(columns)), dtype= np.float), columns=columns)
query_id_rdict = dict()
for k, v in query_id_dict.items():
query_id_rdict[v] = k # qid --> (start, end, length)
train_df_list = Parallel(n_jobs=n_jobs)\
(delayed(gen_train_subdf)(poi, qid_set, poi_info, query_id_rdict) \
for poi in poi_set)
assert(len(train_df_list) > 0)
df_ = train_df_list[0]
for j in range(1, len(train_df_list)):
df_ = df_.append(train_df_list[j], ignore_index=True)
# set label
df_.set_index(['queryID', 'poiID'], inplace=True)
for t in train_trajs:
qid = query_id_dict[(t[0], t[-1], len(t))]
for poi in t[1:-1]: # do NOT count if the POI is startPOI/endPOI
df_.loc[(qid, poi), 'label'] += 1
df_.reset_index(inplace=True)
return df_
Explanation: 5.2 Training DataFrame
Training data are generated as follows:
1. each input tuple $(\text{startPOI}, \text{endPOI}, \text{#POI})$ forms a query (in IR terminology).
1. the label of a specific POI is the number of times that POI appears in the trajectories of a specific query, excluding appearances as $\text{startPOI}$ or $\text{endPOI}$.
1. for each query, all POIs absent from the trajectories of that query in the training set get a label of 0.
The dimension of training data matrix is #(qid, poi) by #feature.
End of explanation
def gen_test_df(startPOI, endPOI, nPOI, poi_info):
columns = DF_COLUMNS
poi_distmat = POI_DISTMAT
query_id_dict = QUERY_ID_DICT
key = (p0, pN, trajLen) = (startPOI, endPOI, nPOI)
assert(key in query_id_dict)
assert(p0 in poi_info.index)
assert(pN in poi_info.index)
df_ = pd.DataFrame(data=np.zeros((poi_info.shape[0], len(columns)), dtype= np.float), columns=columns)
poi_list = sorted(poi_info.index)
qid = query_id_dict[key]
df_['queryID'] = qid
df_['label'] = np.random.rand(df_.shape[0]) # label for test data is arbitrary according to libsvm FAQ
for i in range(df_.index.shape[0]):
poi = poi_list[i]
lon = poi_info.loc[poi, 'poiLon']; lat = poi_info.loc[poi, 'poiLat']
pop = poi_info.loc[poi, 'popularity']; nvisit = poi_info.loc[poi, 'nVisit']
cat = poi_info.loc[poi, 'poiCat']; duration = poi_info.loc[poi, 'avgDuration']
idx = df_.index[i]
df_.loc[idx, 'poiID'] = poi
df_.loc[idx, 'popularity'] = pop
df_.loc[idx, 'nVisit'] = nvisit
df_.loc[idx, 'avgDuration'] = duration
df_.loc[idx, 'sameCatStart'] = 1 if cat == poi_info.loc[p0, 'poiCat'] else -1
df_.loc[idx, 'sameCatEnd'] = 1 if cat == poi_info.loc[pN, 'poiCat'] else -1
df_.loc[idx, 'distStart'] = poi_distmat.loc[poi, p0]
df_.loc[idx, 'distEnd'] = poi_distmat.loc[poi, pN]
df_.loc[idx, 'trajLen'] = trajLen
df_.loc[idx, 'diffPopStart'] = pop - poi_info.loc[p0, 'popularity']
df_.loc[idx, 'diffPopEnd'] = pop - poi_info.loc[pN, 'popularity']
df_.loc[idx, 'diffNVisitStart'] = nvisit - poi_info.loc[p0, 'nVisit']
df_.loc[idx, 'diffNVisitEnd'] = nvisit - poi_info.loc[pN, 'nVisit']
df_.loc[idx, 'diffDurationStart'] = duration - poi_info.loc[p0, 'avgDuration']
df_.loc[idx, 'diffDurationEnd'] = duration - poi_info.loc[pN, 'avgDuration']
return df_
Explanation: Sanity check:
- different POIs have different features for the same query trajectory
- the same POI gets different features for different query IDs
5.3 Test DataFrame
Test data are generated the same way as training data, except that the labels of testing data (unknown) could be arbitrary values as suggested in libsvm FAQ.
The reported accuracy (by svm-predict command) is meaningless as it is calculated based on these labels.
The dimension of the test data matrix is #poi by #feature for one specific query, i.e. tuple $(\text{startPOI}, \text{endPOI}, \text{#POI})$.
End of explanation
def gen_data_str(df_, df_columns=DF_COLUMNS):
columns = df_columns[1:].copy() # get rid of 'poiID'
for col in columns:
assert(col in df_.columns)
lines = []
for idx in df_.index:
slist = [str(df_.loc[idx, 'label'])]
slist.append(' qid:')
slist.append(str(int(df_.loc[idx, 'queryID'])))
for j in range(2, len(columns)):
slist.append(' ')
slist.append(str(j-1))
slist.append(':')
slist.append(str(df_.loc[idx, columns[j]]))
slist.append('\n')
lines.append(''.join(slist))
return ''.join(lines)
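# Each line produced above follows the SVM-light/rankSVM input format, i.e.
#   "<label> qid:<queryID> 1:<feature1> 2:<feature2> ..."
# e.g. (illustrative values only): "2.0 qid:7 1:135.0 2:388.0 3:864.5 4:1.0 5:-1.0 ..."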
Explanation: Sanity check:
- different POIs have different features for the same query trajectory
- the same POI gets different features for different query IDs
Generate a string for a training/test data frame.
End of explanation
def softmax(x):
x1 = x.copy()
x1 -= np.max(x1) # numerically more stable, REF: http://cs231n.github.io/linear-classify/#softmax
expx = np.exp(x1)
return expx / np.sum(expx, axis=0) # column-wise sum
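# Quick illustration: softmax turns arbitrary ranking scores into a probability
# distribution, e.g. scores [2.0, 1.0, 0.1] map to roughly [0.66, 0.24, 0.10].
print(softmax(np.array([2.0, 1.0, 0.1])))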
Explanation: 5.4 Ranking POIs using rankSVM
RankSVM implementation in libsvm.zip or liblinear.zip, please read README.ranksvm in the zip file for installation instructions.
Use softmax function to convert ranking scores to a probability distribution.
End of explanation
# python wrapper of rankSVM
class RankSVM:
def __init__(self, bin_dir, useLinear=True, debug=False):
dir_ = !echo $bin_dir # deal with environmental variables in path
assert(os.path.exists(dir_[0]))
self.bin_dir = dir_[0]
self.bin_train = 'svm-train'
self.bin_predict = 'svm-predict'
if useLinear:
self.bin_train = 'train'
self.bin_predict = 'predict'
assert(isinstance(debug, bool))
self.debug = debug
# create named tmp files for model and feature scaling parameters
self.fmodel = None
self.fscale = None
with tempfile.NamedTemporaryFile(delete=False) as fd:
self.fmodel = fd.name
with tempfile.NamedTemporaryFile(delete=False) as fd:
self.fscale = fd.name
if self.debug:
print('model file:', self.fmodel)
print('feature scaling parameter file:', self.fscale)
def __del__(self):
# remove tmp files
if self.fmodel is not None and os.path.exists(self.fmodel):
os.unlink(self.fmodel)
if self.fscale is not None and os.path.exists(self.fscale):
os.unlink(self.fscale)
def train(self, train_df, cost=1):
# cost is parameter C in SVM
# write train data to file
ftrain = None
with tempfile.NamedTemporaryFile(mode='w+t', delete=False) as fd:
ftrain = fd.name
datastr = gen_data_str(train_df)
fd.write(datastr)
# feature scaling
ftrain_scaled = None
with tempfile.NamedTemporaryFile(mode='w+t', delete=False) as fd:
ftrain_scaled = fd.name
result = !$self.bin_dir/svm-scale -s $self.fscale $ftrain > $ftrain_scaled
if self.debug:
print('cost:', cost)
print('train data file:', ftrain)
print('feature scaled train data file:', ftrain_scaled)
# train rank svm and generate model file, if the model file exists, rewrite it
#n_cv = 10 # parameter k for k-fold cross-validation, NO model file will be generated in CV mode
#result = !$self.bin_dir/svm-train -c $cost -v $n_cv $ftrain $self.fmodel
result = !$self.bin_dir/$self.bin_train -c $cost $ftrain_scaled $self.fmodel
if self.debug:
print('Training finished.')
for i in range(len(result)): print(result[i])
# remove train data file
os.unlink(ftrain)
os.unlink(ftrain_scaled)
def predict(self, test_df):
# predict ranking scores for the given feature matrix
if self.fmodel is None or not os.path.exists(self.fmodel):
print('Model should be trained before predicting')
return
# write test data to file
ftest = None
with tempfile.NamedTemporaryFile(mode='w+t', delete=False) as fd:
ftest = fd.name
datastr = gen_data_str(test_df)
fd.write(datastr)
# feature scaling
ftest_scaled = None
with tempfile.NamedTemporaryFile(delete=False) as fd:
ftest_scaled = fd.name
result = !$self.bin_dir/svm-scale -r $self.fscale $ftest > $ftest_scaled
# generate prediction file
fpredict = None
with tempfile.NamedTemporaryFile(delete=False) as fd:
fpredict = fd.name
if self.debug:
print('test data file:', ftest)
print('feature scaled test data file:', ftest_scaled)
print('predict result file:', fpredict)
# predict using trained model and write prediction to file
result = !$self.bin_dir/$self.bin_predict $ftest_scaled $self.fmodel $fpredict
if self.debug:
print('Predict result: %-30s %s' % (result[0], result[1]))
# generate prediction DataFrame from prediction file
poi_rank_df = pd.read_csv(fpredict, header=None)
poi_rank_df.rename(columns={0:'rank'}, inplace=True)
poi_rank_df['poiID'] = test_df['poiID'].astype(np.int)
poi_rank_df.set_index('poiID', inplace=True) # duplicated 'poiID' when evaluating training data
#poi_rank_df['probability'] = softmax(poi_rank_df['rank']) # softmax
# remove test file and prediction file
os.unlink(ftest)
os.unlink(ftest_scaled)
os.unlink(fpredict)
return poi_rank_df
Explanation: Below is a python wrapper of the svm-train or train and svm-predict or predict commands of rankSVM with ranking probabilities $P(p_i \lvert (p_s, p_e, len))$ computed using softmax function.
End of explanation
def normalise_transmat(transmat_cnt):
transmat = transmat_cnt.copy()
assert(isinstance(transmat, pd.DataFrame))
for row in range(transmat.index.shape[0]):
rowsum = np.sum(transmat.iloc[row] + 1)
assert(rowsum > 0)
transmat.iloc[row] = (transmat.iloc[row] + 1) / rowsum
return transmat
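# Quick illustration of the add-one (Laplace) smoothing above on toy counts:
# rows [[3, 1], [0, 0]] become [[4/6, 2/6], [1/2, 1/2]], so a row with no observed
# transitions turns into a uniform distribution instead of all zeros.
print(normalise_transmat(pd.DataFrame([[3., 1.], [0., 0.]], index=['A', 'B'], columns=['A', 'B'])))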
Explanation: 6. Factorised Transition Probabilities
6.1 POI Features for Factorisation
POI features used to factorise the transition matrix of a Markov chain with POI features (vectors) as states:
- Category of POI
- Popularity of POI (discretized with uniform log-scale bins, #bins=5)
- The number of POI visits (discretized with uniform log-scale bins, #bins=5)
- The average visit duration of POI (discretized with uniform log-scale bins, #bins=5)
- The neighborhood relationship between POIs (clustering POI (lat, lon) using k-means, #clusters=5)
We count the number of transitions first, then normalise each row while taking care of zeros by adding $k=1$ to each cell (add-one smoothing).
End of explanation
poi_train = sorted(poi_info_all.index)
Explanation: POIs in training set.
End of explanation
poi_cats = poi_all.loc[poi_train, 'poiTheme'].unique().tolist()
poi_cats.sort()
#poi_cats
def gen_transmat_cat(trajid_list, traj_dict, poi_info, poi_cats=poi_cats):
transmat_cat_cnt = pd.DataFrame(data=np.zeros((len(poi_cats), len(poi_cats)), dtype=np.float), \
columns=poi_cats, index=poi_cats)
for tid in trajid_list:
t = traj_dict[tid]
if len(t) > 1:
for pi in range(len(t)-1):
p1 = t[pi]
p2 = t[pi+1]
assert(p1 in poi_info.index and p2 in poi_info.index)
cat1 = poi_info.loc[p1, 'poiCat']
cat2 = poi_info.loc[p2, 'poiCat']
transmat_cat_cnt.loc[cat1, cat2] += 1
return normalise_transmat(transmat_cat_cnt)
gen_transmat_cat(trajid_set_all, traj_dict, poi_info_all)
Explanation: 6.2 Transition Matrix between POI Categories
End of explanation
poi_pops = poi_info_all.loc[poi_train, 'popularity']
#sorted(poi_pops.unique().tolist())
Explanation: 6.3 Transition Matrix between POI Popularity Classes
End of explanation
expo_pop1 = np.log10(max(1, min(poi_pops)))
expo_pop2 = np.log10(max(poi_pops))
print(expo_pop1, expo_pop2)
nbins_pop = BIN_CLUSTER
logbins_pop = np.logspace(np.floor(expo_pop1), np.ceil(expo_pop2), nbins_pop+1)
logbins_pop[0] = 0 # deal with underflow
if logbins_pop[-1] < poi_info_all['popularity'].max():
logbins_pop[-1] = poi_info_all['popularity'].max() + 1
logbins_pop
ax = pd.Series(poi_pops).hist(figsize=(5, 3), bins=logbins_pop)
ax.set_xlim(xmin=0.1)
ax.set_xscale('log')
def gen_transmat_pop(trajid_list, traj_dict, poi_info, logbins_pop=logbins_pop):
nbins = len(logbins_pop) - 1
transmat_pop_cnt = pd.DataFrame(data=np.zeros((nbins, nbins), dtype=np.float), \
columns=np.arange(1, nbins+1), index=np.arange(1, nbins+1))
for tid in trajid_list:
t = traj_dict[tid]
if len(t) > 1:
for pi in range(len(t)-1):
p1 = t[pi]
p2 = t[pi+1]
assert(p1 in poi_info.index and p2 in poi_info.index)
pop1 = poi_info.loc[p1, 'popularity']
pop2 = poi_info.loc[p2, 'popularity']
pc1, pc2 = np.digitize([pop1, pop2], logbins_pop)
transmat_pop_cnt.loc[pc1, pc2] += 1
return normalise_transmat(transmat_pop_cnt), logbins_pop
gen_transmat_pop(trajid_set_all, traj_dict, poi_info_all)[0]
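# np.digitize (used in gen_transmat_pop above) maps each popularity value to a 1-based bin
# index; e.g. with illustrative bin edges [0, 1, 10, 100, 1000], popularities [1, 5, 37, 250]
# fall into bins [2, 2, 3, 4] respectively.
print(np.digitize([1, 5, 37, 250], [0, 1, 10, 100, 1000]))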
Explanation: Discretize POI popularity with uniform log-scale bins.
End of explanation
poi_visits = poi_info_all.loc[poi_train, 'nVisit']
#sorted(poi_visits.unique().tolist())
Explanation: 6.4 Transition Matrix between the Number of POI Visit Classes
End of explanation
expo_visit1 = np.log10(max(1, min(poi_visits)))
expo_visit2 = np.log10(max(poi_visits))
print(expo_visit1, expo_visit2)
nbins_visit = BIN_CLUSTER
logbins_visit = np.logspace(np.floor(expo_visit1), np.ceil(expo_visit2), nbins_visit+1)
logbins_visit[0] = 0 # deal with underflow
if logbins_visit[-1] < poi_info_all['nVisit'].max():
logbins_visit[-1] = poi_info_all['nVisit'].max() + 1
logbins_visit
ax = pd.Series(poi_visits).hist(figsize=(5, 3), bins=logbins_visit)
ax.set_xlim(xmin=0.1)
ax.set_xscale('log')
def gen_transmat_visit(trajid_list, traj_dict, poi_info, logbins_visit=logbins_visit):
nbins = len(logbins_visit) - 1
transmat_visit_cnt = pd.DataFrame(data=np.zeros((nbins, nbins), dtype=np.float), \
columns=np.arange(1, nbins+1), index=np.arange(1, nbins+1))
for tid in trajid_list:
t = traj_dict[tid]
if len(t) > 1:
for pi in range(len(t)-1):
p1 = t[pi]
p2 = t[pi+1]
assert(p1 in poi_info.index and p2 in poi_info.index)
visit1 = poi_info.loc[p1, 'nVisit']
visit2 = poi_info.loc[p2, 'nVisit']
vc1, vc2 = np.digitize([visit1, visit2], logbins_visit)
transmat_visit_cnt.loc[vc1, vc2] += 1
return normalise_transmat(transmat_visit_cnt), logbins_visit
gen_transmat_visit(trajid_set_all, traj_dict, poi_info_all)[0]
Explanation: Discretize the number of POI visits with uniform log-scale bins.
End of explanation
poi_durations = poi_info_all.loc[poi_train, 'avgDuration']
#sorted(poi_durations.unique().tolist())
expo_duration1 = np.log10(max(1, min(poi_durations)))
expo_duration2 = np.log10(max(poi_durations))
print(expo_duration1, expo_duration2)
nbins_duration = BIN_CLUSTER
logbins_duration = np.logspace(np.floor(expo_duration1), np.ceil(expo_duration2), nbins_duration+1)
logbins_duration[0] = 0 # deal with underflow
logbins_duration[-1] = np.power(10, expo_duration2+2)
logbins_duration
ax = pd.Series(poi_durations).hist(figsize=(5, 3), bins=logbins_duration)
ax.set_xlim(xmin=0.1)
ax.set_xscale('log')
def gen_transmat_duration(trajid_list, traj_dict, poi_info, logbins_duration=logbins_duration):
nbins = len(logbins_duration) - 1
transmat_duration_cnt = pd.DataFrame(data=np.zeros((nbins, nbins), dtype=np.float), \
columns=np.arange(1, nbins+1), index=np.arange(1, nbins+1))
for tid in trajid_list:
t = traj_dict[tid]
if len(t) > 1:
for pi in range(len(t)-1):
p1 = t[pi]
p2 = t[pi+1]
assert(p1 in poi_info.index and p2 in poi_info.index)
d1 = poi_info.loc[p1, 'avgDuration']
d2 = poi_info.loc[p2, 'avgDuration']
dc1, dc2 = np.digitize([d1, d2], logbins_duration)
transmat_duration_cnt.loc[dc1, dc2] += 1
return normalise_transmat(transmat_duration_cnt), logbins_duration
gen_transmat_duration(trajid_set_all, traj_dict, poi_info_all)[0]
Explanation: 6.5 Transition Matrix between POI Average Visit Duration Classes
End of explanation
X = poi_all.loc[poi_train, ['poiLon', 'poiLat']]
nclusters = BIN_CLUSTER
kmeans = KMeans(n_clusters=nclusters, random_state=987654321)
kmeans.fit(X)
clusters = kmeans.predict(X)
#clusters
poi_clusters = pd.DataFrame(data=clusters, index=poi_train)
poi_clusters.index.name = 'poiID'
poi_clusters.rename(columns={0:'clusterID'}, inplace=True)
#poi_clusters
poi_clusters.to_csv('cluster.1.csv')
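# Rough sanity check of the Euclidean approximation (a sketch, not part of the original
# notebook): report each POI's haversine distance (km) to its assigned cluster centroid.
for cid_ in range(nclusters):
    members_ = poi_clusters[poi_clusters['clusterID'] == cid_].index
    clon_, clat_ = kmeans.cluster_centers_[cid_]
    dists_ = calc_dist_vec(poi_all.loc[members_, 'poiLon'], poi_all.loc[members_, 'poiLat'], clon_, clat_)
    print('cluster %d: %d POIs, max distance to centroid %.2f km' % (cid_, len(members_), np.max(dists_)))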
Explanation: 6.6 Transition Matrix between POI Neighborhood Classes
KMeans in scikit-learn does not support a custom distance metric and provides no implementation of the Haversine formula, so Euclidean distance on (longitude, latitude) is used as an approximation.
End of explanation
diff = poi_all.loc[poi_train, ['poiLon', 'poiLat']].max() - poi_all.loc[poi_train, ['poiLon', 'poiLat']].min()
ratio = diff['poiLon'] / diff['poiLat']
#ratio
height = 6; width = int(round(ratio)*height)
plt.figure(figsize=[width, height])
plt.scatter(poi_all.loc[poi_train, 'poiLon'], poi_all.loc[poi_train, 'poiLat'], c=clusters, s=50)
def gen_transmat_neighbor(trajid_list, traj_dict, poi_info, poi_clusters=poi_clusters):
nclusters = len(poi_clusters['clusterID'].unique())
transmat_neighbor_cnt = pd.DataFrame(data=np.zeros((nclusters, nclusters), dtype=np.float), \
columns=np.arange(nclusters), index=np.arange(nclusters))
for tid in trajid_list:
t = traj_dict[tid]
if len(t) > 1:
for pi in range(len(t)-1):
p1 = t[pi]
p2 = t[pi+1]
assert(p1 in poi_info.index and p2 in poi_info.index)
c1 = poi_clusters.loc[p1, 'clusterID']
c2 = poi_clusters.loc[p2, 'clusterID']
transmat_neighbor_cnt.loc[c1, c2] += 1
return normalise_transmat(transmat_neighbor_cnt), poi_clusters
gen_transmat_neighbor(trajid_set_all, traj_dict, poi_info_all)[0]
Explanation: Scatter plot of POI coordinates with clustering results.
End of explanation
def gen_poi_transmat(trajid_list, poi_set, traj_dict, poi_info, debug=False):
transmat_cat = gen_transmat_cat(trajid_list, traj_dict, poi_info)
transmat_pop, logbins_pop = gen_transmat_pop(trajid_list, traj_dict, poi_info)
transmat_visit, logbins_visit = gen_transmat_visit(trajid_list, traj_dict, poi_info)
transmat_duration, logbins_duration = gen_transmat_duration(trajid_list, traj_dict, poi_info)
transmat_neighbor, poi_clusters = gen_transmat_neighbor(trajid_list, traj_dict, poi_info)
# Kronecker product
transmat_ix = list(itertools.product(transmat_cat.index, transmat_pop.index, transmat_visit.index, \
transmat_duration.index, transmat_neighbor.index))
transmat_value = transmat_cat.values
for transmat in [transmat_pop, transmat_visit, transmat_duration, transmat_neighbor]:
transmat_value = kron(transmat_value, transmat.values)
transmat_feature = pd.DataFrame(data=transmat_value, index=transmat_ix, columns=transmat_ix)
poi_train = sorted(poi_set)
feature_names = ['poiCat', 'popularity', 'nVisit', 'avgDuration', 'clusterID']
poi_features = pd.DataFrame(data=np.zeros((len(poi_train), len(feature_names))), \
columns=feature_names, index=poi_train)
poi_features.index.name = 'poiID'
poi_features['poiCat'] = poi_info.loc[poi_train, 'poiCat']
poi_features['popularity'] = np.digitize(poi_info.loc[poi_train, 'popularity'], logbins_pop)
poi_features['nVisit'] = np.digitize(poi_info.loc[poi_train, 'nVisit'], logbins_visit)
poi_features['avgDuration'] = np.digitize(poi_info.loc[poi_train, 'avgDuration'], logbins_duration)
poi_features['clusterID'] = poi_clusters.loc[poi_train, 'clusterID']
# shrink the result of Kronecker product and deal with POIs with the same features
poi_logtransmat = pd.DataFrame(data=np.zeros((len(poi_train), len(poi_train)), dtype=np.float), \
columns=poi_train, index=poi_train)
for p1 in poi_logtransmat.index:
rix = tuple(poi_features.loc[p1])
for p2 in poi_logtransmat.columns:
cix = tuple(poi_features.loc[p2])
value_ = transmat_feature.loc[(rix,), (cix,)]
poi_logtransmat.loc[p1, p2] = value_.values[0, 0]
# group POIs with the same features
features_dup = dict()
for poi in poi_features.index:
key = tuple(poi_features.loc[poi])
if key in features_dup:
features_dup[key].append(poi)
else:
features_dup[key] = [poi]
if debug == True:
for key in sorted(features_dup.keys()):
print(key, '->', features_dup[key])
# deal with POIs with the same features
for feature in sorted(features_dup.keys()):
n = len(features_dup[feature])
if n > 1:
group = features_dup[feature]
v1 = poi_logtransmat.loc[group[0], group[0]] # transition value of self-loop of POI group
# divide incoming transition value (i.e. unnormalised transition probability) uniformly among group members
for poi in group:
poi_logtransmat[poi] /= n
# outgoing transition value has already been duplicated (value copied above)
# duplicate & divide transition value of self-loop of POI group uniformly among all outgoing transitions,
# from a POI to all other POIs in the same group (excluding POI self-loop)
v2 = v1 / (n - 1)
for pair in itertools.permutations(group, 2):
poi_logtransmat.loc[pair[0], pair[1]] = v2
# normalise each row
for p1 in poi_logtransmat.index:
poi_logtransmat.loc[p1, p1] = 0
rowsum = poi_logtransmat.loc[p1].sum()
assert(rowsum > 0)
logrowsum = np.log10(rowsum)
for p2 in poi_logtransmat.columns:
if p1 == p2:
poi_logtransmat.loc[p1, p2] = -np.inf # deal with log(0) explicitly
else:
poi_logtransmat.loc[p1, p2] = np.log10(poi_logtransmat.loc[p1, p2]) - logrowsum
poi_transmat = np.power(10, poi_logtransmat)
return poi_transmat
poi_transmat = gen_poi_transmat(trajid_set_all, set(poi_info_all.index), traj_dict, poi_info_all, debug=False)
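# Small illustration of the Kronecker product used above (toy numbers): with
# P(cat'|cat) = [[.9, .1], [.4, .6]] and P(pop'|pop) = [[.7, .3], [.2, .8]],
# kron() yields the 4x4 matrix over joint (category, popularity) states, e.g. the
# entry for (c1,p1) -> (c1,p2) is 0.9 * 0.3 = 0.27.
print(kron(np.array([[.9, .1], [.4, .6]]), np.array([[.7, .3], [.2, .8]])))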
Explanation: 6.7 Transition Matrix between POIs
Approximate the transition probabilities (matrix) between different POI features (vectors) using the Kronecker product of the individual transition matrices corresponding to each feature, i.e., POI category, POI popularity (discretized), the number of POI visits (discretized), POI average visit duration (discretized) and POI neighborhood (clusters).
Deal with features without corresponding POIs and features with more than one corresponding POI. (Before Normalisation)
- For features without corresponding POIs, simply remove the corresponding rows and columns from the matrix obtained by the Kronecker product.
- For different POIs with the exact same feature,
  - Treat POIs with the same feature as a POI group,
  - The incoming transition value (i.e., unnormalised transition probability) of this POI group
    should be divided uniformly among the group members,
    which corresponds to choosing a group member uniformly at random in the incoming case.
  - The outgoing transition value should be duplicated (i.e., kept the same) among all group members,
    as we were already in that group in the outgoing case.
  - For each POI in the group, allocating the transition value of the self-loop of the POI group is similar to
    the outgoing case: as we were already in that group, duplicate it and then divide it uniformly among
    the transitions from this POI to the other POIs in the same group,
    which corresponds to choosing an outgoing transition uniformly at random from all outgoing transitions
    excluding the self-loop of this POI.
  - Concretely, for a POI group with $n$ POIs,
    1. If the incoming transition value of the POI group is $m_1$,
       then the corresponding incoming transition value for each group member is $\frac{m_1}{n}$.
    1. If the outgoing transition value of the POI group is $m_2$,
       then the corresponding outgoing transition value for each group member is also $m_2$.
    1. If the transition value of the self-loop of the POI group is $m_3$,
       then the transition value of the self-loop of each individual POI should be $0$,
       and each of the other in-group transitions gets value $\frac{m_3}{n-1}$,
       as the total number of outgoing transitions to other POIs in the same group is $n-1$ (excluding the self-loop),
       i.e. $n-1$ choose $1$.
NOTE: executing the above division before or after row normalisation leads to the same result, as the division itself does NOT change the normalising constant of each row (i.e., the sum of each row before normalising).
End of explanation
plt.figure(figsize=[13, 10])
#plt.imshow(prob_mat, interpolation='none', cmap=plt.cm.hot) # OK
#ticks = prob_mat.index
#plt.xticks(np.arange(prob_mat.shape[0]), ticks)
#plt.yticks(np.arange(prob_mat.shape[0]), ticks)
#plt.xlabel('POI ID')
#plt.ylabel('POI ID')
sns.heatmap(poi_transmat)
Explanation: 6.8 Visualise Transition Matrix
Plot transition matrix heatmap.
End of explanation
def gen_kml_transition(fname, poi_id, poi_df, poi_transmat, topk=None):
ns = '{http://www.opengis.net/kml/2.2}'
# scale (linearly) the transition probabilities to [1, 255]: f(x) = a1x + b1
probs = poi_transmat.loc[poi_id].copy()
pmax = poi_transmat.loc[poi_id].max()
probs.loc[poi_id] = 1 # set self-loop to 1 to make np.min() below to get the real minimun prob.
pmin = poi_transmat.loc[poi_id].min()
nmin1, nmin2 = 1, 1
nmax1, nmax2 = 255, 10
# solve linear equations:
# nmin = a1 x pmin + b1
# nmax = a1 x pmax + b1
a1, b1 = np.dot(np.linalg.pinv(np.array([[pmin, 1], [pmax, 1]])), np.array([nmin1, nmax1])) # control transparency
a2, b2 = np.dot(np.linalg.pinv(np.array([[pmin, 1], [pmax, 1]])), np.array([nmin2, nmax2])) # control width
pm_list = []
stydict = dict()
# Placemark for edges
columns = poi_transmat.columns
if topk is not None:
assert(isinstance(topk, int))
assert(topk > 0)
idx = np.argsort(-poi_transmat.loc[poi_id])[:topk]
columns = poi_transmat.columns[idx]
#for poi in poi_transmat.columns:
for poi in columns:
if poi == poi_id: continue
prob = poi_transmat.loc[poi_id, poi]
decimal = int(np.round(a1 * prob + b1)) # scale transition probability to [1, 255]
hexa = hex(decimal)[2:] + '0' if decimal < 16 else hex(decimal)[2:] # get rid of prefix '0x'
color = hexa + '0000ff' # colors in KML: aabbggrr, aa=00 is fully transparent, transparent red
width = int(np.round(a2 * prob + b2))
if color not in stydict:
stydict[color] = styles.LineStyle(color=color, width=width)
sid = str(poi_id) + '_' + str(poi)
ext_dict = {'From poiID': str(poi_id), 'From poiName': poi_df.loc[poi_id, 'Name'], \
'To poiID': str(poi), 'To poiName': poi_df.loc[poi, 'Name'], \
'Transition Probability': ('%.15f' % prob)}
ext_data = kml.ExtendedData(elements=[kml.Data(name=x, value=ext_dict[x]) for x in sorted(ext_dict.keys())])
pm = kml.Placemark(ns, sid, 'Edge_' + sid, description=None, styleUrl='#' + color, extended_data=ext_data)
pm.geometry = LineString([(poi_df.loc[x, 'Longitude'], poi_df.loc[x, 'Latitude']) for x in [poi_id, poi]])
pm_list.append(pm)
# Placemark for POIs: import from csv file directly
k = kml.KML()
doc = kml.Document(ns, '1', 'Transitions of POI ' + str(poi_id) , description=None, \
styles=[styles.Style(id=x, styles=[stydict[x]]) for x in stydict.keys()])
for pm in pm_list: doc.append(pm)
k.append(doc)
# save to file
kmlstr = k.to_string(prettyprint=True)
with open(fname, 'w') as f:
f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
f.write(kmlstr)
print('write to', fname)
Explanation: 6.9 Visualise Transitions of Specific POIs
Generate KML file to visualise the transitions from a specific POI using edge width and edge transparency to distinguish different transition probabilities.
End of explanation
most_popular = poi_df['#distinctUsers'].argmax()
#poi_df.loc[most_popular]
#poi_transmat.loc[most_popular]
fname = 'trans_most_popular.kml'
gen_kml_transition(fname, most_popular, poi_df, poi_transmat)
Explanation: 6.9.1 The Most Popular POI
Define the popularity of POI as the number of distinct users that visited the POI.
End of explanation
poi_qvm = poi_df[poi_df['Name'] == 'Queen Victoria Market']
poi_qvm
#poi_transmat.loc[poi_qvm.index[0]]
fname = 'trans_qvm.kml'
gen_kml_transition(fname, poi_qvm.index[0], poi_df, poi_transmat)
Explanation: Example on Google maps.
6.9.2 The Queen Victoria Market
End of explanation
poi_um = poi_df[poi_df['Name'] == 'University of Melbourne']
poi_um
#poi_transmat.loc[poi_um.index[0]]
fname = 'trans_um.kml'
gen_kml_transition(fname, poi_um.index[0], poi_df, poi_transmat)
Explanation: Example on Google maps.
6.9.3 The University of Melbourne
End of explanation
fname = 'trans_um_top30.kml'
gen_kml_transition(fname, poi_um.index[0], poi_df, poi_transmat, topk=30)
Explanation: Example on Google maps.
End of explanation
poi_mca = poi_df[poi_df['Name'] == 'Margaret Court Arena']
poi_mca
#poi_transmat.loc[poi_mca.index[0]]
fname = 'trans_mca.kml'
gen_kml_transition(fname, poi_mca.index[0], poi_df, poi_transmat)
Explanation: Example on Google maps.
6.9.4 The Margaret Court Arena
End of explanation
poi_rmit = poi_df[poi_df['Name'] == 'RMIT City']
poi_rmit
#poi_transmat.loc[poi_rmit.index[0]]
fname = 'trans_rmit.kml'
gen_kml_transition(fname, poi_rmit.index[0], poi_df, poi_transmat)
Explanation: Example on Google maps.
6.9.5 RMIT City
End of explanation
fname = 'trans_rmit_top30.kml'
gen_kml_transition(fname, poi_rmit.index[0], poi_df, poi_transmat, topk=30)
Explanation: Example on Google maps.
End of explanation
def gen_kml_traj(fname, traj_subdict, poi_df):
ns = '{http://www.opengis.net/kml/2.2}'
norm = mpl.colors.Normalize(vmin=1, vmax=len(traj_subdict))
cmap = mpl.cm.hot
pmap = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)
pm_list = []
stydict = dict()
trajids = sorted(traj_subdict.keys())
for i in range(len(trajids)):
traj = traj_subdict[trajids[i]]
r, g, b, a = pmap.to_rgba(i+1, bytes=True)
color = '%02x%02x%02x%02x' % (63, b, g, r) # colors in KML: aabbggrr, aa=00 is fully transparent
if color not in stydict:
stydict[color] = styles.LineStyle(color=color, width=3)
for j in range(len(traj)-1):
poi1 = traj[j]
poi2 = traj[j+1]
sid = str(poi1) + '_' + str(poi2)
ext_dict = {'TrajID': str(trajids[i]), 'Trajectory': str(traj), \
'From poiID': str(poi1), 'From poiName': poi_df.loc[poi1, 'Name'], \
'To poiID': str(poi2), 'To poiName': poi_df.loc[poi2, 'Name']}
ext_data = kml.ExtendedData(elements=[kml.Data(name=x, value=ext_dict[x]) for x in sorted(ext_dict.keys())])
pm = kml.Placemark(ns, sid, 'Edge_' + sid, description=None, styleUrl='#' + color, extended_data=ext_data)
pm.geometry = LineString([(poi_df.loc[x, 'Longitude'], poi_df.loc[x, 'Latitude']) for x in [poi1, poi2]])
pm_list.append(pm)
# Placemark for POIs: import from csv file directly
k = kml.KML()
doc = kml.Document(ns, '1', 'Visualise %d Trajectories' % len(traj_subdict), description=None, \
styles=[styles.Style(id=x, styles=[stydict[x]]) for x in stydict.keys()])
for pm in pm_list: doc.append(pm)
k.append(doc)
# save to file
kmlstr = k.to_string(prettyprint=True)
with open(fname, 'w') as f:
f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
f.write(kmlstr)
print('write to', fname)
Explanation: Example on Google maps.
6.10 Visualise Trajectories that Pass through Specific POIs
Generate KML file for a set of trajectories.
End of explanation
poi_mcg = poi_df[poi_df['Name'] == 'Melbourne Cricket Ground (MCG)']
poi_mcg
traj_dict_mcg = dict()
mcg = poi_mcg.index[0]
for tid_ in sorted(traj_dict.keys()):
traj_ = traj_dict[tid_]
#if mcg in traj_ and mcg != traj_[0] and mcg != traj_[-1]:
if mcg in traj_ and len(traj_) > 1:
traj_dict_mcg[tid_] = traj_
print(len(traj_dict_mcg), 'trajectories pass through Melbourne Cricket Ground (MCG).')
fname = 'traj_pass_mcg.kml'
gen_kml_traj(fname, traj_dict_mcg, poi_df)
Explanation: 6.10.1 The Melbourne Cricket Ground (MCG)
Trajectories (with more than $1$ POI) that pass through the Melbourne Cricket Ground (MCG).
End of explanation
poi_gh = poi_df[poi_df['Name'] == 'Government House']
poi_gh
traj_dict_gh = dict()
gh = poi_gh.index[0]
for tid_ in sorted(traj_dict.keys()):
traj_ = traj_dict[tid_]
#if gh in traj_ and gh != traj_[0] and gh != traj_[-1]:
if gh in traj_ and len(traj_) > 1:
traj_dict_gh[tid_] = traj_
print(len(traj_dict_gh), 'trajectories pass through Government House.')
fname = 'traj_pass_gh.kml'
gen_kml_traj(fname, traj_dict_gh, poi_df)
Explanation: Example on Google maps.
6.10.2 The Government House
Trajectories (with more than $1$ POI) that pass through the Government House.
End of explanation
traj4s = traj_all[traj_all['trajLen'] == 4]['trajID'].unique()
traj4s
#for tid in traj4s:
# gen_kml(tid, traj_all, poi_df)
Explanation: Example on Google maps.
7. Recommendation Results Comparison & Visualisation
Examples of recommendation results: recommendations based on POI popularity, POI ranking and the POI transition matrix, with the recommended results visualised on a map.
7.1 Choose an Example Trajectory
Choose a trajectory of length 4.
End of explanation
tid = 680
traj = extract_traj(tid, traj_all)
print('REAL:', traj)
traj_dict_rec = {'REAL_' + str(tid): traj}
start = traj[0]
end = traj[-1]
length = len(traj)
Explanation: After looking at many of these trajectories on map, we choose trajectory 680 to illustrate.
End of explanation
poi_df.sort_values(by='#distinctUsers', ascending=False, inplace=True)
rec_pop = [start] + [x for x in poi_df.index.tolist() if x not in {start, end}][:length-2] + [end]
print('REC_POP:', rec_pop)
tid_rec = 'REC_POP'
traj_dict_rec[tid_rec] = rec_pop
Explanation: 7.2 Recommendation by POI Popularity
Recommend trajectory based on POI popularity only.
End of explanation
trajid_list_train = list(set(trajid_set_all) - {tid})
poi_info = calc_poi_info(trajid_list_train, traj_all, poi_all)
train_df = gen_train_df(trajid_list_train, traj_dict, poi_info) # POI feature based ranking
ranksvm = RankSVM(ranksvm_dir, useLinear=True)
ranksvm.train(train_df, cost=RANK_C)
test_df = gen_test_df(start, end, length, poi_info)
rank_df = ranksvm.predict(test_df)
rank_df.sort_values(by='rank', ascending=False, inplace=True)
rec_rank = [start] + [x for x in rank_df.index.tolist() if x not in {start, end}][:length-2] + [end]
print('REC_RANK:', rec_rank)
tid_rec = 'REC_RANK'
traj_dict_rec[tid_rec] = rec_rank
Explanation: 7.3 Recommendation by POI Rankings
Recommend trajectory based on the ranking of POIs using rankSVM.
End of explanation
def find_path(V, E, ps, pe, L, withNodeWeight=False, alpha=0.5):
assert(isinstance(V, pd.DataFrame))
assert(isinstance(E, pd.DataFrame))
assert(ps in V.index)
assert(pe in V.index)
# with sub-tours in trajectory, this is not the case any more, but it is nonsense to recommend such trajectories
assert(2 < L <= V.index.shape[0])
if withNodeWeight == True:
assert(0 < alpha < 1)
beta = 1 - alpha
A = pd.DataFrame(data=np.zeros((L-1, V.shape[0]), dtype=np.float), columns=V.index, index=np.arange(2, L+1))
B = pd.DataFrame(data=np.zeros((L-1, V.shape[0]), dtype=np.int), columns=V.index, index=np.arange(2, L+1))
A += np.inf
for v in V.index:
if v != ps:
if withNodeWeight == True:
A.loc[2, v] = alpha * (V.loc[ps, 'weight'] + V.loc[v, 'weight']) + beta * E.loc[ps, v] # ps--v
else:
A.loc[2, v] = E.loc[ps, v] # ps--v
B.loc[2, v] = ps
for l in range(3, L+1):
for v in V.index:
if withNodeWeight == True: # ps-~-v1---v
values = [A.loc[l-1, v1] + alpha * V.loc[v, 'weight'] + beta * E.loc[v1, v] for v1 in V.index]
else:
values = [A.loc[l-1, v1] + E.loc[v1, v] for v1 in V.index] # ps-~-v1---v
minix = np.argmin(values)
A.loc[l, v] = values[minix]
B.loc[l, v] = V.index[minix]
path = [pe]
v = path[-1]
l = L
#while v != ps: #incorrect if 'ps' happens to appear in the middle of a path
while l >= 2:
path.append(B.loc[l, v])
v = path[-1]
l -= 1
path.reverse()
return path
Explanation: 7.4 Recommendation by Transition Probabilities
Use dynamic programming to find a possibly non-simple path, i.e., a walk.
End of explanation
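As a quick sanity check of find_path, here is a call on a hypothetical 3-POI toy graph; the weights and edge costs below are made up purely for illustration and are not part of the dataset.
toy_V = pd.DataFrame({'weight': [1.0, 1.0, 1.0]}, index=[0, 1, 2])
toy_E = pd.DataFrame([[np.inf, 1.0, 5.0],
                      [1.0, np.inf, 1.0],
                      [5.0, 1.0, np.inf]], index=[0, 1, 2], columns=[0, 1, 2])
find_path(toy_V, toy_E, ps=0, pe=2, L=3)  # cheapest walk of length 3 from POI 0 to POI 2: [0, 1, 2]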
def find_path_ILP(V, E, ps, pe, L, withNodeWeight=False, alpha=0.5):
assert(isinstance(V, pd.DataFrame))
assert(isinstance(E, pd.DataFrame))
assert(ps in V.index)
assert(pe in V.index)
assert(2 < L <= V.index.shape[0])
if withNodeWeight == True:
assert(0 < alpha < 1)
beta = 1 - alpha
p0 = str(ps); pN = str(pe); N = V.index.shape[0]
# deal with np.inf which will cause ILP solver failure
Edges = E.copy()
INF = 1e6
for p in Edges.index:
Edges.loc[p, p] = INF
maxL = np.max(Edges.values.flatten())
if maxL > INF:
for p in Edges.index:
Edges.loc[p, p] = maxL
# REF: pythonhosted.org/PuLP/index.html
pois = [str(p) for p in V.index] # create a string list for each POI
pb = pulp.LpProblem('MostLikelyTraj', pulp.LpMinimize) # create problem
# visit_i_j = 1 means POI i and j are visited in sequence
visit_vars = pulp.LpVariable.dicts('visit', (pois, pois), 0, 1, pulp.LpInteger)
# a dictionary contains all dummy variables
dummy_vars = pulp.LpVariable.dicts('u', [x for x in pois if x != p0], 2, N, pulp.LpInteger)
# add objective
objlist = []
if withNodeWeight == True:
objlist.append(alpha * V.loc[int(p0), 'weight'])
for pi in [x for x in pois if x != pN]: # from
for pj in [y for y in pois if y != p0]: # to
if withNodeWeight == True:
objlist.append(visit_vars[pi][pj] * (alpha*V.loc[int(pj), 'weight'] + beta*Edges.loc[int(pi), int(pj)]))
else:
objlist.append(visit_vars[pi][pj] * Edges.loc[int(pi), int(pj)])
pb += pulp.lpSum(objlist), 'Objective'
# add constraints, each constraint should be in ONE line
pb += pulp.lpSum([visit_vars[p0][pj] for pj in pois if pj != p0]) == 1, 'StartAt_p0'
pb += pulp.lpSum([visit_vars[pi][pN] for pi in pois if pi != pN]) == 1, 'EndAt_pN'
if p0 != pN:
pb += pulp.lpSum([visit_vars[pi][p0] for pi in pois]) == 0, 'NoIncoming_p0'
pb += pulp.lpSum([visit_vars[pN][pj] for pj in pois]) == 0, 'NoOutgoing_pN'
pb += pulp.lpSum([visit_vars[pi][pj] for pi in pois if pi != pN for pj in pois if pj != p0]) == L-1, 'Length'
for pk in [x for x in pois if x not in {p0, pN}]:
pb += pulp.lpSum([visit_vars[pi][pk] for pi in pois if pi != pN]) == \
pulp.lpSum([visit_vars[pk][pj] for pj in pois if pj != p0]), 'ConnectedAt_' + pk
pb += pulp.lpSum([visit_vars[pi][pk] for pi in pois if pi != pN]) <= 1, 'Enter_' + pk + '_AtMostOnce'
pb += pulp.lpSum([visit_vars[pk][pj] for pj in pois if pj != p0]) <= 1, 'Leave_' + pk + '_AtMostOnce'
for pi in [x for x in pois if x != p0]:
for pj in [y for y in pois if y != p0]:
pb += dummy_vars[pi] - dummy_vars[pj] + 1 <= (N - 1) * (1 - visit_vars[pi][pj]), \
'SubTourElimination_' + pi + '_' + pj
#pb.writeLP("traj_tmp.lp")
# solve problem
pb.solve(pulp.PULP_CBC_CMD(options=['-threads', '6', '-strategy', '1', '-maxIt', '2000000'])) # CBC
#gurobi_options = [('TimeLimit', '7200'), ('Threads', '8'), ('NodefileStart', '0.9'), ('Cuts', '2')]
#pb.solve(pulp.GUROBI_CMD(options=gurobi_options)) # GUROBI
visit_mat = pd.DataFrame(data=np.zeros((len(pois), len(pois)), dtype=np.float), index=pois, columns=pois)
for pi in pois:
for pj in pois: visit_mat.loc[pi, pj] = visit_vars[pi][pj].varValue
# build the recommended trajectory
recseq = [p0]
while True:
pi = recseq[-1]
pj = visit_mat.loc[pi].idxmax()
assert(round(visit_mat.loc[pi, pj]) == 1)
recseq.append(pj);
#print(recseq); sys.stdout.flush()
if pj == pN: return [int(x) for x in recseq]
poi_logtransmat = np.log(gen_poi_transmat(trajid_list_train, set(poi_info.index), traj_dict, poi_info))
nodes = poi_info.copy()
edges = poi_logtransmat.copy()
edges = -1 * edges # edge weight is negative log of transition probability
rec_dp = find_path(nodes, edges, start, end, length) # DP
rec_ilp = find_path_ILP(nodes, edges, start, end, length) # ILP
print('REC_DP:', rec_dp)
print('REC_ILP:', rec_ilp)
tid_rec = 'REC_DP'
traj_dict_rec[tid_rec] = rec_dp
tid_rec = 'REC_ILP'
traj_dict_rec[tid_rec] = rec_ilp
traj_dict_rec
fname = 'traj_rec.kml'
gen_kml_traj(fname, traj_dict_rec, poi_df)
Explanation: Use integer linear programming (ILP) to find a simple path.
End of explanation
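For reference, the dummy variables $u_i$ in find_path_ILP implement Miller-Tucker-Zemlin style subtour elimination: the constraints $u_i - u_j + 1 \le (N-1)(1 - x_{ij})$ for all POIs $i, j \ne p_0$, where $x_{ij}$ is the visit indicator, force the selected edges to form a single path from the start POI to the end POI rather than a shorter path plus disjoint cycles.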
tid = traj_all.loc[traj_all['trajLen'].idxmax(), 'trajID']
tid
traj1 = extract_traj_withloop(tid, visits)
print(traj1, 'length:', len(traj1))
traj2 = extract_traj(tid, traj_all)
print(traj2, 'length:', len(traj2))
Explanation: Example on Google maps:
- the light blue edges represent the real trajectory,
- the green edges represent the recommended trajectories based on POI popularity and POI rankings (the two recommendations are the same),
- the purple edges represent the recommended trajectories based on POI transition probabilities, using the Viterbi algorithm and ILP.
8. Disclaimer
8.1 Problems of Trajectory Construction
Problems of mapping photos to POIs according to their distance, i.e., within $200$ meters.
Problems of splitting consecutive POI visits on gaps of more than $8$ hours.
Problems of extracting trajectories from a sequence of POI visits such that no loops/subtours exist. (A minimal sketch of these construction rules is given after this section.)
8.2 Example of Terrible Trajectories
Choose the trajectory with the maximum number of POIs.
End of explanation
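To make the construction rules from Section 8.1 concrete, here is a minimal sketch of the kind of logic they describe. The function and column names below are hypothetical and chosen for illustration only; this is not the code that built the dataset.
def assign_photo_to_poi(photo_lon, photo_lat, poi_coords, max_dist_m=200):
    # poi_coords: array of shape (n_poi, 2) with (longitude, latitude) rows
    # naive equirectangular distance in metres, good enough at city scale
    dx = (poi_coords[:, 0] - photo_lon) * 111320 * np.cos(np.radians(photo_lat))
    dy = (poi_coords[:, 1] - photo_lat) * 110540
    dist = np.sqrt(dx**2 + dy**2)
    nearest = int(np.argmin(dist))
    return nearest if dist[nearest] <= max_dist_m else None

def split_visits(visit_df, gap_hours=8):
    # visit_df: one user's POI visits with a unix-seconds 'dateTaken' column, sorted by time;
    # a new trajectory starts whenever the gap to the previous visit exceeds gap_hours
    new_traj = visit_df['dateTaken'].diff() > gap_hours * 3600
    return [group for _, group in visit_df.groupby(new_traj.cumsum())]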
photo_df = pd.read_csv(fphotos, skipinitialspace=True)
photo_df.set_index('Photo_ID', inplace=True)
#photo_df.head()
photo_traj = visits[visits['trajID'] == tid]['photoID'].values
photo_tdf = photo_df.loc[photo_traj].copy()
photo_tdf.drop(photo_tdf.columns[-1], axis=1, inplace=True)
photo_tdf.drop('Accuracy', axis=1, inplace=True)
photo_tdf.sort_values(by='Timestamp', ascending=True, inplace=True)
visit_df = visits.copy()
visit_df.set_index('photoID', inplace=True)
photo_tdf['poiID'] = visit_df.loc[photo_tdf.index, 'poiID']
photo_tdf['poiName'] = poi_df.loc[photo_tdf['poiID'].values, 'Name'].values
photo_tdf.head()
Explanation: Extract the sequence of photos associated with this trajectory.
End of explanation
fname = 'photo_traj_df.csv'
photo_tdf.to_csv(fname, index=True)
Explanation: Save photos dataframe to CSV file.
End of explanation
fname = 'traj_photo_seq.kml'
ns = '{http://www.opengis.net/kml/2.2}'
k = kml.KML()
styid = 'edge_style' # colors in KML: aabbggrr, aa=00 is fully transparent
sty_edge = styles.Style(id=styid, styles=[styles.LineStyle(color='3fff0000', width=2)])
doc = kml.Document(ns, '1', 'Photo sequence of trajectory %d' % tid, description=None, styles=[sty_edge])
k.append(doc)
for j in range(photo_tdf.shape[0]-1):
p1, p2 = photo_tdf.index[j], photo_tdf.index[j+1]
poi1, poi2 = visit_df.loc[p1, 'poiID'], visit_df.loc[p2, 'poiID']
sid = 'Photo_POI%d_POI%d' % (poi1, poi2)
ext_dict = {'From Photo': str(p1), 'To Photo': str(p2)}
ext_data = kml.ExtendedData(elements=[kml.Data(name=x, value=ext_dict[x]) for x in sorted(ext_dict.keys())])
pm = kml.Placemark(ns, sid, sid, description=None, styleUrl='#' + styid, extended_data=ext_data)
pm.geometry = LineString([(photo_df.loc[x, 'Longitude'], photo_df.loc[x, 'Latitude']) for x in [p1, p2]])
doc.append(pm)
# Placemark for photos: import from csv file directly
# save to file
kmlstr = k.to_string(prettyprint=True)
with open(fname, 'w') as f:
f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
f.write(kmlstr)
print('write to', fname)
Explanation: Generate KML file with edges between consecutive photos.
End of explanation
fname = 'traj_terrible.kml'
gen_kml_traj(fname, {tid:traj2}, poi_df)
Explanation: And the trajectory extracted from it.
End of explanation |
11,363 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Add Multiple Layers
In this example, three Layers are added to a Map. Notice the draw order and default symbology for each.
For more information, run help(Layer)
Step1: Using default legends
Step2: Adding a Layer Selector | Python Code:
from cartoframes.auth import set_default_credentials
from cartoframes.viz import Map, Layer
set_default_credentials('cartoframes')
Map([
Layer('countries'),
Layer('global_power_plants'),
Layer('world_rivers')
])
Explanation: Add Multiple Layers
In this example, three Layers are added to a Map. Notice the draw order and default symbology for each.
For more information, run help(Layer)
End of explanation
from cartoframes.viz import default_legend
Map([
Layer('countries', legends=default_legend('Countries')),
Layer('global_power_plants', legends=default_legend('Global Power Plants')),
Layer('world_rivers', legends=default_legend('World Rivers'))
])
Explanation: Using default legends
End of explanation
from cartoframes.viz import default_legend
Map([
Layer('countries', title='Countries', legends=default_legend()),
Layer('global_power_plants', title='Global Power Plants', legends=default_legend()),
Layer('world_rivers', title='World Rivers', legends=default_legend())
], layer_selector=True)
Explanation: Adding a Layer Selector
End of explanation |
11,364 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Preparing the reads
[Loose et al] published their raw read files on ENA. This script uses four of these sets which contain reads of amplicons. These were processed using different "read until" scripts (or none at all), but that doesn't matter. What does matter is to get as much real reads as possible.
Step1: Load metadata for 4 datasets
Step2: The individual filenames will look like this
Step3: Merging alignment data
[Loose et al] provide all the intermediate data files necessary to recreate their figures. Among these, there are some alignment files in SAM format.
Because it doesn't make sense to classify complement sequences/matches in the Read Until context, we only use the "Template" strands.
Step4: Unfortunately filenames and sequence names tend to get a bit mangled when going from Fast5 to SAM, for various reasons. As of now, there is no particular convention for naming read files or naming the exported sequences. On the one hand I don't feel like it is a good idea to abuse filenames as character seperated database rows, on the other hand, using the unique read id from the Fast5 File isn't very human-friendly either.
To assign genomic coordinates to the reads, a regular expression extracts four numbers from the file name/query name making each read unique and matchable.
Step5: Visualizing the alignments
This is just a simple histogram showing where the "reference_start" values fall.
Step6: Processing the amplicons
[Loose et al] pooled 11 amplicons. Each read has to be assigned retroactively to one of these, represented by number from 0 to 10.
Step7: How many reads failed to be assigned?
Step8: Purge these
Step9: The number of viable reads is diminishing quickly. But this can't be helped.
How many reads longer than 500 bases are assigned to each amplicon?
Step10: Unfortunately some amplicons are severely underrepresented, with one going as low as 635 reads.
This is a big problem for dividing the data into training and test sets, because blindly sampling from total pool may skew this balance even further. The algorithms will then bias against the least represented amplicons to gain a bit of extra accuracy, which is not what we want. With ten times as much data we could balance both the training and the test set. As it is, I chose to balance the test set only, to get a more realistic view of the performance. My assumption is that, over multiple repetitions of amplification / library preparation and sequencing runs, the amplicons should be roughly equally distributed.
To balance the test set, 200 reads from each amplicon are chosen. This makes for a very weak test set. But again, this can't be helped at this point. | Python Code:
%load_ext autoreload
%autoreload 2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
import porekit
import re
import pysam
import random
import feather
%matplotlib inline
Explanation: Preparing the reads
[Loose et al] published their raw read files on ENA. This script uses four of these sets which contain reads of amplicons. These were processed using different "read until" scripts (or none at all), but that doesn't matter. What does matter is to get as much real reads as possible.
End of explanation
directories = ["AmpliconOddEvenControl", "AmpliconOddReadUntil", "AmpliconEvenReadUntil", "Balanced"]
meta_frames = []
for d in directories:
print(d)
df = porekit.gather_metadata("/home/andi/nanopore/%s" % d, workers=4)
print(df.shape[0])
meta_frames.append(df)
meta = pd.concat (meta_frames)
for df in meta_frames:
print (df.shape)
Explanation: Load metadata for 4 datasets
End of explanation
meta_frames[0].index.values[0]
meta_frames[1].index.values[0]
meta_frames[2].index.values[0]
meta_frames[3].index.values[0]
Explanation: The individual filenames will look like this:
End of explanation
def sam_to_dataframe(file_name):
sam = pysam.AlignmentFile(file_name)
records = []
for i, segment in enumerate(sam):
d = dict()
for k in ["query_name", "reference_start", "reference_end", "mapping_quality", ]:
d[k] = getattr(segment, k)
records.append(d)
alignments = pd.DataFrame.from_records(records)
return alignments
base = "/home/andi/nanopore/RUFigs/data"
bams = ["/fig3/RU_dudu/RU_dudu_Template.bam",
"/fig3/RU_udud/RU_udud_Template.bam",
"/fig3/NO_RU/NO_RU_Template.bam",
"/fig4/200/200_Template.bam",
]
alignments = pd.concat([sam_to_dataframe(base+file_name) for file_name in bams])
Explanation: Merging alignment data
[Loose et al] provide all the intermediate data files necessary to recreate their figures. Among these, there are some alignment files in SAM format.
Because it doesn't make sense to classify complement sequences/matches in the Read Until context, we only use the "Template" strands.
End of explanation
regexp = re.compile(r'_(?P<a>\d+)_(?P<b>\d+)_ch(?P<c>\d+)_file(?P<d>\d+)')
def extract(s):
try:
return "_".join(regexp.search(s).groups())
except:
return ""
alignments["alignment_key"] = alignments.query_name.map(extract)
meta["alignment_key"] = meta.index.map(extract)
alignments["alignment_key"].map(lambda s: s.split("_")[0]).unique()
meta["run_number"] = meta["alignment_key"].map(lambda s: s.split("_")[0])
meta2 = meta.reset_index().merge(alignments).set_index("filename")
meta2.shape
meta = meta2
Explanation: Unfortunately filenames and sequence names tend to get a bit mangled when going from Fast5 to SAM, for various reasons. As of now, there is no particular convention for naming read files or naming the exported sequences. On the one hand I don't feel like it is a good idea to abuse filenames as character seperated database rows, on the other hand, using the unique read id from the Fast5 File isn't very human-friendly either.
To assign genomic coordinates to the reads, a regular expression extracts four numbers from the file name/query name making each read unique and matchable.
End of explanation
f, ax = plt.subplots()
f.set_figwidth(13)
ax.hist(meta.reference_start, bins=110);
Explanation: Visualizing the alignments
This is just a simple histogram showing where the "reference_start" values fall.
End of explanation
amplicons = [(52,1980),
(2065,3965),
(4070,5989),
(6059,7981),
(8012,9947),
(10008,11963),
(12006,13941),
(14011,15945),
(16076,17987),
(18022,19972),
(20053,21979),
]
def amplicon_from_position(pos):
for i,c in enumerate(amplicons):
a,b = c
if a<=pos<=b:
return i
meta["amplicon"] = meta.reference_start.map(amplicon_from_position)
Explanation: Processing the amplicons
[Loose et al] pooled 11 amplicons. Each read has to be assigned retroactively to one of these, represented by a number from 0 to 10.
End of explanation
meta.amplicon.isnull().sum()
Explanation: How many reads failed to be assigned?
End of explanation
meta = meta[np.isnan(meta.amplicon)==False]
meta.shape
Explanation: Purge these:
End of explanation
meta.query("template_length>500").groupby("amplicon").format.count()
Explanation: The number of viable reads is diminishing quickly. But this can't be helped.
How many reads longer than 500 bases are assigned to each amplicon?
End of explanation
sufficient = meta.query("template_length>=500")
all_files = sufficient.index.values
test_files = []
for i in range(11):
sub = sufficient[sufficient.amplicon==i]
    test_files += list(np.random.choice(sub.index.values, 200, replace=False))  # sample without replacement so the 200 reads are distinct
training_files = list(set(sufficient.index.values) - set(test_files))
len(training_files), len(test_files)
test_data = sufficient.ix[np.array(test_files)]
feather.write_dataframe(test_data, "amplicon_test_metadata.feather")
training_data = sufficient.ix[np.array(training_files)]
feather.write_dataframe(training_data, "amplicon_training_metadata.feather")
Explanation: Unfortunately some amplicons are severely underrepresented, with one going as low as 635 reads.
This is a big problem for dividing the data into training and test sets, because blindly sampling from total pool may skew this balance even further. The algorithms will then bias against the least represented amplicons to gain a bit of extra accuracy, which is not what we want. With ten times as much data we could balance both the training and the test set. As it is, I chose to balance the test set only, to get a more realistic view of the performance. My assumption is that, over multiple repetitions of amplification / library preparation and sequencing runs, the amplicons should be roughly equally distributed.
To balance the test set, 200 reads from each amplicon are chosen. This makes for a very weak test set. But again, this can't be helped at this point.
End of explanation |
11,365 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Step1: Script to Process the Sensor Readings - ProcessSensorReadings.py
Overview
Step2: The "writeLumbarReadings" method takes the rdd received from Spark Streaming as an input. It then extracts the JSON data and converts to a SQLContext dataframe.
After this it creates a new column in the dataframe that contains the "feature vector" that will be used to predict the posture.
The prediction process uses a model that was created and saved previously. It uses the feature vector to predict the posture.
Finally, the extra feature column is dropped and the final dataframe is inserted into the MySQL database using JDBC.
Step3: The "writeLumbarTrainingReadings" method also accepts an RDD from Spark Streaming but does not need to do any machine learning processing since we already know the posture from the JSON data.
Readings are simply transformed to a SQLContext dataframe and then inserted into the MySQL training readings table.
Step4: In the main part of the script the machine learning model is loaded and then two Spark StreamingContexts are created to listen for either actual device readings or training readings. The appropriate methods are then called upon receipt. | Python Code:
import json
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.functions import explode
from pyspark.ml.feature import VectorAssembler
from pyspark.mllib.tree import RandomForest, RandomForestModel
#custom modules
import MySQLConnection
IMPORTANT: MUST use class paths when using spark-submit
$SPARK_HOME/bin/spark-submit --packages org.apache.spark:spark-streaming-kafka_2.10:1.6.2,mysql:mysql-connector-java:5.1.28 ProcessSensorReadings.py
Explanation: Script to Process the Sensor Readings - ProcessSensorReadings.py
Overview:
This Script uses Spark Streaming to read Kafka topics as they come in and then insert them into a MySQL database. There are two main methods:
Read actual sensor readings: Kafka Topic (LumbarSensorReadings) -> writeLumbarReadings -> MySQL table: SensorReadings
Read Training sensor readings: Kafka Topic (LumbarSensorTrainingReadings) -> writeLumbarTrainingReadings -> MySQL table: SensorTrainingReadings
This script requires the JDBC Driver in order to connect to to a MySQL database.
End of explanation
def writeLumbarReadings(time, rdd):
try:
# Convert RDDs of the words DStream to DataFrame and run SQL query
connectionProperties = MySQLConnection.getDBConnectionProps('/home/erik/mysql_credentials.txt')
sqlContext = SQLContext(rdd.context)
if rdd.isEmpty() == False:
lumbarReadings = sqlContext.jsonRDD(rdd)
lumbarReadingsIntermediate = lumbarReadings.selectExpr("readingID","readingTime","deviceID","metricTypeID","uomID","actual.y AS actualYaw","actual.p AS actualPitch","actual.r AS actualRoll","setPoints.y AS setPointYaw","setPoints.p AS setPointPitch","setPoints.r AS setPointRoll")
assembler = VectorAssembler(
inputCols=["actualPitch"], # Must be in same order as what was used to train the model. Testing using only pitch since model has limited dataset.
outputCol="features")
lumbarReadingsIntermediate = assembler.transform(lumbarReadingsIntermediate)
predictions = loadedModel.predict(lumbarReadingsIntermediate.map(lambda x: x.features))
predictionsDF = lumbarReadingsIntermediate.map(lambda x: x.readingID).zip(predictions).toDF(["readingID","positionID"])
combinedDF = lumbarReadingsIntermediate.join(predictionsDF, lumbarReadingsIntermediate.readingID == predictionsDF.readingID).drop(predictionsDF.readingID)
combinedDF = combinedDF.drop("features")
combinedDF.show()
combinedDF.write.jdbc("jdbc:mysql://localhost/biosensor", "SensorReadings", properties=connectionProperties)
except:
pass
Explanation: The "writeLumbarReadings" method takes the rdd received from Spark Streaming as an input. It then extracts the JSON data and converts to a SQLContext dataframe.
After this it creates a new column in the dataframe that contains the "feature vector" that will be used to predict the posture.
The prediction process uses a model that is created and saved previously. it uses the feature vector to predict the posture.
Finally, the extra feature column is dropped and the final dataframe is inserted into the MySQL database using JDBC.
End of explanation
def writeLumbarTrainingReadings(time, rddTraining):
try:
# Convert RDDs of the words DStream to DataFrame and run SQL query
connectionProperties = MySQLConnection.getDBConnectionProps('/home/erik/mysql_credentials.txt')
sqlContext = SQLContext(rddTraining.context)
if rddTraining.isEmpty() == False:
lumbarTrainingReading = sqlContext.jsonRDD(rddTraining)
lumbarTrainingReadingFinal = lumbarTrainingReading.selectExpr("deviceID","metricTypeID","uomID","positionID","actual.y AS actualYaw","actual.p AS actualPitch","actual.r AS actualRoll","setPoints.y AS setPointYaw","setPoints.p AS setPointPitch","setPoints.r AS setPointRoll")
lumbarTrainingReadingFinal.write.jdbc("jdbc:mysql://localhost/biosensor", "SensorTrainingReadings", properties=connectionProperties)
except:
pass
Explanation: The "writeLumbarTrainingReadings" method also accepts an RDD from Spark Streaming but does not need to do any machine learning processing since we already know the posture from the JSON data.
Readings are simply transformed to a SQLContext dataframe and then inserted into the MySQL training readings table.
End of explanation
if __name__ == "__main__":
sc = SparkContext(appName="Process Lumbar Sensor Readings")
ssc = StreamingContext(sc, 2) # 2 second batches
loadedModel = RandomForestModel.load(sc, "../machine_learning/models/IoTBackBraceRandomForest.model")
#Process Readings
streamLumbarSensor = KafkaUtils.createDirectStream(ssc, ["LumbarSensorReadings"], {"metadata.broker.list": "localhost:9092"})
lineSensorReading = streamLumbarSensor.map(lambda x: x[1])
lineSensorReading.foreachRDD(writeLumbarReadings)
#Process Training Readings
streamLumbarSensorTraining = KafkaUtils.createDirectStream(ssc, ["LumbarSensorTrainingReadings"], {"metadata.broker.list": "localhost:9092"})
lineSensorTrainingReading = streamLumbarSensorTraining.map(lambda x: x[1])
lineSensorTrainingReading.foreachRDD(writeLumbarTrainingReadings)
# Run and then wait for termination signal
ssc.start()
ssc.awaitTermination()
Explanation: In the main part of the script the machine learning model is loaded and then two Spark StreamingContexts are created to listen for either actual device readings or training readings. The appropriate methods are then called upon receipt.
End of explanation |
11,366 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
JupyterWorkflow
From exploratory analysis to reproducible research
Mehmetcan Budak
Step1: Look for Annual Trend; growth-decline over ridership
Let's try a rolling window. Over 365 days rolling sum
Step2: They don't go all the way to zero so let's set the y lenght to zero to none. current maxima.
Step3: There seems to be a offset between the left and right sidewalk. Let's plot them. See their trends.
Step4: Somehow the east and west side trends are reversed so the total bike rides across the bridge hover around 1 million and pretty accurent over the last couple of years +- couple percent.
Let's group by time of day and let's take it's mean and plot it.
Step5: Let's see the whole data set in this way not just this average. We will do a pivot table.
Step6: We now have a 2d data set. Each column is a day and each row is an hour during that day.
Let's take legend off and plot it.
Step7: Let's reduce transparency to see better. | Python Code:
URL = "https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD"
from urllib.request import urlretrieve
urlretrieve(URL, "Fremont.csv")
!head Fremont.csv
import pandas as pd
data = pd.read_csv("Fremont.csv")
data.head()
data = pd.read_csv("Fremont.csv", index_col="Date", parse_dates=True)
data.head()
%matplotlib inline
data.index = pd.to_datetime(data.index)
data.plot()
data.resample('W').sum().plot()
import matplotlib.pyplot as plt
plt.style.use("seaborn")
data.resample("W").sum().plot()
data.columns = ["West", "East"]
data.resample("W").sum().plot()
Explanation: JupyterWorkflow
From exploratory analysis to reproducible research
Mehmetcan Budak
End of explanation
data.resample("D").sum().rolling(365).sum().plot()
Explanation: Look for Annual Trend; growth-decline over ridership
Let's try a rolling window. Over 365 days rolling sum
End of explanation
ax = data.resample("D").sum().rolling(365).sum().plot()
ax.set_ylim(0, None)
Explanation: They don't go all the way to zero, so let's set the y-axis lower limit to zero and leave the upper limit as None (the current maximum).
End of explanation
data["Total"] = data["West"] + data["East"]
ax = data.resample("D").sum().rolling(365).sum().plot()
ax.set_ylim(0, None)
Explanation: There seems to be an offset between the left and right sidewalks. Let's plot them and see their trends.
End of explanation
data.groupby(data.index.time).mean().plot()
Explanation: Somehow the east and west side trends are reversed relative to each other, so the total number of bike rides across the bridge hovers around 1 million and stays pretty constant over the last couple of years, within a couple of percent.
Let's group by time of day, take its mean and plot it.
End of explanation
pivoted = data.pivot_table("Total", index=data.index.time, columns=data.index.date)
pivoted.iloc[:5, :5]
Explanation: Let's see the whole data set in this way not just this average. We will do a pivot table.
End of explanation
pivoted.plot(legend=False)
Explanation: We now have a 2D data set. Each column is a day and each row is an hour during that day.
Let's turn the legend off and plot it.
End of explanation
pivoted.plot(legend=False,alpha=0.01)
Explanation: Let's lower the opacity (alpha) of the lines to see the overall pattern better.
End of explanation |
11,367 | Given the following text description, write Python code to implement the functionality described.
Description:
Count composite fibonacci numbers from given array
Python3 program to implement the above approach ; Function to find all Fibonacci numbers up to Max ; Store all Fibonacci numbers upto Max ; Stores previous element of Fibonacci sequence ; Stores previous element of Fibonacci sequence ; Insert prev into hashmap ; Insert all the Fibonacci numbers up to Max ; Insert curr into hashmap ; Stores curr into temp ; Update curr ; Update prev ; Function to find all Composite numbers up to Max ; isPrime [ i ] : Stores if i is a prime number or not ; Calculate all prime numbers up to Max using Sieve of Eratosthenes ; If P is a prime number ; Set all multiple of P as non - prime ; Function to find the numbers which is both a composite and Fibonacci number ; Stores the largest element of the array ; Traverse the array arr [ ] ; Update Max ; isPrim [ i ] check i is a prime number or not ; Stores all the Fibonacci numbers ; Traverse the array arr [ ] ; Current element is not a composite number ; If current element is a Fibonacci and composite number ; Print current element ; Driver Code
| Python Code:
import math
def createhashmap(Max ) :
hashmap = { ""}
curr = 1
prev = 0
hashmap . add(prev )
while(curr <= Max ) :
hashmap . add(curr )
temp = curr
curr = curr + prev
prev = temp
return hashmap
def SieveOfEratosthenes(Max ) :
isPrime =[1 for x in range(Max + 1 ) ]
isPrime[0 ] = 0
isPrime[1 ] = 0
    for p in range(2 , int(math . sqrt(Max ) ) + 1 ) :
if(isPrime[p ] ) :
            for i in range(p * p , Max + 1 , p ) :
isPrime[i ] = 0
return isPrime
def cntFibonacciPrime(arr , N ) :
Max = arr[0 ]
for i in range(0 , N ) :
Max = max(Max , arr[i ] )
isPrime = SieveOfEratosthenes(Max )
hashmap = createhashmap(Max )
for i in range(0 , N ) :
if arr[i ] == 1 :
continue
if(( arr[i ] in hashmap ) and(not(isPrime[arr[i ] ] ) ) ) :
            print(arr[i ] , end = " ")
arr =[13 , 55 , 7 , 3 , 5 , 21 , 233 , 144 , 89 ]
N = len(arr )
cntFibonacciPrime(arr , N )
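# A quick check of the driver above : among the array elements only 55 , 21 and 144
# are Fibonacci numbers that are also composite , so the expected output is : 55 21 144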
|
11,368 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
In this tutorial we will show how to access and navigate the Iteration/Expression Tree (IET) rooted in an Operator.
Part I - Top Down
Let's start with a fairly trivial example. First of all, we disable all performance-related optimizations, to maximize the simplicity of the created IET as well as the readability of the generated code.
Step1: Then, we create a TimeFunction with 3 points in each of the space Dimensions x and y.
Step2: We now create an Operator that increments by 1 all points in the computational domain.
Step3: An Operator is an IET node that can generate, JIT-compile, and run low-level code (e.g., C). Just like all other types of IET nodes, it's got a number of metadata attached. For example, we can query an Operator to retrieve the input/output Functions.
Step4: If we print op, we can see how the generated code looks like.
Step5: An Operator is the root of an IET that typically consists of several nested Iterations and Expressions – two other fundamental IET node types. The user-provided SymPy equations are wrapped within Expressions. Loop nest embedding such expressions are constructed by suitably nesting Iterations.
The Devito compiler constructs the IET from a collection of Clusters, which represent a higher-level intermediate representation (not covered in this tutorial).
The Devito compiler also attaches to the IET key computational properties, such as sequential, parallel, and affine, which are derived through data dependence analysis.
We can print the IET structure of an Operator, as well as the attached computational properties, using the utility function pprint.
Step6: In this example, op is represented as a <Callable Kernel>. Attached to it are metadata, such as _headers and _includes, as well as the body, which includes the children IET nodes. Here, the body is the concatenation of an PointerCast and a List object.
Step7: We can explicitly traverse the body until we locate the user-provided SymPy equations.
Step8: Below we access the Iteration representing the time loop.
Step9: We can for example inspect the Iteration to discover what its iteration bounds are.
Step10: And as we keep going down through the IET, we can eventually reach the Expression wrapping the user-provided SymPy equation.
Step11: Of course, there are mechanisms in place to, for example, find all Expressions in a given IET. The Devito compiler has a number of IET visitors, among which FindNodes, usable to retrieve all nodes of a particular type. So we easily
can get all Expressions within op as follows | Python Code:
from devito import configuration
configuration['opt'] = 'noop'
configuration['language'] = 'C'
Explanation: In this tutorial we will show how to access and navigate the Iteration/Expression Tree (IET) rooted in an Operator.
Part I - Top Down
Let's start with a fairly trivial example. First of all, we disable all performance-related optimizations, to maximize the simplicity of the created IET as well as the readability of the generated code.
End of explanation
from devito import Grid, TimeFunction
grid = Grid(shape=(3, 3))
u = TimeFunction(name='u', grid=grid)
Explanation: Then, we create a TimeFunction with 3 points in each of the space Dimensions x and y.
End of explanation
from devito import Eq, Operator
eq = Eq(u.forward, u+1)
op = Operator(eq)
Explanation: We now create an Operator that increments by 1 all points in the computational domain.
End of explanation
op.input
op.output
Explanation: An Operator is an IET node that can generate, JIT-compile, and run low-level code (e.g., C). Just like all other types of IET nodes, it's got a number of metadata attached. For example, we can query an Operator to retrieve the input/output Functions.
End of explanation
print(op)
Explanation: If we print op, we can see how the generated code looks like.
End of explanation
from devito.tools import pprint
pprint(op)
Explanation: An Operator is the root of an IET that typically consists of several nested Iterations and Expressions – two other fundamental IET node types. The user-provided SymPy equations are wrapped within Expressions. Loop nests embedding such expressions are constructed by suitably nesting Iterations.
The Devito compiler constructs the IET from a collection of Clusters, which represent a higher-level intermediate representation (not covered in this tutorial).
The Devito compiler also attaches to the IET key computational properties, such as sequential, parallel, and affine, which are derived through data dependence analysis.
We can print the IET structure of an Operator, as well as the attached computational properties, using the utility function pprint.
End of explanation
op._headers
op._includes
op.body
Explanation: In this example, op is represented as a <Callable Kernel>. Attached to it are metadata, such as _headers and _includes, as well as the body, which includes the children IET nodes. Here, the body is the concatenation of an PointerCast and a List object.
End of explanation
print(op.body[0]) # Printing the PointerCast
print(op.body[1]) # Printing the List
Explanation: We can explicitly traverse the body until we locate the user-provided SymPy equations.
End of explanation
t_iter = op.body[1].body[0]
t_iter
Explanation: Below we access the Iteration representing the time loop.
End of explanation
t_iter.limits
Explanation: We can for example inspect the Iteration to discover what its iteration bounds are.
End of explanation
expr = t_iter.nodes[0].body[0].body[0].nodes[0].nodes[0].body[0]
expr.view
Explanation: And as we keep going down through the IET, we can eventually reach the Expression wrapping the user-provided SymPy equation.
End of explanation
from devito.ir.iet import Expression, FindNodes
exprs = FindNodes(Expression).visit(op)
exprs[0].view
Explanation: Of course, there are mechanisms in place to, for example, find all Expressions in a given IET. The Devito compiler has a number of IET visitors, among which FindNodes, which can be used to retrieve all nodes of a particular type. So we easily
can get all Expressions within op as follows
End of explanation |
11,369 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
1. Loading file and basic information
Step1: Print some basic information about the grid
Step2: List all fields in this plot file
Step3: List all derived fields in the plot file
Step4: What is the size of the domain?
Step5: Where is the center of the domain?
This is the coordinate of the central point
Step6: Excersice
What is the coordinate of the left edge of the domain?
(hint
Step7: The box is probably too big. We should zoom in to the center.
Step8: That's much better. We can see a bubble is being drilled by the jets.
We can also slice through a different axis.
Step9: We can also slice at a different location.
Step10: Excersice
What do the slices of temperature and pressure look like?
How to visualize the velocity (which is a vector)
(Refer to ds.derived_field_list for all available properties)
We can show the magnitude of the velocity
Step11: Or just one component of the velocity
(velocity_z in this example)
Step12: Maybe we can change the color map for a better representation of the positive and negative values
See yt colormap or matplotlib colormap reference for all available colormpas.
Step13: We see positive velocity_z (going toward right in this case) in red and negative (going toward left) in blue
We can put some arrow to indicate the direction of the flow
Step14: The velocity in the jet is too large compared to other region. Let's focus at some other region away from the center.
Step15: We need to specify the range of the velocity so that the 0 velocity will be in the middle.
(The velocity of the jet is 0.2c = 0.2*3E10 cm = 6E9 cm.)
Step16: 3. Inspect Grid Structure
We can show the grid boundary in a slice plot
Step17: In this simulation, each box (which is called grid) represents 8x8x8 cells that are the most basic resolution elements.
We can zoom in to see the cells inside a grid. | Python Code:
import yt
ds = yt.load('/home/ychen/d9/2018_production_runs/20180802_L438_rc10_beta07/data/Group_L438_hdf5_plt_cnt_0100')
print(ds.parameters['run_comment'])
Explanation: 1. Loading file and basic information
End of explanation
ds.print_stats()
Explanation: Print some basic information about the grid
End of explanation
ds.field_list
Explanation: List all fields in this plot file
End of explanation
ds.derived_field_list
Explanation: List all derived fields in the plot file
End of explanation
print(ds.domain_width)
# in different units
print(ds.domain_width.in_units('Mpc'))
print(ds.domain_width.in_units('ly'))
print(ds.domain_width.in_units('cm'))
Explanation: What is the size of the domain?
End of explanation
print(ds.domain_center)
Explanation: Where is the center of the domain?
This is the coordinate of the central point
End of explanation
slc = yt.SlicePlot(ds, normal='y', fields='density')
slc.show()
Explanation: Exercise
What is the coordinate of the left edge of the domain?
(hint: use tab to auto-complete and list possible options)
2. Let's do some visualizations
Slice through the center of the simulation box and show the density
End of explanation
slc.zoom(32)
slc.show()
Explanation: The box is probably too big. We should zoom in to the center.
End of explanation
slc = yt.SlicePlot(ds, normal='z', fields='density')
slc.zoom(32)
slc.show()
Explanation: That's much better. We can see a bubble is being drilled by the jets.
We can also slice through a different axis.
End of explanation
slc = yt.SlicePlot(ds, normal='z', fields='density', center=([0,0,10], 'kpc'))
slc.zoom(32)
slc.show()
Explanation: We can also slice at a different location.
End of explanation
slc = yt.SlicePlot(ds, normal='y', fields='velocity_magnitude')
slc.zoom(32)
slc.show()
Explanation: Exercise
What do the slices of temperature and pressure look like?
How to visualize the velocity (which is a vector)
(Refer to ds.derived_field_list for all available properties)
We can show the magnitude of the velocity
End of explanation
slc = yt.SlicePlot(ds, normal='y', fields='velocity_z')
slc.zoom(32)
slc.show()
Explanation: Or just one component of the velocity
(velocity_z in this example)
End of explanation
slc.set_cmap('velocity_z', 'seismic')
slc.show()
Explanation: Maybe we can change the color map for a better representation of the positive and negative values
See the yt colormap or matplotlib colormap reference for all available colormaps.
End of explanation
slc = yt.SlicePlot(ds, normal='y', fields='velocity_z')
slc.zoom(32)
slc.set_cmap('velocity_z', 'seismic')
slc.annotate_velocity()
slc.show()
Explanation: We see positive velocity_z (going toward right in this case) in red and negative (going toward left) in blue
We can put some arrow to indicate the direction of the flow
End of explanation
slc = yt.SlicePlot(ds, normal='y', fields='velocity_z', center=([0,0,10], 'kpc'), width=(10, 'kpc'))
slc.set_cmap('velocity_z', 'seismic')
slc.annotate_velocity()
slc.show()
Explanation: The velocity in the jet is too large compared to other region. Let's focus at some other region away from the center.
End of explanation
slc.set_zlim('velocity_z', -6E9, 6E9)
Explanation: We need to specify the range of the velocity so that the 0 velocity will be in the middle.
(The velocity of the jet is 0.2c = 0.2*3E10 cm = 6E9 cm.)
End of explanation
slc = yt.SlicePlot(ds, normal='y', fields='density', width=(40, 'kpc'))
slc.annotate_grids()
slc.show()
Explanation: 3. Inspect Grid Structure
We can show the grid boundary in a slice plot
End of explanation
slc = yt.SlicePlot(ds, normal='y', fields='density', width=(1, 'kpc'), center=([0,0,10], 'kpc'))
slc.annotate_grids()
slc.show()
Explanation: In this simulation, each box (which is called grid) represents 8x8x8 cells that are the most basic resolution elements.
We can zoom in to see the cells inside a grid.
End of explanation |
11,370 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
This notebook shows how BigBang can help you analyze the senders in a particular mailing list archive.
First, use this IPython magic to tell the notebook to display matplotlib graphics inline. This is a nice way to display results.
Step1: Import the BigBang modules as needed. These should be in your Python environment if you've installed BigBang correctly.
Step2: Also, let's import a number of other dependencies we'll use later.
Step3: Now let's load the data for analysis.
Step4: This variable is for the range of days used in computing rolling averages.
Now, let's see
Step5: This might be useful for seeing the distribution (does the top message sender dominate?) or for identifying key participants to talk to.
Many mailing lists will have some duplicate senders
Step6: For this still naive measure (edit distance on a normalized string), it appears that there are many duplicates in the <10 range, but that above that the edit distance of short email addresses at common domain names can take over.
Step7: We can create the same color plot with the consolidated dataframe to see how the distribution has changed.
Step8: Of course, there are still some duplicates, mostly people who are using the same name, but with a different email address at an unrelated domain name.
How does our consolidation affect the graph of distribution of senders?
Step9: Okay, not dramatically different, but the consolidation makes the head heavier. There are more people close to that high end, a stronger core group and less a power distribution smoothly from one or two people.
We could also use sender email addresses as a naive inference for affiliation, especially for mailing lists where corporate/organizational email addresses are typically used.
Step10: Pandas lets us group by the results of a keying function, which we can use to group participants sending from email addresses with the same domain.
Step11: We can also aggregate the number of messages that come from addresses at each domain. | Python Code:
%matplotlib inline
Explanation: This notebook shows how BigBang can help you analyze the senders in a particular mailing list archive.
First, use this IPython magic to tell the notebook to display matplotlib graphics inline. This is a nice way to display results.
End of explanation
import bigbang.mailman as mailman
import bigbang.graph as graph
import bigbang.process as process
from bigbang.parse import get_date
reload(process)
Explanation: Import the BigBang modules as needed. These should be in your Python environment if you've installed BigBang correctly.
End of explanation
import pandas as pd
import datetime
import matplotlib.pyplot as plt
import numpy as np
import math
import pytz
import pickle
import os
pd.options.display.mpl_style = 'default' # pandas has a set of preferred graph formatting options
Explanation: Also, let's import a number of other dependencies we'll use later.
End of explanation
urls = ["http://www.ietf.org/mail-archive/text/ietf-privacy/",
"http://lists.w3.org/Archives/Public/public-privacy/"]
mlists = [mailman.open_list_archives(url,"../archives") for url in urls]
activities = [process.activity(ml) for ml in mlists]
Explanation: Now let's load the data for analysis.
End of explanation
a = activities[1] # for the first mailing list
ta = a.sum(0) # sum along the first axis
ta.sort()
ta[-10:].plot(kind='barh', width=1)
Explanation: This variable is for the range of days used in computing rolling averages.
Now, let's see: who are the authors of the most messages to one particular list?
End of explanation
levdf = process.sorted_lev(a) # creates a slightly more nuanced edit distance matrix
# and sorts by rows/columns that have the best candidates
levdf_corner = levdf.iloc[:25,:25] # just take the top 25
fig = plt.figure(figsize=(15, 12))
plt.pcolor(levdf_corner)
plt.yticks(np.arange(0.5, len(levdf_corner.index), 1), levdf_corner.index)
plt.xticks(np.arange(0.5, len(levdf_corner.columns), 1), levdf_corner.columns, rotation='vertical')
plt.colorbar()
plt.show()
Explanation: This might be useful for seeing the distribution (does the top message sender dominate?) or for identifying key participants to talk to.
Many mailing lists will have some duplicate senders: individuals who use multiple email addresses or are recorded as different senders when using the same email address. We want to identify those potential duplicates in order to get a more accurate representation of the distribution of senders.
To begin with, let's calculate the similarity of the From strings, based on the Levenshtein distance.
End of explanation
consolidates = []
# gather pairs of names which have a distance of less than 10
for col in levdf.columns:
for index, value in levdf.loc[levdf[col] < 10, col].iteritems():
if index != col: # the name shouldn't be a pair for itself
consolidates.append((col, index))
print str(len(consolidates)) + ' candidates for consolidation.'
c = process.consolidate_senders_activity(a, consolidates)
print 'We removed: ' + str(len(a.columns) - len(c.columns)) + ' columns.'
Explanation: For this still naive measure (edit distance on a normalized string), it appears that there are many duplicates in the <10 range, but that above that the edit distance of short email addresses at common domain names can take over.
End of explanation
lev_c = process.sorted_lev(c)
levc_corner = lev_c.iloc[:25,:25]
fig = plt.figure(figsize=(15, 12))
plt.pcolor(levc_corner)
plt.yticks(np.arange(0.5, len(levc_corner.index), 1), levc_corner.index)
plt.xticks(np.arange(0.5, len(levc_corner.columns), 1), levc_corner.columns, rotation='vertical')
plt.colorbar()
plt.show()
Explanation: We can create the same color plot with the consolidated dataframe to see how the distribution has changed.
End of explanation
fig, axes = plt.subplots(nrows=2, figsize=(15, 12))
ta = a.sum(0) # sum along the first axis
ta.sort()
ta[-20:].plot(kind='barh',ax=axes[0], width=1, title='Before consolidation')
tc = c.sum(0)
tc.sort()
tc[-20:].plot(kind='barh',ax=axes[1], width=1, title='After consolidation')
plt.show()
Explanation: Of course, there are still some duplicates, mostly people who are using the same name, but with a different email address at an unrelated domain name.
How does our consolidation affect the graph of distribution of senders?
End of explanation
reload(process)
Explanation: Okay, not dramatically different, but the consolidation makes the head heavier. There are more people close to that high end, a stronger core group and less a power distribution smoothly from one or two people.
We could also use sender email addresses as a naive inference for affiliation, especially for mailing lists where corporate/organizational email addresses are typically used.
End of explanation
grouped = tc.groupby(process.domain_name_from_email)
domain_groups = grouped.size()
domain_groups.sort(ascending=True)
domain_groups[-20:].plot(kind='barh', width=1, title="Number of participants at domain")
Explanation: Pandas lets us group by the results of a keying function, which we can use to group participants sending from email addresses with the same domain.
End of explanation
domain_messages_sum = grouped.sum()
domain_messages_sum.sort(ascending=True)
domain_messages_sum[-20:].plot(kind='barh', width=1, title="Number of messages from domain")
Explanation: We can also aggregate the number of messages that come from addresses at each domain.
End of explanation |
11,371 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Indexing and Selection
| Operation | Syntax | Result |
|-------------------------------|----------------|-----------|
| Select column | df[col] | Series |
| Select row by label | df.loc[label] | Series |
| Select row by integer | df.iloc[loc] | Series |
| Select rows | df[start
Step1: selection using dictionary-like string
Step2: list of strings as index (note
Step3: select row using integer index
Step4: select rows using integer slice
Step5: + is over-loaded as concatenation operator
Step6: Data alignment and arithmetic
Data alignment between DataFrame objects automatically align on both the columns and the index (row labels).
Note locations for 'NaN'
Step7: Boolean indexing
Step8: first select rows in column B whose values are less than zero
then, include information for all columns in that row in the resulting data set
Step9: isin function
Step10: where function | Python Code:
import pandas as pd
import numpy as np
produce_dict = {'veggies': ['potatoes', 'onions', 'peppers', 'carrots'],'fruits': ['apples', 'bananas', 'pineapple', 'berries']}
produce_df = pd.DataFrame(produce_dict)
produce_df
Explanation: Indexing and Selection
| Operation | Syntax | Result |
|-------------------------------|----------------|-----------|
| Select column | df[col] | Series |
| Select row by label | df.loc[label] | Series |
| Select row by integer | df.iloc[loc] | Series |
| Select rows | df[start:stop] | DataFrame |
| Select rows with boolean mask | df[mask] | DataFrame |
documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html
End of explanation
produce_df['fruits']
Explanation: selection using dictionary-like string
End of explanation
produce_df[ ['fruits', 'veggies'] ]
Explanation: list of strings as index (note: double square brackets)
End of explanation
produce_df.iloc[2]
Explanation: select row using integer index
End of explanation
produce_df.iloc[0:2]
produce_df.iloc[:-2]
Explanation: select rows using integer slice
End of explanation
produce_df + produce_df.iloc[0]
Explanation: + is over-loaded as concatenation operator
End of explanation
df = pd.DataFrame(np.random.randn(10, 4), columns=['A', 'B', 'C', 'D'])
df2 = pd.DataFrame(np.random.randn(7, 3), columns=['A', 'B', 'C'])
sum_df = df + df2
sum_df
Explanation: Data alignment and arithmetic
Data alignment between DataFrame objects automatically align on both the columns and the index (row labels).
Note locations for 'NaN'
End of explanation
sum_df>0
sum_df[sum_df>0]
Explanation: Boolean indexing
End of explanation
mask = sum_df['B'] < 0
mask
sum_df[mask]
Explanation: first select rows in column B whose values are less than zero
then, include information for all columns in that row in the resulting data set
End of explanation
produce_df.isin(['apples', 'onions'])
Explanation: isin function
End of explanation
produce_df.where(produce_df > 'k')
Explanation: where function
End of explanation |
11,372 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Operations on word vectors
Welcome to your first assignment of this week!
Because word embeddings are very computionally expensive to train, most ML practitioners will load a pre-trained set of embeddings.
After this assignment you will be able to
Step1: Next, lets load the word vectors. For this assignment, we will use 50-dimensional GloVe vectors to represent words. Run the following cell to load the word_to_vec_map.
Step3: You've loaded
Step5: Expected Output
Step6: Run the cell below to test your code, this may take 1-2 minutes.
Step7: Expected Output
Step8: Now, you will consider the cosine similarity of different words with $g$. Consider what a positive value of similarity means vs a negative cosine similarity.
Step9: As you can see, female first names tend to have a positive cosine similarity with our constructed vector $g$, while male first names tend to have a negative cosine similarity. This is not suprising, and the result seems acceptable.
But let's try with some other words.
Step11: Do you notice anything surprising? It is astonishing how these results reflect certain unhealthy gender stereotypes. For example, "computer" is closer to "man" while "literature" is closer to "woman". Ouch!
We'll see below how to reduce the bias of these vectors, using an algorithm due to Boliukbasi et al., 2016. Note that some word pairs such as "actor"/"actress" or "grandmother"/"grandfather" should remain gender specific, while other words such as "receptionist" or "technology" should be neutralized, i.e. not be gender-related. You will have to treat these two type of words differently when debiasing.
3.1 - Neutralize bias for non-gender specific words
The figure below should help you visualize what neutralizing does. If you're using a 50-dimensional word embedding, the 50 dimensional space can be split into two parts
Step13: Expected Output | Python Code:
import numpy as np
from w2v_utils import *
Explanation: Operations on word vectors
Welcome to your first assignment of this week!
Because word embeddings are very computationally expensive to train, most ML practitioners will load a pre-trained set of embeddings.
After this assignment you will be able to:
Load pre-trained word vectors, and measure similarity using cosine similarity
Use word embeddings to solve word analogy problems such as Man is to Woman as King is to ______.
Modify word embeddings to reduce their gender bias
Let's get started! Run the following cell to load the packages you will need.
End of explanation
words, word_to_vec_map = read_glove_vecs('data/glove.6B.50d.txt')
Explanation: Next, let's load the word vectors. For this assignment, we will use 50-dimensional GloVe vectors to represent words. Run the following cell to load the word_to_vec_map.
End of explanation
# GRADED FUNCTION: cosine_similarity
def cosine_similarity(u, v):
Cosine similarity reflects the degree of similariy between u and v
Arguments:
u -- a word vector of shape (n,)
v -- a word vector of shape (n,)
Returns:
cosine_similarity -- the cosine similarity between u and v defined by the formula above.
distance = 0.0
### START CODE HERE ###
# Compute the dot product between u and v (≈1 line)
dot = np.dot(u, v)
# Compute the L2 norm of u (≈1 line)
norm_u = np.sqrt(np.dot(u, u))
# Compute the L2 norm of v (≈1 line)
norm_v = np.sqrt(np.dot(v, v))
# Compute the cosine similarity defined by formula (1) (≈1 line)
cosine_similarity = dot/(norm_u * norm_v)
### END CODE HERE ###
return cosine_similarity
father = word_to_vec_map["father"]
mother = word_to_vec_map["mother"]
ball = word_to_vec_map["ball"]
crocodile = word_to_vec_map["crocodile"]
france = word_to_vec_map["france"]
italy = word_to_vec_map["italy"]
paris = word_to_vec_map["paris"]
rome = word_to_vec_map["rome"]
print("cosine_similarity(father, mother) = ", cosine_similarity(father, mother))
print("cosine_similarity(ball, crocodile) = ",cosine_similarity(ball, crocodile))
print("cosine_similarity(france - paris, rome - italy) = ",cosine_similarity(france - paris, rome - italy))
Explanation: You've loaded:
- words: set of words in the vocabulary.
- word_to_vec_map: dictionary mapping words to their GloVe vector representation.
You've seen that one-hot vectors do not do a good job capturing what words are similar. GloVe vectors provide much more useful information about the meaning of individual words. Let's now see how you can use GloVe vectors to decide how similar two words are.
1 - Cosine similarity
To measure how similar two words are, we need a way to measure the degree of similarity between two embedding vectors for the two words. Given two vectors $u$ and $v$, cosine similarity is defined as follows:
$$\text{CosineSimilarity(u, v)} = \frac {u . v} {||u||_2 ||v||_2} = cos(\theta) \tag{1}$$
where $u.v$ is the dot product (or inner product) of two vectors, $||u||_2$ is the norm (or length) of the vector $u$, and $\theta$ is the angle between $u$ and $v$. This similarity depends on the angle between $u$ and $v$. If $u$ and $v$ are very similar, their cosine similarity will be close to 1; if they are dissimilar, the cosine similarity will take a smaller value.
<img src="images/cosine_sim.png" style="width:800px;height:250px;">
<caption><center> Figure 1: The cosine of the angle between two vectors is a measure of how similar they are</center></caption>
Exercise: Implement the function cosine_similarity() to evaluate similarity between word vectors.
Reminder: The norm of $u$ is defined as $ ||u||_2 = \sqrt{\sum_{i=1}^{n} u_i^2}$
End of explanation
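As a quick sanity check (a small example of my own, not part of the assignment), cosine_similarity should return 0 for orthogonal vectors, 1 for parallel vectors, and -1 for opposite vectors:
import numpy as np
print(cosine_similarity(np.array([1.0, 0.0]), np.array([0.0, 1.0])))    # expect 0.0
print(cosine_similarity(np.array([2.0, 3.0]), np.array([4.0, 6.0])))    # expect 1.0
print(cosine_similarity(np.array([1.0, 1.0]), np.array([-1.0, -1.0])))  # expect -1.0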
# GRADED FUNCTION: complete_analogy
def complete_analogy(word_a, word_b, word_c, word_to_vec_map):
Performs the word analogy task as explained above: a is to b as c is to ____.
Arguments:
word_a -- a word, string
word_b -- a word, string
word_c -- a word, string
word_to_vec_map -- dictionary that maps words to their corresponding vectors.
Returns:
best_word -- the word such that v_b - v_a is close to v_best_word - v_c, as measured by cosine similarity
# convert words to lower case
word_a, word_b, word_c = word_a.lower(), word_b.lower(), word_c.lower()
### START CODE HERE ###
# Get the word embeddings v_a, v_b and v_c (≈1-3 lines)
e_a, e_b, e_c = word_to_vec_map[word_a], word_to_vec_map[word_b], word_to_vec_map[word_c]
### END CODE HERE ###
words = word_to_vec_map.keys()
max_cosine_sim = -100 # Initialize max_cosine_sim to a large negative number
best_word = None # Initialize best_word with None, it will help keep track of the word to output
# loop over the whole word vector set
for w in words:
# to avoid best_word being one of the input words, pass on them.
if w in [word_a, word_b, word_c] :
continue
### START CODE HERE ###
# Compute cosine similarity between the vector (e_b - e_a) and the vector ((w's vector representation) - e_c) (≈1 line)
cosine_sim = cosine_similarity(e_b - e_a, word_to_vec_map[w] - e_c)
# If the cosine_sim is more than the max_cosine_sim seen so far,
# then: set the new max_cosine_sim to the current cosine_sim and the best_word to the current word (≈3 lines)
if cosine_sim > max_cosine_sim:
max_cosine_sim = cosine_sim
best_word = w
### END CODE HERE ###
return best_word
Explanation: Expected Output:
<table>
<tr>
<td>
**cosine_similarity(father, mother)** =
</td>
<td>
0.890903844289
</td>
</tr>
<tr>
<td>
**cosine_similarity(ball, crocodile)** =
</td>
<td>
0.274392462614
</td>
</tr>
<tr>
<td>
**cosine_similarity(france - paris, rome - italy)** =
</td>
<td>
-0.675147930817
</td>
</tr>
</table>
After you get the correct expected output, please feel free to modify the inputs and measure the cosine similarity between other pairs of words! Playing around the cosine similarity of other inputs will give you a better sense of how word vectors behave.
2 - Word analogy task
In the word analogy task, we complete the sentence <font color='brown'>"a is to b as c is to ____"</font>. An example is <font color='brown'> 'man is to woman as king is to queen' </font>. In detail, we are trying to find a word d, such that the associated word vectors $e_a, e_b, e_c, e_d$ are related in the following manner: $e_b - e_a \approx e_d - e_c$. We will measure the similarity between $e_b - e_a$ and $e_d - e_c$ using cosine similarity.
Exercise: Complete the code below to be able to perform word analogies!
End of explanation
triads_to_try = [('italy', 'italian', 'spain'), ('india', 'delhi', 'japan'), ('man', 'woman', 'boy'), ('small', 'smaller', 'large')]
for triad in triads_to_try:
print ('{} -> {} :: {} -> {}'.format( *triad, complete_analogy(*triad,word_to_vec_map)))
triads_to_try = [('burger', 'soda', 'steak'),]
for triad in triads_to_try:
print ('{} -> {} :: {} -> {}'.format( *triad, complete_analogy(*triad,word_to_vec_map)))
triads_to_try = [('morning', 'coffee', 'evening'),]
for triad in triads_to_try:
print ('{} -> {} :: {} -> {}'.format( *triad, complete_analogy(*triad,word_to_vec_map)))
triads_to_try = [('name', 'lastname', 'fuyang'),]
for triad in triads_to_try:
print ('{} -> {} :: {} -> {}'.format( *triad, complete_analogy(*triad,word_to_vec_map)))
Explanation: Run the cell below to test your code, this may take 1-2 minutes.
End of explanation
g = word_to_vec_map['woman'] - word_to_vec_map['man']
print(g)
Explanation: Expected Output:
<table>
<tr>
<td>
**italy -> italian** ::
</td>
<td>
spain -> spanish
</td>
</tr>
<tr>
<td>
**india -> delhi** ::
</td>
<td>
japan -> tokyo
</td>
</tr>
<tr>
<td>
**man -> woman ** ::
</td>
<td>
boy -> girl
</td>
</tr>
<tr>
<td>
**small -> smaller ** ::
</td>
<td>
large -> larger
</td>
</tr>
</table>
Once you get the correct expected output, please feel free to modify the input cells above to test your own analogies. Try to find some other analogy pairs that do work, but also find some where the algorithm doesn't give the right answer: For example, you can try small->smaller as big->?.
Congratulations!
You've come to the end of this assignment. Here are the main points you should remember:
Cosine similarity is a good way to compare the similarity between pairs of word vectors. (Though L2 distance works too.)
For NLP applications, using a pre-trained set of word vectors from the internet is often a good way to get started.
Even though you have finished the graded portions, we recommend you also take a look at the rest of this notebook.
Congratulations on finishing the graded portions of this notebook!
3 - Debiasing word vectors (OPTIONAL/UNGRADED)
In the following exercise, you will examine gender biases that can be reflected in a word embedding, and explore algorithms for reducing the bias. In addition to learning about the topic of debiasing, this exercise will also help hone your intuition about what word vectors are doing. This section involves a bit of linear algebra, though you can probably complete it even without being expert in linear algebra, and we encourage you to give it a shot. This portion of the notebook is optional and is not graded.
Lets first see how the GloVe word embeddings relate to gender. You will first compute a vector $g = e_{woman}-e_{man}$, where $e_{woman}$ represents the word vector corresponding to the word woman, and $e_{man}$ corresponds to the word vector corresponding to the word man. The resulting vector $g$ roughly encodes the concept of "gender". (You might get a more accurate representation if you compute $g_1 = e_{mother}-e_{father}$, $g_2 = e_{girl}-e_{boy}$, etc. and average over them. But just using $e_{woman}-e_{man}$ will give good enough results for now.)
End of explanation
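If you want to try the averaging idea mentioned above, a minimal sketch (hypothetical, not required by the assignment) could look like this:
import numpy as np
# Average several "female minus male" difference vectors for a smoother gender direction
pairs = [('woman', 'man'), ('mother', 'father'), ('girl', 'boy')]
g_avg = np.mean([word_to_vec_map[a] - word_to_vec_map[b] for a, b in pairs], axis=0)
print(cosine_similarity(g_avg, g))  # should be close to 1 if both directions agree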
print ('List of names and their similarities with constructed vector:')
# girls and boys name
name_list = ['john', 'marie', 'sophie', 'ronaldo', 'priya', 'rahul', 'danielle', 'reza', 'katy', 'yasmin']
for w in name_list:
print (w, cosine_similarity(word_to_vec_map[w], g))
print ('alex', cosine_similarity(word_to_vec_map['alex'], g))
print ('kim', cosine_similarity(word_to_vec_map['kim'], g))
Explanation: Now, you will consider the cosine similarity of different words with $g$. Consider what a positive value of similarity means vs a negative cosine similarity.
End of explanation
print('Other words and their similarities:')
word_list = ['lipstick', 'guns', 'science', 'arts', 'literature', 'warrior','doctor', 'tree', 'receptionist',
'technology', 'fashion', 'teacher', 'engineer', 'pilot', 'computer', 'singer']
for w in word_list:
print (w, cosine_similarity(word_to_vec_map[w], g))
print ('single', cosine_similarity(word_to_vec_map['single'], g))
print ('married', cosine_similarity(word_to_vec_map['married'], g))
Explanation: As you can see, female first names tend to have a positive cosine similarity with our constructed vector $g$, while male first names tend to have a negative cosine similarity. This is not surprising, and the result seems acceptable.
But let's try with some other words.
End of explanation
def neutralize(word, g, word_to_vec_map):
Removes the bias of "word" by projecting it on the space orthogonal to the bias axis.
This function ensures that gender neutral words are zero in the gender subspace.
Arguments:
word -- string indicating the word to debias
g -- numpy-array of shape (50,), corresponding to the bias axis (such as gender)
word_to_vec_map -- dictionary mapping words to their corresponding vectors.
Returns:
e_debiased -- neutralized word vector representation of the input "word"
### START CODE HERE ###
# Select word vector representation of "word". Use word_to_vec_map. (≈ 1 line)
e = word_to_vec_map[word]
# Compute e_biascomponent using the formula given above. (≈ 1 line)
e_biascomponent = np.dot(e, g) * g / np.dot(g,g)
# Neutralize e by subtracting e_biascomponent from it
# e_debiased should be equal to its orthogonal projection. (≈ 1 line)
e_debiased = e - e_biascomponent
### END CODE HERE ###
return e_debiased
e = "receptionist"
print("cosine similarity between " + e + " and g, before neutralizing: ", cosine_similarity(word_to_vec_map["receptionist"], g))
e_debiased = neutralize("receptionist", g, word_to_vec_map)
print("cosine similarity between " + e + " and g, after neutralizing: ", cosine_similarity(e_debiased, g))
Explanation: Do you notice anything surprising? It is astonishing how these results reflect certain unhealthy gender stereotypes. For example, "computer" is closer to "man" while "literature" is closer to "woman". Ouch!
We'll see below how to reduce the bias of these vectors, using an algorithm due to Bolukbasi et al., 2016. Note that some word pairs such as "actor"/"actress" or "grandmother"/"grandfather" should remain gender specific, while other words such as "receptionist" or "technology" should be neutralized, i.e. not be gender-related. You will have to treat these two types of words differently when debiasing.
3.1 - Neutralize bias for non-gender specific words
The figure below should help you visualize what neutralizing does. If you're using a 50-dimensional word embedding, the 50 dimensional space can be split into two parts: The bias-direction $g$, and the remaining 49 dimensions, which we'll call $g_{\perp}$. In linear algebra, we say that the 49 dimensional $g_{\perp}$ is perpendicular (or "orthogonal") to $g$, meaning it is at 90 degrees to $g$. The neutralization step takes a vector such as $e_{receptionist}$ and zeros out the component in the direction of $g$, giving us $e_{receptionist}^{debiased}$.
Even though $g_{\perp}$ is 49 dimensional, given the limitations of what we can draw on a screen, we illustrate it using a 1 dimensional axis below.
<img src="images/neutral.png" style="width:800px;height:300px;">
<caption><center> Figure 2: The word vector for "receptionist" represented before and after applying the neutralize operation. </center></caption>
Exercise: Implement neutralize() to remove the bias of words such as "receptionist" or "scientist". Given an input embedding $e$, you can use the following formulas to compute $e^{debiased}$:
$$e^{bias\_component} = \frac{e \cdot g}{||g||_2^2} * g\tag{2}$$
$$e^{debiased} = e - e^{bias\_component}\tag{3}$$
If you are an expert in linear algebra, you may recognize $e^{bias_component}$ as the projection of $e$ onto the direction $g$. If you're not an expert in linear algebra, don't worry about this.
<!--
**Reminder**: a vector $u$ can be split into two parts: its projection over a vector-axis $v_B$ and its projection over the axis orthogonal to $v$:
$$u = u_B + u_{\perp}$$
where : $u_B = \frac{u \cdot v_B}{||v_B||_2^2} v_B$ and $ u_{\perp} = u - u_B $
!-->
End of explanation
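One quick way to convince yourself that the projection in neutralize() really worked (a check of my own, not part of the assignment): the debiased vector should be numerically orthogonal to g.
import numpy as np
e_debiased = neutralize("receptionist", g, word_to_vec_map)
print(np.dot(e_debiased, g))  # expect something on the order of 1e-17, i.e. zero up to roundoff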
def equalize(pair, bias_axis, word_to_vec_map):
Debias gender specific words by following the equalize method described in the figure above.
Arguments:
pair -- pair of strings of gender specific words to debias, e.g. ("actress", "actor")
bias_axis -- numpy-array of shape (50,), vector corresponding to the bias axis, e.g. gender
word_to_vec_map -- dictionary mapping words to their corresponding vectors
Returns
e_1 -- word vector corresponding to the first word
e_2 -- word vector corresponding to the second word
### START CODE HERE ###
# Step 1: Select word vector representation of "word". Use word_to_vec_map. (≈ 2 lines)
w1, w2 = pair
e_w1, e_w2 = word_to_vec_map[w1], word_to_vec_map[w2]
# Step 2: Compute the mean of e_w1 and e_w2 (≈ 1 line)
mu = (e_w1 + e_w2) / 2.0
# Step 3: Compute the projections of mu over the bias axis and the orthogonal axis (≈ 2 lines)
mu_B = np.dot(mu, bias_axis) / np.dot(bias_axis, bias_axis) * bias_axis
mu_orth = mu - mu_B
# Step 4: Use equations (7) and (8) to compute e_w1B and e_w2B (≈2 lines)
e_w1B = np.dot(e_w1, bias_axis) / np.dot(bias_axis, bias_axis) * bias_axis
e_w2B = np.dot(e_w2, bias_axis) / np.dot(bias_axis, bias_axis) * bias_axis
# Step 5: Adjust the Bias part of e_w1B and e_w2B using the formulas (9) and (10) given above (≈2 lines)
corrected_e_w1B = np.sqrt(np.abs(1 - np.sum(mu_orth ** 2))) * (e_w1B - mu_B) / np.linalg.norm(e_w1 - mu_orth - mu_B)
corrected_e_w2B = np.sqrt(np.abs(1 - np.sum(mu_orth ** 2))) * (e_w2B - mu_B) / np.linalg.norm(e_w2 - mu_orth - mu_B)
# Step 6: Debias by equalizing e1 and e2 to the sum of their corrected projections (≈2 lines)
e1 = corrected_e_w1B + mu_orth
e2 = corrected_e_w2B + mu_orth
### END CODE HERE ###
return e1, e2
print("cosine similarities before equalizing:")
print("cosine_similarity(word_to_vec_map[\"man\"], gender) = ", cosine_similarity(word_to_vec_map["man"], g))
print("cosine_similarity(word_to_vec_map[\"woman\"], gender) = ", cosine_similarity(word_to_vec_map["woman"], g))
print()
e1, e2 = equalize(("man", "woman"), g, word_to_vec_map)
print("cosine similarities after equalizing:")
print("cosine_similarity(e1, gender) = ", cosine_similarity(e1, g))
print("cosine_similarity(e2, gender) = ", cosine_similarity(e2, g))
Explanation: Expected Output: The second result is essentially 0, up to numerical roundoff (on the order of $10^{-17}$).
<table>
<tr>
<td>
**cosine similarity between receptionist and g, before neutralizing:** :
</td>
<td>
0.330779417506
</td>
</tr>
<tr>
<td>
**cosine similarity between receptionist and g, after neutralizing:** :
</td>
<td>
-3.26732746085e-17
</td>
</tr>
</table>
3.2 - Equalization algorithm for gender-specific words
Next, lets see how debiasing can also be applied to word pairs such as "actress" and "actor." Equalization is applied to pairs of words that you might want to have differ only through the gender property. As a concrete example, suppose that "actress" is closer to "babysit" than "actor." By applying neutralizing to "babysit" we can reduce the gender-stereotype associated with babysitting. But this still does not guarantee that "actor" and "actress" are equidistant from "babysit." The equalization algorithm takes care of this.
The key idea behind equalization is to make sure that a particular pair of words are equi-distant from the 49-dimensional $g_\perp$. The equalization step also ensures that the two equalized vectors are now the same distance from $e_{receptionist}^{debiased}$, or from any other word that has been neutralized. In pictures, this is how equalization works:
<img src="images/equalize10.png" style="width:800px;height:400px;">
The derivation of the linear algebra to do this is a bit more complex. (See Bolukbasi et al., 2016 for details.) But the key equations are:
$$ \mu = \frac{e_{w1} + e_{w2}}{2}\tag{4}$$
$$ \mu_{B} = \frac {\mu \cdot \text{bias_axis}}{||\text{bias_axis}||_2^2} *\text{bias_axis}
\tag{5}$$
$$\mu_{\perp} = \mu - \mu_{B} \tag{6}$$
$$ e_{w1B} = \frac {e_{w1} \cdot \text{bias_axis}}{||\text{bias_axis}||_2^2} \text{bias_axis}
\tag{7}$$
$$ e_{w2B} = \frac {e_{w2} \cdot \text{bias_axis}}{||\text{bias_axis}||_2^2} \text{bias_axis}
\tag{8}$$
$$e_{w1B}^{corrected} = \sqrt{ |{1 - ||\mu_{\perp} ||^2_2} |} * \frac{e_{\text{w1B}} - \mu_B} {|(e_{w1} - \mu_{\perp}) - \mu_B|} \tag{9}$$
$$e_{w2B}^{corrected} = \sqrt{ |{1 - ||\mu_{\perp} ||^2_2} |} * \frac{e_{\text{w2B}} - \mu_B} {|(e_{w2} - \mu_{\perp}) - \mu_B|} \tag{10}$$
$$e_1 = e_{w1B}^{corrected} + \mu_{\perp} \tag{11}$$
$$e_2 = e_{w2B}^{corrected} + \mu_{\perp} \tag{12}$$
Exercise: Implement the function below. Use the equations above to get the final equalized version of the pair of words. Good luck!
End of explanation |
11,373 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
A Crash Course in Python for Scientists
Rick Muller, Sandia National Laboratories
version 0.62, Updated Dec 15, 2016 by Ryan Smith, Cal State East Bay
Using Python 3.5.2 | Anaconda 4.1.1
This work is licensed under a Creative Commons Attribution-ShareAlike 3.0 Unported License.
If skipping to other sections, it's a good idea to run this first
Step1: Table of Contents
0. Preliminary
0.1 Why Python?
0.2 What You Need to Install
1. Python Overview
1.1 Using Python as a Calculator
1.2 Strings
1.3 Lists
1.4 Iteration, Indentation, and Blocks
1.5 Slicing
1.6 Booleans and Truth Testing
1.7 Code Example
Step2: (If you're typing this into an IPython notebook, or otherwise using a notebook file, you hit shift-Enter to evaluate a cell.)
There are some gotchas compared to using a normal calculator.
Step3: There used to be gotchas in division in python 2, like C or Fortran integer division, where division truncates the remainder and returns an integer. In version 3, Python returns a floating point number. If for some reason you are using Python 2, you can fix this by importing the module from the future features
Step4: or you can simply import the math library itself
Step5: You can define variables using the equals (=) sign
Step6: If you try to access a variable that you haven't yet defined, you get an error
Step7: and you need to define it
Step8: You can name a variable almost anything you want. It needs to start with an alphabetical character or "_", can contain alphanumeric characters plus underscores ("_"). Certain words, however, are reserved for the language
Step9: The Python Tutorial has more on using Python as an interactive shell. The IPython tutorial makes a nice complement to this, since IPython has a much more sophisticated interactive shell.
1.2 Strings
Strings are lists of printable characters, and can be defined using either single quotes
Step10: or double quotes
Step11: But not both at the same time, unless you want one of the symbols to be part of the string.
Step12: Just like the other two data objects we're familiar with (ints and floats), you can assign a string to a variable
Step13: The print statement is often used for printing character strings
Step14: But it can also print data types other than strings
Step15: In the above snippet, the number 600 (stored in the variable "area") is converted into a string before being printed out.
You can use the + operator to concatenate strings together
Step16: Don't forget the space between the strings, if you want one there.
Step17: You can use + to concatenate multiple strings in a single statement
Step18: If you have a lot of words to concatenate together, there are other, more efficient ways to do this. But this is fine for linking a few strings together.
1.3 Lists
Very often in a programming language, one wants to keep a group of similar items together. Python does this using a data type called lists.
Step19: You can access members of the list using the index of that item
Step20: Python lists, like C, but unlike Fortran, use 0 as the index of the first element of a list. Thus, in this example, the 0 element is "Sunday", 1 is "Monday", and so on. If you need to access the nth element from the end of the list, you can use a negative index. For example, the -1 element of a list is the last element
Step21: You can add additional items to the list using the .append() command
Step22: The range() command is a convenient way to make sequential lists of numbers
Step23: Note that range(n) starts at 0 and gives the sequential list of integers less than n. If you want to start at a different number, use range(start,stop)
Step24: The lists created above with range have a step of 1 between elements. You can also give a fixed step size via a third command
Step25: Lists do not have to hold the same data type. For example,
Step26: However, it's good (but not essential) to use lists for similar objects that are somehow logically connected. If you want to group different data types together into a composite data object, it's best to use tuples, which we will learn about below.
You can find out how long a list is using the len() command
Step27: 1.4 Iteration, Indentation, and Blocks
One of the most useful things you can do with lists is to iterate through them, i.e. to go through each element one at a time. To do this in Python, we use the for statement
Step28: This code snippet goes through each element of the list called days_of_the_week and assigns it to the variable day. It then executes everything in the indented block (in this case only one line of code, the print statement) using those variable assignments. When the program has gone through every element of the list, it exits the block.
(Almost) every programming language defines blocks of code in some way. In Fortran, one uses END statements (ENDDO, ENDIF, etc.) to define code blocks. In C, C++, and Perl, one uses curly braces {} to define these blocks.
Python uses a colon ("
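A minimal sketch of the loop being described (assuming a days_of_the_week list like the one defined earlier in the notebook):
days_of_the_week = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
for day in days_of_the_week:
    # the indented block is executed once for every element of the list
    print(day)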
Step29: The range() command is particularly useful with the for statement to execute loops of a specified length
Step30: 1.5 Slicing
Lists and strings have something in common that you might not suspect
Step31: This is only occasionally useful. Slightly more useful is the slicing operation, which you can also use on any sequence. We already know that we can use indexing to get the first element of a list
Step32: If we want the list containing the first two elements of a list, we can do this via
Step33: or simply
Step34: If we want the last items of the list, we can do this with negative slicing
Step35: which is somewhat logically consistent with negative indices accessing the last elements of the list.
You can do
Step36: Since strings are sequences, you can also do this to them
Step37: If we really want to get fancy, we can pass a third element into the slice, which specifies a step length (just like a third argument to the range() function specifies the step)
Step38: Note that in this example we omitted the first few arguments, so that the slice started at 6, went to the end of the list, and took every second element, to generate the list of even numbers up to 20 (the last element in the original list).
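For example, a sketch of the kind of slice being described (assuming a list of the integers 0 through 20):
numbers = list(range(21))   # 0, 1, ..., 20
print(numbers[6::2])        # start at index 6, go to the end, take every second element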
1.6 Booleans and Truth Testing
We have now learned a few data types. We have integers and floating point numbers, strings, and lists to contain them. We have also learned about lists, a container that can hold any data type. We have learned to print things out, and to iterate over items in lists. We will now learn about boolean variables that can be either True or False.
We invariably need some concept of conditions in programming to control branching behavior, to allow a program to react differently to different situations. If it's Monday, I'll go to work, but if it's Sunday, I'll sleep in. To do this in Python, we use a combination of boolean variables, which evaluate to either True or False, and if statements, that control branching based on boolean values.
For example
Step39: (Quick quiz
Step40: If we evaluate it by itself, as we just did, we see that it returns a boolean value, False. The "==" operator performs equality testing. If the two items are equal, it returns True, otherwise it returns False. In this case, it is comparing two variables, the string "Sunday", and whatever is stored in the variable "day", which, in this case, is the other string "Saturday". Since the two strings are not equal to each other, the truth test has the false value.
The if statement that contains the truth test is followed by a code block (a colon followed by an indented block of code). If the boolean is true, it executes the code in that block. Since it is false in the above example, we don't see that code executed.
The first block of code is followed by an else statement, which is executed if nothing else in the above if statement is true. Since the value was false, this code is executed, which is why we see "Go to work".
Try setting the day equal to "Sunday" and then running the above if/else statement. Did it work as you thought it would?
You can compare any data types in Python
Step41: We see a few other boolean operators here, all of which should be self-explanatory. Less than, equality, non-equality, and so on.
Particularly interesting is the 1 == 1.0 test, which is true, since even though the two objects are different data types (integer and floating point number), they have the same value. There is another boolean operator is, that tests whether two objects are the same object
Step42: Why is 1 not the same as 1.0? Different data type. You can check the data type
Step43: We can do boolean tests on lists as well
Step44: Finally, note that you can also string multiple comparisons together, which can result in very intuitive tests
Step45: If statements can have elif parts ("else if"), in addition to if/else parts. For example
Step46: Of course we can combine if statements with for loops, to make a snippet that is almost interesting
Step47: This is something of an advanced topic, but ordinary data types have boolean values associated with them, and, indeed, in early versions of Python there was not a separate boolean object. Essentially, anything that was a 0 value (the integer or floating point 0, an empty string "", or an empty list []) was False, and everything else was true. You can see the boolean value of any data object using the bool() function.
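A few quick illustrations of the rule just described (a sketch, not the notebook's own cell):
print(bool(0), bool(0.0), bool(""), bool([]))       # all False
print(bool(1), bool(-3.2), bool("hi"), bool([0]))   # all True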
Step48: 1.7 Code Example
Step49: Let's go through this line by line. First, we define the variable n, and set it to the integer 20. n is the length of the sequence we're going to form, and should probably have a better variable name. We then create a variable called sequence, and initialize it to the list with the integers 0 and 1 in it, the first two elements of the Fibonacci sequence. We have to create these elements "by hand", since the iterative part of the sequence requires two previous elements.
We then have a for loop over the list of integers from 2 (the next element of the list) to n (the length of the sequence). After the colon, we see a hash tag "#", and then a comment that if we had set n to some number less than 2 we would have a problem. Comments in Python start with #, and are good ways to make notes to yourself or to a user of your code explaining why you did what you did. Better than the comment here would be to test to make sure the value of n is valid, and to complain if it isn't; we'll try this later.
In the body of the loop, we append to the list an integer equal to the sum of the two previous elements of the list.
After exiting the loop (ending the indentation) we then print out the whole list. That's it!
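For reference, a sketch of the snippet being walked through (the actual cell may differ in details):
n = 20                 # length of the sequence; should probably have a better variable name
sequence = [0, 1]      # the first two elements, created "by hand"
for i in range(2, n):  # note: this assumes n >= 2
    sequence.append(sequence[i-1] + sequence[i-2])
print(sequence)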
1.8 Functions
We might want to use the Fibonacci snippet with different sequence lengths. We could cut and paste the code into another cell, changing the value of n, but it's easier and more useful to make a function out of the code. We do this with the def statement in Python
Step50: We can now call fibonacci() for different sequence_lengths
Step51: We've introduced several new features here. First, note that the function itself is defined as a code block (a colon followed by an indented block). This is the standard way that Python delimits things. Next, note that the first line of the function is a single string. This is called a docstring, and is a special kind of comment that is often available to people using the function through the python command line
Step52: If you define a docstring for all of your functions, it makes it easier for other people to use them, since they can get help on the arguments and return values of the function.
Next, note that rather than putting a comment in about what input values lead to errors, we have some testing of these values, followed by a warning if the value is invalid, and some conditional code to handle special cases.
1.9 Recursion and Factorials
Functions can also call themselves, something that is often called recursion. We're going to experiment with recursion by computing the factorial function. The factorial is defined for a positive integer n as
$$ n! = n(n-1)(n-2)\cdots 1 $$
First, note that we don't need to write a function at all, since this is a function built into the standard math library. Let's use the help function to find out about it
Step53: This is clearly what we want.
Step54: However, if we did want to write a function ourselves, we could do recursively by noting that
$$ n! = n(n-1)!$$
The program then looks something like
Step55: Recursion can be very elegant, and can lead to very simple programs.
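A minimal recursive version along the lines just described (a sketch, not necessarily the notebook's own cell):
def fact(n):
    # n! = n * (n-1)!, with the convention 0! = 1 as the base case
    if n <= 0:
        return 1
    return n * fact(n - 1)

print(fact(20))   # 2432902008176640000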
1.10 Two More Data Structures
Step56: Tuples are like lists, in that you can access the elements using indices
Step57: However, tuples are immutable, you can't append to them or change the elements of them
Step58: Tuples are useful anytime you want to group different pieces of data together in an object, but don't want to create a full-fledged class (see below) for them. For example, let's say you want the Cartesian coordinates of some objects in your program. Tuples are a good way to do this
Step59: Again, it's not a necessary distinction, but one way to distinguish tuples and lists is that tuples are a collection of different things, here a name, and x and y coordinates, whereas a list is a collection of similar things, like if we wanted a list of those coordinates
Step60: Tuples can be used when functions return more than one value. Say we wanted to compute the smallest x- and y-coordinates of the above list of objects. We could write
Step61: Here we did two things with tuples you haven't seen before. First, we unpacked an object into a set of named variables using tuple assignment
Step62: Dictionaries are an object called "mappings" or "associative arrays" in other languages. Whereas a list associates an integer index with a set of objects
Step63: The index in a dictionary is called the key, and the corresponding dictionary entry is the value. A dictionary can use (almost) anything as the key. Whereas lists are formed with square brackets [], dictionaries use curly brackets {}
Step64: There's also a convenient way to create dictionaries without having to quote the keys.
Step65: Notice in either case you are not choosing the ordering -- it is automagically grouped alphabetically.
The len() command works on both tuples and dictionaries
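A small sketch of both containers together (hypothetical values):
point = ('origin', 0.0, 0.0)                 # a tuple: different kinds of data grouped together
ages = {'Rick': 46, 'Bob': 86, 'Fred': 21}   # a dictionary: keys mapped to values
print(len(point), len(ages))                 # len() works on both
print(ages['Rick'])                          # look up a value by its key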
Step66: 1.11 Plotting with Matplotlib
We can generally understand trends in data by using a plotting program to chart it. Python has a wonderful plotting library called Matplotlib. The Jupyter notebook interface we are using for these notes has that functionality built in.
First off, it is important to import the library. We did this at the very beginning of this whole jupyter notebook, but here it is in case you've jumped straight here without running the first code line
Step67: The %matplotlib inline command makes it so plots are within this notebook. To plot to a separate window, use instead
Step68: Next lets generate the factorials.
Step69: Now we use the Matplotlib function plot to compare the two.
Step70: The factorial function grows much faster. In fact, you can't even see the Fibonacci sequence. It's not entirely surprising
Step71: There are many more things you can do with Matplotlib. We'll be looking at some of them in the sections to come. In the meantime, if you want an idea of the different things you can do, look at the Matplotlib Gallery. Rob Johansson's IPython notebook Introduction to Matplotlib is also particularly good.
1.12 Conclusion of the Python Overview
There is, of course, much more to the language than we've covered here. I've tried to keep this brief enough so that you can jump in and start using Python to simplify your life and work. My own experience in learning new things is that the information doesn't "stick" unless you try and use it for something in real life.
You will no doubt need to learn more as you go. I've listed several other good references, including the Python Tutorial and Learn Python the Hard Way. Additionally, now is a good time to start familiarizing yourself with the Python Documentation, and, in particular, the Python Language Reference.
Tim Peters, one of the earliest and most prolific Python contributors, wrote the "Zen of Python", which can be accessed via the "import this" command
Step72: No matter how experienced a programmer you are, these are words to meditate on.
2. Numpy and Scipy
Numpy contains core routines for doing fast vector, matrix, and linear algebra-type operations in Python. Scipy contains additional routines for optimization, special functions, and so on. Both contain modules written in C and Fortran so that they're as fast as possible. Together, they give Python roughly the same capability that the Matlab program offers. (In fact, if you're an experienced Matlab user, there's a guide to Numpy for Matlab users just for you.)
First off, it is important to import the library. Again, we did this at the very beginning of this whole jupyter notebook, but here it is in case you've jumped straight here without running the first code line
Step73: 2.1 Making vectors and matrices
Fundamental to both Numpy and Scipy is the ability to work with vectors and matrices. You can create vectors from lists using the array command
Step74: You can pass in a second argument to array that gives the numeric type. There are a number of types listed here that your matrix can be. Some of these are aliased to single character codes. The most common ones are 'd' (double precision floating point number), 'D' (double precision complex number), and 'i' (int32). Thus,
Step75: To build matrices, you can either use the array command with lists of lists
Step76: You can also form empty (zero) matrices of arbitrary shape (including vectors, which Numpy treats as vectors with one row), using the zeros command
Step77: The first argument is a tuple containing the shape of the matrix, and the second is the data type argument, which follows the same conventions as in the array command. Thus, you can make row vectors
Step78: or column vectors
Step79: There's also an identity command that behaves as you'd expect
Step80: as well as a ones command.
2.2 Linspace, matrix functions, and plotting
The linspace command makes a linear array of points from a starting to an ending value.
Step81: If you provide a third argument, it takes that as the number of points in the space. If you don't provide the argument, it gives a length 50 linear space.
Step82: linspace is an easy way to make coordinates for plotting. Functions in the numpy library (all of which are imported into IPython notebook) can act on an entire vector (or even a matrix) of points at once. Thus,
Step83: In conjunction with matplotlib, this is a nice way to plot things
Step84: 2.3 Matrix operations
Matrix objects act sensibly when multiplied by scalars
Step85: as well as when you add two matrices together. (However, the matrices have to be the same shape.)
Step86: Something that confuses Matlab users is that the times (*) operator gives element-wise multiplication rather than matrix multiplication
Step87: To get matrix multiplication, you need the dot command
Step88: dot can also do dot products (duh!)
Step89: as well as matrix-vector products.
There are determinant, inverse, and transpose functions that act as you would suppose. Transpose can be abbreviated with ".T" at the end of a matrix object
Step90: There's also a diag() function that takes a list or a vector and puts it along the diagonal of a square matrix.
Step91: We'll find this useful later on.
2.4 Matrix Solvers
You can solve systems of linear equations using the solve command in the linear algebra toolbox of the numpy library
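A minimal sketch of the kind of call being described (a small system of my own, not the notebook's):
import numpy as np
A = np.array([[3.0, 1.0], [1.0, 2.0]])
b = np.array([9.0, 8.0])
x = np.linalg.solve(A, b)
print(x)                    # [2. 3.]
print(np.dot(A, x) - b)     # residual should be ~0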
Step92: There are a number of routines to compute eigenvalues and eigenvectors
eigvals returns the eigenvalues of a matrix
eigvalsh returns the eigenvalues of a Hermitian matrix
eig returns the eigenvalues and eigenvectors of a matrix
eigh returns the eigenvalues and eigenvectors of a Hermitian matrix.
Step93: 2.5 Example
Step94: Let's see whether this works for our sin example from above
Step95: Pretty close!
2.6 One-Dimensional Harmonic Oscillator using Finite Difference
Now that we've convinced ourselves that finite differences aren't a terrible approximation, let's see if we can use this to solve the one-dimensional harmonic oscillator.
We want to solve the time-independent Schrodinger equation
$$ -\frac{\hbar^2}{2m}\frac{\partial^2\psi(x)}{\partial x^2} + V(x)\psi(x) = E\psi(x)$$
for $\psi(x)$ when $V(x)=\frac{1}{2}m\omega^2x^2$ is the harmonic oscillator potential. We're going to use the standard trick to transform the differential equation into a matrix equation by multiplying both sides by $\psi^*(x)$ and integrating over $x$. This yields
$$ -\frac{\hbar^2}{2m}\int\psi(x)\frac{\partial^2}{\partial x^2}\psi(x)dx + \int\psi(x)V(x)\psi(x)dx = E$$
We will again use the finite difference approximation. The finite difference formula for the second derivative is
$$ y'' \approx \frac{y_{i+1}-2y_i+y_{i-1}}{(x_{i+1}-x_{i})^2} $$
We can think of the first term in the Schrodinger equation as the overlap of the wave function $\psi(x)$ with the second derivative of the wave function $\frac{\partial^2}{\partial x^2}\psi(x)$. Given the above expression for the second derivative, we can see if we take the overlap of the states $y_1,\dots,y_n$ with the second derivative, we will only have three points where the overlap is nonzero, at $y_{i-1}$, $y_i$, and $y_{i+1}$. In matrix form, this leads to the tridiagonal Laplacian matrix, which has -2's along the diagonals, and 1's along the diagonals above and below the main diagonal.
The second term leads to a diagonal matrix with $V(x_i)$ on the diagonal elements. Putting all of these pieces together, we get
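A sketch of the construction just described, in units where hbar = m = omega = 1 (my own assumptions; the notebook's cell may differ in details):
import numpy as np
n = 200
x = np.linspace(-5, 5, n)
h = x[1] - x[0]
# Tridiagonal Laplacian: -2 on the diagonal, 1 just above and below, divided by h**2
laplacian = (np.diag(-2.0 * np.ones(n)) + np.diag(np.ones(n - 1), 1) + np.diag(np.ones(n - 1), -1)) / h**2
H = -0.5 * laplacian + np.diag(0.5 * x**2)   # kinetic term plus harmonic potential on the diagonal
E, U = np.linalg.eigh(H)
print(E[:4])   # should be close to 0.5, 1.5, 2.5, 3.5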
Step96: We've made a couple of hacks here to get the orbitals the way we want them. First, I inserted a -1 factor before the wave functions, to fix the phase of the lowest state. The phase (sign) of a quantum wave function doesn't hold any information, only the square of the wave function does, so this doesn't really change anything.
But the eigenfunctions as we generate them aren't properly normalized. The reason is that finite difference isn't a real basis in the quantum mechanical sense. It's a basis of Dirac δ functions at each point; we interpret the space between the points as being "filled" by the wave function, but the finite difference basis only has the solution being at the points themselves. We can fix this by dividing the eigenfunctions of our finite difference Hamiltonian by the square root of the spacing, and this gives properly normalized functions.
2.7 Special Functions
The solutions to the Harmonic Oscillator are supposed to be Hermite polynomials. The Wikipedia page has the HO states given by
$$\psi_n(x) = \frac{1}{\sqrt{2^n n!}}
\left(\frac{m\omega}{\pi\hbar}\right)^{1/4}
\exp\left(-\frac{m\omega x^2}{2\hbar}\right)
H_n\left(\sqrt{\frac{m\omega}{\hbar}}x\right)$$
Let's see whether they look like those. There are some special functions in the Numpy library, and some more in Scipy. Hermite Polynomials are in Numpy
Step97: Let's compare the first function to our solution.
Step98: The agreement is almost exact.
We can use the subplot command to put multiple comparisons in different panes on a single plot (run %matplotlib qt on a separate line first to plot in a separate window)
Step99: Other than phase errors (which I've corrected with a little hack
Step101: As well as Jacobi, Laguerre, Hermite polynomials, Hypergeometric functions, and many others. There's a full listing at the Scipy Special Functions Page.
2.8 Least squares fitting
Very often we deal with some data that we want to fit to some sort of expected behavior. Say we have the following
Step102: There's a section below on parsing CSV data. We'll steal the parser from that. For an explanation, skip ahead to that section. Otherwise, just assume that this is a way to parse that text into a numpy array that we can plot and do other analyses with.
Step103: Since we expect the data to have an exponential decay, we can plot it using a semi-log plot.
Step104: For a pure exponential decay like this, we can fit the log of the data to a straight line. The above plot suggests this is a good approximation. Given a function
$$ y = Ae^{ax} $$
$$ \log(y) = ax + \log(A) $$
Thus, if we fit the log of the data versus x, we should get a straight line with slope $a$, and an intercept that gives the constant $A$.
There's a numpy function called polyfit that will fit data to a polynomial form. We'll use this to fit to a straight line (a polynomial of order 1)
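A sketch of the call being described, on stand-in data with a known decay (hypothetical values, just to show the fit):
import numpy as np
x = np.linspace(0, 4, 40)
y = 2.5 * np.exp(-1.3 * x)
a, logA = np.polyfit(x, np.log(y), 1)   # fit log(y) = a*x + log(A) with a degree-1 polynomial
print(a, np.exp(logA))                  # should recover roughly -1.3 and 2.5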
Step105: Let's see whether this curve fits the data.
Step107: If we have more complicated functions, we may not be able to get away with fitting to a simple polynomial. Consider the following data
Step108: This data looks more Gaussian than exponential. If we wanted to, we could use polyfit for this as well, but let's use the curve_fit function from Scipy, which can fit to arbitrary functions. You can learn more using help(curve_fit).
First define a general Gaussian function to fit to.
Step109: Now fit to it using curve_fit
Step110: The curve_fit routine we just used is built on top of a very good general minimization capability in Scipy. You can learn more at the scipy documentation pages.
2.9 Monte Carlo, random numbers, and computing $\pi$
Many methods in scientific computing rely on Monte Carlo integration, where a sequence of (pseudo) random numbers are used to approximate the integral of a function. Python has good random number generators in the standard library. The random() function from the numpy library gives pseudorandom numbers uniformly distributed between 0 and 1
Step111: Or, more elegantly
Step112: np.random.random() uses the Mersenne Twister algorithm, which is a highly regarded pseudorandom number generator. There are also functions to generate random integers, to randomly shuffle a list, and functions to pick random numbers from a particular distribution, like the normal distribution
Step113: We can check the distribution by using the histogram feature, as shown on the help page for numpy.random.normal
Step114: Here's an interesting use of random numbers
Step115: The idea behind the program is that the ratio of the area of the unit circle to the square that inscribes it is $\pi/4$, so by counting the fraction of the random points in the square that are inside the circle, we get increasingly good estimates to $\pi$.
The above code uses some higher level Numpy tricks to compute the radius of each point in a single line, to count how many radii are below one in a single line, and to filter the x,y points based on their radii. To be honest, I rarely write code like this
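A compact sketch of the same idea (not the notebook's own cell):
import numpy as np
N = 1000000
x, y = np.random.random(N), np.random.random(N)
inside = x**2 + y**2 < 1.0          # True for points that land inside the quarter circle
print(4.0 * inside.sum() / N)       # rough estimate of pi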
Step116: If you're interested in another great method, check out Ramanujan's method. This converges so fast you really need arbitrary precision math to display enough decimal places. You can do this with the Python decimal module, if you're interested.
2.10 Numerical Integration
Integration can be hard, and sometimes it's easier to work out a definite integral using an approximation. For example, suppose we wanted to figure out the integral
Step117: Scipy has a numerical integration routine quad (since sometimes numerical integration is called quadrature), that we can use for this
Step118: The first number in the tuple is the result, the second number is an estimate of the absolute error in the result.
There are also 2d and 3d numerical integrators in Scipy. See the docs for more information.
2.11 Fast Fourier Transform and Signal Processing
Very often we want to use FFT techniques to help obtain the signal from noisy data. Scipy has several different options for this.
Step120: There are additional signal processing routines in Scipy (e.g. splines, filtering) that you can read about here.
3. Intermediate Python
3.1 Parsing data output
As more and more of our day-to-day work is being done on and through computers, we increasingly have output that one program writes, often in a text file, that we need to analyze in one way or another, and potentially feed that output into another file.
Suppose we have the following output in CSV (comma separated values) format, a format that originally came from Microsoft Excel, and is increasingly used as a data interchange format in big data applications. How would we parse that?
Step121: This is a giant string. If we use splitlines(), we see that a list is created where line gets separated into a string
Step122: Splitting is a big concept in text processing. We used splitlines() here, and next we'll use the more general .split(",") function below to split each line into comma-delimited words.
We now want to do three things
Step123: What does split() do?
Step124: Since the data is now in a list of lines, we can iterate over it, splitting up data where we see a comma
Step125: We need to add these results at each step to a list
Step126: Let's examine what we just did
Step127: Hartrees (what most quantum chemistry programs use by default) are really stupid units. We really want this in kcal/mol or eV or something we use. So let's quickly replot this in terms of eV above the minimum energy, which will give us a much more useful plot
Step128: The real value in a language like Python is that it makes it easy to take additional steps to analyze data in this fashion, which means you are thinking more about your data, and are more likely to see important patterns.
3.2 Reading in data files
Let's take a look at a perhaps easier approach to a common problem -- you have a data file with some header info and comma-delimited values and you want the data so you can start doing stuff with it. Let's use numpy's genfromtxt()
Step129: That was easy! Why didn't we only learn that? Because not every data set is "nice" like that. Better to have some tools for when things aren't working how you'd like them to be. That being said, much data coming from scientific equipment and computational tools can be cast into a format that can be read in through genfromtxt(). For larger data sets, the library pandas might be helpful.
3.3 More Sophisticated String Formatting and Processing
Strings are a big deal in most modern languages, and hopefully the previous sections helped underscore how versatile Python's string processing techniques are. We will continue this topic in this section.
We can print out lines in Python using the print command.
Step130: In IPython we don't even need the print command, since it will display the last expression not assigned to a variable.
Step131: print even converts some arguments to strings for us
Step132: As versatile as this is, you typically need more freedom over the data you print out. For example, what if we want to print a bunch of data to exactly 4 decimal places? We can do this using formatted strings.
Formatted strings share a syntax with the C printf statement. We make a string that has some funny format characters in it, and then pass a bunch of variables into the string that fill out those characters in different ways.
For example,
Step133: We use a percent sign in two different ways here. First, the format character itself starts with a percent sign. %d or %i are for integers, %f is for floats, %e is for numbers in exponential formats. All of the numbers can take number immediately after the percent that specifies the total spaces used to print the number. Formats with a decimal can take an additional number after a dot . to specify the number of decimal places to print.
The other use of the percent sign is after the string, to pipe a set of variables in. You can pass in multiple variables (if your formatting string supports it) by putting a tuple after the percent. Thus,
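A small sketch of the pieces just described (hypothetical values):
print("pi to 4 decimal places: %.4f" % 3.14159265)
print("%d items cost %10.2f dollars" % (3, 19.5))   # a tuple pipes several variables in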
Step135: This is a simple formatting structure that will satisfy most of your string formatting needs. More information on different format symbols is available in the string formatting part of the standard docs.
It's worth noting that more complicated string formatting methods are in development, but I prefer this system due to its simplicity and its similarity to C formatting strings.
Recall we discussed multiline strings. We can put format characters in these as well, and fill them with the percent sign as before.
Step137: The problem with a long block of text like this is that it's often hard to keep track of what all of the variables are supposed to stand for. There's an alternate format where you can pass a dictionary into the formatted string, and give a little bit more information to the formatted string itself. This method looks like
Step139: By providing a little bit more information, you're less likely to make mistakes, like referring to your customer as "alien attack".
As a scientist, you're less likely to be sending bulk mailings to a bunch of customers. But these are great methods for generating and submitting lots of similar runs, say scanning a bunch of different structures to find the optimal configuration for something.
For example, you can use the following template for NWChem input files
Step141: If you want to submit a sequence of runs to a computer somewhere, it's pretty easy to put together a little script, maybe even with some more string formatting in it
Step142: This is a very bad geometry for a water molecule, and it would be silly to run so many geometry optimizations of structures that are guaranteed to converge to the same single geometry, but you get the idea of how you can run vast numbers of simulations with a technique like this.
We used the enumerate function to loop over both the indices and the items of a sequence, which is valuable when you want a clean way of getting both. enumerate is roughly equivalent to
Step143: Although enumerate uses generators (see below) so that it doesn't have to create a big list, which makes it faster for really long sequenes.
3.4 Optional arguments of a function
You will recall that the linspace function can take either two arguments (for the starting and ending points)
Step144: or it can take three arguments, for the starting point, the ending point, and the number of points
Step145: You can also pass in keywords to exclude the endpoint
Step146: Right now, we only know how to specify functions that have a fixed number of arguments. We'll learn how to do the more general cases here.
If we're defining a simple version of linspace, we would start with
Step147: We can add an optional argument by specifying a default value in the argument list
Step148: This gives exactly the same result if we don't specify anything
Step149: But also let's us override the default value with a third argument
Step150: We can add arbitrary keyword arguments to the function definition by putting a keyword argument **kwargs handle in
Step151: What the keyword argument construction does is to take any additional keyword arguments (i.e. arguments specified by name, like "endpoint=False"), and stick them into a dictionary called "kwargs" (you can call it anything you like, but it has to be preceded by two stars). You can then grab items out of the dictionary using the get command, which also lets you specify a default value. I realize it takes a little getting used to, but it is a common construction in Python code, and you should be able to recognize it.
There's an analogous *args that dumps any additional arguments into a list called "args". Think about the range function
Step152: Note that we have defined a few new things you haven't seen before
Step153: 3.5 List Comprehensions and Generators
List comprehensions are a streamlined way to make lists. They look something like a list definition, with some logic thrown in. For example
Step154: You can also put some boolean testing into the construct
Step155: Here i%2 is the remainder when i is divided by 2, so that i%2==1 is true if the number is odd. Even though this is a relative new addition to the language, it is now fairly common since it's so convenient.
iterators are a way of making virtual sequence objects. Consider if we had the nested loop structure
Step156: We can always turn an iterator into a list using the list command
Step157: There's a special syntax called a generator expression that looks a lot like a list comprehension
Step158: 3.6 Factory Functions
A factory function is a function that returns a function. They have the fancy name lexical closure, which makes you sound really intelligent in front of your CS friends. But, despite the arcane names, factory functions can play a very practical role.
Suppose you want the Gaussian function centered at 0.5, with height 99 and width 1.0. You could write a general function.
Step159: But what if you need a function with only one argument, like f(x) rather than f(x,y,z,...)? You can do this with Factory Functions
Step161: Everything in Python is an object, including functions. This means that functions can be returned by other functions. (They can also be passed into other functions, which is also useful, but a topic for another discussion.) In the gauss_maker example, the g function that is output "remembers" the A, a, x0 values it was constructed with, since they're all stored in the local memory space (this is what the lexical closure really refers to) of that function.
Factories are one of the more important of the Software Design Patterns, which are a set of guidelines to follow to make high-quality, portable, readable, stable software. It's beyond the scope of the current work to go more into either factories or design patterns, but I thought I would mention them for people interested in software design.
3.7 Serialization
Step162: Your data sits in something that looks like a Python dictionary, and in a single line of code, you can load it into a Python dictionary for use later.
In the same way, you can, with a single line of code, put a bunch of variables into a dictionary, and then output to a file using json
Step163: 3.8 Functional programming
Functional programming is a very broad subject. The idea is to have a series of functions, each of which generates a new data structure from an input, without changing the input structure at all. By not modifying the input structure (something that is called not having side effects), many guarantees can be made about how independent the processes are, which can help parallelization and guarantees of program accuracy. There is a Python Functional Programming HOWTO in the standard docs that goes into more details on functional programming. I just wanted to touch on a few of the most important ideas here.
There is an operator module that has function versions of most of the Python operators. For example
Step164: These are useful building blocks for functional programming.
The lambda operator allows us to build anonymous functions, which are simply functions that aren't defined by a normal def statement with a name. For example, a function that doubles the input is
Step165: We could also write this as
Step166: And assign it to a function separately
Step167: lambda is particularly convenient (as we'll see below) in passing simple functions as arguments to other functions.
map is a way to repeatedly apply a function to a list
Step168: reduce is a way to repeatedly apply a function to the first two items of the list. There already is a sum function in Python that is a reduction
Step169: We can use reduce to define an analogous prod function
Step170: 3.9 Object Oriented Programming
We've seen a lot of examples of objects in Python. We create a string object with quote marks
Step171: and we have a bunch of methods we can use on the object
Step173: Object oriented programming simply gives you the tools to define objects and methods for yourself. It's useful anytime you want to keep some data (like the characters in the string) tightly coupled to the functions that act on the data (length, split, startswith, etc.).
As an example, we're going to bundle the functions we did to make the 1d harmonic oscillator eigenfunctions with arbitrary potentials, so we can pass in a function defining that potential, some additional specifications, and get out something that can plot the orbitals, as well as do other things with them, if desired.
Step174: The __init__() method specifies what operations go on when the object is created. The self argument is the object itself, and we don't pass it in. The only required argument is the function that defines the QM potential. We can also specify additional arguments that define the numerical grid that we're going to use for the calculation.
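Here is a stripped-down skeleton of such a class with illustrative names and defaults; the real notebook class does more (building the Hamiltonian, solving it, and plotting), so treat this only as a sketch of the __init__/self machinery:

import numpy as np

class Schrod1D:
    "Bundle a 1D potential function with the numerical grid used to solve it."
    def __init__(self, V, xmin=-10.0, xmax=10.0, npts=128):
        self.V = V                              # function defining the QM potential
        self.x = np.linspace(xmin, xmax, npts)  # the numerical grid

    def potential(self):
        return self.V(self.x)

sho = Schrod1D(lambda x: 0.5*x**2)   # a harmonic potential, for example
print(sho.potential()[:3])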
For example, to do an infinite square well potential, we have a function that is 0 everywhere. We don't have to specify the barriers, since we'll only define the potential in the well, which means that it can't be defined anywhere else.
Step175: We can similarly redefine the Harmonic Oscillator potential.
Step176: Let's define a finite well potential
Step177: A triangular well
Step178: Or we can combine the two, making something like a semiconductor quantum well with a top gate
Step179: There's a lot of philosophy behind object oriented programming. Since I'm trying to focus on just the basics here, I won't go into them, but the internet is full of lots of resources on OO programming and theory. The best of this is contained in the Design Patterns book, which I highly recommend.
4. Speeding Python
Step180: The little % sign that we have in front of the timeit call is an example of an IPython magic function, which we don't have time to go into here, but it's just some little extra mojo that IPython adds to the functions to make it run better in the IPython environment. You can read more about it in the IPython tutorial.
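Outside of IPython you can get the same kind of measurement from the standard-library timeit module; a rough sketch (your timings will differ):

import timeit

# time 100000 calls to math.factorial(20) and report the per-call cost
total = timeit.timeit("factorial(20)", setup="from math import factorial", number=100000)
print(total/100000, "seconds per call")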
In any case, the timeit function runs 3 loops, and tells us that it took an average of 583 ns to compute 20!. In contrast
Step181: the factorial function we wrote is about a factor of 10 slower. This is because the built-in factorial function is written in C code and called from Python, and the version we wrote is written in plain old Python. A Python program has a lot of stuff in it that make it nice to interact with, but all that friendliness slows down the code. In contrast, the C code is less friendly but more efficient. If you want speed with as little effort as possible, write your code in an easy to program language like Python, but dump the slow parts into a faster language like C, and call it from Python. We'll go through some tricks to do this in this section.
4.2 Profiling
Profiling complements what timeit does by splitting the overall timing into the time spent in each function. It can give us a better understanding of what our program is really spending its time on.
Suppose we want to create a list of even numbers. Our first effort yields this
Step182: Is this code fast enough? We find out by running the Python profiler on a longer run
Step183: This looks okay, 0.05 seconds isn't a huge amount of time, but looking at the profiling shows that the append function is taking almost 20% of the time. Can we do better? Let's try a list comprehension.
Step184: By removing a small part of the code using a list comprehension, we've doubled the overall speed of the code!
It seems like range is still taking a long time. Can we get rid of it? In Python 2 we could, using the xrange generator (in Python 3, range itself is already lazy and there is no separate xrange).
Step186: This is where profiling can be useful. Our code now runs 3x faster by making trivial changes. We wouldn't have thought to look in these places had we not had access to easy profiling. Imagine what you would find in more complicated programs.
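If you are not using the notebook's profiling magic, the standard-library cProfile module gives the same per-function breakdown; a sketch with an illustrative stand-in for the function being profiled:

import cProfile

def evens(n):
    "Illustrative stand-in: build the list of even numbers below n."
    return [i for i in range(n) if i % 2 == 0]

cProfile.run("evens(1000000)")   # prints the time spent in each function call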
4.3 Other Ways to Speed Python
When we compared the fact and factorial functions, above, we noted that C routines are often faster because they're more streamlined. Once we've determined that one routine is a bottleneck for the performance of a program, we can replace it with a faster version by writing it in C. This is called extending Python, and there's a good section in the standard documents. This can be a tedious process if you have many different routines to convert. Fortunately, there are several other options.
Swig (the Simplified Wrapper and Interface Generator) is a method to generate bindings not only for Python but also for Matlab, Perl, Ruby, and other scripting languages. Swig can scan the header files of a C project and generate Python bindings for it. Using Swig is substantially easier than writing the wrapper code in C by hand.
Cython is a C-extension language. You can start by compiling a Python routine into a shared object library that can be imported back into Python as a faster version of the routine. You can then add static type declarations and make other restrictions to further speed up the code. Cython is generally easier to use than Swig.
PyPy is often the easiest way of obtaining fast code. PyPy is an alternative Python implementation with a just-in-time compiler; it is itself written in a restricted subset of Python called RPython that can be efficiently compiled and optimized. Over a wide range of tests, PyPy is roughly 6 times faster than the standard Python distribution.
4.4 Fun
Step187: You might think that Python is a bad choice for something like this, but, in terms of time, it really doesn't take long | Python Code:
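As a hedged sketch in the same spirit (not necessarily the notebook's exact code), a straightforward trial-division prime finder looks like this:

def primes_up_to(nmax):
    "Return the primes <= nmax by simple trial division."
    primes = []
    for n in range(2, nmax + 1):
        if all(n % p for p in primes):   # no smaller prime divides n evenly
            primes.append(n)
    return primes

print(primes_up_to(30))   # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]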
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
Explanation: A Crash Course in Python for Scientists
Rick Muller, Sandia National Laboratories
version 0.62, Updated Dec 15, 2016 by Ryan Smith, Cal State East Bay
Using Python 3.5.2 | Anaconda 4.1.1
This work is licensed under a Creative Commons Attribution-ShareAlike 3.0 Unported License.
If skipping to other sections, it's a good idea to run this first:
End of explanation
2+2
(50-5*6)/4
Explanation: Table of Contents
0. Preliminary
0.1 Why Python?
0.2 What You Need to Install
1. Python Overview
1.1 Using Python as a Calculator
1.2 Strings
1.3 Lists
1.4 Iteration, Indentation, and Blocks
1.5 Slicing
1.6 Booleans and Truth Testing
1.7 Code Example: The Fibonacci Sequence
1.8 Functions
1.9 Recursion and Factorials
1.10 Two More Data Structures: Tuples and Dictionaries
1.11 Plotting with Matplotlib
1.12 Conclusion of the Python Overview
2. Numpy and Scipy
2.1 Making vectors and matrices
2.2 Linspace, matrix functions, and plotting
2.3 Matrix operations
2.4 Matrix Solvers
2.5 Example: Finite Differences
2.6 One-Dimensional Harmonic Oscillator using Finite Difference
2.7 Special Functions
2.8 Least squares fitting
2.9 Monte Carlo, random numbers, and computing $\pi$
2.10 Numerical Integration
2.11 Fast Fourier Transform and Signal Processing
3. Intermediate Python
3.1 Parsing data output
3.2 Reading in data files
3.3 More Sophisticated String Formatting and Processing
3.4 Optional arguments of a function
3.5 List Comprehensions and Generators
3.6 Factory Functions
3.7 Serialization: Save it for later
3.8 Functional programming
3.9 Object Oriented Programming
4. Speeding Python: Timeit, Profiling, Cython, SWIG, and PyPy
4.1 Timeit
4.2 Profiling
4.3 Other Ways to Speed Python
4.4 Fun: Finding Primes
5. References
6. Acknowledgements
0. Preliminary
0.1 Why Python?
Python is the programming language of choice for many scientists to a large degree because it offers a great deal of power to analyze and model scientific data with relatively little overhead in terms of learning, installation or development time. It is a language you can pick up in a weekend, and use for the rest of your life.
The Python Tutorial is a great place to start getting a feel for the language. To complement this material, I taught a Python Short Course years ago to a group of computational chemists during a time that I was worried the field was moving too much in the direction of using canned software rather than developing one's own methods. I wanted to focus on what working scientists needed to be more productive: parsing output of other programs, building simple models, experimenting with object oriented programming, extending the language with C, and simple GUIs.
I'm trying to do something very similar here, to cut to the chase and focus on what scientists need. In the last year or so, the IPython Project has put together a notebook interface that I have found incredibly valuable. A large number of people have released very good IPython Notebooks that I have taken a huge amount of pleasure reading through. Some ones that I particularly like include:
Rob Johansson's excellent notebooks, including Scientific Computing with Python and Computational Quantum Physics with QuTiP lectures;
XKCD style graphs in matplotlib;
A collection of Notebooks for using IPython effectively
A gallery of interesting IPython Notebooks
I find IPython notebooks an easy way both to get important work done in my everyday job, as well as to communicate what I've done, how I've done it, and why it matters to my coworkers. I find myself endlessly sweeping the IPython subreddit hoping someone will post a new notebook. In the interest of putting more notebooks out into the wild for other people to use and enjoy, I thought I would try to recreate some of what I was trying to get across in the original Python Short Course, updated by 15 years of Python, Numpy, Scipy, Matplotlib, and IPython development, as well as my own experience in using Python almost every day of this time.
IPython notebooks are now called Jupyter notebooks.
0.2 What You Need to Install
There are two branches of current releases in Python: the older-syntax Python 2, and the newer-syntax Python 3. This schizophrenia is largely intentional: when it became clear that some non-backwards-compatible changes to the language were necessary, the Python dev-team decided to go through a five-year (or so) transition, during which the new language features would be introduced and the old language was still actively maintained, to make such a transition as easy as possible. We're now (2016) past the halfway point, and people are moving to python 3.
These notes are written with Python 3 in mind.
If you are new to python, try installing Anaconda Python 3.5 (supported by Continuum) and you will automatically have all libraries installed with your distribution. These notes assume you have a Python distribution that includes:
Python version 3;
Numpy, the core numerical extensions for linear algebra and multidimensional arrays;
Scipy, additional libraries for scientific programming;
Matplotlib, excellent plotting and graphing libraries;
IPython, with the additional libraries required for the notebook interface.
Here are some other options for various ways to run python:
Continuum supports a bundled, multiplatform Python package called Anaconda
Enthought Python Distribution, also known as EPD. You can either purchase a license to use EPD, or there is also a free version that you can download and install.
Linux Most distributions have an installation manager. Redhat has yum, Ubuntu has apt-get. To my knowledge, all of these packages should be available through those installers.
Mac I use Macports, which has up-to-date versions of all of these packages.
Windows The PythonXY package has everything you need: install the package, then go to Start > PythonXY > Command Prompts > IPython notebook server.
Cloud This notebook is currently running on the IPython notebook viewer, which allows the notebook to be viewed but not interactively.
1. Python Overview
This is a quick introduction to Python. There are lots of other places to learn the language more thoroughly. I have collected a list of useful links, including ones to other learning resources, at the end of this notebook. If you want a little more depth, Python Tutorial is a great place to start, as is Zed Shaw's Learn Python the Hard Way.
The lessons that follow make use of the IPython notebooks. There's a good introduction to notebooks in the IPython notebook documentation that even has a nice video on how to use the notebooks. You should probably also flip through the IPython tutorial in your copious free time.
Briefly, notebooks have code cells (that are generally followed by result cells) and text cells. The text cells are the stuff that you're reading now. The code cells start with "In []:" with some number generally in the brackets. If you put your cursor in the code cell and hit Shift-Enter, the code will run in the Python interpreter and the result will print out in the output cell. You can then change things around and see whether you understand what's going on. If you need to know more, see the IPython notebook documentation or the IPython tutorial.
1.1 Using Python as a Calculator
Many of the things I used to use a calculator for, I now use Python for:
End of explanation
7/33
Explanation: (If you're typing this into an IPython notebook, or otherwise working in a notebook file, you hit Shift-Enter to evaluate a cell.)
There are some gotchas compared to using a normal calculator.
End of explanation
from math import sqrt
sqrt(81)
Explanation: There used to be gotchas in division in Python 2, which behaved like C or Fortran integer division: dividing two integers truncated the remainder and returned an integer. In version 3, Python returns a floating point number. If for some reason you are using Python 2, you can fix this by importing the new division behavior from the __future__ module:
from __future__ import division
In the last few lines, we have sped by a lot of things that we should stop for a moment and explore a little more fully. We've seen, however briefly, two different data types: integers, also known as whole numbers to the non-programming world, and floating point numbers, also known (incorrectly) as decimal numbers to the rest of the world.
We've also seen the first instance of an import statement. Python has a huge number of libraries included with the distribution. To keep things simple, most of these variables and functions are not accessible from a normal Python interactive session. Instead, you have to import the name. For example, there is a math module containing many useful functions. To access, say, the square root function, you can either first import the sqrt function from the math library:
End of explanation
import math
math.sqrt(81)
Explanation: or you can simply import the math library itself
End of explanation
width = 20
length = 30
area = length*width
area
Explanation: You can define variables using the equals (=) sign:
End of explanation
volume
Explanation: If you try to access a variable that you haven't yet defined, you get an error:
End of explanation
depth = 10
volume = area*depth
volume
Explanation: and you need to define it:
End of explanation
return = 0
Explanation: You can name a variable almost anything you want. It needs to start with an alphabetic character or "_", and can contain alphanumeric characters plus underscores ("_"). Certain words, however, are reserved for the language:
and, as, assert, break, class, continue, def, del, elif, else, except,
exec, finally, for, from, global, if, import, in, is, lambda, not, or,
pass, print, raise, return, try, while, with, yield
Trying to define a variable using one of these will result in a syntax error:
End of explanation
'Hello, World!'
Explanation: The Python Tutorial has more on using Python as an interactive shell. The IPython tutorial makes a nice complement to this, since IPython has a much more sophisticated interactive shell.
1.2 Strings
Strings are lists of printable characters, and can be defined using either single quotes
End of explanation
"Hello, World!"
Explanation: or double quotes
End of explanation
"He's a Rebel"
'She asked, "How are you today?"'
Explanation: But not both at the same time, unless you want one of the symbols to be part of the string.
End of explanation
greeting = "Hello, World!"
Explanation: Just like the other two data objects we're familiar with (ints and floats), you can assign a string to a variable
End of explanation
print(greeting)
Explanation: The print statement is often used for printing character strings:
End of explanation
print("The area is ",area)
Explanation: But it can also print data types other than strings:
End of explanation
statement = "Hello," + "World!"
print(statement)
Explanation: In the above snippet, the number 600 (stored in the variable "area") is converted into a string before being printed out.
You can use the + operator to concatenate strings together:
End of explanation
statement = "Hello, " + "World!"
print(statement)
Explanation: Don't forget the space between the strings, if you want one there.
End of explanation
print( "This " + "is " + "a " + "longer " + "statement.")
Explanation: You can use + to concatenate multiple strings in a single statement:
End of explanation
days_of_the_week = ["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]
Explanation: If you have a lot of words to concatenate together, there are other, more efficient ways to do this. But this is fine for linking a few strings together.
1.3 Lists
Very often in a programming language, one wants to keep a group of similar items together. Python does this using a data type called lists.
End of explanation
days_of_the_week[2]
Explanation: You can access members of the list using the index of that item:
End of explanation
days_of_the_week[-1]
Explanation: Python lists, like C, but unlike Fortran, use 0 as the index of the first element of a list. Thus, in this example, the 0 element is "Sunday", 1 is "Monday", and so on. If you need to access the nth element from the end of the list, you can use a negative index. For example, the -1 element of a list is the last element:
End of explanation
languages = ["Fortran","C","C++"]
languages.append("Python")
print(languages)
Explanation: You can add additional items to the list using the .append() command:
End of explanation
list(range(10))
Explanation: The range() command is a convenient way to make sequential lists of numbers:
End of explanation
list(range(2,8))
Explanation: Note that range(n) starts at 0 and gives the sequential list of integers less than n. If you want to start at a different number, use range(start,stop)
End of explanation
evens = list(range(0,20,2))
evens
evens[3]
Explanation: The lists created above with range have a step of 1 between elements. You can also give a fixed step size via a third argument:
End of explanation
["Today",7,99.3,""]
Explanation: Lists do not have to hold the same data type. For example,
End of explanation
help(len)
len(evens)
Explanation: However, it's good (but not essential) to use lists for similar objects that are somehow logically connected. If you want to group different data types together into a composite data object, it's best to use tuples, which we will learn about below.
You can find out how long a list is using the len() command:
End of explanation
for day in days_of_the_week:
print(day)
Explanation: 1.4 Iteration, Indentation, and Blocks
One of the most useful things you can do with lists is to iterate through them, i.e. to go through each element one at a time. To do this in Python, we use the for statement:
End of explanation
for day in days_of_the_week:
statement = "Today is " + day
print(statement)
Explanation: This code snippet goes through each element of the list called days_of_the_week and assigns it to the variable day. It then executes everything in the indented block (in this case only one line of code, the print statement) using those variable assignments. When the program has gone through every element of the list, it exits the block.
(Almost) every programming language defines blocks of code in some way. In Fortran, one uses END statements (ENDDO, ENDIF, etc.) to define code blocks. In C, C++, and Perl, one uses curly braces {} to define these blocks.
Python uses a colon (":"), followed by indentation level to define code blocks. Everything at a higher level of indentation is taken to be in the same block. In the above example the block was only a single line, but we could have had longer blocks as well:
End of explanation
for i in range(20):
print("The square of ",i," is ",i*i)
Explanation: The range() command is particularly useful with the for statement to execute loops of a specified length:
End of explanation
for letter in "Sunday":
print(letter)
Explanation: 1.5 Slicing
Lists and strings have something in common that you might not suspect: they can both be treated as sequences. You already know that you can iterate through the elements of a list. You can also iterate through the letters in a string:
End of explanation
days_of_the_week[0]
Explanation: This is only occasionally useful. Slightly more useful is the slicing operation, which you can also use on any sequence. We already know that we can use indexing to get the first element of a list:
End of explanation
days_of_the_week[0:2]
Explanation: If we want the list containing the first two elements of a list, we can do this via
End of explanation
days_of_the_week[:2]
Explanation: or simply
End of explanation
days_of_the_week[-2:]
Explanation: If we want the last items of the list, we can do this with negative slicing:
End of explanation
workdays = days_of_the_week[1:5]
print(workdays)
Explanation: which is somewhat logically consistent with negative indices accessing the last elements of the list.
You can also slice out a sub-list from the middle of a list:
End of explanation
day = "Saturday"
abbreviation = day[:3]
print(abbreviation)
Explanation: Since strings are sequences, you can also do this to them:
End of explanation
numbers = list(range(0,21))
evens = numbers[6::2]
evens
Explanation: If we really want to get fancy, we can pass a third element into the slice, which specifies a step length (just like a third argument to the range() function specifies the step):
End of explanation
if day == "Sunday":
print("Sleep in")
else:
print("Go to work")
Explanation: Note that in this example we omitted the first few arguments, so that the slice started at 6, went to the end of the list, and took every second element, to generate the even numbers from 6 up to 20 (the last element in the original list).
1.6 Booleans and Truth Testing
We have now learned a few data types: integers, floating point numbers, and strings. We have also learned about lists, a container that can hold any data type.
We invariably need some concept of conditions in programming to control branching behavior, to allow a program to react differently to different situations. If it's Monday, I'll go to work, but if it's Sunday, I'll sleep in. To do this in Python, we use a combination of boolean variables, which evaluate to either True or False, and if statements, that control branching based on boolean values.
For example:
End of explanation
day == "Sunday"
Explanation: (Quick quiz: why did the snippet print "Go to work" here? What is the variable "day" set to?)
Let's take the snippet apart to see what happened. First, note the statement
End of explanation
1 == 2
50 == 2*25
3 < 3.14159
1 == 1.0
1 != 0
1 <= 2
1 >= 1
Explanation: If we evaluate it by itself, as we just did, we see that it returns a boolean value, False. The "==" operator performs equality testing. If the two items are equal, it returns True, otherwise it returns False. In this case, it is comparing two variables, the string "Sunday", and whatever is stored in the variable "day", which, in this case, is the other string "Saturday". Since the two strings are not equal to each other, the truth test has the false value.
The if statement that contains the truth test is followed by a code block (a colon followed by an indented block of code). If the boolean is true, it executes the code in that block. Since it is false in the above example, we don't see that code executed.
The first block of code is followed by an else statement, which is executed if nothing else in the above if statement is true. Since the value was false, this code is executed, which is why we see "Go to work".
Try setting the day equal to "Sunday" and then running the above if/else statement. Did it work as you thought it would?
You can compare any data types in Python:
End of explanation
1 is 1.0
Explanation: We see a few other boolean operators here, all of which which should be self-explanatory. Less than, equality, non-equality, and so on.
Particularly interesting is the 1 == 1.0 test, which is true, since even though the two objects are different data types (integer and floating point number), they have the same value. There is another boolean operator is, that tests whether two objects are the same object:
End of explanation
type(1)
type(1.0)
Explanation: Why is 1 not the same as 1.0? Different data type. You can check the data type:
End of explanation
[1,2,3] == [1,2,4]
Explanation: We can do boolean tests on lists as well:
End of explanation
hours = 5
0 < hours < 24
Explanation: Finally, note that you can also string multiple comparisons together, which can result in very intuitive tests:
End of explanation
if day == "Sunday":
print ("Sleep in")
elif day == "Saturday":
print ("Do chores")
else:
print ("Go to work")
Explanation: If statements can have elif parts ("else if"), in addition to if/else parts. For example:
End of explanation
for day in days_of_the_week:
statement = "On " + day + ":"
print (statement)
if day == "Sunday":
print (" Sleep in")
elif day == "Saturday":
print (" Do chores")
else:
print (" Go to work")
Explanation: Of course we can combine if statements with for loops, to make a snippet that is almost interesting:
End of explanation
bool(1)
bool(0)
bool(["This "," is "," a "," list"])
Explanation: This is something of an advanced topic, but ordinary data types have boolean values associated with them, and, indeed, in early versions of Python there was not a separate boolean object. Essentially, anything that was a 0 value (the integer or floating point 0, an empty string "", or an empty list []) was False, and everything else was true. You can see the boolean value of any data object using the bool() function.
End of explanation
n = 10
sequence = [0,1]
for i in range(2,n): # This is going to be a problem if we ever set n <= 2!
sequence.append(sequence[i-1]+sequence[i-2])
print (sequence)
Explanation: 1.7 Code Example: The Fibonacci Sequence
The Fibonacci sequence is a sequence in math that starts with 0 and 1, and then each successive entry is the sum of the previous two. Thus, the sequence goes 0,1,1,2,3,5,8,13,21,34,55,89,...
A very common exercise in programming books is to compute the Fibonacci sequence up to some number n. First I'll show the code, then I'll discuss what it is doing.
End of explanation
def fibonacci(sequence_length):
"Return the Fibonacci sequence of length *sequence_length*"
sequence = [0,1]
if sequence_length < 1:
print("Fibonacci sequence only defined for length 1 or greater")
return
if 0 < sequence_length < 3:
return sequence[:sequence_length]
for i in range(2,sequence_length):
sequence.append(sequence[i-1]+sequence[i-2])
return sequence
Explanation: Let's go through this line by line. First, we define the variable n, and set it to the integer 10. n is the length of the sequence we're going to form, and should probably have a better variable name. We then create a variable called sequence, and initialize it to the list with the integers 0 and 1 in it, the first two elements of the Fibonacci sequence. We have to create these elements "by hand", since the iterative part of the sequence requires two previous elements.
We then have a for loop over the list of integers from 2 (the next element of the list) to n (the length of the sequence). After the colon, we see a hash mark "#", and then a comment that if we had set n to some number less than 2 we would have a problem. Comments in Python start with #, and are good ways to make notes to yourself or to a user of your code explaining why you did what you did. Better than the comment here would be to test to make sure the value of n is valid, and to complain if it isn't; we'll try this later.
In the body of the loop, we append to the list an integer equal to the sum of the two previous elements of the list.
After exiting the loop (ending the indentation) we then print out the whole list. That's it!
1.8 Functions
We might want to use the Fibonacci snippet with different sequence lengths. We could cut and paste the code into another cell, changing the value of n, but it's easier and more useful to make a function out of the code. We do this with the def statement in Python:
End of explanation
fibonacci(2)
fibonacci(12)
Explanation: We can now call fibonacci() for different sequence_lengths:
End of explanation
help(fibonacci)
Explanation: We've introduced several new features here. First, note that the function itself is defined as a code block (a colon followed by an indented block). This is the standard way that Python delimits things. Next, note that the first line of the function is a single string. This is called a docstring, and is a special kind of comment that is often available to people using the function through the python command line:
End of explanation
from math import factorial
help(factorial)
Explanation: If you define a docstring for all of your functions, it makes it easier for other people to use them, since they can get help on the arguments and return values of the function.
Next, note that rather than putting a comment in about what input values lead to errors, we have some testing of these values, followed by a warning if the value is invalid, and some conditional code to handle special cases.
1.9 Recursion and Factorials
Functions can also call themselves, something that is often called recursion. We're going to experiment with recursion by computing the factorial function. The factorial is defined for a positive integer n as
$$ n! = n(n-1)(n-2)\cdots 1 $$
First, note that we don't need to write a function at all, since this is a function built into the standard math library. Let's use the help function to find out about it:
End of explanation
factorial(20)
Explanation: This is clearly what we want.
End of explanation
def fact(n):
if n <= 0:
return 1
return n*fact(n-1)
fact(20)
Explanation: However, if we did want to write a function ourselves, we could do recursively by noting that
$$ n! = n(n-1)!$$
The program then looks something like:
End of explanation
t = (1,2,'hi',9.0)
t
Explanation: Recursion can be very elegant, and can lead to very simple programs.
1.10 Two More Data Structures: Tuples and Dictionaries
Before we end the Python overview, I wanted to touch on two more data structures that are very useful (and thus very common) in Python programs.
A tuple is a sequence object like a list or a string. It's constructed by grouping a sequence of objects together with commas, either without brackets, or with parentheses:
End of explanation
t[1]
Explanation: Tuples are like lists, in that you can access the elements using indices:
End of explanation
t.append(7)
t[1]=77
Explanation: However, tuples are immutable, you can't append to them or change the elements of them:
End of explanation
('Bob',0.0,21.0)
Explanation: Tuples are useful anytime you want to group different pieces of data together in an object, but don't want to create a full-fledged class (see below) for them. For example, let's say you want the Cartesian coordinates of some objects in your program. Tuples are a good way to do this:
End of explanation
positions = [
('Bob',0.0,21.0),
('Cat',2.5,13.1),
('Dog',33.0,1.2)
]
Explanation: Again, it's not a necessary distinction, but one way to distinguish tuples and lists is that tuples are a collection of different things, here a name, and x and y coordinates, whereas a list is a collection of similar things, like if we wanted a list of those coordinates:
End of explanation
def minmax(objects):
minx = 1e20 # These are set to really big numbers
miny = 1e20
for obj in objects:
name,x,y = obj
if x < minx:
minx = x
if y < miny:
miny = y
return minx,miny
x,y = minmax(positions)
print(x,y)
Explanation: Tuples can be used when functions return more than one value. Say we wanted to compute the smallest x- and y-coordinates of the above list of objects. We could write:
End of explanation
x,y = 1,2
y,x = x,y
x,y
Explanation: Here we did two things with tuples you haven't seen before. First, we unpacked an object into a set of named variables using tuple assignment:
>>> name,x,y = obj
We also returned multiple values (minx,miny), which were then assigned to two other variables (x,y), again by tuple assignment. This makes what would have been complicated code in C++ rather simple.
Tuple assignment is also a convenient way to swap variables:
End of explanation
mylist = [1,2,9,21]
Explanation: Dictionaries are an object called "mappings" or "associative arrays" in other languages. Whereas a list associates an integer index with a set of objects:
End of explanation
ages = {"Rick": 46, "Bob": 86, "Fred": 21}
print("Rick's age is ",ages["Rick"])
Explanation: The index in a dictionary is called the key, and the corresponding dictionary entry is the value. A dictionary can use (almost) anything as the key. Whereas lists are formed with square brackets [], dictionaries use curly brackets {}:
End of explanation
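A few other everyday dictionary operations are worth a quick supplementary sketch (this is an addition, not part of the original notebook):

ages = {"Rick": 46, "Bob": 86, "Fred": 21}   # same dictionary as above
ages["Ingrid"] = 32                          # add or overwrite an entry
print("Fred" in ages)                        # membership tests look at the keys
print(ages.get("Zoe", 0))                    # .get() returns a default instead of raising an error
for name, age in ages.items():               # iterate over key/value pairs
    print(name, age)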
dict(Rick=46,Bob=86,Fred=20)
Explanation: There's also a convenient way to create dictionaries without having to quote the keys.
End of explanation
len(t)
len(ages)
Explanation: Notice that in either case you are not choosing the ordering -- in the Python version used here (3.5), a dictionary stores its keys in an arbitrary order, so don't rely on how they happen to print. (Insertion order is only guaranteed from Python 3.7 on.)
The len() command works on both tuples and dictionaries:
End of explanation
import matplotlib.pyplot as plt
%matplotlib inline
Explanation: 1.11 Plotting with Matplotlib
We can generally understand trends in data by using a plotting program to chart it. Python has a wonderful plotting library called Matplotlib. The Jupyter notebook interface we are using for these notes has that functionality built in.
First off, it is important to import the library. We did this at the very beginning of this whole jupyter notebook, but here it is in case you've jumped straight here without running the first code line:
End of explanation
fibs = fibonacci(10)
Explanation: The %matplotlib inline command makes it so plots are within this notebook. To plot to a separate window, use instead:
%matplotlib qt
As an example of plotting, we have looked at two different functions, the Fibonacci function, and the factorial function, both of which grow faster than polynomially. Which one grows the fastest? Let's plot them. First, let's generate the Fibonacci sequence of length 10:
End of explanation
facts = []
for i in range(10):
facts.append(factorial(i))
Explanation: Next let's generate the factorials.
End of explanation
plt.plot(facts,'-ob',label="factorial")
plt.plot(fibs,'-dg',label="Fibonacci")
plt.xlabel("n")
plt.legend()
Explanation: Now we use the Matplotlib function plot to compare the two.
End of explanation
plt.semilogy(facts,label="factorial")
plt.semilogy(fibs,label="Fibonacci")
plt.xlabel("n")
plt.legend()
Explanation: The factorial function grows much faster. In fact, you can't even see the Fibonacci sequence. It's not entirely surprising: the factorial multiplies by n at each step, whereas each Fibonacci number is only the sum of the two previous terms, so it grows by roughly a constant factor (the golden ratio, about 1.6) per step.
Let's plot these on a semilog plot so we can see them both a little more clearly:
End of explanation
import this
Explanation: There are many more things you can do with Matplotlib. We'll be looking at some of them in the sections to come. In the meantime, if you want an idea of the different things you can do, look at the Matplotlib Gallery. Rob Johansson's IPython notebook Introduction to Matplotlib is also particularly good.
1.12 Conclusion of the Python Overview
There is, of course, much more to the language than we've covered here. I've tried to keep this brief enough so that you can jump in and start using Python to simplify your life and work. My own experience in learning new things is that the information doesn't "stick" unless you try and use it for something in real life.
You will no doubt need to learn more as you go. I've listed several other good references, including the Python Tutorial and Learn Python the Hard Way. Additionally, now is a good time to start familiarizing yourself with the Python Documentation, and, in particular, the Python Language Reference.
Tim Peters, one of the earliest and most prolific Python contributors, wrote the "Zen of Python", which can be accessed via the "import this" command:
End of explanation
import numpy as np
Explanation: No matter how experienced a programmer you are, these are words to meditate on.
2. Numpy and Scipy
Numpy contains core routines for doing fast vector, matrix, and linear algebra-type operations in Python. Scipy contains additional routines for optimization, special functions, and so on. Both contain modules written in C and Fortran so that they're as fast as possible. Together, they give Python roughly the same capability that the Matlab program offers. (In fact, if you're an experienced Matlab user, there a guide to Numpy for Matlab users just for you.)
First off, it is important to import the library. Again, we did this at the very beginning of this whole jupyter notebook, but here it is in case you've jumped straight here without running the first code line:
End of explanation
np.array([1,2,3,4,5,6])
Explanation: 2.1 Making vectors and matrices
Fundamental to both Numpy and Scipy is the ability to work with vectors and matrices. You can create vectors from lists using the array command:
End of explanation
np.array([1,2,3,4,5,6],'d')
np.array([1,2,3,4,5,6],'D')
np.array([1,2,3,4,5,6],'i')
Explanation: You can pass in a second argument to array that gives the numeric type. There are a number of types listed here that your matrix can be. Some of these are aliased to single character codes. The most common ones are 'd' (double precision floating point number), 'D' (double precision complex number), and 'i' (int32). Thus,
End of explanation
np.array([[0,1],[1,0]],'d')
Explanation: To build matrices, you can either use the array command with lists of lists:
End of explanation
np.zeros((3,3),'d')
Explanation: You can also form empty (zero) matrices of arbitrary shape (including vectors, which Numpy treats as one-dimensional arrays rather than as matrices with a single row), using the zeros command:
End of explanation
np.zeros(3,'d')
np.zeros((1,3),'d')
Explanation: The first argument is a tuple containing the shape of the matrix, and the second is the data type argument, which follows the same conventions as in the array command. Thus, you can make row vectors:
End of explanation
np.zeros((3,1),'d')
Explanation: or column vectors:
End of explanation
np.identity(4,'d')
Explanation: There's also an identity command that behaves as you'd expect:
End of explanation
np.linspace(0,1)
Explanation: as well as a ones command.
2.2 Linspace, matrix functions, and plotting
The linspace command makes a linear array of points from a starting to an ending value.
End of explanation
np.linspace(0,1,11)
Explanation: If you provide a third argument, it takes that as the number of points in the space. If you don't provide the argument, it gives a length 50 linear space.
End of explanation
x = np.linspace(0,2*np.pi)
np.sin(x)
Explanation: linspace is an easy way to make coordinates for plotting. Functions in the numpy library (accessed here through the np prefix) can act on an entire vector (or even a matrix) of points at once. Thus,
End of explanation
plt.plot(x,np.sin(x))
plt.show()
Explanation: In conjunction with matplotlib, this is a nice way to plot things:
End of explanation
0.125*np.identity(3,'d')
Explanation: 2.3 Matrix operations
Matrix objects act sensibly when multiplied by scalars:
End of explanation
np.identity(2,'d') + np.array([[1,1],[1,2]])
Explanation: as well as when you add two matrices together. (However, the matrices have to be the same shape.)
End of explanation
np.identity(2)*np.ones((2,2))
Explanation: Something that confuses Matlab users is that the times (*) operator gives element-wise multiplication rather than matrix multiplication:
End of explanation
np.dot(np.identity(2),np.ones((2,2)))
Explanation: To get matrix multiplication, you need the dot command:
End of explanation
v = np.array([3,4],'d')
np.sqrt(np.dot(v,v))
Explanation: dot can also do dot products (duh!):
End of explanation
m = np.array([[1,2],[3,4]])
m.T
Explanation: as well as matrix-vector products.
There are determinant, inverse, and transpose functions that act as you would suppose. Transpose can be abbreviated with ".T" at the end of a matrix object:
End of explanation
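To complement the transpose, here is a short sketch of the determinant and inverse routines mentioned above; they live in np.linalg:

m = np.array([[1, 2], [3, 4]], 'd')
print(np.linalg.det(m))       # -2.0 for this matrix
minv = np.linalg.inv(m)
print(np.dot(m, minv))        # recovers the identity, up to round-off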
np.diag([1,2,3,4,5])
Explanation: There's also a diag() function that takes a list or a vector and puts it along the diagonal of a square matrix.
End of explanation
A = np.array([[1,1,1],[0,2,5],[2,5,-1]])
b = np.array([6,-4,27])
np.linalg.solve(A,b)
Explanation: We'll find this useful later on.
2.4 Matrix Solvers
You can solve systems of linear equations using the solve command in the linear algebra toolbox of the numpy library:
End of explanation
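It is good practice to check a solution by substituting it back in; a short sketch using the same A and b:

A = np.array([[1, 1, 1], [0, 2, 5], [2, 5, -1]])   # same system as above
b = np.array([6, -4, 27])
x = np.linalg.solve(A, b)
print(np.allclose(np.dot(A, x), b))                # True if A x reproduces b to numerical precision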
A = np.array([[13,-4],[-4,7]],'d')
np.linalg.eigvalsh(A)
np.linalg.eigh(A)
Explanation: There are a number of routines to compute eigenvalues and eigenvectors
eigvals returns the eigenvalues of a matrix
eigvalsh returns the eigenvalues of a Hermitian matrix
eig returns the eigenvalues and eigenvectors of a matrix
eigh returns the eigenvalues and eigenvectors of a Hermitian matrix.
End of explanation
def nderiv(y,x):
"Finite difference derivative of the function f"
n = len(y)
d = np.zeros(n,'d') # assume double
# Use centered differences for the interior points, one-sided differences for the ends
for i in range(1,n-1):
        d[i] = (y[i+1]-y[i-1])/(x[i+1]-x[i-1])
d[0] = (y[1]-y[0])/(x[1]-x[0])
d[n-1] = (y[n-1]-y[n-2])/(x[n-1]-x[n-2])
return d
Explanation: 2.5 Example: Finite Differences
Now that we have these tools in our toolbox, we can start to do some cool stuff with it. Many of the equations we want to solve in Physics involve differential equations. We want to be able to compute the derivative of functions:
$$ y' = \frac{y(x+h)-y(x)}{h} $$
by discretizing the function $y(x)$ on an evenly spaced set of points $x_0, x_1, \dots, x_n$, yielding $y_0, y_1, \dots, y_n$. Using the discretization, we can approximate the derivative by
$$ y_i' \approx \frac{y_{i+1}-y_{i-1}}{x_{i+1}-x_{i-1}} $$
We can write a derivative function in Python via
End of explanation
x = np.linspace(0,2*np.pi)
dsin = nderiv(np.sin(x),x)
plt.plot(x,dsin,label='numerical')
plt.plot(x,np.cos(x),label='analytical')
plt.title("Comparison of numerical and analytical derivatives of sin(x)")
plt.legend()
Explanation: Let's see whether this works for our sin example from above:
End of explanation
def Laplacian(x):
h = x[1]-x[0] # assume uniformly spaced points
n = len(x)
M = -2*np.identity(n,'d')
for i in range(1,n):
M[i,i-1] = M[i-1,i] = 1
return M/h**2
x = np.linspace(-3,3)
m = 1.0
ohm = 1.0
T = (-0.5/m)*Laplacian(x)
V = 0.5*(ohm**2)*(x**2)
H = T + np.diag(V)
E,U = np.linalg.eigh(H)
h = x[1]-x[0]
# Plot the Harmonic potential
plt.plot(x,V,color='k')
for i in range(4):
# For each of the first few solutions, plot the energy level:
plt.axhline(y=E[i],color='k',ls=":")
# as well as the eigenfunction, displaced by the energy level so they don't
# all pile up on each other:
plt.plot(x,-U[:,i]/np.sqrt(h)+E[i])
plt.title("Eigenfunctions of the Quantum Harmonic Oscillator")
plt.xlabel("Displacement (bohr)")
plt.ylabel("Energy (hartree)")
Explanation: Pretty close!
2.6 One-Dimensional Harmonic Oscillator using Finite Difference
Now that we've convinced ourselves that finite differences aren't a terrible approximation, let's see if we can use this to solve the one-dimensional harmonic oscillator.
We want to solve the time-independent Schrodinger equation
$$ -\frac{\hbar^2}{2m}\frac{\partial^2\psi(x)}{\partial x^2} + V(x)\psi(x) = E\psi(x)$$
for $\psi(x)$ when $V(x)=\frac{1}{2}m\omega^2x^2$ is the harmonic oscillator potential. We're going to use the standard trick to transform the differential equation into a matrix equation by multiplying both sides by $\psi^*(x)$ and integrating over $x$. This yields
$$ -\frac{\hbar^2}{2m}\int\psi(x)\frac{\partial^2}{\partial x^2}\psi(x)dx + \int\psi(x)V(x)\psi(x)dx = E$$
We will again use the finite difference approximation. The finite difference formula for the second derivative is
$$ y_i'' \approx \frac{y_{i+1}-2y_i+y_{i-1}}{h^2} $$ for a uniform grid spacing $h = x_{i+1}-x_i$.
We can think of the first term in the Schrodinger equation as the overlap of the wave function $\psi(x)$ with the second derivative of the wave function $\frac{\partial^2}{\partial x^2}\psi(x)$. Given the above expression for the second derivative, we can see if we take the overlap of the states $y_1,\dots,y_n$ with the second derivative, we will only have three points where the overlap is nonzero, at $y_{i-1}$, $y_i$, and $y_{i+1}$. In matrix form, this leads to the tridiagonal Laplacian matrix, which has -2's along the diagonals, and 1's along the diagonals above and below the main diagonal.
The second term leads to a diagonal matrix with $V(x_i)$ on the diagonal elements. Putting all of these pieces together, we get:
End of explanation
from numpy.polynomial.hermite import Hermite
def ho_evec(x,n,m,ohm):
vec = [0]*9
vec[n] = 1
Hn = Hermite(vec)
return (1/np.sqrt(2**n*factorial(n)))*pow(m*ohm/np.pi,0.25)*np.exp(-0.5*m*ohm*x**2)*Hn(x*np.sqrt(m*ohm))
Explanation: We've made a couple of hacks here to get the orbitals the way we want them. First, I inserted a -1 factor before the wave functions, to fix the phase of the lowest state. The phase (sign) of a quantum wave function doesn't hold any information, only the square of the wave function does, so this doesn't really change anything.
But the eigenfunctions as we generate them aren't properly normalized. The reason is that finite difference isn't a real basis in the quantum mechanical sense. It's a basis of Dirac δ functions at each point; we interpret the space between the points as being "filled" by the wave function, but the finite difference basis only has the solution at the points themselves. We can fix this by dividing the eigenfunctions of our finite difference Hamiltonian by the square root of the spacing, and this gives properly normalized functions.
2.7 Special Functions
The solutions to the Harmonic Oscillator are supposed to be Hermite polynomials. The Wikipedia page has the HO states given by
$$\psi_n(x) = \frac{1}{\sqrt{2^n n!}}
\left(\frac{m\omega}{\pi\hbar}\right)^{1/4}
\exp\left(-\frac{m\omega x^2}{2\hbar}\right)
H_n\left(\sqrt{\frac{m\omega}{\hbar}}x\right)$$
Let's see whether they look like those. There are some special functions in the Numpy library, and some more in Scipy. Hermite Polynomials are in Numpy:
End of explanation
plt.plot(x,ho_evec(x,0,1,1),label="Analytic")
plt.plot(x,-U[:,0]/np.sqrt(h),label="Numeric")
plt.xlabel('x (bohr)')
plt.ylabel(r'$\psi(x)$')
plt.title("Comparison of numeric and analytic solutions to the Harmonic Oscillator")
plt.legend()
Explanation: Let's compare the first function to our solution.
End of explanation
phase_correction = [-1,1,1,-1,-1,1]
for i in range(6):
plt.subplot(2,3,i+1)
plt.plot(x,ho_evec(x,i,1,1),label="Analytic")
plt.plot(x,phase_correction[i]*U[:,i]/np.sqrt(h),label="Numeric")
Explanation: The agreement is almost exact.
We can use the subplot command to put multiple comparisons in different panes on a single plot (run %matplotlib qt on a separate line first to plot in a separate window):
End of explanation
from scipy.special import airy,jn,eval_chebyt,eval_legendre
plt.subplot(2,2,1)
x = np.linspace(-1,1)
Ai,Aip,Bi,Bip = airy(x)
plt.plot(x,Ai)
plt.plot(x,Aip)
plt.plot(x,Bi)
plt.plot(x,Bip)
plt.title("Airy functions")
plt.subplot(2,2,2)
x = np.linspace(0,10)
for i in range(4):
plt.plot(x,jn(i,x))
plt.title("Bessel functions")
plt.subplot(2,2,3)
x = np.linspace(-1,1)
for i in range(6):
plt.plot(x,eval_chebyt(i,x))
plt.title("Chebyshev polynomials of the first kind")
plt.subplot(2,2,4)
x = np.linspace(-1,1)
for i in range(6):
plt.plot(x,eval_legendre(i,x))
plt.title("Legendre polynomials")
# plt.tight_layout()
plt.show()
Explanation: Other than phase errors (which I've corrected with a little hack: can you find it?), the agreement is pretty good, although it gets worse the higher in energy we get, in part because we used only 50 points.
The Scipy module has many more special functions:
End of explanation
raw_data = """\
3.1905781584582433,0.028208609537968457
4.346895074946466,0.007160804747670053
5.374732334047101,0.0046962988461934805
8.201284796573875,0.0004614473299618756
10.899357601713055,0.00005038370219939726
16.295503211991434,4.377451812785309e-7
21.82012847965739,3.0799922117601088e-9
32.48394004282656,1.524776208284536e-13
43.53319057815846,5.5012073588707224e-18
"""
Explanation: As well as Jacobi, Laguerre, Hermite polynomials, Hypergeometric functions, and many others. There's a full listing at the Scipy Special Functions Page.
2.8 Least squares fitting
Very often we deal with some data that we want to fit to some sort of expected behavior. Say we have the following:
End of explanation
data = []
for line in raw_data.splitlines():
words = line.split(',')
data.append(list(map(float,words)))
data = np.array(data)
plt.title("Raw Data")
plt.xlabel("Distance")
plt.plot(data[:,0],data[:,1],'bo')
Explanation: There's a section below on parsing CSV data. We'll steal the parser from that. For an explanation, skip ahead to that section. Otherwise, just assume that this is a way to parse that text into a numpy array that we can plot and do other analyses with.
End of explanation
plt.title("Raw Data")
plt.xlabel("Distance")
plt.semilogy(data[:,0],data[:,1],'bo')
Explanation: Since we expect the data to have an exponential decay, we can plot it using a semi-log plot.
End of explanation
params = np.polyfit(data[:,0],np.log(data[:,1]),1)
a = params[0] # the coefficient of x**1
logA = params[1] # the coefficient of x**0
# plot if curious:
# plt.plot(data[:,0],np.log(data[:,1]),'bo')
# plt.plot(data[:,0],data[:,0]*a+logA,'r')
Explanation: For a pure exponential decay like this, we can fit the log of the data to a straight line. The above plot suggests this is a good approximation. Given a function
$$ y = Ae^{ax} $$
$$ \log(y) = ax + \log(A) $$
Thus, if we fit the log of the data versus x, we should get a straight line with slope $a$, and an intercept that gives the constant $A$.
There's a numpy function called polyfit that will fit data to a polynomial form. We'll use this to fit to a straight line (a polynomial of order 1)
End of explanation
x = np.linspace(1,45)
plt.title("Raw Data")
plt.xlabel("Distance")
plt.semilogy(data[:,0],data[:,1],'bo',label='data')
plt.semilogy(x,np.exp(logA)*np.exp(a*x),'r-',label='fit')
plt.legend()
Explanation: Let's see whether this curve fits the data.
End of explanation
gauss_data = """\
-0.9902286902286903,1.4065274110372852e-19
-0.7566104566104566,2.2504438576596563e-18
-0.5117810117810118,1.9459459459459454
-0.31887271887271884,10.621621621621626
-0.250997150997151,15.891891891891893
-0.1463309463309464,23.756756756756754
-0.07267267267267263,28.135135135135133
-0.04426734426734419,29.02702702702703
-0.0015939015939017698,29.675675675675677
0.04689304689304685,29.10810810810811
0.0840994840994842,27.324324324324326
0.1700546700546699,22.216216216216214
0.370878570878571,7.540540540540545
0.5338338338338338,1.621621621621618
0.722014322014322,0.08108108108108068
0.9926849926849926,-0.08108108108108646
"""
gdata = []
for line in gauss_data.splitlines():
words = line.split(',')
gdata.append(list(map(float,words)))
gdata = np.array(gdata)
plt.plot(gdata[:,0],gdata[:,1],'bo')
Explanation: If we have more complicated functions, we may not be able to get away with fitting to a simple polynomial. Consider the following data:
End of explanation
def gauss(x,A,a): return A*np.exp(a*x**2)
Explanation: This data looks more Gaussian than exponential. If we wanted to, we could use polyfit for this as well, but let's use the curve_fit function from Scipy, which can fit to arbitrary functions. You can learn more using help(curve_fit).
First define a general Gaussian function to fit to.
End of explanation
from scipy.optimize import curve_fit
params,conv = curve_fit(gauss,gdata[:,0],gdata[:,1])
x = np.linspace(-1,1)
plt.plot(gdata[:,0],gdata[:,1],'bo')
A,a = params
plt.plot(x,gauss(x,A,a),'r-')
Explanation: Now fit to it using curve_fit:
End of explanation
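curve_fit also returns the covariance matrix of the fit (conv in the cell above); a common follow-up, sketched here using those same variables, is to turn its diagonal into one-standard-deviation uncertainties on the fitted parameters:

perr = np.sqrt(np.diag(conv))                # 1-sigma uncertainties for A and a
print("A = %g +/- %g" % (A, perr[0]))
print("a = %g +/- %g" % (a, perr[1]))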
# from random import random
rands = []
for i in range(100):
rands.append(np.random.random())
Explanation: The curve_fit routine we just used is built on top of a very good general minimization capability in Scipy. You can learn more at the scipy documentation pages.
2.9 Monte Carlo, random numbers, and computing $\pi$
Many methods in scientific computing rely on Monte Carlo integration, where a sequence of (pseudo) random numbers are used to approximate the integral of a function. Python has good random number generators in the standard library. The random() function from the numpy library gives pseudorandom numbers uniformly distributed between 0 and 1:
End of explanation
rands = np.random.rand(100)
plt.plot(rands,'o')
Explanation: Or, more elegantly:
End of explanation
mu, sigma = 0, 0.1 # mean and standard deviation
s=np.random.normal(mu, sigma,1000)
Explanation: np.random.random() uses the Mersenne Twister algorithm, which is a highly regarded pseudorandom number generator. There are also functions to generate random integers, to randomly shuffle a list, and functions to pick random numbers from a particular distribution, like the normal distribution:
It is generally more efficient to generate a list of random numbers all at once, particularly if you're drawing from a non-uniform distribution. Numpy has functions to generate vectors and matrices of particular types of random distributions:
End of explanation
count, bins, ignored = plt.hist(s, 30, normed=True)
plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) *
np.exp( - (bins - mu)**2 / (2 * sigma**2) ),
linewidth=2, color='r')
Explanation: We can check the distribution by using the histogram feature, as shown on the help page for numpy.random.normal:
End of explanation
npts = 5000
xs = 2*np.random.rand(npts)-1
ys = 2*np.random.rand(npts)-1
r = xs**2+ys**2
ninside = (r<1).sum()
plt.figure(figsize=(6,6)) # make the figure square
plt.title("Approximation to pi = %f" % (4*ninside/float(npts)))
plt.plot(xs[r<1],ys[r<1],'b.')
plt.plot(xs[r>1],ys[r>1],'r.')
plt.figure(figsize=(8,6)) # change the figsize back to standard size for the rest of the notebook
Explanation: Here's an interesting use of random numbers: compute $\pi$ by taking random numbers as x and y coordinates, and counting how many of them were in the unit circle. For example:
End of explanation
n = 100
total = 0
for k in range(n):
total += pow(-1,k)/(2*k+1.0)
print(4*total)
Explanation: The idea behind the program is that the ratio of the area of the unit circle to the square that inscribes it is $\pi/4$, so by counting the fraction of the random points in the square that are inside the circle, we get increasingly good estimates to $\pi$.
The above code uses some higher level Numpy tricks to compute the squared radius of each point in a single line, to count how many of those points fall inside the unit circle in a single line, and to filter the x,y points based on their radii. To be honest, I rarely write code like this: I find some of these Numpy tricks a little too cute to remember them, and I'm more likely to use a list comprehension (see below) to filter the points I want, since I can remember that.
As methods of computing $\pi$ go, this is among the worst. A much better method is to use Leibniz's expansion of arctan(1):
$$\frac{\pi}{4} = \sum_{k=0}^{\infty} \frac{(-1)^k}{2k+1}$$
End of explanation
def f(x): return np.exp(-x)
x = np.linspace(0,10)
plt.plot(x,np.exp(-x))
Explanation: If you're interested in another great method, check out Ramanujan's method. This converges so fast you really need arbitrary precision math to display enough decimal places. You can do this with the Python decimal module, if you're interested.
2.10 Numerical Integration
Integration can be hard, and sometimes it's easier to work out a definite integral using an approximation. For example, suppose we wanted to figure out the integral:
$$\int_0^\infty\exp(-x)dx$$
(It turns out that this is equal to 1, as you can work out easily with a pencil :) )
End of explanation
from scipy.integrate import quad
quad(f,0,np.inf)
Explanation: Scipy has a numerical integration routine quad (since sometimes numerical integration is called quadrature), that we can use for this:
End of explanation
from scipy.fftpack import fft,fftfreq
npts = 4000
nplot = int(npts/10)
t = np.linspace(0,120,npts)
def sig(t): return 50*np.sin(2*np.pi*2.0*t) + 20*np.sin(2*np.pi*5.0*t) + 10*np.sin(2*np.pi*8.0*t) + 2*np.random.rand(npts)
Vsignal = sig(t)
FFT = abs(fft(Vsignal))
freqs = fftfreq(npts, t[1]-t[0])
FFT_plot = FFT[0:int(len(freqs)/2)]
freqs_plot = freqs[0:int(len(freqs)/2)]
plt.subplot(211)
plt.plot(t[:nplot], Vsignal[:nplot])
plt.xlabel ('time (s)')
plt.ylabel ('voltage\nmeasured (V)')
plt.subplot(212)
plt.semilogy(freqs_plot,FFT_plot**2,'-')
plt.xlabel ('frequency (Hz)')
plt.ylabel ('power\nspectrum (a.u.)')
plt.ylim([1e-1,np.max(FFT_plot**2)])
plt.tight_layout()
Explanation: The first number in the tuple is the result, the second number is an estimate of the absolute error in the result.
There are also 2d and 3d numerical integrators in Scipy. See the docs for more information.
2.11 Fast Fourier Transform and Signal Processing
Very often we want to use FFT techniques to help obtain the signal from noisy data. Scipy has several different options for this.
End of explanation
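Before moving on, here is a minimal sketch of the 2d integrator mentioned above, assuming scipy.integrate.dblquad (which integrates func(y, x) over x in [a, b] and y in [gfun(x), hfun(x)]):
from scipy.integrate import dblquad
result, error = dblquad(lambda y, x: np.exp(-x - y), 0, 1, lambda x: 0, lambda x: 1)
print(result)   # should be close to (1 - 1/np.e)**2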
csv = """\
1, -6095.12544083, 0.03686, 1391.5
2, -6095.25762870, 0.00732, 10468.0
3, -6095.26325979, 0.00233, 11963.5
4, -6095.26428124, 0.00109, 13331.9
5, -6095.26463203, 0.00057, 14710.8
6, -6095.26477615, 0.00043, 20211.1
7, -6095.26482624, 0.00015, 21726.1
8, -6095.26483584, 0.00021, 24890.5
9, -6095.26484405, 0.00005, 26448.7
10, -6095.26484599, 0.00003, 27258.1
11, -6095.26484676, 0.00003, 28155.3
12, -6095.26484693, 0.00002, 28981.7
13, -6095.26484693, 0.00002, 28981.7
"""
csv
Explanation: There are additional signal processing routines in Scipy (e.g. splines, filtering) that you can read about here.
3. Intermediate Python
3.1 Parsing data output
As more and more of our day-to-day work is being done on and through computers, we increasingly have output that one program writes, often in a text file, that we need to analyze in one way or another, and potentially feed that output into another file.
Suppose we have the following output in CSV (comma separated values) format, a format that originally came from Microsoft Excel, and is increasingly used as a data interchange format in big data applications. How would we parse that?
End of explanation
lines = csv.splitlines()
lines
Explanation: This is a giant string. If we use splitlines(), we see that a list is created where line gets separated into a string:
End of explanation
lines[4].split(",")
Explanation: Splitting is a big concept in text processing. We used splitlines() here, and next we'll use the more general .split(",") function below to split each line into comma-delimited words.
We now want to do three things:
Skip over the lines that don't carry any information
Break apart each line that does carry information and grab the pieces we want
Turn the resulting data into something that we can plot.
To break apart each line, we will use .split(","). Let's see what it does to one of the lines:
End of explanation
help("".split)
Explanation: What does split() do?
End of explanation
for line in lines:
# do something with each line
words = line.split(",")
Explanation: Since the data is now in a list of lines, we can iterate over it, splitting up data where we see a comma:
End of explanation
data = []
for line in csv.splitlines()[2:]:
words = line.split(',')
data.append(list(map(float,words)))
data = np.array(data)
data
Explanation: We need to add these results at each step to a list:
End of explanation
plt.plot(data[:,0],data[:,1],'-o')
plt.xlabel('step')
plt.ylabel('Energy (hartrees)')
plt.title('Convergence of NWChem geometry optimization for Si cluster\n')
Explanation: Let's examine what we just did: first, we used a for loop to iterate over each line. However, we skipped the first two (the lines[2:] only takes the lines starting from index 2), since lines[0] contained the title information, and lines[1] contained underscores. Similarly, [:5] instead would take the first five lines.
We pass the comma string "," into the split function, so that it breaks to a new word every time it sees a comma. Next, to simplify things a bit, we're using the map() command to repeatedly apply a single function (float()) to a list, and to return the output as a list. Finally, we turn the list of lists into a numpy array structure.
End of explanation
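As a standalone illustration of that map()/float() idiom (a sketch, not tied to the data above):
words = "1.0, 2.5, -3.2".split(",")
print(list(map(float, words)))   # [1.0, 2.5, -3.2]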
energies = data[:,1]
minE = np.min(energies)
energies_eV = 27.211*(energies-minE)
plt.plot(data[:,0],energies_eV,'-o')
plt.xlabel('step')
plt.ylabel('Energy (eV)')
plt.title('Convergence of NWChem geometry optimization for Si cluster')
Explanation: Hartrees (what most quantum chemistry programs use by default) are really stupid units. We really want this in kcal/mol or eV or something we use. So let's quickly replot this in terms of eV above the minimum energy, which will give us a much more useful plot:
End of explanation
filename= 'DS0004.csv'
data = np.genfromtxt(filename,delimiter=',',skip_header=17 )
x_values = data[:,0]
y_values = data[:,1]
plt.plot(x_values, y_values)
Explanation: The real value in a language like Python is that it makes it easy to take additional steps to analyze data in this fashion, which means you are thinking more about your data, and are more likely to see important patterns.
3.2 Reading in data files
Let's take a look at a perhaps easier approach to a common problem -- you have a data file with some header info and comma-delimited values and you want the data so you can start doing stuff with it. Let's use numpy's genfromtxt()
End of explanation
print("I have 3 errands to run")
Explanation: That was easy! Why didn't we only learn that? Because not every data set is "nice" like that. Better to have some tools for when things aren't working how you'd like them to be. That being said, much data coming from scientific equipment and computational tools can be cast into a format that can be read in through genfromtxt(). For larger data sets, the library pandas might be helpful.
3.3 More Sophisticated String Formatting and Processing
Strings are a big deal in most modern languages, and hopefully the previous sections helped underscore how versatile Python's string processing techniques are. We will continue this topic in this section.
We can print out lines in Python using the print command.
End of explanation
"I have 3 errands to run"
Explanation: In IPython we don't even need the print command, since it will display the last expression not assigned to a variable.
End of explanation
a,b,c = 1,2,3
print("The variables are ",1,2,3)
Explanation: print even converts some arguments to strings for us:
End of explanation
print("Pi as a decimal = %d" % np.pi)
print("Pi as a float = %f" % np.pi)
print("Pi with 4 decimal places = %.4f" % np.pi)
print("Pi with overall fixed length of 10 spaces, with 6 decimal places = %10.6f" % np.pi)
print("Pi as in exponential format = %e" % np.pi)
Explanation: As versatile as this is, you typically need more freedom over the data you print out. For example, what if we want to print a bunch of data to exactly 4 decimal places? We can do this using formatted strings.
Formatted strings share a syntax with the C printf statement. We make a string that has some funny format characters in it, and then pass a bunch of variables into the string that fill out those characters in different ways.
For example,
End of explanation
print("The variables specified earlier are %d, %d, and %d" % (a,b,c))
Explanation: We use a percent sign in two different ways here. First, the format character itself starts with a percent sign. %d or %i are for integers, %f is for floats, %e is for numbers in exponential formats. All of the numbers can take number immediately after the percent that specifies the total spaces used to print the number. Formats with a decimal can take an additional number after a dot . to specify the number of decimal places to print.
The other use of the percent sign is after the string, to pipe a set of variables in. You can pass in multiple variables (if your formatting string supports it) by putting a tuple after the percent. Thus,
End of explanation
form_letter = """\
%s
Dear %s,
We regret to inform you that your product did not
ship today due to %s.
We hope to remedy this as soon as possible.
From,
Your Supplier
print(form_letter % ("July 1, 2016","Valued Customer Bob","alien attack"))
Explanation: This is a simple formatting structure that will satisfy most of your string formatting needs. More information on different format symbols is available in the string formatting part of the standard docs.
It's worth noting that more complicated string formatting methods are in development, but I prefer this system due to its simplicity and its similarity to C formatting strings.
Recall we discussed multiline strings. We can put format characters in these as well, and fill them with the percent sign as before.
End of explanation
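For reference, the newer formatting styles alluded to above look roughly like this (a sketch; the f-string form needs Python 3.6+):
print("Pi with 4 decimal places = {:.4f}".format(np.pi))
print(f"Pi with 4 decimal places = {np.pi:.4f}")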
form_letter = """\
%(date)s
Dear %(customer)s,
We regret to inform you that your product did not
ship today due to %(lame_excuse)s.
We hope to remedy this as soon as possible.
From,
Your Supplier
print(form_letter % {"date" : "July 1, 2016","customer":"Valued Customer Bob","lame_excuse":"alien attack"})
Explanation: The problem with a long block of text like this is that it's often hard to keep track of what all of the variables are supposed to stand for. There's an alternate format where you can pass a dictionary into the formatted string, and give a little bit more information to the formatted string itself. This method looks like:
End of explanation
nwchem_format = """
start %(jobname)s
title "%(thetitle)s"
charge %(charge)d
geometry units angstroms print xyz autosym
%(geometry)s
end
basis
* library 6-31G**
end
dft
xc %(dft_functional)s
mult %(multiplicity)d
end
task dft %(jobtype)s
"""
Explanation: By providing a little bit more information, you're less likely to make mistakes, like referring to your customer as "alien attack".
As a scientist, you're less likely to be sending bulk mailings to a bunch of customers. But these are great methods for generating and submitting lots of similar runs, say scanning a bunch of different structures to find the optimal configuration for something.
For example, you can use the following template for NWChem input files:
End of explanation
oxygen_xy_coords = [(0,0),(0,0.1),(0.1,0),(0.1,0.1)]
charge = 0
multiplicity = 1
dft_functional = "b3lyp"
jobtype = "optimize"
geometry_template = """\
O %f %f 0.0
H 0.0 1.0 0.0
H 1.0 0.0 0.0
"""
for i,xy in enumerate(oxygen_xy_coords):
thetitle = "Water run #%d" % i
jobname = "h2o-%d" % i
geometry = geometry_template % xy
print("---------")
print(nwchem_format % dict(thetitle=thetitle,charge=charge,jobname=jobname,jobtype=jobtype,
geometry=geometry,dft_functional=dft_functional,multiplicity=multiplicity))
Explanation: If you want to submit a sequence of runs to a computer somewhere, it's pretty easy to put together a little script, maybe even with some more string formatting in it:
End of explanation
def my_enumerate(seq):
l = []
for i in range(len(seq)):
l.append((i,seq[i]))
return l
my_enumerate(oxygen_xy_coords)
Explanation: This is a very bad geometry for a water molecule, and it would be silly to run so many geometry optimizations of structures that are guaranteed to converge to the same single geometry, but you get the idea of how you can run vast numbers of simulations with a technique like this.
We used the enumerate function to loop over both the indices and the items of a sequence, which is valuable when you want a clean way of getting both. enumerate is roughly equivalent to:
End of explanation
np.linspace(0,1)
Explanation: enumerate uses generators (see below) so that it doesn't have to create a big list, which makes it faster for really long sequences.
3.4 Optional arguments of a function
You will recall that the linspace function can take either two arguments (for the starting and ending points):
End of explanation
np.linspace(0,1,5)
Explanation: or it can take three arguments, for the starting point, the ending point, and the number of points:
End of explanation
np.linspace(0,1,5,endpoint=False)
Explanation: You can also pass in keywords to exclude the endpoint:
End of explanation
def my_linspace(start,end):
npoints = 50
v = []
d = (end-start)/float(npoints-1)
for i in range(npoints):
v.append(start + i*d)
return v
my_linspace(0,1)
Explanation: Right now, we only know how to specify functions that have a fixed number of arguments. We'll learn how to do the more general cases here.
If we're defining a simple version of linspace, we would start with:
End of explanation
def my_linspace(start,end,npoints = 50):
v = []
d = (end-start)/float(npoints-1)
for i in range(npoints):
v.append(start + i*d)
return v
Explanation: We can add an optional argument by specifying a default value in the argument list:
End of explanation
my_linspace(0,1)
Explanation: This gives exactly the same result if we don't specify anything:
End of explanation
my_linspace(0,1,5)
Explanation: But also let's us override the default value with a third argument:
End of explanation
def my_linspace(start,end,npoints=50,**kwargs):
endpoint = kwargs.get('endpoint',True)
v = []
if endpoint:
d = (end-start)/float(npoints-1)
else:
d = (end-start)/float(npoints)
for i in range(npoints):
v.append(start + i*d)
return v
my_linspace(0,1,5,endpoint=False)
Explanation: We can add arbitrary keyword arguments to the function definition by putting a keyword argument **kwargs handle in:
End of explanation
def my_range(*args):
start = 0
step = 1
if len(args) == 1:
end = args[0]
elif len(args) == 2:
start,end = args
elif len(args) == 3:
start,end,step = args
else:
raise Exception("Unable to parse arguments")
v = []
value = start
while True:
v.append(value)
value += step
if value > end: break
return v
Explanation: What the keyword argument construction does is to take any additional keyword arguments (i.e. arguments specified by name, like "endpoint=False"), and stick them into a dictionary called "kwargs" (you can call it anything you like, but it has to be preceded by two stars). You can then grab items out of the dictionary using the get command, which also lets you specify a default value. I realize it takes a little getting used to, but it is a common construction in Python code, and you should be able to recognize it.
There's an analogous *args that dumps any additional arguments into a list called "args". Think about the range function: it can take one (the endpoint), two (starting and ending points), or three (starting, ending, and step) arguments. How would we define this?
End of explanation
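Here is a minimal standalone demo of the kwargs.get pattern described above (a sketch, unrelated to my_range; the report function is made up for illustration):
def report(**kwargs):
    verbose = kwargs.get('verbose', False)   # default used when the keyword is absent
    return "verbose mode" if verbose else "quiet mode"
print(report())              # quiet mode
print(report(verbose=True))  # verbose mode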
my_range()
Explanation: Note that we have defined a few new things you haven't seen before: a break statement, that allows us to exit a for loop if some conditions are met, and an exception statement, that causes the interpreter to exit with an error message. For example:
End of explanation
evens1 = [2*i for i in range(10)]
print(evens1)
Explanation: 3.5 List Comprehensions and Generators
List comprehensions are a streamlined way to make lists. They look something like a list definition, with some logic thrown in. For example:
End of explanation
odds = [i for i in range(20) if i%2==1]
odds
Explanation: You can also put some boolean testing into the construct:
End of explanation
def evens_below(n):
for i in range(n):
if i%2 == 0:
yield i
return
for i in evens_below(9):
print(i)
Explanation: Here i%2 is the remainder when i is divided by 2, so that i%2==1 is true if the number is odd. Even though this is a relatively new addition to the language, it is now fairly common since it's so convenient.
iterators are a way of making virtual sequence objects. Consider if we had the nested loop structure:
for i in range(1000000):
for j in range(1000000):
Inside the main loop, Python 2's range would build a list of 1,000,000 integers, just to loop over them one at a time. We don't need any of the additional things that a list gives us, like slicing or random access, we just need to go through the numbers one at a time. And we're making 1,000,000 of them.
Iterators are a way around this. In Python 2, the xrange function was the iterator version of range; in Python 3, range itself already behaves this way. It simply makes a counter that is stepped through in sequence, so that the analogous Python 2 loop structure would look like:
for i in xrange(1000000):
for j in xrange(1000000):
Even though we've only added two characters, we've dramatically sped up the code, because we're no longer building 1,000,000-element lists.
We can define our own iterators using the yield statement:
End of explanation
list(evens_below(9))
Explanation: We can always turn an iterator into a list using the list command:
End of explanation
evens_gen = (i for i in range(9) if i%2==0)
for i in evens_gen:
print(i)
Explanation: There's a special syntax called a generator expression that looks a lot like a list comprehension:
End of explanation
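Generator expressions are especially handy as arguments to functions like sum, since no intermediate list is built (a one-line sketch):
print(sum(i*i for i in range(10)))   # 285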
def gauss(x,A,a,x0):
return A*np.exp(-a*(x-x0)**2)
Explanation: 3.6 Factory Functions
A factory function is a function that returns a function. They have the fancy name lexical closure, which makes you sound really intelligent in front of your CS friends. But, despite the arcane names, factory functions can play a very practical role.
Suppose you want the Gaussian function centered at 0.5, with height 99 and width 1.0. You could write a general function.
End of explanation
def gauss_maker(A,a,x0):
def f(x):
return A*np.exp(-a*(x-x0)**2)
return f
x = np.linspace(0,1)
g = gauss_maker(99.0,20,0.5)
plt.plot(x,g(x))
Explanation: But what if you need a function with only one argument, like f(x) rather than f(x,y,z,...)? You can do this with Factory Functions:
End of explanation
# Data in a json format:
json_data = """\
{
"a": [1,2,3],
"b": [4,5,6],
"greeting" : "Hello"
}
"""
import json
loaded_json=json.loads(json_data)
loaded_json
Explanation: Everything in Python is an object, including functions. This means that functions can be returned by other functions. (They can also be passed into other functions, which is also useful, but a topic for another discussion.) In the gauss_maker example, the g function that is output "remembers" the A, a, x0 values it was constructed with, since they're all stored in the local memory space (this is what the lexical closure really refers to) of that function.
Factories are one of the more important of the Software Design Patterns, which are a set of guidelines to follow to make high-quality, portable, readable, stable software. It's beyond the scope of the current work to go more into either factories or design patterns, but I thought I would mention them for people interested in software design.
3.7 Serialization: Save it for later
Serialization refers to the process of outputting data (and occasionally functions) to a database or a regular file, for the purpose of using it later on. In the very early days of programming languages, this was normally done in regular text files. Python is excellent at text processing, and you probably already know enough to get started with this.
When accessing large amounts of data became important, people developed database software based around the Structured Query Language (SQL) standard. I'm not going to cover SQL here, but, if you're interested, I recommend using the sqlite3 module in the Python standard library.
As data interchange became important, the eXtensible Markup Language (XML) has emerged. XML makes data formats that are easy to write parsers for, greatly simplifying the ambiguity that sometimes arises in the process. Again, I'm not going to cover XML here, but if you're interested in learning more, look into Element Trees, now part of the Python standard library.
Python has a very general serialization format called pickle that can turn any Python object, even a function or a class, into a representation that can be written to a file and read in later. But, again, I'm not going to talk about this, since I rarely use it myself. Again, the standard library documentation for pickle is the place to go.
What I am going to talk about is a relatively recent format called JavaScript Object Notation (JSON) that has become very popular over the past few years. There's a module in the standard library for encoding and decoding JSON formats. The reason I like JSON so much is that it looks almost like Python, so that, unlike the other options, you can look at your data and edit it, use it in another program, etc.
Here's a little example:
End of explanation
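For comparison, the pickle interface mentioned above is similarly terse (a minimal sketch; the filename is made up):
import pickle
stuff = {"a": [1, 2, 3], "greeting": "Hello"}
with open("stuff.pkl", "wb") as f:
    pickle.dump(stuff, f)      # serialize to a binary file
with open("stuff.pkl", "rb") as f:
    restored = pickle.load(f)  # read it back
print(restored)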
json.dumps({"a":[1,2,3],"b":[9,10,11],"greeting":"Hola"})
Explanation: Your data sits in something that looks like a Python dictionary, and in a single line of code, you can load it into a Python dictionary for use later.
In the same way, you can, with a single line of code, put a bunch of variables into a dictionary, and then output to a file using json:
End of explanation
from operator import add, mul
add(1,2)
mul(3,4)
Explanation: 3.8 Functional programming
Functional programming is a very broad subject. The idea is to have a series of functions, each of which generates a new data structure from an input, without changing the input structure at all. By not modifying the input structure (something that is called not having side effects), many guarantees can be made about how independent the processes are, which can help parallelization and guarantees of program accuracy. There is a Python Functional Programming HOWTO in the standard docs that goes into more details on functional programming. I just wanted to touch on a few of the most important ideas here.
There is an operator module that has function versions of most of the Python operators. For example:
End of explanation
def doubler(x): return 2*x
doubler(17)
Explanation: These are useful building blocks for functional programming.
The lambda operator allows us to build anonymous functions, which are simply functions that aren't defined by a normal def statement with a name. For example, a function that doubles the input is:
End of explanation
lambda x: 2*x
Explanation: We could also write this as:
End of explanation
another_doubler = lambda x: 2*x
another_doubler(19)
Explanation: And assign it to a function separately:
End of explanation
list(map(float,'1 2 3 4 5'.split()))
Explanation: lambda is particularly convenient (as we'll see below) in passing simple functions as arguments to other functions.
map is a way to repeatedly apply a function to a list:
End of explanation
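A common place to pass a lambda into another function is the key argument of sorted (a small sketch):
values = [-4, 1, -3, 2]
print(sorted(values, key=lambda v: abs(v)))   # [1, 2, -3, -4]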
sum([1,2,3,4,5])
Explanation: reduce is a way to repeatedly apply a function to the first two items of the list. There already is a sum function in Python that is a reduction:
End of explanation
from functools import reduce
def prod(l): return reduce(mul,l)
prod([1,2,3,4,5])
Explanation: We can use reduce to define an analogous prod function:
End of explanation
mystring = "Hi there"
Explanation: 3.9 Object Oriented Programming
We've seen a lot of examples of objects in Python. We create a string object with quote marks:
End of explanation
mystring.split()
mystring.startswith('Hi')
len(mystring)
Explanation: and we have a bunch of methods we can use on the object:
End of explanation
class Schrod1d:
"""\
Schrod1d: Solver for the one-dimensional Schrodinger equation.
"""
def __init__(self,V,start=0,end=1,npts=50,**kwargs):
m = kwargs.get('m',1.0)
self.x = np.linspace(start,end,npts)
self.Vx = V(self.x)
self.H = (-0.5/m)*self.laplacian() + np.diag(self.Vx)
return
def plot(self,*args,**kwargs):
titlestring = kwargs.get('titlestring',"Eigenfunctions of the 1d Potential")
xstring = kwargs.get('xstring',"Displacement (bohr)")
ystring = kwargs.get('ystring',"Energy (hartree)")
if not args:
args = [3]
x = self.x
E,U = np.linalg.eigh(self.H)
h = x[1]-x[0]
# Plot the Potential
plt.plot(x,self.Vx,color='k')
for i in range(*args):
# For each of the first few solutions, plot the energy level:
plt.axhline(y=E[i],color='k',ls=":")
# as well as the eigenfunction, displaced by the energy level so they don't
# all pile up on each other:
plt.plot(x,U[:,i]/np.sqrt(h)+E[i])
plt.title(titlestring)
plt.xlabel(xstring)
plt.ylabel(ystring)
return
def laplacian(self):
x = self.x
h = x[1]-x[0] # assume uniformly spaced points
n = len(x)
M = -2*np.identity(n,'d')
for i in range(1,n):
M[i,i-1] = M[i-1,i] = 1
return M/h**2
Explanation: Object oriented programming simply gives you the tools to define objects and methods for yourself. It's useful anytime you want to keep some data (like the characters in the string) tightly coupled to the functions that act on the data (length, split, startswith, etc.).
As an example, we're going to bundle the functions we did to make the 1d harmonic oscillator eigenfunctions with arbitrary potentials, so we can pass in a function defining that potential, some additional specifications, and get out something that can plot the orbitals, as well as do other things with them, if desired.
End of explanation
square_well = Schrod1d(lambda x: 0*x,m=10)
square_well.plot(4,titlestring="Square Well Potential")
Explanation: The __init__() function specifies what operations go on when the object is created. The self argument is the object itself, and we don't pass it in. The only required argument is the function that defines the QM potential. We can also specify additional arguments that define the numerical grid that we're going to use for the calculation.
For example, to do an infinite square well potential, we have a function that is 0 everywhere. We don't have to specify the barriers, since we'll only define the potential in the well, which means that it can't be defined anywhere else.
End of explanation
ho = Schrod1d(lambda x: x**2,start=-3,end=3)
ho.plot(6,titlestring="Harmonic Oscillator")
Explanation: We can similarly redefine the Harmonic Oscillator potential.
End of explanation
def finite_well(x,V_left=1,V_well=0,V_right=1,d_left=10,d_well=10,d_right=10):
V = np.zeros(x.size,'d')
for i in range(x.size):
if x[i] < d_left:
V[i] = V_left
elif x[i] > (d_left+d_well):
V[i] = V_right
else:
V[i] = V_well
return V
fw = Schrod1d(finite_well,start=0,end=30,npts=100)
fw.plot()
Explanation: Let's define a finite well potential:
End of explanation
def triangular(x,F=30): return F*x
tw = Schrod1d(triangular,m=10)
tw.plot()
Explanation: A triangular well:
End of explanation
def tri_finite(x): return finite_well(x)+triangular(x,F=0.025)
tfw = Schrod1d(tri_finite,start=0,end=30,npts=100)
tfw.plot()
Explanation: Or we can combine the two, making something like a semiconductor quantum well with a top gate:
End of explanation
%timeit factorial(20)
Explanation: There's a lot of philosophy behind object oriented programming. Since I'm trying to focus on just the basics here, I won't go into them, but the internet is full of lots of resources on OO programming and theory. The best of this is contained in the Design Patterns book, which I highly recommend.
4. Speeding Python: Timeit, Profiling, Cython, SWIG, and PyPy
The first rule of speeding up your code is not to do it at all. As Donald Knuth said:
"We should forget about small efficiencies, say about 97% of the time: premature optimization is the root of all evil."
The second rule of speeding up your code is to only do it if you really think you need to do it. Python has two tools to help with this process: a timing program called timeit, and a very good code profiler. We will discuss both of these tools in this section, as well as techniques to use to speed up your code once you know it's too slow.
4.1 Timeit
timeit helps determine which of two similar routines is faster. Recall that some time ago we wrote a factorial routine, but also pointed out that Python had its own routine built into the math module. Is there any difference in the speed of the two? timeit helps us determine this. For example, timeit tells how long each method takes:
End of explanation
%timeit fact(20)
Explanation: The little % sign that we have in front of the timeit call is an example of an IPython magic function, which we don't have time to go into here, but it's just some little extra mojo that IPython adds to the functions to make it run better in the IPython environment. You can read more about it in the IPython tutorial.
In any case, the timeit function runs 3 loops, and tells us that it took an average of 583 ns to compute 20!. In contrast:
End of explanation
def evens(n):
"Return a list of even numbers below n"
l = []
for x in range(n):
if x % 2 == 0:
l.append(x)
return l
Explanation: the factorial function we wrote is about a factor of 10 slower. This is because the built-in factorial function is written in C code and called from Python, and the version we wrote is written in plain old Python. A Python program has a lot of stuff in it that makes it nice to interact with, but all that friendliness slows down the code. In contrast, the C code is less friendly but more efficient. If you want speed with as little effort as possible, write your code in an easy-to-program language like Python, but dump the slow parts into a faster language like C, and call it from Python. We'll go through some tricks to do this in this section.
4.2 Profiling
Profiling complements what timeit does by splitting the overall timing into the time spent in each function. It can give us a better understanding of what our program is really spending its time on.
Suppose we want to create a list of even numbers. Our first effort yields this:
End of explanation
import cProfile
cProfile.run('evens(100000)')
Explanation: Is this code fast enough? We find out by running the Python profiler on a longer run:
End of explanation
def evens2(n):
"Return a list of even numbers below n"
return [x for x in range(n) if x % 2 == 0]
import cProfile
cProfile.run('evens2(100000)')
Explanation: This looks okay, 0.05 seconds isn't a huge amount of time, but looking at the profiling shows that the append function is taking almost 20% of the time. Can we do better? Let's try a list comprehension.
End of explanation
def evens3(n):
"Return a list of even numbers below n"
return [x for x in range(n) if x % 2 == 0]
import cProfile
cProfile.run('evens3(100000)')
Explanation: By removing a small part of the code using a list comprehension, we've doubled the overall speed of the code!
It seems like range is taking a long time, still. Can we get rid of it? Under Python 2 we could, by swapping in the lazy xrange generator; in Python 3, range is already lazy, so the version below is essentially unchanged:
End of explanation
def primes(n):
"""\
From python cookbook, returns a list of prime numbers from 2 to < n
>>> primes(2)
[2]
>>> primes(10)
[2, 3, 5, 7]
"""
if n==2: return [2]
elif n<2: return []
s=list(range(3,n+2,2))
mroot = n ** 0.5
half=(n+1)/2-1
i=0
m=3
while m <= mroot:
if s[i]:
j=int((m*m-3)/2)
s[j]=0
while j<half:
s[j]=0
j+=m
i=i+1
m=2*i+3
return [2]+[x for x in s if x]
number_to_try = 1000000
list_of_primes = primes(number_to_try)
print(list_of_primes[10000])   # index 10000 gives the 10,001st prime (0-based indexing)
Explanation: This is where profiling can be useful. Our code now runs 3x faster by making trivial changes. We wouldn't have thought to look in these places had we not had access to easy profiling. Imagine what you would find in more complicated programs.
4.3 Other Ways to Speed Python
When we compared the fact and factorial functions, above, we noted that C routines are often faster because they're more streamlined. Once we've determined that one routine is a bottleneck for the performance of a program, we can replace it with a faster version by writing it in C. This is called extending Python, and there's a good section in the standard documents. This can be a tedious process if you have many different routines to convert. Fortunately, there are several other options.
Swig (the simplified wrapper and interface generator) is a method to generate bindings not only for Python but also for Matlab, Perl, Ruby, and other scripting languages. Swig can scan the header files of a C project and generate Python bindings for it. Using Swig is substantially easier than writing the routines in C.
Cython is a C-extension language. You can start by compiling a Python routine into a shared object library that can be imported as a faster version of the routine. You can then add static type declarations and make other restrictions to further speed up the code. Cython is generally easier to use than Swig.
PyPy is the easiest way of obtaining fast code: it is an alternative Python interpreter with a just-in-time compiler (itself written in a restricted subset of Python called RPython). Over a wide range of tests, PyPy is roughly 6 times faster than the standard Python distribution.
4.4 Fun: Finding Primes
Project Euler is a site where programming puzzles are posed that might have interested Euler. Problem 7 asks the question:
By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
What is the 10,001st prime number?
To solve this we need a very long list of prime numbers. First we'll make a function that uses the Sieve of Erastothenes to generate all the primes less than n.
End of explanation
cProfile.run('primes(1000000)')
Explanation: You might think that Python is a bad choice for something like this, but, in terms of time, it really doesn't take long:
End of explanation |
11,374 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Character-Level LSTM in PyTorch
In this notebook, I'll construct a character-level LSTM with PyTorch. The network will train character by character on some text, then generate new text character by character. As an example, I will train on Anna Karenina. This model will be able to generate new text based on the text from the book!
This network is based off of Andrej Karpathy's post on RNNs and implementation in Torch. Below is the general architecture of the character-wise RNN.
<img src="images/charseq.jpeg" width="500">
First let's load in our required resources for data loading and model creation.
Step1: Load in Data
Then, we'll load the Anna Karenina text file and convert it into integers for our network to use.
Step2: Let's check out the first 100 characters, make sure everything is peachy. According to the American Book Review, this is the 6th best first line of a book ever.
Step3: Tokenization
In the cells, below, I'm creating a couple dictionaries to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.
Step4: And we can see those same characters from above, encoded as integers.
Step5: Pre-processing the data
As you can see in our char-RNN image above, our LSTM expects an input that is one-hot encoded meaning that each character is converted into an integer (via our created dictionary) and then converted into a column vector where only it's corresponding integer index will have the value of 1 and the rest of the vector will be filled with 0's. Since we're one-hot encoding the data, let's make a function to do that!
Step6: Making training mini-batches
To train on this data, we also want to create mini-batches for training. Remember that we want our batches to be multiple sequences of some desired number of sequence steps. Considering a simple example, our batches would look like this
Step7: Test Your Implementation
Now I'll make some data sets and we can check out what's going on as we batch data. Here, as an example, I'm going to use a batch size of 8 and 50 sequence steps.
Step8: If you implemented get_batches correctly, the above output should look something like
```
x
[[25 8 60 11 45 27 28 73 1 2]
[17 7 20 73 45 8 60 45 73 60]
[27 20 80 73 7 28 73 60 73 65]
[17 73 45 8 27 73 66 8 46 27]
[73 17 60 12 73 8 27 28 73 45]
[66 64 17 17 46 7 20 73 60 20]
[73 76 20 20 60 73 8 60 80 73]
[47 35 43 7 20 17 24 50 37 73]]
y
[[ 8 60 11 45 27 28 73 1 2 2]
[ 7 20 73 45 8 60 45 73 60 45]
[20 80 73 7 28 73 60 73 65 7]
[73 45 8 27 73 66 8 46 27 65]
[17 60 12 73 8 27 28 73 45 27]
[64 17 17 46 7 20 73 60 20 80]
[76 20 20 60 73 8 60 80 73 17]
[35 43 7 20 17 24 50 37 73 36]]
```
although the exact numbers may be different. Check to make sure the data is shifted over one step for `y`.
Defining the network with PyTorch
Below is where you'll define the network.
<img src="images/charRNN.png" width=500px>
Next, you'll use PyTorch to define the architecture of the network. We start by defining the layers and operations we want. Then, define a method for the forward pass. You've also been given a method for predicting characters.
Model Structure
In __init__ the suggested structure is as follows
Step9: Time to train
The train function gives us the ability to set the number of epochs, the learning rate, and other parameters.
Below we're using an Adam optimizer and cross entropy loss since we are looking at character class scores as output. We calculate the loss and perform backpropagation, as usual!
A couple of details about training
Step10: Instantiating the model
Now we can actually train the network. First we'll create the network itself, with some given hyperparameters. Then, define the mini-batches sizes, and start training!
Step11: Set your training hyperparameters!
Step12: Getting the best model
To set your hyperparameters to get the best performance, you'll want to watch the training and validation losses. If your training loss is much lower than the validation loss, you're overfitting. Increase regularization (more dropout) or use a smaller network. If the training and validation losses are close, you're underfitting so you can increase the size of the network.
Hyperparameters
Here are the hyperparameters for the network.
In defining the model
Step13: Making Predictions
Now that the model is trained, we'll want to sample from it and make predictions about next characters! To sample, we pass in a character and have the network predict the next character. Then we take that character, pass it back in, and get another predicted character. Just keep doing this and you'll generate a bunch of text!
A note on the predict function
The output of our RNN is from a fully-connected layer and it outputs a distribution of next-character scores.
To actually get the next character, we apply a softmax function, which gives us a probability distribution that we can then sample to predict the next character.
Top K sampling
Our predictions come from a categorical probability distribution over all the possible characters. We can make the sampled text more reasonable to handle (with fewer variables) by only considering the $K$ most probable characters. This will prevent the network from giving us completely absurd characters while allowing it to introduce some noise and randomness into the sampled text. Read more about topk, here.
Step14: Priming and generating text
Typically you'll want to prime the network so you can build up a hidden state. Otherwise the network will start out generating characters at random. In general the first bunch of characters will be a little rough since it hasn't built up a long history of characters to predict from.
Step15: Loading a checkpoint | Python Code:
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
Explanation: Character-Level LSTM in PyTorch
In this notebook, I'll construct a character-level LSTM with PyTorch. The network will train character by character on some text, then generate new text character by character. As an example, I will train on Anna Karenina. This model will be able to generate new text based on the text from the book!
This network is based off of Andrej Karpathy's post on RNNs and implementation in Torch. Below is the general architecture of the character-wise RNN.
<img src="images/charseq.jpeg" width="500">
First let's load in our required resources for data loading and model creation.
End of explanation
# Open text file and read in data as `text`
with open('data/anna.txt', 'r') as f:
text = f.read()
Explanation: Load in Data
Then, we'll load the Anna Karenina text file and convert it into integers for our network to use.
End of explanation
text[:100]
Explanation: Let's check out the first 100 characters, make sure everything is peachy. According to the American Book Review, this is the 6th best first line of a book ever.
End of explanation
# Encode the text and map each character to an integer and vice versa
# We create two dictionaries:
# 1. int2char, which maps integers to characters
# 2. char2int, which maps characters to unique integers
chars = tuple(set(text))
int2char = dict(enumerate(chars))
char2int = {ch: ii for ii, ch in int2char.items()}
# Encode the text
encoded = np.array([char2int[ch] for ch in text])
Explanation: Tokenization
In the cells, below, I'm creating a couple dictionaries to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.
End of explanation
encoded[:100]
Explanation: And we can see those same characters from above, encoded as integers.
End of explanation
def one_hot_encode(arr, n_labels):
# Initialize the the encoded array
one_hot = np.zeros((np.multiply(*arr.shape), n_labels),
dtype=np.float32)
# Fill the appropriate elements with ones
one_hot[np.arange(one_hot.shape[0]), arr.flatten()] = 1.
# Finally reshape it to get back to the original array
one_hot = one_hot.reshape((*arr.shape, n_labels))
return one_hot
# Check that the function works as expected
test_seq = np.array([[3, 5, 1]])
one_hot = one_hot_encode(test_seq, 8)
print(one_hot)
Explanation: Pre-processing the data
As you can see in our char-RNN image above, our LSTM expects an input that is one-hot encoded meaning that each character is converted into an integer (via our created dictionary) and then converted into a column vector where only it's corresponding integer index will have the value of 1 and the rest of the vector will be filled with 0's. Since we're one-hot encoding the data, let's make a function to do that!
End of explanation
def get_batches(arr, batch_size, seq_length):
'''Create a generator that returns batches of size
batch_size x seq_length from arr.
Arguments
---------
arr: Array you want to make batches from
batch_size: Batch size, the number of sequences per batch
seq_length: Number of encoded chars in a sequence
'''
## TODO: Get the number of batches we can make
n_batches = len(arr) // (batch_size * seq_length)
## TODO: Keep only enough characters to make full batches
arr = arr[:(n_batches * batch_size * seq_length)]
## TODO: Reshape into batch_size rows
arr = arr.reshape((batch_size, -1))
## TODO: Iterate over the batches using a window of size seq_length
for n in range(0, arr.shape[1], seq_length):
# The features
x = arr[:, n:(n + seq_length)]
# The targets, shifted by one
y = np.zeros_like(x)
try:
y[:, :-1], y[:, -1] = x[:, 1:], arr[:, n + seq_length]
except IndexError:
y[:, :-1], y[:, -1] = x[:, 1:], arr[:, 0]
yield x, y
Explanation: Making training mini-batches
To train on this data, we also want to create mini-batches for training. Remember that we want our batches to be multiple sequences of some desired number of sequence steps. Considering a simple example, our batches would look like this:
<img src="images/[email protected]" width=500px>
<br>
In this example, we'll take the encoded characters (passed in as the arr parameter) and split them into multiple sequences, given by batch_size. Each of our sequences will be seq_length long.
Creating Batches
1. The first thing we need to do is discard some of the text so we only have completely full mini-batches.
Each batch contains $N \times M$ characters, where $N$ is the batch size (the number of sequences in a batch) and $M$ is the seq_length or number of time steps in a sequence. Then, to get the total number of batches, $K$, that we can make from the array arr, you divide the length of arr by the number of characters per batch. Once you know the number of batches, you can get the total number of characters to keep from arr, $N * M * K$.
2. After that, we need to split arr into $N$ batches.
You can do this using arr.reshape(size) where size is a tuple containing the dimensions sizes of the reshaped array. We know we want $N$ sequences in a batch, so let's make that the size of the first dimension. For the second dimension, you can use -1 as a placeholder in the size, it'll fill up the array with the appropriate data for you. After this, you should have an array that is $N \times (M * K)$.
3. Now that we have this array, we can iterate through it to get our mini-batches.
The idea is each batch is an $N \times M$ window on the $N \times (M * K)$ array. For each subsequent batch, the window moves over by seq_length. We also want to create both the input and target arrays. Remember that the targets are just the inputs shifted over by one character. The way I like to do this window is use range to take steps of size seq_length from $0$ to arr.shape[1], the total number of tokens in each sequence. That way, the integers you get from range always point to the start of a batch, and each window is seq_length wide.
TODO: Write the code for creating batches in the function below. The exercises in this notebook will not be easy. I've provided a notebook with solutions alongside this notebook. If you get stuck, checkout the solutions. The most important thing is that you don't copy and paste the code into here, type out the solution code yourself.
End of explanation
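Before testing the real function, here is a tiny sanity check of the trimming-and-reshaping logic described above (a sketch with made-up sizes):
toy = np.arange(15)
toy_batch_size, toy_seq_length = 3, 2
n_full = len(toy) // (toy_batch_size * toy_seq_length)                  # 2 full batches fit
toy = toy[:n_full * toy_batch_size * toy_seq_length].reshape((toy_batch_size, -1))
print(toy.shape)   # (3, 4): 3 sequences, each long enough for 2 windows of length 2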
batches = get_batches(encoded, 8, 50)
x, y = next(batches)
# printing out the first 10 items in a sequence
print('x\n', x[:10, :10])
print('\ny\n', y[:10, :10])
Explanation: Test Your Implementation
Now I'll make some data sets and we can check out what's going on as we batch data. Here, as an example, I'm going to use a batch size of 8 and 50 sequence steps.
End of explanation
# check if GPU is available
train_on_gpu = torch.cuda.is_available()
if(train_on_gpu):
print('Training on GPU!')
else:
print('No GPU available, training on CPU; consider making n_epochs very small.')
class CharRNN(nn.Module):
def __init__(self,
tokens,
n_hidden=256,
n_layers=2,
drop_prob=0.5,
lr=0.001):
super().__init__()
self.drop_prob = drop_prob
self.n_layers = n_layers
self.n_hidden = n_hidden
self.lr = lr
# creating character dictionaries
self.chars = tokens
self.int2char = dict(enumerate(self.chars))
self.char2int = {ch: ii for ii, ch in self.int2char.items()}
## TODO: define the layers of the model
self.lstm = nn.LSTM(input_size=len(self.chars),
hidden_size=n_hidden,
num_layers=n_layers,
dropout=drop_prob,
batch_first=True)
## Define dropout
self.dropout = nn.Dropout(drop_prob)
## Define the final fully-connected layer
self.fc_out = nn.Linear(in_features=n_hidden,
out_features=len(self.chars))
def forward(self, x, hidden):
''' Forward pass through the network.
These inputs are x, and the hidden/cell state `hidden`. '''
## TODO: Get the outputs and the new hidden state from the lstm
lstm_out, hidden = self.lstm(x, hidden)
after_dropout = self.dropout(lstm_out)
# Reshaping the data
reshaped = after_dropout.contiguous().view(-1, self.n_hidden)
# Return the final output and the hidden state
out = self.fc_out(reshaped)
return out, hidden
def init_hidden(self, batch_size):
''' Initializes hidden state '''
# Create two new tensors with sizes n_layers x batch_size x n_hidden,
# initialized to zero, for hidden state and cell state of LSTM
weight = next(self.parameters()).data
if (train_on_gpu):
hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(),
weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda())
else:
hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(),
weight.new(self.n_layers, batch_size, self.n_hidden).zero_())
return hidden
Explanation: If you implemented get_batches correctly, the above output should look something like
```
x
[[25 8 60 11 45 27 28 73 1 2]
[17 7 20 73 45 8 60 45 73 60]
[27 20 80 73 7 28 73 60 73 65]
[17 73 45 8 27 73 66 8 46 27]
[73 17 60 12 73 8 27 28 73 45]
[66 64 17 17 46 7 20 73 60 20]
[73 76 20 20 60 73 8 60 80 73]
[47 35 43 7 20 17 24 50 37 73]]
y
[[ 8 60 11 45 27 28 73 1 2 2]
[ 7 20 73 45 8 60 45 73 60 45]
[20 80 73 7 28 73 60 73 65 7]
[73 45 8 27 73 66 8 46 27 65]
[17 60 12 73 8 27 28 73 45 27]
[64 17 17 46 7 20 73 60 20 80]
[76 20 20 60 73 8 60 80 73 17]
[35 43 7 20 17 24 50 37 73 36]]
```
although the exact numbers may be different. Check to make sure the data is shifted over one step for `y`.
Defining the network with PyTorch
Below is where you'll define the network.
<img src="images/charRNN.png" width=500px>
Next, you'll use PyTorch to define the architecture of the network. We start by defining the layers and operations we want. Then, define a method for the forward pass. You've also been given a method for predicting characters.
Model Structure
In __init__ the suggested structure is as follows:
* Create and store the necessary dictionaries (this has been done for you)
* Define an LSTM layer that takes as params: an input size (the number of characters), a hidden layer size n_hidden, a number of layers n_layers, a dropout probability drop_prob, and a batch_first boolean (True, since we are batching)
* Define a dropout layer with dropout_prob
* Define a fully-connected layer with params: input size n_hidden and output size (the number of characters)
* Finally, initialize the weights (again, this has been given)
Note that some parameters have been named and given in the __init__ function, and we use them and store them by doing something like self.drop_prob = drop_prob.
LSTM Inputs/Outputs
You can create a basic LSTM layer as follows
python
self.lstm = nn.LSTM(input_size, n_hidden, n_layers,
dropout=drop_prob, batch_first=True)
where input_size is the number of characters this cell expects to see as sequential input, and n_hidden is the number of units in the hidden layers in the cell. And we can add dropout by adding a dropout parameter with a specified probability; this will automatically add dropout to the inputs or outputs. Finally, in the forward function, we can stack up the LSTM cells into layers using .view. With this, you pass in a list of cells and it will send the output of one cell into the next cell.
We also need to create an initial hidden state of all zeros. This is done like so
python
self.init_hidden()
End of explanation
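As a quick, standalone sanity check of the LSTM input/output shapes described above (a sketch with hypothetical sizes; 83 simply stands in for the number of distinct characters):
lstm = nn.LSTM(input_size=83, hidden_size=256, num_layers=2, dropout=0.5, batch_first=True)
dummy = torch.zeros(4, 10, 83)            # batch of 4 sequences, 10 steps, 83-dim one-hot
out, (h, c) = lstm(dummy)
print(out.shape)   # torch.Size([4, 10, 256])
print(h.shape)     # torch.Size([2, 4, 256]) -- (n_layers, batch, n_hidden)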
def train(net, data, epochs=10, batch_size=10, seq_length=50, lr=0.001, clip=5, val_frac=0.1, print_every=10):
''' Training a network
Arguments
---------
net: CharRNN network
data: text data to train the network
epochs: Number of epochs to train
batch_size: Number of mini-sequences per mini-batch, aka batch size
seq_length: Number of character steps per mini-batch
lr: learning rate
clip: gradient clipping
val_frac: Fraction of data to hold out for validation
print_every: Number of steps for printing training and validation loss
'''
net.train()
opt = torch.optim.Adam(params=net.parameters(),
lr=lr)
criterion = nn.CrossEntropyLoss()
# Create training and validation data
val_idx = int(len(data)*(1-val_frac))
data, val_data = data[:val_idx], data[val_idx:]
if(train_on_gpu):
net.cuda()
counter = 0
n_chars = len(net.chars)
for e in range(epochs):
# Initialize hidden state
h = net.init_hidden(batch_size)
for x, y in get_batches(data, batch_size, seq_length):
counter += 1
# One-hot encode our data and make them Torch tensors
x = one_hot_encode(x, n_chars)
inputs, targets = torch.from_numpy(x), torch.from_numpy(y).type(torch.LongTensor)
if(train_on_gpu):
inputs, targets = inputs.cuda(), targets.cuda()
# Creating new variables for the hidden state, otherwise
# we'd backprop through the entire training history
h = tuple([each.data for each in h])
# Zero accumulated gradients
net.zero_grad()
# Get the output from the model
output, h = net(inputs, h)
# Calculate the loss and perform backprop
loss = criterion(output, targets.view(batch_size * seq_length))
loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
nn.utils.clip_grad_norm_(net.parameters(), clip)
opt.step()
# loss stats
if counter % print_every == 0:
# Get validation loss
val_h = net.init_hidden(batch_size)
val_losses = []
net.eval()
for x, y in get_batches(val_data, batch_size, seq_length):
# One-hot encode our data and make them Torch tensors
x = one_hot_encode(x, n_chars)
x, y = torch.from_numpy(x), torch.from_numpy(y).type(torch.LongTensor)
# Creating new variables for the hidden state, otherwise
# we'd backprop through the entire training history
val_h = tuple([each.data for each in val_h])
inputs, targets = x, y
if(train_on_gpu):
inputs, targets = inputs.cuda(), targets.cuda()
output, val_h = net(inputs, val_h)
val_loss = criterion(output, targets.view(batch_size * seq_length))
val_losses.append(val_loss.item())
# Reset to train mode after iterating through validation data
net.train()
print("Epoch: {}/{}...".format(e+1, epochs),
"Step: {}...".format(counter),
"Loss: {:.4f}...".format(loss.item()),
"Val Loss: {:.4f}".format(np.mean(val_losses)))
Explanation: Time to train
The train function gives us the ability to set the number of epochs, the learning rate, and other parameters.
Below we're using an Adam optimizer and cross entropy loss since we are looking at character class scores as output. We calculate the loss and perform backpropagation, as usual!
A couple of details about training:
Within the batch loop, we detach the hidden state from its history; this time setting it equal to a new tuple variable because an LSTM has a hidden state that is a tuple of the hidden and cell states.
We use clip_grad_norm_ to help prevent exploding gradients.
End of explanation
## TODO: set you model hyperparameters
# Define and print the net
n_hidden = 256
n_layers = 2
net = CharRNN(chars, n_hidden, n_layers)
print(net)
Explanation: Instantiating the model
Now we can actually train the network. First we'll create the network itself, with some given hyperparameters. Then, define the mini-batches sizes, and start training!
End of explanation
batch_size = 128
seq_length = 100
# Start small if you are just testing initial behavior
n_epochs = 20
# Train the model
train(net, encoded, epochs=n_epochs,
batch_size=batch_size, seq_length=seq_length,
lr=0.001, print_every=10)
Explanation: Set your training hyperparameters!
End of explanation
# Change the name, for saving multiple files
model_name = './models/rnn_x_epoch.net'
checkpoint = {'n_hidden': net.n_hidden,
'n_layers': net.n_layers,
'state_dict': net.state_dict(),
'tokens': net.chars}
with open(model_name, 'wb') as f:
torch.save(checkpoint, f)
Explanation: Getting the best model
To set your hyperparameters to get the best performance, you'll want to watch the training and validation losses. If your training loss is much lower than the validation loss, you're overfitting. Increase regularization (more dropout) or use a smaller network. If the training and validation losses are close, you're underfitting so you can increase the size of the network.
Hyperparameters
Here are the hyperparameters for the network.
In defining the model:
* n_hidden - The number of units in the hidden layers.
* n_layers - Number of hidden LSTM layers to use.
We assume that dropout probability and learning rate will be kept at the default, in this example.
And in training:
* batch_size - Number of sequences running through the network in one pass.
* seq_length - Number of characters in the sequence the network is trained on. Larger is better typically, the network will learn more long range dependencies. But it takes longer to train. 100 is typically a good number here.
* lr - Learning rate for training
Here's some good advice from Andrej Karpathy on training the network. I'm going to copy it in here for your benefit, but also link to where it originally came from.
Tips and Tricks
Monitoring Validation Loss vs. Training Loss
If you're somewhat new to Machine Learning or Neural Networks it can take a bit of expertise to get good models. The most important quantity to keep track of is the difference between your training loss (printed during training) and the validation loss (printed once in a while when the RNN is run on the validation data (by default every 1000 iterations)). In particular:
If your training loss is much lower than validation loss then this means the network might be overfitting. Solutions to this are to decrease your network size, or to increase dropout. For example you could try dropout of 0.5 and so on.
If your training/validation loss are about equal then your model is underfitting. Increase the size of your model (either number of layers or the raw number of neurons per layer)
Approximate number of parameters
The two most important parameters that control the model are n_hidden and n_layers. I would advise that you always use n_layers of either 2/3. The n_hidden can be adjusted based on how much data you have. The two important quantities to keep track of here are:
The number of parameters in your model. This is printed when you start training.
The size of your dataset. 1MB file is approximately 1 million characters.
These two should be about the same order of magnitude. It's a little tricky to tell. Here are some examples:
I have a 100MB dataset and I'm using the default parameter settings (which currently prints 150K parameters). My data size is significantly larger (100 mil >> 0.15 mil), so I expect to heavily underfit. I am thinking I can comfortably afford to make n_hidden larger.
I have a 10MB dataset and running a 10 million parameter model. I'm slightly nervous and I'm carefully monitoring my validation loss. If it's larger than my training loss then I may want to try to increase dropout a bit and see if that helps the validation loss.
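If you want to check the first of these quantities yourself rather than reading it from the training printout, a quick sketch (assuming the `net` and `encoded` objects created earlier) is:
```
n_params = sum(p.numel() for p in net.parameters())   # total number of model parameters
n_chars = len(encoded)                                # dataset size in characters
print(n_params, 'parameters vs.', n_chars, 'characters')
```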
Best models strategy
The winning strategy to obtaining very good models (if you have the compute time) is to always err on making the network larger (as large as you're willing to wait for it to compute) and then try different dropout values (between 0 and 1). Whatever model has the best validation performance (the loss, written in the checkpoint filename, low is good) is the one you should use in the end.
It is very common in deep learning to run many different models with many different hyperparameter settings, and in the end take whatever checkpoint gave the best validation performance.
By the way, the size of your training and validation splits are also parameters. Make sure you have a decent amount of data in your validation set or otherwise the validation performance will be noisy and not very informative.
Checkpoint
After training, we'll save the model so we can load it again later if we need to. Here I'm saving the parameters needed to create the same architecture: the hidden layer hyperparameters and the text characters.
End of explanation
def predict(net, char, h=None, top_k=None):
''' Given a character, predict the next character.
Returns the predicted character and the hidden state.
'''
# Tensor inputs
x = np.array([[net.char2int[char]]])
x = one_hot_encode(x, len(net.chars))
inputs = torch.from_numpy(x)
if(train_on_gpu):
inputs = inputs.cuda()
# Detach hidden state from history
h = tuple([each.data for each in h])
# Get the output of the model
out, h = net(inputs, h)
# Get the character probabilities
p = F.softmax(out, dim=1).data
if(train_on_gpu):
p = p.cpu() # move to cpu
# Get top characters
if top_k is None:
top_ch = np.arange(len(net.chars))
else:
p, top_ch = p.topk(top_k)
top_ch = top_ch.numpy().squeeze()
# Select the likely next character with some element of randomness
p = p.numpy().squeeze()
char = np.random.choice(top_ch, p=p/p.sum())
# Return the encoded value of the predicted char and the hidden state
return net.int2char[char], h
Explanation: Making Predictions
Now that the model is trained, we'll want to sample from it and make predictions about next characters! To sample, we pass in a character and have the network predict the next character. Then we take that character, pass it back in, and get another predicted character. Just keep doing this and you'll generate a bunch of text!
A note on the predict function
The output of our RNN is from a fully-connected layer and it outputs a distribution of next-character scores.
To actually get the next character, we apply a softmax function, which gives us a probability distribution that we can then sample to predict the next character.
Top K sampling
Our predictions come from a categorical probability distribution over all the possible characters. We can make the sampled text more reasonable to handle (with fewer variables) by only considering the $K$ most probable characters. This will prevent the network from giving us completely absurd characters while allowing it to introduce some noise and randomness into the sampled text. Read more about topk in the PyTorch documentation.
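A standalone illustration of the idea, using numpy only (the numbers are toy values, not taken from the model):
```
p = np.array([0.5, 0.2, 0.1, 0.1, 0.05, 0.05])   # full distribution over characters
k = 3
top = np.argsort(p)[::-1][:k]                    # indices of the k most probable characters
probs = p[top] / p[top].sum()                    # renormalize over the top k
next_char_idx = np.random.choice(top, p=probs)   # sample with some randomness
```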
End of explanation
def sample(net, size, prime='The', top_k=None):
if(train_on_gpu):
net.cuda()
else:
net.cpu()
# Eval mode
net.eval()
# First off, run through the prime characters
chars = [ch for ch in prime]
h = net.init_hidden(1)
for ch in prime:
char, h = predict(net, ch, h, top_k=top_k)
chars.append(char)
# Now pass in the previous character and get a new one
for ii in range(size):
char, h = predict(net, chars[-1], h, top_k=top_k)
chars.append(char)
return ''.join(chars)
print(sample(net, 1000, prime='Anna', top_k=5))
Explanation: Priming and generating text
Typically you'll want to prime the network so you can build up a hidden state. Otherwise the network will start out generating characters at random. In general the first bunch of characters will be a little rough since it hasn't built up a long history of characters to predict from.
End of explanation
# Here we load the checkpoint saved above (e.g. a model trained for 20 epochs, saved as `rnn_x_epoch.net`)
with open('./models/rnn_x_epoch.net', 'rb') as f:
checkpoint = torch.load(f)
loaded = CharRNN(checkpoint['tokens'],
n_hidden=checkpoint['n_hidden'],
n_layers=checkpoint['n_layers'])
loaded.load_state_dict(checkpoint['state_dict'])
# Sample using a loaded model
print(sample(loaded, 2000,
top_k=5,
prime="And Levin said"))
Explanation: Loading a checkpoint
End of explanation |
11,375 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Marvin query Results
Now that you have performed your first query, let's take at what Marvin returns as a Marvin Results object.
Step1: Let's look at the Marvin Results object. We can see how many results were returned with r.count and r.totalcount
Step2: Queries returning more than 1000 results are paginated into chunks of 100. For anything less than 1000, the query will return everything. Totalcount shows the total result count, and count shows the returned count in just that page.
The results from your query are stored in the .results attribute, as a list of NamedTuples. These are like regular tuples except they have names (like dictionary key names)
Step3: You can access specific values of the results through tuple indexing or via the named attribute, but this is not recommended in general.
Step4: But be careful Names using the full table.parameter syntax cannot be accessed via the named attribute. This syntax is returned when two parameters with non-unique names are returned, like ifu.name and bintype.name. Instead we recommend using the Marvin Results getListOf and getDictOf methods.
Step5: To see what columns are available, use r.columns and r.coltoparam
Step6: if you want to retrieve the results as a list of dictionaries or dictionary of lists, use getDictOf
Step7: you can change the format returned using the format_type keyword. format_type='dictlist' returns a dictionary of lists getDictOf returns a list of dictionaries
Step8: Retrieving More Results
If your returned results have been paginated, you can retrieve more using r.getNext, r.getPrevious, and r.getSubset
Step9: Sorting results
You can sort your results using the r.sort method. You can sort on any of the returned columns, using either the column name or full parameter name.
Step10: Converting to Marvin Tool Objects
Once you have a set of results, you may want to work with them using Marvin Tools. You can easily convert to Marvin Tools using the method r.convertToTool. This method lets you convert to Marvin Cubes, Spaxels, Maps, RSS, or ModelCube objects. Note
Step11: Save your Results and restore them | Python Code:
from marvin import config
config.setRelease('MPL-4')
from marvin.tools.query import Query, Results, doQuery
# make a query
myquery = 'nsa.sersic_logmass > 10.3 AND nsa.z < 0.1'
q = Query(searchfilter=myquery)
# run a query
r = q.run()
Explanation: Marvin query Results
Now that you have performed your first query, let's take a look at what Marvin returns as a Marvin Results object.
End of explanation
print(r)
print('Total count', r.totalcount)
print('Page count', r.count)
Explanation: Let's look at the Marvin Results object. We can see how many results were returned with r.count and r.totalcount
End of explanation
r.results
Explanation: Queries returning more than 1000 results are paginated into chunks of 100. For anything less than 1000, the query will return everything. Totalcount shows the total result count, and count shows the returned count in just that page.
The results from your query are stored in the .results attribute, as a list of NamedTuples. These are like regular tuples except they have names (like dictionary key names)
End of explanation
res = r.results[0]
print('single row', res)
print('mangaid', res[0])
print('mangaid', res.mangaid)
# what are the columns
print('columns', r.columns)
print(res.sersic_logmass)
Explanation: You can access specific values of the results through tuple indexing or via the named attribute, but this is not recommended in general.
End of explanation
# if you want a retrieve a list of a single parameter, use getListOf
mangaid = r.getListOf('mangaid')
print(mangaid)
Explanation: But be careful: names using the full table.parameter syntax cannot be accessed via the named attribute. This syntax is returned when two parameters with non-unique names are returned, like ifu.name and bintype.name. Instead we recommend using the Marvin Results getListOf and getDictOf methods.
End of explanation
# these are the column names in the results
print('columns', r.columns)
Explanation: To see what columns are available, use r.columns and r.coltoparam
End of explanation
# by default, getDictOf returns a list of dictionaries, that you can iterate over
mylist = r.getDictOf()
print(mylist)
print('mangaid', mylist[0]['cube.mangaid'], mylist[1]['cube.mangaid'])
Explanation: If you want to retrieve the results as a list of dictionaries or a dictionary of lists, use getDictOf
End of explanation
mydict = r.getDictOf(format_type='dictlist')
print(mydict)
print('keys', mydict.keys())
print('mangaid', mydict['cube.mangaid'])
Explanation: You can change the format returned using the format_type keyword: format_type='dictlist' returns a dictionary of lists, while the default getDictOf returns a list of dictionaries.
End of explanation
# get the next set of results
r.getNext()
# get only the next 10 results
r.getNext(chunk=10)
# get the previous 20 results
r.getPrevious(chunk=20)
# get a subset of results giving the starting index and number limit
# total results
print('total', r.totalcount)
# let's get a subset of 10 rows starting at 300
r.getSubset(300, limit=10)
Explanation: Retrieving More Results
If your returned results have been paginated, you can retrieve more using r.getNext, r.getPrevious, and r.getSubset
End of explanation
# let's sort by redshift. Default is in ascending order
r.sort('z')
# or in descending order
r.sort('nsa.z', order='desc')
Explanation: Sorting results
You can sort your results using the r.sort method. You can sort on any of the returned columns, using either the column name or full parameter name.
End of explanation
# See some results
r.results[0:3]
# Let's convert our results to Marvin Cube objects
r.columns
r.convertToTool('cube')
# Your new objects are stored as a list in your results called objects
r.objects
Explanation: Converting to Marvin Tool Objects
Once you have a set of results, you may want to work with them using Marvin Tools. You can easily convert to Marvin Tools using the method r.convertToTool. This method lets you convert to Marvin Cubes, Spaxels, Maps, RSS, or ModelCube objects. Note: You must have the necessary parameters to initialize a particular Marvin object.
End of explanation
# We strongly recommend saving to a Marvin pickle file (.mpf), so that you can restore the Results object later
r.save('results.mpf')
restored = Results.restore('results.mpf')
# Saving to CSV, JSON, xlsx, txt, or FITS
df = r.toDataFrame()
df.to_csv('results.csv')
df.to_json('results.json')
df.to_excel('results.xlsx')
table = r.toTable()
table.write('results.txt')
r.toFits('results.fits')
Explanation: Save your Results and restore them
End of explanation |
11,376 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
GeoPyMC Simulation Tutorial
Step1: Initializing the Class
First we need to create the GeoPyMC_sim object providing a name for the simulation project.
Step2: The first thing we need to do is to choose in which GeoMdel we want to perform the Bayesian inference. Making use of the next function we will assign the local path of the GeoModel that we intend to analyse.
Step3: Set the Priors Distribution
Once we got our GeoModel, we need to provide the uncertainty distribution for the parameters that form the model. At the moment, the uncertain parameters that form our GeoModel can be given in an excel table following the format of the example. PyMC support all type of density distribution, however in pyGeoMod is only automatized for Gaussian distribution so far.
Step4: Load the Syntetic Gravity Model
At the moment there are two ways to give the "measured gravity". Either we create a numpy array of the same dimensions as the simulation that we want to simulate or we can also give the path to a .xyz file. In the following example, we will use a forward simulation of one of our stochastic model as "measured gravity". The following code simply generates a numpy array which contain the forward simulation from a previous simulation.
Step5: Now we can use the GeoBay function to load the gravity data to our object, specifying type = "grid" (i.e. numpy array). Verbose allows us to plot the gravity.
Step6: Adding Constrains (Likelihood functions)
In addition to priors, our method permits the use of likelihood functions as tool to incorporate extra information and knowledge in order to constrain the model. The vast amount of possibilities make the automatization of this step extremely complex. Here, we show some examples that illustrates the flexibility and options of our method.
The creation of the GeoModel making use of the API is embeded in the method GeoBay.deterministic_GeoModel. We need to give the name of the .xml f the GeoModel. This method accept multiple attributes and keywords. The most importants are the resolution of the model or if we need to calculate the forward gravity. Verbose controls a section plotting and other information that can help to assure the correct functionality of the process
Step7: Once we have created the network we can generate a DOT plot to visualize it
Step8: Data base settings
Now we choose the directory where we want to save our database with our posteriors. The format used is .hdf5
Step9: Run Simulation
Now we can run the Bayesian inference making use of the PyMC methods as follow
Step10: Generate Posterior GeoModels from Database (Theoretical)
The database used in thsi example is not found in the repository. Please notice that the following is only a demostration how to do it.
First we load the database with our posteriors. Since the database save all the elements of the Bayesian Model we need to "forbid" all the elements that are not the input parameters of GeoModeller.
Step11: We set the options of the model we want create. Notice it can be different to the model used during the Bayesian inference. This object only is used as a representation of our posteriors.
Step12: Once we have all the PyMC objects set we can proceed to proceed to the forward modeling step. | Python Code:
import sys, os
sys.path.append(r"C:\Users\Miguel\workspace\pygeomod\pygeomod")
import geoPyMC
import pymc as pm
import numpy as np
import geogrid
import matplotlib.pyplot as plt
reload (geoPyMC)
%matplotlib inline
Explanation: GeoPyMC Simulation Tutorial
End of explanation
GeoBay = geoPyMC.GeoPyMC_sim("example")
Explanation: Initializing the Class
First we need to create the GeoPyMC_sim object providing a name for the simulation project.
End of explanation
GeoBay.proj_dir("C:\Users\Miguel\workspace\Thesis\Thesis\PyMC_geomod\Temp_graben")
Explanation: The first thing we need to do is choose the GeoModel on which we want to perform the Bayesian inference. Using the following function, we assign the local path of the GeoModel that we intend to analyse.
End of explanation
# Load the data from an excel. Verbose may display the table to check the correct operation
GeoBay.read_excel("C:\Users\Miguel\workspace\Thesis\Thesis\PyMC_geomod\Graben_Data_Uncert.xlsx", verbose = 0)
# Creates a PyMC object corresponding to a normal distribution of every parameter provided
GeoBay.set_Stoch_normal_distribution()
# Now we GeoBay has the PyMC instances contact_points_mc, dips_mc and azimuths_mc encapsulating our uncertainty/priors
print GeoBay.contact_points_mc
Explanation: Set the Prior Distributions
Once we have our GeoModel, we need to provide the uncertainty distributions for the parameters that form the model. At the moment, the uncertain parameters of the GeoModel can be given in an Excel table following the format of the example. PyMC supports all types of density distributions; however, pyGeoMod only automates Gaussian distributions so far.
End of explanation
# Loading old database
LD = pm.database.hdf5.load("C:\Users\Miguel\workspace\Thesis\Thesis\PyMC_geomod\database_paper\graben_Graben Soft Const.hdf5")
# Extracting a model from data base
Real_model = LD.trace("model", chain = 0)[-1:]
Real_model = Real_model[0]
# Calculating gravity given the following densities
densities = { 1: 3100,
2: 2920,
3: 2610,
4: 0}
Real_model.analyse_geophysics(densities)
Gravity_rm = Real_model.geophys.grv_data
# Normalizing
Gravity_rm_norm = (Gravity_rm-np.max(Gravity_rm))/np.min(Gravity_rm-np.max(Gravity_rm))
# Generating a bit of noise
np.random.seed(25258258)
noise = np.random.normal(0,0.00011, np.shape(Gravity_rm))
Gravity_rm_noise = Gravity_rm+noise
Explanation: Load the Synthetic Gravity Model
At the moment there are two ways to provide the "measured gravity": either we create a numpy array with the same dimensions as the simulation, or we give the path to a .xyz file. In the following example, we will use a forward simulation from one of our stochastic models as the "measured gravity". The following code simply generates a numpy array which contains the forward simulation from a previous run.
End of explanation
GeoBay.original_grav(Gravity_rm_noise, type = "grid", verbose = 1)
Explanation: Now we can use the GeoBay function to load the gravity data to our object, specifying type = "grid" (i.e. numpy array). Verbose allows us to plot the gravity.
End of explanation
#===============================
# First Constraints: Input data
#=============================
#============ Thickness of layers =========================
# We can create numpy arrays that contain multiple inputs. In this case we classify our contact points according to the layer
# they belong to in order to facilitate later operations
@pm.deterministic
def sedi2(contact_points_sedi2 = GeoBay.contact_points_mc[[int(np.argwhere(GeoBay.data_depth[:,0]=="sedi2_right")),
int(np.argwhere(GeoBay.data_depth[:,0]=="sedi2_left")),
int(np.argwhere(GeoBay.data_depth[:,0]=="sedi2_centre"))]]):
sedi2 = contact_points_sedi2
return sedi2
@pm.deterministic
def sedi1(contact_points_sedi1 = GeoBay.contact_points_mc[[int(np.argwhere(GeoBay.data_depth[:,0]=="sedi1_right")),
int(np.argwhere(GeoBay.data_depth[:,0]=="sedi1_left")),
int(np.argwhere(GeoBay.data_depth[:,0]=="sedi1_centre"))]]):
sedi1 = contact_points_sedi1
return sedi1
@pm.deterministic
def basement(contact_points_basement = GeoBay.contact_points_mc[[int(np.argwhere(GeoBay.data_depth[:,0]=="basement_right")),
int(np.argwhere(GeoBay.data_depth[:,0]=="basement_left")),
int(np.argwhere(GeoBay.data_depth[:,0]=="basement_centre"))]]):
basement = contact_points_basement
return basement
# Here we calculate the thickness of the layers by subtracting the previous arrays
@pm.deterministic
def thickness_layer1(sedi1 = sedi1, sedi2 = sedi2):
return sedi2 - sedi1
@pm.deterministic
def thickness_layer2(sedi1 = sedi1, basement = basement):
return sedi1 - basement
# We compare the "inferred thickness" with the thickness obtained from our priors
@pm.stochastic
def thickness_layer1_likelihood(value = 150,thickness_layer1 = thickness_layer1, trace = False):
return pm.normal_like(thickness_layer1, 180, 1./np.square(20.))
@pm.stochastic
def thickness_layer2_likelihood(value = 150,thickness_layer2 = thickness_layer2):
return pm.normal_like(thickness_layer2, 130, 1./np.square(20.))
#================== Shape of Graben of the input data section ============================
# It is also possible to include geological knowledge. In this case, we prevent a positive offset of our graben as well as
# an offset below our fault intersection
@pm.stochastic
def offset_negative_constraint(value = -500 ,sedi2 = sedi2):
if sedi2[2] > sedi2[0] or sedi2[2]>sedi2[1]:
return -np.inf
return 0
@pm.stochastic
def offset_below_faults_constraint(value = 0,
fault_bases = GeoBay.contact_points_mc[[int(np.argwhere(GeoBay.data_depth[:,0]=="fault_right_base")),
int(np.argwhere(GeoBay.data_depth[:,0]=="fault_left_base"))]],
basement_center = GeoBay.contact_points_mc[[int(np.argwhere(GeoBay.data_depth[:,0]=="basement_centre"))]]):
if basement_center < np.any(fault_bases)-100:
return -np.inf
return 0
#================= Creating the GeoModel ===================
#============================================================
# In our Bayesian Network the GeoModel behaves as a deterministic function
@pm.deterministic
def model(contact_points_val_mc = GeoBay.contact_points_mc, azimuths_val = GeoBay.azimuths_mc , dips_val = GeoBay.dips_mc):
return GeoBay.deterministic_GeoModel("Simple_Graben_3.xml",resolution=[100,100,10],
noddy_geophy=True, densities= {1: 3100,
2: 2920,
3: 2610,
4: 0}, verbose = 0, two_points = True)
#===============================
# Second Constraints: After the model
#=============================
#=================================
# Inversion
#=================================
# In the same manner as before, gravity can be used as one more constraint
@pm.deterministic
def e_sq(grav = GeoBay.ori_grav_grid, model = model):
Forw = model.geophys.grv_data
e_sq = np.sum(np.square(grav - Forw))
return e_sq
@pm.observed
def inversion(value = 1, e_sq = e_sq):
return pm.half_cauchy_like(e_sq/0.0004,1,50.)
# PyMC requires a list of all the elements that take part in the inference. The following method puts together
# priors and likelihoods ready for the next steps
GeoBay.creating_Bayes_model([sedi2,sedi1,basement, thickness_layer1, thickness_layer1_likelihood, thickness_layer2,
thickness_layer2_likelihood, offset_below_faults_constraint, offset_negative_constraint,
e_sq,inversion, model], verbose = 0)
Explanation: Adding Constrains (Likelihood functions)
In addition to priors, our method permits the use of likelihood functions as a tool to incorporate extra information and knowledge in order to constrain the model. The vast number of possibilities makes the automation of this step extremely complex. Here, we show some examples that illustrate the flexibility and options of our method.
The creation of the GeoModel using the API is embedded in the method GeoBay.deterministic_GeoModel. We need to give the name of the .xml file of the GeoModel. This method accepts multiple attributes and keywords; the most important are the resolution of the model and whether we need to calculate the forward gravity. Verbose controls a section plot and other output that can help to verify that the process works correctly.
End of explanation
GeoBay.dot_plot(display = True)
Explanation: Once we have created the network we can generate a DOT plot to visualize it:
End of explanation
GeoBay.MCMC_obj()
Explanation: Data base settings
Now we choose the directory where we want to save our database with our posteriors. The format used is .hdf5
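Under the hood this wraps the standard PyMC 2.x database interface; a minimal sketch of that call (the database path and the `pymc_model` attribute are illustrative assumptions, not necessarily the wrapper's exact signature) would be:
```
S = pm.MCMC(GeoBay.pymc_model, db='hdf5', dbname='database_temp/example_posteriors.hdf5')
```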
End of explanation
GeoBay.Sim_MCMC.sample(iter = 3, burn = 2)
Explanation: Run Simulation
Now we can run the Bayesian inference making use of the PyMC methods as follows:
End of explanation
# Load prior values
GeoPost = geoPyMC.GeoPyMC_GeoMod_from_posterior("5thFeb.hdf5",
forbbiden = ["adaptive","model","deviance", "likelihood", "e_sq", "constrain",
"Metropolis","layer2_conti", "order_layers", "SM2_Atley", "two_folds", "inversion", "layer2_conti" ] )
# Set directory for the new database with the GeoModels
GeoPost.proj_dir("C:\\Users\\Miguel\\Desktop\\Working_Space\\Sandstone_project\\Temp_SandstoneCopy")
# Set all PyMC objects making use of the posteriors values from the database
GeoPost.recover_parameters()
# Load gravity from a file. Only necessary if the dimensions of the models are determined by a gravity measurement
GeoPost.original_grav("temp\Sandstone_geophys\Sst_grav_500.xyz"
, type = "xyz", resolution= [85,125], verbose = 0)
Explanation: Generate Posterior GeoModels from Database (Theoretical)
The database used in this example is not included in the repository. Please note that the following is only a demonstration of how to do it.
First we load the database with our posteriors. Since the database saves all the elements of the Bayesian model, we need to "forbid" all the elements that are not input parameters of GeoModeller.
End of explanation
@pm.deterministic(trace= True)
def model(contact_points_val_mc = GeoPost.contact_points_mc, azimuths_val = GeoPost.azimuths_mc , dips_val = GeoPost.dips_mc):
return GeoPost.deterministic_GeoModel("Temp_SandstoneCopy.xml",resolution=[85,125,51],
noddy_geophy= True, densities= {1: 2.61,
2: 2.92,
3: 3.1,
4: 2.92,
5: 2.61}, verbose = 0, two_points = False,
plot_direction = "y", plot_cell = 42, z_dim = [-20000,1000])
GeoPost.creating_Bayes_model([model], verbose = 0)
Explanation: We set the options of the model we want to create. Note that it can differ from the model used during the Bayesian inference; this object is only used as a representation of our posteriors.
End of explanation
# Defining the database name
P = pm.MCMC(GeoPost.pymc_model, db= "hdf5" , dbname= "database_temp/5thFeb_post.hdf5")
# Run the simulation
P.sample(iter = 800)
Explanation: Once we have all the PyMC objects set, we can proceed to the forward modeling step.
End of explanation |
11,377 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
============================================================
Define target events based on time lag, plot evoked response
============================================================
This script shows how to define higher order events based on
time lag between reference and target events. For
illustration, we will put face stimuli presented into two
classes, that is 1) followed by an early button press
(within 590 milliseconds) and followed by a late button
press (later than 590 milliseconds). Finally, we will
visualize the evoked responses to both 'quickly-processed'
and 'slowly-processed' face stimuli.
Step1: Set parameters
Step2: Find stimulus event followed by quick button presses
Step3: View evoked response | Python Code:
# Authors: Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import mne
from mne import io
from mne.event import define_target_events
from mne.datasets import sample
import matplotlib.pyplot as plt
print(__doc__)
data_path = sample.data_path()
Explanation: ============================================================
Define target events based on time lag, plot evoked response
============================================================
This script shows how to define higher order events based on
time lag between reference and target events. For
illustration, we will put face stimuli presented into two
classes, that is 1) followed by an early button press
(within 590 milliseconds) and followed by a late button
press (later than 590 milliseconds). Finally, we will
visualize the evoked responses to both 'quickly-processed'
and 'slowly-processed' face stimuli.
End of explanation
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: EEG + STI 014 - bad channels (modify to your needs)
include = [] # or stim channels ['STI 014']
raw.info['bads'] += ['EEG 053'] # bads
# pick MEG channels
picks = mne.pick_types(raw.info, meg='mag', eeg=False, stim=False, eog=True,
include=include, exclude='bads')
Explanation: Set parameters
End of explanation
reference_id = 5 # presentation of a smiley face
target_id = 32 # button press
sfreq = raw.info['sfreq'] # sampling rate
tmin = 0.1 # trials leading to very early responses will be rejected
tmax = 0.59 # ignore face stimuli followed by button press later than 590 ms
new_id = 42 # the new event id for a hit. If None, reference_id is used.
fill_na = 99 # the fill value for misses
events_, lag = define_target_events(events, reference_id, target_id,
sfreq, tmin, tmax, new_id, fill_na)
print(events_) # The 99 indicates missing or too late button presses
# besides the events also the lag between target and reference is returned
# this could e.g. be used as parametric regressor in subsequent analyses.
print(lag[lag != fill_na]) # lag in milliseconds
# #############################################################################
# Construct epochs
tmin_ = -0.2
tmax_ = 0.4
event_id = dict(early=new_id, late=fill_na)
epochs = mne.Epochs(raw, events_, event_id, tmin_,
tmax_, picks=picks, baseline=(None, 0),
reject=dict(mag=4e-12))
# average epochs and get an Evoked dataset.
early, late = [epochs[k].average() for k in event_id]
Explanation: Find stimulus event followed by quick button presses
End of explanation
times = 1e3 * epochs.times # time in milliseconds
title = 'Evoked response followed by %s button press'
fig, axes = plt.subplots(2, 1)
early.plot(axes=axes[0], time_unit='s')
axes[0].set(title=title % 'late', ylabel='Evoked field (fT)')
late.plot(axes=axes[1], time_unit='s')
axes[1].set(title=title % 'early', ylabel='Evoked field (fT)')
plt.show()
Explanation: View evoked response
End of explanation |
11,378 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Hidden Markov Models (HMMs) are powerful, flexible methods for representing and classifying data with trends over time, and have been a key component in speech recognition systems for many years.
I found it very difficult to find a good example (with code) of a simple speech recognition system, so I decided to create this post. Though this implementation won't win any awards for "Best Speech Recognizer", I hope it will provide some insight into how HMMs can be used for speech recognition and other tasks.
<!-- TEASER_END -->
In this post, I will define what Hidden Markov Models are, show how to implement one form (Gaussian Mixture Model HMM, GMM-HMM) using numpy + scipy, and how to use this algorithm for single speaker speech recognition. For a more "production grade" HMM implementation, see hmmlearn which holds the HMM implementations that were formerly a part of sklearn.
Data
To demonstrate this algorithm, we need a dataset to operate on. I have chosen to use the sample dataset from this Google Code project by Hakon Sandsmark. I also used this code as a reference when creating my own implementation of a Guassian Mixture Model HMM (GMM-HMM). This aided in testing my implementation, as well as giving a frame of reference for performance.
Other available datasets are largely multispeaker, but the simple frequency peak features used in this example do not work in the multispeaker regime (different speakers have different frequency content for the same word! Let alone male/female speech differences). Future work will cover more advanced feature extraction techniques for audio, and extend these examples to multispeaker recognition.
Step1: This data has a total of 7 different spoken words, and each was spoken 15 different times, giving a grand total of 105 files. Next, the files will be extracted into a single data matrix (zero padding files to uniform length), and a label vector with the correct label for each data file is created.
Step2: Science Fiction (Double Feature)
Once the data has been downloaded and turned into an input matrix, the next step is to extract features from the raw data, as is done in many other machine learning pipelines.
Most "consumer grade" speaker recognition systems use advanced processing to extract a variety of features that describe the sound over both frequency and time, and until recently "custom features" were one of the keys to making a great recognition system. The current state of the art (to my knowledge, at least) has recently moved to using deep neural networks for feature extraction, which I hope to show in a future post. For now, we will stick to very simple features, in order to show a "simplest working example".
In this example, simple frequency peak detection was used, rather than the bevy of expert features typically used in a modern speech recognition pipeline (MFCCs, or more recently, a pretrained multilayer neural network). This has a direct effect on performance, but allows for a holistic implementation that fits in a single post
Step3: In order to find peaks in frequency, a technique called the Short Time Fourier Transform (STFT) is used. This idea is quite simple - the FFT is applied over chunks of the input data, resulting in a 2D FFT "image", usually called the spectrogram. Setting the FFT size allows us to control the amount of frequency resolution available, while overlapping these windows allows us to control the time resolution at the cost of increasing the data size.
Briefly, if X is a vector of length 20, we wish to create a 2D array, STFT_X. If the FFT size is 10, and the overlap is .5 (5 samples), this means (in pseudocode)
Step4: Next, peak detection is applied to each FFT frame of every data file. In a previous blog post, I described the use of wavelets for peak detection. Here, we will use a moving window to search for peaks instead. The primary steps to this algorithm are as follows
Step5: The peak detector does an acceptable job in detecting peaks, though it is by no means perfect. One of the limiting factors here is the length of the FFT - this peak detector is currently searching over 9 sample chunks, which is very large when the FFT size is only 64 bins! Once again, this will have a negative impact on performance.
We could set the FFT size larger, but in testing this algorithm longer FFT sizes appeared to do worse, even with higher overlap to maintain the same time resolution. Ultimately, this is where more advanced features would come into play.
Step6: GMM-HMM Bop
An implementation of a GMM-HMM is shown below. This code is also available as a gist, and any improvements I make will show up there.
This algorithm is fairly complex, and a good summary can be seen here, Brown and here, Moore. This implementation does not have any Viterbi backtracking - since my only interest (for now) is in speech classification, only Baum-Welch and Forward-Backward portions of the GMM-HMM are necessary.
HMMs can do do three primary tasks
Step7: Still Peaking
Once the peaks are detected for each frame, we have a 3 dimensional numpy array of size [numdatafiles, numpeaks, numframes] - in this case [105, 6, 216]. It is important that the observations be normalized in order to form probabilities, since the HMM expects to be trained on state probabilities. What does this mean?
For each sample, we have extracted a number of frames over time, and each frame has its own set of peaks. We divide each frame by the sum of all peaks in the frame (axis 0 of all_obs[n], or axis 1 of all_obs), and form a "state probability" for each frame in our dataset. In essence, we are creating 6 states, where the highest peak in a frame is state 1, second highest state 2, etc. for each FFT frame.
This representation shows the "spread" of the peaks - if all the peaks are close together, each one will have about the same probability, while peaks that are in a more unique distribution should take on a more unique fingerprint. An extreme case would be 3 low frequency peaks, and 3 high - this would result in very low state probabilities for the low frequency peaks, and very high state probabilities for the high frequency peaks. Since the HMM attempts to learn transition probabilities between frames, this is a decent way to represent speech for HMMs.
Step8: To predict words, we need to train 7 separate GMM-HMM models, one for each word. We will then feed the features for a test sample into each, choosing the word associated with the GMM-HMM having the highest output likelihood. This maximum likelihood estimate is our best guess for what word was spoken.
Step9: It seems that this classifier does decently. Let's look at a confusion matrix for what words were incorrect, to see if there is any unusual pattern in the misclassifications. | Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from utils import progress_bar_downloader
import os
#Hosting files on my dropbox since downloading from google code is painful
#Original project hosting is here: https://code.google.com/p/hmm-speech-recognition/downloads/list
#Audio is included in the zip file
link = 'https://dl.dropboxusercontent.com/u/15378192/audio.tar.gz'
dlname = 'audio.tar.gz'
if not os.path.exists('./%s' % dlname):
progress_bar_downloader(link, dlname)
os.system('tar xzf %s' % dlname)
else:
print('%s already downloaded!' % dlname)
fpaths = []
labels = []
spoken = []
for f in os.listdir('audio'):
for w in os.listdir('audio/' + f):
fpaths.append('audio/' + f + '/' + w)
labels.append(f)
if f not in spoken:
spoken.append(f)
print('Words spoken:', spoken)
Explanation: Hidden Markov Models (HMMs) are powerful, flexible methods for representing and classifying data with trends over time, and have been a key component in speech recognition systems for many years.
I found it very difficult to find a good example (with code) of a simple speech recognition system, so I decided to create this post. Though this implementation won't win any awards for "Best Speech Recognizer", I hope it will provide some insight into how HMMs can be used for speech recognition and other tasks.
In this post, I will define what Hidden Markov Models are, show how to implement one form (Gaussian Mixture Model HMM, GMM-HMM) using numpy + scipy, and how to use this algorithm for single speaker speech recognition. For a more "production grade" HMM implementation, see hmmlearn which holds the HMM implementations that were formerly a part of sklearn.
Data
To demonstrate this algorithm, we need a dataset to operate on. I have chosen to use the sample dataset from this Google Code project by Hakon Sandsmark. I also used this code as a reference when creating my own implementation of a Gaussian Mixture Model HMM (GMM-HMM). This aided in testing my implementation, as well as giving a frame of reference for performance.
Other available datasets are largely multispeaker, but the simple frequency peak features used in this example do not work in the multispeaker regime (different speakers have different frequency content for the same word! Let alone male/female speech differences). Future work will cover more advanced feature extraction techniques for audio, and extend these examples to multispeaker recognition.
End of explanation
#Files can be heard in Linux using the following commands from the command line
#cat kiwi07.wav | aplay -f S16_LE -t wav -r 8000
#Files are signed 16 bit raw, sample rate 8000
from scipy.io import wavfile
data = np.zeros((len(fpaths), 32000))
maxsize = -1
for n,file in enumerate(fpaths):
_, d = wavfile.read(file)
data[n, :d.shape[0]] = d
if d.shape[0] > maxsize:
maxsize = d.shape[0]
data = data[:, :maxsize]
#Each sample file is one row in data, and has one entry in labels
print('Number of files total:', data.shape[0])
all_labels = np.zeros(data.shape[0])
for n, l in enumerate(set(labels)):
all_labels[np.array([i for i, _ in enumerate(labels) if _ == l])] = n
print('Labels and label indices', all_labels)
Explanation: This data has a total of 7 different spoken words, and each was spoken 15 different times, giving a grand total of 105 files. Next, the files will be extracted into a single data matrix (zero padding files to uniform length), and a label vector with the correct label for each data file is created.
End of explanation
import scipy
def stft(x, fftsize=64, overlap_pct=.5):
#Modified from http://stackoverflow.com/questions/2459295/stft-and-istft-in-python
hop = int(fftsize * (1 - overlap_pct))
w = scipy.hanning(fftsize + 1)[:-1]
raw = np.array([np.fft.rfft(w * x[i:i + fftsize]) for i in range(0, len(x) - fftsize, hop)])
return raw[:, :(fftsize // 2)]
Explanation: Science Fiction (Double Feature)
Once the data has been downloaded and turned into an input matrix, the next step is to extract features from the raw data, as is done in many other machine learning pipelines.
Most "consumer grade" speaker recognition systems use advanced processing to extract a variety of features that describe the sound over both frequency and time, and until recently "custom features" were one of the keys to making a great recognition system. The current state of the art (to my knowledge, at least) has recently moved to using deep neural networks for feature extraction, which I hope to show in a future post. For now, we will stick to very simple features, in order to show a "simplest working example".
In this example, simple frequency peak detection was used, rather than the bevy of expert features typically used in a modern speech recognition pipeline (MFCCs, or more recently, a pretrained multilayer neural network). This has a direct effect on performance, but allows for a holistic implementation that fits in a single post :)
End of explanation
import matplotlib.pyplot as plt
plt.plot(data[0, :], color='steelblue')
plt.title('Timeseries example for %s'%labels[0])
plt.xlim(0, 3500)
plt.xlabel('Time (samples)')
plt.ylabel('Amplitude (signed 16 bit)')
plt.figure()
# + 1 to avoid log of 0
log_freq = 20 * np.log(np.abs(stft(data[0, :])) + 1)
print(log_freq.shape)
plt.imshow(log_freq, cmap='gray', interpolation=None)
plt.xlabel('Freq (bin)')
plt.ylabel('Time (overlapped frames)')
plt.ylim(log_freq.shape[1])
plt.title('PSD of %s example'%labels[0])
Explanation: In order to find peaks in frequency, a technique called the Short Time Fourier Transform (STFT) is used. This idea is quite simple - the FFT is applied over chunks of the input data, resulting in a 2D FFT "image", usually called the spectrogram. Setting the FFT size allows us to control the amount of frequency resolution available, while overlapping these windows allows us to control the time resolution at the cost of increasing the data size.
Briefly, if X is a vector of length 20, we wish to create a 2D array, STFT_X. If the FFT size is 10, and the overlap is .5 (5 samples), this means (in pseudocode):
```
STFT_X[0, :] = FFT(X[0:9])
STFT_X[1, :] = FFT(X[5:14])
STFT_X[2, :] = FFT(X[10:19])
```
We then have 3 FFT frames which have been extracted from the input sample X. For our feature extraction, we would next find peaks in each row of STFT_X.
The STFT is usually a crucial element of most DSP pipelines, and highly efficient routines are available to compute this (see FFTW, which numpy wraps). Though I have implemented my own STFT here, it is also possible to use matplotlib's specgram function instead.
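For example, a roughly equivalent spectrogram could be produced with the call below (the parameters are assumptions chosen to mirror the settings used here, not a drop-in replacement for the stft function above):
```
Pxx, freqs, bins, im = plt.specgram(data[0, :], NFFT=64, Fs=8000, noverlap=32)
```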
End of explanation
from numpy.lib.stride_tricks import as_strided
#Peak detection using the technique described here: http://kkjkok.blogspot.com/2013/12/dsp-snippets_9.html
def peakfind(x, n_peaks, l_size=3, r_size=3, c_size=3, f=np.mean):
win_size = l_size + r_size + c_size
shape = x.shape[:-1] + (x.shape[-1] - win_size + 1, win_size)
strides = x.strides + (x.strides[-1],)
xs = as_strided(x, shape=shape, strides=strides)
def is_peak(x):
centered = (np.argmax(x) == l_size + int(c_size/2))
l = x[:l_size]
c = x[l_size:l_size + c_size]
r = x[-r_size:]
passes = np.max(c) > np.max([f(l), f(r)])
if centered and passes:
return np.max(c)
else:
return -1
r = np.apply_along_axis(is_peak, 1, xs)
top = np.argsort(r, None)[::-1]
heights = r[top[:n_peaks]]
#Add l_size and half - 1 of center size to get to actual peak location
top[top > -1] = top[top > -1] + l_size + int(c_size / 2.)
return heights, top[:n_peaks]
plot_data = np.abs(stft(data[20, :]))[15, :]
values, locs = peakfind(plot_data, n_peaks=6)
fp = locs[values > -1]
fv = values[values > -1]
plt.plot(plot_data, color='steelblue')
plt.plot(fp, fv, 'x', color='darkred')
plt.title('Peak location example')
plt.xlabel('Frequency (bins)')
plt.ylabel('Amplitude')
Explanation: Next, peak detection is applied to each FFT frame of every data file. In a previous blog post, I described the use of wavelets for peak detection. Here, we will use a moving window to search for peaks instead. The primary steps to this algorithm are as follows:
Create a data window of length X. In this example X=9, though any window size can be used.
Split this window into 3 sections: left, center and right. For the 9 sample window, this will be LLLCCCRRR.
Apply some function (mean, median, max, min, etc) over each section of the window.
If the maximum value of the function over the center section is greater than the result for left or right, continue to the next check. Otherwise GOTO 6.
If the maximum value for f(CCC) is in the very center of the window, you have found a peak! Mark it and continue. Otherwise, go to the next step.
Shift the input data by one sample, and repeat the process. (data[0:9] -> data[1:10])
Once all data has been processed, you should have some detected peaks. Sort them in descending order by amplitude, then output the top N peaks. In this case, N=6
An implementation of this algorithm is shown below.
End of explanation
#This processing (top freq peaks) only works for single speaker case... need better features for multispeaker!
#MFCC (or deep NN/automatic feature extraction) could be interesting
all_obs = []
for i in range(data.shape[0]):
d = np.abs(stft(data[i, :]))
n_dim = 6
obs = np.zeros((n_dim, d.shape[0]))
for r in range(d.shape[0]):
_, t = peakfind(d[r, :], n_peaks=n_dim)
obs[:, r] = t.copy()
if i % 10 == 0:
print("Processed obs %s" % i)
all_obs.append(obs)
all_obs = np.atleast_3d(all_obs)
Explanation: The peak detector does an acceptable job in detecting peaks, though it is by no means perfect. One of the limiting factors here is the length of the FFT - this peak detector is currently searching over 9 sample chunks, which is very large when the FFT size is only 64 bins! Once again, this will have a negative impact on performance.
We could set the FFT size larger, but in testing this algorithm longer FFT sizes appeared to do worse, even with higher overlap to maintain the same time resolution. Ultimately, this is where more advanced features would come into play.
End of explanation
import scipy.stats as st
import numpy as np
class gmmhmm:
#This class converted with modifications from https://code.google.com/p/hmm-speech-recognition/source/browse/Word.m
def __init__(self, n_states):
self.n_states = n_states
self.random_state = np.random.RandomState(0)
#Normalize random initial state
self.prior = self._normalize(self.random_state.rand(self.n_states, 1))
self.A = self._stochasticize(self.random_state.rand(self.n_states, self.n_states))
self.mu = None
self.covs = None
self.n_dims = None
def _forward(self, B):
log_likelihood = 0.
T = B.shape[1]
alpha = np.zeros(B.shape)
for t in range(T):
if t == 0:
alpha[:, t] = B[:, t] * self.prior.ravel()
else:
alpha[:, t] = B[:, t] * np.dot(self.A.T, alpha[:, t - 1])
alpha_sum = np.sum(alpha[:, t])
alpha[:, t] /= alpha_sum
log_likelihood = log_likelihood + np.log(alpha_sum)
return log_likelihood, alpha
def _backward(self, B):
T = B.shape[1]
beta = np.zeros(B.shape);
beta[:, -1] = np.ones(B.shape[0])
for t in range(T - 1)[::-1]:
beta[:, t] = np.dot(self.A, (B[:, t + 1] * beta[:, t + 1]))
beta[:, t] /= np.sum(beta[:, t])
return beta
def _state_likelihood(self, obs):
obs = np.atleast_2d(obs)
B = np.zeros((self.n_states, obs.shape[1]))
for s in range(self.n_states):
#Needs scipy 0.14
np.random.seed(self.random_state.randint(1))
B[s, :] = st.multivariate_normal.pdf(
obs.T, mean=self.mu[:, s].T, cov=self.covs[:, :, s].T)
#This function can (and will!) return values >> 1
#See the discussion here for the equivalent matlab function
#https://groups.google.com/forum/#!topic/comp.soft-sys.matlab/YksWK0T74Ak
#Key line: "Probabilities have to be less than 1,
#Densities can be anything, even infinite (at individual points)."
#This is evaluating the density at individual points...
return B
def _normalize(self, x):
return (x + (x == 0)) / np.sum(x)
def _stochasticize(self, x):
return (x + (x == 0)) / np.sum(x, axis=1)
def _em_init(self, obs):
#Using this _em_init function allows for less required constructor args
if self.n_dims is None:
self.n_dims = obs.shape[0]
if self.mu is None:
subset = self.random_state.choice(np.arange(self.n_dims), size=self.n_states, replace=False)
self.mu = obs[:, subset]
if self.covs is None:
self.covs = np.zeros((self.n_dims, self.n_dims, self.n_states))
self.covs += np.diag(np.diag(np.cov(obs)))[:, :, None]
return self
def _em_step(self, obs):
obs = np.atleast_2d(obs)
B = self._state_likelihood(obs)
T = obs.shape[1]
log_likelihood, alpha = self._forward(B)
beta = self._backward(B)
xi_sum = np.zeros((self.n_states, self.n_states))
gamma = np.zeros((self.n_states, T))
for t in range(T - 1):
partial_sum = self.A * np.dot(alpha[:, t], (beta[:, t] * B[:, t + 1]).T)
xi_sum += self._normalize(partial_sum)
partial_g = alpha[:, t] * beta[:, t]
gamma[:, t] = self._normalize(partial_g)
partial_g = alpha[:, -1] * beta[:, -1]
gamma[:, -1] = self._normalize(partial_g)
expected_prior = gamma[:, 0]
expected_A = self._stochasticize(xi_sum)
expected_mu = np.zeros((self.n_dims, self.n_states))
expected_covs = np.zeros((self.n_dims, self.n_dims, self.n_states))
gamma_state_sum = np.sum(gamma, axis=1)
#Set zeros to 1 before dividing
gamma_state_sum = gamma_state_sum + (gamma_state_sum == 0)
for s in range(self.n_states):
gamma_obs = obs * gamma[s, :]
expected_mu[:, s] = np.sum(gamma_obs, axis=1) / gamma_state_sum[s]
partial_covs = np.dot(gamma_obs, obs.T) / gamma_state_sum[s] - np.dot(expected_mu[:, s], expected_mu[:, s].T)
#Symmetrize
partial_covs = np.triu(partial_covs) + np.triu(partial_covs).T - np.diag(partial_covs)
expected_covs[:, :, s] = partial_covs  # store the per-state covariance estimate
#Ensure positive semidefinite by adding diagonal loading
expected_covs += .01 * np.eye(self.n_dims)[:, :, None]
self.prior = expected_prior
self.mu = expected_mu
self.covs = expected_covs
self.A = expected_A
return log_likelihood
def fit(self, obs, n_iter=15):
#Support for 2D and 3D arrays
#2D should be n_features, n_dims
#3D should be n_examples, n_features, n_dims
#For example, with 6 features per speech segment, 105 different words
#this array should be size
#(105, 6, X) where X is the number of frames with features extracted
#For a single example file, the array should be size (6, X)
if len(obs.shape) == 2:
for i in range(n_iter):
self._em_init(obs)
log_likelihood = self._em_step(obs)
elif len(obs.shape) == 3:
count = obs.shape[0]
for n in range(count):
for i in range(n_iter):
self._em_init(obs[n, :, :])
log_likelihood = self._em_step(obs[n, :, :])
return self
def transform(self, obs):
#Support for 2D and 3D arrays
#2D should be n_features, n_dims
#3D should be n_examples, n_features, n_dims
#For example, with 6 features per speech segment, 105 different words
#this array should be size
#(105, 6, X) where X is the number of frames with features extracted
#For a single example file, the array should be size (6, X)
if len(obs.shape) == 2:
B = self._state_likelihood(obs)
log_likelihood, _ = self._forward(B)
return log_likelihood
elif len(obs.shape) == 3:
count = obs.shape[0]
out = np.zeros((count,))
for n in range(count):
B = self._state_likelihood(obs[n, :, :])
log_likelihood, _ = self._forward(B)
out[n] = log_likelihood
return out
if __name__ == "__main__":
rstate = np.random.RandomState(0)
t1 = np.ones((4, 40)) + .001 * rstate.rand(4, 40)
t1 /= t1.sum(axis=0)
t2 = rstate.rand(*t1.shape)
t2 /= t2.sum(axis=0)
m1 = gmmhmm(2)
m1.fit(t1)
m2 = gmmhmm(2)
m2.fit(t2)
m1t1 = m1.transform(t1)
m2t1 = m2.transform(t1)
print("Likelihoods for test set 1")
print("M1:", m1t1)
print("M2:", m2t1)
print("Prediction for test set 1")
print("Model", np.argmax([m1t1, m2t1]) + 1)
print()
m1t2 = m1.transform(t2)
m2t2 = m2.transform(t2)
print("Likelihoods for test set 2")
print("M1:", m1t2)
print("M2:", m2t2)
print("Prediction for test set 2")
print("Model", np.argmax([m1t2, m2t2]) + 1)
Explanation: GMM-HMM Bop
An implementation of a GMM-HMM is shown below. This code is also available as a gist, and any improvements I make will show up there.
This algorithm is fairly complex; good summaries can be found in the linked references by Brown and by Moore. This implementation does not have any Viterbi backtracking - since my only interest (for now) is in speech classification, only the Baum-Welch and Forward-Backward portions of the GMM-HMM are necessary.
HMMs can do three primary tasks:
State Estimation $P(S | O)$ - can be useful if you have prior info about what states mean and create the state probabilities yourself.
Path Estimation - given observations, what is the most likely "state path"? Not useful in our case, and not even implemented here!
Maximum Likelihood Estimation $P(O | \lambda)$ - learn the HMM parameters $\lambda$ which maximize the probability of observations. This is the primary method we will use.
To train the HMM, we use the Baum-Welch algorithm. There are many, many resources on this algorithm and I will not regurgitate here. Implementing this HMM was fairly tricky, and I highly recommend using a library unless you are interested in a "learning experience".
This code requires scipy 0.14 for the multivariate_normal density.
End of explanation
from sklearn.cross_validation import StratifiedShuffleSplit
sss = StratifiedShuffleSplit(all_labels, test_size=0.1, random_state=0)
for n,i in enumerate(all_obs):
all_obs[n] /= all_obs[n].sum(axis=0)
for train_index, test_index in sss:
X_train, X_test = all_obs[train_index, ...], all_obs[test_index, ...]
y_train, y_test = all_labels[train_index], all_labels[test_index]
print('Size of training matrix:', X_train.shape)
print('Size of testing matrix:', X_test.shape)
Explanation: Still Peaking
Once the peaks are detected for each frame, we have a 3 dimensional numpy array of size [numdatafiles, numpeaks, numframes] - in this case [105, 6, 216]. It is important that the observations be normalized in order to form probabilities, since the HMM expects to be trained on state probabilities. What does this mean?
For each sample, we have extracted a number of frames over time, and each frame has its own set of peaks. We divide each frame by the sum of all peaks in the frame (axis 0 of all_obs[n], or axis 1 of all_obs), and form a "state probability" for each frame in our dataset. In essence, we are creating 6 states, where the highest peak in a frame is state 1, second highest state 2, etc. for each FFT frame.
This representation shows the "spread" of the peaks - if all the peaks are close together, each one will have about the same probability, while peaks that are in a more unique distribution should take on a more unique fingerprint. An extreme case would be 3 low frequency peaks, and 3 high - this would result in very low state probabilities for the low frequency peaks, and very high state probabilities for the high frequency peaks. Since the HMM attempts to learn transition probabilities between frames, this is a decent way to represent speech for HMMs.
End of explanation
ys = set(all_labels)
ms = [gmmhmm(6) for y in ys]
_ = [m.fit(X_train[y_train == y, :, :]) for m, y in zip(ms, ys)]
ps = [m.transform(X_test) for m in ms]
res = np.vstack(ps)
predicted_labels = np.argmax(res, axis=0)
missed = (predicted_labels != y_test)
print('Test accuracy: %.2f percent' % (100 * (1 - np.mean(missed))))
Explanation: To predict words, we need to train 7 separate GMM-HMM models, one for each word. We will then feed the features for a test sample into each, choosing the word associated with the GMM-HMM having the highest output likelihood. This maximum likelihood estimate is our best guess for what word was spoken.
End of explanation
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, predicted_labels)
plt.matshow(cm, cmap='gray')
ax = plt.gca()
_ = ax.set_xticklabels([" "] + [l[:2] for l in spoken])
_ = ax.set_yticklabels([" "] + spoken)
plt.title('Confusion matrix, single speaker')
plt.ylabel('True label')
plt.xlabel('Predicted label')
Explanation: It seems that this classifier does decently. Let's look at a confusion matrix for what words were incorrect, to see if there is any unusual pattern in the misclassifications.
End of explanation |
11,379 |
Given the following text problem statement, write Python code to implement the functionality described below in problem statement
Problem:
how to load a custom pretrained model and make predictions using the model
| Python Code::
import tensorflow as tf
# Load the saved model; 'filename' is the path to the saved model
model = tf.keras.models.load_model('filename')
# Make predictions on the validation features (X_val assumed defined)
pred = model.predict(X_val)
|
11,380 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Introduction to Feature Columns
Learning Objectives
Load a CSV file using Pandas
Create an input pipeline using tf.data
Create multiple types of feature columns
Introduction
In this notebook, you classify structured data (e.g. tabular data in a CSV file) using feature columns. Feature columns serve as a bridge to map from columns in a CSV file to features used to train a model. In a subsequent lab, we will use Keras to define the model.
Each learning objective will correspond to a #TODO in the student lab notebook -- try to complete that notebook first before reviewing this solution notebook.
The Dataset
We will use a small dataset provided by the Cleveland Clinic Foundation for Heart Disease. There are several hundred rows in the CSV. Each row describes a patient, and each column describes an attribute. We will use this information to predict whether a patient has heart disease, which in this dataset is a binary classification task.
Below is a description of this dataset. Notice there are both numeric and categorical columns.
Column| Description| Feature Type | Data Type
------------|--------------------|----------------------|-----------------
Age | Age in years | Numerical | integer
Sex | (1 = male; 0 = female) | Categorical | integer
CP | Chest pain type (0, 1, 2, 3, 4) | Categorical | integer
Trestbpd | Resting blood pressure (in mm Hg on admission to the hospital) | Numerical | integer
Chol | Serum cholestoral in mg/dl | Numerical | integer
FBS | (fasting blood sugar > 120 mg/dl) (1 = true; 0 = false) | Categorical | integer
RestECG | Resting electrocardiographic results (0, 1, 2) | Categorical | integer
Thalach | Maximum heart rate achieved | Numerical | integer
Exang | Exercise induced angina (1 = yes; 0 = no) | Categorical | integer
Oldpeak | ST depression induced by exercise relative to rest | Numerical | float
Slope | The slope of the peak exercise ST segment | Numerical | integer
CA | Number of major vessels (0-3) colored by flourosopy | Numerical | integer
Thal | 3 = normal; 6 = fixed defect; 7 = reversable defect | Categorical | string
Target | Diagnosis of heart disease (1 = true; 0 = false) | Classification | integer
Import TensorFlow and other libraries
Step1: Lab Task 1
Step2: Split the dataframe into train, validation, and test
The dataset we downloaded was a single CSV file. As a best practice, we will split this into train, validation, and test sets.
Step3: Lab Task 2
Step4: Understand the input pipeline
Now that we have created the input pipeline, let's call it to see the format of the data it returns. We have used a small batch size to keep the output readable.
Step5: Lab Task 3
Step6: Numeric columns
The output of a feature column becomes the input to the model. A numeric column is the simplest type of column. It is used to represent real valued features. When using this column, your model will receive the column value from the dataframe unchanged.
Step7: Let's have a look at the output
Step8: Bucketized columns
Often, you don't want to feed a number directly into the model, but instead split its value into different categories based on numerical ranges. Consider raw data that represents a person's age. Instead of representing age as a numeric column, we could split the age into several buckets using a bucketized column. Notice the one-hot values below describe which age range each row matches.
Step9: Categorical columns
In this dataset, thal is represented as a string (e.g. 'fixed', 'normal', or 'reversible'). We cannot feed strings directly to a model. Instead, we must first map them to numeric values. The categorical vocabulary columns provide a way to represent strings as a one-hot vector (much like you have seen above with age buckets). The vocabulary can be passed as a list using categorical_column_with_vocabulary_list, or loaded from a file using categorical_column_with_vocabulary_file.
Step10: In a more complex dataset, many columns would be categorical (e.g. strings). Feature columns are most valuable when working with categorical data. Although there is only one categorical column in this dataset, we will use it to demonstrate several important types of feature columns that you could use when working with other datasets.
Embedding columns
Suppose instead of having just a few possible strings, we have thousands (or more) values per category. For a number of reasons, as the number of categories grow large, it becomes infeasible to train a neural network using one-hot encodings. We can use an embedding column to overcome this limitation. Instead of representing the data as a one-hot vector of many dimensions, an embedding column represents that data as a lower-dimensional, dense vector in which each cell can contain any number, not just 0 or 1. The size of the embedding (8, in the example below) is a parameter that must be tuned.
Key point
Step11: Hashed feature columns
Another way to represent a categorical column with a large number of values is to use a categorical_column_with_hash_bucket. This feature column calculates a hash value of the input, then selects one of the hash_bucket_size buckets to encode a string. When using this column, you do not need to provide the vocabulary, and you can choose to make the number of hash_buckets significantly smaller than the number of actual categories to save space.
Key point
Step12: Crossed feature columns
Combining features into a single feature, better known as feature crosses, enables a model to learn separate weights for each combination of features. Here, we will create a new feature that is the cross of age and thal. Note that crossed_column does not build the full table of all possible combinations (which could be very large). Instead, it is backed by a hashed_column, so you can choose how large the table is.
Step13: Choose which columns to use
We have seen how to use several types of feature columns. Now we will use them to train a model. The goal of this tutorial is to show you the complete code (e.g. mechanics) needed to work with feature columns. We have selected a few columns to train our model below arbitrarily.
Key point
Step14: How to Input Feature Columns to a Keras Model
Now that we have defined our feature columns, we now use a DenseFeatures layer to input them to a Keras model. Don't worry if you have not used Keras before. There is a more detailed video and lab introducing the Keras Sequential and Functional models.
Step15: Earlier, we used a small batch size to demonstrate how feature columns worked. We create a new input pipeline with a larger batch size.
Step16: Create, compile, and train the model
Step17: Visualize the model loss curve
Next, we will use Matplotlib to draw the model's loss curves for training and validation. A line plot is also created showing the accuracy over the training epochs for both the train (blue) and validation (orange) sets. | Python Code:
# You can use any Python source file as a module by executing an import statement in some other Python source file.
# The import statement combines two operations; it searches for the named module, then it binds the results of that search
# to a name in the local scope.
import numpy as np
import pandas as pd
# Import matplotlib to visualize the model
import matplotlib.pyplot as plt
# Seaborn is a Python data visualization library based on matplotlib
import seaborn as sns
# %matplotlib inline sets the backend of matplotlib to the `inline` backend
%matplotlib inline
import tensorflow as tf
from tensorflow import feature_column
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split
print("TensorFlow version: ",tf.version.VERSION)
Explanation: Introduction to Feature Columns
Learning Objectives
Load a CSV file using Pandas
Create an input pipeline using tf.data
Create multiple types of feature columns
Introduction
In this notebook, you classify structured data (e.g. tabular data in a CSV file) using feature columns. Feature columns serve as a bridge to map from columns in a CSV file to features used to train a model. In a subsequent lab, we will use Keras to define the model.
Each learning objective will correspond to a #TODO in the student lab notebook -- try to complete that notebook first before reviewing this solution notebook.
The Dataset
We will use a small dataset provided by the Cleveland Clinic Foundation for Heart Disease. There are several hundred rows in the CSV. Each row describes a patient, and each column describes an attribute. We will use this information to predict whether a patient has heart disease, which in this dataset is a binary classification task.
Below is a description of this dataset. Notice there are both numeric and categorical columns.
Column| Description| Feature Type | Data Type
------------|--------------------|----------------------|-----------------
Age | Age in years | Numerical | integer
Sex | (1 = male; 0 = female) | Categorical | integer
CP | Chest pain type (0, 1, 2, 3, 4) | Categorical | integer
Trestbpd | Resting blood pressure (in mm Hg on admission to the hospital) | Numerical | integer
Chol | Serum cholestoral in mg/dl | Numerical | integer
FBS | (fasting blood sugar > 120 mg/dl) (1 = true; 0 = false) | Categorical | integer
RestECG | Resting electrocardiographic results (0, 1, 2) | Categorical | integer
Thalach | Maximum heart rate achieved | Numerical | integer
Exang | Exercise induced angina (1 = yes; 0 = no) | Categorical | integer
Oldpeak | ST depression induced by exercise relative to rest | Numerical | float
Slope | The slope of the peak exercise ST segment | Numerical | integer
CA | Number of major vessels (0-3) colored by flourosopy | Numerical | integer
Thal | 3 = normal; 6 = fixed defect; 7 = reversable defect | Categorical | string
Target | Diagnosis of heart disease (1 = true; 0 = false) | Classification | integer
Import TensorFlow and other libraries
End of explanation
URL = 'https://storage.googleapis.com/download.tensorflow.org/data/heart.csv'
# Read a comma-separated values (csv) file into a DataFrame using the read_csv() function
dataframe = pd.read_csv(URL)
# Get the first five rows using the head() method
dataframe.head()
# Get a concise summary of a DataFrame
dataframe.info()
Explanation: Lab Task 1: Use Pandas to create a dataframe
Pandas is a Python library with many helpful utilities for loading and working with structured data. We will use Pandas to download the dataset from a URL, and load it into a dataframe.
End of explanation
# TODO 1a
# Create test, validation and train samples from one dataframe with pandas.
train, test = train_test_split(dataframe, test_size=0.2)
train, val = train_test_split(train, test_size=0.2)
print(len(train), 'train examples')
print(len(val), 'validation examples')
print(len(test), 'test examples')
Explanation: Split the dataframe into train, validation, and test
The dataset we downloaded was a single CSV file. As a best practice, we will split this into train, validation, and test sets.
End of explanation
# A utility method to create a tf.data dataset from a Pandas Dataframe
def df_to_dataset(dataframe, shuffle=True, batch_size=32):
dataframe = dataframe.copy()
labels = dataframe.pop('target')
ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels)) # TODO 2a
if shuffle:
ds = ds.shuffle(buffer_size=len(dataframe))
ds = ds.batch(batch_size)
return ds
# A small batch size is used for demonstration purposes
batch_size = 5
# TODO 2b
train_ds = df_to_dataset(train, batch_size=batch_size)
val_ds = df_to_dataset(val, shuffle=False, batch_size=batch_size)
test_ds = df_to_dataset(test, shuffle=False, batch_size=batch_size)
Explanation: Lab Task 2: Create an input pipeline using tf.data
Next, we will wrap the dataframes with tf.data. This will enable us to use feature columns as a bridge to map from the columns in the Pandas dataframe to features used to train a model. If we were working with a very large CSV file (so large that it does not fit into memory), we would use tf.data to read it from disk directly. That is not covered in this lab.
End of explanation
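As a hedged aside (not covered in this lab, and not used below), a very large CSV could be streamed from disk with tf.data directly instead of going through a Pandas DataFrame. This sketch downloads the same heart CSV to a local path first so the call has a real file to read.
# Sketch only: stream the CSV from disk with tf.data rather than via Pandas
csv_file = tf.keras.utils.get_file('heart.csv', URL)  # local copy of the CSV defined above
csv_ds = tf.data.experimental.make_csv_dataset(
    csv_file,
    batch_size=32,
    label_name='target',   # column to split off as the label
    num_epochs=1,
    shuffle=True)
for features, labels in csv_ds.take(1):
    print(list(features.keys()))
    print(labels)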
# If you don't use take(1), all elements will eventually be fetched
for feature_batch, label_batch in train_ds.take(1):
print('Every feature:', list(feature_batch.keys()))
print('A batch of ages:', feature_batch['age'])
print('A batch of targets:', label_batch)
Explanation: Understand the input pipeline
Now that we have created the input pipeline, let's call it to see the format of the data it returns. We have used a small batch size to keep the output readable.
End of explanation
# We will use this batch to demonstrate several types of feature columns
example_batch = next(iter(train_ds))[0]
# A utility method to create a feature column
# and to transform a batch of data
def demo(feature_column):
feature_layer = layers.DenseFeatures(feature_column)
print(feature_layer(example_batch).numpy())
Explanation: Lab Task 3: Demonstrate several types of feature column
TensorFlow provides many types of feature columns. In this section, we will create several types of feature columns, and demonstrate how they transform a column from the dataframe.
End of explanation
# Create a numeric feature column out of `age`
age = feature_column.numeric_column("age")
# the same helper is also available under its fully qualified name, tf.feature_column.numeric_column
print(age)
Explanation: Numeric columns
The output of a feature column becomes the input to the model. A numeric column is the simplest type of column. It is used to represent real valued features. When using this column, your model will receive the column value from the dataframe unchanged.
End of explanation
# Demo of a numeric feature column out of `age`
demo(age)
Explanation: Let's have a look at the output:
key='age'
A unique string identifying the input feature. It is used as the column name and the dictionary key for feature parsing configs, feature Tensor objects, and feature columns.
shape=(1,)
In the heart disease dataset, most columns from the dataframe are numeric. Recall that tensors have a rank. "Age" is a "vector" or "rank-1" tensor, which is like a list of values. A vector has 1-axis, thus the shape will always look like this: shape=(3,), where 3 is a scalar (or single number) and with 1-axis.
default_value=None
A single value compatible with dtype or an iterable of values compatible with dtype which the column takes on during tf.Example parsing if data is missing. A default value of None will cause tf.io.parse_example to fail if an example does not contain this column. If a single value is provided, the same value will be applied as the default value for every item. If an iterable of values is provided, the shape of the default_value should be equal to the given shape.
dtype=tf.float32
defines the type of values. Default value is tf.float32. Must be a non-quantized, real integer or floating point type.
normalizer_fn=None
If not None, a function that can be used to normalize the value of the tensor after default_value is applied for parsing. Normalizer function takes the input Tensor as its argument, and returns the output Tensor. (e.g. lambda x: (x - 3.0) / 4.2). Please note that even though the most common use case of this function is normalization, it can be used for any kind of Tensorflow transformations.
End of explanation
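A small sketch of the normalizer_fn parameter described above: z-score normalizing age inside the feature column. The mean and standard deviation are taken from the training split purely for illustration; this column is not used in the rest of the lab.
# Sketch: normalize `age` via normalizer_fn (statistics come from the training split)
age_mean = train['age'].mean()
age_std = train['age'].std()
age_normalized = feature_column.numeric_column(
    'age', normalizer_fn=lambda x: (x - age_mean) / age_std)
demo(age_normalized)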
# Create a bucketized feature column out of `age` with the following boundaries and demo it.
age_buckets = tf.feature_column.bucketized_column(age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
demo(age_buckets) # TODO 3a
Explanation: Bucketized columns
Often, you don't want to feed a number directly into the model, but instead split its value into different categories based on numerical ranges. Consider raw data that represents a person's age. Instead of representing age as a numeric column, we could split the age into several buckets using a bucketized column. Notice the one-hot values below describe which age range each row matches.
End of explanation
# Create a categorical vocabulary column out of the
# above mentioned categories with the key specified as `thal`.
thal = tf.feature_column.categorical_column_with_vocabulary_list(
'thal', ['fixed', 'normal', 'reversible'])
# Create an indicator column out of the created categorical column.
thal_one_hot = tf.feature_column.indicator_column(thal)
demo(thal_one_hot)
Explanation: Categorical columns
In this dataset, thal is represented as a string (e.g. 'fixed', 'normal', or 'reversible'). We cannot feed strings directly to a model. Instead, we must first map them to numeric values. The categorical vocabulary columns provide a way to represent strings as a one-hot vector (much like you have seen above with age buckets). The vocabulary can be passed as a list using categorical_column_with_vocabulary_list, or loaded from a file using categorical_column_with_vocabulary_file.
End of explanation
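For completeness, a hedged sketch of the categorical_column_with_vocabulary_file variant mentioned above. The file 'thal_vocab.txt' is hypothetical and would simply contain one category per line (fixed, normal, reversible); the demo call is commented out because the file does not exist in this lab.
# Sketch only: same thal column, but with the vocabulary read from a file on disk.
thal_from_file = tf.feature_column.categorical_column_with_vocabulary_file(
    key='thal',
    vocabulary_file='thal_vocab.txt',  # hypothetical file, one category per line
    vocabulary_size=3)
# demo(tf.feature_column.indicator_column(thal_from_file))  # requires the file to exist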
# Notice the input to the embedding column is the categorical column
# we previously created
# Set the size of the embedding to 8, by using the dimension parameter
thal_embedding = tf.feature_column.embedding_column(thal, dimension=8)
demo(thal_embedding)
Explanation: In a more complex dataset, many columns would be categorical (e.g. strings). Feature columns are most valuable when working with categorical data. Although there is only one categorical column in this dataset, we will use it to demonstrate several important types of feature columns that you could use when working with other datasets.
Embedding columns
Suppose instead of having just a few possible strings, we have thousands (or more) values per category. For a number of reasons, as the number of categories grow large, it becomes infeasible to train a neural network using one-hot encodings. We can use an embedding column to overcome this limitation. Instead of representing the data as a one-hot vector of many dimensions, an embedding column represents that data as a lower-dimensional, dense vector in which each cell can contain any number, not just 0 or 1. The size of the embedding (8, in the example below) is a parameter that must be tuned.
Key point: using an embedding column is best when a categorical column has many possible values. We are using one here for demonstration purposes, so you have a complete example you can modify for a different dataset in the future.
End of explanation
# Create a hashed feature column with `thal` as the key and 1000 hash buckets.
thal_hashed = tf.feature_column.categorical_column_with_hash_bucket(
'thal', hash_bucket_size=1000)
demo(tf.feature_column.indicator_column(thal_hashed))
Explanation: Hashed feature columns
Another way to represent a categorical column with a large number of values is to use a categorical_column_with_hash_bucket. This feature column calculates a hash value of the input, then selects one of the hash_bucket_size buckets to encode a string. When using this column, you do not need to provide the vocabulary, and you can choose to make the number of hash_buckets significantly smaller than the number of actual categories to save space.
Key point: An important downside of this technique is that there may be collisions in which different strings are mapped to the same bucket. In practice, this can work well for some datasets regardless.
End of explanation
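To make the collision caveat concrete, a small hedged sketch using tf.strings.to_hash_bucket_fast - not necessarily the exact hash the feature column uses internally, but with 4 distinct strings and only 2 buckets at least two of them must share a bucket.
# With more strings than buckets, collisions are unavoidable.
words = tf.constant(['fixed', 'normal', 'reversible', 'unknown'])
print(tf.strings.to_hash_bucket_fast(words, num_buckets=2))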
# Create a crossed column using the bucketized column (age_buckets)
# the categorical vocabulary column (thal), and 1000 hash buckets.
crossed_feature = tf.feature_column.crossed_column([age_buckets, thal], hash_bucket_size=1000)
demo(tf.feature_column.indicator_column(crossed_feature))
Explanation: Crossed feature columns
Combining features into a single feature, better known as feature crosses, enables a model to learn separate weights for each combination of features. Here, we will create a new feature that is the cross of age and thal. Note that crossed_column does not build the full table of all possible combinations (which could be very large). Instead, it is backed by a hashed_column, so you can choose how large the table is.
End of explanation
feature_columns = []
# numeric cols
# Create a feature column out of the header using a numeric column.
for header in ['age', 'trestbps', 'chol', 'thalach', 'oldpeak', 'slope', 'ca']:
feature_columns.append(feature_column.numeric_column(header))
# bucketized cols
# Create a bucketized feature column out of the age column using the following boundaries.
age_buckets = feature_column.bucketized_column(age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
feature_columns.append(age_buckets)
# indicator cols
# Create a categorical vocabulary column out of the below categories with the key specified as `thal`.
thal = feature_column.categorical_column_with_vocabulary_list(
'thal', ['fixed', 'normal', 'reversible'])
thal_one_hot = feature_column.indicator_column(thal)
feature_columns.append(thal_one_hot)
# embedding cols
# Create an embedding column out of the categorical vocabulary
thal_embedding = feature_column.embedding_column(thal, dimension=8)
feature_columns.append(thal_embedding)
# crossed cols
# Create a crossed column using the bucketized column (age_buckets),
# the categorical vocabulary column (thal), and 1000 hash buckets.
crossed_feature = feature_column.crossed_column([age_buckets, thal], hash_bucket_size=1000)
crossed_feature = feature_column.indicator_column(crossed_feature)
feature_columns.append(crossed_feature)
Explanation: Choose which columns to use
We have seen how to use several types of feature columns. Now we will use them to train a model. The goal of this tutorial is to show you the complete code (e.g. mechanics) needed to work with feature columns. We have selected a few columns to train our model below arbitrarily.
Key point: If your aim is to build an accurate model, try a larger dataset of your own, and think carefully about which features are the most meaningful to include, and how they should be represented.
End of explanation
# Create a Keras DenseFeatures layer and pass the feature_columns
feature_layer = tf.keras.layers.DenseFeatures(feature_columns)
Explanation: How to Input Feature Columns to a Keras Model
Now that we have defined our feature columns, we now use a DenseFeatures layer to input them to a Keras model. Don't worry if you have not used Keras before. There is a more detailed video and lab introducing the Keras Sequential and Functional models.
End of explanation
batch_size = 32
train_ds = df_to_dataset(train, batch_size=batch_size)
val_ds = df_to_dataset(val, shuffle=False, batch_size=batch_size)
test_ds = df_to_dataset(test, shuffle=False, batch_size=batch_size)
Explanation: Earlier, we used a small batch size to demonstrate how feature columns worked. We create a new input pipeline with a larger batch size.
End of explanation
# `Sequential` provides training and inference features on this model.
model = tf.keras.Sequential([
feature_layer,
layers.Dense(128, activation='relu'),
layers.Dense(128, activation='relu'),
layers.Dense(1)
])
# `Compile` configures the model for training.
model.compile(optimizer='adam',
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
metrics=['accuracy'])
# `Fit` trains the model for a fixed number of epochs
history = model.fit(train_ds,
validation_data=val_ds,
epochs=5)
# `Evaluate` returns the loss value & metrics values for the model in test mode.
loss, accuracy = model.evaluate(test_ds)
print("Accuracy", accuracy)
Explanation: Create, compile, and train the model
End of explanation
def plot_curves(history, metrics):
nrows = 1
ncols = 2
fig = plt.figure(figsize=(10, 5))
for idx, key in enumerate(metrics):
ax = fig.add_subplot(nrows, ncols, idx+1)
plt.plot(history.history[key])
plt.plot(history.history['val_{}'.format(key)])
plt.title('model {}'.format(key))
plt.ylabel(key)
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left');
plot_curves(history, ['loss', 'accuracy'])
Explanation: Visualize the model loss curve
Next, we will use Matplotlib to draw the model's loss curves for training and validation. A line plot is also created showing the accuracy over the training epochs for both the train (blue) and validation (orange) sets.
End of explanation |
11,381 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
ES-DOC CMIP6 Model Properties - Seaice
MIP Era
Step1: Document Authors
Set document authors
Step2: Document Contributors
Specify document contributors
Step3: Document Publication
Specify document publication status
Step4: Document Table of Contents
1. Key Properties --> Model
2. Key Properties --> Variables
3. Key Properties --> Seawater Properties
4. Key Properties --> Resolution
5. Key Properties --> Tuning Applied
6. Key Properties --> Key Parameter Values
7. Key Properties --> Assumptions
8. Key Properties --> Conservation
9. Grid --> Discretisation --> Horizontal
10. Grid --> Discretisation --> Vertical
11. Grid --> Seaice Categories
12. Grid --> Snow On Seaice
13. Dynamics
14. Thermodynamics --> Energy
15. Thermodynamics --> Mass
16. Thermodynamics --> Salt
17. Thermodynamics --> Salt --> Mass Transport
18. Thermodynamics --> Salt --> Thermodynamics
19. Thermodynamics --> Ice Thickness Distribution
20. Thermodynamics --> Ice Floe Size Distribution
21. Thermodynamics --> Melt Ponds
22. Thermodynamics --> Snow Processes
23. Radiative Processes
1. Key Properties --> Model
Name of seaice model used.
1.1. Model Overview
Is Required
Step5: 1.2. Model Name
Is Required
Step6: 2. Key Properties --> Variables
List of prognostic variable in the sea ice model.
2.1. Prognostic
Is Required
Step7: 3. Key Properties --> Seawater Properties
Properties of seawater relevant to sea ice
3.1. Ocean Freezing Point
Is Required
Step8: 3.2. Ocean Freezing Point Value
Is Required
Step9: 4. Key Properties --> Resolution
Resolution of the sea ice grid
4.1. Name
Is Required
Step10: 4.2. Canonical Horizontal Resolution
Is Required
Step11: 4.3. Number Of Horizontal Gridpoints
Is Required
Step12: 5. Key Properties --> Tuning Applied
Tuning applied to sea ice model component
5.1. Description
Is Required
Step13: 5.2. Target
Is Required
Step14: 5.3. Simulations
Is Required
Step15: 5.4. Metrics Used
Is Required
Step16: 5.5. Variables
Is Required
Step17: 6. Key Properties --> Key Parameter Values
Values of key parameters
6.1. Typical Parameters
Is Required
Step18: 6.2. Additional Parameters
Is Required
Step19: 7. Key Properties --> Assumptions
Assumptions made in the sea ice model
7.1. Description
Is Required
Step20: 7.2. On Diagnostic Variables
Is Required
Step21: 7.3. Missing Processes
Is Required
Step22: 8. Key Properties --> Conservation
Conservation in the sea ice component
8.1. Description
Is Required
Step23: 8.2. Properties
Is Required
Step24: 8.3. Budget
Is Required
Step25: 8.4. Was Flux Correction Used
Is Required
Step26: 8.5. Corrected Conserved Prognostic Variables
Is Required
Step27: 9. Grid --> Discretisation --> Horizontal
Sea ice discretisation in the horizontal
9.1. Grid
Is Required
Step28: 9.2. Grid Type
Is Required
Step29: 9.3. Scheme
Is Required
Step30: 9.4. Thermodynamics Time Step
Is Required
Step31: 9.5. Dynamics Time Step
Is Required
Step32: 9.6. Additional Details
Is Required
Step33: 10. Grid --> Discretisation --> Vertical
Sea ice vertical properties
10.1. Layering
Is Required
Step34: 10.2. Number Of Layers
Is Required
Step35: 10.3. Additional Details
Is Required
Step36: 11. Grid --> Seaice Categories
What method is used to represent sea ice categories ?
11.1. Has Mulitple Categories
Is Required
Step37: 11.2. Number Of Categories
Is Required
Step38: 11.3. Category Limits
Is Required
Step39: 11.4. Ice Thickness Distribution Scheme
Is Required
Step40: 11.5. Other
Is Required
Step41: 12. Grid --> Snow On Seaice
Snow on sea ice details
12.1. Has Snow On Ice
Is Required
Step42: 12.2. Number Of Snow Levels
Is Required
Step43: 12.3. Snow Fraction
Is Required
Step44: 12.4. Additional Details
Is Required
Step45: 13. Dynamics
Sea Ice Dynamics
13.1. Horizontal Transport
Is Required
Step46: 13.2. Transport In Thickness Space
Is Required
Step47: 13.3. Ice Strength Formulation
Is Required
Step48: 13.4. Redistribution
Is Required
Step49: 13.5. Rheology
Is Required
Step50: 14. Thermodynamics --> Energy
Processes related to energy in sea ice thermodynamics
14.1. Enthalpy Formulation
Is Required
Step51: 14.2. Thermal Conductivity
Is Required
Step52: 14.3. Heat Diffusion
Is Required
Step53: 14.4. Basal Heat Flux
Is Required
Step54: 14.5. Fixed Salinity Value
Is Required
Step55: 14.6. Heat Content Of Precipitation
Is Required
Step56: 14.7. Precipitation Effects On Salinity
Is Required
Step57: 15. Thermodynamics --> Mass
Processes related to mass in sea ice thermodynamics
15.1. New Ice Formation
Is Required
Step58: 15.2. Ice Vertical Growth And Melt
Is Required
Step59: 15.3. Ice Lateral Melting
Is Required
Step60: 15.4. Ice Surface Sublimation
Is Required
Step61: 15.5. Frazil Ice
Is Required
Step62: 16. Thermodynamics --> Salt
Processes related to salt in sea ice thermodynamics.
16.1. Has Multiple Sea Ice Salinities
Is Required
Step63: 16.2. Sea Ice Salinity Thermal Impacts
Is Required
Step64: 17. Thermodynamics --> Salt --> Mass Transport
Mass transport of salt
17.1. Salinity Type
Is Required
Step65: 17.2. Constant Salinity Value
Is Required
Step66: 17.3. Additional Details
Is Required
Step67: 18. Thermodynamics --> Salt --> Thermodynamics
Salt thermodynamics
18.1. Salinity Type
Is Required
Step68: 18.2. Constant Salinity Value
Is Required
Step69: 18.3. Additional Details
Is Required
Step70: 19. Thermodynamics --> Ice Thickness Distribution
Ice thickness distribution details.
19.1. Representation
Is Required
Step71: 20. Thermodynamics --> Ice Floe Size Distribution
Ice floe-size distribution details.
20.1. Representation
Is Required
Step72: 20.2. Additional Details
Is Required
Step73: 21. Thermodynamics --> Melt Ponds
Characteristics of melt ponds.
21.1. Are Included
Is Required
Step74: 21.2. Formulation
Is Required
Step75: 21.3. Impacts
Is Required
Step76: 22. Thermodynamics --> Snow Processes
Thermodynamic processes in snow on sea ice
22.1. Has Snow Aging
Is Required
Step77: 22.2. Snow Aging Scheme
Is Required
Step78: 22.3. Has Snow Ice Formation
Is Required
Step79: 22.4. Snow Ice Formation Scheme
Is Required
Step80: 22.5. Redistribution
Is Required
Step81: 22.6. Heat Diffusion
Is Required
Step82: 23. Radiative Processes
Sea Ice Radiative Processes
23.1. Surface Albedo
Is Required
Step83: 23.2. Ice Radiation Transmission
Is Required | Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'bnu', 'sandbox-3', 'seaice')
Explanation: ES-DOC CMIP6 Model Properties - Seaice
MIP Era: CMIP6
Institute: BNU
Source ID: SANDBOX-3
Topic: Seaice
Sub-Topics: Dynamics, Thermodynamics, Radiative Processes.
Properties: 80 (63 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:53:41
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
Explanation: Document Authors
Set document authors
End of explanation
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
Explanation: Document Contributors
Specify document contributors
End of explanation
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
Explanation: Document Publication
Specify document publication status
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: Document Table of Contents
1. Key Properties --> Model
2. Key Properties --> Variables
3. Key Properties --> Seawater Properties
4. Key Properties --> Resolution
5. Key Properties --> Tuning Applied
6. Key Properties --> Key Parameter Values
7. Key Properties --> Assumptions
8. Key Properties --> Conservation
9. Grid --> Discretisation --> Horizontal
10. Grid --> Discretisation --> Vertical
11. Grid --> Seaice Categories
12. Grid --> Snow On Seaice
13. Dynamics
14. Thermodynamics --> Energy
15. Thermodynamics --> Mass
16. Thermodynamics --> Salt
17. Thermodynamics --> Salt --> Mass Transport
18. Thermodynamics --> Salt --> Thermodynamics
19. Thermodynamics --> Ice Thickness Distribution
20. Thermodynamics --> Ice Floe Size Distribution
21. Thermodynamics --> Melt Ponds
22. Thermodynamics --> Snow Processes
23. Radiative Processes
1. Key Properties --> Model
Name of seaice model used.
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of sea ice model.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of sea ice model code (e.g. CICE 4.2, LIM 2.1, etc.)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 2. Key Properties --> Variables
List of prognostic variable in the sea ice model.
2.1. Prognostic
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of prognostic variables in the sea ice component.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 3. Key Properties --> Seawater Properties
Properties of seawater relevant to sea ice
3.1. Ocean Freezing Point
Is Required: TRUE Type: ENUM Cardinality: 1.1
Equation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 3.2. Ocean Freezing Point Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If using a constant seawater freezing point, specify this value.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 4. Key Properties --> Resolution
Resolution of the sea ice grid
4.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid e.g. N512L180, T512L70, ORCA025 etc.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 4.2. Canonical Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Expression quoted for gross comparisons of resolution, e.g. 50 km or 0.1 degrees etc.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 4.3. Number Of Horizontal Gridpoints
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 5. Key Properties --> Tuning Applied
Tuning applied to sea ice model component
5.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 5.2. Target
Is Required: TRUE Type: STRING Cardinality: 1.1
What was the aim of tuning, e.g. correct sea ice minima, correct seasonal cycle.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 5.3. Simulations
Is Required: TRUE Type: STRING Cardinality: 1.1
Which simulations had tuning applied, e.g. all, not historical, only pi-control?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 5.4. Metrics Used
Is Required: TRUE Type: STRING Cardinality: 1.1
List any observed metrics used in tuning model/parameters
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 5.5. Variables
Is Required: FALSE Type: STRING Cardinality: 0.1
Which variables were changed during the tuning process?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 6. Key Properties --> Key Parameter Values
Values of key parameters
6.1. Typical Parameters
Is Required: FALSE Type: ENUM Cardinality: 0.N
What values were specified for the following parameters if used?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 6.2. Additional Parameters
Is Required: FALSE Type: STRING Cardinality: 0.N
If you have any additional parameterised values that you have used (e.g. minimum open water fraction or bare ice albedo), please provide them here as a comma separated list
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 7. Key Properties --> Assumptions
Assumptions made in the sea ice model
7.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.N
General overview description of any key assumptions made in this model.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 7.2. On Diagnostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.N
Note any assumptions that specifically affect the CMIP6 diagnostic sea ice variables.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 7.3. Missing Processes
Is Required: TRUE Type: STRING Cardinality: 1.N
List any key processes missing in this model configuration? Provide full details where this affects the CMIP6 diagnostic sea ice variables?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 8. Key Properties --> Conservation
Conservation in the sea ice component
8.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Provide a general description of conservation methodology.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 8.2. Properties
Is Required: TRUE Type: ENUM Cardinality: 1.N
Properties conserved in sea ice by the numerical schemes.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 8.3. Budget
Is Required: TRUE Type: STRING Cardinality: 1.1
For each conserved property, specify the output variables which close the related budgets. as a comma separated list. For example: Conserved property, variable1, variable2, variable3
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 8.4. Was Flux Correction Used
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does conservation involved flux correction?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 8.5. Corrected Conserved Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List any variables which are conserved by more than the numerical scheme alone.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 9. Grid --> Discretisation --> Horizontal
Sea ice discretisation in the horizontal
9.1. Grid
Is Required: TRUE Type: ENUM Cardinality: 1.1
Grid on which sea ice is horizontal discretised?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 9.2. Grid Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the type of sea ice grid?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 9.3. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the advection scheme?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 9.4. Thermodynamics Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
What is the time step in the sea ice model thermodynamic component in seconds.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 9.5. Dynamics Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
What is the time step in the sea ice model dynamic component in seconds.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 9.6. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any additional horizontal discretisation details.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 10. Grid --> Discretisation --> Vertical
Sea ice vertical properties
10.1. Layering
Is Required: TRUE Type: ENUM Cardinality: 1.N
What type of sea ice vertical layers are implemented for purposes of thermodynamic calculations?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 10.2. Number Of Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
If using multi-layers specify how many.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 10.3. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any additional vertical grid details.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 11. Grid --> Seaice Categories
What method is used to represent sea ice categories ?
11.1. Has Mulitple Categories
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Set to true if the sea ice model has multiple sea ice categories.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 11.2. Number Of Categories
Is Required: TRUE Type: INTEGER Cardinality: 1.1
If using sea ice categories specify how many.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 11.3. Category Limits
Is Required: TRUE Type: STRING Cardinality: 1.1
If using sea ice categories specify each of the category limits.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 11.4. Ice Thickness Distribution Scheme
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the sea ice thickness distribution scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 11.5. Other
Is Required: FALSE Type: STRING Cardinality: 0.1
If the sea ice model does not use sea ice categories, specify any additional details. For example, models that parameterise the ice thickness distribution (ITD), i.e. there is no explicit ITD, but an assumed distribution is used and fluxes are computed accordingly.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 12. Grid --> Snow On Seaice
Snow on sea ice details
12.1. Has Snow On Ice
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is snow on ice represented in this model?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 12.2. Number Of Snow Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of vertical levels of snow on ice?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 12.3. Snow Fraction
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how the snow fraction on sea ice is determined
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 12.4. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any additional details related to snow on ice.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 13. Dynamics
Sea Ice Dynamics
13.1. Horizontal Transport
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of horizontal advection of sea ice?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 13.2. Transport In Thickness Space
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of sea ice transport in thickness space (i.e. in thickness categories)?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 13.3. Ice Strength Formulation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Which method of sea ice strength formulation is used?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 13.4. Redistribution
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which processes can redistribute sea ice (including thickness)?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 13.5. Rheology
Is Required: TRUE Type: ENUM Cardinality: 1.1
Rheology, what is the ice deformation formulation?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 14. Thermodynamics --> Energy
Processes related to energy in sea ice thermodynamics
14.1. Enthalpy Formulation
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the energy formulation?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 14.2. Thermal Conductivity
Is Required: TRUE Type: ENUM Cardinality: 1.1
What type of thermal conductivity is used?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 14.3. Heat Diffusion
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of heat diffusion?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 14.4. Basal Heat Flux
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method by which basal ocean heat flux is handled?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 14.5. Fixed Salinity Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If you have selected {Thermal properties depend on S-T (with fixed salinity)}, supply fixed salinity value for each sea ice layer.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 14.6. Heat Content Of Precipitation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method by which the heat content of precipitation is handled.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 14.7. Precipitation Effects On Salinity
Is Required: FALSE Type: STRING Cardinality: 0.1
If precipitation (freshwater) that falls on sea ice affects the ocean surface salinity please provide further details.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 15. Thermodynamics --> Mass
Processes related to mass in sea ice thermodynamics
15.1. New Ice Formation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method by which new sea ice is formed in open water.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 15.2. Ice Vertical Growth And Melt
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method that governs the vertical growth and melt of sea ice.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 15.3. Ice Lateral Melting
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of sea ice lateral melting?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 15.4. Ice Surface Sublimation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method that governs sea ice surface sublimation.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 15.5. Frazil Ice
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method of frazil ice formation.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 16. Thermodynamics --> Salt
Processes related to salt in sea ice thermodynamics.
16.1. Has Multiple Sea Ice Salinities
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the sea ice model use two different salinities: one for thermodynamic calculations, and one for the salt budget?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 16.2. Sea Ice Salinity Thermal Impacts
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does sea ice salinity impact the thermal properties of sea ice?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 17. Thermodynamics --> Salt --> Mass Transport
Mass transport of salt
17.1. Salinity Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is salinity determined in the mass transport of salt calculation?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 17.2. Constant Salinity Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If using a constant salinity value, specify this value in PSU.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 17.3. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the salinity profile used.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 18. Thermodynamics --> Salt --> Thermodynamics
Salt thermodynamics
18.1. Salinity Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is salinity determined in the thermodynamic calculation?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 18.2. Constant Salinity Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If using a constant salinity value, specify this value in PSU.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 18.3. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the salinity profile used.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 19. Thermodynamics --> Ice Thickness Distribution
Ice thickness distribution details.
19.1. Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is the sea ice thickness distribution represented?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 20. Thermodynamics --> Ice Floe Size Distribution
Ice floe-size distribution details.
20.1. Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is the sea ice floe-size represented?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 20.2. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Please provide further details on any parameterisation of floe-size.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 21. Thermodynamics --> Melt Ponds
Characteristics of melt ponds.
21.1. Are Included
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are melt ponds included in the sea ice model?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 21.2. Formulation
Is Required: TRUE Type: ENUM Cardinality: 1.1
What method of melt pond formulation is used?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 21.3. Impacts
Is Required: TRUE Type: ENUM Cardinality: 1.N
What do melt ponds have an impact on?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 22. Thermodynamics --> Snow Processes
Thermodynamic processes in snow on sea ice
22.1. Has Snow Aging
Is Required: TRUE Type: BOOLEAN Cardinality: 1.N
Set to True if the sea ice model has a snow aging scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 22.2. Snow Aging Scheme
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow aging scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 22.3. Has Snow Ice Formation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.N
Set to True if the sea ice model has snow ice formation.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 22.4. Snow Ice Formation Scheme
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow ice formation scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 22.5. Redistribution
Is Required: TRUE Type: STRING Cardinality: 1.1
What is the impact of ridging on snow cover?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 22.6. Heat Diffusion
Is Required: TRUE Type: ENUM Cardinality: 1.1
What methodology is used for heat diffusion through snow in sea ice thermodynamics?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 23. Radiative Processes
Sea Ice Radiative Processes
23.1. Surface Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method used to handle surface albedo.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 23.2. Ice Radiation Transmission
Is Required: TRUE Type: ENUM Cardinality: 1.N
Method by which solar radiation through sea ice is handled.
End of explanation |
11,382 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
network(), radar() and site() objects
This notebook introduces the high-level python interface with the radar.dat and hdw.dat content.
For more in-depth access (i.e., your own hdw.dat), look at the radInfoIO module
Step1: Import all radars
This could be used to iterate through radars, plot them all on a map, find radars in view of specific points...
Step2: Import a specific radar
Saves memory and time
Step3: Import a specific radar site
Saves even more memory and time | Python Code:
# Import radar module
%pylab inline
from davitpy.pydarn.radar import *
Explanation: network(), radar() and site() objects
This notebook introduces the high-level python interface with the radar.dat and hdw.dat content.
For more in-depth access (i.e., your own hdw.dat), look at the radInfoIO module:
radInfoIo?
End of explanation
radars = network()
print radars
# How to get the total number of radars
print len(radars)
print radars.nradar
# How to get a specific radar from the mountain of recorded radars
print radars.getRadarByCode("bks")
# is equivalent to...
#print radars.getRadarById(33)
#print radars.getRadarByName("Blackstone")
# How to get a specific radar site information at a given date
from datetime import datetime
print radars.getRadarByName('Goose Bay').getSiteByDate(datetime(2011,1,4))
Explanation: Import all radars
This could be used to iterate through radars, plot them all on a map, find radars in view of specific points...
End of explanation
# How to get only one radar without getting all the other radars
rad = radar(code='bks')
print rad
Explanation: Import a specific radar
Saves memory and time
End of explanation
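As a small additional sketch (dates chosen arbitrarily), the rad object created in the previous cell can be queried for its hardware configuration at different epochs using the getSiteByDate method already shown above:
```
from datetime import datetime
# Compare the Blackstone site description at two arbitrary dates
print rad.getSiteByDate(datetime(2008, 1, 1))
print rad.getSiteByDate(datetime(2012, 1, 1))
```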
# How to get a site without going through the whole network or radar classes
print site(code='bks', dt=datetime(2010,11,17))
Explanation: Import a specific radar site
Saves even more memory and time
End of explanation |
11,383 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Introduction
Machine learning literature makes heavy use of probabilistic graphical models
and bayesian statistics. In fact, state of the art (SOTA) architectures, such as
[variational autoencoders][vae-blog] (VAE) or [generative adversarial
networks][gan-blog] (GAN), are intrinsically stochastic by nature. To
fully understand research in this field, not only do we need a broad
knowledge of mathematics, probability, and optimization but we somehow need
intuition about how these concepts are applied to real world problems. For
example, one of the most common applications of deep learning techniques is
vision. We may want to classify images or generate new ones. Most SOTA
techniques pose these problems in a probabilistic framework. We frequently see
things like $p(\mathbf{x}|\mathbf{z})$ where $\mathbf{x}$ is an image and
$\mathbf{z}$ is a latent variable. What do we mean by the probability of an
image? What is a latent variable, and why is it necessary[^Bishop2006] to pose
the problems this way?
Short answer, it is necessary due to the inherent uncertainty of our universe.
In this case, uncertainty in image acquisition can be introduced via many
sources, such as the recording apparatus, the finite precision of our
measurements, as well as the intrinsic stochasticity of the process being
measured. Perhaps the most important source of uncertainty we will consider is
due to there being sources of variability that are themselves unobserved.
Probability theory provides us with a framework to reason in the presence of
uncertainty and information theory allows us to quantify uncertainty. As we
alluded to earlier, the field of machine learning makes heavy use of both, and
this is no coincidence.
Representations
How do we describe a face? The word "face" is a symbol and this symbol means
different things to different people. Yet, there is enough commonality between
our interpretations that we are able to effectively communicate with one
another using the word. How is that? What are the underlying features of faces
that we all hold common? Why is a simple smiley face clip art so obviously
perceived as a face? To make it more concrete, why are two simple ellipses
decorated underneath by a short curve so clearly a face, while an eye lid,
lower lip, one ear and a nostril, not?
Insert Image of Faces
Left
Step1: Any one point inside the unit square would represent an image. For example the image associated with the point $(0.25,0.85)$ is shown below.
Step2: Now consider the case where there is some
process correlating the two variables. This
would be similar to there being some rules behind
the structure of faces. We know that this must be
the case because if it weren't, then faces would
be created randomly and we would not see the
patterns that we do. In
this case, the pixels would be correlated in
some manner due to the mechanism driving the
construction of faces. In this simple case,
let's consider a direct correlation of the
form $x_1 = \frac{1}{2} \cos(2\pi x_2)+\frac{1}{2}+\epsilon$
where $\epsilon$ is a noise term coming from
a low variability normal distribution
$\epsilon \sim N(0,\frac{1}{10})$. We see
in Figure \ref{fig
Step3: We will refer to the structure suggested by
the two dimensional points as the 'manifold'.
This is a common practice when analyzing images.
A 28 by 28 pixel image will be a point in
784 dimensional space. If we are examining
images with structure, various images of the
number 2 for example, then it turns out that
these images will form a manifold in 784
dimensional space. In most cases, as is the
case in our contrived example, this manifold
exists in a lower dimensional space than that
of the images themselves. The goal is to 'learn'
this manifold. In our simple case we can describe
the manifold as a function of only 1 variable
$$f(t) = <t,\frac{1}{2} \cos(2\pi t)+\frac{1}{2}>$$
This is what we would call the underlying data
generating process. In practice we usually
describe the manifold in terms of a probability
distribution. We will refer to the data
generating distribution in our example as
$p_{test}(x_1, x_2)$. Why did we choose a
probability to describe the manifold created
by the data generating process? How might this
probability be interpreted?
Learning the actual distribution turns out to
be a difficult task. Here we will use a
common non parametric technique for describing
distributions, the histogram. Looking at a
histogram of the images, or two dimensional points,
will give us insight into the structure of the
distribution from which they came. Notice here
though that the histogram merely describes the
distribution, we do not know what it is. | Python Code:
x1 = np.random.uniform(size=500)
x2 = np.random.uniform(size=500)
fig = plt.figure();
ax = fig.add_subplot(1,1,1);
ax.scatter(x1,x2, edgecolor='black', s=80);
ax.grid();
ax.set_axisbelow(True);
ax.set_xlim(-0.25,1.25); ax.set_ylim(-0.25,1.25)
ax.set_xlabel('Pixel 2'); ax.set_ylabel('Pixel 1'); plt.savefig('images_in_2dspace.pdf')
Explanation: Introduction
Machine learning literature makes heavy use of probabilistic graphical models
and bayesian statistics. In fact, state of the art (SOTA) architectures, such as
[variational autoencoders][vae-blog] (VAE) or [generative adversarial
networks][gan-blog] (GAN), are intrinsically stochastic by nature. To
fully understand research in this field, not only do we need a broad
knowledge of mathematics, probability, and optimization but we somehow need
intuition about how these concepts are applied to real world problems. For
example, one of the most common applications of deep learning techniques is
vision. We may want to classify images or generate new ones. Most SOTA
techniques pose these problems in a probabilistic framework. We frequently see
things like $p(\mathbf{x}|\mathbf{z})$ where $\mathbf{x}$ is an image and
$\mathbf{z}$ is a latent variable. What do we mean by the probability of an
image? What is a latent variable, and why is it necessary[^Bishop2006] to pose
the problems this way?
Short answer, it is necessary due to the inherent uncertainty of our universe.
In this case, uncertainty in image acquisition can be introduced via many
sources, such as the recording apparatus, the finite precision of our
measurements, as well as the intrinsic stochasticity of the process being
measured. Perhaps the most important source of uncertainty we will consider is
due to there being sources of variability that are themselves unobserved.
Probability theory provides us with a framework to reason in the presence of
uncertainty and information theory allows us to quantify uncertainty. As we
alluded to earlier, the field of machine learning makes heavy use of both, and
this is no coincidence.
Representations
How do we describe a face? The word "face" is a symbol and this symbol means
different things to different people. Yet, there is enough commonality between
our interpretations that we are able to effectively communicate with one
another using the word. How is that? What are the underlying features of faces
that we all hold common? Why is a simple smiley face clip art so obviously
perceived as a face? To make it more concrete, why are two simple ellipses
decorated underneath by a short curve so clearly a face, while an eye lid,
lower lip, one ear and a nostril, not?
Insert Image of Faces
Left: Most would likely agree, this is clearly a face. Middle:
With nearly all of the details removed, a mere two circles and
curve are enough to create what the author still recognizes
as a face. Right: Does this look like a face to you? An ear,
nostril, eyelid, and lip do not seem to convey a face as clearly
as the eyes and the mouth do. We will quantify this demonstration
shortly.
Features, or representations, are built on the idea that characteristics of the
symbol "face" are not a property of any one face. Rather, they only arise from
the myriad of things we use the symbol to represent. In other words, a
particular face is not ascribed meaning by the word "face" - the word "face"
derives meaning from the many faces it represents. This suggests that facial
characteristics can be described through the statistical properties of all
faces. Loosely speaking, these underlying statistical characteristics are what
the machine learning field often calls latent variables.
Probability of an Image
Most images are contaminated with noise that must be addressed. At the
highest level, we have noise being added to the data by the imaging device. The
next level of uncertainty comes as a consequence of discretization.
Images in reality are continuous but in the process of imaging we only measure
certain points along the face. Consider for example a military satellite
tracking a vehicle. If one wishes to predict the future location of the van,
the prediction is limited to be within one of the discrete cells that make up
its measurements. However, the true location of the van could be anywhere
within that grid cell. There is also intrinsic stochasticity at the atomic
level that we ignore. The fluctuations taking place at that scale are assumed
to be averaged out in our observations.
The unobserved sources of variability will be our primary focus. Before we
address that, let us lay down some preliminary concepts. We are going to assume
that there exists some true unknown process that determines what faces look
like. A dataset of faces can then be considered as a sample of this process at
various points throughout its life. This suggests that these snapshots are a
outputs of the underlying data generating process. Considering the many
sources of uncertainty outlined above, it is natural to describe this process
as a probability distribution. There will be many ways to interpret the data as
a probability, but we will begin by considering any one image to be the result
of a data generating distribution, $P_{data}(\mathbf{x})$. Here $\mathbf{x}$ is considered to be
an image of a face with $n$ pixels. So $P_{data}$ is a joint distribution over
each pixel of the frame with a probability density function (pdf),
$p_{data}(x_1,x_2,\dots,x_n)$.
To build intuition about what $p_{data}(\mathbf{x})$ is and how it relates to
the assumed data generating process, we will explore a simple example. Take an
image with only 2 pixels... [$x_1$,$x_2$] where both $x_1$ and $x_2$ are in
[0,1]. Each image can be considered as a two dimensional point, in
$\mathbb{R}^2$. All possible images would occupy a square in the 2 dimensional
plane. An example of what this might look like can be seen in Figure
\ref{fig:images_in_2dspace} on page \pageref{fig:images_in_2dspace}. Any one
point inside the unit square would represent an image. For example the image
associated with the point $(0.25,0.85)$ is shown below.
End of explanation
im = [(0.25, 0.85)]
plt.imshow(im, cmap='gray',vmin=0,vmax=1)
plt.tick_params(
axis='both', # changes apply to both axes
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
left='off',
right='off'
)
plt.xticks([])
plt.yticks([])
plt.xlabel('Pixel 1 = 0.25 Pixel 2 = 0.85')
plt.savefig('sample_2dspace_image.pdf')
Explanation: Any one point inside the unit square would represent an image. For example the image associated with the point $(0.25,0.85)$ is shown below.
End of explanation
x1 = lambda x2: 0.5*np.cos(2*np.pi*x2)+0.5
x2 = np.linspace(0,1,200)
eps = np.random.normal(scale=0.1, size=200)
fig = plt.figure();
ax = fig.add_subplot(1,1,1);
ax.scatter(x2,x1(x2)+eps, edgecolor='black', s=80);
ax.grid();
ax.set_axisbelow(True);
ax.set_xlim(-0.25,1.25); ax.set_ylim(-0.25,1.25); plt.axes().set_aspect('equal')
ax.set_xlabel('Pixel 2'); ax.set_ylabel('Pixel 1'); plt.savefig('structured_images_in_2dspace.pdf')
Explanation: Now consider the case where there is some
process correlating the two variables. This
would be similar to there being some rules behind
the structure of faces. We know that this must be
the case because if it weren't, then faces would
be created randomly and we would not see the
patterns that we do. In
this case, the pixels would be correlated in
some manner due to the mechanism driving the
construction of faces. In this simple case,
let's consider a direct correlation of the
form $x_1 = \frac{1}{2} \cos(2\pi x_2)+\frac{1}{2}+\epsilon$
where $\epsilon$ is a noise term coming from
a low variability normal distribution
$\epsilon \sim N(0,\frac{1}{10})$. We see
in Figure \ref{fig:structured_images_in_2dspace}
on page \pageref{fig:structured_images_in_2dspace}
that in this case, the images plotted
in two dimensions resulting from this
relationship form a distinct pattern.
End of explanation
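Before looking at the full two-dimensional histogram below, here is a small sketch of a conditional view of the same toy data: keep only the samples whose second pixel lies near 0.3 and histogram the first pixel. It reuses the x1 function and noise level from the cell above; the slice half-width of 0.05 is an arbitrary choice.
```
# Condition on pixel 2 being close to 0.3 and look at the spread of pixel 1
x2_samples = np.random.uniform(size=100000)
x1_samples = x1(x2_samples) + np.random.normal(scale=0.1, size=100000)
mask = np.abs(x2_samples - 0.3) < 0.05   # keep a thin vertical slice of the manifold
plt.figure()
plt.hist(x1_samples[mask], bins=50)
plt.xlabel('Pixel 1'); plt.ylabel('Counts')
plt.title('Pixel 1 values when pixel 2 is near 0.3')
```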
from matplotlib.colors import LogNorm
x2 = np.random.uniform(size=100000)
eps = np.random.normal(scale=0.1, size=100000)
hist2d = plt.hist2d(x2,x1(x2)+eps, bins=50, norm=LogNorm())
plt.xlim(0.0,1.0); plt.ylim(-0.3,1.3); plt.axes().set_aspect('equal')
plt.xlabel('Pixel 2'); plt.ylabel('Pixel 1')
plt.colorbar();
plt.savefig('histogram_of_structured_images.pdf')
Explanation: We will refer to the structure suggested by
the two dimensional points as the 'manifold'.
This is a common practice when analyzing images.
A 28 by 28 pixel image will be a point in
784 dimensional space. If we are examining
images with structure, various images of the
number 2 for example, then it turns out that
these images will form a manifold in 784
dimensional space. In most cases, as is the
case in our contrived example, this manifold
exists in a lower dimensional space than that
of the images themselves. The goal is to 'learn'
this manifold. In our simple case we can describe
the manifold as a function of only 1 variable
$$f(t) = <t,\frac{1}{2} \cos(2\pi t)+\frac{1}{2}>$$
This is what we would call the underlying data
generating process. In practice we usually
describe the manifold in terms of a probability
distribution. We will refer to the data
generating distribution in our example as
$p_{test}(x_1, x_2)$. Why did we choose a
probability to describe the manifold created
by the data generating process? How might this
probability be interpreted?
Learning the actual distribution turns out to
be a difficult task. Here we will use a
common non parametric technique for describing
distributions, the histogram. Looking at a
histogram of the images, or two dimensional points,
will give us insight into the structure of the
distribution from which they came. Notice here
though that the histogram merely describes the
distribution, we do not know what it is.
End of explanation |
11,384 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Outline
Glossary
2. Mathematical Groundwork
Previous
Step1: Import section specific modules
Step4: Convolution
Definition of the convolution
Properties of the convolution
Convolution examples
2.5. Convolution<a id='math
Step5: One might assume that one is observing a (co-)sine function. But it can get worse
Step7: This example illustrates that the process of filtering can destroy information about our signal. However, filtering can also be useful. Given noisy observations of a function, a rectangle function can be used to filter out the noise. This is illustrated in the subsequent example.
Step9: Note that while the signal is not visible in the noisy data, it is partially recovered in the output of the filter.
Representing instrumental functions, it is important to differentiate between the response function in a certain direction and the image of an impulse, which is the reverse of the response function. The function used to represent a measurement via convolution is the image of an impulse function at the origin. This becomes evident, when we convolve two asymmetric functions. | Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import HTML
HTML('../style/course.css') #apply general CSS
Explanation: Outline
Glossary
2. Mathematical Groundwork
Previous: 2.4 The Fourier Transform
Next: 2.6 Cross-correlation and auto-correlation
Import standard modules:
End of explanation
import math
from IPython.display import HTML
HTML('../style/code_toggle.html')
Explanation: Import section specific modules:
End of explanation
import math
from matplotlib import rcParams
rcParams['text.usetex'] = True
#def trianglewave(x, T):
#
# This is a sawtooth, though
#
# return np.mod(x/T,1.)*np.logical_and(x>=0,x<=T)
def trianglewave(x, T):
T is the period.
return np.abs(2.*(np.mod(x/T,1.)-0.5))-0.5
def boxcar(x,a,b,amp):
return amp*np.logical_and(x>=a,x<=b)
def plottriboxconv(a, b, period):
# limits of boxcar. Play around with this
# a = -0.1
# b = 0.1
# Plotting range
xrange = [-2., 2.]
# Create functions
xpoints = 1000
# Resolution element
dx = (xrange[1]-xrange[0])/float(xpoints)
x = np.linspace(xrange[0], xrange[1], xpoints)
y = boxcar(x, a, b, 1.)
# boxcar will be normalised to 1. amp = 1./(b-a) works in the limit of many points, but here we do
# numberofpixelsinbox*dx*amplitude = y.sum *dx*amplitude = 1
# to take into account numerical effects
amp = float(xpoints)/((xrange[1]-xrange[0])* y.sum())
y = boxcar(x, a, b, 1./(b-a))
ycorr = boxcar(x, a, b, amp)
z = trianglewave(x, period)
result = np.convolve(ycorr,z,'same')
result = dx*result
# Start the plot, create a figure instance and a subplot
fig = plt.figure()
ax1 = fig.add_subplot(311)
fig.tight_layout()
plt.subplots_adjust(hspace = 0.6)
# Axis ranges
ax1.axis([xrange[0]+(b-a), xrange[1]-(b-a), z.min()-0.1*(z.max()-z.min()), z.max()+0.1*(z.max()-z.min())])
# Plot a grid
ax1.grid(True)
# Insert lines at x=0 and y=0
ax1.axhline(0.,linewidth=1, color = 'k', linestyle='dashed')
ax1.axvline(0.,linewidth=1, color = 'k', linestyle='dashed')
# Plot function
ax1.plot(x,z,'b-')
plt.title("Triangle wave", fontsize=14,color='black')
ax2 = fig.add_subplot(312, sharex=ax1)
# Axis ranges
ax2.axis([xrange[0]+(b-a), xrange[1]-(b-a), ycorr.min()-0.1*(ycorr.max()-ycorr.min()), \
ycorr.max()+0.1*(ycorr.max()-ycorr.min())])
# Plot a grid
ax2.grid(True)
# Insert lines at x=0 and y=0
ax2.axhline(0.,linewidth=1, color = 'k', linestyle='dashed')
ax2.axvline(0.,linewidth=1, color = 'k', linestyle='dashed')
# Plot function
e1 = int(math.ceil(xpoints*(a-xrange[0])/(xrange[1]-xrange[0])))
ax2.plot(x[:e1],y[:e1],'b-')
ax2.plot([a, a],[0., amp],'b--')
e2 = int(math.floor(xpoints*(b-xrange[0])/(xrange[1]-xrange[0])))
ax2.plot(x[e1:e2],y[e1:e2],'b-')
e3 = xpoints
ax2.plot(x[e2:],y[e2:],'b-')
ax2.plot([b, b],[0., amp],'b--')
plt.title("Rectangle function", fontsize=14,color='black')
ax3 = fig.add_subplot(313, sharex=ax2)
# Axis ranges: mask out border effects
rmin = result.min()
rmax = result.max()
# Just to make the result a bit more beautiful if the function is very flat
if (rmax - rmin) < 0.1:
rmin=rmin-0.1
rmax=rmax+0.1
ax3.axis([xrange[0]+(b-a), xrange[1]-(b-a), rmin-0.1*(rmax-rmin), rmax+0.1*(rmax-rmin)])
# Plot a grid
ax3.grid(True)
# Insert lines at x=0 and y=0
ax3.axhline(0.,linewidth=1, color = 'k', linestyle='dashed')
ax3.axvline(0.,linewidth=1, color = 'k', linestyle='dashed')
# Plot function
plr1 = int(xpoints*(b-a)/(xrange[1]-xrange[0]))
plr2 = int(xpoints*(1-(b-a)/(xrange[1]-xrange[0])))
ax3.plot(x[plr1:plr2],result[plr1:plr2],'b-')
plt.title("Triangle wave filtered with rectangle function", fontsize=14,color='black')
# first two arguments give the position of the rectangle, third the period of the Triangle
plottriboxconv(-0.1, 0.1, 1.0)
Explanation: Convolution
Definition of the convolution
Properties of the convolution
Convolution examples
2.5. Convolution<a id='math:sec:convolution'></a>
The convolution is an operation connecting two functions, with the result of a mutual broadening. In signal processing, the convolution is often used to represent instrumental broadening of a signal. For any observation, the signal received is "filtered" by an instrumental function. The signal is smeared out. The mathematical description for this effect is the convolution of the function representing the original signal with the instrumental function. In this chapter, we give a detailed description.
2.5.1. Definition of the convolution<a id='math:sec:definition_of_the_convolution'></a>
The convolution $\circ$ is an operation acting on two complex-valued functions.
<a id='math:eq:5_001'></a><!--\label{math:eq:5_001}-->$$
\circ: \left{f\,|\, f:\mathbb{R}\rightarrow \mathbb{C}\right}\,\times\, \left{f\,|\, f:\mathbb{R}\rightarrow \mathbb{C}\right} \rightarrow \left{f\,|\, f:\mathbb{R}\rightarrow \mathbb{C}\right}\
(f\circ g)(x) \,=\, \int_{-\infty}^{+\infty} f(x-t)\,g(t)\,dt
$$
or, in more than one dimension
<a id='math:eq:5_002'></a><!--\label{math:eq:5_002}-->$$
\circ: \left{f\,|\, f:\mathbb{R}^n\rightarrow \mathbb{C}\right}\,\times\, \left{f\,|\, f:\mathbb{R}^n\rightarrow \mathbb{C}\right} \rightarrow \left{f\,|\, f:\mathbb{R}^n\rightarrow \mathbb{C}\right} \, \quad n \in \mathbb{N}\
\begin{align}
(f\circ g)(x_1,\ldots,x_n ) \,&=\, (f\circ g)({\bf x})\
\,&=\, \int_{-\infty}^{+\infty} \ldots \int_{-\infty}^{+\infty} f(x_1-t_1, \ldots , x_n-t_n)\,g(t_1, \ldots, t_n) \,d^nt\
\,&=\, \int_{-\infty}^{+\infty} f({\bf x}-{\bf t})\,g({\bf t}) \,d^nt\end{align}
$$
2.5.2. Properties of the convolution<a id='math:sec:properties_of_the_convolution'></a>
The following rules apply:
<a id='math:eq:5_003'></a><!--\label{math:eq:5_003}-->$$
\forall\,f,g\in \left{h\,|\, h:\mathbb{R}\rightarrow \mathbb{C}\right}, \quad a \in \mathbb{C}\
\begin{align}
f\circ g \,&=\, g \circ f&\qquad (\text{commutativity})\
(f\circ g)\circ h \,&=\, f \circ (g\circ h)&\qquad (\text{associativity})\
f \circ (g + h) \,&=\, (f \circ g) + (f\circ h) &\qquad (\text{distributivity})\
(a\, g)\circ h \,&=\, a \, (g\circ h)&\qquad (\text{associativity with scalar multiplication})\
\end{align}
$$
Because (in one dimension, to keep it short)
<a id='math:eq:5_002'></a><!--\label{math:eq:5_002}-->$$
\begin{split}
(f\circ g)(x) \,&=\, \int_{-\infty}^{+\infty} f(x-t)\,g(t)\,dt\
&\underset{t^{\prime} = x - t}{=}\, \int_{\infty}^{-\infty} f(t^{\prime})\,g(x-t^{\prime})\,\frac{dt}{dt^{\prime}}dt^{\prime}\
&=\, - \int_{-\infty}^{+\infty} f(t^{\prime})\,g(x-t^{\prime})(-1)\,dt'\
&= (g\circ f)(x)\
((f\circ g)\circ h)(x) \,&=\, \int_{-\infty}^{+\infty} \left[\int_{-\infty}^{+\infty} f((x-t^{\prime})-t)\,g(t)\,dt\right]\,h(t^\prime)\,dt^{\prime}\
&=\, \int_{-\infty}^{+\infty} \int_{-\infty}^{+\infty} f(x-t -t^{\prime})\,g(t)\,h(t^\prime)\,dt\,dt^{\prime}\
&=\, \int_{-\infty}^{+\infty} \int_{-\infty}^{+\infty} f((x-t) -t^{\prime})\,h(t^\prime)\,g(t)\,dt^{\prime}\,dt\
&=\, \int_{-\infty}^{+\infty} \left[\int_{-\infty}^{+\infty} f((x-t) -t^{\prime})\,h(t^\prime)\,dt^{\prime}\right]\,g(t)\,dt\
&=\, (f\circ (g\circ h))(x)
\end{split}\qquad \rm ,
$$
The last two rules can be easily verified.
2.5.3. Convolution Examples<a id='math:sec:convolution_examples'></a>
As said, the convolution is often used to represent an instrumental function. We want to demonstrate this. Let us assume a simple function, the triangle wave and a rectangle function (scaled to an area of 1). If we convolve them we get this:
End of explanation
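Before moving on, a quick numerical sanity check of the commutativity and associativity rules stated above. This is only a sketch: np.convolve computes the discrete analogue of the convolution integral, and the arrays are arbitrary.
```
# Discrete check of commutativity and associativity of the convolution
f = np.array([1., 2., 3.])
g = np.array([0., 1., 0.5])
h = np.array([2., -1.])
print(np.allclose(np.convolve(f, g), np.convolve(g, f)))      # commutativity
print(np.allclose(np.convolve(np.convolve(f, g), h),
                  np.convolve(f, np.convolve(g, h))))          # associativity
```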
# first two arguments give the position of the rectangle, third the period of the Triangle
plottriboxconv(-0.5, 0.5, 1.0)
Explanation: One might assume that one is observing a (co-)sine function. But it can get worse:
End of explanation
from matplotlib import rcParams
rcParams['text.usetex'] = True
def noisycosinewave(x, amplitude, T, sigma):
T is the period, sigma is the dispersion, amplitude the amplitude
return amplitude*np.cos(2.*math.pi*x/T)+np.random.normal(scale=sigma, size=x.size)
def boxcar(x,a,b,amp):
return amp*np.logical_and(x>=a,x<=b)
def plotcosboxconv(a, b, period, sigma):
# limits of boxcar. Play around with this
# a = -0.1
# b = 0.1
# Plotting range
xrange = [-2., 2.]
# Create functions
xpoints = 1000
# Resolution element
dx = (xrange[1]-xrange[0])/float(xpoints)
x = np.linspace(xrange[0], xrange[1], xpoints)
y = boxcar(x, a, b, 1.)
# boxcar will be normalised to 1. amp = 1./(b-a) works in the limit of many points, but here we do
# numberofpixelsinbox*dx*amplitude = y.sum *dx*amplitude = 1
# to take into account numerical effects
amp = float(xpoints)/((xrange[1]-xrange[0])* y.sum())
y = boxcar(x, a, b, 1./(b-a))
ycorr = boxcar(x, a, b, amp)
z = noisycosinewave(x, 1., period, sigma)
c = np.cos(2.*math.pi*x/period)
result = np.convolve(ycorr,z,'same')
result = dx*result
# Start the plot, create a figure instance and a subplot
fig = plt.figure()
ax1 = fig.add_subplot(411)
fig.tight_layout()
plt.subplots_adjust(hspace = 0.8)
# Axis ranges
ax1.axis([xrange[0]+(b-a), xrange[1]-(b-a), c.min()-0.1*(c.max()-c.min()), c.max()+0.1*(c.max()-c.min())])
# Plot a grid
ax1.grid(True)
# Insert lines at x=0 and y=0
ax1.axhline(0.,linewidth=1, color = 'k', linestyle='dashed')
ax1.axvline(0.,linewidth=1, color = 'k', linestyle='dashed')
# Plot function
ax1.plot(x,c,'b-')
plt.title("Original function (cos)", fontsize=14,color='black')
ax1 = fig.add_subplot(412)
# Axis ranges
ax1.axis([xrange[0]+(b-a), xrange[1]-(b-a), z.min()-0.1*(z.max()-z.min()), z.max()+0.1*(z.max()-z.min())])
# Plot a grid
ax1.grid(True)
# Insert lines at x=0 and y=0
ax1.axhline(0.,linewidth=1, color = 'k', linestyle='dashed')
ax1.axvline(0.,linewidth=1, color = 'k', linestyle='dashed')
# Plot function
ax1.plot(x,z,'b-')
plt.title("Noise added", fontsize=14,color='black')
ax2 = fig.add_subplot(413, sharex=ax1)
# Axis ranges
ax2.axis([xrange[0]+(b-a), xrange[1]-(b-a), ycorr.min()-0.1*(ycorr.max()-ycorr.min()), \
ycorr.max()+0.1*(ycorr.max()-ycorr.min())])
# Plot a grid
ax2.grid(True)
# Insert lines at x=0 and y=0
ax2.axhline(0.,linewidth=1, color = 'k', linestyle='dashed')
ax2.axvline(0.,linewidth=1, color = 'k', linestyle='dashed')
# Plot function
e1 = int(math.ceil(xpoints*(a-xrange[0])/(xrange[1]-xrange[0])))
ax2.plot(x[:e1],y[:e1],'b-')
ax2.plot([a, a],[0., amp],'b--')
e2 = int(math.floor(xpoints*(b-xrange[0])/(xrange[1]-xrange[0])))
ax2.plot(x[e1:e2],y[e1:e2],'b-')
e3 = xpoints
ax2.plot(x[e2:],y[e2:],'b-')
ax2.plot([b, b],[0., amp],'b--')
plt.title("Rectangle function", fontsize=14,color='black')
ax3 = fig.add_subplot(414, sharex=ax2)
# Axis ranges: mask out border effects
rmin = result.min()
rmax = result.max()
# Just to make the result a bit more beautiful if the function is very flat
if (rmax - rmin) < 0.1:
rmin=rmin-0.1
rmax=rmax+0.1
ax3.axis([xrange[0]+(b-a), xrange[1]-(b-a), rmin-0.1*(rmax-rmin), rmax+0.1*(rmax-rmin)])
# Plot a grid
ax3.grid(True)
# Insert lines at x=0 and y=0
ax3.axhline(0.,linewidth=1, color = 'k', linestyle='dashed')
ax3.axvline(0.,linewidth=1, color = 'k', linestyle='dashed')
# Plot function
plr1 = int(xpoints*(b-a)/(xrange[1]-xrange[0]))
plr2 = int(xpoints*(1-(b-a)/(xrange[1]-xrange[0])))
ax3.plot(x[plr1:plr2],result[plr1:plr2],'b-')
plt.title("Noisy function filtered with rectangle function", fontsize=14,color='black')
# first two arguments give the position of the rectangle, third the period of the Triangle
plotcosboxconv(-0.1, 0.1, 1.0, 2.5)
Explanation: This example illustrates that the process of filtering can destroy information about our signal. However, filtering can also be useful. Given noisy observations of a function, a rectangle function can be used to filter out the noise. This is illustrated in the subsequent example.
End of explanation
from matplotlib import rcParams
rcParams['text.usetex'] = True
def gausshermetian(x, amp, mu, sigma, h3, h4):
amp is the amplitude, mu the centre, sigma the dispersion, h3 and h4 the Gauss-Hermite coefficients.
y = (x-mu)/sigma
return amp*np.exp(-0.5*y**2)*(1+h3*(2*np.sqrt(2.)*y**3-3*np.sqrt(2.)*y)/np.sqrt(6.)+h4*(4*y**4-12*y**2+3)/np.sqrt(24))
#amplitude*np.cos(2.*math.pi*x/T)+np.random.normal(scale=sigma, size=x.size)
def boxcar(x,a,b,amp):
return amp*np.logical_and(x>=a,x<=b)
def plotskewedgaussobs(pos1, pos2, boxwidth, sigma, h3, h4):
# limits of boxcar. Play around with this
# a = -0.1
# b = 0.1
# Plotting range
xrange = [-2., 2.]
# Create functions
xpoints = 1000
# Resolution element
dx = (xrange[1]-xrange[0])/float(xpoints)
x = np.linspace(xrange[0], xrange[1], xpoints)
y = boxcar(x, pos1-boxwidth/2., pos1+boxwidth/2, \
1./boxwidth)+0.5*boxcar(x, pos2-boxwidth/2., pos2+boxwidth/2, 1./boxwidth)
# boxcar will be normalised to 1. amp = 1./(b-a) works in the limit of many points, but here we do
# numberofpixelsinbox*dx*amplitude = y.sum *dx*amplitude = 1
# to take into account numerical effects
z = gausshermetian(x, 1., 0., sigma, h3, h4)
result = np.convolve(y,z,'same')
result = dx*result
# Start the plot, create a figure instance and a subplot
fig = plt.figure()
ax1 = fig.add_subplot(311)
fig.tight_layout()
plt.subplots_adjust(hspace = 0.7)
# Axis ranges
ax1.axis([xrange[0]+boxwidth, xrange[1]-boxwidth, y.min()-0.1*(y.max()-y.min()), y.max()+0.1*(y.max()-y.min())])
# Plot a grid
ax1.grid(True)
# Insert lines at x=0 and y=0
ax1.axhline(0.,linewidth=1, color = 'k', linestyle='dashed')
ax1.axvline(0.,linewidth=1, color = 'k', linestyle='dashed')
# Plot function
ax1.plot(x,y,'b-')
plt.title("Original function, impulse", fontsize=14,color='black')
ax2 = fig.add_subplot(312, sharex=ax1)
# Axis ranges
ax2.axis([xrange[0]+boxwidth, xrange[1]-boxwidth, z.min()-0.1*(z.max()-z.min()), z.max()+0.1*(z.max()-z.min())])
# Plot a grid
ax2.grid(True)
# Insert lines at x=0 and y=0
ax2.axhline(0.,linewidth=1, color = 'k', linestyle='dashed')
ax2.axvline(0.,linewidth=1, color = 'k', linestyle='dashed')
# Plot function
ax2.plot(x,z,'b-')
plt.title("Instrumental function", fontsize=14,color='black')
ax3 = fig.add_subplot(313, sharex=ax2)
# Axis ranges: mask out border effects
rmin = result.min()
rmax = result.max()
ax3.axis([xrange[0]+boxwidth, xrange[1]-boxwidth, rmin-0.1*(rmax-rmin), rmax+0.1*(rmax-rmin)])
# Plot a grid
ax3.grid(True)
# Insert lines at x=0 and y=0
ax3.axhline(0.,linewidth=1, color = 'k', linestyle='dashed')
ax3.axvline(0.,linewidth=1, color = 'k', linestyle='dashed')
# Plot function
plr1 = int(xpoints*boxwidth/(xrange[1]-xrange[0]))
plr2 = int(xpoints*(1-boxwidth/(xrange[1]-xrange[0])))
ax3.plot(x[plr1:plr2],result[plr1:plr2],'b-')
plt.title("Image: original function filtered with instrumental function", fontsize=14,color='black')
# first two arguments give the position of the rectangle, third the period of the Triangle
plotskewedgaussobs(0.0, 1.0, 0.01, 0.1, 0.2, 0.1)
Explanation: Note that while the signal is not visible in the noisy data, it is partially recovered in the output of the filter.
Representing instrumental functions, it is important to differentiate between the response function in a certain direction and the image of an impulse, which is the reverse of the response function. The function used to represent a measurement via convolution is the image of an impulse function at the origin. This becomes evident, when we convolve two asymmetric functions.
End of explanation |
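To make the remark about the reversed response function concrete, a small discrete sketch: convolution with an asymmetric kernel gives the same result as correlation with that kernel flipped, because np.convolve reverses its second argument internally while np.correlate does not. The kernel and signal below are arbitrary.
```
impulse_response = np.array([1., 0.6, 0.2])    # arbitrary asymmetric kernel
signal = np.array([0., 0., 1., 0., 0., 0.])    # a single impulse
print(np.convolve(signal, impulse_response, 'same'))
print(np.correlate(signal, impulse_response[::-1], 'same'))   # identical to the line above
```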
11,385 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Step1: TV Script Generation
In this project, you'll generate your own Simpsons TV scripts using RNNs. You'll be using part of the Simpsons dataset of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at Moe's Tavern.
Get the Data
The data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
Step3: Explore the Data
Play around with view_sentence_range to view different parts of the data.
Step6: Implement Preprocessing Functions
The first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below
Step9: Tokenize Punctuation
We'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".
Implement the function token_lookup to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token
Step11: Preprocess all the data and save it
Running the code cell below will preprocess all the data and save it to file.
Step13: Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
Step15: Build the Neural Network
You'll build the components necessary to build a RNN by implementing the following functions below
Step18: Input
Implement the get_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders
Step21: Build RNN Cell and Initialize
Stack one or more BasicLSTMCells in a MultiRNNCell.
- The Rnn size should be set using rnn_size
- Initialize Cell State using the MultiRNNCell's zero_state() function
- Apply the name "initial_state" to the initial state using tf.identity()
Return the cell and initial state in the following tuple (Cell, InitialState)
Step24: Word Embedding
Apply embedding to input_data using TensorFlow. Return the embedded sequence.
Step27: Build RNN
You created a RNN Cell in the get_init_cell() function. Time to use the cell to create a RNN.
- Build the RNN using the tf.nn.dynamic_rnn()
- Apply the name "final_state" to the final state using tf.identity()
Return the outputs and final_state state in the following tuple (Outputs, FinalState)
Step30: Build the Neural Network
Apply the functions you implemented above to
Step33: Batches
Implement get_batches to create batches of input and targets using int_text. The batches should be a Numpy array with the shape (number of batches, 2, batch size, sequence length). Each batch contains two elements
Step35: Neural Network Training
Hyperparameters
Tune the following parameters
Step37: Build the Graph
Build the graph using the neural network you implemented.
Step39: Train
Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forms to see if anyone is having the same problem.
Step41: Save Parameters
Save seq_length and save_dir for generating a new TV script.
Step43: Checkpoint
Step46: Implement Generate Functions
Get Tensors
Get tensors from loaded_graph using the function get_tensor_by_name(). Get the tensors using the following names
Step49: Choose Word
Implement the pick_word() function to select the next word using probabilities.
Step51: Generate TV Script
This will generate the TV script for you. Set gen_length to the length of TV script you want to generate. | Python Code:
DON'T MODIFY ANYTHING IN THIS CELL
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
Explanation: TV Script Generation
In this project, you'll generate your own Simpsons TV scripts using RNNs. You'll be using part of the Simpsons dataset of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at Moe's Tavern.
Get the Data
The data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
End of explanation
view_sentence_range = (0, 10)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
Explanation: Explore the Data
Play around with view_sentence_range to view different parts of the data.
End of explanation
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
Create lookup tables for vocabulary
:param text: The text of tv scripts split into words
:return: A tuple of dicts (vocab_to_int, int_to_vocab)
# TODO: Implement Function
vocab = set(text)
vocab_to_int = {w: i for i, w in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
return vocab_to_int, int_to_vocab
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_create_lookup_tables(create_lookup_tables)
Explanation: Implement Preprocessing Functions
The first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:
- Lookup Table
- Tokenize Punctuation
Lookup Table
To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:
- Dictionary to go from the words to an id, we'll call vocab_to_int
- Dictionary to go from the id to word, we'll call int_to_vocab
Return these dictionaries in the following tuple (vocab_to_int, int_to_vocab)
End of explanation
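As a quick sanity check of the lookup tables (a sketch using a tiny made-up word list rather than the real script):
```
example_words = ['moe', 'homer', 'moe', 'bart']
demo_vocab_to_int, demo_int_to_vocab = create_lookup_tables(example_words)
print(len(demo_vocab_to_int) == len(demo_int_to_vocab) == 3)                      # three unique words
print(all(demo_int_to_vocab[demo_vocab_to_int[w]] == w for w in example_words))   # ids round-trip back to words
```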
def token_lookup():
Generate a dict to turn punctuation into a token.
:return: Tokenize dictionary where the key is the punctuation and the value is the token
# TODO: Implement Function
punctuation_dict = {
'.' : '||Period||',
',' : '||Comma||',
'"' : '||Quotation_mark||',
';' : '||Semicolon||',
'!' : '||Exclamation_mark||',
'?' : '||Question_mark||',
'(' : '||Left_parentheses||',
')' : '||Right_parentheses||',
'--' : '||Dash||',
'\n' : '||Return||'
}
return punctuation_dict
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_tokenize(token_lookup)
Explanation: Tokenize Punctuation
We'll be splitting the script into a word array using spaces as delimiters. However, punctuation like periods and exclamation marks makes it hard for the neural network to distinguish between the word "bye" and "bye!".
Implement the function token_lookup to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:
- Period ( . )
- Comma ( , )
- Quotation Mark ( " )
- Semicolon ( ; )
- Exclamation mark ( ! )
- Question mark ( ? )
- Left Parentheses ( ( )
- Right Parentheses ( ) )
- Dash ( -- )
- Return ( \n )
This dictionary will be used to tokenize the symbols and add the delimiter (space) around them. This separates the symbols into their own words, making it easier for the neural network to predict the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
End of explanation
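A small sketch of how these tokens might be applied to a single line of dialogue; the real preprocessing is done by helper.preprocess_and_save_data in the next cell, so the line of text here is made up and for intuition only.
```
sample_line = 'Moe_Szyslak: Hey, bye!'
demo_token_dict = token_lookup()
for symbol, token in demo_token_dict.items():
    sample_line = sample_line.replace(symbol, ' {} '.format(token))
print(sample_line.split())
```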
DON'T MODIFY ANYTHING IN THIS CELL
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
Explanation: Preprocess all the data and save it
Running the code cell below will preprocess all the data and save it to file.
End of explanation
DON'T MODIFY ANYTHING IN THIS CELL
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
Explanation: Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
End of explanation
DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
Explanation: Build the Neural Network
You'll build the components necessary to build a RNN by implementing the following functions below:
- get_inputs
- get_init_cell
- get_embed
- build_rnn
- build_nn
- get_batches
Check the Version of TensorFlow and Access to GPU
End of explanation
def get_inputs():
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate)
# TODO: Implement Function
inputs = tf.placeholder(tf.int32, shape=[None, None], name='input')
targets = tf.placeholder(tf.int32, shape=[None, None], name='targets')
learningrate = tf.placeholder(tf.float32, name='learningrate')
return inputs, targets, learningrate
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_inputs(get_inputs)
Explanation: Input
Implement the get_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders:
- Input text placeholder named "input" using the TF Placeholder name parameter.
- Targets placeholder
- Learning Rate placeholder
Return the placeholders in the following tuple (Input, Targets, LearningRate)
End of explanation
def get_init_cell(batch_size, rnn_size):
Create an RNN Cell and initialize it.
:param batch_size: Size of batches
:param rnn_size: Size of RNNs
:return: Tuple (cell, initialize state)
# TODO: Implement Function
# Build two separate LSTM cells; reusing a single cell object in MultiRNNCell causes problems in newer TF 1.x releases
cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size) for _ in range(2)])
initial_state = cell.zero_state(batch_size, tf.float32)
initial_state = tf.identity(initial_state, name='initial_state')
return cell, initial_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_init_cell(get_init_cell)
Explanation: Build RNN Cell and Initialize
Stack one or more BasicLSTMCells in a MultiRNNCell.
- The Rnn size should be set using rnn_size
- Initialize Cell State using the MultiRNNCell's zero_state() function
- Apply the name "initial_state" to the initial state using tf.identity()
Return the cell and initial state in the following tuple (Cell, InitialState)
End of explanation
def get_embed(input_data, vocab_size, embed_dim):
Create embedding for <input_data>.
:param input_data: TF placeholder for text input.
:param vocab_size: Number of words in vocabulary.
:param embed_dim: Number of embedding dimensions
:return: Embedded input.
# TODO: Implement Function
embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))
embed = tf.nn.embedding_lookup(embedding, input_data)
return embed
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_embed(get_embed)
Explanation: Word Embedding
Apply embedding to input_data using TensorFlow. Return the embedded sequence.
End of explanation
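A minimal shape check for get_embed (an illustrative sketch with arbitrary sizes), built in a throwaway graph so it does not interfere with the cells below:
```
with tf.Graph().as_default():
    demo_input = tf.placeholder(tf.int32, [None, None])
    demo_embed = get_embed(demo_input, vocab_size=10, embed_dim=4)
    print(demo_embed.get_shape().as_list())   # expect [None, None, 4]
```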
def build_rnn(cell, inputs):
Create a RNN using a RNN Cell
:param cell: RNN Cell
:param inputs: Input text data
:return: Tuple (Outputs, Final State)
# TODO: Implement Function
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
final_state = tf.identity(final_state, name='final_state')
return outputs, final_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_rnn(build_rnn)
Explanation: Build RNN
You created a RNN Cell in the get_init_cell() function. Time to use the cell to create a RNN.
- Build the RNN using the tf.nn.dynamic_rnn()
- Apply the name "final_state" to the final state using tf.identity()
Return the outputs and final state in the following tuple (Outputs, FinalState)
End of explanation
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
Build part of the neural network
:param cell: RNN cell
:param rnn_size: Size of rnns
:param input_data: Input data
:param vocab_size: Vocabulary size
:param embed_dim: Number of embedding dimensions
:return: Tuple (Logits, FinalState)
# TODO: Implement Function
embed_dim = embed_dim or 200  # fall back to a 200-dimensional embedding if no size was passed in
embedded = get_embed(input_data, vocab_size, embed_dim)
outputs, final_state = build_rnn(cell, embedded)
weights = tf.Variable(tf.truncated_normal(shape=(rnn_size,vocab_size),mean=0.0,stddev=0.1))
biases = tf.Variable(tf.zeros(shape=[vocab_size]))
def mul_fn(current_input):
return tf.matmul(current_input, weights) + biases
logits = tf.map_fn(mul_fn, outputs)
return logits, final_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_nn(build_nn)
Explanation: Build the Neural Network
Apply the functions you implemented above to:
- Apply embedding to input_data using your get_embed(input_data, vocab_size, embed_dim) function.
- Build RNN using cell and your build_rnn(cell, inputs) function.
- Apply a fully connected layer with a linear activation and vocab_size as the number of outputs.
Return the logits and final state in the following tuple (Logits, FinalState)
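As an aside, tf.map_fn is not strictly required for the output layer: a dense layer applied to the 3-D RNN output is mapped over the time dimension automatically. A hedged sketch of that variant, using the same tensors as in build_nn above:
```python
# outputs: [batch, seq_length, rnn_size] -> logits: [batch, seq_length, vocab_size]
logits = tf.contrib.layers.fully_connected(outputs, vocab_size, activation_fn=None)
```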
End of explanation
def get_batches(int_text, batch_size, seq_length):
Return batches of input and target
:param int_text: Text with the words replaced by their ids
:param batch_size: The size of batch
:param seq_length: The length of sequence
:return: Batches as a Numpy array
# TODO: Implement Function
n_batches = int(len(int_text) / (batch_size * seq_length))
# Drop the last few characters to make only full batches
xdata = np.array(int_text[: n_batches * batch_size * seq_length])
ydata = np.array(int_text[1: n_batches * batch_size * seq_length + 1])
x_batches = np.split(xdata.reshape(batch_size, -1), n_batches, 1)
y_batches = np.split(ydata.reshape(batch_size, -1), n_batches, 1)
return np.array(list(zip(x_batches, y_batches)))
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_batches(get_batches)
Explanation: Batches
Implement get_batches to create batches of input and targets using int_text. The batches should be a Numpy array with the shape (number of batches, 2, batch size, sequence length). Each batch contains two elements:
- The first element is a single batch of input with the shape [batch size, sequence length]
- The second element is a single batch of targets with the shape [batch size, sequence length]
If you can't fill the last batch with enough data, drop the last batch.
For example, get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3) would return a Numpy array of the following:
```
[
# First Batch
[
# Batch of Input
[[ 1 2 3], [ 7 8 9]],
# Batch of targets
[[ 2 3 4], [ 8 9 10]]
],
# Second Batch
[
# Batch of Input
[[ 4 5 6], [10 11 12]],
# Batch of targets
[[ 5 6 7], [11 12 13]]
]
]
```
End of explanation
# Number of Epochs
num_epochs = 100
# Batch Size
batch_size = 128
# RNN Size
rnn_size = 512
# Embedding Dimension Size
embed_dim = 200
# Sequence Length
seq_length = 20
# Learning Rate
learning_rate = 0.001
# Show stats for every n number of batches
show_every_n_batches = 64
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
save_dir = './save'
Explanation: Neural Network Training
Hyperparameters
Tune the following parameters:
Set num_epochs to the number of epochs.
Set batch_size to the batch size.
Set rnn_size to the size of the RNNs.
Set embed_dim to the size of the embedding.
Set seq_length to the length of sequence.
Set learning_rate to the learning rate.
Set show_every_n_batches to the number of batches the neural network should print progress.
End of explanation
DON'T MODIFY ANYTHING IN THIS CELL
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
Explanation: Build the Graph
Build the graph using the neural network you implemented.
End of explanation
DON'T MODIFY ANYTHING IN THIS CELL
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
Explanation: Train
Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forums to see if anyone is having the same problem.
End of explanation
DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
Explanation: Save Parameters
Save seq_length and save_dir for generating a new TV script.
End of explanation
DON'T MODIFY ANYTHING IN THIS CELL
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
Explanation: Checkpoint
End of explanation
def get_tensors(loaded_graph):
Get input, initial state, final state, and probabilities tensor from <loaded_graph>
:param loaded_graph: TensorFlow graph loaded from file
:return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
# TODO: Implement Function
input_tensor = loaded_graph.get_tensor_by_name("input:0")
initial_state_tensor = loaded_graph.get_tensor_by_name("initial_state:0")
final_state_tensor = loaded_graph.get_tensor_by_name("final_state:0")
probs_tensor = loaded_graph.get_tensor_by_name("probs:0")
return input_tensor, initial_state_tensor, final_state_tensor, probs_tensor
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_tensors(get_tensors)
Explanation: Implement Generate Functions
Get Tensors
Get tensors from loaded_graph using the function get_tensor_by_name(). Get the tensors using the following names:
- "input:0"
- "initial_state:0"
- "final_state:0"
- "probs:0"
Return the tensors in the following tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
End of explanation
def pick_word(probabilities, int_to_vocab):
Pick the next word in the generated text
:param probabilities: Probabilites of the next word
:param int_to_vocab: Dictionary of word ids as the keys and words as the values
:return: String of the predicted word
# TODO: Implement Function
next_word = np.random.choice(list(int_to_vocab.values()), p=probabilities)
return next_word
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_pick_word(pick_word)
Explanation: Choose Word
Implement the pick_word() function to select the next word using probabilities.
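Note that iterating over int_to_vocab.values() assumes the dictionary is ordered by word id. A sketch that keeps the id-to-word mapping explicit (equivalent intent, slightly more defensive):
```python
def pick_word(probabilities, int_to_vocab):
    # sample a word id according to the predicted distribution, then map it to a word
    word_id = np.random.choice(len(probabilities), p=probabilities)
    return int_to_vocab[word_id]
```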
End of explanation
gen_length = 300
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
Explanation: Generate TV Script
This will generate the TV script for you. Set gen_length to the length of TV script you want to generate.
End of explanation |
11,386 | Given the following text problem statement, write Python code to implement the functionality described below in problem statement
Problem:
I have a raster with a set of unique ID patches/regions which I've converted into a two-dimensional Python numpy array. I would like to calculate pairwise Euclidean distances between all regions to obtain the minimum distance separating the nearest edges of each raster patch. As the array was originally a raster, a solution needs to account for diagonal distances across cells (I can always convert any distances measured in cells back to metres by multiplying by the raster resolution). | Problem:
import numpy as np
import scipy.spatial.distance
example_array = np.array([[0, 0, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 2, 0, 2, 2, 0, 6, 0, 3, 3, 3],
[0, 0, 0, 0, 2, 2, 0, 0, 0, 3, 3, 3],
[0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3],
[1, 1, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3],
[1, 1, 1, 0, 0, 0, 3, 3, 3, 0, 0, 3],
[1, 1, 1, 0, 0, 0, 3, 3, 3, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 3, 3, 3, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0, 5, 5, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4]])
import itertools
n = example_array.max()+1
indexes = []
for k in range(1, n):
tmp = np.nonzero(example_array == k)
tmp = np.asarray(tmp).T
indexes.append(tmp)
result = np.zeros((n-1, n-1))
for i, j in itertools.combinations(range(n-1), 2):
d2 = scipy.spatial.distance.cdist(indexes[i], indexes[j], metric='sqeuclidean')
result[i, j] = result[j, i] = d2.min()**0.5 |
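# Optional follow-up (not part of the original question): if the raster cell size in
# metres is known, the cell-unit distances convert directly, e.g.
#   distances_m = result * cell_size   # cell_size is an assumed, user-supplied value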
11,387 | Given the following text problem statement, write Python code to implement the functionality described below in problem statement
Problem:
How can I perform regression in sklearn, using SVM and a polynomial kernel (degree=2)? | Problem:
import numpy as np
import pandas as pd
import sklearn
X, y = load_data()
assert type(X) == np.ndarray
assert type(y) == np.ndarray
# fit, then predict X
from sklearn.svm import SVR
svr_poly = SVR(kernel='poly', degree=2)
svr_poly.fit(X, y)
predict = svr_poly.predict(X) |
11,388 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Step3: Assignments
Send in a rough outline of your project idea. This is not graded, I will ask for a more complete description later for inclusion in grading.
Plot the performance of the RCT and EpsilonGreedy algorithms on the same plots so that they can more easily be compared.
Investigate how changing the value of epsilon changes the performance of the EpsilonGreedy algorithm. Reuse your approach from assignment #2 to plot the performance for different epsilon values.
When we have very little information on the relative performance of the two arms a high exploration rate quickly provides us with additional information. However, after several hundred trials we are relatively confident in the performance of each arm and a high exploration rate is detrimental as we will be choosing an arm we know to be inferior at a high rate. A better approach would be to reduce the exploration rate as we acquire more information. This is a very similar approach to the simulated annealing optimizer we looked at in week 2. Create a new class that inherits from EpsilonGreedy and gradually reduces the value of epsilon over time.
Due for next week
Rough outline of project idea
Plots showing both EpsilonGreedy and RCT performance
Plots showing EpsilonGreedy performance with different epsilon values
Class implementing EpsilonGreedy with an adaptive epsilon value.
The same code can be used for both #2 and #3 so only #3 will be covered
Step4: The Adaptive Epsilon Greedy algorithm for #4 should change the value of epsilon (and the likelihood of choosing randomly) as the number of trials increase. There are many ways this could be implemented
Step7: The falling epsilon value means the AdaptiveEpsilonGreedy algorithm will explore less and less of the time, instead exploiting the arm it knows to be best. This has advantages and disadvantages. If the environment is stable the cumulative reward will be higher but it will be very slow to respond to any changes.
For example, if the rewards for the two arms were to switch
Step9: There are two reasons why these algorithms are so slow to respond | Python Code:
def plot_arm_frequency(simulation, ax, marker='.', linestyle='', color='k', label=''):
Plot the frequency with which the second arm is chosen
NOTE: Currently only works for two arms
ax.plot(simulation.arm_choice.mean(axis=0),
marker=marker, linestyle=linestyle, color=color, label=label)
ax.set_title('Frequency of arm choice')
ax.set_xlabel('Trial')
ax.set_ylabel('Frequency')
return ax
def plot_reward(simulation, ax, marker='.', linestyle='-', color='k', label=''):
Plot the average reward for each trial across all simulations
ax.plot(simulation.reward.mean(axis=0),
marker=marker, linestyle=linestyle, color=color, label=label)
ax.set_title('Reward')
ax.set_xlabel('Trial')
ax.set_ylabel('Reward')
return ax
def plot_cumulative_reward(simulation, ax, marker='', linestyle='-', color='k', label=''):
Plot the cumulative reward across all simulations
ax.plot(np.cumsum(simulation.reward, axis=1).mean(axis=0),
marker=marker, linestyle=linestyle, color=color, label=label)
ax.set_title('Cumulative Reward')
ax.set_xlabel('Trial')
ax.set_ylabel('Cumulative Reward')
return ax
def plot_summary(model, axes, color='', label=''):
plot_arm_frequency(model, ax=axes[0], color=color, label=label)
plot_reward(model, ax=axes[1], color=color, label=label)
plot_cumulative_reward(model, ax=axes[2], color=color, label=label)
for ax in axes:
ax.legend(loc=4)
return axes
fig, axes = plt.subplots(1,3, figsize=(18,6))
model = Model(EpsilonGreedy, {'n_arms': 2, 'epsilon':0.05}, weights=[0.1, 0.2])
model.repeat_simulation()
plot_summary(model, axes, color='k', label='epsilon=0.05')
model = Model(EpsilonGreedy, {'n_arms': 2, 'epsilon':0.25}, weights=[0.1, 0.2])
model.repeat_simulation()
plot_summary(model, axes, color='b', label='epsilon=0.25')
model = Model(EpsilonGreedy, {'n_arms': 2, 'epsilon':0.5}, weights=[0.1, 0.2])
model.repeat_simulation()
plot_summary(model, axes, color='c', label='epsilon=0.5')
plt.show()
Explanation: Assignments
Send in a rough outline of your project idea. This is not graded, I will ask for a more complete description later for inclusion in grading.
Plot the performance of the RCT and EpsilonGreedy algorithms on the same plots so that they can more easily be compared.
Investigate how changing the value of epsilon changes the performance of the EpsilonGreedy algorithm. Reuse your approach from assignment #2 to plot the performance for different epsilon values.
When we have very little information on the relative performance of the two arms a high exploration rate quickly provides us with additional information. However, after several hundred trials we are relatively confident in the performance of each arm and a high exploration rate is detrimental as we will be choosing an arm we know to be inferior at a high rate. A better approach would be to reduce the exploration rate as we acquire more information. This is a very similar approach to the simulated annealing optimizer we looked at in week 2. Create a new class that inherits from EpsilonGreedy and gradually reduces the value of epsilon over time.
Due for next week
Rough outline of project idea
Plots showing both EpsilonGreedy and RCT performance
Plots showing EpsilonGreedy performance with different epsilon values
Class implementing EpsilonGreedy with an adaptive epsilon value.
The same code can be used for both #2 and #3 so only #3 will be covered:
The three plotting methods have been moved into standalone functions. This isn't strictly necessary, but is one approach.
An extra function has been created combining the three plotting functions.
Labels and plotting options are used to differentiate between each of the algorithms being plotted.
End of explanation
t = np.arange(1000)
plt.plot(t, (1+t)**-0.5, label='1/sqrt')
plt.plot(t, (1+t)**-0.2, label='1/5th-root')
plt.plot(t, np.exp(-(t/200.)), label='exp^-t/200')
u = np.concatenate((np.ones(100),
np.ones(200) * 0.75,
np.ones(200) * 0.5,
np.ones(200) * 0.25,
np.ones(300) * 0.05))
plt.plot(t,u, label='Steps')
plt.legend()
plt.show()
class AdaptiveEpsilonGreedy(RCT):
@property
def epsilon(self):
return self._epsilon * np.exp(-(sum(self.counts)/200.))
fig, axes = plt.subplots(1,3, figsize=(18,6))
model = Model(EpsilonGreedy, {'n_arms': 2, 'epsilon':0.05}, weights=[0.1, 0.2], size=1000)
model.repeat_simulation()
plot_summary(model, axes, color='k', label='epsilon=0.05')
model = Model(AdaptiveEpsilonGreedy, {'n_arms': 2, 'epsilon':1.00}, weights=[0.1, 0.2], size=1000)
model.repeat_simulation()
plot_summary(model, axes, color='c', label='AdaptiveEpsilonGreedy')
plt.show()
Explanation: The Adaptive Epsilon Greedy algorithm for #4 should change the value of epsilon (and the likelihood of choosing randomly) as the number of trials increase. There are many ways this could be implemented:
End of explanation
class DynamicModel(object):
def __init__(self, algo, algo_kwargs, weights=[0.1, 0.1], size=100, repeats=200):
self.algo = algo
self.algo_kwargs = algo_kwargs
self.weights = weights
self.size = size
self.repeats = repeats
def run_simulation(self):
Run a single simulation, recording the performance
algo = self.algo(**self.algo_kwargs)
arm_choice_record = []
reward_record = []
weights = self.weights[:]
for i in range(self.size):
arm = algo.choose_arm()
arm_choice_record.append(arm)
reward = np.random.random() < weights[arm]
reward_record.append(reward)
algo.update(arm, reward)
if i == self.size / 2:
#print('Switching rewards')
weights[0], weights[1] = weights[1], weights[0]
return arm_choice_record, reward_record
def repeat_simulation(self):
Run multiple simulations, recording the performance of each
arm_choice = []
reward = []
for i in range(self.repeats):
arm_choice_record, reward_record = self.run_simulation()
arm_choice.append(arm_choice_record)
reward.append(reward_record)
self.arm_choice = np.array(arm_choice)
self.reward = np.array(reward)
fig, axes = plt.subplots(1,3, figsize=(18,6))
model = DynamicModel(EpsilonGreedy, {'n_arms': 2, 'epsilon':0.05}, weights=[0.1, 0.2], size=2000)
model.repeat_simulation()
plot_summary(model, axes, color='k', label='epsilon=0.05')
model = DynamicModel(AdaptiveEpsilonGreedy, {'n_arms': 2, 'epsilon':1.00}, weights=[0.1, 0.2], size=2000)
model.repeat_simulation()
plot_summary(model, axes, color='c', label='AdaptiveEpsilonGreedy')
plt.show()
Explanation: The falling epsilon value means the AdaptiveEpsilonGreedy algorithm will explore less and less of the time, instead exploiting the arm it knows to be best. This has advantages and disadvantages. If the environment is stable the cumulative reward will be higher but it will be very slow to respond to any changes.
For example, if the rewards for the two arms were to switch:
End of explanation
class DynamicEpsilonGreedy(AdaptiveEpsilonGreedy):
def update(self, arm, reward):
Update an arm with the reward
self.counts[arm] = self.counts[arm] + 1
n = self.counts[arm]
# New experiences will represent at least 100th of the running value estimation
if n > 100:
n = 100
value = self.values[arm]
self.values[arm] = ((n - 1) / n) * self.values[arm] + (1/n) * reward
fig, axes = plt.subplots(1,3, figsize=(18,6))
model = DynamicModel(EpsilonGreedy, {'n_arms': 2, 'epsilon':0.05}, weights=[0.1, 0.2], size=2000)
model.repeat_simulation()
plot_summary(model, axes, color='k', label='epsilon=0.05')
model = DynamicModel(AdaptiveEpsilonGreedy, {'n_arms': 2, 'epsilon':1.00}, weights=[0.1, 0.2], size=2000)
model.repeat_simulation()
plot_summary(model, axes, color='c', label='AdaptiveEpsilonGreedy')
model = DynamicModel(DynamicEpsilonGreedy, {'n_arms': 2, 'epsilon':1.00}, weights=[0.1, 0.2], size=2000)
model.repeat_simulation()
plot_summary(model, axes, color='r', label='DynamicEpsilonGreedy')
plt.show()
Explanation: There are two reasons why these algorithms are so slow to respond:
The low exploration rate
The use of the mean reward values. As the number of trials gets large the ability of any new experiences to alter the mean value falls.
This second issue can be addressed by giving recent experience greater value than experience from further in the past.
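A common way to do that is a constant step size, so each new reward moves the estimate by a fixed fraction no matter how many trials have passed. A minimal sketch (the class name and the 0.1 step size are illustrative choices, not tuned values):
```python
class RecencyWeightedEpsilonGreedy(AdaptiveEpsilonGreedy):
    def update(self, arm, reward, step_size=0.1):
        # constant step size gives exponentially decaying weight to older rewards
        self.counts[arm] += 1
        self.values[arm] += step_size * (reward - self.values[arm])
```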
End of explanation |
11,389 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Numpy Exercise 4
Imports
Step1: Complete graph Laplacian
In discrete mathematics a Graph is a set of vertices or nodes that are connected to each other by edges or lines. If those edges don't have directionality, the graph is said to be undirected. Graphs are used to model social and communications networks (Twitter, Facebook, Internet) as well as natural systems such as molecules.
A Complete Graph, $K_n$ on $n$ nodes has an edge that connects each node to every other node.
Here is $K_5$
Step3: The Laplacian Matrix is a matrix that is extremely important in graph theory and numerical analysis. It is defined as $L=D-A$. Where $D$ is the degree matrix and $A$ is the adjecency matrix. For the purpose of this problem you don't need to understand the details of these matrices, although their definitions are relatively simple.
The degree matrix for $K_n$ is an $n \times n$ diagonal matrix with the value $n-1$ along the diagonal and zeros everywhere else. Write a function to compute the degree matrix for $K_n$ using NumPy.
Step5: The adjacency matrix for $K_n$ is an $n \times n$ matrix with zeros along the diagonal and ones everywhere else. Write a function to compute the adjacency matrix for $K_n$ using NumPy.
Step6: Use NumPy to explore the eigenvalues or spectrum of the Laplacian L of $K_n$. What patterns do you notice as $n$ changes? Create a conjecture about the general Laplace spectrum of $K_n$. | Python Code:
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
Explanation: Numpy Exercise 4
Imports
End of explanation
import networkx as nx
K_5=nx.complete_graph(5)
nx.draw(K_5)
Explanation: Complete graph Laplacian
In discrete mathematics a Graph is a set of vertices or nodes that are connected to each other by edges or lines. If those edges don't have directionality, the graph is said to be undirected. Graphs are used to model social and communications networks (Twitter, Facebook, Internet) as well as natural systems such as molecules.
A Complete Graph, $K_n$ on $n$ nodes has an edge that connects each node to every other node.
Here is $K_5$:
End of explanation
def complete_deg(n):
Return the integer valued degree matrix D for the complete graph K_n.
# K_n is (n-1)-regular, so D has n-1 on every diagonal entry and zeros elsewhere
return (n - 1) * np.eye(n, dtype=int)
# va.vizarray(a)  # visualization helper from another notebook; not available here
D = complete_deg(5)
assert D.shape==(5,5)
assert D.dtype==np.dtype(int)
assert np.all(D.diagonal()==4*np.ones(5))
assert np.all(D-np.diag(D.diagonal())==np.zeros((5,5),dtype=int))
Explanation: The Laplacian Matrix is a matrix that is extremely important in graph theory and numerical analysis. It is defined as $L=D-A$. Where $D$ is the degree matrix and $A$ is the adjecency matrix. For the purpose of this problem you don't need to understand the details of these matrices, although their definitions are relatively simple.
The degree matrix for $K_n$ is an $n \times n$ diagonal matrix with the value $n-1$ along the diagonal and zeros everywhere else. Write a function to compute the degree matrix for $K_n$ using NumPy.
End of explanation
def complete_adj(n):
Return the integer valued adjacency matrix A for the complete graph K_n.
# YOUR CODE HERE
raise NotImplementedError()
A = complete_adj(5)
assert A.shape==(5,5)
assert A.dtype==np.dtype(int)
assert np.all(A+np.eye(5,dtype=int)==np.ones((5,5),dtype=int))
Explanation: The adjacency matrix for $K_n$ is an $n \times n$ matrix with zeros along the diagonal and ones everywhere else. Write a function to compute the adjacency matrix for $K_n$ using NumPy.
End of explanation
# YOUR CODE HERE
raise NotImplementedError()
Explanation: Use NumPy to explore the eigenvalues or spectrum of the Laplacian L of $K_n$. What patterns do you notice as $n$ changes? Create a conjecture about the general Laplace spectrum of $K_n$.
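One way to start exploring (a quick sketch, not the graded answer): build L = D - A directly for a few values of n and inspect np.linalg.eigvalsh.
```python
# illustrative sketch: the spectrum of the Laplacian of K_n for a few n
for n in (3, 5, 8):
    L = (n - 1) * np.eye(n) - (np.ones((n, n)) - np.eye(n))
    print(n, np.round(np.linalg.eigvalsh(L), 6))
# each run shows a single 0 eigenvalue together with n repeated n-1 times
```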
End of explanation |
11,390 | Given the following text description, write Python code to implement the functionality described.
Description:
Maximum difference between node and its ancestor in a Directed Acyclic Graph ( DAG )
Python3 program for the above approach ; Function to perform DFS Traversal on the given graph ; Update the value of ans ; Update the currentMin and currentMax ; Traverse the adjacency list of the node src ; Recursively call for the child node ; Function to calculate maximum absolute difference between a node and its ancestor ; Stores the adjacency list of graph ; Create Adjacency list ; Add a directed edge ; Perform DFS Traversal ; Print the maximum absolute difference ; Driver Code
| Python Code:
ans = 0
def DFS(src , Adj , arr , currentMin , currentMax ) :
global ans
ans = max(ans , max(abs(currentMax - arr[src - 1 ] ) , abs(currentMin - arr[src - 1 ] ) ) )
currentMin = min(currentMin , arr[src - 1 ] )
currentMax = max(currentMax , arr[src - 1 ] )
for child in Adj[src ] :
DFS(child , Adj , arr , currentMin , currentMax )
def getMaximumDifference(Edges , arr , N , M ) :
global ans
Adj =[[ ] for i in range(N + 1 ) ]
for i in range(M ) :
u = Edges[i ][0 ]
v = Edges[i ][1 ]
Adj[u ] . append(v )
DFS(1 , Adj , arr , arr[0 ] , arr[0 ] )
print(ans )
if __name__ == '__main__' :
N = 5
M = 4
Edges =[[ 1 , 2 ] ,[2 , 3 ] ,[4 , 5 ] ,[1 , 3 ] ]
arr =[13 , 8 , 3 , 15 , 18 ]
getMaximumDifference(Edges , arr , N , M )
|
11,391 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Data preparation
First, at Gašper's, I drew a curve in Blender and obtained a triangulation of the object. I then removed the edges that lie in two triangles, which left me with the outline of the curve (which is closed).
Step1: Each point should be in the hull exactly twice.
Step2: Now check whether a ray originating from the point (x, y) in the east dirrection intersects a line segment with endpoints (ax, ay) and (bx, by). First we check whether y lies in the segment [ay, by]. The we observe whether x, a, b makes a left turn. | Python Code:
-
triangles = ((0, 1, 2),
(3, 4, 5),
(6, 7, 8),
(4, 9, 10),
(6, 8, 11),
(12, 3, 13),
(11, 8, 14),
(9, 15, 16),
(11, 14, 17),
(15, 18, 16),
(14, 19, 17),
(20, 21, 22),
(14, 23, 19),
(18, 24, 16),
(14, 25, 26),
(27, 28, 29),
(25, 30, 26),
(28, 31, 32),
(33, 19, 23),
(31, 34, 35),
(23, 14, 26),
(34, 36, 37),
(25, 38, 30),
(36, 39, 40),
(30, 38, 20),
(39, 41, 42),
(22, 30, 20),
(39, 42, 40),
(20, 12, 21),
(43, 21, 12),
(13, 44, 12),
(45, 46, 47),
(44, 43, 12),
(45, 47, 48),
(5, 13, 3),
(45, 48, 49),
(36, 40, 37),
(49, 48, 50),
(37, 51, 34),
(49, 50, 52),
(51, 53, 34),
(52, 50, 5),
(53, 54, 34),
(55, 52, 5),
(34, 54, 35),
(56, 57, 58),
(31, 35, 59),
(56, 58, 60),
(31, 59, 61),
(56, 60, 62),
(63, 64, 65),
(62, 60, 66),
(67, 63, 68),
(62, 66, 69),
(64, 70, 71),
(66, 72, 69),
(70, 73, 74),
(66, 55, 72),
(73, 75, 74),
(55, 76, 72),
(77, 67, 68),
(72, 76, 78),
(70, 74, 71),
(79, 80, 81),
(64, 71, 65),
(80, 82, 81),
(63, 65, 68),
(80, 83, 82),
(84, 77, 85),
(82, 83, 86),
(77, 68, 85),
(83, 87, 86),
(84, 85, 88),
(86, 87, 89),
(61, 84, 31),
(89, 87, 72),
(84, 88, 31),
(89, 72, 78),
(31, 88, 32),
(55, 5, 76),
(28, 32, 90),
(76, 4, 91),
(28, 92, 93),
(91, 4, 94),
(90, 95, 28),
(94, 4, 96),
(95, 92, 28),
(96, 4, 97),
(28, 93, 98),
(97, 4, 10),
(28, 98, 29),
(76, 5, 4),
(27, 29, 99),
(100, 10, 9),
(101, 102, 103),
(104, 105, 106),
(102, 107, 108),
(109, 105, 110),
(111, 101, 112),
(113, 109, 110),
(114, 115, 116),
(117, 105, 109),
(99, 111, 118),
(119, 113, 110),
(120, 107, 102),
(104, 121, 105),
(116, 120, 114),
(121, 122, 123),
(120, 124, 107),
(123, 122, 125),
(126, 127, 128),
(129, 130, 131),
(127, 132, 133),
(134, 135, 136),
(137, 126, 138),
(135, 129, 136),
(127, 133, 128),
(136, 129, 139),
(126, 128, 138),
(139, 129, 131),
(137, 138, 140),
(130, 141, 131),
(137, 140, 124),
(131, 141, 142),
(124, 140, 107),
(141, 125, 142),
(120, 102, 114),
(125, 122, 142),
(102, 108, 103),
(121, 123, 105),
(101, 103, 112),
(105, 117, 106),
(111, 112, 118),
(106, 117, 143),
(27, 99, 16),
(143, 117, 144),
(99, 118, 145),
(146, 147, 144),
(99, 145, 148),
(146, 149, 147),
(99, 148, 16),
(2, 106, 0),
(24, 27, 16),
(150, 151, 152),
(100, 9, 16),
(153, 154, 155),
(149, 100, 156),
(157, 158, 159),
(160, 2, 1),
(158, 155, 159),
(100, 16, 156),
(158, 153, 155),
(149, 156, 147),
(154, 161, 155),
(162, 163, 160),
(154, 150, 161),
(163, 151, 160),
(161, 150, 152),
(144, 147, 143),
(143, 0, 106),
(1, 162, 160),
(151, 163, 152),
)
def lines(triangle):
t = sorted(triangle)
return [(t[0], t[1]), (t[0], t[2]), (t[1], t[2])]
hull = set()
for triangle in triangles:
for line in lines(triangle):
if line in hull:
hull.remove(line)
else:
hull.add(line)
hull
Explanation: Data preparation
First, at Gašper's, I drew a curve in Blender and obtained a triangulation of the object. I then removed the edges that lie in two triangles, which left me with the outline of the curve (which is closed).
End of explanation
points = dict()
for line in hull:
for point in line:
if point not in points:
points[point] = 1
else:
points[point] += 1
for key in points.keys():
assert points[key] == 2
Explanation: Each point should be in the hull exactly twice.
End of explanation
import random
import numpy
def intersects(p, a, b):
p = (p[0] + random.random()*0.000001, p[1] + random.random()*0.000001)
a, b = sorted([a, b], key=lambda p: p[1])
if a[1] < p[1] and p[1] < b[1]:
# Check the turn
a = numpy.array([[1] + list(p),
[1] + list(a),
[1] + list(b)])
return numpy.linalg.det(a) > 0
return False
a = (1, 3)
b = (-3, 5)
sorted([a, b], key=lambda p: p[1])
intersects((0.00001, 0.00001), (1, -1), (-0.5, 1))
def count_intersections(point_coordinates, lines, point):
intersections = 0
for line in lines:
a, b = point_coordinates[line[0]], point_coordinates[line[1]]
if intersects(point, a, b):
intersections += 1
return intersections
count_intersections(point_coordinates, hull, (0,0))
Explanation: Now check whether a ray originating from the point (x, y) in the east direction intersects a line segment with endpoints (ax, ay) and (bx, by). First we check whether y lies in the segment [ay, by]. Then we observe whether x, a, b make a left turn.
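The crossing count then gives the inside/outside test by parity; a short usage sketch, reusing the point_coordinates mapping from above (the test point is an arbitrary example):
```python
test_point = (0.0, 0.0)
inside = count_intersections(point_coordinates, hull, test_point) % 2 == 1
print(inside)
```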
End of explanation |
11,392 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements; and to You under the Apache License, Version 2.0.
Classify images from MNIST using LeNet
Dataset
Download the dataset to your workspace (i.e. the notebook folder).
Step1: Create the CNN model
TODO
Step2: Initialize the parameters
weight matrix - guassian distribution
bias - 0
Step3: Set up the optimizer and tensors
Step4: Conduct SGD
process the training data multile time, each time is called on epoch;
for each epoch, read the data as mini-batches in random order
for each mini-batch, do BP and update the parameters
Step5: Save model to disk
Step6: Load model from disk
Step7: Do prediction
Step9: Debug
Print l1 norm or parameter and layer feature
parameter initialization
learning rate
weight decay | Python Code:
from __future__ import division
from builtins import zip
from builtins import str
from builtins import range
from past.utils import old_div
from future import standard_library
from __future__ import print_function
from tqdm import tnrange, tqdm_notebook
standard_library.install_aliases()
import pickle, gzip
# Load the dataset
f = gzip.open('mnist.pkl.gz', 'rb')
train_set, valid_set, _ = pickle.load(f, encoding='latin1')
f.close()
print(train_set[0].shape, train_set[1].shape)
print(valid_set[0].shape, valid_set[1].shape)
import numpy as np
train_x = np.reshape(train_set[0], (50000, 1, 28, 28)).astype(np.float32, copy=False)
train_y = np.array(train_set[1]).astype(np.int32, copy=False)
valid_x = np.reshape(valid_set[0], (10000, 1, 28, 28))
%matplotlib inline
import matplotlib.pyplot as plt
plt.imshow(train_x[0][0])
Explanation: Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements; and to You under the Apache License, Version 2.0.
Classify images from MNIST using LeNet
Dataset
Download the dataset to your workspace (i.e. the notebook folder).
End of explanation
from singa import net as ffnet
from singa.layer import Conv2D, MaxPooling2D, Dropout, Activation, Flatten, Dense
from singa import optimizer, loss, metric
from singa import layer
layer.engine = 'singacpp'
net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())
net.add(Conv2D('conv1', 32, 3, 2, input_sample_shape=(1,28,28)))
net.add(Activation('relu1'))
net.add(Conv2D('conv2', 32, 3, 2))
net.add(Activation('relu2'))
net.add(MaxPooling2D('pool', 3, 2))
net.add(Flatten('flat'))
net.add(Dense('dense', 10))
Explanation: Create the CNN model
TODO: plot the net structure
End of explanation
for pname, pval in zip(net.param_names(), net.param_values()):
if len(pval.shape) > 1:
pval.gaussian(0, 0.1)
else:
pval.set_value(0)
print(pname, pval.shape, pval.l1())
Explanation: Initialize the parameters
weight matrix - guassian distribution
bias - 0
End of explanation
from singa import tensor
#from singa.proto import core_pb2
from singa import device
from singa import utils
cpu = device.get_default_device()
opt = optimizer.SGD(momentum=0.9, weight_decay=1e-4)
batch_size = 32
num_train_batch = old_div(train_x.shape[0], batch_size)
tx = tensor.Tensor((batch_size, 1, 28, 28))
ty = tensor.Tensor((batch_size,), cpu , tensor.int32)
# for progress bar
from tqdm import tnrange
idx = np.arange(train_x.shape[0], dtype=np.int32)
Explanation: Set up the optimizer and tensors
End of explanation
for epoch in range(2):
np.random.shuffle(idx)
loss, acc = 0.0, 0.0
bar = tnrange(num_train_batch, desc='Epoch %d' % epoch)
for b in bar:
x = train_x[idx[b * batch_size: (b + 1) * batch_size]]
y = train_y[idx[b * batch_size: (b + 1) * batch_size]]
tx.copy_from_numpy(x)
ty.copy_from_numpy(y)
grads, (l, a) = net.train(tx, ty)
loss += l
acc += a
for (s, p, g) in zip(net.param_names(), net.param_values(), grads):
opt.apply_with_lr(epoch, 0.01, g, p, str(s), b)
# update progress bar
bar.set_postfix(train_loss=l, train_accuracy=a)
print('Epoch = %d, training loss = %f, training accuracy = %f' % (epoch, old_div(loss, num_train_batch), old_div(acc, num_train_batch)))
Explanation: Conduct SGD
process the training data multiple times; each pass is called an epoch;
for each epoch, read the data as mini-batches in random order
for each mini-batch, do BP and update the parameters
End of explanation
net.save('checkpoint')
Explanation: Save model to disk
End of explanation
for pval in net.param_values():
pval.set_value(0)
net.load('checkpoint')
Explanation: Load model from disk
End of explanation
from PIL import Image
img = Image.open('static/digit.jpg').convert('L')
img = img.resize((28,28))
img = old_div(np.array(img, dtype=np.float32),255)
img = tensor.from_numpy(img)
img.reshape((1,1,28,28))
y=net.predict(img)
prob=tensor.to_numpy(y)[0]
plt.plot(list(range(10)), prob)
Explanation: Do prediction
End of explanation
np.random.shuffle(idx)
ffnet.verbose=True
for pname, pval in zip(net.param_names(), net.param_values()):
if len(pval.shape) > 1:
pval.gaussian(0, 10)
else:
pval.set_value(0)
print(pname, pval.shape, pval.l1())
for b in range(10):
print("\n\nEpoch %d" % b)
x = train_x[idx[b * batch_size: (b + 1) * batch_size]]
y = train_y[idx[b * batch_size: (b + 1) * batch_size]]
tx.copy_from_numpy(x)
ty.copy_from_numpy(y)
grads, (l, a) = net.train(tx, ty)
print('\n loss = %f, params' % l)
for (s, p, g) in zip(net.param_names(), net.param_values(), grads):
opt.apply_with_lr(epoch, 0.01, g, p, str(s), b)
print(s, p.l1())
def vis_square(data):
Take an array of shape (n, height, width) or (n, height, width, 3)
and visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n)
# normalize data for display
data = old_div((data - data.min()), (data.max() - data.min()))
# force the number of filters to be square
n = int(np.ceil(np.sqrt(data.shape[0])))
padding = (((0, n ** 2 - data.shape[0]),
(0, 1), (0, 1)) # add some space between filters
+ ((0, 0),) * (data.ndim - 3)) # don't pad the last dimension (if there is one)
data = np.pad(data, padding, mode='constant', constant_values=1) # pad with ones (white)
# tile the filters into an image
data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
plt.imshow(data); plt.axis('off')
np.random.shuffle(idx)
ffnet.verbose=False
net.load('checkpoint')
b=1
x = train_x[idx[b * batch_size: (b + 1) * batch_size]]
tx.copy_from_numpy(x)
r = net.forward(False, tx, ['relu1', 'relu2'])
r1 = tensor.to_numpy(r['relu1'])[0]
vis_square(r1)
r2 = tensor.to_numpy(r['relu2'])[0]
vis_square(r2)
p=net.param_values()[2]
print(p.shape)
vis_square(tensor.to_numpy(p)[0].reshape(32, 3,3))
Explanation: Debug
Print the l1 norm of parameters and layer features
parameter initialization
learning rate
weight decay
End of explanation |
11,393 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Generative Adversarial Network
In this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits!
GANs were first reported on in 2014 from Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out
Step1: Model Inputs
First we need to create the inputs for our graph. We need two inputs, one for the discriminator and one for the generator. Here we'll call the discriminator input inputs_real and the generator input inputs_z. We'll assign them the appropriate sizes for each of the networks.
Exercise
Step2: Generator network
Here we'll build the generator network. To make this network a universal function approximator, we'll need at least one hidden layer. We should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values.
Variable Scope
Here we need to use tf.variable_scope for two reasons. Firstly, we're going to make sure all the variable names start with generator. Similarly, we'll prepend discriminator to the discriminator variables. This will help out later when we're training the separate networks.
We could just use tf.name_scope to set the names, but we also want to reuse these networks with different inputs. For the generator, we're going to train it, but also sample from it as we're training and after training. The discriminator will need to share variables between the fake and real input images. So, we can use the reuse keyword for tf.variable_scope to tell TensorFlow to reuse the variables instead of creating new ones if we build the graph again.
To use tf.variable_scope, you use a with statement
Step3: Discriminator
The discriminator network is almost exactly the same as the generator network, except that we're using a sigmoid output layer.
Exercise
Step4: Hyperparameters
Step5: Build network
Now we're building the network from the functions defined above.
First is to get our inputs, input_real, input_z from model_inputs using the sizes of the input and z.
Then, we'll create the generator, generator(input_z, input_size). This builds the generator with the appropriate input and output sizes.
Then the discriminators. We'll build two of them, one for real data and one for fake data. Since we want the weights to be the same for both real and fake data, we need to reuse the variables. For the fake data, we're getting it from the generator as g_model. So the real data discriminator is discriminator(input_real) while the fake discriminator is discriminator(g_model, reuse=True).
Exercise
Step6: Discriminator and Generator Losses
Now we need to calculate the losses, which is a little tricky. For the discriminator, the total loss is the sum of the losses for real and fake images, d_loss = d_loss_real + d_loss_fake. The losses will by sigmoid cross-entropies, which we can get with tf.nn.sigmoid_cross_entropy_with_logits. We'll also wrap that in tf.reduce_mean to get the mean for all the images in the batch. So the losses will look something like
python
tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
For the real image logits, we'll use d_logits_real which we got from the discriminator in the cell above. For the labels, we want them to be all ones, since these are all real images. To help the discriminator generalize better, the labels are reduced a bit from 1.0 to 0.9, for example, using the parameter smooth. This is known as label smoothing, typically used with classifiers to improve performance. In TensorFlow, it looks something like labels = tf.ones_like(tensor) * (1 - smooth)
The discriminator loss for the fake data is similar. The logits are d_logits_fake, which we got from passing the generator output to the discriminator. These fake logits are used with labels of all zeros. Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that.
Finally, the generator losses are using d_logits_fake, the fake image logits. But, now the labels are all ones. The generator is trying to fool the discriminator, so it wants to discriminator to output ones for fake images.
Exercise
Step7: Optimizers
We want to update the generator and discriminator variables separately. So we need to get the variables for each part and build optimizers for the two parts. To get all the trainable variables, we use tf.trainable_variables(). This creates a list of all the variables we've defined in our graph.
For the generator optimizer, we only want to generator variables. Our past selves were nice and used a variable scope to start all of our generator variable names with generator. So, we just need to iterate through the list from tf.trainable_variables() and keep variables that start with generator. Each variable object has an attribute name which holds the name of the variable as a string (var.name == 'weights_0' for instance).
We can do something similar with the discriminator. All the variables in the discriminator start with discriminator.
Then, in the optimizer we pass the variable lists to the var_list keyword argument of the minimize method. This tells the optimizer to only update the listed variables. Something like tf.train.AdamOptimizer().minimize(loss, var_list=var_list) will only train the variables in var_list.
Exercise
Step8: Training
Step9: Training loss
Here we'll check out the training losses for the generator and discriminator.
Step10: Generator samples from training
Here we can view samples of images from the generator. First we'll look at images taken while training.
Step11: These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 5, 7, 3, 0, 9. Since this is just a sample, it isn't representative of the full range of images this generator can make.
Step12: Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion!
Step13: It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise. Looks like 1, 9, and 8 show up first. Then, it learns 5 and 3.
Sampling from the generator
We can also get completely new images from the generator by using the checkpoint we saved after training. We just need to pass in a new latent vector $z$ and we'll get new samples! | Python Code:
%matplotlib inline
import pickle as pkl
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data')
Explanation: Generative Adversarial Network
In this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits!
GANs were first reported on in 2014 from Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out:
Pix2Pix
CycleGAN
A whole list
The idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes fake data to pass to the discriminator. The discriminator also sees real data and predicts if the data it's received is real or fake. The generator is trained to fool the discriminator; it wants to output data that looks as close as possible to real data. And the discriminator is trained to figure out which data is real and which is fake. What ends up happening is that the generator learns to make data that is indistinguishable from real data to the discriminator.
The general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector the generator uses to construct its fake images. As the generator learns through training, it figures out how to map these random vectors to recognizable images that can fool the discriminator.
The output of the discriminator is a sigmoid function, where 0 indicates a fake image and 1 indicates a real image. If you're interested only in generating new images, you can throw out the discriminator after training. Now, let's see how we build this thing in TensorFlow.
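For reference, this two-player game is usually written as the minimax objective
$$\min_G \max_D \; \mathbb{E}_{x \sim p_{\text{data}}}\big[\log D(x)\big] + \mathbb{E}_{z \sim p_z}\big[\log\big(1 - D(G(z))\big)\big],$$
which is what the discriminator and generator cross-entropy losses later in this notebook approximate.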
End of explanation
def model_inputs(real_dim, z_dim):
inputs_real = tf.placeholder(tf.float32, [None, real_dim], name='inputs_real')
inputs_z = tf.placeholder(tf.float32, [None, z_dim], name='inputs_z')
return inputs_real, inputs_z
Explanation: Model Inputs
First we need to create the inputs for our graph. We need two inputs, one for the discriminator and one for the generator. Here we'll call the discriminator input inputs_real and the generator input inputs_z. We'll assign them the appropriate sizes for each of the networks.
Exercise: Finish the model_inputs function below. Create the placeholders for inputs_real and inputs_z using the input sizes real_dim and z_dim respectively.
End of explanation
def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):
''' Build the generator network.
Arguments
---------
z : Input tensor for the generator
out_dim : Shape of the generator output
n_units : Number of units in hidden layer
reuse : Reuse the variables with tf.variable_scope
alpha : leak parameter for leaky ReLU
Returns
-------
out, logits:
'''
with tf.variable_scope('generator', reuse=reuse): # finish this
# Hidden layer
#h1 = tf.contrib.layers.fully_connected(z, n_units, activation_fn=None)
h1 = tf.layers.dense(z, n_units, activation=None)
# Leaky ReLU
h1 = tf.maximum(alpha * h1, h1)
# Logits and tanh output
logits = tf.layers.dense(h1, out_dim, activation=None)
out = tf.tanh(logits)
return out
Explanation: Generator network
Here we'll build the generator network. To make this network a universal function approximator, we'll need at least one hidden layer. We should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values.
Variable Scope
Here we need to use tf.variable_scope for two reasons. Firstly, we're going to make sure all the variable names start with generator. Similarly, we'll prepend discriminator to the discriminator variables. This will help out later when we're training the separate networks.
We could just use tf.name_scope to set the names, but we also want to reuse these networks with different inputs. For the generator, we're going to train it, but also sample from it as we're training and after training. The discriminator will need to share variables between the fake and real input images. So, we can use the reuse keyword for tf.variable_scope to tell TensorFlow to reuse the variables instead of creating new ones if we build the graph again.
To use tf.variable_scope, you use a with statement:
python
with tf.variable_scope('scope_name', reuse=False):
# code here
Here's more from the TensorFlow documentation to get another look at using tf.variable_scope.
Leaky ReLU
TensorFlow doesn't provide an operation for leaky ReLUs, so we'll need to make one . For this you can just take the outputs from a linear fully connected layer and pass them to tf.maximum. Typically, a parameter alpha sets the magnitude of the output for negative values. So, the output for negative input (x) values is alpha*x, and the output for positive x is x:
$$
f(x) = max(\alpha * x, x)
$$
Tanh Output
The generator has been found to perform the best with $tanh$ for the generator output. This means that we'll have to rescale the MNIST images to be between -1 and 1, instead of 0 and 1.
Exercise: Implement the generator network in the function below. You'll need to return the tanh output. Make sure to wrap your code in a variable scope, with 'generator' as the scope name, and pass the reuse keyword argument from the function to tf.variable_scope.
End of explanation
def discriminator(x, n_units=128, reuse=False, alpha=0.01):
''' Build the discriminator network.
Arguments
---------
x : Input tensor for the discriminator
n_units: Number of units in hidden layer
reuse : Reuse the variables with tf.variable_scope
alpha : leak parameter for leaky ReLU
Returns
-------
out, logits:
'''
with tf.variable_scope('discriminator', reuse=reuse): # finish this
# Hidden layer
h1 = tf.layers.dense(x, n_units, activation=None)
# Leaky ReLU
h1 = tf.maximum(alpha * h1, h1)
logits = tf.layers.dense(h1, 1, activation=None)
out = tf.sigmoid(logits)
return out, logits
Explanation: Discriminator
The discriminator network is almost exactly the same as the generator network, except that we're using a sigmoid output layer.
Exercise: Implement the discriminator network in the function below. Same as above, you'll need to return both the logits and the sigmoid output. Make sure to wrap your code in a variable scope, with 'discriminator' as the scope name, and pass the reuse keyword argument from the function arguments to tf.variable_scope.
End of explanation
# Size of input image to discriminator
input_size = 784 # 28x28 MNIST images flattened
# Size of latent vector to generator
z_size = 100
# Sizes of hidden layers in generator and discriminator
g_hidden_size = 128
d_hidden_size = 128
# Leak factor for leaky ReLU
alpha = 0.01
# Label smoothing
smooth = 0.1
Explanation: Hyperparameters
End of explanation
tf.reset_default_graph()
# Create our input placeholders
input_real, input_z = model_inputs(input_size, z_size)
# Generator network here
g_model = generator(input_z, input_size, g_hidden_size, reuse=False)
# g_model is the generator output
# Disriminator network here
d_model_real, d_logits_real = discriminator(input_real, n_units=d_hidden_size, reuse=False, alpha=alpha)
d_model_fake, d_logits_fake = discriminator(g_model, n_units=d_hidden_size, reuse=True, alpha=alpha)
Explanation: Build network
Now we're building the network from the functions defined above.
First is to get our inputs, input_real, input_z from model_inputs using the sizes of the input and z.
Then, we'll create the generator, generator(input_z, input_size). This builds the generator with the appropriate input and output sizes.
Then the discriminators. We'll build two of them, one for real data and one for fake data. Since we want the weights to be the same for both real and fake data, we need to reuse the variables. For the fake data, we're getting it from the generator as g_model. So the real data discriminator is discriminator(input_real) while the fake discriminator is discriminator(g_model, reuse=True).
Exercise: Build the network from the functions you defined earlier.
End of explanation
# Calculate losses
labels_real = tf.ones_like(d_logits_real) * (1 - smooth)
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, labels=labels_real))
labels_fake = tf.zeros_like(d_logits_real)
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=labels_fake))
d_loss = d_loss_real + d_loss_fake
labels_fake_fooled = tf.ones_like(d_logits_fake)
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=labels_fake_fooled))
Explanation: Discriminator and Generator Losses
Now we need to calculate the losses, which is a little tricky. For the discriminator, the total loss is the sum of the losses for real and fake images, d_loss = d_loss_real + d_loss_fake. The losses will by sigmoid cross-entropies, which we can get with tf.nn.sigmoid_cross_entropy_with_logits. We'll also wrap that in tf.reduce_mean to get the mean for all the images in the batch. So the losses will look something like
python
tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
For the real image logits, we'll use d_logits_real which we got from the discriminator in the cell above. For the labels, we want them to be all ones, since these are all real images. To help the discriminator generalize better, the labels are reduced a bit from 1.0 to 0.9, for example, using the parameter smooth. This is known as label smoothing, typically used with classifiers to improve performance. In TensorFlow, it looks something like labels = tf.ones_like(tensor) * (1 - smooth)
The discriminator loss for the fake data is similar. The logits are d_logits_fake, which we got from passing the generator output to the discriminator. These fake logits are used with labels of all zeros. Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that.
Finally, the generator losses are using d_logits_fake, the fake image logits. But, now the labels are all ones. The generator is trying to fool the discriminator, so it wants to discriminator to output ones for fake images.
Exercise: Calculate the losses for the discriminator and the generator. There are two discriminator losses, one for real images and one for fake images. For the real image loss, use the real logits and (smoothed) labels of ones. For the fake image loss, use the fake logits with labels of all zeros. The total discriminator loss is the sum of those two losses. Finally, the generator loss again uses the fake logits from the discriminator, but this time the labels are all ones because the generator wants to fool the discriminator.
End of explanation
# Optimizers
learning_rate = 0.002
# Get the trainable_variables, split into G and D parts
t_vars = tf.trainable_variables()
#print([v.name for v in t_vars])
g_vars = [v for v in t_vars if v.name.startswith('generator')]
d_vars = [v for v in t_vars if v.name.startswith('discriminator')]
d_train_opt = tf.train.AdamOptimizer().minimize(d_loss, var_list=d_vars)
g_train_opt = tf.train.AdamOptimizer().minimize(g_loss, var_list=g_vars)
Explanation: Optimizers
We want to update the generator and discriminator variables separately. So we need to get the variables for each part and build optimizers for the two parts. To get all the trainable variables, we use tf.trainable_variables(). This creates a list of all the variables we've defined in our graph.
For the generator optimizer, we only want the generator variables. Our past selves were nice and used a variable scope to start all of our generator variable names with generator. So, we just need to iterate through the list from tf.trainable_variables() and keep variables that start with generator. Each variable object has an attribute name which holds the name of the variable as a string (var.name == 'weights_0' for instance).
We can do something similar with the discriminator. All the variables in the discriminator start with discriminator.
Then, in the optimizer we pass the variable lists to the var_list keyword argument of the minimize method. This tells the optimizer to only update the listed variables. Something like tf.train.AdamOptimizer().minimize(loss, var_list=var_list) will only train the variables in var_list.
Exercise: Below, implement the optimizers for the generator and discriminator. First you'll need to get a list of trainable variables, then split that list into two lists, one for the generator variables and another for the discriminator variables. Finally, using AdamOptimizer, create an optimizer for each network that updates the network variables separately.
End of explanation
batch_size = 100
epochs = 100
samples = []
losses = []
saver = tf.train.Saver(var_list = g_vars)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
# Get images, reshape and rescale to pass to D
batch_images = batch[0].reshape((batch_size, 784))
batch_images = batch_images*2 - 1
# Sample random noise for G
batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))
# Run optimizers
_ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})
_ = sess.run(g_train_opt, feed_dict={input_z: batch_z})
# At the end of each epoch, get the losses and print them out
train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})
train_loss_g = g_loss.eval({input_z: batch_z})
print("Epoch {}/{}...".format(e+1, epochs),
"Discriminator Loss: {:.4f}...".format(train_loss_d),
"Generator Loss: {:.4f}".format(train_loss_g))
# Save losses to view after training
losses.append((train_loss_d, train_loss_g))
# Sample from generator as we're training for viewing afterwards
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, reuse=True),
feed_dict={input_z: sample_z})
samples.append(gen_samples)
saver.save(sess, './checkpoints/generator.ckpt')
# Save training generator samples
with open('train_samples.pkl', 'wb') as f:
pkl.dump(samples, f)
Explanation: Training
End of explanation
%matplotlib inline
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator')
plt.plot(losses.T[1], label='Generator')
plt.title("Training Losses")
plt.legend()
Explanation: Training loss
Here we'll check out the training losses for the generator and discriminator.
End of explanation
def view_samples(epoch, samples):
fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)
for ax, img in zip(axes.flatten(), samples[epoch]):
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
im = ax.imshow(img.reshape((28,28)), cmap='Greys_r')
return fig, axes
# Load samples from generator taken while training
with open('train_samples.pkl', 'rb') as f:
samples = pkl.load(f)
Explanation: Generator samples from training
Here we can view samples of images from the generator. First we'll look at images taken while training.
End of explanation
_ = view_samples(-1, samples)
Explanation: These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 5, 7, 3, 0, 9. Since this is just a sample, it isn't representative of the full range of images this generator can make.
End of explanation
rows, cols = 10, 6
fig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)
for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):
for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):
ax.imshow(img.reshape((28,28)), cmap='Greys_r')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
Explanation: Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion!
End of explanation
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, reuse=True),
feed_dict={input_z: sample_z})
view_samples(0, [gen_samples])
Explanation: It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number-like structures appear out of the noise. It looks like 1, 9, and 8 show up first. Then, it learns 5 and 3.
Sampling from the generator
We can also get completely new images from the generator by using the checkpoint we saved after training. We just need to pass in a new latent vector $z$ and we'll get new samples!
End of explanation |
11,394 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Tests for the Bootstrap code
Step1: Generate test files
Step2: b camera
Arcs
Step3: Flats
Step4: Test via script
desi_bootcalib.py \
--fiberflat /Users/xavier/DESI/Wavelengths/pix-sub_b0-00000001.fits \
--arcfile /Users/xavier/DESI/Wavelengths/pix-sub_b0-00000000.fits \
--outfile /Users/xavier/DESI/Wavelengths/boot_psf-sub_b0.fits \
--qafile /Users/xavier/DESI/Wavelengths/qa_boot-sub_b0.pdf
Success
Setting up for unit tests
Pushing files to NERSC
scp pix-sub_b0-00000000.fits.gz hopper.nersc.gov | Python Code:
# import
Explanation: Tests for the Bootstrap code
End of explanation
def pix_sub(infil, outfil, rows=(80,310)):
hdu = fits.open(infil)
# Trim
img = hdu[0].data
sub_img = img[:,rows[0]:rows[1]]
# New
newhdu = fits.PrimaryHDU(sub_img)
# Header
for key in ['CAMERA','VSPECTER','RDNOISE','EXPTIME']:
newhdu.header[key] = hdu[0].header[key]
# Write
newhdulist = fits.HDUList([newhdu])
newhdulist.writeto(outfil,clobber=True)
print('Wrote: {:s}'.format(outfil))
Explanation: Generate test files
End of explanation
arc_fil = '/u/xavier/DESI/Wavelengths/pix-b0-00000000.fits'
out_arc = '/u/xavier/DESI/Wavelengths/pix-sub_b0-00000000.fits'
pix_sub(arc_fil, out_arc)
Explanation: b camera
Arcs
End of explanation
flat_fil = '/u/xavier/DESI/Wavelengths/pix-b0-00000001.fits'
out_flat = '/u/xavier/DESI/Wavelengths/pix-sub_b0-00000001.fits'
pix_sub(flat_fil, out_flat)
Explanation: Flats
End of explanation
import urllib2
url_arc = 'https://portal.nersc.gov/project/desi/data/spectest/pix-sub_b0-00000000.fits.gz'
f = urllib2.urlopen(url_arc)
tst_fil = 'tmp_arc.fits.gz'
with open(tst_fil, "wb") as code:
code.write(f.read())
url_flat = 'https://portal.nersc.gov/project/desi/data/spectest/pix-sub_b0-00000001.fits.gz'
f = urllib2.urlopen(url_flat)
tst_fil = 'tmp_flat.fits.gz'
with open(tst_fil, "wb") as code:
code.write(f.read())
Explanation: Test via script
desi_bootcalib.py \
--fiberflat /Users/xavier/DESI/Wavelengths/pix-sub_b0-00000001.fits \
--arcfile /Users/xavier/DESI/Wavelengths/pix-sub_b0-00000000.fits \
--outfile /Users/xavier/DESI/Wavelengths/boot_psf-sub_b0.fits \
--qafile /Users/xavier/DESI/Wavelengths/qa_boot-sub_b0.pdf
Success
Setting up for unit tests
Pushing files to NERSC
scp pix-sub_b0-00000000.fits.gz hopper.nersc.gov:/project/projectdirs/desi/www/data/spectest
scp pix-sub_b0-00000001.fits.gz hopper.nersc.gov:/project/projectdirs/desi/www/data/spectest
Testing the read
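A minimal sketch of what that read test might look like on the two files downloaded above (the shape and header checks here are only illustrative):
from astropy.io import fits
for tst_fil in ('tmp_arc.fits.gz', 'tmp_flat.fits.gz'):
    with fits.open(tst_fil) as hdulist:
        # confirm the image data and a header keyword copied by pix_sub are readable
        print(tst_fil, hdulist[0].data.shape, hdulist[0].header.get('CAMERA'))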
End of explanation |
11,395 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Simulating DESI Spectra
The goal of this notebook is to demonstrate how to generate some simple DESI spectra using the quickgen utility. For simplicity we will only generate 1D spectra and skip the more computationally intensive (yet still instructive!) step of extracting 1D spectra from simulated 2D spectra (i.e., so-called "pixel-level simulations").
For additional (albeit somewhat outdated) information and documentation about quickgen see
https
Step1: Check and set your environment
Next, make sure we have all the right environment variables set (assuming the bash shell). If any of these environment variables are missing please set them in your .bashrc file (and then restart this notebook) or create them for just this notebook session using the %set_env magic command, as we demonstrate below.
Step2: Let's reassign the $SPECPROD environment to something other than dailytest so that we don't conflict with the outputs of the standard DESI integration test. In addition, we need to make raw data input $DESI_SPECTO_DATA match $DESI_SPECTRO_SIM/$PIXPROD where the simulated data will be written.
Step3: Specify the parameters of the simulation.
Next, let's specify the number and spectral type distribution of spectra we want to simulate, and the random seed. Setting the seed here (which can be any number at all!) ensures that your simulations are reproducible. Let's also explicitly set the night of the "observations" (the default is to use the current date) and the expid or exposure ID number (which would allow you to simulate more than one DESI exposure).
The flavor option is used to choose the correct sky-brightness model and it also determines the distribution of targets for a given flavor. For example, flavor='dark' returns the right relative sampling density of ELGs, LRGs, and QSOs. The other available (science target) options for flavor are 'dark', 'gray', 'grey', 'bright', 'bgs', 'mws', 'lrg', 'elg', 'qso', and 'std'. (You can also set flavor to either 'arc' or 'flat' but that would be boring!)
Step4: Generating noiseless spectra.
The first step is to generate the fibermap and simspec files needed by quickgen. The fibermap table contains (simulated) information about the position of each target in the DESI focal plane, while the simspec table holds the "truth" spectra and the intrinsic properties of each object (redshift, noiseless photometry, [OII] flux, etc.).
In detail, the simspec and fibermap data models are described at
* http
Step5: Reading the fibermap and spectral metadata
First, let's see what got written to the raw data directory as a result of that last command.
Step6: Let's go a step further and read the fibermap and simspec files from on-disk.
Note that in general code should not generate filepaths by hand, but rather call desispec.io.findfile to find the file it needs. If you need to override the standard environment variable locations, you can use the outdir option, while still letting it construct the canonical filename for each type of file.
Step7: Make a simple plot
Here's a fun simple plot of the redshift histogram distributions. Now you try!
Step8: Simulating spectra using quickgen.
We're now ready to simulate DESI spectra using quickgen! Since we're calling quickgen from within this notebook (rather than from the command line in a terminal) we have to parse the simspec and fibermap filename inputs first.
quickgen generates four types of files and writes them to the \$DESI_SPECTRO_REDUX/\$SPECPROD/exposures directory
Step9: Inspect the output cframe files
Let's briefly look at one of the cframe files for the blue camera using desispec.io.frame.read_frame, which returns a Frame class with all the attributes you might want.
Step10: Let's make a quick plot of the zeroth spectrum.
Step11: Regrouping the spectra
As you can imagine, working with cframe files is pretty tedious, especially across three cameras, 10 spectrographs, and more than 35 million targets! Therefore, let's combine and reorganize the individual cframe files into spectra files grouped on the sky. Spectra are organized into healpix pixels (here chosen to have nside=64). If you're interested, you can read more about the healpix directory structure here
Step12: Inspect the output (combined and regrouped) spectra
So what did we end up with in the redux output directory?
* exposures/{night}/{expid}/
Step13: As a quick example, let's plot up the zeroth spectrum in healpix pixel 19435. | Python Code:
import os
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.table import Table
import desispec.io
import desisim.io
from desisim.obs import new_exposure
from desisim.scripts import quickgen
from desispec.scripts import group_spectra
%pylab inline
Explanation: Simulating DESI Spectra
The goal of this notebook is to demonstrate how to generate some simple DESI spectra using the quickgen utility. For simplicity we will only generate 1D spectra and skip the more computationally intensive (yet still instructive!) step of extracting 1D spectra from simulated 2D spectra (i.e., so-called "pixel-level simulations").
For additional (albeit somewhat outdated) information and documentation about quickgen see
https://desi.lbl.gov/DocDB/cgi-bin/private/ShowDocument?docid=1429
The heart of quickgen is the SpecSim package, which you can read about here:
http://specsim.readthedocs.io/en/stable
If you identify any errors or have requests for additional functionality please create a new issue on
https://github.com/desihub/desisim/issues
or send a note to desi-data@desi.lbl.gov.
Getting started.
First, import all the package dependencies.
End of explanation
def check_env():
for env in ('DESIMODEL', 'DESI_ROOT', 'DESI_SPECTRO_SIM', 'DESI_SPECTRO_DATA',
'DESI_SPECTRO_REDUX', 'SPECPROD', 'PIXPROD'):
if env in os.environ:
print('{} environment set to {}'.format(env, os.getenv(env)))
else:
print('Required environment variable {} not set!'.format(env))
check_env()
Explanation: Check and set your environment
Next, make sure we have all the right environment variables set (assuming the bash shell). If any of these environment variables are missing please set them in your .bashrc file (and then restart this notebook) or create them for just this notebook session using the %set_env magic command, as we demonstrate below.
End of explanation
%set_env SPECPROD=example
%set_env PIXPROD=example
rawdata_dir = desisim.io.simdir()
%set_env DESI_SPECTRO_DATA=$rawdata_dir
print('Simulated raw data will be written to {}'.format(desisim.io.simdir()))
print('Pipeline will read raw data from {}'.format(desispec.io.rawdata_root()))
print(' (without knowing that it was simulated)')
print('Pipeline will write processed data to {}'.format(desispec.io.specprod_root()))
Explanation: Let's reassign the $SPECPROD environment to something other than dailytest so that we don't conflict with the outputs of the standard DESI integration test. In addition, we need to make raw data input $DESI_SPECTO_DATA match $DESI_SPECTRO_SIM/$PIXPROD where the simulated data will be written.
End of explanation
nspec = 100
seed = 555
flavor = 'dark'
night = '20170615'
expid = 0
Explanation: Specify the parameters of the simulation.
Next, let's specify the number and spectral type distribution of spectra we want to simulate, and the random seed. Setting the seed here (which can be any number at all!) ensures that your simulations are reproducible. Let's also explicitly set the night of the "observations" (the default is to use the current date) and the expid or exposure ID number (which would allow you to simulate more than one DESI exposure).
The flavor option is used to choose the correct sky-brightness model and it also determines the distribution of targets for a given flavor. For example, flavor='dark' returns the right relative sampling density of ELGs, LRGs, and QSOs. The other available (science target) options for flavor are 'dark', 'gray', 'grey', 'bright', 'bgs', 'mws', 'lrg', 'elg', 'qso', and 'std'. (You can also set flavor to either 'arc' or 'flat' but that would be boring!)
End of explanation
fibermap, truth = new_exposure(flavor=flavor, nspec=nspec, seed=seed, night=night,
expid=expid, tileid=None, exptime=None)
Explanation: Generating noiseless spectra.
The first step is to generate the fibermap and simspec files needed by quickgen. The fibermap table contains (simulated) information about the position of each target in the DESI focal plane, while the simspec table holds the "truth" spectra and the intrinsic properties of each object (redshift, noiseless photometry, [OII] flux, etc.).
In detail, the simspec and fibermap data models are described at
* http://desidatamodel.readthedocs.io/en/latest/DESI_SPECTRO_SIM/PIXPROD/NIGHT/simspec-EXPID.html
* http://desidatamodel.readthedocs.io/en/latest/DESI_SPECTRO_DATA/NIGHT/fibermap-EXPID.html
To generate these files we'll use new_exposure, a convenience function for generating random typical exposures of various types for testing. However, note that new_exposure isn't intended for every possible analysis; if you want to use your own mix of objects, you just need to write your own fibermap and simspec files following that format instead of calling new_exposure.
Note that in our call to new_exposure, the tileid and exptime (exposure time) optional inputs are shown for demonstration purposes but do not need to be explicitly set. In particular, the default exposure time is based on the value specified in the $DESIMODEL/data/desi.yaml parameter file.
Note: The simspec file format may change in the near future, so structure your code to separate generating spectra from the code for writing these particular formats.
End of explanation
rawdata_dir = desispec.io.rawdata_root()
!find $rawdata_dir | sort
Explanation: Reading the fibermap and spectral metadata
First, let's see what got written to the raw data directory as a result of that last command.
End of explanation
fiberfile = desispec.io.findfile('fibermap', night=night, expid=expid)
simspecfile = desisim.io.findfile('simspec', night=night, expid=expid)
print('Reading fibermap file {}'.format(fiberfile))
hdu = fits.open(fiberfile)
hdu.info()
fibermap = Table(hdu['FIBERMAP'].data)
hdu.close()
fibermap[:3]
print('Reading simspec file {}.'.format(simspecfile))
hdu = fits.open(simspecfile)
hdu.info()
meta = Table(hdu['METADATA'].data)
hdu.close()
meta[:3]
Explanation: Let's go a step further and read the fibermap and simspec files from on-disk.
Note that in general code should not generate filepaths by hand, but rather call desispec.io.findfile to find the file it needs. If you need to override the standard environment variable locations, you can use the outdir option, while still letting it construct the canonical filename for each type of file.
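For instance, a minimal sketch of overriding the output location while still letting findfile construct the canonical filename (the directory used here is just a placeholder):
# hypothetical scratch directory; findfile keeps the standard filename underneath it
custom_fibermap = desispec.io.findfile('fibermap', night=night, expid=expid, outdir='/tmp/scratch')
print(custom_fibermap)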
End of explanation
allobjtype = meta['OBJTYPE']
redlim = (-0.2, 1.1*meta['REDSHIFT'].max())
fig, ax = plt.subplots()
for objtype in sorted(set(allobjtype)):
indx = objtype == allobjtype
hh = ax.hist(meta['REDSHIFT'][indx], bins=nspec//3,
label=objtype, alpha=0.5, range=redlim)
ax.set_xlabel('Redshift')
ax.set_ylabel('Number of Simulated Spectra')
ax.legend(loc='upper right', ncol=3)
ax.margins(0.2)
ax.set_xlim(redlim)
Explanation: Make a simple plot
Here's a fun simple plot of the redshift histogram distributions. Now you try!
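As a starting point for your own plot, here is a small sketch (not part of the original notebook) that counts the simulated object types from the same metadata table:
# Bar chart of how many spectra of each OBJTYPE were simulated
objtypes, counts = np.unique(meta['OBJTYPE'], return_counts=True)
fig, ax = plt.subplots()
ax.bar(np.arange(len(objtypes)), counts)
ax.set_xticks(np.arange(len(objtypes)))
ax.set_xticklabels(objtypes)
ax.set_ylabel('Number of Simulated Spectra')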
End of explanation
args = quickgen.parse([
'--simspec', simspecfile,
'--fibermap', fiberfile
])
quickgen.main(args)
Explanation: Simulating spectra using quickgen.
We're now ready to simulate DESI spectra using quickgen! Since we're calling quickgen from within this notebook (rather than from the command line in a terminal) we have to parse the simspec and fibermap filename inputs first.
quickgen generates four types of files and writes them to the \$DESI_SPECTRO_REDUX/\$SPECPROD/exposures directory: calib, sky, cframe, and frame files. We will use the cframe, or calibrated frame files, which contain the flux-calibrated and sky-subtracted DESI spectra (one file per brz camera and spectrograph).
The data model and the other files and their contents are documented here:
http://desidatamodel.readthedocs.io/en/latest/DESI_SPECTRO_REDUX/PRODNAME/exposures/NIGHT/EXPID/index.html
The code in the following cell calls the equivalent of the command line:
quickgen --simspec {simspecfile} --fibermap {fiberfile}
End of explanation
cframefile = desispec.io.findfile('cframe', night=night, expid=expid, camera='b0')
print('Reading {}'.format(cframefile))
cframe = desispec.io.frame.read_frame(cframefile)
dir(cframe)
Explanation: Inspect the output cframe files
Let's briefly look at one of the cframe files for the blue camera using desispec.io.frame.read_frame, which returns a Frame class with all the attributes you might want.
End of explanation
print(cframe.wave.shape, cframe.flux.shape)
fig, ax = plt.subplots()
ax.errorbar(cframe.wave, cframe.flux[0, :], 1/np.sqrt(cframe.ivar[0, :]))
ax.set_xlabel('Wavelength (A)')
ax.set_ylabel('Flux ($10^{-17}$ erg/s/cm$^2$)')
Explanation: Let's make a quick plot of the zeroth spectrum.
End of explanation
nside = 64
args = group_spectra.parse(['--hpxnside', '{}'.format(nside)])
group_spectra.main(args)
Explanation: Regrouping the spectra
As you can imagine, working with cframe files is pretty tedious, especially across three cameras, 10 spectrographs, and more than 35 million targets! Therefore, let's combine and reorganize the individual cframe files into spectra files grouped on the sky. Spectra are organized into healpix pixels (here chosen to have nside=64). If you're interested, you can read more about the healpix directory structure here:
https://github.com/desihub/desispec/blob/master/doc/nb/Intro_to_DESI_spectra.ipynb
Regrouping is especially important for real observations with overlapping tiles where the same object could be reobserved on different exposures separated by short or large amounts of time.
TODO: Add the spectra files to desidatamodel.
To regroup the spectra, we will run the (notebook) equivalent of the command:
desi_group_spectra --hpxnside 64
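As a rough illustration of the healpix grouping itself, a sky position can be mapped to its nside=64 pixel as in the sketch below; this assumes the healpy package is installed and that nested pixel ordering is used (an assumption made for illustration, not something stated in this notebook):
import healpy as hp
# hypothetical RA/Dec in degrees; lonlat=True lets us pass them directly
ra, dec = 150.0, 2.0
pixel = hp.ang2pix(64, ra, dec, nest=True, lonlat=True)
print('RA,Dec ({},{}) falls in nside=64 healpix pixel {}'.format(ra, dec, pixel))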
End of explanation
reduxdir = desispec.io.specprod_root()
!find $reduxdir | sort
Explanation: Inspect the output (combined and regrouped) spectra
So what did we end up with in the redux output directory?
* exposures/{night}/{expid}/: individual spectrograph camera spectra ("frames")
grouped by night/expid
* spectra-64/: spectra grouped by healpix location on the sky
End of explanation
specfilename = desispec.io.findfile('spectra', groupname=19435, nside=nside)
print('Reading {}'.format(specfilename))
specobj = desispec.io.read_spectra(specfilename)
dir(specobj)
specobj.wave.keys(), specobj.flux.keys()
thisone = 0
fig, ax = plt.subplots()
for camera, color in zip( ('b', 'r', 'z'), ('blue', 'red', 'magenta') ):
ax.plot(specobj.wave[camera], specobj.flux[camera][thisone], color=color)
ax.set_xlabel('Wavelength (A)')
ax.set_ylabel('Flux ($10^{-17}$ erg/s/cm$^2$)')
Explanation: As a quick example, let's plot up the zeroth spectrum in healpix pixel 19435.
End of explanation |
11,396 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Democracy and Economic Dvelopment
Michelle Sabbagh
Data Bootcamp Final Project
I. Overview
Central Research Question
Step1: Step Two
Step2: Step Three
Step3: Access just one level of the multi-index
Step4: Data Description
There are 15,048 rows and 1 column in the World Bank dataset
Step5: The variables included in this dataset are
Step6: Descriptive statistics on the dataset
Step7: III. Accessing Freedom House Data
Freedom House is an international watchdog organization that releases a civil liberties and political rights evaluation for 195 countries. Each country is given a score from 1 to 7 , with 1 being the most free and 7 being the least free. Amanda B. Edgell, a PhD candidate in Comparative Politics at the University of Florida released a version of Freedom House's data which also includes scale inverted, averages, minimum, and maximums,correlates of war, state abbreviations, and state codes.
The data was gathered from
Step8: Step Two
Step9: Data Description
There are 7,843 rows and 18 columns in the Freedom House dataset
Step10: The variables included in this dataset are
Step11: The table below provides a data dictionary of the aforementioned variables
Step12: Descriptive statistics on the dataset
Step13: IV. Analytics Prep
In order to run the necessary analysis, I combined the World Bank Data and Freedom House datasets. To reflect the most up to date information I will only use data from 2015.
Step14: V. Case Study
Step15: I was interested in looking at the richest country's GDP per capita overtime, compared to its level of freedom. The first graph indicates that Luxembourg became free in the late 1970s. Looking at the second graph, that is exactly when Luxembourg started to experience an increase in GDP per capita. In the vacuum of this case study,one would quickly conclude that democracy directly contributed to Luxembourg’s economic development.
Step16: VI. Broad Analysis
Step17: I was curious to see if the same graph would look any different if I plotted the actual civil liberties score and the political rights score. The graphs are similar to the graph of the mean above. Concluding that even when you separate civil liberties and political rights the conclusion from the first graph is very similar.
Step18: The graph below depicts the densest GDP per capita’s divided by their freedom status. It seems that only "free" countries have GDP's over the one hundred thousand mark, but the range in the dataset is largest in free countries. Furthermore, with a mean GDP per capita of around $ 12,851, it makes sense that the density of all the voilins is highest in that range. Notably, partly free countries experience the lowest levels of economic development, even lower than those of not free countries. Moreover, partly free countries have the smallest range in GDP per capita, mostly concentrated in the zero to ten thousand range.
Step19: These two density plots show something very similar, more free countries have outliers that are extremely rich, something you don't see in the less free countries. The first graph divides the density of the dataset by the average freedom score. Further indicating that the largest range of GDP per capita is in the free countries. Evidently, extreme wealth is only found in democratic countries even though low levels of economic development can occur in democracies as well.
Step20: After running the regression below, I can conclude that for the most part, a decrease in GDP per capita is associated with a decrease of freedom. Yet the relatively wealthy, not free countries are glaring outliers that cannot be ignored. | Python Code:
%matplotlib inline
import pandas
import wbdata
import matplotlib.pyplot as plt
import seaborn as sns
Explanation: Democracy and Economic Development
Michelle Sabbagh
Data Bootcamp Final Project
I. Overview
Central Research Question: What is the relationship, if any, between a country’s level of freedom and economic development?
In academically incorrect terms, the question could be phrased as: What is the relationship, if any, between a country’s level of democracy and economic development? I am intentionally avoiding the term democracy because it is usually measured by degrees of electoral rights. Many countries host rigged elections; presenting a flaw in the democracy variable. For example, North Korea would be considered a democracy because they host elections—with only one candidate on the ballot. To prevent pseudo-democracies from skewing my results the question is asked in terms of freedom and development.
Since the collapse of communism, the democratization trend has been en vogue globally. One of the main arguments for democratizing governments is that democracies allow for increased economic development. However, with the rise of oil rich authoritarian regimes the hypothesis of democracy yielding high levels of economic development is subject to question.
My project will unpack the correlation between economic development and freedom by grappling with the following question: Do countries with a higher degree of freedom experience greater economic development?
Levels of freedom will be measured by a country’s civil liberties and political rights score, provided by Freedom House. Economic development will be measured in GDP per capita by the World Bank’s World Development Indicators.
II. Accessing World Bank Data
The World Bank releases their data on 264 countries' GDP per capita from the years 1960-2016.
Step One: Install World Bank Data
Use pip to install wbdata, a library for getting data from the Wolrd Bank, by typing "pip install wbdata" into your terminal. Ensure all necessary packages, pandas, seaborn, matplotlib are installed as well.
End of explanation
indicators = {'NY.GDP.PCAP.CD':"GDP Per Capita"}
economic_df = wbdata.get_dataframe(indicators=indicators)
Explanation: Step Two: Read World Bank Data
End of explanation
economic_df.tail(15)
Explanation: Step Three: View World Bank Data
End of explanation
economic_df.xs('2015', level='date')
Explanation: Access just one level of the multi-index
End of explanation
economic_df.shape
Explanation: Data Description
There are 15,048 rows and 1 column in the World Bank dataset
End of explanation
economic_df.columns
Explanation: The variables included in this dataset are:
End of explanation
economic_df.describe()
Explanation: Descriptive statistics on the dataset:
End of explanation
freedomhouse_df = pandas.read_excel("fh1972_20161.xlsx", header=0, sheetname=1).set_index('country').rename(columns={"year": "date"})
Explanation: III. Accessing Freedom House Data
Freedom House is an international watchdog organization that releases a civil liberties and political rights evaluation for 195 countries. Each country is given a score from 1 to 7 , with 1 being the most free and 7 being the least free. Amanda B. Edgell, a PhD candidate in Comparative Politics at the University of Florida released a version of Freedom House's data which also includes scale inverted, averages, minimum, and maximums,correlates of war, state abbreviations, and state codes.
The data was gathered from: https://acrowinghen.files.wordpress.com/2014/03/fh1972_20161.xlsx
Step One: Read Freedom House Data
End of explanation
freedomhouse_df.head(15)
Explanation: Step Two: View Freedom House Data
End of explanation
freedomhouse_df.shape
Explanation: Data Description
There are 7,843 rows and 18 columns in the Freedom House dataset
End of explanation
freedomhouse_df.columns
Explanation: The variables included in this dataset are:
End of explanation
freedomhouse_dd = pandas.read_excel("fh1972_20161.xlsx", header=0, sheetname=0)
freedomhouse_dd.head(30)
Explanation: The table below provides a data dictionary of the aforementioned variables
End of explanation
freedomhouse_df.describe()
Explanation: Descriptive statistics on the dataset
End of explanation
combined_df = economic_df.xs('2015', level='date').join(freedomhouse_df[freedomhouse_df["date"] == 2015]).dropna()
combined_df
# Descriptive statistics on the merged World Bank Data and Freedom House data in 2015
combined_df.describe()
Explanation: IV. Analytics Prep
In order to run the necessary analysis, I combined the World Bank Data and Freedom House datasets. To reflect the most up to date information I will only use data from 2015.
End of explanation
#I sorted the data by the highest level of GDP per Capita. Luxembourg had the highest GDP per capita in 2015
combined_df.sort_values('GDP Per Capita', ascending=False)
Explanation: V. Case Study: How Does the Wealthiest Country, Luxembourg, Change Over Time?
End of explanation
date_indexed_df = economic_df.loc["Luxembourg"]
date_indexed_df.index = date_indexed_df.index.map(int)
lux_df = date_indexed_df.join(freedomhouse_df.loc["Luxembourg"].set_index('date')).sort_index()
lux_df.plot(y=['mean'], title='Luxembourg Freedom Over Time' )
lux_df.plot(y=['GDP Per Capita'], title='Luxembourg GDP per capita Over Time')
Explanation: I was interested in looking at the richest country's GDP per capita overtime, compared to its level of freedom. The first graph indicates that Luxembourg became free in the late 1970s. Looking at the second graph, that is exactly when Luxembourg started to experience an increase in GDP per capita. In the vacuum of this case study,one would quickly conclude that democracy directly contributed to Luxembourg’s economic development.
End of explanation
#This graph demostrates decreasing freedoms and their GDP per capita.
combined_df.sort_values("mean").plot(y="GDP Per Capita", kind="line",title='Mean of Political Rights and Civil Liberties Score', figsize=(10,5))
Explanation: VI. Broad Analysis: Comparing GDP Per Capita and Levels of Freedom
I sorted the data by the average of the political rights score and the civil liberties score from countries with high freedom levels to countries with low freedom levels. The left-hand side on the x axis represents countries with high levels of freedom and as you move to the right, the levels of freedom decrease. It is interesting to note that the GDP per capita line is highest in areas with high levels of freedom, and lowest in countries with low levels of freedom. However, both sides of the graph experience extreme outliers. Demonstrating that as a general trend it seems that countries with higher levels of freedom do in fact experience higher levels of economic development. Yet, the variation and outliers in the graph cause us to understand that the correlation between the two variables is not directly linear and making the claim "democracies bring about economic development" is a strong broad generalization.
End of explanation
#These graphs compare decreasing political rights and civil liberties and their gdp per capita.
combined_df.sort_values("pr").plot(y="GDP Per Capita", kind="line",title='Political Rights Score', figsize=(10,5))
combined_df.sort_values("cl").plot(y="GDP Per Capita", kind="line",title='Civil Liberties Score', figsize=(10,5))
Explanation: I was curious to see if the same graph would look any different if I plotted the actual civil liberties score and the political rights score. The graphs are similar to the graph of the mean above. Concluding that even when you separate civil liberties and political rights the conclusion from the first graph is very similar.
End of explanation
f, ax = plt.subplots(figsize=(15, 10))
sns.violinplot(x="status", y="GDP Per Capita", data=combined_df, palette="Set3")
Explanation: The graph below depicts the densest GDP per capita’s divided by their freedom status. It seems that only "free" countries have GDP's over the one hundred thousand mark, but the range in the dataset is largest in free countries. Furthermore, with a mean GDP per capita of around $ 12,851, it makes sense that the density of all the voilins is highest in that range. Notably, partly free countries experience the lowest levels of economic development, even lower than those of not free countries. Moreover, partly free countries have the smallest range in GDP per capita, mostly concentrated in the zero to ten thousand range.
End of explanation
f, ax = plt.subplots(figsize=(15, 10))
sns.violinplot(x="mean", y="GDP Per Capita", data=combined_df, palette="Set3")
sns.jointplot(x='GDP Per Capita', y="mean", data=combined_df, kind='kde', )
Explanation: These two density plots show something very similar, more free countries have outliers that are extremely rich, something you don't see in the less free countries. The first graph divides the density of the dataset by the average freedom score. Further indicating that the largest range of GDP per capita is in the free countries. Evidently, extreme wealth is only found in democratic countries even though low levels of economic development can occur in democracies as well.
End of explanation
# Regression showing a decrease in GDP per capita with increasing mean(decreasing freedom).
sns.regplot(x='mean', y="GDP Per Capita", data=combined_df,)
Explanation: After running the regression below, I can conclude that for the most part, a decrease in GDP per capita is associated with a decrease of freedom. Yet the relatively wealthy, not free countries are glaring outliers that cannot be ignored.
End of explanation |
11,397 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
TensorFlow Inception-v3 Feature Extraction setup
Step1: Some Functions for working with Inception-v3
Step2: Putting it to use!
Step3: How about more than one image
If you put your images folders sorted by class (e.g. | Python Code:
import os
import tensorflow as tf
# import tensorflow.python.platform
from tensorflow.python.platform import gfile
import numpy as np
import pandas as pd
Explanation: TensorFlow Inception-v3 Feature Extraction setup:
Special thanks to KERNIX for their extremely helpful blog
http://www.kernix.com/blog/image-classification-with-a-pre-trained-deep-neural-network_p11
You can follow along at home!
install tensorflow:
conda install -c conda-forge tensorflow
navigate to tensorflow directory:
cd anaconda/lib/python2.7/site-packages/tensorflow/models/image/imagenet
run python command to set up imagenet neural network:
python classify_image.py --model_dir <desired model location>/imagenet
in my case this was:
python classify_image.py --model_dir ~/coradek/CNW_Wildlife_Identification/imagenet
End of explanation
# Create the TensorFlow graph
def create_graph():
model_dir = './CNW_Wildlife_Identification/imagenet'
with gfile.FastGFile(os.path.join(
model_dir, 'classify_image_graph_def.pb'), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
# Start a TensorFlow Session and choose our "tensor" for feature extraction
# ('pool_3:0' is the last layer before classification)
def setup():
create_graph() # Only needs to run the first time
with tf.Session() as sess:
# Get the last feature layer (preclassification) from inception-v3
next_to_last_tensor = sess.graph.get_tensor_by_name('pool_3:0')
s = sess
t = next_to_last_tensor
return s,t
# Get the actual features!
def get_features(image, session, tensor):
if not gfile.Exists(image):
tf.logging.fatal('File does not exist %s', image)
image_data = gfile.FastGFile(image, 'rb').read()
predictions = session.run(tensor,
{'DecodeJpeg/contents:0': image_data})
features = np.squeeze(predictions)
return features.reshape(1,-1)
Explanation: Some Functions for working with Inception-v3
End of explanation
session, tensor = setup()
features = get_features('CNW_Wildlife_Identification/data/first_sample/EK000026-2.JPG',
session, tensor)
print features
Explanation: Putting it to use!
End of explanation
directory = 'CNW_Wildlife_Identification/data/first_sample'
image_list = []
for p, dirs, files in os.walk(directory):
for ff in files:
if ff[-4:].lower() == '.jpg':
image_list.append(p+'/'+ff)
image_list
# In practice this is only run once - so setup() is included as part of this function
def get_features_repeatedly(image_list):
'''take list of image file paths
return numpy array of features
'''
create_graph()
with tf.Session() as session:
# Get the last feature layer (preclassification) from inception-v3
tensor = session.graph.get_tensor_by_name('pool_3:0')
nb_features = 2048
features = np.empty((len(image_list),nb_features))
for ind, image in enumerate(image_list):
# if (ind%50 == 0):
# print('Processing %s...' % (image))
print('Processing %s...' % (image))
if not gfile.Exists(image):
tf.logging.fatal('File does not exist %s', image)
image_data = gfile.FastGFile(image, 'rb').read()
predictions = session.run(tensor,
{'DecodeJpeg/contents:0': image_data})
features[ind,:] = np.squeeze(predictions)
return features
lotsafeatures = get_features_repeatedly(image_list)
Explanation: How about more than one image
If you put your images folders sorted by class (e.g.
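One possible follow-up, sketched here rather than taken from the original notebook: since image_list was built by walking the directory tree, a class label for each image can be recovered from its parent folder name and saved alongside the extracted features:
# Derive a label for each image from its parent directory name
labels = [os.path.basename(os.path.dirname(path)) for path in image_list]
# Save features and labels for later use (e.g. training a classifier)
np.save('features.npy', lotsafeatures)
np.save('labels.npy', np.array(labels))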
End of explanation |
11,398 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
TensorFlow Lattice estimators
In this tutorial, we will cover basics of TensorFlow Lattice estimators.
Step1: Synthetic dataset
Here we create a synthetic dataset.
Step2: DNN Estimator
Now let us define feature columns and use DNN regressor to fit a model.
Step3: TensorFlow Lattice calibrated linear model
Let's use calibrated linear model to fit the data.
Since we only have one example, there's no reason to use a lattice. | Python Code:
# import libraries
!pip install tensorflow_lattice
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_lattice as tfl
import tempfile
from six.moves import urllib
Explanation: TensorFlow Lattice estimators
In this tutorial, we will cover basics of TensorFlow Lattice estimators.
End of explanation
%matplotlib inline
# Training dataset contains one feature, "distance".
train_features = {
'distance': np.array([1.0, 1.3, 1.5, 2.0, 2.1, 3.0,
4.0, 5.0, 1.3, 1.7, 2.5, 2.8,
4.7, 4.2, 3.5, 4.75, 5.2,
5.8, 5.9]) * 0.1,
}
train_labels = np.array([4.8, 4.9, 5.0, 5.0,
4.8, 3.3, 2.5, 2.0,
4.7, 4.6, 4.0, 3.2,
2.12, 2.1, 2.5, 2.2,
2.3, 2.34, 2.6])
plt.scatter(train_features['distance'], train_labels)
plt.xlabel('distance')
plt.ylabel('user happiness')
# This function draws two plots.
# Firstly, we draw the scatter plot of `distance` vs. `label`.
# Secondly, we generate predictions from `estimator` distance ranges in
# [xmin, xmax].
def Plot(distance, label, estimator, xmin=0.0, xmax=10.0):
%matplotlib inline
test_features = {
'distance': np.linspace(xmin, xmax, num=100)
}
# Estimator accepts an input in the form of input_fn (callable).
# numpy_input_fn creates an input function that generates a dictionary where
# the key is a feaeture name ('distance'), and the value is a tensor with
# a shape [batch_size, 1].
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x=test_features,
batch_size=1,
num_epochs=1,
shuffle=False)
# Estimator's prediction is 1d tensor with a shape [batch_size]. Since we
# set batch_size == 1 in the above, p['predictions'] will contain only one
# element in each batch, and we fetch this value by p['predictions'][0].
predictions = [p['predictions'][0]
for p in estimator.predict(input_fn=test_input_fn)]
# Plot estimator's response and (distance, label) scatter plot.
fig, ax = plt.subplots(1, 1)
ax.plot(test_features['distance'], predictions)
ax.scatter(distance, label)
plt.xlabel('distance')
plt.ylabel('user happiness')
plt.legend(['prediction', 'data'])
Explanation: Synthetic dataset
Here we create a synthetic dataset.
End of explanation
# Specify feature.
feature_columns = [
tf.feature_column.numeric_column('distance'),
]
# Define a neural network regressor.
# The first hidden layer contains 30 hidden units, and the second
# hidden layer contains 10 hidden units.
dnn_estimator = tf.estimator.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[30, 10],
optimizer=tf.train.GradientDescentOptimizer(
learning_rate=0.01,
),
)
# Define training input function.
# mini-batch size is 10, and we iterate the dataset over
# 1000 times.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x=train_features,
y=train_labels,
batch_size=10,
num_epochs=1000,
shuffle=False)
tf.logging.set_verbosity(tf.logging.ERROR)
# Train this estimator
dnn_estimator.train(input_fn=train_input_fn)
# Response in [0.0, 1.0] range
Plot(train_features['distance'], train_labels, dnn_estimator, 0.0, 1.0)
# Now let's increase the prediction range to [0.0, 3.0]
# Note) In most machines, the prediction is going up.
# However, DNN training does not have a unique solution, so it's possible
# not to see this phenomenon.
Plot(train_features['distance'], train_labels, dnn_estimator, 0.0, 3.0)
Explanation: DNN Estimator
Now let us define feature columns and use DNN regressor to fit a model.
End of explanation
# TensorFlow Lattice needs feature names to specify
# per-feature parameters.
feature_names = [fc.name for fc in feature_columns]
num_keypoints = 5
hparams = tfl.CalibratedLinearHParams(
feature_names=feature_names,
learning_rate=0.1,
num_keypoints=num_keypoints)
# input keypoint initializers.
# init_fns are dict of (feature_name, callable initializer).
keypoints_init_fns = {
'distance': lambda: tfl.uniform_keypoints_for_signal(num_keypoints,
input_min=0.0,
input_max=0.7,
output_min=-1.0,
output_max=1.0)}
non_monotnic_estimator = tfl.calibrated_linear_regressor(
feature_columns=feature_columns,
keypoints_initializers_fn=keypoints_init_fns,
hparams=hparams)
non_monotnic_estimator.train(input_fn=train_input_fn)
# The prediction goes up!
Plot(train_features['distance'], train_labels, non_monotnic_estimator, 0.0, 1.0)
# Declare distance as a decreasing monotonic input.
hparams.set_feature_param('distance', 'monotonicity', -1)
monotonic_estimator = tfl.calibrated_linear_regressor(
feature_columns=feature_columns,
keypoints_initializers_fn=keypoints_init_fns,
hparams=hparams)
monotonic_estimator.train(input_fn=train_input_fn)
# Now it's decreasing.
Plot(train_features['distance'], train_labels, monotonic_estimator, 0.0, 1.0)
# Even if the output range becomes larger, the prediction never goes up!
Plot(train_features['distance'], train_labels, monotonic_estimator, 0.0, 3.0)
Explanation: TensorFlow Lattice calibrated linear model
Let's use calibrated linear model to fit the data.
Since we only have one feature, there's no reason to use a lattice.
End of explanation |
11,399 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
DO NOT FORGET TO DROP ISSUE_D AFTER PREPPING
Step1: Until I figure out a good imputation method (e.g. bayes PCA), just drop columns with null still
Step2: instantiate network
Step3: get the weights and biases of the nn into np since at this size np is faster (correction, pytorch was faster)
Step4: check that they output the same and speedtest (pytorch was faster)
Step5: Examine performance on test set
Step6: Making model info and saving it
Step7: Examine scores distributions | Python Code:
platform = 'lendingclub'
use_cuda = True
dtype = torch.cuda.FloatTensor
save_path = "model_dump/nn_1_0_0/"
store = pd.HDFStore(
dc.home_path+'/justin_tinkering/data_science/lendingclub/{0}_store.h5'.
format(platform),
append=True)
loan_info = store['train_filtered_columns']
columns = loan_info.columns.values
# checking dtypes to see which columns need one hotting, and which need null or not
to_one_hot = []
to_null_or_not = []
do_nothing = []
for col in columns:
if loan_info[col].dtypes == np.dtype('O'):
# print(col, loan_info[col].isnull().value_counts(dropna=False).to_dict())
to_one_hot.append(col)
elif len(loan_info[col].isnull().value_counts(dropna=False)) > 1:
# print(col, loan_info[col].isnull().value_counts(dropna=False).to_dict())
to_null_or_not.append(col)
else:
# print(col, loan_info[col].isnull().value_counts(dropna=False).to_dict())
do_nothing.append(col)
Explanation: DO NOT FORGET TO DROP ISSUE_D AFTER PREPPING
End of explanation
train_X, train_y, mean_series, std_dev_series = data_prep.process_data_train(
loan_info)
class TrainDataset(Dataset):
def __init__(self, data, targets):
self.data = data
self.targets = targets
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx,:], self.targets[idx,:]
def get_loader(dataset, use_cuda, batch_size=6400, shuffle=True):
return DataLoader(dataset, batch_size=batch_size, shuffle=True, pin_memory=use_cuda)
train_dataset = TrainDataset(train_X.values, train_y.values)
train_loader = get_loader(train_dataset, use_cuda)
Explanation: Until I figure out a good imputation method (e.g. bayes PCA), just drop columns with null still
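A sketch of the drop-nulls approach described here (the actual handling presumably lives inside data_prep.process_data_train, so this is only illustrative), using the to_null_or_not list of null-containing numeric columns built above:
loan_info_no_nulls = loan_info.drop(to_null_or_not, axis=1)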
End of explanation
# %%writefile model_dump/nn_1_0_0/net_class.py
# import torch
# import torch.nn as nn
# import torch.nn.functional as F
# from torch.autograd import Variable
# import numpy as np
# dtype = torch.FloatTensor
# nn_input_dim = 223
# hly1_n = 300
# hly2_n = 400
# hly3_n = 300
# hly4_n = 100
# nn_output_dim = 1
# class Net(nn.Module):
# def __init__(self):
# super(Net, self).__init__()
# self.hl1 = nn.Linear(nn_input_dim, hly1_n)
# self.hl2 = nn.Linear(hly1_n, hly2_n)
# self.hl3 = nn.Linear(hly2_n, hly3_n)
# self.hl4 = nn.Linear(hly3_n, hly4_n)
# self.out = nn.Linear(hly4_n, nn_output_dim)
# def forward(self, x):
# x = F.leaky_relu(self.hl1(x))
# x = F.leaky_relu(self.hl2(x))
# x = F.leaky_relu(self.hl3(x))
# x = F.leaky_relu(self.hl4(x))
# x = self.out(x)
# return x
# def torch_version(df_inputs, net):
# input = Variable(torch.from_numpy(df_inputs.values)).type(dtype)
# return np.round(net(input).data.cpu().numpy(),5)
nn_input_dim = 223
hly1_n = 300
hly2_n = 400
hly3_n = 300
hly4_n = 100
nn_output_dim = 1
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.hl1 = nn.Linear(nn_input_dim, hly1_n)
self.hl2 = nn.Linear(hly1_n, hly2_n)
self.hl3 = nn.Linear(hly2_n, hly3_n)
self.hl4 = nn.Linear(hly3_n, hly4_n)
self.out = nn.Linear(hly4_n, nn_output_dim)
def forward(self, x):
x = F.leaky_relu(self.hl1(x))
x = F.leaky_relu(self.hl2(x))
x = F.leaky_relu(self.hl3(x))
x = F.leaky_relu(self.hl4(x))
x = self.out(x)
return x
net = Net()
params = list(net.parameters())
criterion = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=0.001, weight_decay=0.001)
if use_cuda:
net.cuda()
criterion.cuda()
n_epochs = 500
epoch_list = []
loss_list = []
fig = plt.gcf()
fig.show()
fig.canvas.draw()
for epoch in range(n_epochs):
running_loss = 0
for i, data in enumerate(train_loader):
inputs, targets = data
# wrap in Variable
inputs, targets = Variable(inputs.cuda()).type(dtype), Variable(targets.cuda()).type(dtype)
# in your training loop:
optimizer.zero_grad() # zero the gradient buffers
output = net(inputs)
loss = criterion(output, targets)
loss.backward()
optimizer.step()
running_loss += loss.data[0]
try:
last_loss = loss_list[-1]
except:
last_loss = 9999999999999
if running_loss > (2*last_loss):
pass
else:
epoch_list.append(epoch)
loss_list.append(running_loss)
if epoch % 1 == 0:
plt.plot(epoch_list, loss_list)
plt.title("Epoch: {0}".format(epoch))
fig.canvas.draw()
if (epoch >= 99) & ((epoch+1) % 20 == 0):
torch.save(net.state_dict(), save_path+'1.0.0_e{0}'.format(epoch+1))
Explanation: instantiate network
End of explanation
# np_hl1_weight = net.hl1.weight.data.numpy()
# np_hl1_bias = net.hl1.bias.data.numpy()
# np_hl2_weight = net.hl2.weight.data.numpy()
# np_hl2_bias = net.hl2.bias.data.numpy()
# np_out_weight = net.out.weight.data.numpy()
# np_out_bias = net.out.bias.data.numpy()
Explanation: get the weights and biases of the nn into np since at this size np is faster (correction, pytorch was faster)
End of explanation
# def np_version(df_inputs):
# np_hl1_z = df_inputs.dot(np_hl1_weight.T) + np_hl1_bias
# np_hl1_a = np.maximum(.01*np_hl1_z, np_hl1_z)
# np_hl2_z = np_hl1_a.dot(np_hl2_weight.T) + np_hl2_bias
# np_hl2_a = np.maximum(.01*np_hl2_z, np_hl2_z)
# np_out = np_hl2_a.dot(np_out_weight.T) + np_out_bias
# return np_out
def torch_version(df_inputs, net):
input = Variable(torch.from_numpy(df_inputs.values)).type(dtype)
return np.round(net(input).data.cpu().numpy(),5)
#%timeit np_version(standardized)
%timeit torch_version(train_X, net)
Explanation: check that they output the same and speedtest (pytorch was faster)
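A sketch of what the "output the same" check would look like if the commented-out numpy forward pass were completed to match the four-layer network (the tolerance is arbitrary):
# assert np.allclose(np_version(train_X.values), torch_version(train_X, net).ravel(), atol=1e-4)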
End of explanation
store.open()
test = store['test_filtered_columns']
train = store['train_filtered_columns']
loan_npv_rois = store['loan_npv_rois']
default_series = test['target_strict']
results = store['results']
store.close()
train_X, train_y = data_prep.process_data_test(train)
train_y = train_y['npv_roi_10'].values
test_X, test_y = data_prep.process_data_test(test)
test_y = test_y['npv_roi_10'].values
# regr = joblib.load('model_dump/model_0.2.1.pkl')
regr_version = '1.0.0'
test_yhat = torch_version(test_X, net)
train_yhat = torch_version(train_X, net)
test_mse = mean_squared_error(test_yhat,test_y)
train_mse = mean_squared_error(train_yhat,train_y)
def eval_models_net(trials, port_size, available_loans, net, regr_version, test, loan_npv_rois,
default_series):
results = {}
pct_default = {}
test_copy = test.copy()
for trial in tqdm_notebook(np.arange(trials)):
loan_ids = np.random.choice(
test_copy.index.values, available_loans, replace=False)
loans_to_pick_from = test_copy.loc[loan_ids, :]
scores = torch_version(loans_to_pick_from, net).ravel()  # flatten the (n, 1) output so each Series value is a scalar
scores_series = pd.Series(dict(zip(loan_ids, scores)))
scores_series.sort_values(ascending=False, inplace=True)
picks = scores_series[:port_size].index.values
results[trial] = loan_npv_rois.loc[picks, :].mean().to_dict()
pct_default[trial] = (default_series.loc[picks].sum()) / port_size
pct_default_series = pd.Series(pct_default)
results_df = pd.DataFrame(results).T
results_df['pct_def'] = pct_default_series
return results_df
# as per done with baseline models, say 3000 loans available
# , pick 900 of them
trials = 20000
port_size = 900
available_loans = 3000
model_results = eval_models_net(trials, port_size, available_loans, net, regr_version, test_X, loan_npv_rois, default_series)
multi_index = []
for col in model_results.columns.values:
multi_index.append((str(col),regr_version))
append_results = model_results.copy()
append_results.columns = pd.MultiIndex.from_tuples(multi_index, names = ['discount_rate', 'model'])
multi_index_results = []
for col in results.columns.values:
multi_index_results.append((str(col[0]), col[1]))
results.columns = pd.MultiIndex.from_tuples(multi_index_results, names = ['discount_rate', 'model'])
full_results = results.join(append_results)
full_results.sort_index(axis=1, inplace=True)
full_results.describe()
store.open()
store['results'] = full_results
model_info = store['model_info']
store.close()
Explanation: Examine performance on test set
End of explanation
# dump the model
# joblib.dump(regr, 'model_dump/model_0.2.1.pkl')
joblib.dump((mean_series, std_dev_series), 'model_dump/mean_stddev.pkl')
test_mse
train_mse
now = time.strftime("%Y_%m_%d_%Hh_%Mm_%Ss")
# info to stick in detailed dataframe describing each model
model_info_dict = {'model_version': '1.0.0',
'target': 'npv_roi_10',
'weights': 'None',
'algo_model': 'feedforward NN',
'hyperparams': "nn_input_dim = 223, hly1_n = 300, hly2_n = 400, hly3_n = 300, hly4_n = 100, nn_output_dim = 1, optimizer = optim.Adam(net.parameters(), lr=0.001, weight_decay=0.001)",
'cost_func': 'criterion = nn.MSELoss(),',
'useful_notes': 'test_mse: 0.0644643, train_mse: 0.0636180, epoch_500',
'date': now}
model_info_df = pd.DataFrame(model_info_dict, index = ['1.0.0'])
model_info.ix['1.0.0',:] = model_info_df.values
model_info.sort_index(inplace=True)
model_info
store.open()
store.append(
'model_info',
model_info,
data_columns=True,
index=True,
append=False,
)
store.close()
Explanation: Making model info and saving it
End of explanation
train_preds = pd.Series(train_yhat.ravel())
test_preds = pd.Series(test_yhat.ravel())
train_preds.hist(bins=50)
test_preds.hist(bins=50)
train_preds.describe()
test_preds.describe()
train_preds.value_counts()
test_preds.value_counts()
# try:
# results = results.join(append_results)
# except ValueError:
# results.loc[:, (slice(None), slice('1.0.0','1.0.0'))] = append_results
# results.sort_index(axis=1, inplace = True)
Explanation: Examine scores distributions
End of explanation |