Unnamed: 0 (int64, 0 to 16k) | text_prompt (string, lengths 110 to 62.1k) | code_prompt (string, lengths 37 to 152k)
---|---|---
14,500 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Step1: Example 2
Step2: # Role of pure dephasing
It is more useful to explicitly construct the various parts of the Bloch-Redfield master equation and show that it is the pure-dephasing terms which suppress coherence in these oscillations.
Step3: We can switch on/off the pure dephasing terms
Step4: Software versions | Python Code:
%pylab inline
%load_ext autoreload
%autoreload 2
import contextlib
import time
import numpy as np
from qutip import *
from qutip.nonmarkov.heom import HEOMSolver, HSolverDL, BosonicBath, DrudeLorentzBath, DrudeLorentzPadeBath
from qutip.ipynbtools import HTMLProgressBar
def cot(x):
return 1./np.tan(x)
def J0(energy):
# Drude-Lorentz (overdamped Brownian oscillator) spectral density
return 2 * lam * gamma * (energy)/( ((energy**2) + (gamma**2)))
def dl_corr_approx(t, nk):
"""Drude-Lorentz correlation function approximation.
Approximates the correlation function at each time t using nk exponents."""
c = lam * gamma * (-1.0j + cot(gamma / (2 * T))) * np.exp(-gamma * t)
for k in range(1, nk):
vk = 2 * np.pi * k * T
c += (4 * lam * gamma * T * vk / (vk**2 - gamma**2)) * np.exp(-vk * t)
return c
#A quick plot of the spectral density and environment correlation functions
wlist = linspace(0, 200*3e10*2*pi,100)
lam = 35 * 3e10 * 2 * pi
gamma = (1/(166e-15))
T = 300 * 0.6949 * 3e10 * 2 * pi
beta = 1/T
tlist = linspace(0,1.e-12,1000)
J = [J0(w)/(3e10*2*pi) for w in wlist]
fig, axes = plt.subplots(1, 2, sharex=False, figsize=(10,3))
fig.subplots_adjust(hspace=0.1) # reduce space between plots
axes[0].plot(wlist/(3e10*2*pi), J, color='r',ls='--')
axes[0].set_xlabel(r'$\omega$ (cm$^{-1}$)', fontsize=20)
axes[0].set_ylabel(r"$J(\omega)$ (cm$^{-1}$)", fontsize=16);
axes[1].plot(tlist, [np.real(dl_corr_approx(t,10))for t in tlist], color='r',ls='--',label="c(t) real")
axes[1].plot(tlist, [np.imag(dl_corr_approx(t,10)) for t in tlist], color='g',ls='--',label="c(t) imaginary")
axes[1].set_xlabel(r'$t$', fontsize=20)
axes[1].set_ylabel(r"$C(t)$", fontsize=16);
axes[1].legend(loc=0)
#fig.savefig("figures/drude.pdf")
#We use the Hamiltonian employed in https://www.pnas.org/content/106/41/17255 and operate in units of Hz
Hsys = 3e10 * 2 * pi *Qobj([[200, -87.7, 5.5, -5.9, 6.7, -13.7, -9.9],
[-87.7, 320, 30.8, 8.2, 0.7, 11.8, 4.3],
[5.5, 30.8, 0, -53.5, -2.2, -9.6, 6.0],
[-5.9, 8.2, -53.5, 110, -70.7, -17.0, -63.3],
[6.7, 0.7, -2.2, -70.7, 270, 81.1, -1.3],
[-13.7,11.8, -9.6, -17.0 ,81.1, 420, 39.7],
[-9.9, 4.3, 6.0, -63.3, -1.3, 39.7, 230]])
# start the excitation at site 1
rho0 = basis(7,0)*basis(7,0).dag()
optionsODE = Options(nsteps=15000, store_states=True)
#
Nc = 8
Nk = 0
Q_list = []
baths= []
Ltot = liouvillian(Hsys)
for m in range(7):
Q=basis(7,m)*basis(7,m).dag()
Q_list.append(Q)
baths.append(DrudeLorentzBath(
Q,lam=lam, gamma=gamma, T=T, Nk=Nk,
tag=str(m)))
_, terminator = baths[-1].terminator() #Here we set Nk=0 and
#rely on the terminator
# to correct detailed balance
Ltot += terminator
HEOMMats = HEOMSolver(Hsys, baths, Nc, options=optionsODE)
outputFMOHEOM=HEOMMats.run(rho0,tlist)
matplotlib.rcParams['figure.figsize'] = (7, 5)
matplotlib.rcParams['axes.titlesize'] = 25
matplotlib.rcParams['axes.labelsize'] = 30
matplotlib.rcParams['xtick.labelsize'] = 28
matplotlib.rcParams['ytick.labelsize'] = 28
matplotlib.rcParams['legend.fontsize'] = 28
matplotlib.rcParams['axes.grid'] = False
matplotlib.rcParams['savefig.bbox'] = 'tight'
matplotlib.rcParams['lines.markersize'] = 5
matplotlib.rcParams['font.family'] = 'STIXgeneral'
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams["font.serif"] = "STIX"
matplotlib.rcParams['text.usetex'] = False
from cycler import cycler
fig, axes = plt.subplots(1,1, figsize=(12,8))
default_cycler = (cycler(color=['r', 'g', 'b', 'y','c','m','k']) +
cycler(linestyle=['-', '--', ':', '-.',(0, (1, 10)), (0, (5, 10)),(0, (3, 10, 1, 10))]))
plt.rc('axes',prop_cycle=default_cycler )
for m in range(7):
Q = basis(7,m)*basis(7,m).dag()
axes.plot(array(tlist)*1e15, expect(outputFMOHEOM.states,Q),label=m+1)
axes.set_xlabel(r'$t$ (fs)', fontsize=30)
axes.set_ylabel(r"Population", fontsize=30);
axes.locator_params(axis='y', nbins=6)
axes.locator_params(axis='x', nbins=6)
axes.set_title('HEOM solution ',fontsize=24)
axes.legend(loc=0)
axes.set_xlim(0,1000)
plt.yticks([0.,0.5,1],[0,0.5,1])
plt.xticks([0.,500,1000],[0,500,1000])
#fig.savefig("figures/fmoheom.pdf")
DL = " 2*pi* 2.0 * {lam} / (pi * {gamma} * {beta}) if (w==0) else 2*pi*(2.0*{lam}*{gamma} *w /(pi*(w**2+{gamma}**2))) * ((1/(exp((w) * {beta})-1))+1)".format(gamma=gamma, beta = beta, lam = lam)
Nmax = 7
Q_list = [basis(Nmax, n)*basis(Nmax, n).dag() for n in range(Nmax)]
optionsODE = Options(nsteps=15000, store_states=True,rtol=1e-12,atol=1e-12)
outputBR = brmesolve(Hsys, rho0, tlist, a_ops=[[Q,DL] for Q in Q_list], options = optionsODE)
fig, axes = plt.subplots(1,1, figsize=(12,8))
for m,Q in enumerate(Q_list):
axes.plot(array(tlist)*1e15, expect(outputBR.states,Q),label=m+1)
axes.set_xlabel(r'$t$ (fs)', fontsize=30)
axes.set_ylabel(r"Population", fontsize=30);
axes.set_title('Bloch-Redfield solution ',fontsize=24)
axes.legend(loc=0)
axes.set_xlim(0,1000)
plt.yticks([0.,0.5,1],[0,0.5,1])
plt.xticks([0.,500,1000],[0,500,1000])
#fig.savefig("figures/fmoBR.pdf")
Explanation: Example 2: Dynamics in Fenna-Mathews-Olsen complex (FMO)
Introduction
In this example notebook we outline how to employ the HEOM to
solve the FMO photosynthetic complex dynamics.
We aim to replicate the results in reference https://www.pnas.org/content/106/41/17255
and compare them to a Bloch-Redfield (perturbative) solution.
This demonstrates how to employ the solver for multiple baths, as well as showing how a
quantum environment reduces the effect of pure dephasing.
End of explanation
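As a hedged add-on (not part of the original notebook), the HEOM and Bloch-Redfield populations of site 1 can be overlaid to make the comparison explicit; the sketch below only reuses tlist, Q_list, outputFMOHEOM and outputBR defined in the code above.

```python
# Sketch only: overlay the HEOM and Bloch-Redfield populations of site 1.
fig, ax = plt.subplots(1, 1, figsize=(8, 5))
ax.plot(array(tlist) * 1e15, expect(outputFMOHEOM.states, Q_list[0]), label="HEOM, site 1")
ax.plot(array(tlist) * 1e15, expect(outputBR.states, Q_list[0]), "--", label="Bloch-Redfield, site 1")
ax.set_xlabel(r"$t$ (fs)")
ax.set_ylabel("Population")
ax.legend(loc=0)
```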
def n_th(energy):
beta=1./Temperature # Temperature is assumed to be defined elsewhere (same value as T above)
return 1./(np.exp(energy*beta) - 1.)
def J0(energy):
# Drude-Lorentz (overdamped Brownian oscillator) spectral density
return 2 * lam * gamma * (energy)/( pi * ((energy**2) + (gamma**2)))
def J02(energy):
# zero-frequency limit of J0(energy)/energy, used for the pure-dephasing rates
return 2 * lam * gamma /(np.pi * ((gamma**2)))
def get_collapse(dephasing = 1):
all_energy, all_state = Hsys.eigenstates()
Nmax = 7
Q_list = [basis(Nmax, n)*basis(Nmax, n).dag() for n in range(Nmax)]
collapse_list = []
for Q in Q_list:
for j in range(Nmax):
for k in range(j+1,Nmax):
Deltajk = abs(all_energy[k] - all_energy[j])
if abs(Deltajk) > 0 :
rate = np.absolute(Q.matrix_element(all_state[j].dag(),all_state[k]))**2 * 2 * pi * J0(Deltajk) * (n_th(Deltajk)+1)
if rate > 0.0:
collapse_list.append((np.sqrt(rate)*all_state[j]*all_state[k].dag())) #emission
rate = np.absolute(Q.matrix_element(all_state[k].dag(),all_state[j]))**2 * 2 * pi * J0(Deltajk) * (n_th(Deltajk))
if rate > 0.0:
collapse_list.append((np.sqrt(rate)*all_state[k]*all_state[j].dag())) #absorption
if dephasing:
for j in range(Nmax):
rate = np.absolute(Q.matrix_element(all_state[j].dag(),all_state[j]))**2 * pi * J02(0.) * Temperature
if rate > 0.0:
collapse_list.append((np.sqrt(rate)*all_state[j]*all_state[j].dag())) #emission
return collapse_list
Explanation: # Role of pure dephasing
It is more useful to explicitly construct the various parts of the Bloch-Redfield master equation and show that it is the pure-dephasing terms which suppress coherence in these oscillations.
End of explanation
#dephasing terms on, we recover the full BR solution
collapse_list = get_collapse(dephasing=True)
outputFMO = mesolve(Hsys, rho0, tlist, collapse_list)
fig, axes = plt.subplots(1,1, figsize=(12,8))
for m,Q in enumerate(Q_list):
axes.plot(tlist*1e15, expect(outputFMO.states,Q),label=m+1)
axes.set_xlabel(r'$t$', fontsize=20)
axes.set_ylabel(r"Population", fontsize=16);
axes.set_title('With pure dephasing',fontsize=24)
axes.legend(loc=0, fontsize=18)
#dephasing terms off
collapse_list = get_collapse(dephasing=False)
outputFMO = mesolve(Hsys, rho0, tlist, collapse_list)
fig, axes = plt.subplots(1,1, figsize=(12,8))
for m,Q in enumerate(Q_list):
axes.plot(tlist*1e15, expect(outputFMO.states,Q),label=m+1)
axes.set_xlabel(r'$t$', fontsize=20)
axes.set_ylabel(r"Population", fontsize=16);
axes.set_title('Without pure dephasing',fontsize=24)
axes.legend(loc=0, fontsize=18)
Explanation: We can switch on/off the pure dephasing terms:
End of explanation
from qutip.ipynbtools import version_table
version_table()
Explanation: Software versions
End of explanation |
14,501 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Convolutions for Computer Vision
To build and test your intuition for convolutions, you will design a vertical line detector.
We'll apply that detector to each part of an image to create a new tensor showing where vertical lines are located.
The information in this tutorial will be useful in the ImageNet Challenges.
Step1: Example Convolution
Step2: Vertical Line Detector
Step3: Now create a list that contains your convolutions, then apply them to the image data
Step4: Let's see the image with just the horizontal and vertical line filters
Step5: Building Models from Convolutions
This section describes how convolutions are combined in a way that enables computer vision.
At the end of this lesson, you will be able to write TensorFlow and Keras code to use one of the best models in computer vision.
Programming in TensorFlow and Keras
Choose the images to work with
Step6: Write a function to read and prepare images for modeling
Step7: Create a model with the pre-trained weights file and make predictions
Step8: Visualization Time! | Python Code:
import sys
from packages.learntools.deep_learning.exercise_1 import load_my_image, apply_conv_to_image, show, print_hints
Explanation: Convolutions for Computer Vision
To build and test your intuition for convolutions, you will design a vertical line detector.
We'll apply that detector to each part of an image to create a new tensor showing where vertical lines are located.
The information in this tutorial will be useful in the ImageNet Challenges.
End of explanation
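Before looking at the detectors themselves, it may help to see the arithmetic a convolution performs at a single location. The snippet below is purely illustrative; apply_conv_to_image (from the course helper package) is assumed to slide the kernel over the image and do this multiply-and-sum at every position.

```python
import numpy as np

patch = np.array([[0.8, 0.7],    # bright top row of a 2x2 image patch
                  [0.1, 0.2]])   # dark bottom row
kernel = np.array([[1, 1],
                   [-1, -1]])    # the horizontal-line detector defined below
print(np.sum(patch * kernel))    # large positive value -> horizontal edge here
```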
# Detects light pixels above dark pixels (a horizontal edge):
horizontal_line_conv = [[1, 1],
[-1, -1]]
Explanation: Example Convolution: Horizontal Line Detector
End of explanation
vertical_line_conv = [[1, -1],
[1, -1]] # columns differ in sign, so the kernel responds to vertical edges
Explanation: Vertical Line Detector
End of explanation
conv_list = [horizontal_line_conv, vertical_line_conv]
original_image = load_my_image()
print("Original Image: ")
show(original_image)
Explanation: Now create a list that contains your convolutions, then apply them to the image data:
End of explanation
for conv in conv_list:
filtered_image = apply_conv_to_image(conv, original_image)
show(filtered_image)
Explanation: Let's see the image with just the horizontal and vertical line filters:
End of explanation
from os.path import join
image_dir = 'data/dog_breed/train/'
img_paths = [join(image_dir, filename) for filename in
['0246f44bb123ce3f91c939861eb97fb7.jpg',
'84728e78632c0910a69d33f82e62638c.jpg',
'8825e914555803f4c67b26593c9d5aff.jpg',
'91a5e8db15bccfb6cfa2df5e8b95ec03.jpg']]
Explanation: Building Models from Convolutions
This section describes how convolutions are combined in a way that enables computer vision.
At the end of this lesson, you will be able to write TensorFlow and Keras code to use one of the best models in computer vision.
Programming in TensorFlow and Keras
Choose the images to work with:
End of explanation
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.applications.resnet50 import preprocess_input
from tensorflow.python.keras.preprocessing.image import load_img, img_to_array
image_size=224
def read_and_prep_images(img_paths, img_height=image_size, img_width=image_size):
imgs = [load_img(img_path, target_size=(img_height, img_width)) for img_path in img_paths]
img_array = np.array([img_to_array(img) for img in imgs])
return preprocess_input(img_array)
Explanation: Write a function to read and prepare images for modeling:
End of explanation
from tensorflow.python.keras.applications import ResNet50
my_model = ResNet50(weights='inputs/resnet50_weights_tf_dim_ordering_tf_kernels.h5')
test_data = read_and_prep_images(img_paths)
preds = my_model.predict(test_data)
Explanation: Create a model with the pre-trained weights file and make predictions:
End of explanation
import sys
# Add a directory with prefabricated code to your path.
sys.path.append('inputs/utils')
from decode_predictions import decode_predictions
from IPython.display import Image, display
most_likely_labels = decode_predictions(preds, top=3, class_list_path='inputs/imagenet_class_index.json')
for i, img_path in enumerate(img_paths):
display(Image(img_path))
print(most_likely_labels[i])
Explanation: Visualization Time!
End of explanation |
14,502 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Vertex SDK
Step1: Install the latest GA version of google-cloud-storage library as well.
Step2: Restart the kernel
Once you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.
Step3: Before you begin
GPU runtime
This tutorial does not require a GPU runtime.
Set up your Google Cloud project
The following steps are required, regardless of your notebook environment.
Select or create a Google Cloud project. When you first create an account, you get a $300 free credit towards your compute/storage costs.
Make sure that billing is enabled for your project.
Enable the following APIs
Step4: Region
You can also change the REGION variable, which is used for operations
throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.
Americas
Step5: Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.
Step6: Authenticate your Google Cloud account
If you are using Google Cloud Notebooks, your environment is already authenticated. Skip this step.
If you are using Colab, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.
Otherwise, follow these steps
Step7: Create a Cloud Storage bucket
The following steps are required, regardless of your notebook environment.
When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.
Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
Step8: Only if your bucket doesn't already exist
Step9: Finally, validate access to your Cloud Storage bucket by examining its contents
Step10: Set up variables
Next, set up some variables used throughout the tutorial.
Import libraries and define constants
Step11: Initialize Vertex SDK for Python
Initialize the Vertex SDK for Python for your project and corresponding bucket.
Step12: Tutorial
Now you are ready to start creating your own AutoML text entity extraction model.
Location of Cloud Storage training data.
Now set the variable IMPORT_FILE to the location of the JSONL index file in Cloud Storage.
Step13: Quick peek at your data
This tutorial uses a version of the NCBI Biomedical dataset that is stored in a public Cloud Storage bucket, using a JSONL index file.
Start by doing a quick peek at the data. You count the number of examples by counting the number of objects in a JSONL index file (wc -l) and then peek at the first few rows.
Step14: Create the Dataset
Next, create the Dataset resource using the create method for the TextDataset class, which takes the following parameters
Step15: Create and run training pipeline
To train an AutoML model, you perform two steps
Step16: Run the training pipeline
Next, you run the DAG to start the training job by invoking the method run, with the following parameters
Step17: Review model evaluation scores
After your model has finished training, you can review the evaluation scores for it.
First, you need to get a reference to the new model. As with datasets, you can either use the reference to the model variable you created when you deployed the model or you can list all of the models in your project.
Step18: Send a batch prediction request
Send a batch prediction to your deployed model.
Make test items
You will use synthetic data as a test data items. Don't be concerned that we are using synthetic data -- we just want to demonstrate how to make a prediction.
Step19: Make the batch input file
Now make a batch input file, which you will store in your local Cloud Storage bucket. The batch input file can only be in JSONL format. For JSONL file, you make one dictionary entry per line for each data item (instance). The dictionary contains the key/value pairs
Step20: Make the batch prediction request
Now that your Model resource is trained, you can make a batch prediction by invoking the batch_predict() method, with the following parameters
Step21: Wait for completion of batch prediction job
Next, wait for the batch job to complete. Alternatively, one can set the parameter sync to True in the batch_predict() method to block until the batch prediction job is completed.
Step22: Get the predictions
Next, get the results from the completed batch prediction job.
The results are written to the Cloud Storage output bucket you specified in the batch prediction request. You call the method iter_outputs() to get a list of each Cloud Storage file generated with the results. Each file contains one or more prediction requests in a JSON format
Step23: Cleaning up
To clean up all Google Cloud resources used in this project, you can delete the Google Cloud
project you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial | Python Code:
import os
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG
Explanation: Vertex SDK: AutoML training text entity extraction model for batch prediction
<table align="left">
<td>
<a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_automl_text_entity_extraction_batch.ipynb">
<img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
</a>
</td>
<td>
<a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_automl_text_entity_extraction_batch.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
<td>
<a href="https://console.cloud.google.com/ai/platform/notebooks/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_automl_text_entity_extraction_batch.ipynb">
Open in Google Cloud Notebooks
</a>
</td>
</table>
<br/><br/><br/>
Overview
This tutorial demonstrates how to use the Vertex SDK to create text entity extraction models and do batch prediction using a Google Cloud AutoML model.
Dataset
The dataset used for this tutorial is the NCBI Disease Research Abstracts dataset from National Center for Biotechnology Information. The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket.
Objective
In this tutorial, you create an AutoML text entity extraction model from a Python script, and then do a batch prediction using the Vertex SDK. You can alternatively create and deploy models using the gcloud command-line tool or online using the Cloud Console.
The steps performed include:
Create a Vertex Dataset resource.
Train the model.
View the model evaluation.
Make a batch prediction.
There is one key difference between using batch prediction and using online prediction:
Prediction Service: Does an on-demand prediction for the entire set of instances (i.e., one or more data items) and returns the results in real-time.
Batch Prediction Service: Does a queued (batch) prediction for the entire set of instances in the background and stores the results in a Cloud Storage bucket when ready.
Costs
This tutorial uses billable components of Google Cloud:
Vertex AI
Cloud Storage
Learn about Vertex AI
pricing and Cloud Storage
pricing, and use the Pricing
Calculator
to generate a cost estimate based on your projected usage.
Set up your local development environment
If you are using Colab or Google Cloud Notebooks, your environment already meets all the requirements to run this notebook. You can skip this step.
Otherwise, make sure your environment meets this notebook's requirements. You need the following:
The Cloud Storage SDK
Git
Python 3
virtualenv
Jupyter notebook running in a virtual environment with Python 3
The Cloud Storage guide to Setting up a Python development environment and the Jupyter installation guide provide detailed instructions for meeting these requirements. The following steps provide a condensed set of instructions:
Install and initialize the SDK.
Install Python 3.
Install virtualenv and create a virtual environment that uses Python 3. Activate the virtual environment.
To install Jupyter, run pip3 install jupyter on the command-line in a terminal shell.
To launch Jupyter, run jupyter notebook on the command-line in a terminal shell.
Open this notebook in the Jupyter Notebook Dashboard.
Installation
Install the latest version of Vertex SDK for Python.
End of explanation
! pip3 install -U google-cloud-storage $USER_FLAG
if os.getenv("IS_TESTING"):
! pip3 install --upgrade tensorflow $USER_FLAG
Explanation: Install the latest GA version of google-cloud-storage library as well.
End of explanation
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
Explanation: Restart the kernel
Once you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.
End of explanation
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
Explanation: Before you begin
GPU runtime
This tutorial does not require a GPU runtime.
Set up your Google Cloud project
The following steps are required, regardless of your notebook environment.
Select or create a Google Cloud project. When you first create an account, you get a $300 free credit towards your compute/storage costs.
Make sure that billing is enabled for your project.
Enable the following APIs: Vertex AI APIs, Compute Engine APIs, and Cloud Storage.
If you are running this notebook locally, you will need to install the Cloud SDK.
Enter your project ID in the cell below. Then run the cell to make sure the
Cloud SDK uses the right project for all the commands in this notebook.
Note: Jupyter runs lines prefixed with ! as shell commands, and it interpolates Python variables prefixed with $.
End of explanation
REGION = "us-central1" # @param {type: "string"}
Explanation: Region
You can also change the REGION variable, which is used for operations
throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.
Americas: us-central1
Europe: europe-west4
Asia Pacific: asia-east1
You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.
Learn more about Vertex AI regions
End of explanation
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
Explanation: Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.
End of explanation
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
import os
import sys
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
Explanation: Authenticate your Google Cloud account
If you are using Google Cloud Notebooks, your environment is already authenticated. Skip this step.
If you are using Colab, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.
Otherwise, follow these steps:
In the Cloud Console, go to the Create service account key page.
Click Create service account.
In the Service account name field, enter a name, and click Create.
In the Grant this service account access to project section, click the Role drop-down list. Type "Vertex" into the filter box, and select Vertex Administrator. Type "Storage Object Admin" into the filter box, and select Storage Object Admin.
Click Create. A JSON file that contains your key downloads to your local environment.
Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.
End of explanation
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
Explanation: Create a Cloud Storage bucket
The following steps are required, regardless of your notebook environment.
When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.
Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
End of explanation
! gsutil mb -l $REGION $BUCKET_NAME
Explanation: Only if your bucket doesn't already exist: Run the following cell to create your Cloud Storage bucket.
End of explanation
! gsutil ls -al $BUCKET_NAME
Explanation: Finally, validate access to your Cloud Storage bucket by examining its contents:
End of explanation
import google.cloud.aiplatform as aip
Explanation: Set up variables
Next, set up some variables used throughout the tutorial.
Import libraries and define constants
End of explanation
aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)
Explanation: Initialize Vertex SDK for Python
Initialize the Vertex SDK for Python for your project and corresponding bucket.
End of explanation
IMPORT_FILE = "gs://cloud-samples-data/language/ucaip_ten_dataset.jsonl"
Explanation: Tutorial
Now you are ready to start creating your own AutoML text entity extraction model.
Location of Cloud Storage training data.
Now set the variable IMPORT_FILE to the location of the JSONL index file in Cloud Storage.
End of explanation
if "IMPORT_FILES" in globals():
FILE = IMPORT_FILES[0]
else:
FILE = IMPORT_FILE
count = ! gsutil cat $FILE | wc -l
print("Number of Examples", int(count[0]))
print("First 10 rows")
! gsutil cat $FILE | head
Explanation: Quick peek at your data
This tutorial uses a version of the NCBI Biomedical dataset that is stored in a public Cloud Storage bucket, using a JSONL index file.
Start by doing a quick peek at the data. You count the number of examples by counting the number of objects in a JSONL index file (wc -l) and then peek at the first few rows.
End of explanation
dataset = aip.TextDataset.create(
display_name="NCBI Biomedical" + "_" + TIMESTAMP,
gcs_source=[IMPORT_FILE],
import_schema_uri=aip.schema.dataset.ioformat.text.extraction,
)
print(dataset.resource_name)
Explanation: Create the Dataset
Next, create the Dataset resource using the create method for the TextDataset class, which takes the following parameters:
display_name: The human readable name for the Dataset resource.
gcs_source: A list of one or more dataset index files to import the data items into the Dataset resource.
import_schema_uri: The data labeling schema for the data items.
This operation may take several minutes.
End of explanation
dag = aip.AutoMLTextTrainingJob(
display_name="biomedical_" + TIMESTAMP, prediction_type="extraction"
)
print(dag)
Explanation: Create and run training pipeline
To train an AutoML model, you perform two steps: 1) create a training pipeline, and 2) run the pipeline.
Create training pipeline
An AutoML training pipeline is created with the AutoMLTextTrainingJob class, with the following parameters:
display_name: The human readable name for the TrainingJob resource.
prediction_type: The type of task to train the model for.
classification: A text classification model.
sentiment: A text sentiment analysis model.
extraction: A text entity extraction model.
multi_label: If a classification task, whether single (False) or multi-labeled (True).
sentiment_max: If a sentiment analysis task, the maximum sentiment value.
The instantiated object is the DAG (directed acyclic graph) for the training pipeline.
End of explanation
model = dag.run(
dataset=dataset,
model_display_name="biomedical_" + TIMESTAMP,
training_fraction_split=0.8,
validation_fraction_split=0.1,
test_fraction_split=0.1,
)
Explanation: Run the training pipeline
Next, you run the DAG to start the training job by invoking the method run, with the following parameters:
dataset: The Dataset resource to train the model.
model_display_name: The human readable name for the trained model.
training_fraction_split: The percentage of the dataset to use for training.
test_fraction_split: The percentage of the dataset to use for test (holdout data).
validation_fraction_split: The percentage of the dataset to use for validation.
The run method when completed returns the Model resource.
The execution of the training pipeline will take up to 20 minutes.
End of explanation
# Get model resource ID
models = aip.Model.list(filter="display_name=biomedical_" + TIMESTAMP)
# Get a reference to the Model Service client
client_options = {"api_endpoint": f"{REGION}-aiplatform.googleapis.com"}
model_service_client = aip.gapic.ModelServiceClient(client_options=client_options)
model_evaluations = model_service_client.list_model_evaluations(
parent=models[0].resource_name
)
model_evaluation = list(model_evaluations)[0]
print(model_evaluation)
Explanation: Review model evaluation scores
After your model has finished training, you can review the evaluation scores for it.
First, you need to get a reference to the new model. As with datasets, you can either use the reference to the model variable you created when you deployed the model or you can list all of the models in your project.
End of explanation
test_item_1 = 'Molecular basis of hexosaminidase A deficiency and pseudodeficiency in the Berks County Pennsylvania Dutch.\tFollowing the birth of two infants with Tay-Sachs disease ( TSD ) , a non-Jewish , Pennsylvania Dutch kindred was screened for TSD carriers using the biochemical assay . A high frequency of individuals who appeared to be TSD heterozygotes was detected ( Kelly et al . , 1975 ) . Clinical and biochemical evidence suggested that the increased carrier frequency was due to at least two altered alleles for the hexosaminidase A alpha-subunit . We now report two mutant alleles in this Pennsylvania Dutch kindred , and one polymorphism . One allele , reported originally in a French TSD patient ( Akli et al . , 1991 ) , is a GT-- > AT transition at the donor splice-site of intron 9 . The second , a C-- > T transition at nucleotide 739 ( Arg247Trp ) , has been shown by Triggs-Raine et al . ( 1992 ) to be a clinically benign " pseudodeficient " allele associated with reduced enzyme activity against artificial substrate . Finally , a polymorphism [ G-- > A ( 759 ) ] , which leaves valine at codon 253 unchanged , is described'
test_item_2 = "Analysis of alkaptonuria (AKU) mutations and polymorphisms reveals that the CCC sequence motif is a mutational hot spot in the homogentisate 1,2 dioxygenase gene (HGO). We recently showed that alkaptonuria ( AKU ) is caused by loss-of-function mutations in the homogentisate 1 , 2 dioxygenase gene ( HGO ) . Herein we describe haplotype and mutational analyses of HGO in seven new AKU pedigrees . These analyses identified two novel single-nucleotide polymorphisms ( INV4 + 31A-- > G and INV11 + 18A-- > G ) and six novel AKU mutations ( INV1-1G-- > A , W60G , Y62C , A122D , P230T , and D291E ) , which further illustrates the remarkable allelic heterogeneity found in AKU . Reexamination of all 29 mutations and polymorphisms thus far described in HGO shows that these nucleotide changes are not randomly distributed ; the CCC sequence motif and its inverted complement , GGG , are preferentially mutated . These analyses also demonstrated that the nucleotide substitutions in HGO do not involve CpG dinucleotides , which illustrates important differences between HGO and other genes for the occurrence of mutation at specific short-sequence motifs . Because the CCC sequence motifs comprise a significant proportion ( 34 . 5 % ) of all mutated bases that have been observed in HGO , we conclude that the CCC triplet is a mutational hot spot in HGO ."
Explanation: Send a batch prediction request
Send a batch prediction to your deployed model.
Make test items
You will use synthetic data as a test data items. Don't be concerned that we are using synthetic data -- we just want to demonstrate how to make a prediction.
End of explanation
import json
import tensorflow as tf
gcs_test_item_1 = BUCKET_NAME + "/test1.txt"
with tf.io.gfile.GFile(gcs_test_item_1, "w") as f:
f.write(test_item_1 + "\n")
gcs_test_item_2 = BUCKET_NAME + "/test2.txt"
with tf.io.gfile.GFile(gcs_test_item_2, "w") as f:
f.write(test_item_2 + "\n")
gcs_input_uri = BUCKET_NAME + "/test.jsonl"
with tf.io.gfile.GFile(gcs_input_uri, "w") as f:
data = {"content": gcs_test_item_1, "mime_type": "text/plain"}
f.write(json.dumps(data) + "\n")
data = {"content": gcs_test_item_2, "mime_type": "text/plain"}
f.write(json.dumps(data) + "\n")
print(gcs_input_uri)
! gsutil cat $gcs_input_uri
Explanation: Make the batch input file
Now make a batch input file, which you will store in your local Cloud Storage bucket. The batch input file can only be in JSONL format. For JSONL file, you make one dictionary entry per line for each data item (instance). The dictionary contains the key/value pairs:
content: The Cloud Storage path to the file with the text item.
mime_type: The content type. In our example, it is a text file.
For example:
{'content': '[your-bucket]/file1.txt', 'mime_type': 'text/plain'}
End of explanation
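As an optional sanity check (not in the original notebook), you can re-read the batch input file and confirm that every line is valid JSON with exactly the two expected keys:

```python
import json
import tensorflow as tf

with tf.io.gfile.GFile(gcs_input_uri, "r") as f:
    for line in f:
        item = json.loads(line)
        assert set(item.keys()) == {"content", "mime_type"}, item
```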
batch_predict_job = model.batch_predict(
job_display_name="biomedical_" + TIMESTAMP,
gcs_source=gcs_input_uri,
gcs_destination_prefix=BUCKET_NAME,
sync=False,
)
print(batch_predict_job)
Explanation: Make the batch prediction request
Now that your Model resource is trained, you can make a batch prediction by invoking the batch_predict() method, with the following parameters:
job_display_name: The human readable name for the batch prediction job.
gcs_source: A list of one or more batch request input files.
gcs_destination_prefix: The Cloud Storage location for storing the batch prediction results.
sync: If set to True, the call will block while waiting for the asynchronous batch job to complete.
End of explanation
batch_predict_job.wait()
Explanation: Wait for completion of batch prediction job
Next, wait for the batch job to complete. Alternatively, one can set the parameter sync to True in the batch_predict() method to block until the batch prediction job is completed.
End of explanation
import json
import tensorflow as tf
bp_iter_outputs = batch_predict_job.iter_outputs()
prediction_results = list()
for blob in bp_iter_outputs:
if blob.name.split("/")[-1].startswith("prediction"):
prediction_results.append(blob.name)
tags = list()
for prediction_result in prediction_results:
gfile_name = f"gs://{bp_iter_outputs.bucket.name}/{prediction_result}"
with tf.io.gfile.GFile(name=gfile_name, mode="r") as gfile:
for line in gfile.readlines():
line = json.loads(line)
print(line)
break
Explanation: Get the predictions
Next, get the results from the completed batch prediction job.
The results are written to the Cloud Storage output bucket you specified in the batch prediction request. You call the method iter_outputs() to get a list of each Cloud Storage file generated with the results. Each file contains one or more prediction requests in a JSON format:
content: The prediction request.
prediction: The prediction response.
ids: The internal assigned unique identifiers for each prediction request.
displayNames: The class names for each class label.
confidences: The predicted confidence, between 0 and 1, per class label.
textSegmentStartOffsets: The character offset in the text to the start of the entity.
textSegmentEndOffsets: The character offset in the text to the end of the entity.
End of explanation
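A small helper like the one below (hypothetical, built only on the field names listed above) turns one parsed result line into readable entity spans; call it as show_entities(line) inside the loop above, and adjust the key names if your output schema differs.

```python
def show_entities(parsed_line):
    pred = parsed_line["prediction"]
    for name, conf, start, end in zip(
            pred["displayNames"], pred["confidences"],
            pred["textSegmentStartOffsets"], pred["textSegmentEndOffsets"]):
        print("{} ({:.2f}): characters {}-{}".format(name, float(conf), start, end))
```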
delete_all = True
if delete_all:
# Delete the dataset using the Vertex dataset object
try:
if "dataset" in globals():
dataset.delete()
except Exception as e:
print(e)
# Delete the model using the Vertex model object
try:
if "model" in globals():
model.delete()
except Exception as e:
print(e)
# Delete the endpoint using the Vertex endpoint object
try:
if "endpoint" in globals():
endpoint.delete()
except Exception as e:
print(e)
# Delete the AutoML or Pipeline trainig job
try:
if "dag" in globals():
dag.delete()
except Exception as e:
print(e)
# Delete the custom trainig job
try:
if "job" in globals():
job.delete()
except Exception as e:
print(e)
# Delete the batch prediction job using the Vertex batch prediction object
try:
if "batch_predict_job" in globals():
batch_predict_job.delete()
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object
try:
if "hpt_job" in globals():
hpt_job.delete()
except Exception as e:
print(e)
if "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
Explanation: Cleaning up
To clean up all Google Cloud resources used in this project, you can delete the Google Cloud
project you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial:
Dataset
Pipeline
Model
Endpoint
AutoML Training Job
Batch Job
Custom Job
Hyperparameter Tuning Job
Cloud Storage Bucket
End of explanation |
14,503 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Collaborative filtering on Google Analytics data
This notebook demonstrates how to implement a WALS matrix factorization approach to do collaborative filtering.
Step2: Create raw dataset
<p>
For collaborative filtering, we don't need to know anything about either the users or the content. Essentially, all we need to know is userId, itemId, and rating that the particular user gave the particular item.
<p>
In this case, we are working with newspaper articles. The company doesn't ask their users to rate the articles. However, we can use the time-spent on the page as a proxy for rating.
<p>
Normally, we would also add a time filter to this ("latest 7 days"), but our dataset is itself limited to a few days.
Step3: Create dataset for WALS
<p>
The raw dataset (above) won't work for WALS
Step4: Creating rows and columns datasets
Step5: To summarize, we created the following data files from collab_raw.csv
Step6: This code is helpful in developing the input function. You don't need it in production.
Step7: Run as a Python module
Let's run it as a Python module for just a few steps.
Step8: Run on Cloud
Step9: This took <b>10 minutes</b> for me.
Get row and column factors
Once you have a trained WALS model, you can get row and column factors (user and item embeddings) from the checkpoint file. We'll look at how to use these in the section on building a recommendation system using deep neural networks.
Step10: You can visualize the embedding vectors using dimensional reduction techniques such as PCA. | Python Code:
import os
PROJECT = "cloud-training-demos" # REPLACE WITH YOUR PROJECT ID
BUCKET = "cloud-training-demos-ml" # REPLACE WITH YOUR BUCKET NAME
REGION = "us-central1" # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
# Do not change these
os.environ["PROJECT"] = PROJECT
os.environ["BUCKET"] = BUCKET
os.environ["REGION"] = REGION
os.environ["TFVERSION"] = "1.13"
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
import tensorflow as tf
print(tf.__version__)
Explanation: Collaborative filtering on Google Analytics data
This notebook demonstrates how to implement a WALS matrix factorization approach to do collaborative filtering.
End of explanation
from google.cloud import bigquery
bq = bigquery.Client(project = PROJECT)
sql =
#standardSQL
WITH CTE_visitor_page_content AS (
SELECT
fullVisitorID,
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) AS latestContentId,
(LEAD(hits.time, 1) OVER (PARTITION BY fullVisitorId ORDER BY hits.time ASC) - hits.time) AS session_duration
FROM
`cloud-training-demos.GA360_test.ga_sessions_sample`,
UNNEST(hits) AS hits
WHERE
# only include hits on pages
hits.type = "PAGE"
GROUP BY
fullVisitorId,
latestContentId,
hits.time )
-- Aggregate web stats
SELECT
fullVisitorID as visitorId,
latestContentId as contentId,
SUM(session_duration) AS session_duration
FROM
CTE_visitor_page_content
WHERE
latestContentId IS NOT NULL
GROUP BY
fullVisitorID,
latestContentId
HAVING
session_duration > 0
ORDER BY
latestContentId
df = bq.query(sql).to_dataframe()
df.head()
stats = df.describe()
stats
df[["session_duration"]].plot(kind="hist", logy=True, bins=100, figsize=[8,5])
# The rating is the session_duration scaled to be in the range 0-1. This will help with training.
median = stats.loc["50%", "session_duration"]
df["rating"] = 0.3 * df["session_duration"] / median
df.loc[df["rating"] > 1, "rating"] = 1
df[["rating"]].plot(kind="hist", logy=True, bins=100, figsize=[8,5])
del df["session_duration"]
%%bash
rm -rf data
mkdir data
df.to_csv(path_or_buf = "data/collab_raw.csv", index = False, header = False)
!head data/collab_raw.csv
Explanation: Create raw dataset
<p>
For collaborative filtering, we don't need to know anything about either the users or the content. Essentially, all we need to know is userId, itemId, and rating that the particular user gave the particular item.
<p>
In this case, we are working with newspaper articles. The company doesn't ask their users to rate the articles. However, we can use the time-spent on the page as a proxy for rating.
<p>
Normally, we would also add a time filter to this ("latest 7 days"), but our dataset is itself limited to a few days.
End of explanation
import pandas as pd
import numpy as np
def create_mapping(values, filename):
with open(filename, 'w') as ofp:
value_to_id = {value:idx for idx, value in enumerate(values.unique())}
for value, idx in value_to_id.items():
ofp.write("{},{}\n".format(value, idx))
return value_to_id
df = pd.read_csv(filepath_or_buffer = "data/collab_raw.csv",
header = None,
names = ["visitorId", "contentId", "rating"],
dtype = {"visitorId": str, "contentId": str, "rating": np.float})
df.to_csv(path_or_buf = "data/collab_raw.csv", index = False, header = False)
user_mapping = create_mapping(df["visitorId"], "data/users.csv")
item_mapping = create_mapping(df["contentId"], "data/items.csv")
!head -3 data/*.csv
df["userId"] = df["visitorId"].map(user_mapping.get)
df["itemId"] = df["contentId"].map(item_mapping.get)
mapped_df = df[["userId", "itemId", "rating"]]
mapped_df.to_csv(path_or_buf = "data/collab_mapped.csv", index = False, header = False)
mapped_df.head()
Explanation: Create dataset for WALS
<p>
The raw dataset (above) won't work for WALS:
<ol>
<li> The userId and itemId have to be 0,1,2 ... so we need to create a mapping from visitorId (in the raw data) to userId and contentId (in the raw data) to itemId.
<li> We will need to save the above mapping to a file because at prediction time, we'll need to know how to map the contentId in the table above to the itemId.
<li> We'll need two files: a "rows" dataset where all the items for a particular user are listed; and a "columns" dataset where all the users for a particular item are listed.
</ol>
<p>
### Mapping
End of explanation
import pandas as pd
import numpy as np
mapped_df = pd.read_csv(filepath_or_buffer = "data/collab_mapped.csv", header = None, names = ["userId", "itemId", "rating"])
mapped_df.head()
NITEMS = np.max(mapped_df["itemId"]) + 1
NUSERS = np.max(mapped_df["userId"]) + 1
mapped_df["rating"] = np.round(mapped_df["rating"].values, 2)
print("{} items, {} users, {} interactions".format( NITEMS, NUSERS, len(mapped_df) ))
grouped_by_items = mapped_df.groupby("itemId")
iter = 0
for item, grouped in grouped_by_items:
print(item, grouped["userId"].values, grouped["rating"].values)
iter = iter + 1
if iter > 5:
break
import tensorflow as tf
grouped_by_items = mapped_df.groupby("itemId")
with tf.python_io.TFRecordWriter("data/users_for_item") as ofp:
for item, grouped in grouped_by_items:
example = tf.train.Example(features = tf.train.Features(feature = {
"key": tf.train.Feature(int64_list = tf.train.Int64List(value = [item])),
"indices": tf.train.Feature(int64_list = tf.train.Int64List(value = grouped["userId"].values)),
"values": tf.train.Feature(float_list = tf.train.FloatList(value = grouped["rating"].values))
}))
ofp.write(example.SerializeToString())
grouped_by_users = mapped_df.groupby("userId")
with tf.python_io.TFRecordWriter("data/items_for_user") as ofp:
for user, grouped in grouped_by_users:
example = tf.train.Example(features = tf.train.Features(feature = {
"key": tf.train.Feature(int64_list = tf.train.Int64List(value = [user])),
"indices": tf.train.Feature(int64_list = tf.train.Int64List(value = grouped["itemId"].values)),
"values": tf.train.Feature(float_list = tf.train.FloatList(value = grouped["rating"].values))
}))
ofp.write(example.SerializeToString())
!ls -lrt data
Explanation: Creating rows and columns datasets
End of explanation
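As a quick, optional check that the TFRecord files were written as intended, you can parse the first serialized example back out. This is only a sketch and assumes the data/items_for_user file created above:

```python
import tensorflow as tf

record = next(tf.python_io.tf_record_iterator("data/items_for_user"))
example = tf.train.Example()
example.ParseFromString(record)
print(example.features.feature["key"].int64_list.value[:1],
      example.features.feature["indices"].int64_list.value[:5],
      example.features.feature["values"].float_list.value[:5])
```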
import os
import tensorflow as tf
from tensorflow.python.lib.io import file_io
from tensorflow.contrib.factorization import WALSMatrixFactorization
def read_dataset(mode, args):
def decode_example(protos, vocab_size):
# TODO
return
def remap_keys(sparse_tensor):
# Current indices of our SparseTensor that we need to fix
bad_indices = sparse_tensor.indices # shape = (current_batch_size * (number_of_items/users[i] + 1), 2)
# Current values of our SparseTensor that we need to fix
bad_values = sparse_tensor.values # shape = (current_batch_size * (number_of_items/users[i] + 1),)
# Since batch is ordered, the last value for a batch index is the user
# Find where the batch index chages to extract the user rows
# 1 where user, else 0
user_mask = tf.concat(values = [bad_indices[1:,0] - bad_indices[:-1,0], tf.constant(value = [1], dtype = tf.int64)], axis = 0) # shape = (current_batch_size * (number_of_items/users[i] + 1), 2)
# Mask out the user rows from the values
good_values = tf.boolean_mask(tensor = bad_values, mask = tf.equal(x = user_mask, y = 0)) # shape = (current_batch_size * number_of_items/users[i],)
item_indices = tf.boolean_mask(tensor = bad_indices, mask = tf.equal(x = user_mask, y = 0)) # shape = (current_batch_size * number_of_items/users[i],)
user_indices = tf.boolean_mask(tensor = bad_indices, mask = tf.equal(x = user_mask, y = 1))[:, 1] # shape = (current_batch_size,)
good_user_indices = tf.gather(params = user_indices, indices = item_indices[:,0]) # shape = (current_batch_size * number_of_items/users[i],)
# User and item indices are rank 1, need to make rank 1 to concat
good_user_indices_expanded = tf.expand_dims(input = good_user_indices, axis = -1) # shape = (current_batch_size * number_of_items/users[i], 1)
good_item_indices_expanded = tf.expand_dims(input = item_indices[:, 1], axis = -1) # shape = (current_batch_size * number_of_items/users[i], 1)
good_indices = tf.concat(values = [good_user_indices_expanded, good_item_indices_expanded], axis = 1) # shape = (current_batch_size * number_of_items/users[i], 2)
remapped_sparse_tensor = tf.SparseTensor(indices = good_indices, values = good_values, dense_shape = sparse_tensor.dense_shape)
return remapped_sparse_tensor
def parse_tfrecords(filename, vocab_size):
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # indefinitely
else:
num_epochs = 1 # end-of-input after this
files = tf.gfile.Glob(filename = os.path.join(args["input_path"], filename))
# Create dataset from file list
dataset = tf.data.TFRecordDataset(files)
dataset = dataset.map(map_func = lambda x: decode_example(x, vocab_size))
dataset = dataset.repeat(count = num_epochs)
dataset = dataset.batch(batch_size = args["batch_size"])
dataset = dataset.map(map_func = lambda x: remap_keys(x))
return dataset.make_one_shot_iterator().get_next()
def _input_fn():
features = {
WALSMatrixFactorization.INPUT_ROWS: parse_tfrecords("items_for_user", args["nitems"]),
WALSMatrixFactorization.INPUT_COLS: parse_tfrecords("users_for_item", args["nusers"]),
WALSMatrixFactorization.PROJECT_ROW: tf.constant(True)
}
return features, None
return _input_fn
def input_cols():
return parse_tfrecords("users_for_item", args["nusers"])
return _input_fn#_subset
Explanation: To summarize, we created the following data files from collab_raw.csv:
<ol>
<li> ```collab_mapped.csv``` is essentially the same data as in ```collab_raw.csv``` except that ```visitorId``` and ```contentId``` which are business-specific have been mapped to ```userId``` and ```itemId``` which are enumerated in 0,1,2,.... The mappings themselves are stored in ```items.csv``` and ```users.csv``` so that they can be used during inference.
<li> ```users_for_item``` contains all the users/ratings for each item in TFExample format
<li> ```items_for_user``` contains all the items/ratings for each user in TFExample format
</ol>
Train with WALS
Once you have the dataset, do matrix factorization with WALS using the WALSMatrixFactorization in the contrib directory.
This is an estimator model, so it should be relatively familiar.
<p>
As usual, we write an input_fn to provide the data to the model, and then create the Estimator to do train_and_evaluate.
Because it is in contrib and hasn't moved over to tf.estimator yet, we use tf.contrib.learn.Experiment to handle the training loop.<p>
Make sure to replace <strong># TODO</strong> in below code.
End of explanation
def try_out():
with tf.Session() as sess:
fn = read_dataset(
mode = tf.estimator.ModeKeys.EVAL,
args = {"input_path": "data", "batch_size": 4, "nitems": NITEMS, "nusers": NUSERS})
feats, _ = fn()
print(feats["input_rows"].eval())
print(feats["input_rows"].eval())
try_out()
def find_top_k(user, item_factors, k):
all_items = tf.matmul(a = tf.expand_dims(input = user, axis = 0), b = tf.transpose(a = item_factors))
topk = tf.nn.top_k(input = all_items, k = k)
return tf.cast(x = topk.indices, dtype = tf.int64)
def batch_predict(args):
import numpy as np
with tf.Session() as sess:
estimator = tf.contrib.factorization.WALSMatrixFactorization(
num_rows = args["nusers"],
num_cols = args["nitems"],
embedding_dimension = args["n_embeds"],
model_dir = args["output_dir"])
# This is how you would get the row factors for out-of-vocab user data
# row_factors = list(estimator.get_projections(input_fn=read_dataset(tf.estimator.ModeKeys.EVAL, args)))
# user_factors = tf.convert_to_tensor(np.array(row_factors))
# But for in-vocab data, the row factors are already in the checkpoint
user_factors = tf.convert_to_tensor(value = estimator.get_row_factors()[0]) # (nusers, nembeds)
# In either case, we have to assume catalog doesn"t change, so col_factors are read in
item_factors = tf.convert_to_tensor(value = estimator.get_col_factors()[0])# (nitems, nembeds)
# For each user, find the top K items
topk = tf.squeeze(input = tf.map_fn(fn = lambda user: find_top_k(user, item_factors, args["topk"]), elems = user_factors, dtype = tf.int64))
with file_io.FileIO(os.path.join(args["output_dir"], "batch_pred.txt"), mode = 'w') as f:
for best_items_for_user in topk.eval():
f.write(",".join(str(x) for x in best_items_for_user) + '\n')
def train_and_evaluate(args):
train_steps = int(0.5 + (1.0 * args["num_epochs"] * args["nusers"]) / args["batch_size"])
steps_in_epoch = int(0.5 + args["nusers"] / args["batch_size"])
print("Will train for {} steps, evaluating once every {} steps".format(train_steps, steps_in_epoch))
def experiment_fn(output_dir):
return tf.contrib.learn.Experiment(
tf.contrib.factorization.WALSMatrixFactorization(
num_rows = args["nusers"],
num_cols = args["nitems"],
embedding_dimension = args["n_embeds"],
model_dir = args["output_dir"]),
train_input_fn = read_dataset(tf.estimator.ModeKeys.TRAIN, args),
eval_input_fn = read_dataset(tf.estimator.ModeKeys.EVAL, args),
train_steps = train_steps,
eval_steps = 1,
min_eval_frequency = steps_in_epoch
)
from tensorflow.contrib.learn.python.learn import learn_runner
learn_runner.run(experiment_fn = experiment_fn, output_dir = args["output_dir"])
batch_predict(args)
import shutil
shutil.rmtree(path = "wals_trained", ignore_errors=True)
train_and_evaluate({
"output_dir": "wals_trained",
"input_path": "data/",
"num_epochs": 0.05,
"nitems": NITEMS,
"nusers": NUSERS,
"batch_size": 512,
"n_embeds": 10,
"topk": 3
})
!ls wals_trained
!head wals_trained/batch_pred.txt
Explanation: This code is helpful in developing the input function. You don't need it in production.
End of explanation
os.environ["NITEMS"] = str(NITEMS)
os.environ["NUSERS"] = str(NUSERS)
%%bash
rm -rf wals.tar.gz wals_trained
gcloud ai-platform local train \
--module-name=walsmodel.task \
--package-path=${PWD}/walsmodel \
-- \
--output_dir=${PWD}/wals_trained \
--input_path=${PWD}/data \
--num_epochs=0.01 --nitems=${NITEMS} --nusers=${NUSERS} \
--job-dir=./tmp
Explanation: Run as a Python module
Let's run it as a Python module for just a few steps.
End of explanation
%%bash
gsutil -m cp data/* gs://${BUCKET}/wals/data
%%bash
OUTDIR=gs://${BUCKET}/wals/model_trained
JOBNAME=wals_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=walsmodel.task \
--package-path=${PWD}/walsmodel \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=BASIC_GPU \
--runtime-version=$TFVERSION \
-- \
--output_dir=$OUTDIR \
--input_path=gs://${BUCKET}/wals/data \
--num_epochs=10 --nitems=${NITEMS} --nusers=${NUSERS}
Explanation: Run on Cloud
End of explanation
def get_factors(args):
with tf.Session() as sess:
estimator = tf.contrib.factorization.WALSMatrixFactorization(
num_rows = args["nusers"],
num_cols = args["nitems"],
embedding_dimension = args["n_embeds"],
model_dir = args["output_dir"])
row_factors = estimator.get_row_factors()[0]
col_factors = estimator.get_col_factors()[0]
return row_factors, col_factors
args = {
"output_dir": "gs://{}/wals/model_trained".format(BUCKET),
"nitems": NITEMS,
"nusers": NUSERS,
"n_embeds": 10
}
user_embeddings, item_embeddings = get_factors(args)
print(user_embeddings[:3])
print(item_embeddings[:3])
Explanation: This took <b>10 minutes</b> for me.
Get row and column factors
Once you have a trained WALS model, you can get row and column factors (user and item embeddings) from the checkpoint file. We'll look at how to use these in the section on building a recommendation system using deep neural networks.
End of explanation
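As a hedged illustration of how these factors can be used directly (ahead of the deep-learning treatment), cosine similarity between rows of item_embeddings already gives item-to-item recommendations:

```python
import numpy as np

def similar_items(item_id, k=5):
    emb = item_embeddings / np.linalg.norm(item_embeddings, axis=1, keepdims=True)
    scores = emb @ emb[item_id]
    return np.argsort(scores)[::-1][1:k + 1]  # drop the item itself

print(similar_items(10))  # itemIds of the 5 items most similar to itemId 10
```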
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.decomposition import PCA
pca = PCA(n_components = 3)
pca.fit(user_embeddings)
user_embeddings_pca = pca.transform(user_embeddings)
fig = plt.figure(figsize = (8,8))
ax = fig.add_subplot(111, projection = "3d")
xs, ys, zs = user_embeddings_pca[::150].T
ax.scatter(xs, ys, zs)
Explanation: You can visualize the embedding vectors using dimensional reduction techniques such as PCA.
End of explanation |
14,504 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Traffic Sign Classification with Keras
Keras exists to make coding deep neural networks simpler. To demonstrate just how easy it is, you’re going to use Keras to build a convolutional neural network in a few dozen lines of code.
You’ll be connecting the concepts from the previous lessons to the methods that Keras provides.
Dataset
The network you'll build with Keras is similar to the example in Keras’s GitHub repository that builds out a convolutional neural network for MNIST.
However, instead of using the MNIST dataset, you're going to use the German Traffic Sign Recognition Benchmark dataset that you've used previously.
You can download pickle files with sanitized traffic sign data here
Step1: Overview
Here are the steps you'll take to build the network
Step2: Load the Data
Start by importing the data from the pickle file.
Step3: Preprocess the Data
Shuffle the data
Normalize the features using Min-Max scaling between -0.5 and 0.5
One-Hot Encode the labels
Shuffle the data
Hint
Step4: Normalize the features
Hint
Step5: One-Hot Encode the labels
Hint
Step6: Keras Sequential Model
```python
from keras.models import Sequential
Create the Sequential model
model = Sequential()
```
The `keras.models.Sequential` class is a wrapper for the neural network model. Just like many of the class models in scikit-learn, it provides common functions like `fit()`, `evaluate()`, and `compile()`. We'll cover these functions as we get to them. Let's start looking at the layers of the model.
Keras Layer
A Keras layer is just like a neural network layer. It can be fully connected, max pool, activation, etc. You can add a layer to the model using the model's add() function. For example, a simple model would look like this
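The example referred to above (the same listing appears in full in the code section further down):
```python
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten

# Create the Sequential model
model = Sequential()

# 1st Layer - flatten the 32x32x3 image input
model.add(Flatten(input_shape=(32, 32, 3)))

# 2nd Layer - fully connected layer
model.add(Dense(100))

# 3rd Layer - ReLU activation
model.add(Activation('relu'))
```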
Step7: Training a Sequential Model
You built a multi-layer neural network in Keras, now let's look at training a neural network.
```python
from keras.models import Sequential
from keras.layers.core import Dense, Activation
model = Sequential()
...
Configures the learning process and metrics
model.compile('sgd', 'mean_squared_error', ['accuracy'])
Train the model
History is a record of training loss and metrics
history = model.fit(x_train_data, Y_train_data, batch_size=128, nb_epoch=2, validation_split=0.2)
Calculate test score
test_score = model.evaluate(x_test_data, Y_test_data)
```
The code above configures, trains, and tests the model. The line `model.compile('sgd', 'mean_squared_error', ['accuracy'])` configures the model's optimizer to `'sgd'` (stochastic gradient descent), the loss to `'mean_squared_error'`, and the metric to `'accuracy'`.
You can find more optimizers here, loss functions here, and more metrics here.
To train the model, use the fit() function as shown in model.fit(x_train_data, Y_train_data, batch_size=128, nb_epoch=2, validation_split=0.2). The validation_split parameter will split off a percentage of the training dataset to be used to validate the model. The model can be further tested with the test dataset using the evaluate() function, as shown in the last line.
Train the Network
Compile the network using adam optimizer and categorical_crossentropy loss function.
Train the network for ten epochs and validate with 20% of the training data.
Step8: Convolutions
Re-construct the previous network
Add a convolutional layer with 32 filters, a 3x3 kernel, and valid padding before the flatten layer.
Add a ReLU activation after the convolutional layer.
Hint 1
Step9: Pooling
Re-construct the network
Add a 2x2 max pooling layer immediately following your convolutional layer.
Step10: Dropout
Re-construct the network
Add a dropout layer after the pooling layer. Set the dropout rate to 50%.
Step11: Optimization
Congratulations! You've built a neural network with convolutions, pooling, dropout, and fully-connected layers, all in just a few lines of code.
Have fun with the model and see how well you can do! Add more layers, or regularization, or different padding, or batches, or more training epochs.
What is the best validation accuracy you can achieve?
Step12: Best Validation Accuracy | Python Code:
from urllib.request import urlretrieve
from os.path import isfile
from tqdm import tqdm
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile('train.p'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Train Dataset') as pbar:
urlretrieve(
'https://s3.amazonaws.com/udacity-sdc/datasets/german_traffic_sign_benchmark/train.p',
'train.p',
pbar.hook)
if not isfile('test.p'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Test Dataset') as pbar:
urlretrieve(
'https://s3.amazonaws.com/udacity-sdc/datasets/german_traffic_sign_benchmark/test.p',
'test.p',
pbar.hook)
print('Training and Test data downloaded.')
Explanation: Traffic Sign Classification with Keras
Keras exists to make coding deep neural networks simpler. To demonstrate just how easy it is, you’re going to use Keras to build a convolutional neural network in a few dozen lines of code.
You’ll be connecting the concepts from the previous lessons to the methods that Keras provides.
Dataset
The network you'll build with Keras is similar to the example in Keras’s GitHub repository that builds out a convolutional neural network for MNIST.
However, instead of using the MNIST dataset, you're going to use the German Traffic Sign Recognition Benchmark dataset that you've used previously.
You can download pickle files with sanitized traffic sign data here:
End of explanation
import pickle
import numpy as np
import math
# Fix error with TF and Keras
import tensorflow as tf
tf.python.control_flow_ops = tf
print('Modules loaded.')
Explanation: Overview
Here are the steps you'll take to build the network:
Load the training data.
Preprocess the data.
Build a feedforward neural network to classify traffic signs.
Build a convolutional neural network to classify traffic signs.
Evaluate the final neural network on testing data.
Keep an eye on the network’s accuracy over time. Once the accuracy reaches the 98% range, you can be confident that you’ve built and trained an effective model.
End of explanation
with open('train.p', 'rb') as f:
data = pickle.load(f)
# TODO: Load the feature data to the variable X_train
X_train = data['features']
# TODO: Load the label data to the variable y_train
y_train = data['labels']
# STOP: Do not change the tests below. Your implementation should pass these tests.
assert np.array_equal(X_train, data['features']), 'X_train not set to data[\'features\'].'
assert np.array_equal(y_train, data['labels']), 'y_train not set to data[\'labels\'].'
print('Tests passed.')
Explanation: Load the Data
Start by importing the data from the pickle file.
End of explanation
# TODO: Shuffle the data
from sklearn.utils import shuffle
X_train, y_train = shuffle(X_train, y_train)
# STOP: Do not change the tests below. Your implementation should pass these tests.
assert X_train.shape == data['features'].shape, 'X_train has changed shape. The shape shouldn\'t change when shuffling.'
assert y_train.shape == data['labels'].shape, 'y_train has changed shape. The shape shouldn\'t change when shuffling.'
assert not np.array_equal(X_train, data['features']), 'X_train not shuffled.'
assert not np.array_equal(y_train, data['labels']), 'y_train not shuffled.'
print('Tests passed.')
Explanation: Preprocess the Data
Shuffle the data
Normalize the features using Min-Max scaling between -0.5 and 0.5
One-Hot Encode the labels
Shuffle the data
Hint: You can use the scikit-learn shuffle function to shuffle the data.
End of explanation
# TODO: Normalize the data features to the variable X_normalized
def normalize(image_data):
a = -0.5
b = 0.5
color_min = 0.0
color_max = 255.0
return a + ( ( (image_data - color_min) * (b - a) )/(color_max - color_min))
X_normalized = normalize(X_train)
# STOP: Do not change the tests below. Your implementation should pass these tests.
assert math.isclose(np.min(X_normalized), -0.5, abs_tol=1e-5) and math.isclose(np.max(X_normalized), 0.5, abs_tol=1e-5), 'The range of the training data is: {} to {}. It must be -0.5 to 0.5'.format(np.min(X_normalized), np.max(X_normalized))
print('Tests passed.')
Explanation: Normalize the features
Hint: You solved this in TensorFlow lab Problem 1.
End of explanation
# TODO: One Hot encode the labels to the variable y_one_hot
from sklearn.preprocessing import LabelBinarizer
label_binarizer = LabelBinarizer()
y_one_hot = label_binarizer.fit_transform(y_train)
# STOP: Do not change the tests below. Your implementation should pass these tests.
import collections
assert y_one_hot.shape == (39209, 43), 'y_one_hot is not the correct shape. It\'s {}, it should be (39209, 43)'.format(y_one_hot.shape)
assert next((False for y in y_one_hot if collections.Counter(y) != {0: 42, 1: 1}), True), 'y_one_hot not one-hot encoded.'
print('Tests passed.')
Explanation: One-Hot Encode the labels
Hint: You can use the scikit-learn LabelBinarizer function to one-hot encode the labels.
End of explanation
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten
model = Sequential()
# TODO: Build a Multi-layer feedforward neural network with Keras here.
# 1st Layer - Add a flatten layer
model.add(Flatten(input_shape=(32, 32, 3)))
# 2nd Layer - Add a Dense layer
model.add(Dense(128))
# 3rd Layer - Add a ReLU activation layer
model.add(Activation('relu'))
# 4th Layer - Add a fully connected layer
model.add(Dense(43))
# 5th Layer - Add a softmax activation layer
model.add(Activation('softmax'))
# STOP: Do not change the tests below. Your implementation should pass these tests.
from keras.layers.core import Dense, Activation, Flatten
from keras.activations import relu, softmax
def check_layers(layers, true_layers):
assert len(true_layers) != 0, 'No layers found'
for layer_i in range(len(layers)):
assert isinstance(true_layers[layer_i], layers[layer_i]), 'Layer {} is not a {} layer'.format(layer_i+1, layers[layer_i].__name__)
assert len(true_layers) == len(layers), '{} layers found, should be {} layers'.format(len(true_layers), len(layers))
check_layers([Flatten, Dense, Activation, Dense, Activation], model.layers)
assert model.layers[0].input_shape == (None, 32, 32, 3), 'First layer input shape is wrong, it should be (32, 32, 3)'
assert model.layers[1].output_shape == (None, 128), 'Second layer output is wrong, it should be (128)'
assert model.layers[2].activation == relu, 'Third layer not a relu activation layer'
assert model.layers[3].output_shape == (None, 43), 'Fourth layer output is wrong, it should be (43)'
assert model.layers[4].activation == softmax, 'Fifth layer not a softmax activation layer'
print('Tests passed.')
Explanation: Keras Sequential Model
```python
from keras.models import Sequential
Create the Sequential model
model = Sequential()
```
The `keras.models.Sequential` class is a wrapper for the neural network model. Just like many of the class models in scikit-learn, it provides common functions like `fit()`, `evaluate()`, and `compile()`. We'll cover these functions as we get to them. Let's start looking at the layers of the model.
Keras Layer
A Keras layer is just like a neural network layer. It can be fully connected, max pool, activation, etc. You can add a layer to the model using the model's add() function. For example, a simple model would look like this:
```python
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten
Create the Sequential model
model = Sequential()
1st Layer - Add a flatten layer
model.add(Flatten(input_shape=(32, 32, 3)))
2nd Layer - Add a fully connected layer
model.add(Dense(100))
3rd Layer - Add a ReLU activation layer
model.add(Activation('relu'))
4th Layer - Add a fully connected layer
model.add(Dense(60))
5th Layer - Add a ReLU activation layer
model.add(Activation('relu'))
```
Keras will automatically infer the shape of all layers after the first layer. This means you only have to set the input dimensions for the first layer.
The first layer from above, model.add(Flatten(input_shape=(32, 32, 3))), sets the input dimension to (32, 32, 3) and output dimension to (3072=32*32*3). The second layer takes in the output of the first layer and sets the output dimenions to (100). This chain of passing output to the next layer continues until the last layer, which is the output of the model.
Build a Multi-Layer Feedforward Network
Build a multi-layer feedforward neural network to classify the traffic sign images.
Set the first layer to a Flatten layer with the input_shape set to (32, 32, 3)
Set the second layer to Dense layer width to 128 output.
Use a ReLU activation function after the second layer.
Set the output layer width to 43, since there are 43 classes in the dataset.
Use a softmax activation function after the output layer.
To get started, review the Keras documentation about models and layers.
The Keras example of a Multi-Layer Perceptron network is similar to what you need to do here. Use that as a guide, but keep in mind that there are a number of differences.
End of explanation
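One small check that is not part of the assignment but is often useful here: printing the model summary shows the output shape Keras inferred for every layer of the network built above.
```python
# Optional sanity check: inspect the inferred layer shapes.
model.summary()
```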
# TODO: Compile and train the model here.
# Configures the learning process and metrics
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
# Train the model
# History is a record of training loss and metrics
history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=10, validation_split=0.2)
# STOP: Do not change the tests below. Your implementation should pass these tests.
from keras.optimizers import Adam
assert model.loss == 'categorical_crossentropy', 'Not using categorical_crossentropy loss function'
assert isinstance(model.optimizer, Adam), 'Not using adam optimizer'
assert len(history.history['acc']) == 10, 'You\'re using {} epochs when you need to use 10 epochs.'.format(len(history.history['acc']))
assert history.history['acc'][-1] > 0.92, 'The training accuracy was: %.3f. It shoud be greater than 0.92' % history.history['acc'][-1]
assert history.history['val_acc'][-1] > 0.85, 'The validation accuracy is: %.3f. It shoud be greater than 0.85' % history.history['val_acc'][-1]
print('Tests passed.')
Explanation: Training a Sequential Model
You built a multi-layer neural network in Keras, now let's look at training a neural network.
```python
from keras.models import Sequential
from keras.layers.core import Dense, Activation
model = Sequential()
...
Configures the learning process and metrics
model.compile('sgd', 'mean_squared_error', ['accuracy'])
Train the model
History is a record of training loss and metrics
history = model.fit(x_train_data, Y_train_data, batch_size=128, nb_epoch=2, validation_split=0.2)
Calculate test score
test_score = model.evaluate(x_test_data, Y_test_data)
```
The code above configures, trains, and tests the model. The line `model.compile('sgd', 'mean_squared_error', ['accuracy'])` configures the model's optimizer to `'sgd'` (stochastic gradient descent), the loss to `'mean_squared_error'`, and the metric to `'accuracy'`.
You can find more optimizers here, loss functions here, and more metrics here.
To train the model, use the fit() function as shown in model.fit(x_train_data, Y_train_data, batch_size=128, nb_epoch=2, validation_split=0.2). The validation_split parameter will split off a percentage of the training dataset to be used to validate the model. The model can be further tested with the test dataset using the evaluate() function, as shown in the last line.
Train the Network
Compile the network using adam optimizer and categorical_crossentropy loss function.
Train the network for ten epochs and validate with 20% of the training data.
End of explanation
# TODO: Re-construct the network and add a convolutional layer before the flatten layer.
from keras.layers import Convolution2D
nb_filters = 32
kernel_size = [3, 3]
model = Sequential()
#Add a convolutional layer with 32 filters, a 3x3 kernel, and valid padding before the flatten layer.
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1], border_mode='valid', input_shape=(32, 32, 3)))
#Add a ReLU activation after the convolutional layer.
model.add(Activation('relu'))
# 1st Layer - Add a flatten layer
model.add(Flatten())
# 2nd Layer - Add a Dense layer
model.add(Dense(128))
# 3rd Layer - Add a ReLU activation layer
model.add(Activation('relu'))
# 4th Layer - Add a fully connected layer
model.add(Dense(43))
# 5th Layer - Add a softmax activation layer
model.add(Activation('softmax'))
# STOP: Do not change the tests below. Your implementation should pass these tests.
from keras.layers.core import Dense, Activation, Flatten
from keras.layers.convolutional import Convolution2D
check_layers([Convolution2D, Activation, Flatten, Dense, Activation, Dense, Activation], model.layers)
assert model.layers[0].input_shape == (None, 32, 32, 3), 'First layer input shape is wrong, it should be (32, 32, 3)'
assert model.layers[0].nb_filter == 32, 'Wrong number of filters, it should be 32'
assert model.layers[0].nb_col == model.layers[0].nb_row == 3, 'Kernel size is wrong, it should be a 3x3'
assert model.layers[0].border_mode == 'valid', 'Wrong padding, it should be valid'
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=2, validation_split=0.2)
assert(history.history['val_acc'][-1] > 0.91), "The validation accuracy is: %.3f. It should be greater than 0.91" % history.history['val_acc'][-1]
print('Tests passed.')
Explanation: Convolutions
Re-construct the previous network
Add a convolutional layer with 32 filters, a 3x3 kernel, and valid padding before the flatten layer.
Add a ReLU activation after the convolutional layer.
Hint 1: The Keras example of a convolutional neural network for MNIST would be a good example to review.
End of explanation
# TODO: Re-construct the network and add a pooling layer after the convolutional layer.
from keras.layers import MaxPooling2D
pool_size = [2, 2]
model = Sequential()
#Add a convolutional layer with 32 filters, a 3x3 kernel, and valid padding before the flatten layer.
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1], border_mode='valid', input_shape=(32, 32, 3)))
#Add a 2x2 max pooling layer immediately following your convolutional layer.
model.add(MaxPooling2D(pool_size=pool_size))
#Add a ReLU activation after the convolutional layer.
model.add(Activation('relu'))
# 1st Layer - Add a flatten layer
model.add(Flatten())
# 2nd Layer - Add a Dense layer
model.add(Dense(128))
# 3rd Layer - Add a ReLU activation layer
model.add(Activation('relu'))
# 4th Layer - Add a fully connected layer
model.add(Dense(43))
# 5th Layer - Add a softmax activation layer
model.add(Activation('softmax'))
# STOP: Do not change the tests below. Your implementation should pass these tests.
from keras.layers.core import Dense, Activation, Flatten
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
check_layers([Convolution2D, MaxPooling2D, Activation, Flatten, Dense, Activation, Dense, Activation], model.layers)
assert model.layers[1].pool_size == (2, 2), 'Second layer must be a max pool layer with pool size of 2x2'
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=2, validation_split=0.2)
assert(history.history['val_acc'][-1] > 0.91), "The validation accuracy is: %.3f. It should be greater than 0.91" % history.history['val_acc'][-1]
print('Tests passed.')
Explanation: Pooling
Re-construct the network
Add a 2x2 max pooling layer immediately following your convolutional layer.
End of explanation
# TODO: Re-construct the network and add dropout after the pooling layer.
from keras.layers import Dropout
model = Sequential()
#Add a convolutional layer with 32 filters, a 3x3 kernel, and valid padding before the flatten layer.
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1], border_mode='valid', input_shape=(32, 32, 3)))
#Add a 2x2 max pooling layer immediately following your convolutional layer.
model.add(MaxPooling2D(pool_size=pool_size))
#Add a dropout layer after the pooling layer. Set the dropout rate to 50%.
model.add(Dropout(0.5))
#Add a ReLU activation after the convolutional layer.
model.add(Activation('relu'))
# 1st Layer - Add a flatten layer
model.add(Flatten())
# 2nd Layer - Add a Dense layer
model.add(Dense(128))
# 3rd Layer - Add a ReLU activation layer
model.add(Activation('relu'))
# 4th Layer - Add a fully connected layer
model.add(Dense(43))
# 5th Layer - Add a softmax activation layer
model.add(Activation('softmax'))
# STOP: Do not change the tests below. Your implementation should pass these tests.
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
check_layers([Convolution2D, MaxPooling2D, Dropout, Activation, Flatten, Dense, Activation, Dense, Activation], model.layers)
assert model.layers[2].p == 0.5, 'Third layer should be a Dropout of 50%'
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=2, validation_split=0.2)
assert(history.history['val_acc'][-1] > 0.91), "The validation accuracy is: %.3f. It should be greater than 0.91" % history.history['val_acc'][-1]
print('Tests passed.')
Explanation: Dropout
Re-construct the network
Add a dropout layer after the pooling layer. Set the dropout rate to 50%.
End of explanation
# TODO: Build a model
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=(32, 32, 3)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(43))
model.add(Activation('softmax'))
# TODO: Compile and train the model
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=10, validation_split=0.2)
Explanation: Optimization
Congratulations! You've built a neural network with convolutions, pooling, dropout, and fully-connected layers, all in just a few lines of code.
Have fun with the model and see how well you can do! Add more layers, or regularization, or different padding, or batches, or more training epochs.
What is the best validation accuracy you can achieve?
End of explanation
# TODO: Load test data
with open('test.p', 'rb') as f:
samples = pickle.load(f)
X_test = samples['features']
y_test = samples['labels']
# TODO: Preprocess data & one-hot encode the labels
X_test, y_test = shuffle(X_test, y_test)
X_test = normalize(X_test)
label_binarizer = LabelBinarizer()
y_one_hot = label_binarizer.fit_transform(y_test)
# TODO: Evaluate model on test data
score = model.evaluate(X_test, y_one_hot, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
Explanation: Best Validation Accuracy: 0.9911
Testing
Once you've picked out your best model, it's time to test it.
Load up the test data and use the evaluate() method to see how well it does.
Hint 1: The evaluate() method should return an array of numbers. Use the metrics_names property to get the labels.
End of explanation |
14,505 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Head model and forward computation
The aim of this tutorial is to be a getting started for forward
computation.
For more extensive details and presentation of the general
concepts for forward modeling, see ch_forward.
Step1: Computing the forward operator
To compute a forward operator we need
Step2: Visualizing the coregistration
The coregistration is the operation that allows to position the head and the
sensors in a common coordinate system. In the MNE software the transformation
to align the head and the sensors in stored in a so-called trans file.
It is a FIF file that ends with -trans.fif. It can be obtained with
Step3: Compute Source Space
The source space defines the position and orientation of the candidate source
locations. There are two types of source spaces
Step4: The surface based source space src contains two parts, one for the left
hemisphere (4098 locations) and one for the right hemisphere (4098
locations). Sources can be visualized on top of the BEM surfaces in purple.
Step5: To compute a volume based source space defined with a grid of candidate
dipoles inside a sphere of radius 90mm centered at (0.0, 0.0, 40.0) mm
you can use the following code.
Obviously here, the sphere is not perfect. It is not restricted to the
brain and it can miss some parts of the cortex.
Step6: To compute a volume based source space defined with a grid of candidate
dipoles inside the brain (requires the
Step7: <div class="alert alert-info"><h4>Note</h4><p>Some sources may appear to be outside the BEM inner skull contour.
This is because the ``slices`` are decimated for plotting here.
Each slice in the figure actually represents several MRI slices,
but only the MRI voxels and BEM boundaries for a single (midpoint
of the given slice range) slice are shown, whereas the source space
points plotted on that midpoint slice consist of all points
for which that slice (out of all slices shown) was the closest.</p></div>
Now let's see how to view all sources in 3D.
Step8: Compute forward solution
We can now compute the forward solution.
To reduce computation we'll just compute a single layer BEM (just inner
skull) that can then be used for MEG (not EEG).
We specify if we want a one-layer or a three-layer BEM using the
conductivity parameter.
The BEM solution requires a BEM model which describes the geometry
of the head the conductivities of the different tissues.
Step9: Note that the
Step10: We can explore the content of fwd to access the numpy array that contains
the gain matrix.
Step11: To extract the numpy array containing the forward operator corresponding to
the source space fwd['src'] with cortical orientation constraint
we can use the following | Python Code:
import os.path as op
import mne
from mne.datasets import sample
data_path = sample.data_path()
# the raw file containing the channel location + types
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
# The paths to Freesurfer reconstructions
subjects_dir = data_path + '/subjects'
subject = 'sample'
Explanation: Head model and forward computation
The aim of this tutorial is to be a getting started for forward
computation.
For more extensive details and presentation of the general
concepts for forward modeling, see ch_forward.
End of explanation
mne.viz.plot_bem(subject=subject, subjects_dir=subjects_dir,
brain_surfaces='white', orientation='coronal')
Explanation: Computing the forward operator
To compute a forward operator we need:
a -trans.fif file that contains the coregistration info.
a source space
the :term:BEM surfaces
Compute and visualize BEM surfaces
The :term:BEM surfaces are the triangulations of the interfaces between
different tissues needed for forward computation. These surfaces are for
example the inner skull surface, the outer skull surface and the outer skin
surface, a.k.a. scalp surface.
Computing the BEM surfaces requires FreeSurfer and makes use of either of
the two following command line tools:
gen_mne_watershed_bem
gen_mne_flash_bem
Or by calling in a Python script one of the functions
:func:mne.bem.make_watershed_bem or :func:mne.bem.make_flash_bem.
Here we'll assume it's already computed. It takes a few minutes per subject.
For EEG we use 3 layers (inner skull, outer skull, and skin) while for
MEG 1 layer (inner skull) is enough.
Let's look at these surfaces. The function :func:mne.viz.plot_bem
assumes that you have the bem folder of your subject's FreeSurfer
reconstruction, containing the necessary files.
End of explanation
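If the BEM surfaces had not been computed yet, the Python route mentioned above would look roughly like the sketch below (an assumption-heavy illustration: it needs FreeSurfer installed and a finished reconstruction, and it can take several minutes per subject).
```python
# Sketch only -- requires FreeSurfer and an existing reconstruction.
mne.bem.make_watershed_bem(subject=subject, subjects_dir=subjects_dir,
                           overwrite=True)
```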
# The transformation file obtained by coregistration
trans = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'
info = mne.io.read_info(raw_fname)
# Here we look at the dense head, which isn't used for BEM computations but
# is useful for coregistration.
mne.viz.plot_alignment(info, trans, subject=subject, dig=True,
meg=['helmet', 'sensors'], subjects_dir=subjects_dir,
surfaces='head-dense')
Explanation: Visualizing the coregistration
The coregistration is the operation that allows to position the head and the
sensors in a common coordinate system. In the MNE software the transformation
to align the head and the sensors in stored in a so-called trans file.
It is a FIF file that ends with -trans.fif. It can be obtained with
:func:mne.gui.coregistration (or its convenient command line
equivalent gen_mne_coreg), or mrilab if you're using a Neuromag
system.
Here we assume the coregistration is done, so we just visually check the
alignment with the following code.
End of explanation
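A quick way to inspect the coregistration itself (a small sketch, not in the original) is to read the trans file and print the resulting head-to-MRI transform.
```python
# Read and display the transform stored in the -trans.fif file.
trans_obj = mne.read_trans(trans)
print(trans_obj)
```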
src = mne.setup_source_space(subject, spacing='oct6', add_dist='patch',
subjects_dir=subjects_dir)
print(src)
Explanation: Compute Source Space
The source space defines the position and orientation of the candidate source
locations. There are two types of source spaces:
surface-based source space when the candidates are confined to a
surface.
volumetric or discrete source space when the candidates are discrete,
arbitrarily located source points bounded by the surface.
Surface-based source space is computed using
:func:mne.setup_source_space, while volumetric source space is computed
using :func:mne.setup_volume_source_space.
We will now compute a surface-based source space with an 'oct6'
resolution. See setting_up_source_space for details on source space
definition and spacing parameter.
End of explanation
mne.viz.plot_bem(subject=subject, subjects_dir=subjects_dir,
brain_surfaces='white', src=src, orientation='coronal')
Explanation: The surface based source space src contains two parts, one for the left
hemisphere (4098 locations) and one for the right hemisphere (4098
locations). Sources can be visualized on top of the BEM surfaces in purple.
End of explanation
sphere = (0.0, 0.0, 0.04, 0.09)
vol_src = mne.setup_volume_source_space(subject, subjects_dir=subjects_dir,
sphere=sphere, sphere_units='m')
print(vol_src)
mne.viz.plot_bem(subject=subject, subjects_dir=subjects_dir,
brain_surfaces='white', src=vol_src, orientation='coronal')
Explanation: To compute a volume based source space defined with a grid of candidate
dipoles inside a sphere of radius 90mm centered at (0.0, 0.0, 40.0) mm
you can use the following code.
Obviously here, the sphere is not perfect. It is not restricted to the
brain and it can miss some parts of the cortex.
End of explanation
surface = op.join(subjects_dir, subject, 'bem', 'inner_skull.surf')
vol_src = mne.setup_volume_source_space(subject, subjects_dir=subjects_dir,
surface=surface)
print(vol_src)
mne.viz.plot_bem(subject=subject, subjects_dir=subjects_dir,
brain_surfaces='white', src=vol_src, orientation='coronal')
Explanation: To compute a volume based source space defined with a grid of candidate
dipoles inside the brain (requires the :term:BEM surfaces) you can use the
following.
End of explanation
fig = mne.viz.plot_alignment(subject=subject, subjects_dir=subjects_dir,
surfaces='white', coord_frame='head',
src=src)
mne.viz.set_3d_view(fig, azimuth=173.78, elevation=101.75,
distance=0.30, focalpoint=(-0.03, -0.01, 0.03))
Explanation: <div class="alert alert-info"><h4>Note</h4><p>Some sources may appear to be outside the BEM inner skull contour.
This is because the ``slices`` are decimated for plotting here.
Each slice in the figure actually represents several MRI slices,
but only the MRI voxels and BEM boundaries for a single (midpoint
of the given slice range) slice are shown, whereas the source space
points plotted on that midpoint slice consist of all points
for which that slice (out of all slices shown) was the closest.</p></div>
Now let's see how to view all sources in 3D.
End of explanation
conductivity = (0.3,) # for single layer
# conductivity = (0.3, 0.006, 0.3) # for three layers
model = mne.make_bem_model(subject='sample', ico=4,
conductivity=conductivity,
subjects_dir=subjects_dir)
bem = mne.make_bem_solution(model)
Explanation: Compute forward solution
We can now compute the forward solution.
To reduce computation we'll just compute a single layer BEM (just inner
skull) that can then be used for MEG (not EEG).
We specify if we want a one-layer or a three-layer BEM using the
conductivity parameter.
The BEM solution requires a BEM model which describes the geometry
of the head the conductivities of the different tissues.
End of explanation
fwd = mne.make_forward_solution(raw_fname, trans=trans, src=src, bem=bem,
meg=True, eeg=False, mindist=5.0, n_jobs=2)
print(fwd)
Explanation: Note that the :term:BEM does not involve any use of the trans file. The BEM
only depends on the head geometry and conductivities.
It is therefore independent from the MEG data and the head position.
Let's now compute the forward operator, commonly referred to as the
gain or leadfield matrix.
See :func:mne.make_forward_solution for details on the meaning of each
parameter.
End of explanation
leadfield = fwd['sol']['data']
print("Leadfield size : %d sensors x %d dipoles" % leadfield.shape)
Explanation: We can explore the content of fwd to access the numpy array that contains
the gain matrix.
End of explanation
fwd_fixed = mne.convert_forward_solution(fwd, surf_ori=True, force_fixed=True,
use_cps=True)
leadfield = fwd_fixed['sol']['data']
print("Leadfield size : %d sensors x %d dipoles" % leadfield.shape)
Explanation: To extract the numpy array containing the forward operator corresponding to
the source space fwd['src'] with cortical orientation constraint
we can use the following:
End of explanation |
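A common follow-up, not shown in the original, is to persist the computed forward operator so it does not have to be recomputed; a minimal sketch (the file name is an arbitrary choice):
```python
# Save the forward solution for later use (MNE expects a name ending in -fwd.fif).
mne.write_forward_solution('sample_audvis-meg-fwd.fif', fwd, overwrite=True)
```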
14,506 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
70. Obtaining and shaping the data
Using the gold-standard data for sentence polarity analysis, create the labelled file (sentiment.txt) as follows:
Prepend the string "+1 " to the beginning of every line of rt-polarity.pos (the polarity label "+1" and a space are followed by the text of a positive sentence).
Prepend the string "-1 " to the beginning of every line of rt-polarity.neg (the polarity label "-1" and a space are followed by the text of a negative sentence).
Concatenate the contents of 1 and 2 above and shuffle the lines randomly.
After creating sentiment.txt, check the number of positive examples (positive sentences) and negative examples (negative sentences).
Step1: 71. Stop words
Create a reasonable list of English stop words (a stop list). Then implement a function that returns true when the word (string) passed as its argument is contained in the stop list and false otherwise. Also write a test for that function.
Step2: 72. Feature extraction
Design features that look useful for polarity analysis and extract them from the training data. As a minimal baseline, remove the stop words from each review and stem every remaining word.
Step3: No.73
Train a logistic regression model using the features extracted in task 72.
Step4: The argument to TfidfVectorizer.fit() is a "list" of words | Python Code:
import random
with open('rt-polarity.neg.utf8', 'r') as f:
negative_list = ['-1 '+i for i in f]
with open("rt-polarity.pos.utf8", "r") as f:
    positive_list = ['+1 '+i for i in f]  # keep the space after the label, as the task specifies
#for sentence in temp:
# positive_list.append('+1 '+"".join([i.encode('replace') for i in sentence]))
concatenate = positive_list + negative_list
random.shuffle(concatenate)
with open('sentiment.txt', 'w') as f:
f.write("".join(concatenate))
Explanation: 70. Obtaining and shaping the data
Using the gold-standard data for sentence polarity analysis, create the labelled file (sentiment.txt) as follows:
Prepend the string "+1 " to the beginning of every line of rt-polarity.pos (the polarity label "+1" and a space are followed by the text of a positive sentence).
Prepend the string "-1 " to the beginning of every line of rt-polarity.neg (the polarity label "-1" and a space are followed by the text of a negative sentence).
Concatenate the contents of 1 and 2 above and shuffle the lines randomly.
After creating sentiment.txt, check the number of positive examples (positive sentences) and negative examples (negative sentences).
End of explanation
from nltk.corpus import stopwords
stopwords_list = [s for s in stopwords.words('english')]
print(stopwords_list)
Explanation: 71. Stop words
Create a reasonable list of English stop words (a stop list). Then implement a function that returns true when the word (string) passed as its argument is contained in the stop list and false otherwise. Also write a test for that function.
End of explanation
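The exercise also asks for a membership-test function plus a test for it; a minimal sketch (assuming case-insensitive matching is acceptable):
```python
def is_stopword(word):
    # True when the lower-cased word appears in the stop list built above.
    return word.lower() in stopwords_list

assert is_stopword('The')
assert not is_stopword('movie')
```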
from nltk.stem.porter import PorterStemmer
def feature(sentence):
porter = PorterStemmer()
result = []
label = sentence[0:2]
for s in sentence[3:].split(' '):
try:
result.append(porter.stem(s))
except KeyError:
pass
return (label + " " + " ".join(result))
feature("+1 intensely romantic , thought-provoking and even an engaging mystery . ")
Explanation: 72. Feature extraction
Design features that look useful for polarity analysis and extract them from the training data. As a minimal baseline, remove the stop words from each review and stem every remaining word.
End of explanation
# import passages to construct logistic regression and learn the model.
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
tfv = TfidfVectorizer(encoding='utf-8', lowercase=True,
stop_words=ENGLISH_STOP_WORDS,
#token_pattern='(?u)\b\w\w+\b',
ngram_range=(1, 2))
with open('sentiment.txt') as f:
features = [(s[:2], s[3:]) for s in f]
# make label list
label = [i[0] for i in features]
# make sentence list that is removed English Stop Words
sentence = []
for i in features:
temp = i[1].split(' ')
temp2 = [i+' ' for i in temp]
s = "".join(temp2)
sentence.append(s)
tfv_vector = tfv.fit_transform("".join(sentence).split(' '))
Explanation: No.73
Train a logistic regression model using the features extracted in task 72.
End of explanation
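The cell above stops at building the TF-IDF vocabulary; a hedged sketch of actually fitting the logistic regression (it assumes whole reviews are vectorized, so that each matrix row lines up with one label):
```python
X_tfidf = tfv.fit_transform(sentence)   # one row per review
clf = LogisticRegression()
clf.fit(X_tfidf, label)
print(clf.score(X_tfidf, label))        # training accuracy
```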
tfv_vector.shape  # inspect the dimensions of the TF-IDF matrix
Explanation: The argument to TfidfVectorizer.fit() is a "list" of words
End of explanation |
14,507 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Create Conference Consultant Sayuri
Steps
Make Training Data
Collect images taken during meetings
Convert the images into data (features)
Attach labels
Make Model
Select the features to use in the model
Train the model
Visualize the prediction results
Save the Model
Save the model
Step1: Make Training Data
Collect meeting images from Google and similar sources and store them in the images folder. All images use the PNG extension.
The collected images are converted into features using Rekognition; make_training_data.py is responsible for this step.
The converted data is saved as training_data.csv.
images_to_data
Step2: An SVM is used as the model that classifies meetings. With it we search for effective features.
Step3: From here we pick a few features and build the model. Because the data set is small, we keep the model as simple as possible and narrow it down to two features. Only one smile-related feature is kept; after the review below, the remaining feature chosen is pose>pitch min.
emotion: detection is unstable
sex: deciding purely on whether men/women are present feels wrong, so it is excluded
mouth_open/eye_close: depends on the exact moment the picture was taken, so they are excluded
the pose features remain, and among them pose>pitch min gave the best accuracy, so it is adopted as the second feature
A model is built and trained from the selected features. An SVM is used, and its parameters are optimized with grid search.
Once the model is built, it is saved to model/conf_predict.pkl. | Python Code:
# enable showing matplotlib image inline
%matplotlib inline
# autoreload module
%load_ext autoreload
%autoreload 2
# load local package
import sys
import os
sys.path.append(os.path.join(os.getcwd(), "../../../")) # load project root
Explanation: Create Conference Consultant Sayuri
Steps
Make Training Data
Collect images taken during meetings
Convert the images into data (features)
Attach labels
Make Model
Select the features to use in the model
Train the model
Visualize the prediction results
Save the Model
Save the model
End of explanation
from sklearn import preprocessing
import make_model as maker
dataset = maker.load_data()
header = dataset["header"][1:] # exclude label column
y = dataset["y"]
X = dataset["X"]
scaler = preprocessing.StandardScaler().fit(X) # regularization
X_R = scaler.transform(X)
print(y.shape)
print(X_R.shape)
print(header)
Explanation: Make Training Data
Collect meeting images from Google and similar sources and store them in the images folder. All images use the PNG extension.
The collected images are converted into features using Rekognition; make_training_data.py is responsible for this step.
The converted data is saved as training_data.csv.
images_to_data: converts the images in the images folder into features and creates training_data.csv.
append_image: given the path to an image, appends that image's data to training_data.csv.
Once the file is created, label each row as a good or a bad meeting (good: 1, bad: 0). The label goes in the leftmost column of the file.
Save this file as training_data_with_label.csv.
Make Model
Once training_data_with_label.csv is ready, we move on to building the model. The data naturally contains many features, so we first identify the useful ones and use them in the model.
First, load the data. We also standardize it at the same time.
End of explanation
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
get_headers = lambda s: [i_h[1] for i_h in enumerate(header) if s[i_h[0]]]
selector = SelectKBest(f_classif, k=10).fit(X_R, y)
selected = selector.get_support()
kbests = sorted(zip(get_headers(selected), selector.scores_[selected]), key=lambda h_s: h_s[1], reverse=True)
print(kbests)
Explanation: An SVM is used as the model that classifies meetings. With it we search for effective features.
End of explanation
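make_model.py itself is not shown in this notebook; as a rough, hedged illustration of how one candidate feature pair could be scored (default RBF-kernel SVM, 5-fold cross-validation):
```python
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC

cols = [i for i, h in enumerate(header) if h in ("smile avg", "pose>pitch min")]
print(cross_val_score(SVC(kernel="rbf"), X_R[:, cols], y, cv=5).mean())
```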
import make_model as maker
header_index = lambda hs: [i_h[0] for i_h in enumerate(header) if i_h[1] in hs]
columns = header_index(["smile avg", "pose>pitch min"])
print([header[c] for c in columns])
model = maker.make_model(y, X, columns, save_model=True)
print(model)
Explanation: From here we pick a few features and build the model. Because the data set is small, we keep the model as simple as possible and narrow it down to two features. Only one smile-related feature is kept; after the review below, the remaining feature chosen is pose>pitch min.
emotion: detection is unstable
sex: deciding purely on whether men/women are present feels wrong, so it is excluded
mouth_open/eye_close: depends on the exact moment the picture was taken, so they are excluded
the pose features remain, and among them pose>pitch min gave the best accuracy, so it is adopted as the second feature
A model is built and trained from the selected features. An SVM is used, and its parameters are optimized with grid search.
Once the model is built, it is saved to model/conf_predict.pkl.
End of explanation |
14,508 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Step1: load data
Step2: pre-process data into chunks
Step3: Recurrent Neural Networks
RNNs work on sequences of input data and can learn which part of the history is relevant. Thus, they can learn which mouse events are relevant.
Simple RNN
Inexpensive in terms of model size and compute, but are bad at remembering events far in the past.
LSTM
Can learn which events in the past are important and can thus much better match a certain mouse path. However, 4x more expensive than the simple RNN.
GRU
Like LSTMs, but only 3x more expensive. Often as good as LSTMs.
Training
Step4: Convert Model into tfjs format | Python Code:
# for colab
!pip install -q tf-nightly-gpu-2.0-preview
import tensorflow as tf
print(tf.__version__)
# a small sanity check, does tf seem to work ok?
hello = tf.constant('Hello TF!')
print("This works: {}".format(hello))
# this should return True even on Colab
tf.test.is_gpu_available()
tf.test.is_built_with_cuda()
tf.executing_eagerly()
Explanation: <a href="https://colab.research.google.com/github/DJCordhose/deep-learning-crash-course-notebooks/blob/master/tf-v2/ux-rnn.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Training on Mouse-Movements
Our challenge is to predict which button a user is going to click based on previous mouse movements. One application might be to highlight the button before hover and give clearer guidance to the user:
Even though the model will vary between different machines and different users, we have a default data set you can use to train the model.
Read the complete article here https://dev.to/djcordhose/improving-user-experience-with-tensorflow-js-4693 and have a look at the browser application http://djcordhose.github.io/ux-by-tfjs/dist that can load our model to make online predictions.
End of explanation
import pandas as pd
print(pd.__version__)
# local
# URL = '../data/sample4.json'
# remote
URL = 'https://raw.githubusercontent.com/DJCordhose/ux-by-tfjs/master//data/sample4.json'
df = pd.read_json(URL, typ='series')
len(df)
df.head()
X = [item['x'] for item in df]
X[0]
y = [item['y'] - 1 for item in df]
y[0]
Explanation: load data
End of explanation
from math import floor
def make_chunks(list_to_chunk, chunk_size):
length = len(list_to_chunk)
assert length / chunk_size == floor(length / chunk_size), "length of data must be multiple of segment length"
for chunk_start in range(0, length, chunk_size):
yield list_to_chunk[chunk_start : chunk_start + chunk_size]
import numpy as np
CHUNK_SIZE = 25
# only use the final segments
SEGMENTS = 2
X_expanded = []
y_expanded = []
for x_el, y_el in zip(X, y):
chunks = list(make_chunks(x_el, CHUNK_SIZE))
chunks = chunks[len(chunks) - SEGMENTS:]
labels = [y_el] * SEGMENTS
for seq, label in zip(chunks, labels):
X_expanded.append(seq)
y_expanded.append(label)
X_expanded = np.array(X_expanded)
y_expanded = np.array(y_expanded)
X_expanded.shape
X_expanded[100]
X_expanded[100][0]
y_expanded[100]
np.unique(y_expanded)
assert np.array_equal(np.unique(y_expanded), [0, 1, 2])
Explanation: pre-process data into chunks
End of explanation
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Dense, LSTM, GRU, SimpleRNN, BatchNormalization
from tensorflow.keras.models import Sequential, Model
# experiment with
# - type of RNN: SimpleRNN, LSTM, GRU
# - number of units
# - dropout
# - BatchNormalization: yes/no
n_steps = len(X_expanded[0])
n_features = len(X_expanded[0][0])
n_buttons = 3
model = Sequential()
model.add(SimpleRNN(units=50, activation='tanh', input_shape=(n_steps, n_features), name="RNN_Input",
# model.add(GRU(units=50, activation='tanh', input_shape=(n_steps, n_features), name="RNN_Input",
# recurrent_dropout makes things slow
# dropout=0.1, recurrent_dropout=0.1))
dropout=0.1))
# model.add(GRU(units=50, activation='tanh', input_shape=(n_steps, n_features), name="RNN_Input"))
model.add(BatchNormalization())
model.add(Dense(units=n_buttons, name='softmax', activation='softmax'))
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
%%time
EPOCHS = 200
BATCH_SIZE = 200
history = model.fit(X_expanded, y_expanded,
batch_size=BATCH_SIZE,
epochs=EPOCHS, verbose=0, validation_split=0.2)
loss, accuracy = model.evaluate([X_expanded], y_expanded, batch_size=BATCH_SIZE)
accuracy
%matplotlib inline
import matplotlib.pyplot as plt
# plt.yscale('log')
plt.ylabel('loss')
plt.xlabel('epochs')
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['loss', 'val_loss'])
plt.ylabel('accuracy')
plt.xlabel('epochs')
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.legend(['accuracy', 'val_accuracy'])
model.predict([[X_expanded[0]]])
model.predict([[X_expanded[0]]]).argmax()
y_expanded[0]
y_pred = model.predict([X_expanded]).argmax(axis=1)
cm = tf.math.confusion_matrix(labels=y_expanded, predictions=y_pred)
cm
import seaborn as sns
classes = ["Left Button", "Middle Button", "Right Button"]
sns.heatmap(cm, annot=True, fmt="d", xticklabels=classes, yticklabels=classes)
Explanation: Recurrent Neural Networks
RNNs work on sequences of input data and can learn which part of the history is relevant. Thus, they can learn which mouse events are relevant.
Simple RNN
Inexpensive in terms of model size and compute, but are bad at remembering events far in the past.
LSTM
Can learn which events in the past are important and can thus much better match a certain mouse path. However, 4x more expensive than the simple RNN.
GRU
Like LSTMs, but only 3x more expensive. Often as good as LSTMs.
Training
End of explanation
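For comparison, a sketch of the LSTM variant discussed above, reusing the same hyper-parameters as the SimpleRNN model (expect roughly four times the recurrent weights):
```python
lstm_model = Sequential()
lstm_model.add(LSTM(units=50, activation='tanh',
                    input_shape=(n_steps, n_features),
                    dropout=0.1, name="LSTM_Input"))
lstm_model.add(BatchNormalization())
lstm_model.add(Dense(units=n_buttons, activation='softmax', name='softmax'))
lstm_model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
                   metrics=['accuracy'])
lstm_model.summary()
```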
model.save('ux.hd5')
!ls -l
!pip install -q tensorflowjs
!tensorflowjs_converter --input_format keras ux.hd5 tfjs
!ls -l tfjs
Explanation: Convert Model into tfjs format
End of explanation |
14,509 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Introduction
This kernel gives a starter over the contents of the Gliding Data dataset.
The dataset includes metadata and calculated phases for over 100000 gliding flights from 2016 to 2019, mostly in the region of France but also Belgium, Switzerland and others. In total there are more than 6 million flight phases recorded.
Gliding
Gliding is a leisure aviation activity and sport where pilots fly unpowered aircraft known as gliders or sailplanes.
The principle of gliding is to climb using some sort of lift and convert the altitude gained into distance. Repeating this process allows for very long distance flights, often above 500km or even 1000km - the current free distance world record being just over 3000km in Patagonia. This is the same process birds follow to reduce the amount of effort required to travel great distances.
The most used sources of lift include
Step1: Flight Metadata
There's a lot of information contained in the two flight metadata files (websource and track).
Step2: The most useful information comes from the websource file as this information is passed directly by the pilot when submitting the flight to the online competition. Things like Country or Region provide useful statistics on how popular the sport is in different areas. As an example, what are the most popular regions considering the total number of flights? What about the most popular Takeoff location?
Step3: The three regions above match the Alps area, which is an expected result given this is a gliding Mecca. The second result shows Vinon as the most popular takeoff in 2016, a big club also in the Southern Alps. But it's interesting to notice that more recently a club near Montpellier took over as the one with the most activity in terms of number of flights.
Gliding is a seasonal activity peaking in summer months.
Step4: Merging Data
We can get additional flight metadata information from the flight_track file, but you can expect this data to be less reliable as it's often the case that the metadata in the flight recorder is not updated before a flight. It is very useful though to know more about what type of recorder was used, calibration settings, etc. It is also the source of the flight tracks used to generate the data in the phases file.
In some cases we want to handle columns from both flight metadata files, so it's useful to join the two sets. We can rely on the ID for this purpose.
Step5: Flight Phases
In addition to the flight metadata provided in the files above, by analysing the GPS flight tracks we can generate a lot more interesting data.
Here we take a look at flight phases, calculated using the goigc tool. As described earlier to travel further glider pilots use thermals to gain altitude and then convert that altitude into distance. In the phases file we have a record of each individual phase detected for each of the 100k flights, and we'll focus on
Step6: Data Preparation
As a quick example of what is possible with this kind of data let's try to map all circling phases as a HeatMap.
First we need to do some treatment of the data
Step7: Visualization
Once we have the data ready we can visualize it over a map. We rely on folium for this.
Step8: Single HeatMap
Step9: HeatMap over Time
Another cool possibility is to visualize the same data over time.
In this case we're grouping weekly and playing the data over one year.
Both the most popular areas and times of the year are pretty clear from this animation. | Python Code:
import datetime
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import random
import os
for dirname, _, filenames in os.walk('/tmp/gliding-data'):
for filename in filenames:
print(os.path.join(dirname, filename))
flight_websource = pd.read_csv("/tmp/gliding-data/flight_websource.csv")
flight_track = pd.read_csv("/tmp/gliding-data/flight_track.csv")
flight_phases = pd.read_csv("/tmp/gliding-data/phases.csv", skiprows=lambda i: i>0 and random.random() > 0.5)
Explanation: Introduction
This kernel gives a starter over the contents of the Gliding Data dataset.
The dataset includes metadata and calculated phases for over 100000 gliding flights from 2016 to 2019, mostly in the region of France but also Belgium, Switzerland and others. In total there are more than 6 million flight phases recorded.
Gliding
Gliding is a leisure aviation activity and sport where pilots fly unpowered aircraft known as gliders or sailplanes.
The principle of gliding is to climb using some sort of lift and convert the altitude gained into distance. Repeating this process allows for very long distance flights, often above 500km or even 1000km - the current free distance world record being just over 3000km in Patagonia. This is the same process birds follow to reduce the amount of effort required to travel great distances.
The most used sources of lift include:
* thermals: where the air rises due to heating from the sun, often marked by cumulus clouds at the top
* wave: created by strong winds and stable air layers facing a mountain or a high hill, often marked by lenticular clouds
with thermalling being by far the most common, and this dataset reflects that.
Data Recorders
When flying pilots often carry some sort of GPS recording device which generates a track of points collected every few seconds. Each point contains fields for latitude, longitude and altitude (pressure and GPS) among several others, often stored in IGC format. Often these tracks are uploaded to online competitions where they are shared with other pilots, and can be easily visualized.
The data available in this dataset was scraped from the online competition Netcoupe. The scraping, parsing and analysis was done using goigc, an open source flight parser and analyser.
Getting Started
Let's start by loading the different files in the dataset and taking a peek at the records.
The available files include:
* flight_websource: the metadata exposed in the online competition website, including additional information to what is present in the flight track
* flight_track: the metadata collected directly from the GPS/IGC flight track
* phases: the flight phases (cruising, circling) calculated from the GPS/IGC flight track (this is a large file, we load a subset below)
* handicaps: a mostly static file with the different handicaps attributed to each glider type by the IGC, important when calculating flight performances
End of explanation
flight_websource.head(1)
flight_track.head(1)
Explanation: Flight Metadata
There's a lot of information contained in the two flight metadata files (websource and track).
End of explanation
flight_websource.groupby(['Country', 'Region'])['Region'].value_counts().sort_values(ascending=False).head(3)
flight_websource.groupby(['Country', 'Year', 'Takeoff'])['Takeoff'].value_counts().sort_values(ascending=False).head(3)
Explanation: The most useful information comes from the websource file as this information is passed directly by the pilot when submitting the flight to the online competition. Things like Country or Region provide useful statistics on how popular the sport is in different areas. As an example, what are the most popular regions considering the total number of flights? What about the most popular Takeoff location?
End of explanation
flight_websource['DayOfWeek'] = flight_websource.apply(lambda r: datetime.datetime.strptime(r['Date'], "%Y-%m-%dT%H:%M:%SZ").strftime("%A"), axis=1)
flight_websource.groupby(['DayOfWeek'])['DayOfWeek'].count().plot.bar()
flight_websource['Month'] = flight_websource.apply(lambda r: datetime.datetime.strptime(r['Date'], "%Y-%m-%dT%H:%M:%SZ").strftime("%m"), axis=1)
flight_websource.groupby(['Month'])['Month'].count().plot.bar()
Explanation: The three regions above match the Alps area, which is an expected result given this is a gliding Mecca. The second result shows Vinon as the most popular takeoff in 2016, a big club also in the Southern Alps. But it's interesting to notice that more recently a club near Montpellier took over as the one with the most activity in terms of number of flights.
Gliding is a seasonal activity peaking in summer months.
End of explanation
flight_all = pd.merge(flight_websource, flight_track, how='left', on='ID')
flight_all.head(1)
Explanation: Merging Data
We can get additional flight metadata information from the flight_track file, but you can expect this data to be less reliable as it's often the case that the metadata in the flight recorder is not updated before a flight. It is very useful though to know more about what type of recorder was used, calibration settings, etc. It is also the source of the flight tracks used to generate the data in the phases file.
In some cases we want to handle columns from both flight metadata files, so it's useful to join the two sets. We can rely on the ID for this purpose.
End of explanation
flight_phases.head(1)
Explanation: Flight Phases
In addition to the flight metadata provided in the files above, by analysing the GPS flight tracks we can generate a lot more interesting data.
Here we take a look at flight phases, calculated using the goigc tool. As described earlier to travel further glider pilots use thermals to gain altitude and then convert that altitude into distance. In the phases file we have a record of each individual phase detected for each of the 100k flights, and we'll focus on:
* Circling (5): phase where a glider is gaining altitude by circling in an area of rising air
* Cruising (3): phase where a glider is flying straight converting altitude into distance
These are indicated by the integer field Type below. Each phase has a set of additional fields with relevant statistics for each phase type: while circling the average climb rate (vario) and duration are interesting; while cruising the distance covered and LD (glide ratio) are more interesting.
End of explanation
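To get a feel for the two phase types before any preparation, here is a quick summary sketch; it only assumes the Type and AvgVario columns that the next cell already relies on.
# How many phases of each type were detected across all flights?
flight_phases['Type'].value_counts()
# Typical climb rates while circling (Type 5), before any filtering.
flight_phases.loc[flight_phases.Type == 5, 'AvgVario'].describe()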
phases = pd.merge(flight_phases, flight_websource[['TrackID', 'Distance', 'Speed']], on='TrackID')
phases['Lat'] = np.rad2deg(phases['CentroidLatitude'])
phases['Lng'] = np.rad2deg(phases['CentroidLongitude'])
phases_copy = phases[(phases.Type == 5) & (phases.AvgVario > 2) & (phases.AvgVario < 10)].copy()
phases_copy.head(2)
#phases_copy['AM'] = phases_copy.apply(lambda r: datetime.datetime.strptime(r['StartTime'], "%Y-%m-%dT%H:%M:%SZ").strftime("%p"), axis=1)
#phases_copy['Day'] = phases_copy.apply(lambda r: datetime.datetime.strptime(r['StartTime'], "%Y-%m-%dT%H:%M:%SZ").strftime("%j"), axis=1)
#phases_copy['Week'] = phases_copy.apply(lambda r: datetime.datetime.strptime(r['StartTime'], "%Y-%m-%dT%H:%M:%SZ").strftime("%W"), axis=1)
#phases_copy['Month'] = phases_copy.apply(lambda r: r['StartTime'][5:7], axis=1)
#phases_copy['Year'] = phases_copy.apply(lambda r: r['StartTime'][0:4], axis=1)
#phases_copy['YearMonth'] = phases_copy.apply(lambda r: r['StartTime'][0:7], axis=1)
#phases_copy['YearMonthDay'] = phases_copy.apply(lambda r: r['StartTime'][0:10], axis=1)
# use the corresponding function above to update the grouping to something other than week
phases_copy['Group'] = phases_copy.apply(lambda r: datetime.datetime.strptime(r['StartTime'], "%Y-%m-%dT%H:%M:%SZ").strftime("%W"), axis=1)
phases_copy.head(1)
Explanation: Data Preparation
As a quick example of what is possible with this kind of data, let's try to map all circling phases as a HeatMap.
First we need to do some treatment of the data: convert the coordinates from radians to degrees, filter out unreasonable values (we keep average climb rates between 2 and 10 m/s, since higher values are usually errors from the recording device), and convert the date to the expected format and desired grouping. In this case we're grouping all thermal phases by week.
End of explanation
# This is a workaround for this known issue:
# https://github.com/python-visualization/folium/issues/812#issuecomment-582213307
!pip install git+https://github.com/python-visualization/branca
!pip install git+https://github.com/sknzl/folium@update-css-url-to-https
import folium
from folium import plugins
from folium import Choropleth, Circle, Marker
from folium.plugins import HeatMap, HeatMapWithTime, MarkerCluster
# folium.__version__ # should be '0.10.1+8.g4ea1307'
# folium.branca.__version__ # should be '0.4.0+4.g6ac241a'
Explanation: Visualization
Once we have the data ready we can visualize it over a map. We rely on folium for this.
End of explanation
# we use a smaller sample to improve the visualization
# a better alternative is to group entries by CellID; a rough grid-based stand-in is sketched below
phases_single = phases_copy.sample(frac=0.01, random_state=1)
m_5 = folium.Map(location=[47.06318, 5.41938], tiles='stamen terrain', zoom_start=7)
HeatMap(
phases_single[['Lat','Lng','AvgVario']], gradient={0.5: 'blue', 0.7: 'yellow', 1: 'red'},
min_opacity=5, max_val=phases_single.AvgVario.max(), radius=4, max_zoom=7, blur=4, use_local_extrema=False).add_to(m_5)
m_5
Explanation: Single HeatMap
End of explanation
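As a rough stand-in for the CellID grouping mentioned in the code comment above, thermals can be snapped to a coarse lat/lng grid and averaged before plotting; the 0.05 degree cell size below is an arbitrary choice for illustration.
cell = 0.05  # grid size in degrees (assumption, tune to taste)
phases_grid = phases_copy.copy()
phases_grid['LatCell'] = (phases_grid['Lat'] / cell).round() * cell
phases_grid['LngCell'] = (phases_grid['Lng'] / cell).round() * cell
phases_grid = phases_grid.groupby(['LatCell', 'LngCell'], as_index=False)['AvgVario'].mean()
# phases_grid[['LatCell', 'LngCell', 'AvgVario']] can be fed to HeatMap just like phases_single above.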
m_5 = folium.Map(location=[47.06318, 5.41938], tiles='stamen terrain', zoom_start=7)
groups = phases_copy.Group.sort_values().unique()
data = []
for g in groups:
data.append(phases_copy.loc[phases_copy.Group==g,['Group','Lat','Lng','AvgVario']].groupby(['Lat','Lng']).sum().reset_index().values.tolist())
HeatMapWithTime(
data,
index = list(phases_copy.Group.sort_values().unique()),
gradient={0.1: 'blue', 0.3: 'yellow', 0.8: 'red'},
auto_play=True, scale_radius=False, display_index=True, radius=4, min_speed=1, max_speed=6, speed_step=1,
min_opacity=1, max_opacity=phases_copy.AvgVario.max(), use_local_extrema=True).add_to(m_5)
m_5
Explanation: HeatMap over Time
Another cool possibility is to visualize the same data over time.
In this case we're grouping weekly and playing the data over one year.
Both the most popular areas and times of the year are pretty clear from this animation.
End of explanation |
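One small follow-up, not in the original notebook: folium maps can be written to a standalone HTML file, so the animation can be shared outside the notebook (the output file name is arbitrary).
m_5.save('thermals_by_week.html')  # open the resulting file in any browser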
14,510 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Urban vs. rural living and suicide rates
May 2016
Written by Kara Frantzich at NYU Stern
Contact
Step1: The Data
Data used is from the Centers for Disease Control and Prevention (CDC), from 2014.
The CDC has defined different levels of urbanization, outlined below
Step2: These visualizations show that there are higher suicide rates as the level of ruralness increases.
Next, I looked at suicide rates by county. Of the 42,704 suicides in 2014, the CDC is able to track 35,580 at the county level (the remaining 7,124 were part of the data that was suppressed in the CDC's datasets).
Step3: This shows that there is a much higher suicide rate in counties with lower populations.
Next, I graphed only the counties with populations of less than 300K | Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
# pandas.io.wb has moved to the separate pandas-datareader package and is not needed for this analysis
Explanation: Urban vs. rural living and suicide rates
May 2016
Written by Kara Frantzich at NYU Stern
Contact: kara.frantzich@stern.nyu.edu
Suicide in the United States
Suicide is the 10th most common cause of death in the United States. In 2014, 42,704 people in the US committed suicide (CDC), compared to about 16,000 homicides in the same year.
This project's goal is to investigate the relationship between urban and rural populations and suicide rates. Are cities healthier for people by providing a social network for those who may consider suicide? Or does rural life make it more unlikely for people to take their own lives?
Packages Imported
I use matplotlib.pyplot to plot scatter plots. I use pandas, a Python package that allows for fast data manipulation and analysis, to organize my dataset.
End of explanation
data_1 = '/Users/karaf/Documents/Data_Bootcamp/Suicide_Rates_by_Urbanization.csv' # file location
df1 = pd.read_csv(data_1, index_col=0)
df1
# Bar chart of suicide rates by urbanization level
fig, ax = plt.subplots()
df1['Suicide Rate'].plot(ax=ax, kind='barh', alpha=0.5)
ax.set_title('Suicide rates by urbanization levels', loc='left', fontsize=16)
ax.set_xlabel('Suicides per 100,000 people')
ax.set_ylabel('')
ax.spines['left'].set_position(('outward', 10))
ax.spines['bottom'].set_position(('outward', 10))
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
# Scatter plot of suicide rate vs. population; bubble size encodes total suicides
fig, ax = plt.subplots()
ax.scatter(df1['Total Population'], df1['Suicide Rate'], # x,y variables
s=df1['Total Suicides']/5, # size of bubbles
alpha=0.5)
ax.set_title('Suicide rate by population size', loc='left', fontsize=16)
ax.set_xlabel('Population in 100 Millions')
ax.set_ylabel('Suicide Rate')
ax.text(20,20, 'Bubble size represents suicide totals', horizontalalignment='right')
Explanation: The Data
Data used is from the Centers for Disease Control and Prevention (CDC), from 2014.
The CDC has defined different levels of urbanization, outlined below:
Definitions of Urbanization Levels, by population of area
Large Metro Center 1,000,000 or more people
Large Metro Suburban 1,000,000 or more outside of city center
Medium Metro 250,000-999,999
Small Metro Less than 250,000
Non-metro Center Less than 49,999
Non-metro Non-Core Less than 49,999 outside of town center
I first looked at suicide rates through these pre-defined concepts of urban and rural.
End of explanation
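Purely as an optional reference, not part of the original analysis, the level definitions above can be kept as a small lookup table:
urbanization_levels = pd.DataFrame({
    'Level': ['Large Metro Center', 'Large Metro Suburban', 'Medium Metro',
              'Small Metro', 'Non-metro Center', 'Non-metro Non-Core'],
    'Population': ['1,000,000 or more', '1,000,000 or more outside of city center',
                   '250,000-999,999', 'Less than 250,000',
                   'Less than 49,999', 'Less than 49,999 outside of town center'],
})
urbanization_levels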
data_2 = '/Users/karaf/Documents/Data_Bootcamp/Suicide_Rates_by_County.csv' # file location
df2 = pd.read_csv(data_2, index_col=0)
df2
# Scatter plot of suicide rate vs. county population
fig, ax = plt.subplots()
ax.scatter(df2['Population'], df2['Suicide Rate per 100,000'])
ax.set_title('Suicide rate by population size', loc='left', fontsize=16)
ax.set_xlabel('Population in 10 Millions')
ax.set_ylabel('Suicide Rate')
Explanation: These visualizations show that there are higher suicide rates as the level of ruralness increases.
Next, I looked at suicide rates by county. Of the 42,704 suicides in 2014, the CDC is able to track 35,580 at the county level (the remaining 7,124 were part of the data that was suppressed in the CDC's datasets).
End of explanation
data_3 = '/Users/karaf/Documents/Data_Bootcamp/Suicide_Rates_by_County_Below 300K.csv' # file location
df3 = pd.read_csv(data_3, index_col=0)
# Scatter plot of suicide rate vs. population for counties under 300K
fig, ax = plt.subplots()
ax.scatter(df3['Population'], df3['Suicide Rate per 100,000'])
ax.set_title('Suicide rate by population size', loc='left', fontsize=16)
ax.set_xlabel('Population')
ax.set_ylabel('Suicide Rate')
Explanation: This shows that there is a much higher suicide rate in counties with lower populations.
Next, I graphed only the counties with populations of less than 300K:
End of explanation |
14,511 | Given the following text problem statement, write Python code to implement the functionality described below in problem statement
Problem:
I have problems using scipy.sparse.csr_matrix: | Problem:
from scipy import sparse
sa = sparse.random(10, 10, density = 0.01, format = 'csr')
sb = sparse.random(10, 10, density = 0.01, format = 'csr')
result = sparse.hstack((sa, sb)).tocsr() |
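A quick sanity check on the stacked matrix produced above (not required for the answer; it just confirms the shape and storage format):
print(result.shape)   # (10, 20): the two 10x10 matrices placed side by side
print(result.format)  # 'csr' after the .tocsr() conversion
print(result.nnz)     # number of explicitly stored non-zero entries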
14,512 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Hand tuning hyperparameters
Learning Objectives
Step1: Set Up
In this first cell, we'll load the necessary libraries.
Step2: Next, we'll load our data set.
Step3: Examine the data
It's a good idea to get to know your data a little bit before you work with it.
We'll print out a quick summary of a few useful statistics on each column.
This will include things like mean, standard deviation, max, min, and various quantiles.
Step4: In this exercise, we'll be trying to predict median_house_value. It will be our label (sometimes also called a target). Can we use total_rooms as our input feature? What's going on with the values for that feature?
This data is at the city block level, so these features reflect the total number of rooms in that block, or the total number of people who live on that block, respectively. Let's create a different, more appropriate feature. Because we are predicting the price of a single house, we should try to make all our features correspond to a single house as well.
Step5: Build the first model
In this exercise, we'll be trying to predict median_house_value. It will be our label (sometimes also called a target). We'll use num_rooms as our input feature.
To train our model, we'll use the LinearRegressor estimator. The Estimator takes care of a lot of the plumbing, and exposes a convenient way to interact with data, training, and evaluation.
Step6: 1. Scale the output
Let's scale the target values so that the default parameters are more appropriate.
Step7: 2. Change learning rate and batch size
Can you come up with better parameters? | Python Code:
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
Explanation: Hand tuning hyperparameters
Learning Objectives:
* Use the LinearRegressor class in TensorFlow to predict median housing price, at the granularity of city blocks, based on one input feature
* Evaluate the accuracy of a model's predictions using Root Mean Squared Error (RMSE)
* Improve the accuracy of a model by hand-tuning its hyperparameters
The data is based on 1990 census data from California. This data is at the city block level, so these features reflect the total number of rooms in that block, or the total number of people who live on that block, respectively. Using only one input feature -- the number of rooms -- predict house value.
End of explanation
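Since RMSE is the evaluation metric used throughout this lab, here is a minimal NumPy illustration of the formula on toy numbers (not model output):
import numpy as np
y_true = np.array([2.0, 3.0, 5.0])
y_pred = np.array([2.5, 2.0, 5.5])
rmse = np.sqrt(np.mean((y_pred - y_true) ** 2))
print(rmse)  # ~0.71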
import math
import shutil
import numpy as np
import pandas as pd
import tensorflow as tf
print(tf.__version__)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
Explanation: Set Up
In this first cell, we'll load the necessary libraries.
End of explanation
df = pd.read_csv("https://storage.googleapis.com/ml_universities/california_housing_train.csv", sep=",")
Explanation: Next, we'll load our data set.
End of explanation
df.head()
df.describe()
Explanation: Examine the data
It's a good idea to get to know your data a little bit before you work with it.
We'll print out a quick summary of a few useful statistics on each column.
This will include things like mean, standard deviation, max, min, and various quantiles.
End of explanation
df['num_rooms'] = df['total_rooms'] / df['households']
df.describe()
# Split into train and eval
np.random.seed(seed=1) #makes split reproducible
msk = np.random.rand(len(df)) < 0.8
traindf = df[msk]
evaldf = df[~msk]
Explanation: In this exercise, we'll be trying to predict median_house_value. It will be our label (sometimes also called a target). Can we use total_rooms as our input feature? What's going on with the values for that feature?
This data is at the city block level, so these features reflect the total number of rooms in that block, or the total number of people who live on that block, respectively. Let's create a different, more appropriate feature. Because we are predicting the price of a single house, we should try to make all our features correspond to a single house as well.
End of explanation
OUTDIR = './housing_trained'
def train_and_evaluate(output_dir, num_train_steps):
estimator = tf.compat.v1.estimator.LinearRegressor(
model_dir = output_dir,
feature_columns = [tf.feature_column.numeric_column('num_rooms')])
#Add rmse evaluation metric
def rmse(labels, predictions):
pred_values = tf.cast(predictions['predictions'],tf.float64)
return {'rmse': tf.compat.v1.metrics.root_mean_squared_error(labels, pred_values)}
estimator = tf.compat.v1.estimator.add_metrics(estimator,rmse)
train_spec=tf.estimator.TrainSpec(
input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(x = traindf[["num_rooms"]],
y = traindf["median_house_value"], # no scaling in this first model
num_epochs = None,
shuffle = True),
max_steps = num_train_steps)
eval_spec=tf.estimator.EvalSpec(
input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(x = evaldf[["num_rooms"]],
y = evaldf["median_house_value"], # no scaling in this first model
num_epochs = 1,
shuffle = False),
steps = None,
start_delay_secs = 1, # start evaluating after N seconds
throttle_secs = 10, # evaluate every N seconds
)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
# Run training
shutil.rmtree(OUTDIR, ignore_errors = True) # start fresh each time
train_and_evaluate(OUTDIR, num_train_steps = 100)
Explanation: Build the first model
In this exercise, we'll be trying to predict median_house_value. It will be our label (sometimes also called a target). We'll use num_rooms as our input feature.
To train our model, we'll use the LinearRegressor estimator. The Estimator takes care of a lot of the plumbing, and exposes a convenient way to interact with data, training, and evaluation.
End of explanation
SCALE = 100000
OUTDIR = './housing_trained'
def train_and_evaluate(output_dir, num_train_steps):
estimator = tf.compat.v1.estimator.LinearRegressor(
model_dir = output_dir,
feature_columns = [tf.feature_column.numeric_column('num_rooms')])
#Add rmse evaluation metric
def rmse(labels, predictions):
pred_values = tf.cast(predictions['predictions'],tf.float64)
return {'rmse': tf.compat.v1.metrics.root_mean_squared_error(labels*SCALE, pred_values*SCALE)}
estimator = tf.compat.v1.estimator.add_metrics(estimator,rmse)
train_spec=tf.estimator.TrainSpec(
input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(x = traindf[["num_rooms"]],
y = traindf["median_house_value"] / SCALE, # note the scaling
num_epochs = None,
shuffle = True),
max_steps = num_train_steps)
eval_spec=tf.estimator.EvalSpec(
input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(x = evaldf[["num_rooms"]],
y = evaldf["median_house_value"] / SCALE, # note the scaling
num_epochs = 1,
shuffle = False),
steps = None,
start_delay_secs = 1, # start evaluating after N seconds
throttle_secs = 10, # evaluate every N seconds
)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
# Run training
shutil.rmtree(OUTDIR, ignore_errors = True) # start fresh each time
train_and_evaluate(OUTDIR, num_train_steps = 100)
Explanation: 1. Scale the output
Let's scale the target values so that the default parameters are more appropriate.
End of explanation
SCALE = 100000
OUTDIR = './housing_trained'
def train_and_evaluate(output_dir, num_train_steps):
myopt = tf.compat.v1.train.FtrlOptimizer(learning_rate = 0.2) # note the learning rate
estimator = tf.compat.v1.estimator.LinearRegressor(
model_dir = output_dir,
feature_columns = [tf.feature_column.numeric_column('num_rooms')],
optimizer = myopt)
#Add rmse evaluation metric
def rmse(labels, predictions):
pred_values = tf.cast(predictions['predictions'],tf.float64)
return {'rmse': tf.compat.v1.metrics.root_mean_squared_error(labels*SCALE, pred_values*SCALE)}
estimator = tf.compat.v1.estimator.add_metrics(estimator,rmse)
train_spec=tf.estimator.TrainSpec(
input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(x = traindf[["num_rooms"]],
y = traindf["median_house_value"] / SCALE, # note the scaling
num_epochs = None,
batch_size = 512, # note the batch size
shuffle = True),
max_steps = num_train_steps)
eval_spec=tf.estimator.EvalSpec(
input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(x = evaldf[["num_rooms"]],
y = evaldf["median_house_value"] / SCALE, # note the scaling
num_epochs = 1,
shuffle = False),
steps = None,
start_delay_secs = 1, # start evaluating after N seconds
throttle_secs = 10, # evaluate every N seconds
)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
# Run training
shutil.rmtree(OUTDIR, ignore_errors = True) # start fresh each time
train_and_evaluate(OUTDIR, num_train_steps = 100)
Explanation: 2. Change learning rate and batch size
Can you come up with better parameters?
End of explanation |
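To answer the question above a bit more systematically, a hand-tuning loop can try a few candidate settings. This is only a sketch: it assumes a hypothetical, parametrized variant of train_and_evaluate that accepts learning_rate and batch_size and forwards them to the FtrlOptimizer and pandas_input_fn calls shown earlier.
# Sketch only: train_and_evaluate(output_dir, num_train_steps, learning_rate, batch_size)
# is an assumed variant of the function defined above, not part of the original lab.
for lr in [0.05, 0.1, 0.2]:
    for bs in [128, 512]:
        outdir = './housing_trained_lr{}_bs{}'.format(lr, bs)
        shutil.rmtree(outdir, ignore_errors=True)  # start fresh for each setting
        train_and_evaluate(outdir, num_train_steps=100, learning_rate=lr, batch_size=bs)
# Compare the final eval rmse reported for each run (or point TensorBoard at the model dirs).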
14,513 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Author
Step1: Job fiches
First load all the job_groups (fiche metier) from the XML files
Step2: Visualize the distributions of Holland Codes for job fiches
Step3: Holland Codes of activities associated with jobs
Step4: How often are the Holland Codes of the activity the same as for the job?
Step5: Activities
Let's look at the Holland Codes associated with activities. | Python Code:
from __future__ import division
import glob
import json
import os
import itertools as it
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import xmltodict
import numpy as np
from bob_emploi.data_analysis.lib import read_data
data_folder = os.getenv('DATA_FOLDER')
def riasec_dist(first, second):
'''compute the distance between two characteristics on the hexagon'''
if pd.isnull(first) or pd.isnull(second):
return np.nan
riasec = "RIASEC"
a = riasec.find(first.upper())
b = riasec.find(second.upper())
assert a >= 0 and b >= 0
return min( (a-b)%6, (b-a)%6)
# to call it on a dataframe row
riasec_dist_row = lambda row: riasec_dist(row.riasec_majeur, row.riasec_mineur)
Explanation: Author: Stephan, [email protected]
Skip the run test because the ROME version has to be updated to make it work in the exported repository. TODO: Update ROME and remove the skiptest flag.
Holland Codes
https://en.wikipedia.org/wiki/Holland_Codes
There is a theory that associates 6 basic characteristics with people. These characteristics can be used to help people find jobs they like. The theory still seems to be widely used for career counseling.
Usually people are associated with multiple characteristics to varying degrees; a person does not usually fit neatly into one of the boxes. However, when people are associated with multiple characteristics, these tend to be neighboring on the above hexagon. It is less common for someone to be associated with two characteristics on opposite ends of the hexagon. These are even called inconsistent personality patterns.
In the ROME dataset each job and each activity has a major and a minor Holland Code assigned. We want to use these to help people find a job in a field they like, and maybe did not think of previously.
End of explanation
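A few spot checks on the hexagon-distance helper defined above make the geometry concrete: adjacent letters are 1 apart, opposite corners are 3 apart, and the comparison is case-insensitive.
print(riasec_dist('R', 'I'))  # 1: neighbours on the RIASEC hexagon
print(riasec_dist('R', 'S'))  # 3: opposite corners, an 'inconsistent' pattern
print(riasec_dist('c', 'C'))  # 0: lower/upper case both work thanks to .upper()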
fiche_dicts = read_data.load_fiches_from_xml(os.path.join(data_folder, 'rome/ficheMetierXml'))
fiches = pd.DataFrame(fiche['bloc_code_rome'] for fiche in fiche_dicts)
fiches['riasec_mineur'] = fiches.riasec_mineur.str.upper()
fiches['combined'] = fiches.riasec_majeur + fiches.riasec_mineur
fiches['riasec_dist'] = fiches.apply(riasec_dist_row, axis=1)
Explanation: Job fiches
First load all the job_groups (fiche metier) from the XML files
End of explanation
def visualize_codes(thing):
'''Visualize the distribution of Holland codes
major codes, minor codes, the combinations of both
and distances between
'''
riasec_counts = thing.riasec_majeur.value_counts().to_frame()
riasec_counts['riasec_mineur'] = thing.riasec_mineur.value_counts()
fig, ax = plt.subplots(3, figsize=(10, 10))
riasec_counts.plot(kind='bar', ax=ax[0])
thing.combined.value_counts().plot(kind='bar', ax=ax[1])
thing.riasec_dist.hist(ax=ax[2])
ax[0].set_title('Frequency of major and minor codes')
ax[1].set_title('Frequency of major-minor combinations')
ax[2].set_title('Histogram of hexagon distances')
fig.tight_layout()
visualize_codes(fiches)
Explanation: Visualize the distributions of Holland Codes for job fiches
End of explanation
def extract(fiche):
'''extract the base activities associated with a job fiche'''
base_acts = fiche['bloc_activites_de_base']['activites_de_base']['item_ab']
rome = {'rome_' + k: v for k, v in fiche['bloc_code_rome'].items()}
return [dict(rome, **ba) for ba in base_acts]
fiche_acts = pd.DataFrame(sum(map(extract, fiche_dicts), []))
fiche_acts['riasec_mineur'] = fiche_acts.riasec_mineur.str.upper()
fiche_acts['rome_riasec_mineur'] = fiche_acts.rome_riasec_mineur.str.upper()  # uppercase the job-level minor code too
Explanation: Holland Codes of activities associated with jobs
End of explanation
combinations = it.product(['majeur', 'mineur'], ['majeur', 'mineur'])
for job, act in combinations:
job_key = 'rome_riasec_' + job
act_key = 'riasec_' + act
match_count = (fiche_acts[job_key] == fiche_acts[act_key]).sum()
fmt_str = "{} job fiche matches {} activity fiche in {:.2f}%"
print(fmt_str.format(job, act, match_count / len(fiche_acts) * 100))
Explanation: How often are the Holland Codes of the activity the same as for the job?
End of explanation
activities = pd.read_csv('../../../data/rome/csv/unix_referentiel_activite_v330_utf8.csv')
act_riasec = pd.read_csv('../../../data/rome/csv/unix_referentiel_activite_riasec_v330_utf8.csv')
acts = pd.merge(activities, act_riasec, on='code_ogr')
acts['riasec_mineur'] = acts.riasec_mineur.str.upper()
acts['combined'] = acts.riasec_majeur + acts.riasec_mineur
acts['riasec_dist'] = acts.apply(riasec_dist_row, axis=1)
base_acts = acts[acts.libelle_type_activite == 'ACTIVITE DE BASE']
visualize_codes(acts)
visualize_codes(fiches) #for comparison
Explanation: Activities
Let's look at the Holland Codes associated with activities.
End of explanation |
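To put numbers on the visual comparison above, the share of each major code among job fiches versus activities can be tabulated directly (both frames are built in the cells above):
share = pd.DataFrame({
    'fiches': fiches.riasec_majeur.value_counts(normalize=True),
    'activities': acts.riasec_majeur.value_counts(normalize=True),
})
share.round(3)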
14,514 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
ES-DOC CMIP6 Model Properties - Ocean
MIP Era
Step1: Document Authors
Set document authors
Step2: Document Contributors
Specify document contributors
Step3: Document Publication
Specify document publication status
Step4: Document Table of Contents
1. Key Properties
2. Key Properties --> Seawater Properties
3. Key Properties --> Bathymetry
4. Key Properties --> Nonoceanic Waters
5. Key Properties --> Software Properties
6. Key Properties --> Resolution
7. Key Properties --> Tuning Applied
8. Key Properties --> Conservation
9. Grid
10. Grid --> Discretisation --> Vertical
11. Grid --> Discretisation --> Horizontal
12. Timestepping Framework
13. Timestepping Framework --> Tracers
14. Timestepping Framework --> Baroclinic Dynamics
15. Timestepping Framework --> Barotropic
16. Timestepping Framework --> Vertical Physics
17. Advection
18. Advection --> Momentum
19. Advection --> Lateral Tracers
20. Advection --> Vertical Tracers
21. Lateral Physics
22. Lateral Physics --> Momentum --> Operator
23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
24. Lateral Physics --> Tracers
25. Lateral Physics --> Tracers --> Operator
26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
27. Lateral Physics --> Tracers --> Eddy Induced Velocity
28. Vertical Physics
29. Vertical Physics --> Boundary Layer Mixing --> Details
30. Vertical Physics --> Boundary Layer Mixing --> Tracers
31. Vertical Physics --> Boundary Layer Mixing --> Momentum
32. Vertical Physics --> Interior Mixing --> Details
33. Vertical Physics --> Interior Mixing --> Tracers
34. Vertical Physics --> Interior Mixing --> Momentum
35. Uplow Boundaries --> Free Surface
36. Uplow Boundaries --> Bottom Boundary Layer
37. Boundary Forcing
38. Boundary Forcing --> Momentum --> Bottom Friction
39. Boundary Forcing --> Momentum --> Lateral Friction
40. Boundary Forcing --> Tracers --> Sunlight Penetration
41. Boundary Forcing --> Tracers --> Fresh Water Forcing
1. Key Properties
Ocean key properties
1.1. Model Overview
Is Required
Step5: 1.2. Model Name
Is Required
Step6: 1.3. Model Family
Is Required
Step7: 1.4. Basic Approximations
Is Required
Step8: 1.5. Prognostic Variables
Is Required
Step9: 2. Key Properties --> Seawater Properties
Physical properties of seawater in ocean
2.1. Eos Type
Is Required
Step10: 2.2. Eos Functional Temp
Is Required
Step11: 2.3. Eos Functional Salt
Is Required
Step12: 2.4. Eos Functional Depth
Is Required
Step13: 2.5. Ocean Freezing Point
Is Required
Step14: 2.6. Ocean Specific Heat
Is Required
Step15: 2.7. Ocean Reference Density
Is Required
Step16: 3. Key Properties --> Bathymetry
Properties of bathymetry in ocean
3.1. Reference Dates
Is Required
Step17: 3.2. Type
Is Required
Step18: 3.3. Ocean Smoothing
Is Required
Step19: 3.4. Source
Is Required
Step20: 4. Key Properties --> Nonoceanic Waters
Non oceanic waters treatement in ocean
4.1. Isolated Seas
Is Required
Step21: 4.2. River Mouth
Is Required
Step22: 5. Key Properties --> Software Properties
Software properties of ocean code
5.1. Repository
Is Required
Step23: 5.2. Code Version
Is Required
Step24: 5.3. Code Languages
Is Required
Step25: 6. Key Properties --> Resolution
Resolution in the ocean grid
6.1. Name
Is Required
Step26: 6.2. Canonical Horizontal Resolution
Is Required
Step27: 6.3. Range Horizontal Resolution
Is Required
Step28: 6.4. Number Of Horizontal Gridpoints
Is Required
Step29: 6.5. Number Of Vertical Levels
Is Required
Step30: 6.6. Is Adaptive Grid
Is Required
Step31: 6.7. Thickness Level 1
Is Required
Step32: 7. Key Properties --> Tuning Applied
Tuning methodology for ocean component
7.1. Description
Is Required
Step33: 7.2. Global Mean Metrics Used
Is Required
Step34: 7.3. Regional Metrics Used
Is Required
Step35: 7.4. Trend Metrics Used
Is Required
Step36: 8. Key Properties --> Conservation
Conservation in the ocean component
8.1. Description
Is Required
Step37: 8.2. Scheme
Is Required
Step38: 8.3. Consistency Properties
Is Required
Step39: 8.4. Corrected Conserved Prognostic Variables
Is Required
Step40: 8.5. Was Flux Correction Used
Is Required
Step41: 9. Grid
Ocean grid
9.1. Overview
Is Required
Step42: 10. Grid --> Discretisation --> Vertical
Properties of vertical discretisation in ocean
10.1. Coordinates
Is Required
Step43: 10.2. Partial Steps
Is Required
Step44: 11. Grid --> Discretisation --> Horizontal
Type of horizontal discretisation scheme in ocean
11.1. Type
Is Required
Step45: 11.2. Staggering
Is Required
Step46: 11.3. Scheme
Is Required
Step47: 12. Timestepping Framework
Ocean Timestepping Framework
12.1. Overview
Is Required
Step48: 12.2. Diurnal Cycle
Is Required
Step49: 13. Timestepping Framework --> Tracers
Properties of tracers time stepping in ocean
13.1. Scheme
Is Required
Step50: 13.2. Time Step
Is Required
Step51: 14. Timestepping Framework --> Baroclinic Dynamics
Baroclinic dynamics in ocean
14.1. Type
Is Required
Step52: 14.2. Scheme
Is Required
Step53: 14.3. Time Step
Is Required
Step54: 15. Timestepping Framework --> Barotropic
Barotropic time stepping in ocean
15.1. Splitting
Is Required
Step55: 15.2. Time Step
Is Required
Step56: 16. Timestepping Framework --> Vertical Physics
Vertical physics time stepping in ocean
16.1. Method
Is Required
Step57: 17. Advection
Ocean advection
17.1. Overview
Is Required
Step58: 18. Advection --> Momentum
Properties of lateral momemtum advection scheme in ocean
18.1. Type
Is Required
Step59: 18.2. Scheme Name
Is Required
Step60: 18.3. ALE
Is Required
Step61: 19. Advection --> Lateral Tracers
Properties of lateral tracer advection scheme in ocean
19.1. Order
Is Required
Step62: 19.2. Flux Limiter
Is Required
Step63: 19.3. Effective Order
Is Required
Step64: 19.4. Name
Is Required
Step65: 19.5. Passive Tracers
Is Required
Step66: 19.6. Passive Tracers Advection
Is Required
Step67: 20. Advection --> Vertical Tracers
Properties of vertical tracer advection scheme in ocean
20.1. Name
Is Required
Step68: 20.2. Flux Limiter
Is Required
Step69: 21. Lateral Physics
Ocean lateral physics
21.1. Overview
Is Required
Step70: 21.2. Scheme
Is Required
Step71: 22. Lateral Physics --> Momentum --> Operator
Properties of lateral physics operator for momentum in ocean
22.1. Direction
Is Required
Step72: 22.2. Order
Is Required
Step73: 22.3. Discretisation
Is Required
Step74: 23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
Properties of eddy viscosity coeff in lateral physics momemtum scheme in the ocean
23.1. Type
Is Required
Step75: 23.2. Constant Coefficient
Is Required
Step76: 23.3. Variable Coefficient
Is Required
Step77: 23.4. Coeff Background
Is Required
Step78: 23.5. Coeff Backscatter
Is Required
Step79: 24. Lateral Physics --> Tracers
Properties of lateral physics for tracers in ocean
24.1. Mesoscale Closure
Is Required
Step80: 24.2. Submesoscale Mixing
Is Required
Step81: 25. Lateral Physics --> Tracers --> Operator
Properties of lateral physics operator for tracers in ocean
25.1. Direction
Is Required
Step82: 25.2. Order
Is Required
Step83: 25.3. Discretisation
Is Required
Step84: 26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
Properties of eddy diffusity coeff in lateral physics tracers scheme in the ocean
26.1. Type
Is Required
Step85: 26.2. Constant Coefficient
Is Required
Step86: 26.3. Variable Coefficient
Is Required
Step87: 26.4. Coeff Background
Is Required
Step88: 26.5. Coeff Backscatter
Is Required
Step89: 27. Lateral Physics --> Tracers --> Eddy Induced Velocity
Properties of eddy induced velocity (EIV) in lateral physics tracers scheme in the ocean
27.1. Type
Is Required
Step90: 27.2. Constant Val
Is Required
Step91: 27.3. Flux Type
Is Required
Step92: 27.4. Added Diffusivity
Is Required
Step93: 28. Vertical Physics
Ocean Vertical Physics
28.1. Overview
Is Required
Step94: 29. Vertical Physics --> Boundary Layer Mixing --> Details
Properties of vertical physics in ocean
29.1. Langmuir Cells Mixing
Is Required
Step95: 30. Vertical Physics --> Boundary Layer Mixing --> Tracers
*Properties of boundary layer (BL) mixing on tracers in the ocean *
30.1. Type
Is Required
Step96: 30.2. Closure Order
Is Required
Step97: 30.3. Constant
Is Required
Step98: 30.4. Background
Is Required
Step99: 31. Vertical Physics --> Boundary Layer Mixing --> Momentum
*Properties of boundary layer (BL) mixing on momentum in the ocean *
31.1. Type
Is Required
Step100: 31.2. Closure Order
Is Required
Step101: 31.3. Constant
Is Required
Step102: 31.4. Background
Is Required
Step103: 32. Vertical Physics --> Interior Mixing --> Details
*Properties of interior mixing in the ocean *
32.1. Convection Type
Is Required
Step104: 32.2. Tide Induced Mixing
Is Required
Step105: 32.3. Double Diffusion
Is Required
Step106: 32.4. Shear Mixing
Is Required
Step107: 33. Vertical Physics --> Interior Mixing --> Tracers
*Properties of interior mixing on tracers in the ocean *
33.1. Type
Is Required
Step108: 33.2. Constant
Is Required
Step109: 33.3. Profile
Is Required
Step110: 33.4. Background
Is Required
Step111: 34. Vertical Physics --> Interior Mixing --> Momentum
*Properties of interior mixing on momentum in the ocean *
34.1. Type
Is Required
Step112: 34.2. Constant
Is Required
Step113: 34.3. Profile
Is Required
Step114: 34.4. Background
Is Required
Step115: 35. Uplow Boundaries --> Free Surface
Properties of free surface in ocean
35.1. Overview
Is Required
Step116: 35.2. Scheme
Is Required
Step117: 35.3. Embeded Seaice
Is Required
Step118: 36. Uplow Boundaries --> Bottom Boundary Layer
Properties of bottom boundary layer in ocean
36.1. Overview
Is Required
Step119: 36.2. Type Of Bbl
Is Required
Step120: 36.3. Lateral Mixing Coef
Is Required
Step121: 36.4. Sill Overflow
Is Required
Step122: 37. Boundary Forcing
Ocean boundary forcing
37.1. Overview
Is Required
Step123: 37.2. Surface Pressure
Is Required
Step124: 37.3. Momentum Flux Correction
Is Required
Step125: 37.4. Tracers Flux Correction
Is Required
Step126: 37.5. Wave Effects
Is Required
Step127: 37.6. River Runoff Budget
Is Required
Step128: 37.7. Geothermal Heating
Is Required
Step129: 38. Boundary Forcing --> Momentum --> Bottom Friction
Properties of momentum bottom friction in ocean
38.1. Type
Is Required
Step130: 39. Boundary Forcing --> Momentum --> Lateral Friction
Properties of momentum lateral friction in ocean
39.1. Type
Is Required
Step131: 40. Boundary Forcing --> Tracers --> Sunlight Penetration
Properties of sunlight penetration scheme in ocean
40.1. Scheme
Is Required
Step132: 40.2. Ocean Colour
Is Required
Step133: 40.3. Extinction Depth
Is Required
Step134: 41. Boundary Forcing --> Tracers --> Fresh Water Forcing
Properties of surface fresh water forcing in ocean
41.1. From Atmopshere
Is Required
Step135: 41.2. From Sea Ice
Is Required
Step136: 41.3. Forced Mode Restoring
Is Required | Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ncc', 'noresm2-hh', 'ocean')
Explanation: ES-DOC CMIP6 Model Properties - Ocean
MIP Era: CMIP6
Institute: NCC
Source ID: NORESM2-HH
Topic: Ocean
Sub-Topics: Timestepping Framework, Advection, Lateral Physics, Vertical Physics, Uplow Boundaries, Boundary Forcing.
Properties: 133 (101 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:24
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
Explanation: Document Authors
Set document authors
End of explanation
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
Explanation: Document Contributors
Specify document contributors
End of explanation
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
Explanation: Document Publication
Specify document publication status
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Seawater Properties
3. Key Properties --> Bathymetry
4. Key Properties --> Nonoceanic Waters
5. Key Properties --> Software Properties
6. Key Properties --> Resolution
7. Key Properties --> Tuning Applied
8. Key Properties --> Conservation
9. Grid
10. Grid --> Discretisation --> Vertical
11. Grid --> Discretisation --> Horizontal
12. Timestepping Framework
13. Timestepping Framework --> Tracers
14. Timestepping Framework --> Baroclinic Dynamics
15. Timestepping Framework --> Barotropic
16. Timestepping Framework --> Vertical Physics
17. Advection
18. Advection --> Momentum
19. Advection --> Lateral Tracers
20. Advection --> Vertical Tracers
21. Lateral Physics
22. Lateral Physics --> Momentum --> Operator
23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
24. Lateral Physics --> Tracers
25. Lateral Physics --> Tracers --> Operator
26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
27. Lateral Physics --> Tracers --> Eddy Induced Velocity
28. Vertical Physics
29. Vertical Physics --> Boundary Layer Mixing --> Details
30. Vertical Physics --> Boundary Layer Mixing --> Tracers
31. Vertical Physics --> Boundary Layer Mixing --> Momentum
32. Vertical Physics --> Interior Mixing --> Details
33. Vertical Physics --> Interior Mixing --> Tracers
34. Vertical Physics --> Interior Mixing --> Momentum
35. Uplow Boundaries --> Free Surface
36. Uplow Boundaries --> Bottom Boundary Layer
37. Boundary Forcing
38. Boundary Forcing --> Momentum --> Bottom Friction
39. Boundary Forcing --> Momentum --> Lateral Friction
40. Boundary Forcing --> Tracers --> Sunlight Penetration
41. Boundary Forcing --> Tracers --> Fresh Water Forcing
1. Key Properties
Ocean key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of ocean model.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of ocean model code (NEMO 3.6, MOM 5.0,...)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OGCM"
# "slab ocean"
# "mixed layer ocean"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 1.3. Model Family
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of ocean model.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Primitive equations"
# "Non-hydrostatic"
# "Boussinesq"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: ENUM Cardinality: 1.N
Basic approximations made in the ocean.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# "Salinity"
# "U-velocity"
# "V-velocity"
# "W-velocity"
# "SSH"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 1.5. Prognostic Variables
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of prognostic variables in the ocean component.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Wright, 1997"
# "Mc Dougall et al."
# "Jackett et al. 2006"
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 2. Key Properties --> Seawater Properties
Physical properties of seawater in ocean
2.1. Eos Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of EOS for sea water
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# TODO - please enter value(s)
Explanation: 2.2. Eos Functional Temp
Is Required: TRUE Type: ENUM Cardinality: 1.1
Temperature used in EOS for sea water
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Practical salinity Sp"
# "Absolute salinity Sa"
# TODO - please enter value(s)
Explanation: 2.3. Eos Functional Salt
Is Required: TRUE Type: ENUM Cardinality: 1.1
Salinity used in EOS for sea water
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pressure (dbars)"
# "Depth (meters)"
# TODO - please enter value(s)
Explanation: 2.4. Eos Functional Depth
Is Required: TRUE Type: ENUM Cardinality: 1.1
Depth or pressure used in EOS for sea water ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 2.5. Ocean Freezing Point
Is Required: TRUE Type: ENUM Cardinality: 1.1
Equation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 2.6. Ocean Specific Heat
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Specific heat in ocean (cpocean) in J/(kg K)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 2.7. Ocean Reference Density
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Boussinesq reference density (rhozero) in kg / m3
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Present day"
# "21000 years BP"
# "6000 years BP"
# "LGM"
# "Pliocene"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 3. Key Properties --> Bathymetry
Properties of bathymetry in ocean
3.1. Reference Dates
Is Required: TRUE Type: ENUM Cardinality: 1.1
Reference date of bathymetry
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 3.2. Type
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the bathymetry fixed in time in the ocean ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 3.3. Ocean Smoothing
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe any smoothing or hand editing of bathymetry in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.source')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 3.4. Source
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe source of bathymetry in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 4. Key Properties --> Nonoceanic Waters
Non oceanic waters treatement in ocean
4.1. Isolated Seas
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how isolated seas is performed
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 4.2. River Mouth
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how river mouth mixing or estuaries specific treatment is performed
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 5. Key Properties --> Software Properties
Software properties of ocean code
5.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 5.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 5.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 6. Key Properties --> Resolution
Resolution in the ocean grid
6.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 6.2. Canonical Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 6.3. Range Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Range of horizontal resolution with spatial details, eg. 50(Equator)-100km or 0.1-0.5 degrees etc.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 6.4. Number Of Horizontal Gridpoints
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 6.5. Number Of Vertical Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of vertical levels resolved on computational grid.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 6.6. Is Adaptive Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Default is False. Set true if grid resolution changes during execution.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 6.7. Thickness Level 1
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Thickness of first surface ocean level (in meters)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 7. Key Properties --> Tuning Applied
Tuning methodology for ocean component
7.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. &Document the relative weight given to climate performance metrics versus process oriented metrics, &and on the possible conflicts with parameterization level tuning. In particular describe any struggle &with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 7.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics of the global mean state used in tuning model/component
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 7.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 7.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics used in tuning model/component
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 8. Key Properties --> Conservation
Conservation in the ocean component
8.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Brief description of conservation methodology
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Enstrophy"
# "Salt"
# "Volume of ocean"
# "Momentum"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 8.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.N
Properties conserved in the ocean by the numerical schemes
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 8.3. Consistency Properties
Is Required: FALSE Type: STRING Cardinality: 0.1
Any additional consistency properties (energy conversion, pressure gradient discretisation, ...)?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 8.4. Corrected Conserved Prognostic Variables
Is Required: FALSE Type: STRING Cardinality: 0.1
Set of variables which are conserved by more than the numerical scheme alone.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 8.5. Was Flux Correction Used
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Does conservation involve flux correction ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 9. Grid
Ocean grid
9.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of grid in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Z-coordinate"
# "Z*-coordinate"
# "S-coordinate"
# "Isopycnic - sigma 0"
# "Isopycnic - sigma 2"
# "Isopycnic - sigma 4"
# "Isopycnic - other"
# "Hybrid / Z+S"
# "Hybrid / Z+isopycnic"
# "Hybrid / other"
# "Pressure referenced (P)"
# "P*"
# "Z**"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 10. Grid --> Discretisation --> Vertical
Properties of vertical discretisation in ocean
10.1. Coordinates
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of vertical coordinates in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 10.2. Partial Steps
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Using partial steps with Z or Z* vertical coordinate in ocean?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Lat-lon"
# "Rotated north pole"
# "Two north poles (ORCA-style)"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 11. Grid --> Discretisation --> Horizontal
Type of horizontal discretisation scheme in ocean
11.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal grid type
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa E-grid"
# "N/a"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 11.2. Staggering
Is Required: FALSE Type: ENUM Cardinality: 0.1
Horizontal grid staggering type
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite difference"
# "Finite volumes"
# "Finite elements"
# "Unstructured grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 11.3. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation scheme in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 12. Timestepping Framework
Ocean Timestepping Framework
12.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of time stepping in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Via coupling"
# "Specific treatment"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 12.2. Diurnal Cycle
Is Required: TRUE Type: ENUM Cardinality: 1.1
Diurnal cycle type
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 13. Timestepping Framework --> Tracers
Properties of tracers time stepping in ocean
13.1. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Tracers time stepping scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 13.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Tracers time step (in seconds)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Preconditioned conjugate gradient"
# "Sub cyling"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 14. Timestepping Framework --> Baroclinic Dynamics
Baroclinic dynamics in ocean
14.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Baroclinic dynamics type
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 14.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Baroclinic dynamics scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 14.3. Time Step
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Baroclinic time step (in seconds)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "split explicit"
# "implicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 15. Timestepping Framework --> Barotropic
Barotropic time stepping in ocean
15.1. Splitting
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time splitting method
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 15.2. Time Step
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Barotropic time step (in seconds)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 16. Timestepping Framework --> Vertical Physics
Vertical physics time stepping in ocean
16.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Details of vertical time stepping in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 17. Advection
Ocean advection
17.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of advection in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flux form"
# "Vector form"
# TODO - please enter value(s)
Explanation: 18. Advection --> Momentum
Properties of lateral momentum advection scheme in ocean
18.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of lateral momentum advection scheme in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 18.2. Scheme Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of ocean momentum advection scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.ALE')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 18.3. ALE
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Using ALE for vertical advection ? (if vertical coordinates are sigma)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 19. Advection --> Lateral Tracers
Properties of lateral tracer advection scheme in ocean
19.1. Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Order of lateral tracer advection scheme in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 19.2. Flux Limiter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Monotonic flux limiter for lateral tracer advection scheme in ocean ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 19.3. Effective Order
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Effective order of limited lateral tracer advection scheme in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 19.4. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Descriptive text for lateral tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ideal age"
# "CFC 11"
# "CFC 12"
# "SF6"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 19.5. Passive Tracers
Is Required: FALSE Type: ENUM Cardinality: 0.N
Passive tracers advected
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 19.6. Passive Tracers Advection
Is Required: FALSE Type: STRING Cardinality: 0.1
Is advection of passive tracers different than active ? if so, describe.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 20. Advection --> Vertical Tracers
Properties of vertical tracer advection scheme in ocean
20.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Descriptive text for vertical tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 20.2. Flux Limiter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Monotonic flux limiter for vertical tracer advection scheme in ocean ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 21. Lateral Physics
Ocean lateral physics
21.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of lateral physics in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Eddy active"
# "Eddy admitting"
# TODO - please enter value(s)
Explanation: 21.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of transient eddy representation in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 22. Lateral Physics --> Momentum --> Operator
Properties of lateral physics operator for momentum in ocean
22.1. Direction
Is Required: TRUE Type: ENUM Cardinality: 1.1
Direction of lateral physics momentum scheme in the ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 22.2. Order
Is Required: TRUE Type: ENUM Cardinality: 1.1
Order of lateral physics momentum scheme in the ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 22.3. Discretisation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Discretisation of lateral physics momentum scheme in the ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
Properties of eddy viscosity coeff in lateral physics momentum scheme in the ocean
23.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Lateral physics momentum eddy viscosity coeff type in the ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 23.2. Constant Coefficient
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant, value of eddy viscosity coeff in lateral physics momentum scheme (in m2/s)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 23.3. Variable Coefficient
Is Required: FALSE Type: STRING Cardinality: 0.1
If space-varying, describe variations of eddy viscosity coeff in lateral physics momentum scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 23.4. Coeff Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe background eddy viscosity coeff in lateral physics momentum scheme (give values in m2/s)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 23.5. Coeff Backscatter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there backscatter in eddy viscosity coeff in lateral physics momentum scheme ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 24. Lateral Physics --> Tracers
Properties of lateral physics for tracers in ocean
24.1. Mesoscale Closure
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there a mesoscale closure in the lateral physics tracers scheme ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 24.2. Submesoscale Mixing
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there a submesoscale mixing parameterisation (i.e Fox-Kemper) in the lateral physics tracers scheme ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 25. Lateral Physics --> Tracers --> Operator
Properties of lateral physics operator for tracers in ocean
25.1. Direction
Is Required: TRUE Type: ENUM Cardinality: 1.1
Direction of lateral physics tracers scheme in the ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 25.2. Order
Is Required: TRUE Type: ENUM Cardinality: 1.1
Order of lateral physics tracers scheme in the ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 25.3. Discretisation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Discretisation of lateral physics tracers scheme in the ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
Properties of eddy diffusity coeff in lateral physics tracers scheme in the ocean
26.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Lateral physics tracers eddy diffusity coeff type in the ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 26.2. Constant Coefficient
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant, value of eddy diffusity coeff in lateral physics tracers scheme (in m2/s)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 26.3. Variable Coefficient
Is Required: FALSE Type: STRING Cardinality: 0.1
If space-varying, describe variations of eddy diffusity coeff in lateral physics tracers scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 26.4. Coeff Background
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Describe background eddy diffusity coeff in lateral physics tracers scheme (give values in m2/s)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 26.5. Coeff Backscatter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there backscatter in eddy diffusity coeff in lateral physics tracers scheme ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "GM"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 27. Lateral Physics --> Tracers --> Eddy Induced Velocity
Properties of eddy induced velocity (EIV) in lateral physics tracers scheme in the ocean
27.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of EIV in lateral physics tracers in the ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 27.2. Constant Val
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If EIV scheme for tracers is constant, specify coefficient value (M2/s)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 27.3. Flux Type
Is Required: TRUE Type: STRING Cardinality: 1.1
Type of EIV flux (advective or skew)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 27.4. Added Diffusivity
Is Required: TRUE Type: STRING Cardinality: 1.1
Type of EIV added diffusivity (constant, flow dependent or none)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 28. Vertical Physics
Ocean Vertical Physics
28.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of vertical physics in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 29. Vertical Physics --> Boundary Layer Mixing --> Details
Properties of vertical physics in ocean
29.1. Langmuir Cells Mixing
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there Langmuir cells mixing in upper ocean ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 30. Vertical Physics --> Boundary Layer Mixing --> Tracers
Properties of boundary layer (BL) mixing on tracers in the ocean
30.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of boundary layer mixing for tracers in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 30.2. Closure Order
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If turbulent BL mixing of tracers, specific order of closure (0, 1, 2.5, 3)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 30.3. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant BL mixing of tracers, specific coefficient (m2/s)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 30.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background BL mixing of tracers coefficient (schema and value in m2/s - may be none)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 31. Vertical Physics --> Boundary Layer Mixing --> Momentum
Properties of boundary layer (BL) mixing on momentum in the ocean
31.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of boundary layer mixing for momentum in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 31.2. Closure Order
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If turbulent BL mixing of momentum, specific order of closure (0, 1, 2.5, 3)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 31.3. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant BL mixing of momentum, specific coefficient (m2/s)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 31.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background BL mixing of momentum coefficient (schema and value in m2/s - may be none)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Non-penetrative convective adjustment"
# "Enhanced vertical diffusion"
# "Included in turbulence closure"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 32. Vertical Physics --> Interior Mixing --> Details
Properties of interior mixing in the ocean
32.1. Convection Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of vertical convection in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 32.2. Tide Induced Mixing
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how tide induced mixing is modelled (barotropic, baroclinic, none)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 32.3. Double Diffusion
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there double diffusion
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 32.4. Shear Mixing
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there interior shear mixing
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 33. Vertical Physics --> Interior Mixing --> Tracers
Properties of interior mixing on tracers in the ocean
33.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of interior mixing for tracers in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 33.2. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant interior mixing of tracers, specific coefficient (m2/s)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 33.3. Profile
Is Required: TRUE Type: STRING Cardinality: 1.1
Is the background interior mixing using a vertical profile for tracers (i.e is NOT constant) ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 33.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background interior mixing of tracers coefficient (schema and value in m2/s - may be none)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 34. Vertical Physics --> Interior Mixing --> Momentum
Properties of interior mixing on momentum in the ocean
34.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of interior mixing for momentum in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 34.2. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant interior mixing of momentum, specific coefficient (m2/s)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 34.3. Profile
Is Required: TRUE Type: STRING Cardinality: 1.1
Is the background interior mixing using a vertical profile for momentum (i.e is NOT constant) ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 34.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background interior mixing of momentum coefficient (schema and value in m2/s - may be none)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 35. Uplow Boundaries --> Free Surface
Properties of free surface in ocean
35.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of free surface in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear implicit"
# "Linear filtered"
# "Linear semi-explicit"
# "Non-linear implicit"
# "Non-linear filtered"
# "Non-linear semi-explicit"
# "Fully explicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 35.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Free surface scheme in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 35.3. Embeded Seaice
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the sea-ice embedded in the ocean model (instead of levitating) ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 36. Uplow Boundaries --> Bottom Boundary Layer
Properties of bottom boundary layer in ocean
36.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of bottom boundary layer in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diffusive"
# "Acvective"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 36.2. Type Of Bbl
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of bottom boundary layer in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 36.3. Lateral Mixing Coef
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If bottom BL is diffusive, specify value of lateral mixing coefficient (in m2/s)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 36.4. Sill Overflow
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe any specific treatment of sill overflows
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 37. Boundary Forcing
Ocean boundary forcing
37.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of boundary forcing in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 37.2. Surface Pressure
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how surface pressure is transmitted to ocean (via sea-ice, nothing specific,...)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 37.3. Momentum Flux Correction
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any type of ocean surface momentum flux correction and, if applicable, how it is applied and where.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 37.4. Tracers Flux Correction
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any type of ocean surface tracers flux correction and, if applicable, how it is applied and where.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 37.5. Wave Effects
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how wave effects are modelled at ocean surface.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 37.6. River Runoff Budget
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how river runoff from land surface is routed to ocean and any global adjustment done.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 37.7. Geothermal Heating
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how geothermal heating is present at ocean bottom.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Non-linear"
# "Non-linear (drag function of speed of tides)"
# "Constant drag coefficient"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 38. Boundary Forcing --> Momentum --> Bottom Friction
Properties of momentum bottom friction in ocean
38.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of momentum bottom friction in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Free-slip"
# "No-slip"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 39. Boundary Forcing --> Momentum --> Lateral Friction
Properties of momentum lateral friction in ocean
39.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of momentum lateral friction in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "1 extinction depth"
# "2 extinction depth"
# "3 extinction depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 40. Boundary Forcing --> Tracers --> Sunlight Penetration
Properties of sunlight penetration scheme in ocean
40.1. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of sunlight penetration scheme in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 40.2. Ocean Colour
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the ocean sunlight penetration scheme ocean colour dependent ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 40.3. Extinction Depth
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe and list extinctions depths for sunlight penetration scheme (if applicable).
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 41. Boundary Forcing --> Tracers --> Fresh Water Forcing
Properties of surface fresh water forcing in ocean
41.1. From Atmopshere
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of surface fresh water forcing from atmos in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Real salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 41.2. From Sea Ice
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of surface fresh water forcing from sea-ice in ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 41.3. Forced Mode Restoring
Is Required: TRUE Type: STRING Cardinality: 1.1
Type of surface salinity restoring in forced mode (OMIP)
End of explanation |
14,515 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Building a Model
Model, Reactions and Metabolites
This simple example demonstrates how to create a model, create a reaction, and then add the reaction to the model.
We'll use the '3OAS140' reaction from the STM_1.0 model
Step1: We need to create metabolites as well. If we were using an existing model, we could use Model.get_by_id to get the appropriate Metabolite objects instead.
Step2: Side note
Step3: The gene_reaction_rule is a boolean representation of the gene requirements for this reaction to be active as described in Schellenberger et al 2011 Nature Protocols 6(9)
Step4: At this point in time, the model is still empty
Step5: We will add the reaction to the model, which will also add all associated metabolites and genes
Step6: We can iterate through the model objects to observe the contents
Step7: Objective
Last we need to set the objective of the model. Here, we just want this to be the maximization of the flux in the single reaction we added and we do this by assigning the reaction's identifier to the objective property of the model.
Step8: The created objective is a symbolic algebraic expression and we can examine it by printing it
Step9: which here shows that the solver will maximize the flux in the forward direction.
Model Validation
For exchange with other tools you can validate and export the model to SBML.
For more information on serialization and available formats see the section "Reading and Writing Models"
Step10: The model is valid with no COBRA or SBML errors or warnings.
Exchanges, Sinks and Demands
Boundary reactions can be added using the model's method add_boundary.
There are three different types of pre-defined boundary reactions
Step11: Boundary reactions are defined on metabolites. First we add two metabolites to the model then
we define the boundary reactions. We add glycogen to the cytosolic compartment c and CO2 to the external compartment e.
Step12: To create a demand reaction instead of a sink use type demand instead of sink.
Information on all boundary reactions is available via the model's property boundary.
Step13: A neat trick to get all metabolic reactions is | Python Code:
from cobra import Model, Reaction, Metabolite
model = Model('example_model')
reaction = Reaction('R_3OAS140')
reaction.name = '3 oxoacyl acyl carrier protein synthase n C140 '
reaction.subsystem = 'Cell Envelope Biosynthesis'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
Explanation: Building a Model
Model, Reactions and Metabolites
This simple example demonstrates how to create a model, create a reaction, and then add the reaction to the model.
We'll use the '3OAS140' reaction from the STM_1.0 model:
1.0 malACP[c] + 1.0 h[c] + 1.0 ddcaACP[c] $\rightarrow$ 1.0 co2[c] + 1.0 ACP[c] + 1.0 3omrsACP[c]
First, create the model and reaction.
End of explanation
ACP_c = Metabolite(
'ACP_c',
formula='C11H21N2O7PRS',
name='acyl-carrier-protein',
compartment='c')
omrsACP_c = Metabolite(
'M3omrsACP_c',
formula='C25H45N2O9PRS',
name='3-Oxotetradecanoyl-acyl-carrier-protein',
compartment='c')
co2_c = Metabolite('co2_c', formula='CO2', name='CO2', compartment='c')
malACP_c = Metabolite(
'malACP_c',
formula='C14H22N2O10PRS',
name='Malonyl-acyl-carrier-protein',
compartment='c')
h_c = Metabolite('h_c', formula='H', name='H', compartment='c')
ddcaACP_c = Metabolite(
'ddcaACP_c',
formula='C23H43N2O8PRS',
name='Dodecanoyl-ACP-n-C120ACP',
compartment='c')
Explanation: We need to create metabolites as well. If we were using an existing model, we could use Model.get_by_id to get the appropriate Metabolite objects instead.
End of explanation
reaction.add_metabolites({
malACP_c: -1.0,
h_c: -1.0,
ddcaACP_c: -1.0,
co2_c: 1.0,
ACP_c: 1.0,
omrsACP_c: 1.0
})
reaction.reaction # This gives a string representation of the reaction
Explanation: Side note: SId
It is highly recommended that the ids for reactions, metabolites and genes are valid SBML identifiers (SId).
SId is a data type derived from the basic XML type string, but with restrictions about the characters
permitted and the sequences in which those characters may appear.
letter ::= ’a’..’z’,’A’..’Z’
digit ::= ’0’..’9’
idChar ::= letter | digit | ’_’
SId ::= ( letter | ’_’ ) idChar*
The main limitation is that ids cannot start with numbers. Using SIds allows serialization to SBML. In addition
features such as code completion and object access via the dot syntax will work in cobrapy.
Adding metabolites to a reaction uses a dictionary of the metabolites and their stoichiometric coefficients. A group of metabolites can be added all at once, or they can be added one at a time.
End of explanation
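# (illustrative sketch, not part of the original notebook)
# A quick check that an identifier respects the SId grammar described above;
# the helper name and regex below are ours, not part of the cobra API.
import re
SID_RE = re.compile(r'^[A-Za-z_][A-Za-z0-9_]*$')
def looks_like_sid(identifier):
    return SID_RE.match(identifier) is not None
print(looks_like_sid('R_3OAS140'))  # True
print(looks_like_sid('3OAS140'))    # False - starts with a digit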
reaction.gene_reaction_rule = '( STM2378 or STM1197 )'
reaction.genes
Explanation: The gene_reaction_rule is a boolean representation of the gene requirements for this reaction to be active as described in Schellenberger et al 2011 Nature Protocols 6(9):1290-307. We will assign the gene reaction rule string, which will automatically create the corresponding gene objects.
End of explanation
print(f'{len(model.reactions)} reactions initially')
print(f'{len(model.metabolites)} metabolites initially')
print(f'{len(model.genes)} genes initially')
Explanation: At this point in time, the model is still empty
End of explanation
model.add_reactions([reaction])
# The objects have been added to the model
print(f'{len(model.reactions)} reactions')
print(f'{len(model.metabolites)} metabolites')
print(f'{len(model.genes)} genes')
Explanation: We will add the reaction to the model, which will also add all associated metabolites and genes
End of explanation
# Iterate through the the objects in the model
print("Reactions")
print("---------")
for x in model.reactions:
print("%s : %s" % (x.id, x.reaction))
print("")
print("Metabolites")
print("-----------")
for x in model.metabolites:
print('%9s : %s' % (x.id, x.formula))
print("")
print("Genes")
print("-----")
for x in model.genes:
associated_ids = (i.id for i in x.reactions)
print("%s is associated with reactions: %s" %
(x.id, "{" + ", ".join(associated_ids) + "}"))
Explanation: We can iterate through the model objects to observe the contents
End of explanation
model.objective = 'R_3OAS140'
Explanation: Objective
Last we need to set the objective of the model. Here, we just want this to be the maximization of the flux in the single reaction we added and we do this by assigning the reaction's identifier to the objective property of the model.
End of explanation
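# (optional sanity check, not in the original notebook)
# With no boundary reactions yet, mass balance forces every flux to zero,
# so optimizing at this point is expected to return an objective value of 0.
solution = model.optimize()
print(solution.objective_value)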
print(model.objective.expression)
print(model.objective.direction)
Explanation: The created objective is a symbolic algebraic expression and we can examine it by printing it
End of explanation
import tempfile
from pprint import pprint
from cobra.io import write_sbml_model, validate_sbml_model
with tempfile.NamedTemporaryFile(suffix='.xml') as f_sbml:
write_sbml_model(model, filename=f_sbml.name)
report = validate_sbml_model(filename=f_sbml.name)
pprint(report)
Explanation: which here shows that the solver will maximize the flux in the forward direction.
Model Validation
For exchange with other tools you can validate and export the model to SBML.
For more information on serialization and available formats see the section "Reading and Writing Models"
End of explanation
print("exchanges", model.exchanges)
print("demands", model.demands)
print("sinks", model.sinks)
Explanation: The model is valid with no COBRA or SBML errors or warnings.
Exchanges, Sinks and Demands
Boundary reactions can be added using the model's method add_boundary.
There are three different types of pre-defined boundary reactions: exchange, demand, and sink reactions. All of them are unbalanced pseudo reactions, that means they fulfill a function for modeling by adding to or removing metabolites from the model system but are not based on real biology. An exchange reaction is a reversible reaction that adds to or removes an extracellular metabolite from the extracellular compartment. A demand reaction is an irreversible reaction that consumes an intracellular metabolite. A sink is similar to an exchange but specifically for intracellular metabolites, i.e., a reversible reaction that adds or removes an intracellular metabolite.
End of explanation
model.add_metabolites([
Metabolite(
'glycogen_c',
name='glycogen',
compartment='c'
),
Metabolite(
'co2_e',
name='CO2',
compartment='e'
),
])
# create exchange reaction
model.add_boundary(model.metabolites.get_by_id("co2_e"), type="exchange")
# create sink reaction
model.add_boundary(model.metabolites.get_by_id("glycogen_c"), type="sink")
# Now we have an additional exchange and sink reaction in the model
print("exchanges", model.exchanges)
print("sinks", model.sinks)
print("demands", model.demands)
Explanation: Boundary reactions are defined on metabolites. First we add two metabolites to the model then
we define the boundary reactions. We add glycogen to the cytosolic compartment c and CO2 to the external compartment e.
End of explanation
# boundary reactions
model.boundary
Explanation: To create a demand reaction instead of a sink use type demand instead of sink.
Information on all boundary reactions is available via the model's property boundary.
End of explanation
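# (hedged sketch, not in the original notebook)
# The same add_boundary call with type="demand" creates an irreversible consumption
# reaction for an intracellular metabolite, e.g. the cytosolic CO2 created earlier.
dm_co2 = model.add_boundary(model.metabolites.get_by_id("co2_c"), type="demand")
print(dm_co2.id, ":", dm_co2.reaction)
print("demands", model.demands)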
# metabolic reactions
set(model.reactions) - set(model.boundary)
Explanation: A neat trick to get all metabolic reactions is
End of explanation |
14,516 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Univariate Data with the Normal Inverse Chi-Square Distribution
One of the simplest examples of data is univariate data
Let's consider a timeseries example
Step1: Let's plot the kernel density estimate of annual lynx trapping
Step2: Our plot suggests there could be three modes in the Lynx data.
In modeling this timeseries, we could assume that the number of lynx trapped in a given year falls into one of $k$ states, which are normally distributed with some unknown mean $\mu_i$ and variance $\sigma^2_i$ for each state
In the case of our Lynx data
$$\forall i \in [1,...,k] \hspace{2mm} p(\text{lynx trapped}| \text{state} = i) \sim \mathcal{N}(\mu_i, \sigma^2_i)$$
Now let's consider demographics data from the Titanic Dataset
The Titanic Dataset contains information about passengers of the Titanic.
Step3: Passenger age and fare are both real valued. Are they related? Let's examine the correlation matrix
Step4: Since the correlation between the two variables is zero, we can model these two real-valued columns independently.
Let's plot the kernel density estimate of each variable
Step5: Given the long tail in the fare price, we might want to model this variable on a log scale
Step6: Again, logfare and age have near zero correlation, so we can again model these two variables independently
Let's see what a kernel density estimate of log fare would look like
Step7: In logspace, passenger fare is multimodal, suggesting that we could model this variable with a normal distribution
If we were to model the passenger list using our Mixture Model, we would have separate likelihoods for logfare and age
$$\forall i \in [1,...,k] \hspace{2mm} p(\text{logfare}|\text{cluster}=i)=\mathcal{N}(\mu_{i,l}, \sigma^2_{i,l})$$
$$\forall i \in [1,...,k] \hspace{2mm} p(\text{age}|\text{cluster}=i)=\mathcal{N}(\mu_{i,a}, \sigma^2_{i,a})$$
Often, real-valued data is assumed to be normally distributed.
To learn the latent variables, $\mu_i$ and $\sigma^2_i$, we would use a normal inverse-chi-square likelihood
The normal inverse-chi-square likelihood is the conjugate univariate normal likelihood in data microscopes. We also have a multivariate normal likelihood, the normal inverse-Wishart likelihood, optimized for multivariate datasets.
It is important to model univariate normal data with this likelihood as it achieves superior performance on univariate data.
In both these examples, we found variables that were amenable to being modeled as univariate normal | Python Code:
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
sns.set_context('talk')
sns.set_style('darkgrid')
lynx = pd.read_csv('https://vincentarelbundock.github.io/Rdatasets/csv/datasets/lynx.csv',
index_col=0)
lynx = lynx.set_index('time')
lynx.head()
lynx.plot(legend=False)
plt.xlabel('Year')
plt.title('Annual Canadian Lynx Trappings 1821-1934')
plt.ylabel('Lynx')
Explanation: Univariate Data with the Normal Inverse Chi-Square Distribution
One of the simplest examples of data is univariate data
Let's consider a timeseries example:
The Annual Canadian Lynx Trappings Dataset as described by Campbell and Walker 1977 contains the number of Lynx trapped near the McKenzie River in the Northwest Territories in Canada between 1821 and 1934.
End of explanation
sns.kdeplot(lynx['lynx'])
plt.title('Kernel Density Estimate of Annual Lynx Trapping')
plt.ylabel('Probability')
plt.xlabel('Number of Lynx')
Explanation: Let's plot the kernel density estimate of annual lynx trapping
End of explanation
ti = sns.load_dataset('titanic')
ti.head()
Explanation: Our plot suggests there could be three modes in the Lynx data.
In modeling this timeseries, we could assume that the number of lynx trapped in a given year falls into one of $k$ states, which are normally distributed with some unknown mean $\mu_i$ and variance $\sigma^2_i$ for each state
In the case of our Lynx data
$$\forall i \in [1,...,k] \hspace{2mm} p(\text{lynx trapped}| \text{state} = i) \sim \mathcal{N}(\mu_i, \sigma^2_i)$$
Now let's consider demographics data from the Titanic Dataset
The Titanic Dataset contains information about passengers of the Titanic.
End of explanation
ti[['age','fare']].dropna().corr()
Explanation: Passenger age and fare are both real valued. Are they related? Let's examine the correlation matrix
End of explanation
sns.kdeplot(ti['age'])
plt.title('Kernel Density Estimate of Passenger Age in the Titanic Dataset')
sns.kdeplot(ti['fare'])
plt.title('Kernel Density Estimate of Passenger Fare in the Titanic Dataset')
Explanation: Since the correlation between the two variables is zero, we can model these two real-valued columns independently.
Let's plot the kernel density estimate of each variable
End of explanation
ti['logfare'] = np.log(ti['fare'])
ti[['age','logfare']].dropna().corr()
Explanation: Given the long tail in the fare price, we might want to model this variable on a log scale:
End of explanation
sns.kdeplot(ti['logfare'])
plt.title('Kernel Density Estimate of Log Passenger Fare in the Titanic Dataset')
Explanation: Again, logfare and age have near zero correlation, so we can again model these two variables independently
Let's see what a kernel density estimate of log fare would look like
End of explanation
from microscopes.models import nich as normal_inverse_chisquared
Explanation: In logspace, passenger fare is multimodal, suggesting that we could model this variable with a normal distribution
If we were to model the passenger list using our Mixture Model, we would have separate likelihoods for logfare and age
$$\forall i \in [1,...,k] \hspace{2mm} p(\text{logfare}|\text{cluster}=i)=\mathcal{N}(\mu_{i,l}, \sigma^2_{i,l})$$
$$\forall i \in [1,...,k] \hspace{2mm} p(\text{age}|\text{cluster}=i)=\mathcal{N}(\mu_{i,a}, \sigma^2_{i,a})$$
Often, real-valued data is assumed to be normally distributed.
To learn the latent variables, $\mu_i$ and $\sigma^2_i$, we would use a normal inverse-chi-square likelihood
The normal inverse-chi-square likelihood is the conjugate univariate normal likelihood in data microscopes. We also have a multivariate normal likelihood, the normal inverse-Wishart likelihood, optimized for multivariate datasets.
It is important to model univariate normal data with this likelihood as it achieves superior performance on univariate data.
In both these examples, we found variables that were amenable to being modeled as univariate normal:
Univariate datasets
Datasets containing real valued variables with near zero correlation
To import our univariate normal inverse-chi-squared likelihood, call:
End of explanation |
14,517 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Homework 1
Google Trends is pretty awesome, except that on the site you cannot do more than overlay plots. Here we'll play with search term data downloaded from Google and draw our own conclusions.
Data from
Step1: 1. Use the "trends.csv" file and csv2rec() to import the data and reproduce this plot
Step2: 2. Determine in which week of each year (for all five search trends including "global_warming") that search term reached its peak. What trends can you spot with any of the terms?
Step3: 3. Which term has the largest scatter about its median value? Which term has the smallest scatter? The scatter around the median value can be found using
Step4: 4. Determine the time lag, in weeks, that maximizes the cross-correlation between "skiing" and "spring break". Try this also for "norad" and "spring break".
<code>numpy</code> has tools for cross-correlations | Python Code:
%pylab inline
Explanation: Homework 1
Google Trends is pretty awesome, except that on the site you cannot do more than overlay plots. Here we'll play with search term data downloaded from Google and draw our own conclusions.
Data from:
https://www.google.com/trends/explore#q=spring%20break%2C%20textbooks%2C%20norad%2C%20skiing%2C%20global%20warming&cmpt=q&tz=Etc%2FGMT%2B4
We will be using numpy and matplotlib to explore the data. Remember you can import all these modules at once using:
End of explanation
# we can import the CSV data as a numpy rec array
from matplotlib.pylab import csv2rec
trends = csv2rec('trends.csv')
plot(trends.week_start, trends.spring_break, label='spring break')
plot(trends.week_start, trends.textbooks, label='textbooks')
plot(trends.week_start, trends.norad, label='norad')
plot(trends.week_start, trends.skiing, label='skiing')
legend()
Explanation: 1. Use the "trends.csv" file and csv2rec() to import the data and reproduce this plot:
<img align='left' src="trends.png">
End of explanation
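# (not part of the original homework) a quick look at the available field names,
# which the cells below access as attributes of the rec array
print trends.dtype.names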
# create vectors of year and week numbers
dates = trends.week_start
yrs = zeros_like(dates)
wks = zeros_like(dates)
for i in range(len(dates)):
yrs[i] = dates[i].year
wks[i] = dates[i].isocalendar()[1]
# For each year, list week numbers corresponding to maximum search values
trend = trends.global_warming
for yr in range(2004,2016):
idx = find(yrs==yr)
    print yr, wks[idx][find(trend[idx] == max(trend[idx]))]
Explanation: 2. Determine in which week of each year (for all five search trends including "global_warming") that search term reached its peak. What trends can you spot with any of the terms?
End of explanation
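# (not in the original notebook) the same peak-week check repeated for all five terms,
# keeping the notebook's Python 2 style; field names follow the rec array columns above
for name in ['spring_break', 'textbooks', 'norad', 'skiing', 'global_warming']:
    print name
    trend = trends[name]
    for yr in range(2004,2016):
        idx = find(yrs==yr)
        print yr, wks[idx][find(trend[idx] == max(trend[idx]))]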
# study scatter about median values
def std_median(datums):
return sqrt( sum( (datums - median(datums))**2 ) )
print "spring break: ",std_median(trends.spring_break)
print "textbooks: ",std_median(trends.textbooks)
print "skiing:",std_median(trends.skiing)
print "norad:",std_median(trends.norad)
print "global warming:",std_median(trends.global_warming)
Explanation: 3. Which term has the largest scatter about its median value? Which term has the smallest scatter? The scatter around the median value can be found using:
$\sigma_{median}^2 = \sum (x_i - {\rm median}(x_i))^2$
End of explanation
result = np.correlate(trends.norad,trends.spring_break,mode='full')
gap = arange(result.size) - result.size/2
plot(gap,result)
print gap[find(result==max(result))]
result = np.correlate(trends.textbooks,trends.spring_break, mode='full')
gap = arange(result.size) - result.size/2
plot(gap,result)
print gap[find(result==max(result))]
Explanation: 4. Determine the time lag, in weeks, that maximizes the cross-correlation between "skiing" and "spring break". Try this also for "norad" and "spring break".
<code>numpy</code> has tools for cross-correlations:
<code>
result = np.correlate(trends.spring_break,trends.spring_break,mode='full')
plot(arange(result.size) - result.size/2,result)
</code>
End of explanation |
14,518 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Run many Batch Normalization experiments using Cloud using ML Engine
Step1: Let’s test how Batch Normalization impacts models of varying depths. We can launch many experiments in parallel using Google Cloud ML Engine. We will fire off 14 jobs with varying hyperparameters | Python Code:
# change these to try this notebook out
BUCKET = 'crawles-sandbox' # change this to your GCP bucket
PROJECT = 'crawles-sandbox' # change this to your GCP project
REGION = 'us-central1'
# Import os environment variables
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
Explanation: Run many Batch Normalization experiments using Cloud ML Engine
End of explanation
!ls mnist_classifier/
!ls mnist_classifier/trainer/
%%bash
submitMLEngineJob() {
gcloud ml-engine jobs submit training $JOBNAME \
--package-path=$(pwd)/mnist_classifier/trainer \
--module-name trainer.task \
--region $REGION \
--staging-bucket=gs://$BUCKET \
--scale-tier=BASIC \
--runtime-version=1.4 \
-- \
--outdir $OUTDIR \
--hidden_units $net \
--num_steps 10 \
$batchNorm
}
# submit for different layer sizes
export PYTHONPATH=${PYTHONPATH}:${PWD}/mnist_classifier
for batchNorm in '' '--use_batch_normalization'
do
net=''
for layer in 500 400 300 200 100 50 25;
do
net=$net$layer
netname=${net//,/_}${batchNorm/--use_batch_normalization/_bn}
echo $netname
JOBNAME=mnist${netname}_$(date -u +%y%m%d_%H%M%S)
OUTDIR=gs://${BUCKET}/mnist_models/mnist_model$netname/trained_model
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
submitMLEngineJob
net=$net,
done
done
Explanation: Let’s test how Batch Normalization impacts models of varying depths. We can launch many experiments in parallel using Google Cloud ML Engine. We will fire off 14 jobs with varying hyperparameters:
With and without Batch Normalization
Varying model depths from 1 hidden layer to 7 hidden layers
We use the tf.estimator API to build a model and deploy it using Cloud ML Engine.
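The trainer package itself (mnist_classifier/trainer/task.py) is not reproduced in this notebook, so below is only a minimal, hypothetical sketch of how its model function might toggle batch normalization; the parameter names (hidden_units, use_batch_normalization) mirror the command-line flags above, but everything else (the input feature key, optimizer, and class count) is an assumption rather than the actual package contents.
<code>
# Hypothetical sketch of the model construction inside trainer/task.py
# (illustrative only; train-mode path shown, EVAL/PREDICT branches omitted).
import tensorflow as tf

def model_fn(features, labels, mode, params):
    training = (mode == tf.estimator.ModeKeys.TRAIN)
    net = features['image']  # assumes flattened 784-dim MNIST pixels
    for units in params['hidden_units']:
        net = tf.layers.dense(net, units)  # linear layer; activation applied after BN
        if params['use_batch_normalization']:
            net = tf.layers.batch_normalization(net, training=training)
        net = tf.nn.relu(net)
    logits = tf.layers.dense(net, 10)
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    # Batch-norm moving statistics live in UPDATE_OPS and must run with the train op.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = tf.train.AdamOptimizer().minimize(
            loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
</code>
The sketch also highlights the one batch-norm-specific pitfall in TF 1.x estimators: forgetting the UPDATE_OPS dependency silently freezes the moving mean and variance.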
End of explanation |
14,519 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Example flow for processing and aggregating stats about committee meeting attendees and protocol parts
See the DataFlows documentation for more details regarding the Flow object and processing functions.
Feel free to modify and commit changes which demonstrate additional functionality or relevant data.
Constants
Step1: Load source data
Step2: Inspect the datapackages which will be loaded
Last command's output log should contain urls to datapackage.json files, open them and check the table schema to see the resource metadata and available fields which you can use in the processing functions.
Check the frictionlessdata docs for more details about the datapackage file format.
Main processing functions
Step3: Run the flow
Step4: Aggregate and print stats | Python Code:
# Limit processing of protocol parts for development
PROCESS_PARTS_LIMIT = 500
# Enable caching of protocol parts data (not efficient, should only be used for local development with sensible PROCESS_PARTS_LIMIT)
PROCESS_PARTS_CACHE = True
# Filter the meetings to be processed, these kwargs are passed along to DataFlows filter_rows processor for meetings resource
MEETINGS_FILTER_ROWS_KWARGS = {'equals': [{'KnessetNum': 20}]}
# Don't use local data - loads everything from knesset data remote storage
# When set to False - also enables caching, so you won't download from remote storage on 2nd run.
USE_DATA = False
Explanation: Example flow for processing and aggregating stats about committee meeting attendees and protocol parts
See the DataFlows documentation for more details regarding the Flow object and processing functions.
Feel free to modify and commit changes which demonstrate additional functionality or relevant data.
Constants
End of explanation
from dataflows import filter_rows, cache
from datapackage_pipelines_knesset.common_flow import load_knesset_data, load_member_names
# Loads a dict containing mapping between knesset member id and the member name
member_names = load_member_names(use_data=USE_DATA)
# define flow steps for loading the source committee meetings data
# the actual loading is done later in the Flow
load_steps = (
load_knesset_data('people/committees/meeting-attendees/datapackage.json', USE_DATA),
filter_rows(**MEETINGS_FILTER_ROWS_KWARGS)
)
if not USE_DATA:
# when loading from URL - enable caching which will skip loading on 2nd run
load_steps = (cache(*load_steps, cache_path='.cache/people-committee-meeting-attendees-knesset-20'),)
Explanation: Load source data
End of explanation
from collections import defaultdict
from dataflows import Flow
stats = defaultdict(int)
member_attended_meetings = defaultdict(int)
def process_meeting_protocol_part(row):
stats['processed parts'] += 1
if row['body'] and 'אנחנו ככנסת צריכים להיות ערוכים' in row['body']:
stats['meetings contain text: we as knesset need to be prepared'] += 1
def process_meeting(row):
stats['total meetings'] += 1
if row['attended_mk_individual_ids']:
for mk_id in row['attended_mk_individual_ids']:
member_attended_meetings[mk_id] += 1
parts_filename = row['parts_parsed_filename']
if parts_filename:
if PROCESS_PARTS_LIMIT and stats['processed parts'] < PROCESS_PARTS_LIMIT:
steps = (load_knesset_data('committees/meeting_protocols_parts/' + parts_filename, USE_DATA),)
if not USE_DATA and PROCESS_PARTS_CACHE:
steps = (cache(*steps, cache_path='.cache/committee-meeting-protocol-parts/' + parts_filename),)
steps += (process_meeting_protocol_part,)
Flow(*steps).process()
process_steps = (process_meeting,)
Explanation: Inspect the datapackages which will be loaded
Last command's output log should contain urls to datapackage.json files, open them and check the table schema to see the resource metadata and available fields which you can use in the processing functions.
Check the frictionlessdata docs for more details about the datapackage file format.
Main processing functions
End of explanation
from dataflows import Flow, dump_to_path
Flow(*load_steps, *process_steps, dump_to_path('data/committee-meeting-attendees-parts')).process()
Explanation: Run the flow
End of explanation
from collections import deque
import yaml
top_attended_member_names = [member_names[mk_id] for mk_id, num_attended in
deque(sorted(member_attended_meetings.items(), key=lambda kv: kv[1]), maxlen=5)]
print('\n')
print('-- top attended members --')
print(top_attended_member_names)
print('\n')
print('-- stats --')
print(yaml.dump(dict(stats), default_flow_style=False, allow_unicode=True))
Explanation: Aggregate and print stats
End of explanation |
14,520 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Lifetime Value prediction for Kaggle Acquire Valued Shoppers Challenge
<table align="left">
<td>
<a target="_blank" href="https
Step1: Global variables
Step2: Data
Download data
Setup kaggle API correctly following https
Step3: Load transaction csv
Step4: Preprocess data
Step5: Load customer-level csv
Step6: We observe a mixture of zero and lognormal distribution of holdout value.
Step7: Make train/eval
Step8: Model
Step9: Train
Step10: Eval
Step11: Gini Coefficient
Step12: Calibration
Step14: Rank Correlation
Step15: All metrics together
Step16: Save | Python Code:
import os
import numpy as np
import pandas as pd
from scipy import stats
import seaborn as sns
from sklearn import model_selection
from sklearn import preprocessing
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
import tensorflow_probability as tfp
import tqdm
from typing import Sequence
# install and import ltv
!pip install -q git+https://github.com/google/lifetime_value
import lifetime_value as ltv
tfd = tfp.distributions
%config InlineBackend.figure_format='retina'
sns.set_style('whitegrid')
pd.options.mode.chained_assignment = None # default='warn'
Explanation: Lifetime Value prediction for Kaggle Acquire Valued Customer Challenge
<table align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/google/lifetime_value/blob/master/notebooks/kaggle_acquire_valued_shoppers_challenge/regression.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/google/lifetime_value/blob/master/notebooks/kaggle_acquire_valued_shoppers_challenge/regression.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
End of explanation
COMPANY = '103600030' # @param { isTemplate: true, type: 'string'}
LOSS = 'ziln' # @param { isTemplate: true, type: 'string'} ['mse', 'ziln']
MODEL = 'dnn' # @param { isTemplate: true, type: 'string'} ['linear', 'dnn']
LEARNING_RATE = 0.0002 # @param { isTemplate: true}
EPOCHS = 400 # @param { isTemplate: true, type: 'integer'}
OUTPUT_CSV_FOLDER = '/tmp/lifetime-value/kaggle_acquire_valued_shoppers_challenge/result' # @param { isTemplate: true, type: 'string'}
CATEGORICAL_FEATURES = ['chain', 'dept', 'category', 'brand', 'productmeasure']
NUMERIC_FEATURES = ['log_calibration_value']
ALL_FEATURES = CATEGORICAL_FEATURES + NUMERIC_FEATURES
Explanation: Global variables
End of explanation
%%shell
if [ -e /tmp/lifetime-value/acquire-valued-shoppers-challenge/transactions.csv ]
then
echo "File already exists, no need to download."
else
rm -rf /tmp/lifetime-value/acquire-valued-shoppers-challenge
mkdir -p /tmp/lifetime-value/acquire-valued-shoppers-challenge
cd /tmp/lifetime-value/acquire-valued-shoppers-challenge
kaggle competitions download -c acquire-valued-shoppers-challenge
echo "Unzip file. This may take 10 min."
gunzip transactions.csv.gz
fi
Explanation: Data
Download data
Setup kaggle API correctly following https://www.kaggle.com/docs/api
%%shell
mkdir ~/.kaggle
echo \{\"username\":\"{your kaggle username}\",\"key\":\"{your kaggle api key}\"\} > ~/.kaggle/kaggle.json
pip install kaggle
End of explanation
def load_transaction_data(company):
all_data_filename = '/tmp/lifetime-value/acquire-valued-shoppers-challenge/transactions.csv'
one_company_data_filename = (
'/tmp/lifetime-value/acquire-valued-shoppers-challenge/transactions_company_{}.csv'
.format(COMPANY))
if os.path.isfile(one_company_data_filename):
df = pd.read_csv(one_company_data_filename)
else:
data_list = []
chunksize = 10**6
# 350 iterations
for chunk in tqdm.tqdm(pd.read_csv(all_data_filename, chunksize=chunksize)):
data_list.append(chunk.query("company=='{}'".format(company)))
df = pd.concat(data_list, axis=0)
df.to_csv(one_company_data_filename, index=None)
return df
Explanation: Load transaction csv
End of explanation
def preprocess(df):
df = df.query('purchaseamount>0')
df['date'] = pd.to_datetime(df['date'], format='%Y-%m-%d')
df['start_date'] = df.groupby('id')['date'].transform('min')
# Compute calibration values
calibration_value = (
df.query('date==start_date').groupby('id')
['purchaseamount'].sum().reset_index())
calibration_value.columns = ['id', 'calibration_value']
# Compute holdout values
one_year_holdout_window_mask = (
(df['date'] > df['start_date']) &
(df['date'] <= df['start_date'] + np.timedelta64(365, 'D')))
holdout_value = (
df[one_year_holdout_window_mask].groupby('id')
['purchaseamount'].sum().reset_index())
holdout_value.columns = ['id', 'holdout_value']
# Compute calibration attributes
calibration_attributes = (
df.query('date==start_date').sort_values(
'purchaseamount', ascending=False).groupby('id')[[
'chain', 'dept', 'category', 'brand', 'productmeasure'
]].first().reset_index())
# Merge dataframes
customer_level_data = (
calibration_value.merge(calibration_attributes, how='left',
on='id').merge(
holdout_value, how='left', on='id'))
customer_level_data['holdout_value'] = (
customer_level_data['holdout_value'].fillna(0.))
customer_level_data[CATEGORICAL_FEATURES] = (
customer_level_data[CATEGORICAL_FEATURES].fillna('UNKNOWN'))
# Specify data types
customer_level_data['log_calibration_value'] = (
np.log(customer_level_data['calibration_value']).astype('float32'))
customer_level_data['chain'] = (
customer_level_data['chain'].astype('category'))
customer_level_data['dept'] = (customer_level_data['dept'].astype('category'))
customer_level_data['brand'] = (
customer_level_data['brand'].astype('category'))
customer_level_data['category'] = (
customer_level_data['category'].astype('category'))
customer_level_data['label'] = (
customer_level_data['holdout_value'].astype('float32'))
return customer_level_data
Explanation: Preprocess data
End of explanation
def load_customer_level_csv(company):
customer_level_data_file = (
'/tmp/lifetime-value/acquire-valued-shoppers-challenge/customer_level_data_company_{}.csv'
.format(company))
if os.path.isfile(customer_level_data_file):
customer_level_data = pd.read_csv(customer_level_data_file)
else:
customer_level_data = preprocess(load_transaction_data(company))
for cat_col in CATEGORICAL_FEATURES:
customer_level_data[cat_col] = (
customer_level_data[cat_col].astype('category'))
for num_col in [
'log_calibration_value', 'calibration_value', 'holdout_value'
]:
customer_level_data[num_col] = (
customer_level_data[num_col].astype('float32'))
return customer_level_data
# Processes data. 350 iteration in total. May take 10min.
customer_level_data = load_customer_level_csv(COMPANY)
Explanation: Load customer-level csv
End of explanation
customer_level_data.label.apply(np.log1p).hist(bins=50)
Explanation: We observe a mixture of zero and lognormal distribution of holdout value.
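This mixture is what the zero-inflated lognormal (ZILN) loss used below is designed for, and it is why the 'ziln' model emits three outputs per example. Roughly — the exact parameterization inside the lifetime_value library is authoritative, so treat the transforms here as assumptions — the three logits $(a, b, c)$ are mapped to a purchase probability $p = \mathrm{sigmoid}(a)$, a location $\mu = b$ and a scale $s = \mathrm{softplus}(c)$, and the loss is the negative log-likelihood
$\mathcal{L}(y) = -\,\mathbb{1}[y=0]\log(1-p) \;-\; \mathbb{1}[y>0]\big(\log p + \log \mathrm{LognormalPDF}(y;\mu,s)\big)$
with point prediction $\hat{y} = p\,\exp(\mu + s^2/2)$, which is what zero_inflated_lognormal_pred returns in the evaluation section further down.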
End of explanation
def linear_split(df):
# get_dummies preserves numeric features.
x = pd.get_dummies(df[ALL_FEATURES], drop_first=True).astype('float32').values
y = df['label'].values
y0 = df['calibration_value'].values
x_train, x_eval, y_train, y_eval, y0_train, y0_eval = (
model_selection.train_test_split(
x, y, y0, test_size=0.2, random_state=123))
return x_train, x_eval, y_train, y_eval, y0_eval
def dnn_split(df):
for key in CATEGORICAL_FEATURES:
encoder = preprocessing.LabelEncoder()
df[key] = encoder.fit_transform(df[key])
y0 = df['calibration_value'].values
df_train, df_eval, y0_train, y0_eval = model_selection.train_test_split(
df, y0, test_size=0.2, random_state=123)
def feature_dict(df):
features = {k: v.values for k, v in dict(df[CATEGORICAL_FEATURES]).items()}
features['numeric'] = df[NUMERIC_FEATURES].values
return features
x_train, y_train = feature_dict(df_train), df_train['label'].values
x_eval, y_eval = feature_dict(df_eval), df_eval['label'].values
return x_train, x_eval, y_train, y_eval, y0_eval
Explanation: Make train/eval
End of explanation
def linear_model(output_units):
return tf.keras.experimental.LinearModel(output_units)
def embedding_dim(x):
return int(x**.25) + 1
def embedding_layer(vocab_size):
return tf.keras.Sequential([
tf.keras.layers.Embedding(
input_dim=vocab_size,
output_dim=embedding_dim(vocab_size),
input_length=1),
tf.keras.layers.Flatten(),
])
def dnn_model(output_units, df):
numeric_input = tf.keras.layers.Input(
shape=(len(NUMERIC_FEATURES),), name='numeric')
embedding_inputs = [
tf.keras.layers.Input(shape=(1,), name=key, dtype=np.int64)
for key in CATEGORICAL_FEATURES
]
embedding_outputs = [
embedding_layer(vocab_size=df[key].nunique())(input)
for key, input in zip(CATEGORICAL_FEATURES, embedding_inputs)
]
deep_input = tf.keras.layers.concatenate([numeric_input] + embedding_outputs)
deep_model = tf.keras.Sequential([
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(32, activation='relu'),
tf.keras.layers.Dense(output_units),
])
return tf.keras.Model(
inputs=[numeric_input] + embedding_inputs, outputs=deep_model(deep_input))
Explanation: Model
End of explanation
if LOSS == 'mse':
loss = keras.losses.MeanSquaredError()
output_units = 1
if LOSS == 'ziln':
loss = ltv.zero_inflated_lognormal_loss
output_units = 3
if MODEL == 'linear':
x_train, x_eval, y_train, y_eval, y0_eval = linear_split(customer_level_data)
model = linear_model(output_units)
if MODEL == 'dnn':
x_train, x_eval, y_train, y_eval, y0_eval = dnn_split(customer_level_data)
model = dnn_model(output_units, customer_level_data)
model.compile(loss=loss, optimizer=keras.optimizers.Adam(lr=LEARNING_RATE))
callbacks = [
tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', min_lr=1e-6),
tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10),
]
history = model.fit(
x=x_train,
y=y_train,
batch_size=1024,
epochs=EPOCHS,
verbose=2,
callbacks=callbacks,
validation_data=(x_eval, y_eval)).history
pd.DataFrame(history)[['loss', 'val_loss']][2:].plot()
Explanation: Train
End of explanation
if LOSS == 'mse':
y_pred = model.predict(x=x_eval, batch_size=1024).flatten()
if LOSS == 'ziln':
logits = model.predict(x=x_eval, batch_size=1024)
y_pred = ltv.zero_inflated_lognormal_pred(logits).numpy().flatten()
df_pred = pd.DataFrame({
'y_true': y_eval,
'y_pred': y_pred,
})
df_pred.head(10)
Explanation: Eval
End of explanation
gain = pd.DataFrame({
'lorenz': ltv.cumulative_true(y_eval, y_eval),
'baseline': ltv.cumulative_true(y_eval, y0_eval),
'model': ltv.cumulative_true(y_eval, y_pred),
})
num_customers = np.float32(gain.shape[0])
gain['cumulative_customer'] = (np.arange(num_customers) + 1.) / num_customers
ax = gain[[
'cumulative_customer',
'lorenz',
'baseline',
'model',
]].plot(
x='cumulative_customer', figsize=(8, 5), legend=True)
ax.legend(['Groundtruth', 'Baseline', 'Model'], loc='upper left')
ax.set_xlabel('Cumulative Fraction of Customers')
ax.set_xticks(np.arange(0, 1.1, 0.1))
ax.set_xlim((0, 1.))
ax.set_ylabel('Cumulative Fraction of Total Lifetime Value')
ax.set_yticks(np.arange(0, 1.1, 0.1))
ax.set_ylim((0, 1.05))
ax.set_title('Gain Chart')
gini = ltv.gini_from_gain(gain[['lorenz', 'baseline', 'model']])
gini
Explanation: Gini Coefficient
End of explanation
df_decile = ltv.decile_stats(y_eval, y_pred)
df_decile
ax = df_decile[['label_mean', 'pred_mean']].plot.bar(rot=0)
ax.set_title('Decile Chart')
ax.set_xlabel('Prediction bucket')
ax.set_ylabel('Average bucket value')
ax.legend(['Label', 'Prediction'], loc='upper left')
Explanation: Calibration
End of explanation
def spearmanr(x1: Sequence[float], x2: Sequence[float]) -> float:
  """Calculates spearmanr rank correlation coefficient.

  See https://docs.scipy.org/doc/scipy/reference/stats.html.

  Args:
    x1: 1D array_like.
    x2: 1D array_like.

  Returns:
    correlation: float.
  """
  return stats.spearmanr(x1, x2, nan_policy='raise')[0]
spearman_corr = spearmanr(y_eval, y_pred)
spearman_corr
Explanation: Rank Correlation
End of explanation
df_metrics = pd.DataFrame(
{
'company': COMPANY,
'model': MODEL,
'loss': LOSS,
'label_mean': y_eval.mean(),
'pred_mean': y_pred.mean(),
'label_positive': np.mean(y_eval > 0),
'decile_mape': df_decile['decile_mape'].mean(),
'baseline_gini': gini['normalized'][1],
'gini': gini['normalized'][2],
'spearman_corr': spearman_corr,
},
index=[0])
df_metrics[[
'company',
'model',
'loss',
'label_mean',
'pred_mean',
'label_positive',
'decile_mape',
'baseline_gini',
'gini',
'spearman_corr',
]]
Explanation: All metrics together
End of explanation
output_path = os.path.join(OUTPUT_CSV_FOLDER, COMPANY)
if not os.path.isdir(output_path):
os.makedirs(output_path)
output_file = os.path.join(output_path,
'{}_regression_{}.csv'.format(MODEL, LOSS))
df_metrics.to_csv(output_file, index=False)
Explanation: Save
End of explanation |
14,521 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Goal
Questions
How is incorporator identification accuracy affected by the percent isotope incorporation of taxa?
How variable is sensitivity depending on model stochasticity
Each simulation has different taxa as incorporators; the incorporators therefore differ in GC content and abundance between simulations
Method
Using genome dataset created in the "dataset" notebook
Simulates isotope dilution or short incubations
Method
25% taxa incorporate
incorporation % same for all incorporators
incorporation % treatments
Step1: Init
Step2: Creating input files (eg., fragments & communities)
Simulating fragments
Step3: Converting to kde object
Step4: Adding diffusion
Step5: Running nestly
Step6: R analysis
Step7: Analyzing the data | Python Code:
workDir = '/home/nick/notebook/SIPSim/dev/bac_genome1210/'
buildDir = os.path.join(workDir, 'percIncorpUnifRep')
genomeDir = '/home/nick/notebook/SIPSim/dev/bac_genome1210/genomes/'
R_dir = '/home/nick/notebook/SIPSim/lib/R/'
Explanation: Goal
Questions
How is incorporator identification accuracy affected by the percent isotope incorporation of taxa?
How variable is sensitivity depending on model stochasticity
Each simulation has different taxa as incorporators; the incorporators therefore differ in GC content and abundance between simulations
Method
Using genome dataset created in the "dataset" notebook
Simulates isotope dilution or short incubations
Method
25% taxa incorporate
incorporation % same for all incorporators
incorporation % treatments: 0, 5, 10, 25, 50
n-replicates = 10
Total treatments: 50
User variables
End of explanation
import glob
from os.path import abspath
import nestly
from IPython.display import Image, display
%load_ext rpy2.ipython
%%R
library(ggplot2)
library(dplyr)
library(tidyr)
library(gridExtra)
if not os.path.isdir(buildDir):
os.makedirs(buildDir)
Explanation: Init
End of explanation
!cd $buildDir; \
SIPSim fragments \
$genomeDir/genome_index.txt \
--fp $genomeDir \
--fr ../../515F-806R.fna \
--fld skewed-normal,9000,2500,-5 \
--flr None,None \
--nf 10000 \
--np 24 \
2> ampFrags.log \
> ampFrags.pkl
Explanation: Creating input files (eg., fragments & communities)
Simulating fragments
End of explanation
!cd $buildDir; \
SIPSim fragment_kde \
ampFrags.pkl \
> ampFrags_kde.pkl
Explanation: Converting to kde object
End of explanation
!cd $buildDir; \
SIPSim diffusion \
ampFrags_kde.pkl \
--np 24 \
> ampFrags_kde_dif.pkl
Explanation: Adding diffusion
End of explanation
# building tree structure
nest = nestly.Nest()
## varying params
nest.add('rep', range(1,11))
nest.add('percIncorp', [10, 25, 50])
## set params
nest.add('np_many', [24], create_dir=False)
nest.add('np_few', [8], create_dir=False)
nest.add('percTaxa', [25], create_dir=False)
nest.add('abs', ['1e10'], create_dir=False)
#nest.add('subsample', [20000], create_dir=False)
nest.add('subsample_mean', [30000], create_dir=False)
nest.add('subsample_scale', [5000], create_dir=False)
nest.add('BD_min', [1.71], create_dir=False)
nest.add('BD_max', [1.75], create_dir=False)
nest.add('padj', [0.1], create_dir=False)
nest.add('log2', [0.25], create_dir=False)
nest.add('topTaxaToPlot', [100], create_dir=False)
## input/output files
nest.add('buildDir', [buildDir], create_dir=False)
nest.add('frag_file', ['ampFrags_kde_dif'], create_dir=False)
nest.add('comm_file', ['comm.txt'], create_dir=False)
nest.add('genome_index', [os.path.join(genomeDir, 'genome_index.txt')], create_dir=False)
nest.add('R_dir', [R_dir], create_dir=False)
# building directory tree
nest.build(buildDir)
bashFile = os.path.join(buildDir, 'SIPSimRun.sh')
%%writefile $bashFile
#!/bin/bash
# symlinking input files
ln -s {buildDir}/{frag_file}.pkl {frag_file}.pkl
# Creating a community file
SIPSim communities \
{genome_index} \
--n_comm 2 \
> comm.txt
# simulating gradient fractions
SIPSim gradient_fractions \
{comm_file} \
> fracs.txt
# making incorp file
SIPSim incorpConfigExample \
--percTaxa {percTaxa} \
--percIncorpUnif {percIncorp} \
> {percTaxa}_{percIncorp}.config
# adding isotope incorporation to BD distribution
SIPSim isotope_incorp \
{frag_file}.pkl \
{percTaxa}_{percIncorp}.config \
--comm {comm_file} \
--np {np_many} \
> {frag_file}_incorp.pkl
# calculating BD shift from isotope incorporation
SIPSim BD_shift \
{frag_file}.pkl \
{frag_file}_incorp.pkl \
--np {np_few} \
> {frag_file}_incorp_BD-shift.txt
# simulating an OTU table
SIPSim OTU_table \
{frag_file}_incorp.pkl \
{comm_file} \
fracs.txt \
--abs {abs} \
--np {np_few} \
> OTU_n2_abs{abs}.txt
# subsampling from the OTU table (simulating sequencing of the DNA pool)
SIPSim OTU_subsample \
--dist normal \
--dist_params loc:{subsample_mean},scale:{subsample_scale} \
OTU_n2_abs{abs}.txt \
> OTU_n2_abs{abs}_sub-norm.txt
# making a wide table
SIPSim OTU_wideLong -w \
OTU_n2_abs{abs}_sub-norm.txt \
> OTU_n2_abs{abs}_sub-norm_w.txt
# making metadata (phyloseq: sample_data)
SIPSim OTU_sampleData \
OTU_n2_abs{abs}_sub-norm.txt \
> OTU_n2_abs{abs}_sub-norm_meta.txt
!chmod 775 $bashFile
!cd $workDir; \
nestrun -j 1 --template-file $bashFile -d percIncorpUnifRep --log-file log.txt
Explanation: Running nestly
End of explanation
%%writefile $bashFile
#!/bin/bash
#-- R analysis --#
export PATH={R_dir}:$PATH
# plotting taxon abundances
OTU_taxonAbund.r \
OTU_n2_abs{abs}.txt \
-r {topTaxaToPlot} \
-o OTU_n2_abs{abs}
# plotting taxon abundances
OTU_taxonAbund.r \
OTU_n2_abs{abs}_sub-norm.txt \
-r {topTaxaToPlot} \
-o OTU_n2_abs{abs}_subsub-norm
# running DeSeq2 and making confusion matrix on predicting incorporators
## making phyloseq object from OTU table
phyloseq_make.r \
OTU_n2_abs{abs}_sub-norm_w.txt \
-s OTU_n2_abs{abs}_sub-norm_meta.txt \
> OTU_n2_abs{abs}_sub-norm.physeq
## filtering phyloseq object to just taxa/samples of interest
phyloseq_edit.r \
OTU_n2_abs{abs}_sub-norm.physeq \
--BD_min {BD_min} \
--BD_max {BD_max} \
> OTU_n2_abs{abs}_sub-norm_filt.physeq
## making ordination
phyloseq_ordination.r \
OTU_n2_abs{abs}_sub-norm_filt.physeq \
OTU_n2_abs{abs}_sub-norm_bray-NMDS.pdf
## DESeq2
phyloseq_DESeq2.r \
OTU_n2_abs{abs}_sub-norm_filt.physeq \
--log2 {log2} \
--hypo greater \
> OTU_n2_abs{abs}_sub-norm_DESeq2
## Confusion matrix
DESeq2_confuseMtx.r \
{frag_file}_incorp_BD-shift.txt \
OTU_n2_abs{abs}_sub-norm_DESeq2 \
--padj {padj}
!chmod 775 $bashFile
!cd $workDir; \
nestrun -j 30 --template-file $bashFile -d percIncorpUnifRep --log-file logR.txt
# aggregating confusion matrix data
## table
!cd $workDir; \
nestagg delim \
-d percIncorpUnifRep \
-k percIncorp,rep \
-o ./percIncorpUnifRep/DESeq2-cMtx_table.csv \
DESeq2-cMtx_table.csv
## overall
!cd $workDir; \
nestagg delim \
-d percIncorpUnifRep\
-k percIncorp,rep \
-o ./percIncorpUnifRep/DESeq2-cMtx_overall.csv \
DESeq2-cMtx_overall.csv
## byClass
!cd $workDir; \
nestagg delim \
-d percIncorpUnifRep \
-k percIncorp,rep \
-o ./percIncorpUnifRep/DESeq2-cMtx_byClass.csv \
DESeq2-cMtx_byClass.csv
Explanation: R analysis
End of explanation
%%R -i workDir
setwd(workDir)
byClass = read.csv('./percIncorpUnifRep/DESeq2-cMtx_byClass.csv')
byClass %>% head
%%R -w 500 -h 350
col2keep = c('Balanced Accuracy', 'Sensitivity','Specificity')
byClass.f = byClass %>%
filter(X %in% col2keep) %>%
mutate(percIncorp = as.character(percIncorp))
ggplot(byClass.f, aes(X, byClass, fill=percIncorp)) +
geom_boxplot(position='dodge') +
labs(y='Value') +
theme(
text = element_text(size=16),
axis.title.x = element_blank()
)
Explanation: Analyzing the data
End of explanation |
14,522 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Compute envelope correlations in volume source space
Compute envelope correlations of orthogonalized activity [1] [2] in source
space using resting state CTF data in a volume source space.
Step1: Here we do some things in the name of speed, such as crop (which will
hurt SNR) and downsample. Then we compute SSP projectors and apply them.
Step2: Now we band-pass filter our data and create epochs.
Step3: Compute the forward and inverse
Step4: Compute label time series and do envelope correlation
Step5: Compute the degree and plot it | Python Code:
# Authors: Eric Larson <[email protected]>
# Sheraz Khan <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import mne
from mne.beamformer import make_lcmv, apply_lcmv_epochs
from mne.connectivity import envelope_correlation
from mne.preprocessing import compute_proj_ecg, compute_proj_eog
data_path = mne.datasets.brainstorm.bst_resting.data_path()
subjects_dir = op.join(data_path, 'subjects')
subject = 'bst_resting'
trans = op.join(data_path, 'MEG', 'bst_resting', 'bst_resting-trans.fif')
bem = op.join(subjects_dir, subject, 'bem', subject + '-5120-bem-sol.fif')
raw_fname = op.join(data_path, 'MEG', 'bst_resting',
'subj002_spontaneous_20111102_01_AUX.ds')
crop_to = 60.
Explanation: Compute envelope correlations in volume source space
Compute envelope correlations of orthogonalized activity [1] [2] in source
space using resting state CTF data in a volume source space.
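As a rough reminder of what the orthogonalization step does (following the cited references; the implementation in mne.connectivity.envelope_correlation is authoritative, so the details below are stated only approximately): for each pair of analytic signals $X(t)$ and $Y(t)$, the instantaneously coherent, zero-lag component is removed via $Y_{\perp X}(t) = \mathrm{Im}\big(Y(t)\,\overline{X(t)}/|X(t)|\big)$, the amplitude envelopes $|X(t)|$ and $|Y_{\perp X}(t)|$ are then correlated, and the result is symmetrized over the two orthogonalization directions. This suppresses spurious coupling caused by field spread / source leakage.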
End of explanation
raw = mne.io.read_raw_ctf(raw_fname, verbose='error')
raw.crop(0, crop_to).load_data().pick_types(meg=True, eeg=False).resample(80)
raw.apply_gradient_compensation(3)
projs_ecg, _ = compute_proj_ecg(raw, n_grad=1, n_mag=2)
projs_eog, _ = compute_proj_eog(raw, n_grad=1, n_mag=2, ch_name='MLT31-4407')
raw.info['projs'] += projs_ecg
raw.info['projs'] += projs_eog
raw.apply_proj()
cov = mne.compute_raw_covariance(raw) # compute before band-pass of interest
Explanation: Here we do some things in the name of speed, such as crop (which will
hurt SNR) and downsample. Then we compute SSP projectors and apply them.
End of explanation
raw.filter(14, 30)
events = mne.make_fixed_length_events(raw, duration=5.)
epochs = mne.Epochs(raw, events=events, tmin=0, tmax=5.,
baseline=None, reject=dict(mag=8e-13), preload=True)
del raw
Explanation: Now we band-pass filter our data and create epochs.
End of explanation
# This source space is really far too coarse, but we do this for speed
# considerations here
pos = 15. # 1.5 cm is very broad, done here for speed!
src = mne.setup_volume_source_space('bst_resting', pos, bem=bem,
subjects_dir=subjects_dir, verbose=True)
fwd = mne.make_forward_solution(epochs.info, trans, src, bem)
data_cov = mne.compute_covariance(epochs)
filters = make_lcmv(epochs.info, fwd, data_cov, 0.05, cov,
pick_ori='max-power', weight_norm='nai')
del fwd
Explanation: Compute the forward and inverse
End of explanation
epochs.apply_hilbert() # faster to do in sensor space
stcs = apply_lcmv_epochs(epochs, filters, return_generator=True)
corr = envelope_correlation(stcs, verbose=True)
Explanation: Compute label time series and do envelope correlation
End of explanation
degree = mne.connectivity.degree(corr, 0.15)
stc = mne.VolSourceEstimate(degree, src[0]['vertno'], 0, 1, 'bst_resting')
brain = stc.plot(
src, clim=dict(kind='percent', lims=[75, 85, 95]), colormap='gnuplot',
subjects_dir=subjects_dir, mode='glass_brain')
Explanation: Compute the degree and plot it
End of explanation |
14,523 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Running and evaluating a block algorithm
In this notebook we run a block-wise algorithm on a training data set and evaluate performance.
Setup environment
Step1: Setup plotting
Step2: Load data
set the path
Step3: load and cache the raw data (we only load first 100 time points because we're on a single node)
Step4: load the sources
Step5: estimate the mean
Step6: Run a block algorithm
Step7: estimate score (fraction of matches based on centroid distance)
Step8: estimate overlap and exactness (based on degree of pixel overlap for matching sources) | Python Code:
import numpy as np
from scipy.stats import norm
from thunder import SourceExtraction
from thunder.extraction import OverlapBlockMerger
Explanation: Running and evaluating a block algorithm
In this notebook we run a block-wise algorithm on a training data set and evaluate performance.
Setup environment
End of explanation
import matplotlib.pyplot as plt
%matplotlib inline
from thunder import Colorize
image = Colorize.image
Explanation: Setup plotting
End of explanation
path = 's3://neuro.datasets/challenges/neurofinder/02.00/'
Explanation: Load data
set the path
End of explanation
data = tsc.loadImages(path + 'images', startIdx=0, stopIdx=100)
data.cache()
data.count();
Explanation: load and cache the raw data (we only load first 100 time points because we're on a single node)
End of explanation
truth = tsc.loadSources(path + 'sources/sources.json')
Explanation: load the sources
End of explanation
im = data.mean()
Explanation: estimate the mean
End of explanation
merger = OverlapBlockMerger(0.1)
model = SourceExtraction('nmf', merger=merger, componentsPerBlock=5, percentile=95, minArea=100, maxArea=500)
sources = model.fit(data, size=(32, 32), padding=8)
image(sources.masks(im.shape, base=truth, color='random', outline=True), size=10)
Explanation: Run a block algorithm
End of explanation
recall, precision, score = truth.similarity(sources, metric='distance', minDistance=5)
print('recall: %.2f' % recall)
print('precision: %.2f' % precision)
print('score: %.2f' % score)
Explanation: estimate score (fraction of matches based on centroid distance)
End of explanation
overlap, exactness = tuple(np.nanmean(truth.overlap(sources, method='rates', minDistance=5), axis=0))
print('overlap: %.2f' % overlap)
print('exactness: %.2f' % exactness)
Explanation: estimate overlap and exactness (based on degree of pixel overlap for matching sources)
End of explanation |
14,524 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<table class="ee-notebook-buttons" align="left"><td>
<a target="_blank" href="http
Step1: Obtain a private key file for your service account
You should already have a service account registered to use Earth Engine. If you don't, follow these instructions to get one. Copy the email address of your service account into the following cell. (The service account must already be registered to use Earth Engine). In the following cell, the gsutil command line is used to generate a key file for the service account. The key file will be created on the notebook VM.
Step2: Start an AuthorizedSession and test your credentials
Test the private key by using it to get credentials. Use the credentials to create an authorized session to make HTTP requests. Make a GET request through the session to check that the credentials work.
Step3: Serialize a computation
Before you can send a request to compute something, the computation needs to be put into the Earth Engine expression graph format. The following demonstrates how to obtain the expression graph.
Authenticate to Earth Engine
Get Earth Engine scoped credentials from the service account. Use them to initialize Earth Engine.
Step4: Define a computation
Prototype a simple computation with the client API. Note that the result of the computation is a FeatureCollection.
To check that the computation can succeed without errors, get a value from the first Feature (the mean NDVI in the polygon).
Step5: Serialize the expression graph
This will create an object that represents the Earth Engine expression graph (specifically, an Expression). In general, you should build these with one of the client APIs.
Step6: Send the request
Make a POST request to the computeFeatures endpoint. Note that the request contains the Expression, which is the serialized computation. | Python Code:
# INSERT YOUR PROJECT HERE
PROJECT = 'your-project'
!gcloud auth login --project {PROJECT}
Explanation: <table class="ee-notebook-buttons" align="left"><td>
<a target="_blank" href="http://colab.research.google.com/github/google/earthengine-api/blob/master/python/examples/ipynb/Earth_Engine_REST_API_compute_table.ipynb">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a>
</td><td>
<a target="_blank" href="https://github.com/google/earthengine-api/blob/master/python/examples/ipynb/Earth_Engine_REST_API_compute_table.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td></table>
Table computations with the Earth Engine REST API
Note: The REST API contains new and advanced features that may not be suitable for all users. If you are new to Earth Engine, please get started with the JavaScript guide.
The Earth Engine REST API quickstart shows how to access blocks of pixels from an Earth Engine asset. The compute pixels example demonstrates how to apply a computation to the pixels before obtaining the result. This example demonstrates getting the mean of pixels in each image of an ImageCollection in each feature of a FeatureCollection. Specifically, this is a POST request to the computeFeatures endpoint.
Before you begin
Follow these instructions to:
Apply for Earth Engine
Create a Google Cloud project
Enable the Earth Engine API on the project
Create a service account
Give the service account project level permission to perform Earth Engine computations
Note: To complete this tutorial, you will need a service account that is registered for Earth Engine access. See these instructions to register a service account before proceeding.
Authenticate to Google Cloud
The first thing to do is login so that you can make authenticated requests to Google Cloud. You will set the project at the same time. Follow the instructions in the output to complete the sign in.
End of explanation
# INSERT YOUR SERVICE ACCOUNT HERE
SERVICE_ACCOUNT='[email protected]'
KEY = 'key.json'
!gcloud iam service-accounts keys create {KEY} --iam-account {SERVICE_ACCOUNT}
Explanation: Obtain a private key file for your service account
You should already have a service account registered to use Earth Engine. If you don't, follow these instructions to get one. Copy the email address of your service account into the following cell. (The service account must already be registered to use Earth Engine). In the following cell, the gsutil command line is used to generate a key file for the service account. The key file will be created on the notebook VM.
End of explanation
from google.auth.transport.requests import AuthorizedSession
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_file(KEY)
scoped_credentials = credentials.with_scopes(
['https://www.googleapis.com/auth/cloud-platform'])
session = AuthorizedSession(scoped_credentials)
url = 'https://earthengine.googleapis.com/v1beta/projects/earthengine-public/assets/LANDSAT'
response = session.get(url)
from pprint import pprint
import json
pprint(json.loads(response.content))
Explanation: Start an AuthorizedSession and test your credentials
Test the private key by using it to get credentials. Use the credentials to create an authorized session to make HTTP requests. Make a GET request through the session to check that the credentials work.
End of explanation
import ee
# Get some new credentials since the other ones are cloud scope.
ee_creds = ee.ServiceAccountCredentials(SERVICE_ACCOUNT, KEY)
ee.Initialize(ee_creds)
Explanation: Serialize a computation
Before you can send a request to compute something, the computation needs to be put into the Earth Engine expression graph format. The following demonstrates how to obtain the expression graph.
Authenticate to Earth Engine
Get Earth Engine scoped credentials from the service account. Use them to initialize Earth Engine.
End of explanation
# A collection of polygons.
states = ee.FeatureCollection('TIGER/2018/States')
maine = states.filter(ee.Filter.eq('NAME', 'Maine'))
# Imagery: NDVI vegetation index from MODIS.
band = 'NDVI'
images = ee.ImageCollection('MODIS/006/MOD13Q1').select(band)
image = images.first()
computation = image.reduceRegions(
collection=maine,
reducer=ee.Reducer.mean().setOutputs([band]),
scale=image.projection().nominalScale()
)
# Print the value to test.
print(computation.first().get(band).getInfo())
Explanation: Define a computation
Prototype a simple computation with the client API. Note that the result of the computation is a FeatureCollection.
To check that the computation can succeed without errors, get a value from the first Feature (the mean NDVI in the polygon).
End of explanation
# Serialize the computation.
serialized = ee.serializer.encode(computation)
Explanation: Serialize the expression graph
This will create an object that represents the Earth Engine expression graph (specifically, an Expression). In general, you should build these with one of the client APIs.
End of explanation
import json
url = 'https://earthengine.googleapis.com/v1beta/projects/{}/table:computeFeatures'
response = session.post(
url = url.format(PROJECT),
data = json.dumps({'expression': serialized})
)
import json
pprint(json.loads(response.content))
Explanation: Send the request
Make a POST request to the computeFeatures endpoint. Note that the request contains the Expression, which is the serialized computation.
End of explanation |
14,525 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Vertex client library
Step1: Install the latest GA version of google-cloud-storage library as well.
Step2: Restart the kernel
Once you've installed the Vertex client library and Google cloud-storage, you need to restart the notebook kernel so it can find the packages.
Step3: Before you begin
GPU runtime
Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select Runtime > Change Runtime Type > GPU
Set up your Google Cloud project
The following steps are required, regardless of your notebook environment.
Select or create a Google Cloud project. When you first create an account, you get a $300 free credit towards your compute/storage costs.
Make sure that billing is enabled for your project.
Enable the Vertex APIs and Compute Engine APIs.
The Google Cloud SDK is already installed in Google Cloud Notebook.
Enter your project ID in the cell below. Then run the cell to make sure the
Cloud SDK uses the right project for all the commands in this notebook.
Note
Step4: Region
You can also change the REGION variable, which is used for operations
throughout the rest of this notebook. Below are regions supported for Vertex. We recommend that you choose the region closest to you.
Americas
Step5: Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial.
Step6: Authenticate your Google Cloud account
If you are using Google Cloud Notebook, your environment is already authenticated. Skip this step.
If you are using Colab, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.
Otherwise, follow these steps
Step7: Create a Cloud Storage bucket
The following steps are required, regardless of your notebook environment.
When you submit a custom training job using the Vertex client library, you upload a Python package
containing your training code to a Cloud Storage bucket. Vertex runs
the code from this package. In this tutorial, Vertex also saves the
trained model that results from your job in the same bucket. You can then
create an Endpoint resource based on this output in order to serve
online predictions.
Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
Step8: Only if your bucket doesn't already exist
Step9: Finally, validate access to your Cloud Storage bucket by examining its contents
Step10: Set up variables
Next, set up some variables used throughout the tutorial.
Import libraries and define constants
Import Vertex client library
Import the Vertex client library into our Python environment.
Step11: Vertex constants
Setup up the following constants for Vertex
Step12: CustomJob constants
Set constants unique to CustomJob training
Step13: Hardware Accelerators
Set the hardware accelerators (e.g., GPU), if any, for training and prediction.
Set the variables TRAIN_GPU/TRAIN_NGPU and DEPLOY_GPU/DEPLOY_NGPU to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Tesla K80 GPUs allocated to each VM, you would specify
Step14: Container (Docker) image
Next, we will set the Docker container images for training and prediction
TensorFlow 1.15
gcr.io/cloud-aiplatform/training/tf-cpu.1-15
Step15: Machine Type
Next, set the machine type to use for training and prediction.
Set the variables TRAIN_COMPUTE and DEPLOY_COMPUTE to configure the compute resources for the VMs you will use for for training and prediction.
machine type
n1-standard
Step16: Tutorial
Now you are ready to start creating your own custom model and training for Boston Housing.
Set up clients
The Vertex client library works as a client/server model. On your side (the Python script) you will create a client that sends requests and receives responses from the Vertex server.
You will use different clients in this tutorial for different steps in the workflow. So set them all up upfront.
Model Service for Model resources.
Pipeline Service for training.
Endpoint Service for deployment.
Job Service for batch jobs and custom training.
Prediction Service for serving.
Step17: Train a model
There are two ways you can train a custom model using a container image
Step18: Prepare your disk specification
(optional) Now define the disk specification for your custom training job. This tells Vertex what type and size of disk to provision in each machine instance for the training.
boot_disk_type
Step19: Define the worker pool specification
Next, you define the worker pool specification for your custom training job. The worker pool specification will consist of the following
Step20: Examine the training package
Package layout
Before you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout.
PKG-INFO
README.md
setup.cfg
setup.py
trainer
__init__.py
task.py
The files setup.cfg and setup.py are the instructions for installing the package into the operating environment of the Docker image.
The file trainer/task.py is the Python script for executing the custom training job. Note, when we referred to it in the worker pool specification, we replace the directory slash with a dot (trainer.task) and dropped the file suffix (.py).
Package Assembly
In the following cells, you will assemble the training package.
Step21: Task.py contents
In the next cell, you write the contents of the training script task.py. I won't go into detail, it's just there for you to browse. In summary
Step22: Store training script on your Cloud Storage bucket
Next, you package the training folder into a compressed tar ball, and then store it in your Cloud Storage bucket.
Step23: Train the model using a TrainingPipeline resource
Now start training of your custom training job using a training pipeline on Vertex. To train the your custom model, do the following steps
Step24: Create the training pipeline
Use this helper function create_pipeline, which takes the following parameter
Step25: Now save the unique identifier of the training pipeline you created.
Step26: Get information on a training pipeline
Now get pipeline information for just this training pipeline instance. The helper function gets the job information for just this job by calling the the job client service's get_training_pipeline method, with the following parameter
Step27: Deployment
Training the above model may take upwards of 20 minutes time.
Once your model is done training, you can calculate the actual time it took to train the model by subtracting end_time from start_time. For your model, you will need to know the fully qualified Vertex Model resource identifier, which the pipeline service assigned to it. You can get this from the returned pipeline instance as the field model_to_deploy.name.
Step28: Load the saved model
Your model is stored in a TensorFlow SavedModel format in a Cloud Storage bucket. Now load it from the Cloud Storage bucket, and then you can do some things, like evaluate the model, and do a prediction.
To load, you use the TF.Keras model.load_model() method passing it the Cloud Storage path where the model is saved -- specified by MODEL_DIR.
Step29: Evaluate the model
Now let's find out how good the model is.
Load evaluation data
You will load the Boston Housing test (holdout) data from tf.keras.datasets, using the method load_data(). This will return the dataset as a tuple of two elements. The first element is the training data and the second is the test data. Each element is also a tuple of two elements
Step30: Perform the model evaluation
Now evaluate how well the model in the custom job did.
Step31: Upload the model for serving
Next, you will upload your TF.Keras model from the custom job to Vertex Model service, which will create a Vertex Model resource for your custom model. During upload, you need to define a serving function to convert data to the format your model expects. If you send encoded data to Vertex, your serving function ensures that the data is decoded on the model server before it is passed as input to your model.
How does the serving function work
When you send a request to an online prediction server, the request is received by a HTTP server. The HTTP server extracts the prediction request from the HTTP request content body. The extracted prediction request is forwarded to the serving function. For Google pre-built prediction containers, the request content is passed to the serving function as a tf.string.
The serving function consists of two parts
Step32: Upload the model
Use this helper function upload_model to upload your model, stored in SavedModel format, up to the Model service, which will instantiate a Vertex Model resource instance for your model. Once you've done that, you can use the Model resource instance in the same way as any other Vertex Model resource instance, such as deploying to an Endpoint resource for serving predictions.
The helper function takes the following parameters
Step33: Get Model resource information
Now let's get the model information for just your model. Use this helper function get_model, with the following parameter
Step34: Deploy the Model resource
Now deploy the trained Vertex custom Model resource. This requires two steps
Step35: Now get the unique identifier for the Endpoint resource you created.
Step36: Compute instance scaling
You have several choices on scaling the compute instances for handling your online prediction requests
Step37: Deploy Model resource to the Endpoint resource
Use this helper function deploy_model to deploy the Model resource to the Endpoint resource you created for serving predictions, with the following parameters
Step38: Make a online prediction request
Now do a online prediction to your deployed model.
Get test item
You will use an example out of the test (holdout) portion of the dataset as a test item.
Step39: Send the prediction request
Ok, now you have a test data item. Use this helper function predict_data, which takes the parameters
Step40: Undeploy the Model resource
Now undeploy your Model resource from the serving Endpoint resoure. Use this helper function undeploy_model, which takes the following parameters
Step41: Cleaning up
To clean up all GCP resources used in this project, you can delete the GCP
project you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial | Python Code:
import os
import sys
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
! pip3 install -U google-cloud-aiplatform $USER_FLAG
Explanation: Vertex client library: Custom training tabular regression model with pipeline for online prediction with training pipeline
<table align="left">
<td>
<a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb">
<img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
</a>
</td>
<td>
<a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
</table>
<br/><br/><br/>
Overview
This tutorial demonstrates how to use the Vertex client library for Python to train and deploy a custom tabular regression model for online prediction, using a training pipeline.
Dataset
The dataset used for this tutorial is the Boston Housing Prices dataset. The version of the dataset you will use in this tutorial is built into TensorFlow. The trained model predicts the median price of a house in units of 1K USD.
Objective
In this tutorial, you create a custom model from a Python script in a Google prebuilt Docker container using the Vertex client library, and then do a prediction on the deployed model by sending data. You can alternatively create custom models using gcloud command-line tool or online using Google Cloud Console.
The steps performed include:
Create a Vertex custom job for training a model.
Create a TrainingPipeline resource.
Train a TensorFlow model with the TrainingPipeline resource.
Retrieve and load the model artifacts.
View the model evaluation.
Upload the model as a Vertex Model resource.
Deploy the Model resource to a serving Endpoint resource.
Make a prediction.
Undeploy the Model resource.
Costs
This tutorial uses billable components of Google Cloud (GCP):
Vertex AI
Cloud Storage
Learn about Vertex AI
pricing and Cloud Storage
pricing, and use the Pricing
Calculator
to generate a cost estimate based on your projected usage.
Installation
Install the latest version of Vertex client library.
End of explanation
! pip3 install -U google-cloud-storage $USER_FLAG
Explanation: Install the latest GA version of google-cloud-storage library as well.
End of explanation
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
Explanation: Restart the kernel
Once you've installed the Vertex client library and Google cloud-storage, you need to restart the notebook kernel so it can find the packages.
End of explanation
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
Explanation: Before you begin
GPU runtime
Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select Runtime > Change Runtime Type > GPU
Set up your Google Cloud project
The following steps are required, regardless of your notebook environment.
Select or create a Google Cloud project. When you first create an account, you get a $300 free credit towards your compute/storage costs.
Make sure that billing is enabled for your project.
Enable the Vertex APIs and Compute Engine APIs.
The Google Cloud SDK is already installed in Google Cloud Notebook.
Enter your project ID in the cell below. Then run the cell to make sure the
Cloud SDK uses the right project for all the commands in this notebook.
Note: Jupyter runs lines prefixed with ! as shell commands, and it interpolates Python variables prefixed with $ into these commands.
End of explanation
REGION = "us-central1" # @param {type: "string"}
Explanation: Region
You can also change the REGION variable, which is used for operations
throughout the rest of this notebook. Below are regions supported for Vertex. We recommend that you choose the region closest to you.
Americas: us-central1
Europe: europe-west4
Asia Pacific: asia-east1
You may not use a multi-regional bucket for training with Vertex. Not all regions provide support for all Vertex services. For the latest support per region, see the Vertex locations documentation
End of explanation
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
Explanation: Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial.
End of explanation
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
Explanation: Authenticate your Google Cloud account
If you are using Google Cloud Notebook, your environment is already authenticated. Skip this step.
If you are using Colab, run the cell below and follow the instructions when prompted to authenticate your account via OAuth.
Otherwise, follow these steps:
In the Cloud Console, go to the Create service account key page.
Click Create service account.
In the Service account name field, enter a name, and click Create.
In the Grant this service account access to project section, click the Role drop-down list. Type "Vertex" into the filter box, and select Vertex Administrator. Type "Storage Object Admin" into the filter box, and select Storage Object Admin.
Click Create. A JSON file that contains your key downloads to your local environment.
Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.
End of explanation
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
Explanation: Create a Cloud Storage bucket
The following steps are required, regardless of your notebook environment.
When you submit a custom training job using the Vertex client library, you upload a Python package
containing your training code to a Cloud Storage bucket. Vertex runs
the code from this package. In this tutorial, Vertex also saves the
trained model that results from your job in the same bucket. You can then
create an Endpoint resource based on this output in order to serve
online predictions.
Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
End of explanation
! gsutil mb -l $REGION $BUCKET_NAME
Explanation: Only if your bucket doesn't already exist: Run the following cell to create your Cloud Storage bucket.
End of explanation
! gsutil ls -al $BUCKET_NAME
Explanation: Finally, validate access to your Cloud Storage bucket by examining its contents:
End of explanation
import time
from google.cloud.aiplatform import gapic as aip
from google.protobuf import json_format
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.struct_pb2 import Struct, Value
Explanation: Set up variables
Next, set up some variables used throughout the tutorial.
Import libraries and define constants
Import Vertex client library
Import the Vertex client library into our Python environment.
End of explanation
# API service endpoint
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
# Vertex location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION
Explanation: Vertex constants
Setup up the following constants for Vertex:
API_ENDPOINT: The Vertex API service endpoint for dataset, model, job, pipeline and endpoint services.
PARENT: The Vertex location root path for dataset, model, job, pipeline and endpoint resources.
End of explanation
CUSTOM_TASK_GCS_PATH = (
"gs://google-cloud-aiplatform/schema/trainingjob/definition/custom_task_1.0.0.yaml"
)
Explanation: CustomJob constants
Set constants unique to CustomJob training:
Dataset Training Schemas: Tells the Pipeline resource service the task (e.g., classification) to train the model for.
End of explanation
if os.getenv("IS_TESTING_TRAIN_GPU"):
TRAIN_GPU, TRAIN_NGPU = (
aip.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_TRAIN_GPU")),
)
else:
TRAIN_GPU, TRAIN_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1)
if os.getenv("IS_TESTING_DEPOLY_GPU"):
DEPLOY_GPU, DEPLOY_NGPU = (
aip.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_DEPOLY_GPU")),
)
else:
DEPLOY_GPU, DEPLOY_NGPU = (None, None)
Explanation: Hardware Accelerators
Set the hardware accelerators (e.g., GPU), if any, for training and prediction.
Set the variables TRAIN_GPU/TRAIN_NGPU and DEPLOY_GPU/DEPLOY_NGPU to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Tesla K80 GPUs allocated to each VM, you would specify:
(aip.AcceleratorType.NVIDIA_TESLA_K80, 4)
For GPU, available accelerators include:
- aip.AcceleratorType.NVIDIA_TESLA_K80
- aip.AcceleratorType.NVIDIA_TESLA_P100
- aip.AcceleratorType.NVIDIA_TESLA_P4
- aip.AcceleratorType.NVIDIA_TESLA_T4
- aip.AcceleratorType.NVIDIA_TESLA_V100
Otherwise specify (None, None) to use a container image to run on a CPU.
Note: TF releases before 2.3 with GPU support will fail to load the custom model in this tutorial. This is a known issue, caused by static graph ops generated in the serving function, and it is fixed in TF 2.3. If you encounter this issue with your own custom models, use a container image for TF 2.3 with GPU support.
End of explanation
if os.getenv("IS_TESTING_TF"):
TF = os.getenv("IS_TESTING_TF")
else:
TF = "2-1"
if TF[0] == "2":
if TRAIN_GPU:
TRAIN_VERSION = "tf-gpu.{}".format(TF)
else:
TRAIN_VERSION = "tf-cpu.{}".format(TF)
if DEPLOY_GPU:
DEPLOY_VERSION = "tf2-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf2-cpu.{}".format(TF)
else:
if TRAIN_GPU:
TRAIN_VERSION = "tf-gpu.{}".format(TF)
else:
TRAIN_VERSION = "tf-cpu.{}".format(TF)
if DEPLOY_GPU:
DEPLOY_VERSION = "tf-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf-cpu.{}".format(TF)
TRAIN_IMAGE = "gcr.io/cloud-aiplatform/training/{}:latest".format(TRAIN_VERSION)
DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/{}:latest".format(DEPLOY_VERSION)
print("Training:", TRAIN_IMAGE, TRAIN_GPU, TRAIN_NGPU)
print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU, DEPLOY_NGPU)
Explanation: Container (Docker) image
Next, we will set the Docker container images for training and prediction
TensorFlow 1.15
gcr.io/cloud-aiplatform/training/tf-cpu.1-15:latest
gcr.io/cloud-aiplatform/training/tf-gpu.1-15:latest
TensorFlow 2.1
gcr.io/cloud-aiplatform/training/tf-cpu.2-1:latest
gcr.io/cloud-aiplatform/training/tf-gpu.2-1:latest
TensorFlow 2.2
gcr.io/cloud-aiplatform/training/tf-cpu.2-2:latest
gcr.io/cloud-aiplatform/training/tf-gpu.2-2:latest
TensorFlow 2.3
gcr.io/cloud-aiplatform/training/tf-cpu.2-3:latest
gcr.io/cloud-aiplatform/training/tf-gpu.2-3:latest
TensorFlow 2.4
gcr.io/cloud-aiplatform/training/tf-cpu.2-4:latest
gcr.io/cloud-aiplatform/training/tf-gpu.2-4:latest
XGBoost
gcr.io/cloud-aiplatform/training/xgboost-cpu.1-1
Scikit-learn
gcr.io/cloud-aiplatform/training/scikit-learn-cpu.0-23:latest
Pytorch
gcr.io/cloud-aiplatform/training/pytorch-cpu.1-4:latest
gcr.io/cloud-aiplatform/training/pytorch-cpu.1-5:latest
gcr.io/cloud-aiplatform/training/pytorch-cpu.1-6:latest
gcr.io/cloud-aiplatform/training/pytorch-cpu.1-7:latest
For the latest list, see Pre-built containers for training.
TensorFlow 1.15
gcr.io/cloud-aiplatform/prediction/tf-cpu.1-15:latest
gcr.io/cloud-aiplatform/prediction/tf-gpu.1-15:latest
TensorFlow 2.1
gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-1:latest
gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-1:latest
TensorFlow 2.2
gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-2:latest
gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-2:latest
TensorFlow 2.3
gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-3:latest
gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-3:latest
XGBoost
gcr.io/cloud-aiplatform/prediction/xgboost-cpu.1-2:latest
gcr.io/cloud-aiplatform/prediction/xgboost-cpu.1-1:latest
gcr.io/cloud-aiplatform/prediction/xgboost-cpu.0-90:latest
gcr.io/cloud-aiplatform/prediction/xgboost-cpu.0-82:latest
Scikit-learn
gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-23:latest
gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-22:latest
gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-20:latest
For the latest list, see Pre-built containers for prediction
End of explanation
if os.getenv("IS_TESTING_TRAIN_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Train machine type", TRAIN_COMPUTE)
if os.getenv("IS_TESTING_DEPLOY_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Deploy machine type", DEPLOY_COMPUTE)
Explanation: Machine Type
Next, set the machine type to use for training and prediction.
Set the variables TRAIN_COMPUTE and DEPLOY_COMPUTE to configure the compute resources for the VMs you will use for training and prediction.
machine type
n1-standard: 3.75GB of memory per vCPU.
n1-highmem: 6.5GB of memory per vCPU
n1-highcpu: 0.9 GB of memory per vCPU
vCPUs: one of [2, 4, 8, 16, 32, 64, 96]
Note: The following is not supported for training:
standard: 2 vCPUs
highcpu: 2, 4 and 8 vCPUs
Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs.
End of explanation
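For illustration only, other valid machine type strings follow the same machine-family/vCPU pattern used above; the examples below are not used elsewhere in this tutorial.
# Illustrative alternatives (not executed): built the same way as TRAIN_COMPUTE above.
# "n1-highmem-8"   # 8 vCPUs, 6.5 GB of memory per vCPU
# "n1-highcpu-16"  # 16 vCPUs, 0.9 GB of memory per vCPU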
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}
def create_model_client():
client = aip.ModelServiceClient(client_options=client_options)
return client
def create_pipeline_client():
client = aip.PipelineServiceClient(client_options=client_options)
return client
def create_endpoint_client():
client = aip.EndpointServiceClient(client_options=client_options)
return client
def create_prediction_client():
client = aip.PredictionServiceClient(client_options=client_options)
return client
clients = {}
clients["model"] = create_model_client()
clients["pipeline"] = create_pipeline_client()
clients["endpoint"] = create_endpoint_client()
clients["prediction"] = create_prediction_client()
for client in clients.items():
print(client)
Explanation: Tutorial
Now you are ready to start creating your own custom model and training for Boston Housing.
Set up clients
The Vertex client library works as a client/server model. On your side (the Python script) you will create a client that sends requests and receives responses from the Vertex server.
You will use different clients in this tutorial for different steps in the workflow. So set them all up upfront.
Model Service for Model resources.
Pipeline Service for training.
Endpoint Service for deployment.
Job Service for batch jobs and custom training.
Prediction Service for serving.
End of explanation
if TRAIN_GPU:
machine_spec = {
"machine_type": TRAIN_COMPUTE,
"accelerator_type": TRAIN_GPU,
"accelerator_count": TRAIN_NGPU,
}
else:
machine_spec = {"machine_type": TRAIN_COMPUTE, "accelerator_count": 0}
Explanation: Train a model
There are two ways you can train a custom model using a container image:
Use a Google Cloud prebuilt container. If you use a prebuilt container, you will additionally specify a Python package to install into the container image. This Python package contains your code for training a custom model.
Use your own custom container image. If you use your own container, the container needs to contain your code for training a custom model.
Prepare your custom job specification
Now that your clients are ready, your first step is to create a Job Specification for your custom training job. The job specification will consist of the following:
worker_pool_spec : The specification of the type of machine(s) you will use for training and how many (single or distributed)
python_package_spec : The specification of the Python package to be installed with the pre-built container.
Prepare your machine specification
Now define the machine specification for your custom training job. This tells Vertex what type of machine instance to provision for the training.
- machine_type: The type of GCP instance to provision -- e.g., n1-standard-8.
- accelerator_type: The type, if any, of hardware accelerator. In this tutorial if you previously set the variable TRAIN_GPU != None, you are using a GPU; otherwise you will use a CPU.
- accelerator_count: The number of accelerators.
End of explanation
DISK_TYPE = "pd-ssd" # [ pd-ssd, pd-standard]
DISK_SIZE = 200 # GB
disk_spec = {"boot_disk_type": DISK_TYPE, "boot_disk_size_gb": DISK_SIZE}
Explanation: Prepare your disk specification
(optional) Now define the disk specification for your custom training job. This tells Vertex what type and size of disk to provision in each machine instance for the training.
boot_disk_type: Either SSD or Standard. SSD is faster, and Standard is less expensive. Defaults to SSD.
boot_disk_size_gb: Size of disk in GB.
End of explanation
JOB_NAME = "custom_job_" + TIMESTAMP
MODEL_DIR = "{}/{}".format(BUCKET_NAME, JOB_NAME)
if not TRAIN_NGPU or TRAIN_NGPU < 2:
TRAIN_STRATEGY = "single"
else:
TRAIN_STRATEGY = "mirror"
EPOCHS = 20
STEPS = 100
PARAM_FILE = BUCKET_NAME + "/params.txt"
DIRECT = True
if DIRECT:
CMDARGS = [
"--model-dir=" + MODEL_DIR,
"--epochs=" + str(EPOCHS),
"--steps=" + str(STEPS),
"--distribute=" + TRAIN_STRATEGY,
"--param-file=" + PARAM_FILE,
]
else:
CMDARGS = [
"--epochs=" + str(EPOCHS),
"--steps=" + str(STEPS),
"--distribute=" + TRAIN_STRATEGY,
"--param-file=" + PARAM_FILE,
]
worker_pool_spec = [
{
"replica_count": 1,
"machine_spec": machine_spec,
"disk_spec": disk_spec,
"python_package_spec": {
"executor_image_uri": TRAIN_IMAGE,
"package_uris": [BUCKET_NAME + "/trainer_boston.tar.gz"],
"python_module": "trainer.task",
"args": CMDARGS,
},
}
]
Explanation: Define the worker pool specification
Next, you define the worker pool specification for your custom training job. The worker pool specification will consist of the following:
replica_count: The number of instances to provision of this machine type.
machine_spec: The hardware specification.
disk_spec : (optional) The disk storage specification.
python_package_spec: The Python training package to install on the VM instance(s) and which Python module to invoke, along with command line arguments for the Python module.
Let's dive deeper now into the python package specification:
-executor_image_uri: This is the Docker image which is configured for your custom training job.
-package_uris: This is a list of the locations (URIs) of your Python training packages to install on the provisioned instance. The locations need to be in a Cloud Storage bucket. These can be either individual Python files or a zip (archive) of an entire package. In the latter case, the job service will unzip (unarchive) the contents into the Docker image.
-python_module: The Python module (script) to invoke for running the custom training job. In this example, you will be invoking trainer.task.py -- note that it was not necessary to append the .py suffix.
-args: The command line arguments to pass to the corresponding Python module. In this example, you will be setting:
- "--model-dir=" + MODEL_DIR : The Cloud Storage location where to store the model artifacts. There are two ways to tell the training script where to save the model artifacts:
- direct: You pass the Cloud Storage location as a command line argument to your training script (set variable DIRECT = True), or
- indirect: The service passes the Cloud Storage location as the environment variable AIP_MODEL_DIR to your training script (set variable DIRECT = False). In this case, you tell the service the model artifact location in the job specification.
- "--epochs=" + EPOCHS: The number of epochs for training.
- "--steps=" + STEPS: The number of steps (batches) per epoch.
- "--distribute=" + TRAIN_STRATEGY" : The training distribution strategy to use for single or distributed training.
- "single": single device.
- "mirror": all GPU devices on a single compute instance.
- "multi": all GPU devices on all compute instances.
- "--param-file=" + PARAM_FILE: The Cloud Storage location for storing feature normalization values.
End of explanation
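For reference, here is a minimal, illustrative sketch of the indirect approach from the training script's point of view; it is not an extra cell of this tutorial, and the example path in the comment is hypothetical.
import os
# Hedged sketch: with DIRECT = False, the Vertex training service sets the
# AIP_MODEL_DIR environment variable inside the training container, and the
# script reads it as the location to save model artifacts.
model_dir = os.getenv("AIP_MODEL_DIR")  # e.g. "gs://your-bucket/custom_job_.../model/"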
# Make folder for Python training script
! rm -rf custom
! mkdir custom
# Add package information
! touch custom/README.md
setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0"
! echo "$setup_cfg" > custom/setup.cfg
setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'tensorflow_datasets==1.3.0',\n\n ],\n\n packages=setuptools.find_packages())"
! echo "$setup_py" > custom/setup.py
pkg_info = "Metadata-Version: 1.0\n\nName: Boston Housing tabular regression\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: [email protected]\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex"
! echo "$pkg_info" > custom/PKG-INFO
# Make the training subfolder
! mkdir custom/trainer
! touch custom/trainer/__init__.py
Explanation: Examine the training package
Package layout
Before you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout.
PKG-INFO
README.md
setup.cfg
setup.py
trainer
__init__.py
task.py
The files setup.cfg and setup.py are the instructions for installing the package into the operating environment of the Docker image.
The file trainer/task.py is the Python script for executing the custom training job. Note that when we referred to it in the worker pool specification, we replaced the directory slash with a dot (trainer.task) and dropped the file suffix (.py).
Package Assembly
In the following cells, you will assemble the training package.
End of explanation
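As an optional sanity check (not part of the original tutorial), you can list the assembled files and compare them against the layout described above; after the next cell writes trainer/task.py, the listing should match it exactly.
# Optional: verify the package layout on disk (safe to skip).
! find custom | sort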
%%writefile custom/trainer/task.py
# Single, Mirror and Multi-Machine Distributed Training for Boston Housing
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.python.client import device_lib
import numpy as np
import argparse
import os
import sys
tfds.disable_progress_bar()
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', dest='model_dir',
default=os.getenv('AIP_MODEL_DIR'), type=str, help='Model dir.')
parser.add_argument('--lr', dest='lr',
default=0.001, type=float,
help='Learning rate.')
parser.add_argument('--epochs', dest='epochs',
default=20, type=int,
help='Number of epochs.')
parser.add_argument('--steps', dest='steps',
default=100, type=int,
help='Number of steps per epoch.')
parser.add_argument('--distribute', dest='distribute', type=str, default='single',
help='distributed training strategy')
parser.add_argument('--param-file', dest='param_file',
default='/tmp/param.txt', type=str,
help='Output file for parameters')
args = parser.parse_args()
print('Python Version = {}'.format(sys.version))
print('TensorFlow Version = {}'.format(tf.__version__))
print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))
# Single Machine, single compute device
if args.distribute == 'single':
if tf.test.is_gpu_available():
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
else:
strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
# Single Machine, multiple compute device
elif args.distribute == 'mirror':
strategy = tf.distribute.MirroredStrategy()
# Multiple Machine, multiple compute device
elif args.distribute == 'multi':
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
# Multi-worker configuration
print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync))
def make_dataset():
# Scaling Boston Housing data features
def scale(feature):
max = np.max(feature)
feature = (feature / max).astype(np.float32)
return feature, max
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.boston_housing.load_data(
path="boston_housing.npz", test_split=0.2, seed=113
)
params = []
for _ in range(13):
x_train[_], max = scale(x_train[_])
x_test[_], _ = scale(x_test[_])
params.append(max)
# store the normalization (max) value for each feature
with tf.io.gfile.GFile(args.param_file, 'w') as f:
f.write(str(params))
return (x_train, y_train), (x_test, y_test)
# Build the Keras model
def build_and_compile_dnn_model():
model = tf.keras.Sequential([
tf.keras.layers.Dense(128, activation='relu', input_shape=(13,)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(1, activation='linear')
])
model.compile(
loss='mse',
optimizer=tf.keras.optimizers.RMSprop(learning_rate=args.lr))
return model
NUM_WORKERS = strategy.num_replicas_in_sync
# Here the batch size scales up by number of workers since
# `tf.data.Dataset.batch` expects the global batch size.
BATCH_SIZE = 16
GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS
with strategy.scope():
# Creation of dataset, and model building/compiling need to be within
# `strategy.scope()`.
model = build_and_compile_dnn_model()
# Train the model
(x_train, y_train), (x_test, y_test) = make_dataset()
model.fit(x_train, y_train, epochs=args.epochs, batch_size=GLOBAL_BATCH_SIZE)
model.save(args.model_dir)
Explanation: Task.py contents
In the next cell, you write the contents of the training script task.py. I won't go into detail, it's just there for you to browse. In summary:
Get the directory where to save the model artifacts from the command line (--model_dir), and if not specified, then from the environment variable AIP_MODEL_DIR.
Loads Boston Housing dataset from TF.Keras builtin datasets
Builds a simple deep neural network model using TF.Keras model API.
Compiles the model (compile()).
Sets a training distribution strategy according to the argument args.distribute.
Trains the model (fit()) with epochs specified by args.epochs.
Saves the trained model (save(args.model_dir)) to the specified model directory.
Saves the maximum value for each feature f.write(str(params)) to the specified parameters file.
End of explanation
! rm -f custom.tar custom.tar.gz
! tar cvf custom.tar custom
! gzip custom.tar
! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_boston.tar.gz
Explanation: Store training script on your Cloud Storage bucket
Next, you package the training folder into a compressed tar ball, and then store it in your Cloud Storage bucket.
End of explanation
from google.protobuf import json_format
from google.protobuf.struct_pb2 import Value
MODEL_NAME = "custom_pipeline-" + TIMESTAMP
PIPELINE_DISPLAY_NAME = "custom-training-pipeline" + TIMESTAMP
training_task_inputs = json_format.ParseDict(
{"workerPoolSpecs": worker_pool_spec}, Value()
)
pipeline = {
"display_name": PIPELINE_DISPLAY_NAME,
"training_task_definition": CUSTOM_TASK_GCS_PATH,
"training_task_inputs": training_task_inputs,
"model_to_upload": {
"display_name": PIPELINE_DISPLAY_NAME + "-model",
"artifact_uri": MODEL_DIR,
"container_spec": {"image_uri": DEPLOY_IMAGE},
},
}
print(pipeline)
Explanation: Train the model using a TrainingPipeline resource
Now start training of your custom training job using a training pipeline on Vertex. To train the your custom model, do the following steps:
Create a Vertex TrainingPipeline resource for the Dataset resource.
Execute the pipeline to start the training.
Create a TrainingPipeline resource
You may ask, what do we use a pipeline for? We typically use pipelines when the job (such as training) has multiple steps, generally in sequential order: do step A, do step B, etc. By putting the steps into a pipeline, we gain the benefits of:
Being reusable for subsequent training jobs.
Can be containerized and ran as a batch job.
Can be distributed.
All the steps are associated with the same pipeline job for tracking progress.
The training_pipeline specification
First, you need to describe a pipeline specification. Let's look into the minimal requirements for constructing a training_pipeline specification for a custom job:
display_name: A human readable name for the pipeline job.
training_task_definition: The training task schema.
training_task_inputs: A dictionary describing the requirements for the training job.
model_to_upload: A dictionary describing the specification for the (uploaded) Vertex custom Model resource.
display_name: A human readable name for the Model resource.
artifact_uri: The Cloud Storage path where the model artifacts are stored in SavedModel format.
container_spec: This is the specification for the Docker container that will be installed on the Endpoint resource, from which the custom model will serve predictions.
End of explanation
def create_pipeline(training_pipeline):
try:
pipeline = clients["pipeline"].create_training_pipeline(
parent=PARENT, training_pipeline=training_pipeline
)
print(pipeline)
except Exception as e:
print("exception:", e)
return None
return pipeline
response = create_pipeline(pipeline)
Explanation: Create the training pipeline
Use this helper function create_pipeline, which takes the following parameter:
training_pipeline: the full specification for the pipeline training job.
The helper function calls the pipeline client service's create_pipeline method, which takes the following parameters:
parent: The Vertex location root path for your Dataset, Model and Endpoint resources.
training_pipeline: The full specification for the pipeline training job.
The helper function will return the Vertex fully qualified identifier assigned to the training pipeline, which is saved as pipeline.name.
End of explanation
# The full unique ID for the pipeline
pipeline_id = response.name
# The short numeric ID for the pipeline
pipeline_short_id = pipeline_id.split("/")[-1]
print(pipeline_id)
Explanation: Now save the unique identifier of the training pipeline you created.
End of explanation
def get_training_pipeline(name, silent=False):
response = clients["pipeline"].get_training_pipeline(name=name)
if silent:
return response
print("pipeline")
print(" name:", response.name)
print(" display_name:", response.display_name)
print(" state:", response.state)
print(" training_task_definition:", response.training_task_definition)
print(" training_task_inputs:", dict(response.training_task_inputs))
print(" create_time:", response.create_time)
print(" start_time:", response.start_time)
print(" end_time:", response.end_time)
print(" update_time:", response.update_time)
print(" labels:", dict(response.labels))
return response
response = get_training_pipeline(pipeline_id)
Explanation: Get information on a training pipeline
Now get pipeline information for just this training pipeline instance. The helper function gets the job information for just this job by calling the job client service's get_training_pipeline method, with the following parameter:
name: The Vertex fully qualified pipeline identifier.
When the model is done training, the pipeline state will be PIPELINE_STATE_SUCCEEDED.
End of explanation
while True:
response = get_training_pipeline(pipeline_id, True)
if response.state != aip.PipelineState.PIPELINE_STATE_SUCCEEDED:
print("Training job has not completed:", response.state)
model_to_deploy_id = None
if response.state == aip.PipelineState.PIPELINE_STATE_FAILED:
raise Exception("Training Job Failed")
else:
model_to_deploy = response.model_to_upload
model_to_deploy_id = model_to_deploy.name
print("Training Time:", response.end_time - response.start_time)
break
time.sleep(60)
print("model to deploy:", model_to_deploy_id)
if not DIRECT:
MODEL_DIR = MODEL_DIR + "/model"
model_path_to_deploy = MODEL_DIR
Explanation: Deployment
Training the above model may take upwards of 20 minutes.
Once your model is done training, you can calculate the actual time it took to train the model by subtracting end_time from start_time. For your model, you will need to know the fully qualified Vertex Model resource identifier, which the pipeline service assigned to it. You can get this from the returned pipeline instance as the field model_to_upload.name.
End of explanation
import tensorflow as tf
model = tf.keras.models.load_model(MODEL_DIR)
Explanation: Load the saved model
Your model is stored in a TensorFlow SavedModel format in a Cloud Storage bucket. Now load it from the Cloud Storage bucket, and then you can do some things, like evaluate the model, and do a prediction.
To load, you use the TF.Keras model.load_model() method passing it the Cloud Storage path where the model is saved -- specified by MODEL_DIR.
End of explanation
import numpy as np
from tensorflow.keras.datasets import boston_housing
(_, _), (x_test, y_test) = boston_housing.load_data(
path="boston_housing.npz", test_split=0.2, seed=113
)
def scale(feature):
max = np.max(feature)
feature = (feature / max).astype(np.float32)
return feature
# Let's save one data item that has not been scaled
x_test_notscaled = x_test[0:1].copy()
for _ in range(13):
x_test[_] = scale(x_test[_])
x_test = x_test.astype(np.float32)
print(x_test.shape, x_test.dtype, y_test.shape)
print("scaled", x_test[0])
print("unscaled", x_test_notscaled)
Explanation: Evaluate the model
Now let's find out how good the model is.
Load evaluation data
You will load the Boston Housing test (holdout) data from tf.keras.datasets, using the method load_data(). This will return the dataset as a tuple of two elements. The first element is the training data and the second is the test data. Each element is also a tuple of two elements: the feature data, and the corresponding labels (median value of owner-occupied homes).
You don't need the training data, which is why it is loaded as (_, _).
Before you can run the data through evaluation, you need to preprocess it:
x_test:
1. Normalize (rescaling) the data in each column by dividing each value by the maximum value of that column. This will replace each single value with a 32-bit floating point number between 0 and 1.
End of explanation
model.evaluate(x_test, y_test)
Explanation: Perform the model evaluation
Now evaluate how well the model in the custom job did.
End of explanation
loaded = tf.saved_model.load(model_path_to_deploy)
serving_input = list(
loaded.signatures["serving_default"].structured_input_signature[1].keys()
)[0]
print("Serving function input:", serving_input)
Explanation: Upload the model for serving
Next, you will upload your TF.Keras model from the custom job to Vertex Model service, which will create a Vertex Model resource for your custom model. During upload, you need to define a serving function to convert data to the format your model expects. If you send encoded data to Vertex, your serving function ensures that the data is decoded on the model server before it is passed as input to your model.
How does the serving function work
When you send a request to an online prediction server, the request is received by an HTTP server. The HTTP server extracts the prediction request from the HTTP request content body. The extracted prediction request is forwarded to the serving function. For Google pre-built prediction containers, the request content is passed to the serving function as a tf.string.
The serving function consists of two parts:
preprocessing function:
Converts the input (tf.string) to the input shape and data type of the underlying model (dynamic graph).
Performs the same preprocessing of the data that was done during training the underlying model -- e.g., normalizing, scaling, etc.
post-processing function:
Converts the model output to the format expected by the receiving application -- e.g., compresses the output.
Packages the output for the receiving application -- e.g., adds headings, makes a JSON object, etc.
Both the preprocessing and post-processing functions are converted to static graphs which are fused to the model. The output from the underlying model is passed to the post-processing function. The post-processing function passes the converted/packaged output back to the HTTP server. The HTTP server returns the output as the HTTP response content.
One thing to keep in mind when building serving functions for TF.Keras models is that they run as static graphs. That means you cannot use TF operations that require a dynamic graph. If you do, you will get an error while the serving function is compiled, indicating that you are using an EagerTensor, which is not supported. A minimal, illustrative sketch of such a serving function is shown after this explanation.
Get the serving function signature
You can get the signatures of your model's input and output layers by reloading the model into memory, and querying it for the signatures corresponding to each layer.
When making a prediction request, you need to route the request to the serving function instead of the model, so you need to know the input layer name of the serving function -- which you will use later when you make a prediction request.
End of explanation
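To make the description above concrete, here is a hedged, minimal sketch of a serving function with a preprocessing step. It is not used anywhere in this tutorial (the SavedModel exported by the training script already has a default serving signature); the input name, the normalization constants, and the output key are illustrative assumptions only.
# Hedged sketch only -- not executed in this tutorial.
# max_values stands in for the per-feature maxima saved to PARAM_FILE during training.
max_values = tf.constant([1.0] * 13, dtype=tf.float32)  # placeholder values
@tf.function(input_signature=[tf.TensorSpec([None, 13], tf.float32, name="dense_input")])
def serving_fn(features):
    # preprocessing: rescale raw features the same way training did
    scaled = features / max_values
    # post-processing: package the model output under a named key
    return {"predictions": model(scaled)}
# tf.saved_model.save(model, MODEL_DIR, signatures={"serving_default": serving_fn})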
IMAGE_URI = DEPLOY_IMAGE
def upload_model(display_name, image_uri, model_uri):
model = {
"display_name": display_name,
"metadata_schema_uri": "",
"artifact_uri": model_uri,
"container_spec": {
"image_uri": image_uri,
"command": [],
"args": [],
"env": [{"name": "env_name", "value": "env_value"}],
"ports": [{"container_port": 8080}],
"predict_route": "",
"health_route": "",
},
}
response = clients["model"].upload_model(parent=PARENT, model=model)
print("Long running operation:", response.operation.name)
upload_model_response = response.result(timeout=180)
print("upload_model_response")
print(" model:", upload_model_response.model)
return upload_model_response.model
model_to_deploy_id = upload_model(
"boston-" + TIMESTAMP, IMAGE_URI, model_path_to_deploy
)
Explanation: Upload the model
Use this helper function upload_model to upload your model, stored in SavedModel format, up to the Model service, which will instantiate a Vertex Model resource instance for your model. Once you've done that, you can use the Model resource instance in the same way as any other Vertex Model resource instance, such as deploying to an Endpoint resource for serving predictions.
The helper function takes the following parameters:
display_name: A human readable name for the Endpoint service.
image_uri: The container image for the model deployment.
model_uri: The Cloud Storage path to our SavedModel artifact. For this tutorial, this is the Cloud Storage location where the trainer/task.py saved the model artifacts, which we specified in the variable MODEL_DIR.
The helper function calls the Model client service's method upload_model, which takes the following parameters:
parent: The Vertex location root path for Dataset, Model and Endpoint resources.
model: The specification for the Vertex Model resource instance.
Let's now dive deeper into the Vertex model specification model. This is a dictionary object that consists of the following fields:
display_name: A human readable name for the Model resource.
metadata_schema_uri: Since your model was built without a Vertex Dataset resource, you will leave this blank ('').
artifact_uri: The Cloud Storage path where the model is stored in SavedModel format.
container_spec: This is the specification for the Docker container that will be installed on the Endpoint resource, from which the Model resource will serve predictions. Use the variable you set earlier DEPLOY_GPU != None to use a GPU; otherwise only a CPU is allocated.
Uploading a model into a Vertex Model resource returns a long running operation, since it may take a few moments. You call response.result(), which is a synchronous call and will return when the Vertex Model resource is ready.
The helper function returns the Vertex fully qualified identifier for the corresponding Vertex Model instance upload_model_response.model. You will save the identifier for subsequent steps in the variable model_to_deploy_id.
End of explanation
def get_model(name):
response = clients["model"].get_model(name=name)
print(response)
get_model(model_to_deploy_id)
Explanation: Get Model resource information
Now let's get the model information for just your model. Use this helper function get_model, with the following parameter:
name: The Vertex unique identifier for the Model resource.
This helper function calls the Vertex Model client service's method get_model, with the following parameter:
name: The Vertex unique identifier for the Model resource.
End of explanation
ENDPOINT_NAME = "boston_endpoint-" + TIMESTAMP
def create_endpoint(display_name):
endpoint = {"display_name": display_name}
response = clients["endpoint"].create_endpoint(parent=PARENT, endpoint=endpoint)
print("Long running operation:", response.operation.name)
result = response.result(timeout=300)
print("result")
print(" name:", result.name)
print(" display_name:", result.display_name)
print(" description:", result.description)
print(" labels:", result.labels)
print(" create_time:", result.create_time)
print(" update_time:", result.update_time)
return result
result = create_endpoint(ENDPOINT_NAME)
Explanation: Deploy the Model resource
Now deploy the trained Vertex custom Model resource. This requires two steps:
Create an Endpoint resource for deploying the Model resource to.
Deploy the Model resource to the Endpoint resource.
Create an Endpoint resource
Use this helper function create_endpoint to create an endpoint to deploy the model to for serving predictions, with the following parameter:
display_name: A human readable name for the Endpoint resource.
The helper function uses the endpoint client service's create_endpoint method, which takes the following parameter:
display_name: A human readable name for the Endpoint resource.
Creating an Endpoint resource returns a long running operation, since it may take a few moments to provision the Endpoint resource for serving. You call response.result(), which is a synchronous call and will return when the Endpoint resource is ready. The helper function returns the Vertex fully qualified identifier for the Endpoint resource: response.name.
End of explanation
# The full unique ID for the endpoint
endpoint_id = result.name
# The short numeric ID for the endpoint
endpoint_short_id = endpoint_id.split("/")[-1]
print(endpoint_id)
Explanation: Now get the unique identifier for the Endpoint resource you created.
End of explanation
MIN_NODES = 1
MAX_NODES = 1
Explanation: Compute instance scaling
You have several choices on scaling the compute instances for handling your online prediction requests:
Single Instance: The online prediction requests are processed on a single compute instance.
Set the minimum (MIN_NODES) and maximum (MAX_NODES) number of compute instances to one.
Manual Scaling: The online prediction requests are split across a fixed number of compute instances that you manually specified.
Set the minimum (MIN_NODES) and maximum (MAX_NODES) number of compute instances to the same number of nodes. When a model is first deployed to the instance, the fixed number of compute instances are provisioned and online prediction requests are evenly distributed across them.
Auto Scaling: The online prediction requests are split across a scaleable number of compute instances.
Set the minimum (MIN_NODES) number of compute instances to provision when a model is first deployed (and to de-provision down to), and set the maximum (MAX_NODES) number of compute instances the service may scale up to, depending on load conditions. An illustrative auto-scaling configuration is sketched below.
The minimum number of compute instances corresponds to the field min_replica_count and the maximum number of compute instances corresponds to the field max_replica_count, in your subsequent deployment request.
End of explanation
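For illustration only -- the tutorial itself keeps a single instance -- an auto-scaling configuration would simply use different values here. The numbers below are arbitrary examples, not recommendations, and are left commented out so they do not affect the run.
# Illustrative only: auto-scale between 1 and 4 replicas instead of a single node.
# MIN_NODES = 1   # replicas provisioned when the model is first deployed
# MAX_NODES = 4   # upper bound the service may scale out to under load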
DEPLOYED_NAME = "boston_deployed-" + TIMESTAMP
def deploy_model(
model, deployed_model_display_name, endpoint, traffic_split={"0": 100}
):
if DEPLOY_GPU:
machine_spec = {
"machine_type": DEPLOY_COMPUTE,
"accelerator_type": DEPLOY_GPU,
"accelerator_count": DEPLOY_NGPU,
}
else:
machine_spec = {
"machine_type": DEPLOY_COMPUTE,
"accelerator_count": 0,
}
deployed_model = {
"model": model,
"display_name": deployed_model_display_name,
"dedicated_resources": {
"min_replica_count": MIN_NODES,
"max_replica_count": MAX_NODES,
"machine_spec": machine_spec,
},
"disable_container_logging": False,
}
response = clients["endpoint"].deploy_model(
endpoint=endpoint, deployed_model=deployed_model, traffic_split=traffic_split
)
print("Long running operation:", response.operation.name)
result = response.result()
print("result")
deployed_model = result.deployed_model
print(" deployed_model")
print(" id:", deployed_model.id)
print(" model:", deployed_model.model)
print(" display_name:", deployed_model.display_name)
print(" create_time:", deployed_model.create_time)
return deployed_model.id
deployed_model_id = deploy_model(model_to_deploy_id, DEPLOYED_NAME, endpoint_id)
Explanation: Deploy Model resource to the Endpoint resource
Use this helper function deploy_model to deploy the Model resource to the Endpoint resource you created for serving predictions, with the following parameters:
model: The Vertex fully qualified model identifier of the model to upload (deploy) from the training pipeline.
deploy_model_display_name: A human readable name for the deployed model.
endpoint: The Vertex fully qualified endpoint identifier to deploy the model to.
The helper function calls the Endpoint client service's method deploy_model, which takes the following parameters:
endpoint: The Vertex fully qualified Endpoint resource identifier to deploy the Model resource to.
deployed_model: The requirements specification for deploying the model.
traffic_split: Percent of traffic at the endpoint that goes to this model, which is specified as a dictionary of one or more key/value pairs.
If only one model, then specify as { "0": 100 }, where "0" refers to this model being uploaded and 100 means 100% of the traffic.
If there are existing models on the endpoint, for which the traffic will be split, then use model_id to specify as { "0": percent, model_id: percent, ... }, where model_id is the ID of a model already deployed to the endpoint. The percentages must add up to 100.
Let's now dive deeper into the deployed_model parameter. This parameter is specified as a Python dictionary with the minimum required fields:
model: The Vertex fully qualified model identifier of the (upload) model to deploy.
display_name: A human readable name for the deployed model.
disable_container_logging: This disables logging of container events, such as execution failures (default is container logging is enabled). Container logging is typically enabled when debugging the deployment and then disabled when deployed for production.
dedicated_resources: This refers to how many compute instances (replicas) that are scaled for serving prediction requests.
machine_spec: The compute instance to provision. Use the variable you set earlier DEPLOY_GPU != None to use a GPU; otherwise only a CPU is allocated.
min_replica_count: The number of compute instances to initially provision, which you set earlier as the variable MIN_NODES.
max_replica_count: The maximum number of compute instances to scale to, which you set earlier as the variable MAX_NODES.
Traffic Split
Let's now dive deeper into the traffic_split parameter. This parameter is specified as a Python dictionary. It might be confusing at first: you can deploy more than one instance of your model to an endpoint, and then set what percentage of the traffic goes to each instance.
Why would you do that? Perhaps you already have a previous version deployed in production -- let's call that v1. You got a better model evaluation on v2, but you don't know for certain that it is really better until you deploy it to production. So with a traffic split, you might deploy v2 to the same endpoint as v1, but have it receive only, say, 10% of the traffic. That way, you can monitor how well it does without disrupting the majority of users -- until you make a final decision. An illustrative split is sketched below.
Response
The method returns a long running operation response. We will wait synchronously for the operation to complete by calling response.result(), which will block until the model is deployed. If this is the first time a model is deployed to the endpoint, it may take a few additional minutes to complete provisioning of resources.
End of explanation
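For illustration, this is what a traffic split between the newly deployed model and a model already on the endpoint could look like. The deployed-model ID below is a placeholder, not a real identifier, and the lines are commented out so they are not executed.
# Illustrative only -- not executed in this tutorial.
# existing_deployed_model_id = "1234567890"  # placeholder ID of a model already on the endpoint
# traffic_split = {"0": 10, existing_deployed_model_id: 90}  # 10% to the new model, 90% to the existing one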
test_item = x_test[0]
test_label = y_test[0]
print(test_item.shape)
Explanation: Make a online prediction request
Now do a online prediction to your deployed model.
Get test item
You will use an example out of the test (holdout) portion of the dataset as a test item.
End of explanation
def predict_data(data, endpoint, parameters_dict):
parameters = json_format.ParseDict(parameters_dict, Value())
# The format of each instance should conform to the deployed model's prediction input schema.
instances_list = [{serving_input: data.tolist()}]
instances = [json_format.ParseDict(s, Value()) for s in instances_list]
response = clients["prediction"].predict(
endpoint=endpoint, instances=instances, parameters=parameters
)
print("response")
print(" deployed_model_id:", response.deployed_model_id)
predictions = response.predictions
print("predictions")
for prediction in predictions:
print(" prediction:", prediction)
predict_data(test_item, endpoint_id, None)
Explanation: Send the prediction request
Ok, now you have a test data item. Use this helper function predict_data, which takes the parameters:
data: The test data item as a numpy 1D array of floating point values.
endpoint: The Vertex fully qualified identifier for the Endpoint resource where the Model resource was deployed.
parameters_dict: Additional parameters for serving.
This function uses the prediction client service and calls the predict method with the parameters:
endpoint: The Vertex fully qualified identifier for the Endpoint resource where the Model resource was deployed.
instances: A list of instances (data items) to predict.
parameters: Additional parameters for serving.
To pass the test data to the prediction service, you package it for transmission to the serving binary as follows:
1. Convert the data item from a 1D numpy array to a 1D Python list.
2. Convert the prediction request to a serialized Google protobuf (`json_format.ParseDict()`)
Each instance in the prediction request is a dictionary entry of the form:
{input_name: content}
input_name: the name of the input layer of the underlying model.
content: The data item as a 1D Python list.
Since the predict() service can take multiple data items (instances), you will send your single data item as a list of one data item. As a final step, you package the instances list into Google's protobuf format -- which is what we pass to the predict() service.
The response object returns a list, where each element corresponds to an instance in the request. You will see in the output for each prediction:
predictions -- the predicted median value of a house in units of 1K USD.
End of explanation
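To make the instance format concrete: for this tutorial each instance is a dictionary with a single key -- whatever name serving_input resolved to -- mapping to the 13 scaled feature values. The key name and values below are illustrative only.
# Illustrative shape of one instance (the real key comes from `serving_input`):
# {"dense_input": [0.18, 0.0, 0.081, 0.0, 0.55, 0.83, 0.91, 0.27, 0.17, 0.41, 0.95, 1.0, 0.31]}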
def undeploy_model(deployed_model_id, endpoint):
response = clients["endpoint"].undeploy_model(
endpoint=endpoint, deployed_model_id=deployed_model_id, traffic_split={}
)
print(response)
undeploy_model(deployed_model_id, endpoint_id)
Explanation: Undeploy the Model resource
Now undeploy your Model resource from the serving Endpoint resoure. Use this helper function undeploy_model, which takes the following parameters:
deployed_model_id: The model deployment identifier returned by the endpoint service when the Model resource was deployed.
endpoint: The Vertex fully qualified identifier for the Endpoint resource where the Model is deployed to.
This function calls the endpoint client service's method undeploy_model, with the following parameters:
deployed_model_id: The model deployment identifier returned by the endpoint service when the Model resource was deployed.
endpoint: The Vertex fully qualified identifier for the Endpoint resource where the Model resource is deployed.
traffic_split: How to split traffic among the remaining deployed models on the Endpoint resource.
Since this is the only deployed model on the Endpoint resource, you can simply leave traffic_split empty by setting it to {}.
End of explanation
delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True
# Delete the dataset using the Vertex fully qualified identifier for the dataset
try:
if delete_dataset and "dataset_id" in globals():
clients["dataset"].delete_dataset(name=dataset_id)
except Exception as e:
print(e)
# Delete the training pipeline using the Vertex fully qualified identifier for the pipeline
try:
if delete_pipeline and "pipeline_id" in globals():
clients["pipeline"].delete_training_pipeline(name=pipeline_id)
except Exception as e:
print(e)
# Delete the model using the Vertex fully qualified identifier for the model
try:
if delete_model and "model_to_deploy_id" in globals():
clients["model"].delete_model(name=model_to_deploy_id)
except Exception as e:
print(e)
# Delete the endpoint using the Vertex fully qualified identifier for the endpoint
try:
if delete_endpoint and "endpoint_id" in globals():
clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
print(e)
# Delete the batch job using the Vertex fully qualified identifier for the batch job
try:
if delete_batchjob and "batch_job_id" in globals():
clients["job"].delete_batch_prediction_job(name=batch_job_id)
except Exception as e:
print(e)
# Delete the custom job using the Vertex fully qualified identifier for the custom job
try:
if delete_customjob and "job_id" in globals():
clients["job"].delete_custom_job(name=job_id)
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job
try:
if delete_hptjob and "hpt_job_id" in globals():
clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id)
except Exception as e:
print(e)
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
Explanation: Cleaning up
To clean up all GCP resources used in this project, you can delete the GCP
project you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial:
Dataset
Pipeline
Model
Endpoint
Batch Job
Custom Job
Hyperparameter Tuning Job
Cloud Storage Bucket
End of explanation |
14,526 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
CVC Data Summaries (with simple method hydrology)
Setup the basic working environment
Step1: Load water quality data
External sources
Step2: CVC tidy data
Data using the Simple Method hydrology is suffixed with _simple.
You could also use the SWMM Model hydrology with the _SWMM files.
Loads from the July 8, 2013 storm are removed here.
Step3: Water Quality Summaries
Prevalence Tables
Step4: Concentrations Stats
Step5: Load Stats
Step6: Total Loads Summary
Step7: Total Load Reduction Tables and Figures
Step8: Faceted Plots
Combine NSQD, BMP DB datasets with CVC data
Step9: Boxplots with external sources
Step10: Time series, probability and seasonal box and whisker plots | Python Code:
%matplotlib inline
import os
import sys
import datetime
import warnings
import numpy as np
import matplotlib.pyplot as plt
import pandas
import seaborn
seaborn.set(style='ticks', context='paper')
import wqio
from wqio import utils
import pybmpdb
import pynsqd
import pycvc
min_precip = 1.9999
big_storm_date = datetime.date(2013, 7, 8)
pybmpdb.setMPLStyle()
seaborn.set(style='ticks', rc={'text.usetex': False}, palette='deep')
POCs = [
p['cvcname']
for p in filter(
lambda p: p['include'],
pycvc.info.POC_dicts
)
]
warning_filter = "ignore"
warnings.simplefilter(warning_filter)
## general result groupings
groups = [
{'name': 'Overall', 'col': None},
{'name': 'By Year', 'col': 'year'},
{'name': 'By Season', 'col': 'season'},
{'name': 'By Grouped Season', 'col': 'grouped_season'},
{'name': 'By Storm Size', 'col': 'storm_bin'},
]
site_lists = [
{
'sites': ['ED-1'],
'name': 'ElmDrive',
'colors': [seaborn.color_palette()[0]],
'markers': ['o'],
}, {
'sites': ['LV-1', 'LV-2', 'LV-4'],
'name': 'Lakeview',
'colors': seaborn.color_palette()[1:4],
'markers': ['s', '^', 'v'],
},
]
poc_lists = [
{
'params': POCs[6:],
'units': 'mg/L',
'name': 'Nutrients'
}, {
'params': POCs[:6],
'units': 'μg/L',
'name': 'Metals'
},
]
Explanation: CVC Data Summaries (with simple method hydrology)
Setup the basic working environment
End of explanation
bmpdb = pycvc.external.bmpdb('black', 'D')
nsqdata = pycvc.external.nsqd('black', 'd')
Explanation: Load water quality data
External sources
End of explanation
# simple method file
tidy_file = 'output/tidy/wq_simple.csv'
# # SWMM file
# tidy_file = 'output/tidy/wq_swmm.csv'
datecols = [
'start_date',
'end_date',
'samplestart',
'samplestop',
]
wq = (
pandas.read_csv(tidy_file, parse_dates=datecols)
.pipe(pycvc.summary.classify_storms, 'total_precip_depth')
.pipe(pycvc.summary.remove_load_data_from_storms, [big_storm_date], 'start_date')
)
Explanation: CVC tidy data
Data using the Simple Method hydrology is suffixed with _simple.
You could also use the SWMM Model hydrology with the _SWMM files.
Loads from the July 8, 2013 storm are removed here.
End of explanation
with pandas.ExcelWriter('output/xlsx/CVCWQ_DataInventory.xlsx') as xl_prev_tables:
raw = pycvc.summary.prevalence_table(wq, groupby_col='samplestart')
raw.to_excel(xl_prev_tables, sheet_name='Raw', index=False)
for g in groups:
prevalence = pycvc.summary.prevalence_table(wq, groupby_col=g['col'])
prevalence.to_excel(xl_prev_tables, sheet_name=g['name'], index=False)
Explanation: Water Quality Summaries
Prevalence Tables
End of explanation
summaryopts = dict(rescol='concentration', sampletype='composite')
with pandas.ExcelWriter('output/xlsx/CVCWQ_ConcStats.xlsx') as xl_conc:
for g in groups:
wq_stats = pycvc.summary.wq_summary(wq, groupby_col=g['col'], **summaryopts)
wq_stats.to_excel(xl_conc, sheet_name=g['name'], index=False)
Explanation: Concentrations Stats
End of explanation
summaryopts = dict(rescol='load_outflow', sampletype='composite')
with pandas.ExcelWriter('output/xlsx/CVCWQ_LoadStats.xlsx') as xl_loads:
for g in groups:
load_stats = pycvc.summary.wq_summary(wq, groupby_col=g['col'], **summaryopts)
load_stats.to_excel(xl_loads, sheet_name=g['name'], index=False)
Explanation: Load Stats
End of explanation
with pandas.ExcelWriter('output/xlsx/CVCWQ_LoadsTotals.xlsx') as xl_load_totals:
for g in groups:
load_totals = pycvc.summary.load_totals(wq, groupby_col=g['col'])
load_totals.to_excel(xl_load_totals, sheet_name=g['name'], index=False)
Explanation: Total Loads Summary
End of explanation
ed_nutrients = ['Nitrate + Nitrite', 'Orthophosphate (P)', 'Total Kjeldahl Nitrogen (TKN)', 'Total Phosphorus']
ed_metals = ['Cadmium (Cd)', 'Copper (Cu)', 'Lead (Pb)', 'Nickel (Ni)', 'Zinc (Zn)']
lv_nutrients = ['Nitrate (N)', 'Orthophosphate (P)', 'Total Kjeldahl Nitrogen (TKN)', 'Total Phosphorus']
lv_metals = ['Cadmium (Cd)', 'Copper (Cu)', 'Lead (Pb)', 'Nickel (Ni)', 'Iron (Fe)', 'Zinc (Zn)']
figures = [
{
'sites': ['ED-1'],
'name': 'ElmDrive_TSS',
'params': ['Total Suspended Solids'],
'leg_loc': (0.5, 0.05)
}, {
'sites': ['LV-2', 'LV-4'],
'name': 'LakeViewTSS',
'params': ['Total Suspended Solids'],
'leg_loc': (0.5, 0.05)
}, {
'sites': ['ED-1'],
'name': 'ElmDrive_Nutrients',
'params': ed_nutrients,
'leg_loc': (0.6, 0.03)
}, {
'sites': ['LV-2', 'LV-4'],
'name': 'LakeView_Nutrients',
'params': lv_nutrients,
'leg_loc': (0.6, 0.03)
}, {
'sites': ['ED-1'],
'name': 'ElmDrive_Metals',
'params': ed_metals,
'leg_loc': (0.6, 0.03)
}, {
'sites': ['LV-2', 'LV-4'],
'name': 'LakeView_Metals',
'params': lv_metals,
'leg_loc': (0.57, 0.02)
},
]
with pandas.ExcelWriter('output/xlsx/CVCWQ_LoadReductionPct.xlsx') as xl_load_pct:
for g in groups:
reduction = (
wq.pipe(pycvc.summary.load_reduction_pct, groupby_col=g['col'])
)
reduction.to_excel(xl_load_pct, sheet_name=g['name'], index=False)
if g['col'] is not None and g['col'] != 'season':
for f in figures:
_params = f['params']
_sites = f['sites']
fg = pycvc.viz.reduction_plot(
reduction.query("site in @_sites and parameter in @_params"),
_params,
'parameter',
'site',
g['col'],
f['leg_loc'],
lower='load_red_lower',
reduction='load_red',
upper='load_red_upper',
)
fg.set_axis_labels(x_var='', y_var='Load Reduction (%)')
for ax in fg.axes:
ax.set_ylim(top=100)
if g['col'] == 'storm_bin':
utils.figutils.rotateTickLabels(ax, 20, 'x')
fg.savefig('output/img/LoadReduction/{}_{}.png'.format(f['name'], g['col']))
Explanation: Total Load Reduction Tables and Figures
End of explanation
bmptidy = pycvc.external.combine_wq(wq, bmpdb, 'category')
nsqdtidy = pycvc.external.combine_wq(wq, nsqdata, 'primary_landuse')
Explanation: Faceted Plots
Combine NSQD, BMP DB datasets with CVC data
End of explanation
bmps = [
'Bioretention', 'Detention Basin',
'Manufactured Device', 'Retention Pond',
'Wetland Channel',
]
LUs = [
'Commercial', 'Freeway', 'Industrial',
'Institutional', 'Residential', 'Open Space',
]
for sl in site_lists:
for pocs in poc_lists:
box_opts = dict(
sites=sl['sites'],
params=pocs['params'],
units=pocs['units'],
)
bmppal = sl['colors'].copy() + seaborn.color_palette('BuPu', n_colors=len(bmps))
fg1 = pycvc.viz.external_boxplot(bmptidy, categories=bmps, palette=bmppal, **box_opts)
fg1name = 'Boxplot_BMPBD_{}_{}.png'.format(sl['name'], pocs['name'])
pycvc.viz.savefig(fg1.fig, fg1name, extra='Megafigure')
nsqdpal = sl['colors'].copy() + seaborn.color_palette('RdPu', n_colors=len(LUs))
fg2 = pycvc.viz.external_boxplot(nsqdtidy, categories=LUs, palette=nsqdpal, **box_opts)
fg2name = 'Boxplot_NSQD_{}_{}.png'.format(sl['name'], pocs['name'])
pycvc.viz.savefig(fg2.fig, fg2name, extra='Megafigure')
Explanation: Boxplots with external sources
End of explanation
for sl in site_lists:
for pocs in poc_lists:
# common options for the plots
plot_opts = dict(
sites=sl['sites'],
params=pocs['params'],
units=pocs['units'],
palette=sl['colors'],
markers=sl['markers'],
)
# plots
ts = pycvc.viz.ts_plot(wq, 'samplestart', 'concentration', **plot_opts)
pp = pycvc.viz.prob_plot(wq, 'concentration', **plot_opts)
bp = pycvc.viz.seasonal_boxplot(wq, 'concentration', params=pocs['params'],
units=pocs['units'])
# output filenames
tsname = 'TimeSeries_{}_{}.png'.format(sl['name'], pocs['name'])
ppname = 'ProbPlot_{}_{}.png'.format(sl['name'], pocs['name'])
bpname = 'Boxplot_Seasonal_{}_{}.png'.format(sl['name'], pocs['name'])
# save the figures
pycvc.viz.savefig(ts.fig, tsname, extra='MegaFigure')
pycvc.viz.savefig(pp.fig, ppname, extra='MegaFigure')
pycvc.viz.savefig(bp.fig, bpname, extra='MegaFigure')
Explanation: Time series, probability and seasonal box and whisker plots
End of explanation |
14,527 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<img src="https
Step1: Basics
Set-up a simple run with a constant linear bed. We will first define the bed
Step2: Now we have to decide how wide our glacier is, and what is the shape of its bed. For a start, we will use a "u-shaped" bed (see the documentation), with a constant width of 300m
Step3: The init_flowline variable now contains all geometrical information needed by the model. It can give access to some attributes, which are quite useless for a non-existing glacier
Step4: Mass balance
Then we will need a mass balance model. In our case this will be a simple linear mass-balance, defined by the equilibrium line altitude and an altitude gradient (in [mm m$^{-1}$])
Step5: The mass-balance model gives you the mass-balance for any altitude you want, in units [m s$^{-1}$]. Let us compute the annual mass-balance along the glacier profile
Step6: Model run
Now that we have all the ingredients to run the model, we just have to initialize it
Step7: We can now run the model for 150 years and see what the output looks like
Step8: Let's print out a few infos about our glacier
Step9: Note that the model time is now 150. Running the model with the same input will do nothing
Step10: If we want to compute longer, we have to set the desired date
Step11: Note that in order to store some intermediate steps of the evolution of the glacier, it might be useful to make a loop
Step12: We can now plot the evolution of the glacier length and volume with time
Step13: A first experiment
Ok, now we have seen the basics. We will now define a simple experiment in which we make the glacier wider at the top (in the accumulation area). This is a common situation for valley glaciers.
Step14: We will now run our model with the new initial conditions, and store the output in a new variable for comparison
Step15: Compare the results
Step16: Ice flow parameters
The ice flow parameters are going to have a strong influence on the behavior of the glacier. The default in OGGM is to set Glen's creep parameter A to the "standard value" defined by Cuffey and Patterson
Step17: We can change this and see what happens
Step18: In his seminal paper, Oerlemans also uses a so-called "sliding parameter", representing basal sliding. In OGGM this parameter is set to 0 per default, but it can be modified at whish | Python Code:
# The commands below are just importing the necessary modules and functions
# Plot defaults
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (9, 6) # Default plot size
# Scientific packages
import numpy as np
# Constants
from oggm.cfg import SEC_IN_YEAR, A
# OGGM models
from oggm.core.models.massbalance import LinearMassBalanceModel
from oggm.core.models.flowline import FluxBasedModel
from oggm.core.models.flowline import VerticalWallFlowline, TrapezoidalFlowline, ParabolicFlowline
# This is to set a default parameter to a function. Just ignore it for now
from functools import partial
FlowlineModel = partial(FluxBasedModel, inplace=False)
Explanation: <img src="https://raw.githubusercontent.com/OGGM/oggm/master/docs/_static/logo.png" width="40%" align="left">
Getting started with flowline models: idealized experiments
In this notebook we are going to explore the basic functionalities of OGGM flowline model(s). For this purpose we are going to used simple, "idealized" glaciers, run with simple linear mass-balance profiles.
End of explanation
# This is the bed rock, linearly decreasing from 3400 m altitude to 1400 m, in 200 steps
nx = 200
bed_h = np.linspace(3400, 1400, nx)
# At the beginning, there is no glacier so our glacier surface is at the bed altitude
surface_h = bed_h
# Let's set the model grid spacing to 100m (needed later)
map_dx = 100
# plot this
plt.plot(bed_h, color='k', label='Bedrock')
plt.plot(surface_h, label='Initial glacier')
plt.xlabel('Grid points')
plt.ylabel('Altitude (m)')
plt.legend(loc='best');
Explanation: Basics
Set-up a simple run with a constant linear bed. We will first define the bed:
Glacier bed
End of explanation
# The units of widths are in "grid points", i.e. 3 grid points = 300 m in our case
widths = np.zeros(nx) + 3.
# Define our bed
init_flowline = VerticalWallFlowline(surface_h=surface_h, bed_h=bed_h, widths=widths, map_dx=map_dx)
Explanation: Now we have to decide how wide our glacier is, and what is the shape of its bed. For a start, we will use a "u-shaped" bed (see the documentation), with a constant width of 300m:
End of explanation
print('Glacier length:', init_flowline.length_m)
print('Glacier area:', init_flowline.area_km2)
print('Glacier volume:', init_flowline.volume_km3)
Explanation: The init_flowline variable now contains all geometrical information needed by the model. It can give access to some attributes, which are quite useless for a non-existing glacier:
End of explanation
# ELA at 3000m a.s.l., gradient 4 mm m-1
mb_model = LinearMassBalanceModel(3000, grad=4)
Explanation: Mass balance
Then we will need a mass balance model. In our case this will be a simple linear mass-balance, defined by the equilibrium line altitude and an altitude gradient (in [mm m$^{-1}$]):
End of explanation
annual_mb = mb_model.get_mb(surface_h) * SEC_IN_YEAR
# Plot it
plt.plot(annual_mb, bed_h, color='C2', label='Mass-balance')
plt.xlabel('Annual mass-balance (m yr-1)')
plt.ylabel('Altitude (m)')
plt.legend(loc='best');
Explanation: The mass-balance model gives you the mass-balance for any altitude you want, in units [m s$^{-1}$]. Let us compute the annual mass-balance along the glacier profile:
End of explanation
# The model requires the initial glacier bed, a mass-balance model, and an initial time (the year y0)
model = FlowlineModel(init_flowline, mb_model=mb_model, y0=0.)
Explanation: Model run
Now that we have all the ingredients to run the model, we just have to initialize it:
End of explanation
model.run_until(150)
# Plot the initial conditions first:
plt.plot(init_flowline.bed_h, color='k', label='Bedrock')
plt.plot(init_flowline.surface_h, label='Initial glacier')
# The get the modelled flowline (model.fls[-1]) and plot it's new surface
plt.plot(model.fls[-1].surface_h, label='Glacier after {} years'.format(model.yr))
plt.xlabel('Grid points')
plt.ylabel('Altitude (m)')
plt.legend(loc='best');
Explanation: We can now run the model for 150 years and see what the output looks like:
End of explanation
print('Year:', model.yr)
print('Glacier length (m):', model.length_m)
print('Glacier area (km2):', model.area_km2)
print('Glacier volume (km3):', model.volume_km3)
Explanation: Let's print out a few infos about our glacier:
End of explanation
model.run_until(150)
print('Year:', model.yr)
print('Glacier length (m):', model.length_m)
Explanation: Note that the model time is now 150. Running the model with the same input will do nothing:
End of explanation
model.run_until(500)
# Plot the initial conditions first:
plt.plot(init_flowline.bed_h, color='k', label='Bedrock')
plt.plot(init_flowline.surface_h, label='Initial glacier')
# The get the modelled flowline (model.fls[-1]) and plot it's new surface
plt.plot(model.fls[-1].surface_h, label='Glacier after {} years'.format(model.yr))
plt.xlabel('Grid points')
plt.ylabel('Altitude (m)')
plt.legend(loc='best');
print('Year:', model.yr)
print('Glacier length (m):', model.length_m)
print('Glacier area (km2):', model.area_km2)
print('Glacier volume (km3):', model.volume_km3)
Explanation: If we want to compute longer, we have to set the desired date:
End of explanation
# Reinitialize the model
model = FlowlineModel(init_flowline, mb_model=mb_model, y0=0.)
# Year 0 to 600 in 5-year steps
yrs = np.arange(0, 600, 5)
# Array to fill with data
nsteps = len(yrs)
length = np.zeros(nsteps)
vol = np.zeros(nsteps)
# Loop
for i, yr in enumerate(yrs):
model.run_until(yr)
length[i] = model.length_m
vol[i] = model.volume_km3
# I store the final results for later use
simple_glacier_h = model.fls[-1].surface_h
Explanation: Note that in order to store some intermediate steps of the evolution of the glacier, it might be useful to make a loop:
End of explanation
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 5))
ax1.plot(yrs, length);
ax1.set_xlabel('Years')
ax1.set_ylabel('Length (m)');
ax2.plot(yrs, vol);
ax2.set_xlabel('Years')
ax2.set_ylabel('Volume (km3)');
Explanation: We can now plot the evolution of the glacier length and volume with time:
End of explanation
# We define the widths as before:
widths = np.zeros(nx) + 3.
# But we now make our glacier 600 m wide for the first 15 grid points:
widths[0:15] = 6
# Define our new bed
wider_flowline = VerticalWallFlowline(surface_h=surface_h, bed_h=bed_h, widths=widths, map_dx=map_dx)
Explanation: A first experiment
Ok, now we have seen the basics. We will now define a simple experiment in which we make the glacier wider at the top (in the accumulation area). This is a common situation for valley glaciers.
End of explanation
# Reinitialize the model with the new input
model = FlowlineModel(wider_flowline, mb_model=mb_model, y0=0.)
# Array to fill with data
nsteps = len(yrs)
length_w = np.zeros(nsteps)
vol_w = np.zeros(nsteps)
# Loop
for i, yr in enumerate(yrs):
model.run_until(yr)
length_w[i] = model.length_m
vol_w[i] = model.volume_km3
# I store the final results for later use
wider_glacier_h = model.fls[-1].surface_h
Explanation: We will now run our model with the new initial conditions, and store the output in a new variable for comparison:
End of explanation
# Plot the initial conditions first:
plt.plot(init_flowline.bed_h, color='k', label='Bedrock')
# Then the final result
plt.plot(simple_glacier_h, label='Simple glacier')
plt.plot(wider_glacier_h, label='Wider glacier')
plt.xlabel('Grid points')
plt.ylabel('Altitude (m)')
plt.legend(loc='best');
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 5))
ax1.plot(yrs, length, label='Simple glacier');
ax1.plot(yrs, length_w, label='Wider glacier');
ax1.legend(loc='best')
ax1.set_xlabel('Years')
ax1.set_ylabel('Length (m)');
ax2.plot(yrs, vol, label='Simple glacier');
ax2.plot(yrs, vol_w, label='Wider glacier');
ax2.legend(loc='best')
ax2.set_xlabel('Years')
ax2.set_ylabel('Volume (km3)');
Explanation: Compare the results:
End of explanation
# Default in OGGM
print(A)
Explanation: Ice flow parameters
The ice flow parameters are going to have a strong influence on the behavior of the glacier. The default in OGGM is to set Glen's creep parameter A to the "standard value" defined by Cuffey and Patterson:
End of explanation
# Reinitialize the model with the new parameter
model = FlowlineModel(init_flowline, mb_model=mb_model, y0=0., glen_a=A / 10)
# Array to fill with data
nsteps = len(yrs)
length_s1 = np.zeros(nsteps)
vol_s1 = np.zeros(nsteps)
# Loop
for i, yr in enumerate(yrs):
model.run_until(yr)
length_s1[i] = model.length_m
vol_s1[i] = model.volume_km3
# I store the final results for later use
stiffer_glacier_h = model.fls[-1].surface_h
# And again
model = FlowlineModel(init_flowline, mb_model=mb_model, y0=0., glen_a=A * 10)
# Array to fill with data
nsteps = len(yrs)
length_s2 = np.zeros(nsteps)
vol_s2 = np.zeros(nsteps)
# Loop
for i, yr in enumerate(yrs):
model.run_until(yr)
length_s2[i] = model.length_m
vol_s2[i] = model.volume_km3
# I store the final results for later use
softer_glacier_h = model.fls[-1].surface_h
# Plot the initial conditions first:
plt.plot(init_flowline.bed_h, color='k', label='Bedrock')
# Then the final result
plt.plot(simple_glacier_h, label='Default A')
plt.plot(stiffer_glacier_h, label='A / 10')
plt.plot(softer_glacier_h, label='A * 10')
plt.xlabel('Grid points')
plt.ylabel('Altitude (m)')
plt.legend(loc='best');
Explanation: We can change this and see what happens:
End of explanation
# Change sliding to use Oerlemans value:
model = FlowlineModel(init_flowline, mb_model=mb_model, y0=0., glen_a=A, fs=5.7e-20)
# Array to fill with data
nsteps = len(yrs)
length_s3 = np.zeros(nsteps)
vol_s3 = np.zeros(nsteps)
# Loop
for i, yr in enumerate(yrs):
model.run_until(yr)
length_s3[i] = model.length_m
vol_s3[i] = model.volume_km3
# I store the final results for later use
sliding_glacier_h = model.fls[-1].surface_h
# Plot the initial conditions first:
plt.plot(init_flowline.bed_h, color='k', label='Bedrock')
# Then the final result
plt.plot(simple_glacier_h, label='Default')
plt.plot(sliding_glacier_h, label='Sliding glacier')
plt.xlabel('Grid points')
plt.ylabel('Altitude (m)')
plt.legend(loc='best');
Explanation: In his seminal paper, Oerlemans also uses a so-called "sliding parameter", representing basal sliding. In OGGM this parameter is set to 0 by default, but it can be modified at will:
End of explanation |
14,528 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Step 0
Step4: Step 1
Step5: Step 2
Step6: Step 3
Step7: Step 4
Step8: Step 5
Step9: Step 6
Step10: Step 7
Step11: TODO
Step12: Deployment Option 1
Step13: Deployment Option 2 | Python Code:
df = spark.read.format("csv") \
.option("inferSchema", "true").option("header", "true") \
.load("s3a://datapalooza/airbnb/airbnb.csv.bz2")
df.registerTempTable("df")
print(df.head())
print(df.count())
Explanation: Step 0: Load Libraries and Data
End of explanation
df_filtered = df.filter("price >= 50 AND price <= 750 AND bathrooms > 0.0 AND bedrooms is not null")
df_filtered.registerTempTable("df_filtered")
df_final = spark.sql(
select
id,
city,
case when state in('NY', 'CA', 'London', 'Berlin', 'TX' ,'IL', 'OR', 'DC', 'WA')
then state
else 'Other'
end as state,
space,
cast(price as double) as price,
cast(bathrooms as double) as bathrooms,
cast(bedrooms as double) as bedrooms,
room_type,
host_is_super_host,
cancellation_policy,
cast(case when security_deposit is null
then 0.0
else security_deposit
end as double) as security_deposit,
price_per_bedroom,
cast(case when number_of_reviews is null
then 0.0
else number_of_reviews
end as double) as number_of_reviews,
cast(case when extra_people is null
then 0.0
else extra_people
end as double) as extra_people,
instant_bookable,
cast(case when cleaning_fee is null
then 0.0
else cleaning_fee
end as double) as cleaning_fee,
cast(case when review_scores_rating is null
then 80.0
else review_scores_rating
end as double) as review_scores_rating,
cast(case when square_feet is not null and square_feet > 100
then square_feet
when (square_feet is null or square_feet <=100) and (bedrooms is null or bedrooms = 0)
then 350.0
else 380 * bedrooms
end as double) as square_feet
from df_filtered
).persist()
df_final.registerTempTable("df_final")
df_final.select("square_feet", "price", "bedrooms", "bathrooms", "cleaning_fee").describe().show()
print(df_final.count())
print(df_final.schema)
# Most popular cities
spark.sql(
select
state,
count(*) as ct,
avg(price) as avg_price,
max(price) as max_price
from df_final
group by state
order by count(*) desc
).show()
# Most expensive popular cities
spark.sql(
select
city,
count(*) as ct,
avg(price) as avg_price,
max(price) as max_price
from df_final
group by city
order by avg(price) desc
).filter("ct > 25").show()
Explanation: Step 1: Clean, Filter, and Summarize the Data
End of explanation
continuous_features = ["bathrooms", \
"bedrooms", \
"security_deposit", \
"cleaning_fee", \
"extra_people", \
"number_of_reviews", \
"square_feet", \
"review_scores_rating"]
categorical_features = ["room_type", \
"host_is_super_host", \
"cancellation_policy", \
"instant_bookable", \
"state"]
Explanation: Step 2: Define Continous and Categorical Features
End of explanation
[training_dataset, validation_dataset] = df_final.randomSplit([0.8, 0.2])
Explanation: Step 3: Split Data into Training and Validation
End of explanation
continuous_feature_assembler = VectorAssembler(inputCols=continuous_features, outputCol="unscaled_continuous_features")
continuous_feature_scaler = StandardScaler(inputCol="unscaled_continuous_features", outputCol="scaled_continuous_features", \
withStd=True, withMean=False)
Explanation: Step 4: Continous Feature Pipeline
End of explanation
categorical_feature_indexers = [StringIndexer(inputCol=x, \
outputCol="{}_index".format(x)) \
for x in categorical_features]
categorical_feature_one_hot_encoders = [OneHotEncoder(inputCol=x.getOutputCol(), \
outputCol="oh_encoder_{}".format(x.getOutputCol() )) \
for x in categorical_feature_indexers]
Explanation: Step 5: Categorical Feature Pipeline
End of explanation
feature_cols_lr = [x.getOutputCol() \
for x in categorical_feature_one_hot_encoders]
feature_cols_lr.append("scaled_continuous_features")
feature_assembler_lr = VectorAssembler(inputCols=feature_cols_lr, \
outputCol="features_lr")
Explanation: Step 6: Assemble our features and feature pipeline
End of explanation
linear_regression = LinearRegression(featuresCol="features_lr", \
labelCol="price", \
predictionCol="price_prediction", \
maxIter=10, \
regParam=0.3, \
elasticNetParam=0.8)
estimators_lr = \
[continuous_feature_assembler, continuous_feature_scaler] \
+ categorical_feature_indexers + categorical_feature_one_hot_encoders \
+ [feature_assembler_lr] + [linear_regression]
pipeline = Pipeline(stages=estimators_lr)
pipeline_model = pipeline.fit(training_dataset)
print(pipeline_model)
Explanation: Step 7: Train a Linear Regression Model
End of explanation
from jpmml import toPMMLBytes
pmmlBytes = toPMMLBytes(spark, training_dataset, pipeline_model)
print(pmmlBytes.decode("utf-8"))
Explanation: TODO: Step 8: Validate Linear Regression Model
Step 9: Convert PipelineModel to PMML
End of explanation
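The validation step above is still marked TODO; a minimal sketch of what it could look like, using Spark ML's RegressionEvaluator on the pipeline_model and validation_dataset created earlier (the metric choices here are assumptions, not part of the original notebook):
from pyspark.ml.evaluation import RegressionEvaluator
# Score the fitted pipeline on the held-out split and report common regression metrics
validation_predictions = pipeline_model.transform(validation_dataset)
evaluator = RegressionEvaluator(labelCol="price", predictionCol="price_prediction", metricName="rmse")
print("Validation RMSE:", evaluator.evaluate(validation_predictions))
print("Validation R2:", evaluator.evaluate(validation_predictions, {evaluator.metricName: "r2"}))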
import urllib.request
update_url = 'http://prediction-pmml-aws.demo.pipeline.io/update-pmml/pmml_airbnb'
update_headers = {}
update_headers['Content-type'] = 'application/xml'
req = urllib.request.Request(update_url, \
headers=update_headers, \
data=pmmlBytes)
resp = urllib.request.urlopen(req)
print(resp.status) # Should return Http Status 200
import urllib.request
update_url = 'http://prediction-pmml-gcp.demo.pipeline.io/update-pmml/pmml_airbnb'
update_headers = {}
update_headers['Content-type'] = 'application/xml'
req = urllib.request.Request(update_url, \
headers=update_headers, \
data=pmmlBytes)
resp = urllib.request.urlopen(req)
print(resp.status) # Should return Http Status 200
import urllib.parse
import json
evaluate_url = 'http://prediction-pmml-aws.demo.pipeline.io/evaluate-pmml/pmml_airbnb'
evaluate_headers = {}
evaluate_headers['Content-type'] = 'application/json'
input_params = '{"bathrooms":2.0, \
"bedrooms":2.0, \
"security_deposit":175.00, \
"cleaning_fee":25.0, \
"extra_people":1.0, \
"number_of_reviews": 2.0, \
"square_feet": 250.0, \
"review_scores_rating": 2.0, \
"room_type": "Entire home/apt", \
"host_is_super_host": "0.0", \
"cancellation_policy": "flexible", \
"instant_bookable": "1.0", \
"state": "CA"}'
encoded_input_params = input_params.encode('utf-8')
req = urllib.request.Request(evaluate_url, \
headers=evaluate_headers, \
data=encoded_input_params)
resp = urllib.request.urlopen(req)
print(resp.read())
import urllib.parse
import json
evaluate_url = 'http://prediction-pmml-gcp.demo.pipeline.io/evaluate-pmml/pmml_airbnb'
evaluate_headers = {}
evaluate_headers['Content-type'] = 'application/json'
input_params = '{"bathrooms":2.0, \
"bedrooms":2.0, \
"security_deposit":175.00, \
"cleaning_fee":25.0, \
"extra_people":1.0, \
"number_of_reviews": 2.0, \
"square_feet": 250.0, \
"review_scores_rating": 2.0, \
"room_type": "Entire home/apt", \
"host_is_super_host": "0.0", \
"cancellation_policy": "flexible", \
"instant_bookable": "1.0", \
"state": "CA"}'
encoded_input_params = input_params.encode('utf-8')
req = urllib.request.Request(evaluate_url, \
headers=evaluate_headers, \
data=encoded_input_params)
resp = urllib.request.urlopen(req)
print(resp.read())
Explanation: Deployment Option 1: Mutable Model Deployment
Deploy New Model to Live, Running Model Server
End of explanation
with open('/root/pipeline/prediction.ml/pmml/data/pmml_airbnb/pmml_airbnb.pmml', 'wb') as f:
f.write(pmmlBytes)
!cat /root/pipeline/prediction.ml/pmml/data/pmml_airbnb/pmml_airbnb.pmml
!git
!git status
Explanation: Deployment Option 2: Immutable Model Deployment
Save Model to Disk
End of explanation |
14,529 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
We use the Cobb-Douglas production function and the function given as an example in the assignment.
In both cases the domain is 0≤x≤1.
<P>The Cobb-Douglas production function is as follows.</P>
<P>z = x_1**0.5*x_2*0.5</P>
Step1: <P>The function used in the assignment's example is as follows.</P>
<P>z = (1+np.sin(4*math.pi*x_1))*x_2*1/2</P>
The NN class has already been imported from NN.py.
Step2: The usage is explained below.
First, we use the Cobb-Douglas production function.
Step3: Run the functions that create the input, hidden, and output layers. The argument is the number of units in each layer.
Step4: <p>nn.set_hidden_layer() also creates the hidden layer values before the sigmoid transformation.</p>
<p>set_output_layer() also creates the output layer values before the sigmoid transformation, as well as the array that holds the teacher data.</p>
nn.setup() creates the arrays that hold the weights between the input and hidden layers and between the hidden and output layers.
nn.initialize() initializes the weights. The weights are drawn from a uniform distribution over -1/√d ≤ w ≤ 1/√d (where d is the number of input or hidden units).
Step5: nn.supervised_function(f, idata) creates the teacher data. It takes a function and sample data as arguments.
Step6: nn.simulate(N, eta) takes the number of updates and the learning rate as arguments. Normally this should probably be run with N=1, but I added it as a small extension. It returns the output layer after N learning steps.
Step7: nn.calculation() performs the forward pass from the input layer to the output layer without learning. It is also used inside nn.simulate().
Next, we actually train the network. The sample data are
Step8: the combinations of the values above.
Step9: For example, the input (0, 0) returns 0.52328635 (that is, a[0] and b[0] are fed in and the value c[0] is returned).
Step10: Stochastic gradient descent was repeated 100 times, and judging by the plot the result is getting closer. Let us increase the number of iterations to 10000.
Step11: Visually, the result now looks quite close.
Step12: We try the same thing with the function used in the assignment's example.
Step13: Since the approximation is poor, we change the number of hidden units, setting it to 5.
Step14: Compared with the target, the result differs greatly.
The samples are taken as
Step15: shown above, and during training each randomly selected sample was trained on dozens of times.
Step16: Ideally the result should look like the shape below, so it is hard to say this worked well.
Step17: Using the same method, I made an animation of the Cobb-Douglas production function being learned. I am not sure how meaningful this method is, but the learning worked reasonably well.
It is attached as an mp4 file.
[Discussion] I could not write the code here, but when the number of samples grows, mini-batch learning should be tried.
Finally, we perform cross-validation.
First, a NN trained with an extremely small number of iterations.
Step18: Next, we make the number of training iterations much larger.
Step19: Since this is the mean of the errors, smaller is better.
As a result of greatly increasing the number of training iterations, the accuracy improved.
Next, we set the number of iterations to 100 and compare the accuracy with 2 and 5 hidden units.
def example1(x_1, x_2):
z = x_1**0.5*x_2*0.5
return z
fig = pl.figure()
ax = Axes3D(fig)
X = np.arange(0, 1, 0.1)
Y = np.arange(0, 1, 0.1)
X, Y = np.meshgrid(X, Y)
Z = example1(X, Y)
ax.plot_surface(X, Y, Z, rstride=1, cstride=1)
pl.show()
Explanation: We use the Cobb-Douglas production function and the function given as an example in the assignment.
In both cases the domain is 0≤x≤1.
<P>The Cobb-Douglas production function is as follows.</P>
<P>z = x_1**0.5*x_2*0.5</P>
End of explanation
nn = NN()
Explanation: <P>The function used in the assignment's example is as follows.</P>
<P>z = (1+np.sin(4*math.pi*x_1))*x_2*1/2</P>
The NN class has already been imported from NN.py.
End of explanation
x_1 = Symbol('x_1')
x_2 = Symbol('x_2')
f = x_1**0.5*x_2*0.5
Explanation: The usage is explained below.
First, we use the Cobb-Douglas production function.
End of explanation
nn.set_input_layer(2)
nn.set_hidden_layer(2)
nn.set_output_layer(2)
Explanation: Run the functions that create the input, hidden, and output layers. The argument is the number of units in each layer.
End of explanation
nn.setup()
nn.initialize()
Explanation: <p>nn.set_hidden_layer() also creates the hidden layer values before the sigmoid transformation.</p>
<p>set_output_layer() also creates the output layer values before the sigmoid transformation, as well as the array that holds the teacher data.</p>
nn.setup() creates the arrays that hold the weights between the input and hidden layers and between the hidden and output layers.
nn.initialize() initializes the weights. The weights are drawn from a uniform distribution over -1/√d ≤ w ≤ 1/√d (where d is the number of input or hidden units).
End of explanation
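The initialization rule described above can be sketched in plain numpy (a hypothetical stand-in for what NN.py does internally, which is not shown here; numpy is assumed to be available as np, as elsewhere in this notebook):
# Hedged sketch of the rule above, not the actual NN.py code:
# each weight is drawn uniformly from [-1/sqrt(d), 1/sqrt(d)], with d the number of units feeding the layer
d = 2
w_example = np.random.uniform(-1.0/np.sqrt(d), 1.0/np.sqrt(d), size=(d, d))
print w_example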
idata = [1, 2]
nn.supervised_function(f, idata)
Explanation: nn.supervised_function(f, idata) creates the teacher data. It takes a function and sample data as arguments.
End of explanation
nn.simulate(1, 0.1)
Explanation: nn.simulate(N, eta) takes the number of updates and the learning rate as arguments. Normally this should probably be run with N=1, but I added it as a small extension. It returns the output layer after N learning steps.
End of explanation
X = np.arange(0, 1, 0.2)
Y = np.arange(0, 1, 0.2)
print X, Y
Explanation: nn.calculation() performs the forward pass from the input layer to the output layer without learning. It is also used inside nn.simulate().
Next, we actually train the network. The sample data are
End of explanation
X = np.arange(0, 1, 0.2)
Y = np.arange(0, 1, 0.2)
a = np.array([])
b = np.array([])
c = np.array([])
nn = NN()
nn.set_network()
for x in X:
for y in Y:
a = np.append(a, x)
b = np.append(b, y)
for i in range(100):
l = np.random.choice([i for i in range(len(a))])
m = nn.main(1, f, [a[l], b[l]], 0.5)
for x in X:
for y in Y:
idata = [x, y]
c = np.append(c, nn.realize(f, idata))
a
b
c
Explanation: the combinations of the values above.
End of explanation
fig = pl.figure()
ax = Axes3D(fig)
ax.scatter(a, b, c)
pl.show()
Explanation: For example, the input (0, 0) returns 0.52328635 (that is, a[0] and b[0] are fed in and the value c[0] is returned).
End of explanation
X = np.arange(0, 1, 0.2)
Y = np.arange(0, 1, 0.2)
a = np.array([])
b = np.array([])
c = np.array([])
nn = NN()
nn.set_network()
for x in X:
for y in Y:
a = np.append(a, x)
b = np.append(b, y)
for i in range(10000):
l = np.random.choice([i for i in range(len(a))])
m = nn.main(1, f, [a[l], b[l]], 0.5)
for x in X:
for y in Y:
idata = [x, y]
c = np.append(c, nn.realize(f, idata))
fig = pl.figure()
ax = Axes3D(fig)
ax.scatter(a, b, c)
pl.show()
Explanation: Stochastic gradient descent was repeated 100 times, and judging by the plot the result is getting closer. Let us increase the number of iterations to 10000.
End of explanation
# Here,
nn.hidden_layer
Explanation: Visually, the result now looks quite close.
End of explanation
x_1 = Symbol('x_1')
x_2 = Symbol('x_2')
f = (1+sin(4*math.pi*x_1))*x_2*1/2
X = np.arange(0, 1, 0.2)
Y = np.arange(0, 1, 0.2)
a = np.array([])
b = np.array([])
c = np.array([])
nn = NN()
nn.set_network()
for x in X:
for y in Y:
a = np.append(a, x)
b = np.append(b, y)
for i in range(1000):
l = np.random.choice([i for i in range(len(a))])
m = nn.main(1, f, [a[l], b[l]], 0.5)
for x in X:
for y in Y:
idata = [x, y]
c = np.append(c, nn.realize(f, idata))
fig = pl.figure()
ax = Axes3D(fig)
ax.scatter(a, b, c)
pl.show()
Explanation: We try the same thing with the function used in the assignment's example.
End of explanation
X = np.arange(0, 1, 0.2)
Y = np.arange(0, 1, 0.2)
a = np.array([])
b = np.array([])
c = np.array([])
nn = NN()
nn.set_network(h=5)
for x in X:
for y in Y:
a = np.append(a, x)
b = np.append(b, y)
for i in range(1000):
l = np.random.choice([i for i in range(len(a))])
m = nn.main(1, f, [a[l], b[l]], 0.5)
for x in X:
for y in Y:
idata = [x, y]
c = np.append(c, nn.realize(f, idata))
fig = pl.figure()
ax = Axes3D(fig)
ax.scatter(a, b, c)
pl.show()
Explanation: Since the approximation is poor, we change the number of hidden units, setting it to 5.
End of explanation
X = np.arange(-1, 1, 0.1)
Y = np.arange(-1, 1, 0.1)
print X, Y
Explanation: Compared with the target, the result differs greatly.
The samples are taken as
End of explanation
fig = pl.figure()
ax = Axes3D(fig)
f = (1+sin(4*math.pi*x_1))*x_2*1/2
X = np.arange(-1, 1, 0.1)
Y = np.arange(-1, 1, 0.1)
a = np.array([])
b = np.array([])
c = np.array([])
fig = plt.figure()
nn = NN()
nn.set_network()
for x in X:
for y in Y:
a = np.append(a, x)
b = np.append(b, y)
c = np.append(c, nn.main2(50, f, [x, y], 0.8))
for i in range(50):
l = np.random.choice([i for i in range(len(a))])
m = nn.main2(20, f, [a[l], b[l]], 0.5)
c[l] = m
a = np.array([])
b = np.array([])
c = np.array([])
for x in X:
for y in Y:
a = np.append(a, x)
b = np.append(b, y)
c = np.append(c, nn.realize(f, [x, y]))
ax.scatter(a, b, c)
ax.set_zlim(0, 1)
pl.show()
Explanation: shown above, and during training each randomly selected sample was trained on dozens of times.
End of explanation
def example2(x_1, x_2):
z = (1+np.sin(4*math.pi*x_1))*x_2*1/2
return z
fig = pl.figure()
ax = Axes3D(fig)
X = np.arange(-1, 1, 0.1)
Y = np.arange(-1, 1, 0.1)
X, Y = np.meshgrid(X, Y)
Z = example2(X, Y)
ax.plot_surface(X, Y, Z, rstride=1, cstride=1)
ax.set_zlim(-1, 1)
pl.show()
Explanation: Ideally the result should look like the shape below, so it is hard to say this worked well.
End of explanation
X = np.arange(0, 1, 0.2)
Y = np.arange(0, 1, 0.2)
a = np.array([])
b = np.array([])
c = np.array([])
for x in X:
for y in Y:
a = np.append(a, x)
b = np.append(b, y)
evl = np.array([])
for i in range(len(a)):
nn = NN()
nn.set_network()
for j in range(1):
l = np.random.choice([i for i in range(len(a))])
if l != i:
m = nn.main(1, f, [a[l], b[l]], 0.5)
idata = [a[i], b[i]]
est = nn.realize(f, idata)
evl = np.append(evl, math.fabs(est - nn.supervised_data))
np.average(evl)
Explanation: Using the same method, I made an animation of the Cobb-Douglas production function being learned. I am not sure how meaningful this method is, but the learning worked reasonably well.
It is attached as an mp4 file.
[Discussion] I could not write the code here, but when the number of samples grows, mini-batch learning should be tried.
Finally, we perform cross-validation.
First, a NN trained with an extremely small number of iterations.
End of explanation
X = np.arange(0, 1, 0.2)
Y = np.arange(0, 1, 0.2)
a = np.array([])
b = np.array([])
c = np.array([])
nn = NN()
nn.set_network(h=7)
for x in X:
for y in Y:
a = np.append(a, x)
b = np.append(b, y)
evl = np.array([])
for i in range(len(a)):
for j in range(10000):
nn = NN()
nn.set_network()
l = np.random.choice([i for i in range(len(a))])
if l != i:
m = nn.main(1, f, [a[l], b[l]], 0.5)
idata = [a[i], b[i]]
evl = np.append(evl, math.fabs(nn.realize(f, idata) - nn.supervised_data))
evl
np.average(evl)
Explanation: Next, we make the number of training iterations much larger (10000 in the code above).
End of explanation
X = np.arange(0, 1, 0.2)
Y = np.arange(0, 1, 0.2)
a = np.array([])
b = np.array([])
c = np.array([])
for x in X:
for y in Y:
a = np.append(a, x)
b = np.append(b, y)
evl = np.array([])
for i in range(len(a)):
for j in range(100):
nn = NN()
nn.set_network()
l = np.random.choice([i for i in range(len(a))])
if l != i:
m = nn.main(1, f, [a[l], b[l]], 0.5)
idata = [a[i], b[i]]
est = nn.realize(f, idata)
evl = np.append(evl, math.fabs(est - nn.supervised_data))
np.average(evl)
X = np.arange(0, 1, 0.2)
Y = np.arange(0, 1, 0.2)
a = np.array([])
b = np.array([])
c = np.array([])
for x in X:
for y in Y:
a = np.append(a, x)
b = np.append(b, y)
evl = np.array([])
for i in range(len(a)):
for j in range(100):
nn = NN()
nn.set_network(h=5)
l = np.random.choice([i for i in range(len(a))])
if l != i:
m = nn.main(1, f, [a[l], b[l]], 0.5)
idata = [a[i], b[i]]
est = nn.realize(f, idata)
evl = np.append(evl, math.fabs(est - nn.supervised_data))
np.average(evl)
Explanation: Since this is the mean of the errors, smaller is better.
As a result of greatly increasing the number of training iterations, the accuracy improved.
Next, we set the number of iterations to 100 and compare the accuracy with 2 and 5 hidden units.
End of explanation |
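Following up on the earlier remark that mini-batch learning is worth trying once the number of samples grows, below is a minimal standalone numpy sketch of mini-batch gradient descent on a tiny 2-2-1 sigmoid network. It is illustrative only: the network, the sample set, and every name in it are assumptions made for this sketch, independent of the NN.py class, whose internals are not shown.
# Illustrative mini-batch gradient descent on a tiny 2-2-1 sigmoid network (not the NN.py class)
def sigmoid(u):
    return 1.0 / (1.0 + np.exp(-u))

# hypothetical sample set and targets for the Cobb-Douglas style function
Xs = np.random.uniform(0, 1, size=(400, 2))
ys = (Xs[:, 0]**0.5) * Xs[:, 1] * 0.5

d = 2
W1 = np.random.uniform(-1.0/np.sqrt(d), 1.0/np.sqrt(d), size=(2, d))
b1 = np.zeros(2)
W2 = np.random.uniform(-1.0/np.sqrt(2), 1.0/np.sqrt(2), size=2)
b2 = 0.0
eta, batch_size = 0.5, 20

for epoch in range(200):
    order = np.random.permutation(len(Xs))
    for start in range(0, len(Xs), batch_size):
        idx = order[start:start + batch_size]
        xb, yb = Xs[idx], ys[idx]
        h = sigmoid(xb.dot(W1.T) + b1)                   # forward pass, hidden layer
        out = sigmoid(h.dot(W2) + b2)                    # forward pass, output
        delta_out = (out - yb) * out * (1 - out)         # output error term
        delta_h = np.outer(delta_out, W2) * h * (1 - h)  # hidden error term
        # average the gradients over the mini-batch before updating
        W2 -= eta * delta_out.dot(h) / len(idx)
        b2 -= eta * delta_out.mean()
        W1 -= eta * delta_h.T.dot(xb) / len(idx)
        b1 -= eta * delta_h.mean(axis=0)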
14,530 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Exercise 01
Step1: Exercício 02 | Python Code:
G1 = nx.erdos_renyi_graph(10,0.4)
nx.draw_shell(G1)
G2 = nx.barabasi_albert_graph(10,3)
nx.draw_shell(G2)
G3 = nx.barabasi_albert_graph(10,4)
nx.draw_shell(G3)
Explanation: Exercise 01: Compute the average distance, the diameter, and the clustering coefficient of the networks below.
End of explanation
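A minimal sketch of the metrics asked for in Exercise 01, computed for the three networks drawn above (the connectivity check is needed because erdos_renyi_graph can produce a disconnected graph, for which average distance and diameter are undefined):
# Average distance, diameter and clustering coefficient for G1, G2 and G3
for name, G in [('G1', G1), ('G2', G2), ('G3', G3)]:
    if nx.is_connected(G):
        print('{}: average distance = {:.3f}, diameter = {}'.format(
            name, nx.average_shortest_path_length(G), nx.diameter(G)))
    else:
        print('{}: disconnected, so average distance and diameter are undefined'.format(name))
    print('{}: average clustering coefficient = {:.3f}'.format(name, nx.average_clustering(G)))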
G4 = nx.barabasi_albert_graph(10,3)
plt.pyplot.figure(figsize=(10,10))
pos = nx.shell_layout(G4)
nx.draw_networkx_nodes(G4,pos);
nx.draw_networkx_edges(G4,pos);
nx.draw_networkx_labels(G4,pos);
plt.pyplot.axis('off')
Explanation: Exercise 02: Compute the degree centrality, betweenness, and PageRank of the nodes of the networks below:
End of explanation |
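A minimal sketch of the centralities asked for in Exercise 02, computed for G4 (the same calls work for any of the graphs above):
# Degree centrality, betweenness and PageRank for every node of G4
degree = nx.degree_centrality(G4)
betweenness = nx.betweenness_centrality(G4)
pagerank = nx.pagerank(G4)
for node in G4.nodes():
    print('node {}: degree = {:.3f}, betweenness = {:.3f}, pagerank = {:.3f}'.format(
        node, degree[node], betweenness[node], pagerank[node]))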
14,531 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
ES-DOC CMIP6 Model Properties - Atmos
MIP Era
Step1: Document Authors
Set document authors
Step2: Document Contributors
Specify document contributors
Step3: Document Publication
Specify document publication status
Step4: Document Table of Contents
1. Key Properties --> Overview
2. Key Properties --> Resolution
3. Key Properties --> Timestepping
4. Key Properties --> Orography
5. Grid --> Discretisation
6. Grid --> Discretisation --> Horizontal
7. Grid --> Discretisation --> Vertical
8. Dynamical Core
9. Dynamical Core --> Top Boundary
10. Dynamical Core --> Lateral Boundary
11. Dynamical Core --> Diffusion Horizontal
12. Dynamical Core --> Advection Tracers
13. Dynamical Core --> Advection Momentum
14. Radiation
15. Radiation --> Shortwave Radiation
16. Radiation --> Shortwave GHG
17. Radiation --> Shortwave Cloud Ice
18. Radiation --> Shortwave Cloud Liquid
19. Radiation --> Shortwave Cloud Inhomogeneity
20. Radiation --> Shortwave Aerosols
21. Radiation --> Shortwave Gases
22. Radiation --> Longwave Radiation
23. Radiation --> Longwave GHG
24. Radiation --> Longwave Cloud Ice
25. Radiation --> Longwave Cloud Liquid
26. Radiation --> Longwave Cloud Inhomogeneity
27. Radiation --> Longwave Aerosols
28. Radiation --> Longwave Gases
29. Turbulence Convection
30. Turbulence Convection --> Boundary Layer Turbulence
31. Turbulence Convection --> Deep Convection
32. Turbulence Convection --> Shallow Convection
33. Microphysics Precipitation
34. Microphysics Precipitation --> Large Scale Precipitation
35. Microphysics Precipitation --> Large Scale Cloud Microphysics
36. Cloud Scheme
37. Cloud Scheme --> Optical Cloud Properties
38. Cloud Scheme --> Sub Grid Scale Water Distribution
39. Cloud Scheme --> Sub Grid Scale Ice Distribution
40. Observation Simulation
41. Observation Simulation --> Isscp Attributes
42. Observation Simulation --> Cosp Attributes
43. Observation Simulation --> Radar Inputs
44. Observation Simulation --> Lidar Inputs
45. Gravity Waves
46. Gravity Waves --> Orographic Gravity Waves
47. Gravity Waves --> Non Orographic Gravity Waves
48. Solar
49. Solar --> Solar Pathways
50. Solar --> Solar Constant
51. Solar --> Orbital Parameters
52. Solar --> Insolation Ozone
53. Volcanos
54. Volcanos --> Volcanoes Treatment
1. Key Properties --> Overview
Top level key properties
1.1. Model Overview
Is Required
Step5: 1.2. Model Name
Is Required
Step6: 1.3. Model Family
Is Required
Step7: 1.4. Basic Approximations
Is Required
Step8: 2. Key Properties --> Resolution
Characteristics of the model resolution
2.1. Horizontal Resolution Name
Is Required
Step9: 2.2. Canonical Horizontal Resolution
Is Required
Step10: 2.3. Range Horizontal Resolution
Is Required
Step11: 2.4. Number Of Vertical Levels
Is Required
Step12: 2.5. High Top
Is Required
Step13: 3. Key Properties --> Timestepping
Characteristics of the atmosphere model time stepping
3.1. Timestep Dynamics
Is Required
Step14: 3.2. Timestep Shortwave Radiative Transfer
Is Required
Step15: 3.3. Timestep Longwave Radiative Transfer
Is Required
Step16: 4. Key Properties --> Orography
Characteristics of the model orography
4.1. Type
Is Required
Step17: 4.2. Changes
Is Required
Step18: 5. Grid --> Discretisation
Atmosphere grid discretisation
5.1. Overview
Is Required
Step19: 6. Grid --> Discretisation --> Horizontal
Atmosphere discretisation in the horizontal
6.1. Scheme Type
Is Required
Step20: 6.2. Scheme Method
Is Required
Step21: 6.3. Scheme Order
Is Required
Step22: 6.4. Horizontal Pole
Is Required
Step23: 6.5. Grid Type
Is Required
Step24: 7. Grid --> Discretisation --> Vertical
Atmosphere discretisation in the vertical
7.1. Coordinate Type
Is Required
Step25: 8. Dynamical Core
Characteristics of the dynamical core
8.1. Overview
Is Required
Step26: 8.2. Name
Is Required
Step27: 8.3. Timestepping Type
Is Required
Step28: 8.4. Prognostic Variables
Is Required
Step29: 9. Dynamical Core --> Top Boundary
Type of boundary layer at the top of the model
9.1. Top Boundary Condition
Is Required
Step30: 9.2. Top Heat
Is Required
Step31: 9.3. Top Wind
Is Required
Step32: 10. Dynamical Core --> Lateral Boundary
Type of lateral boundary condition (if the model is a regional model)
10.1. Condition
Is Required
Step33: 11. Dynamical Core --> Diffusion Horizontal
Horizontal diffusion scheme
11.1. Scheme Name
Is Required
Step34: 11.2. Scheme Method
Is Required
Step35: 12. Dynamical Core --> Advection Tracers
Tracer advection scheme
12.1. Scheme Name
Is Required
Step36: 12.2. Scheme Characteristics
Is Required
Step37: 12.3. Conserved Quantities
Is Required
Step38: 12.4. Conservation Method
Is Required
Step39: 13. Dynamical Core --> Advection Momentum
Momentum advection scheme
13.1. Scheme Name
Is Required
Step40: 13.2. Scheme Characteristics
Is Required
Step41: 13.3. Scheme Staggering Type
Is Required
Step42: 13.4. Conserved Quantities
Is Required
Step43: 13.5. Conservation Method
Is Required
Step44: 14. Radiation
Characteristics of the atmosphere radiation process
14.1. Aerosols
Is Required
Step45: 15. Radiation --> Shortwave Radiation
Properties of the shortwave radiation scheme
15.1. Overview
Is Required
Step46: 15.2. Name
Is Required
Step47: 15.3. Spectral Integration
Is Required
Step48: 15.4. Transport Calculation
Is Required
Step49: 15.5. Spectral Intervals
Is Required
Step50: 16. Radiation --> Shortwave GHG
Representation of greenhouse gases in the shortwave radiation scheme
16.1. Greenhouse Gas Complexity
Is Required
Step51: 16.2. ODS
Is Required
Step52: 16.3. Other Flourinated Gases
Is Required
Step53: 17. Radiation --> Shortwave Cloud Ice
Shortwave radiative properties of ice crystals in clouds
17.1. General Interactions
Is Required
Step54: 17.2. Physical Representation
Is Required
Step55: 17.3. Optical Methods
Is Required
Step56: 18. Radiation --> Shortwave Cloud Liquid
Shortwave radiative properties of liquid droplets in clouds
18.1. General Interactions
Is Required
Step57: 18.2. Physical Representation
Is Required
Step58: 18.3. Optical Methods
Is Required
Step59: 19. Radiation --> Shortwave Cloud Inhomogeneity
Cloud inhomogeneity in the shortwave radiation scheme
19.1. Cloud Inhomogeneity
Is Required
Step60: 20. Radiation --> Shortwave Aerosols
Shortwave radiative properties of aerosols
20.1. General Interactions
Is Required
Step61: 20.2. Physical Representation
Is Required
Step62: 20.3. Optical Methods
Is Required
Step63: 21. Radiation --> Shortwave Gases
Shortwave radiative properties of gases
21.1. General Interactions
Is Required
Step64: 22. Radiation --> Longwave Radiation
Properties of the longwave radiation scheme
22.1. Overview
Is Required
Step65: 22.2. Name
Is Required
Step66: 22.3. Spectral Integration
Is Required
Step67: 22.4. Transport Calculation
Is Required
Step68: 22.5. Spectral Intervals
Is Required
Step69: 23. Radiation --> Longwave GHG
Representation of greenhouse gases in the longwave radiation scheme
23.1. Greenhouse Gas Complexity
Is Required
Step70: 23.2. ODS
Is Required
Step71: 23.3. Other Flourinated Gases
Is Required
Step72: 24. Radiation --> Longwave Cloud Ice
Longwave radiative properties of ice crystals in clouds
24.1. General Interactions
Is Required
Step73: 24.2. Physical Reprenstation
Is Required
Step74: 24.3. Optical Methods
Is Required
Step75: 25. Radiation --> Longwave Cloud Liquid
Longwave radiative properties of liquid droplets in clouds
25.1. General Interactions
Is Required
Step76: 25.2. Physical Representation
Is Required
Step77: 25.3. Optical Methods
Is Required
Step78: 26. Radiation --> Longwave Cloud Inhomogeneity
Cloud inhomogeneity in the longwave radiation scheme
26.1. Cloud Inhomogeneity
Is Required
Step79: 27. Radiation --> Longwave Aerosols
Longwave radiative properties of aerosols
27.1. General Interactions
Is Required
Step80: 27.2. Physical Representation
Is Required
Step81: 27.3. Optical Methods
Is Required
Step82: 28. Radiation --> Longwave Gases
Longwave radiative properties of gases
28.1. General Interactions
Is Required
Step83: 29. Turbulence Convection
Atmosphere Convective Turbulence and Clouds
29.1. Overview
Is Required
Step84: 30. Turbulence Convection --> Boundary Layer Turbulence
Properties of the boundary layer turbulence scheme
30.1. Scheme Name
Is Required
Step85: 30.2. Scheme Type
Is Required
Step86: 30.3. Closure Order
Is Required
Step87: 30.4. Counter Gradient
Is Required
Step88: 31. Turbulence Convection --> Deep Convection
Properties of the deep convection scheme
31.1. Scheme Name
Is Required
Step89: 31.2. Scheme Type
Is Required
Step90: 31.3. Scheme Method
Is Required
Step91: 31.4. Processes
Is Required
Step92: 31.5. Microphysics
Is Required
Step93: 32. Turbulence Convection --> Shallow Convection
Properties of the shallow convection scheme
32.1. Scheme Name
Is Required
Step94: 32.2. Scheme Type
Is Required
Step95: 32.3. Scheme Method
Is Required
Step96: 32.4. Processes
Is Required
Step97: 32.5. Microphysics
Is Required
Step98: 33. Microphysics Precipitation
Large Scale Cloud Microphysics and Precipitation
33.1. Overview
Is Required
Step99: 34. Microphysics Precipitation --> Large Scale Precipitation
Properties of the large scale precipitation scheme
34.1. Scheme Name
Is Required
Step100: 34.2. Hydrometeors
Is Required
Step101: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Properties of the large scale cloud microphysics scheme
35.1. Scheme Name
Is Required
Step102: 35.2. Processes
Is Required
Step103: 36. Cloud Scheme
Characteristics of the cloud scheme
36.1. Overview
Is Required
Step104: 36.2. Name
Is Required
Step105: 36.3. Atmos Coupling
Is Required
Step106: 36.4. Uses Separate Treatment
Is Required
Step107: 36.5. Processes
Is Required
Step108: 36.6. Prognostic Scheme
Is Required
Step109: 36.7. Diagnostic Scheme
Is Required
Step110: 36.8. Prognostic Variables
Is Required
Step111: 37. Cloud Scheme --> Optical Cloud Properties
Optical cloud properties
37.1. Cloud Overlap Method
Is Required
Step112: 37.2. Cloud Inhomogeneity
Is Required
Step113: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Sub-grid scale water distribution
38.1. Type
Is Required
Step114: 38.2. Function Name
Is Required
Step115: 38.3. Function Order
Is Required
Step116: 38.4. Convection Coupling
Is Required
Step117: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Sub-grid scale ice distribution
39.1. Type
Is Required
Step118: 39.2. Function Name
Is Required
Step119: 39.3. Function Order
Is Required
Step120: 39.4. Convection Coupling
Is Required
Step121: 40. Observation Simulation
Characteristics of observation simulation
40.1. Overview
Is Required
Step122: 41. Observation Simulation --> Isscp Attributes
ISSCP Characteristics
41.1. Top Height Estimation Method
Is Required
Step123: 41.2. Top Height Direction
Is Required
Step124: 42. Observation Simulation --> Cosp Attributes
CFMIP Observational Simulator Package attributes
42.1. Run Configuration
Is Required
Step125: 42.2. Number Of Grid Points
Is Required
Step126: 42.3. Number Of Sub Columns
Is Required
Step127: 42.4. Number Of Levels
Is Required
Step128: 43. Observation Simulation --> Radar Inputs
Characteristics of the cloud radar simulator
43.1. Frequency
Is Required
Step129: 43.2. Type
Is Required
Step130: 43.3. Gas Absorption
Is Required
Step131: 43.4. Effective Radius
Is Required
Step132: 44. Observation Simulation --> Lidar Inputs
Characteristics of the cloud lidar simulator
44.1. Ice Types
Is Required
Step133: 44.2. Overlap
Is Required
Step134: 45. Gravity Waves
Characteristics of the parameterised gravity waves in the atmosphere, whether from orography or other sources.
45.1. Overview
Is Required
Step135: 45.2. Sponge Layer
Is Required
Step136: 45.3. Background
Is Required
Step137: 45.4. Subgrid Scale Orography
Is Required
Step138: 46. Gravity Waves --> Orographic Gravity Waves
Gravity waves generated due to the presence of orography
46.1. Name
Is Required
Step139: 46.2. Source Mechanisms
Is Required
Step140: 46.3. Calculation Method
Is Required
Step141: 46.4. Propagation Scheme
Is Required
Step142: 46.5. Dissipation Scheme
Is Required
Step143: 47. Gravity Waves --> Non Orographic Gravity Waves
Gravity waves generated by non-orographic processes.
47.1. Name
Is Required
Step144: 47.2. Source Mechanisms
Is Required
Step145: 47.3. Calculation Method
Is Required
Step146: 47.4. Propagation Scheme
Is Required
Step147: 47.5. Dissipation Scheme
Is Required
Step148: 48. Solar
Top of atmosphere solar insolation characteristics
48.1. Overview
Is Required
Step149: 49. Solar --> Solar Pathways
Pathways for solar forcing of the atmosphere
49.1. Pathways
Is Required
Step150: 50. Solar --> Solar Constant
Solar constant and top of atmosphere insolation characteristics
50.1. Type
Is Required
Step151: 50.2. Fixed Value
Is Required
Step152: 50.3. Transient Characteristics
Is Required
Step153: 51. Solar --> Orbital Parameters
Orbital parameters and top of atmosphere insolation characteristics
51.1. Type
Is Required
Step154: 51.2. Fixed Reference Date
Is Required
Step155: 51.3. Transient Method
Is Required
Step156: 51.4. Computation Method
Is Required
Step157: 52. Solar --> Insolation Ozone
Impact of solar insolation on stratospheric ozone
52.1. Solar Ozone Impact
Is Required
Step158: 53. Volcanos
Characteristics of the implementation of volcanoes
53.1. Overview
Is Required
Step159: 54. Volcanos --> Volcanoes Treatment
Treatment of volcanoes in the atmosphere
54.1. Volcanoes Implementation
Is Required | Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'nasa-giss', 'giss-e2-1h', 'atmos')
Explanation: ES-DOC CMIP6 Model Properties - Atmos
MIP Era: CMIP6
Institute: NASA-GISS
Source ID: GISS-E2-1H
Topic: Atmos
Sub-Topics: Dynamical Core, Radiation, Turbulence Convection, Microphysics Precipitation, Cloud Scheme, Observation Simulation, Gravity Waves, Solar, Volcanos.
Properties: 156 (127 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:20
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
Explanation: Document Authors
Set document authors
End of explanation
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
Explanation: Document Contributors
Specify document contributors
End of explanation
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
Explanation: Document Publication
Specify document publication status
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: Document Table of Contents
1. Key Properties --> Overview
2. Key Properties --> Resolution
3. Key Properties --> Timestepping
4. Key Properties --> Orography
5. Grid --> Discretisation
6. Grid --> Discretisation --> Horizontal
7. Grid --> Discretisation --> Vertical
8. Dynamical Core
9. Dynamical Core --> Top Boundary
10. Dynamical Core --> Lateral Boundary
11. Dynamical Core --> Diffusion Horizontal
12. Dynamical Core --> Advection Tracers
13. Dynamical Core --> Advection Momentum
14. Radiation
15. Radiation --> Shortwave Radiation
16. Radiation --> Shortwave GHG
17. Radiation --> Shortwave Cloud Ice
18. Radiation --> Shortwave Cloud Liquid
19. Radiation --> Shortwave Cloud Inhomogeneity
20. Radiation --> Shortwave Aerosols
21. Radiation --> Shortwave Gases
22. Radiation --> Longwave Radiation
23. Radiation --> Longwave GHG
24. Radiation --> Longwave Cloud Ice
25. Radiation --> Longwave Cloud Liquid
26. Radiation --> Longwave Cloud Inhomogeneity
27. Radiation --> Longwave Aerosols
28. Radiation --> Longwave Gases
29. Turbulence Convection
30. Turbulence Convection --> Boundary Layer Turbulence
31. Turbulence Convection --> Deep Convection
32. Turbulence Convection --> Shallow Convection
33. Microphysics Precipitation
34. Microphysics Precipitation --> Large Scale Precipitation
35. Microphysics Precipitation --> Large Scale Cloud Microphysics
36. Cloud Scheme
37. Cloud Scheme --> Optical Cloud Properties
38. Cloud Scheme --> Sub Grid Scale Water Distribution
39. Cloud Scheme --> Sub Grid Scale Ice Distribution
40. Observation Simulation
41. Observation Simulation --> Isscp Attributes
42. Observation Simulation --> Cosp Attributes
43. Observation Simulation --> Radar Inputs
44. Observation Simulation --> Lidar Inputs
45. Gravity Waves
46. Gravity Waves --> Orographic Gravity Waves
47. Gravity Waves --> Non Orographic Gravity Waves
48. Solar
49. Solar --> Solar Pathways
50. Solar --> Solar Constant
51. Solar --> Orbital Parameters
52. Solar --> Insolation Ozone
53. Volcanos
54. Volcanos --> Volcanoes Treatment
1. Key Properties --> Overview
Top level key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of atmosphere model
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of atmosphere model code (CAM 4.0, ARPEGE 3.2,...)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 1.3. Model Family
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of atmospheric model.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: ENUM Cardinality: 1.N
Basic approximations made in the atmosphere.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 2. Key Properties --> Resolution
Characteristics of the model resolution
2.1. Horizontal Resolution Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of the model grid, e.g. T42, N48.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 2.2. Canonical Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Expression quoted for gross comparisons of resolution, e.g. 2.5 x 3.75 degrees lat-lon.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 2.3. Range Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Range of horizontal resolution with spatial details, eg. 1 deg (Equator) - 0.5 deg
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 2.4. Number Of Vertical Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of vertical levels resolved on the computational grid.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 2.5. High Top
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the atmosphere have a high-top? High-Top atmospheres have a fully resolved stratosphere with a model top above the stratopause.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 3. Key Properties --> Timestepping
Characteristics of the atmosphere model time stepping
3.1. Timestep Dynamics
Is Required: TRUE Type: STRING Cardinality: 1.1
Timestep for the dynamics, e.g. 30 min.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 3.2. Timestep Shortwave Radiative Transfer
Is Required: FALSE Type: STRING Cardinality: 0.1
Timestep for the shortwave radiative transfer, e.g. 1.5 hours.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 3.3. Timestep Longwave Radiative Transfer
Is Required: FALSE Type: STRING Cardinality: 0.1
Timestep for the longwave radiative transfer, e.g. 3 hours.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
Explanation: 4. Key Properties --> Orography
Characteristics of the model orography
4.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of the orography.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
Explanation: 4.2. Changes
Is Required: TRUE Type: ENUM Cardinality: 1.N
If the orography type is modified describe the time adaptation changes.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 5. Grid --> Discretisation
Atmosphere grid discretisation
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of grid discretisation in the atmosphere
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 6. Grid --> Discretisation --> Horizontal
Atmosphere discretisation in the horizontal
6.1. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation type
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
Explanation: 6.2. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation method
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 6.3. Scheme Order
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation function order
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 6.4. Horizontal Pole
Is Required: FALSE Type: ENUM Cardinality: 0.1
Horizontal discretisation pole singularity treatment
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 6.5. Grid Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal grid type
End of explanation
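For orientation, a single-valued ENUM property such as this one is completed with one DOC.set_value call using exactly one of the listed choices. The grid type below is purely an illustrative placeholder, not a statement about any particular model; DOC is the same document object used by the surrounding cells.
# Illustrative placeholder only - substitute the model's actual horizontal grid type
DOC.set_value("Latitude-Longitude")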
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 7. Grid --> Discretisation --> Vertical
Atmosphere discretisation in the vertical
7.1. Coordinate Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Type of vertical coordinate system
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 8. Dynamical Core
Characteristics of the dynamical core
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of atmosphere dynamical core
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 8.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the dynamical core of the model.
End of explanation
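As a sketch of how an optional free-text STRING property might be completed, a single DOC.set_value call with a quoted string is all that is needed; being cardinality 0.1, the property can also simply be left unanswered. The name below is an invented placeholder, not a real dynamical core.
# Invented placeholder name - replace with the dynamical core actually used, or omit entirely
DOC.set_value("ExampleCore v1.0")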
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 8.3. Timestepping Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Timestepping framework type
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 8.4. Prognostic Variables
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of the model prognostic variables
End of explanation
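Because this property has cardinality 1.N, more than one choice can be recorded; the "PROPERTY VALUE(S)" comment above presumably means one DOC.set_value call per selection (an assumption about the intended usage). The particular variables below are placeholders chosen from the valid choices, not a description of any specific model.
# Hypothetical selection - one call per chosen prognostic variable
DOC.set_value("surface pressure")
DOC.set_value("wind components")
DOC.set_value("temperature")
DOC.set_value("water vapour")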
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 9. Dynamical Core --> Top Boundary
Type of boundary layer at the top of the model
9.1. Top Boundary Condition
Is Required: TRUE Type: ENUM Cardinality: 1.1
Top boundary condition
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 9.2. Top Heat
Is Required: TRUE Type: STRING Cardinality: 1.1
Top boundary heat treatment
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 9.3. Top Wind
Is Required: TRUE Type: STRING Cardinality: 1.1
Top boundary wind treatment
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 10. Dynamical Core --> Lateral Boundary
Type of lateral boundary condition (if the model is a regional model)
10.1. Condition
Is Required: FALSE Type: ENUM Cardinality: 0.1
Type of lateral boundary condition
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 11. Dynamical Core --> Diffusion Horizontal
Horizontal diffusion scheme
11.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Horizontal diffusion scheme name
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 11.2. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal diffusion scheme method
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 12. Dynamical Core --> Advection Tracers
Tracer advection scheme
12.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Tracer advection scheme name
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 12.2. Scheme Characteristics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Tracer advection scheme characteristics
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 12.3. Conserved Quantities
Is Required: TRUE Type: ENUM Cardinality: 1.N
Tracer advection scheme conserved quantities
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 12.4. Conservation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Tracer advection scheme conservation method
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 13. Dynamical Core --> Advection Momentum
Momentum advection scheme
13.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Momentum advection scheme name
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 13.2. Scheme Characteristics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Momentum advection scheme characteristics
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 13.3. Scheme Staggering Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Momentum advection scheme staggering type
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 13.4. Conserved Quantities
Is Required: TRUE Type: ENUM Cardinality: 1.N
Momentum advection scheme conserved quantities
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 13.5. Conservation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Momentum advection scheme conservation method
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 14. Radiation
Characteristics of the atmosphere radiation process
14.1. Aerosols
Is Required: TRUE Type: ENUM Cardinality: 1.N
Aerosols whose radiative effect is taken into account in the atmosphere model
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 15. Radiation --> Shortwave Radiation
Properties of the shortwave radiation scheme
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of shortwave radiation in the atmosphere
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 15.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the shortwave radiation scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 15.3. Spectral Integration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Shortwave radiation scheme spectral integration
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 15.4. Transport Calculation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Shortwave radiation transport calculation methods
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 15.5. Spectral Intervals
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Shortwave radiation scheme number of spectral intervals
End of explanation
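Note that for an INTEGER property the template shows DOC.set_value(value) without quotes; a sketch with an arbitrary placeholder count (not the value for any particular radiation scheme) would be:
# Placeholder number of spectral intervals - an INTEGER property takes an unquoted number
DOC.set_value(6)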
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 16. Radiation --> Shortwave GHG
Representation of greenhouse gases in the shortwave radiation scheme
16.1. Greenhouse Gas Complexity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Complexity of greenhouse gases whose shortwave radiative effects are taken into account in the atmosphere model
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 16.2. ODS
Is Required: FALSE Type: ENUM Cardinality: 0.N
Ozone depleting substances whose shortwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
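Since this property is optional (cardinality 0.N), it can presumably be left unset altogether, or filled with any number of the listed substances, again via one DOC.set_value call per choice. The two entries below are placeholders taken from the list above, not a claim about any model.
# Optional 0.N property: zero or more calls, one per explicitly represented substance
DOC.set_value("CFC-11")
DOC.set_value("CFC-12")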
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 16.3. Other Fluorinated Gases
Is Required: FALSE Type: ENUM Cardinality: 0.N
Other fluorinated gases whose shortwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 17. Radiation --> Shortwave Cloud Ice
Shortwave radiative properties of ice crystals in clouds
17.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with cloud ice crystals
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 17.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud ice crystals in the shortwave radiation scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 17.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud ice crystals in the shortwave radiation scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 18. Radiation --> Shortwave Cloud Liquid
Shortwave radiative properties of liquid droplets in clouds
18.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with cloud liquid droplets
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 18.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud liquid droplets in the shortwave radiation scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 18.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud liquid droplets in the shortwave radiation scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 19. Radiation --> Shortwave Cloud Inhomogeneity
Cloud inhomogeneity in the shortwave radiation scheme
19.1. Cloud Inhomogeneity
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method for taking into account horizontal cloud inhomogeneity
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 20. Radiation --> Shortwave Aerosols
Shortwave radiative properties of aerosols
20.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with aerosols
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 20.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of aerosols in the shortwave radiation scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 20.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to aerosols in the shortwave radiation scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 21. Radiation --> Shortwave Gases
Shortwave radiative properties of gases
21.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with gases
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 22. Radiation --> Longwave Radiation
Properties of the longwave radiation scheme
22.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of longwave radiation in the atmosphere
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 22.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the longwave radiation scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 22.3. Spectral Integration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Longwave radiation scheme spectral integration
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 22.4. Transport Calculation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Longwave radiation transport calculation methods
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 22.5. Spectral Intervals
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Longwave radiation scheme number of spectral intervals
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 23. Radiation --> Longwave GHG
Representation of greenhouse gases in the longwave radiation scheme
23.1. Greenhouse Gas Complexity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Complexity of greenhouse gases whose longwave radiative effects are taken into account in the atmosphere model
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 23.2. ODS
Is Required: FALSE Type: ENUM Cardinality: 0.N
Ozone depleting substances whose longwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 23.3. Other Fluorinated Gases
Is Required: FALSE Type: ENUM Cardinality: 0.N
Other fluorinated gases whose longwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 24. Radiation --> Longwave Cloud Ice
Longwave radiative properties of ice crystals in clouds
24.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with cloud ice crystals
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 24.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud ice crystals in the longwave radiation scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 24.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud ice crystals in the longwave radiation scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 25. Radiation --> Longwave Cloud Liquid
Longwave radiative properties of liquid droplets in clouds
25.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with cloud liquid droplets
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 25.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud liquid droplets in the longwave radiation scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 25.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud liquid droplets in the longwave radiation scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 26. Radiation --> Longwave Cloud Inhomogeneity
Cloud inhomogeneity in the longwave radiation scheme
26.1. Cloud Inhomogeneity
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method for taking into account horizontal cloud inhomogeneity
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 27. Radiation --> Longwave Aerosols
Longwave radiative properties of aerosols
27.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with aerosols
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 27.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of aerosols in the longwave radiation scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 27.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to aerosols in the longwave radiation scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 28. Radiation --> Longwave Gases
Longwave radiative properties of gases
28.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with gases
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 29. Turbulence Convection
Atmosphere Convective Turbulence and Clouds
29.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of atmosphere convection and turbulence
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 30. Turbulence Convection --> Boundary Layer Turbulence
Properties of the boundary layer turbulence scheme
30.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Boundary layer turbulence scheme name
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 30.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Boundary layer turbulence scheme type
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 30.3. Closure Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Boundary layer turbulence scheme closure order
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 30.4. Counter Gradient
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Uses boundary layer turbulence scheme counter gradient
End of explanation
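BOOLEAN properties take an unquoted True or False, as the valid choices above indicate; whether a counter-gradient term is actually used is model-specific, so the value below is only an illustration.
# Illustrative only - set True or False according to the boundary layer scheme actually used
DOC.set_value(True)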
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 31. Turbulence Convection --> Deep Convection
Properties of the deep convection scheme
31.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Deep convection scheme name
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 31.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Deep convection scheme type
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 31.3. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Deep convection scheme method
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 31.4. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical processes taken into account in the parameterisation of deep convection
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 31.5. Microphysics
Is Required: FALSE Type: ENUM Cardinality: 0.N
Microphysics scheme for deep convection. Microphysical processes directly control the amount of detrainment of cloud hydrometeors and water vapour from updrafts
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 32. Turbulence Convection --> Shallow Convection
Properties of the shallow convection scheme
32.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Shallow convection scheme name
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 32.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Shallow convection scheme type
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
Explanation: 32.3. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Shallow convection scheme method
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 32.4. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical processes taken into account in the parameterisation of shallow convection
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 32.5. Microphysics
Is Required: FALSE Type: ENUM Cardinality: 0.N
Microphysics scheme for shallow convection
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 33. Microphysics Precipitation
Large Scale Cloud Microphysics and Precipitation
33.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of large scale cloud microphysics and precipitation
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 34. Microphysics Precipitation --> Large Scale Precipitation
Properties of the large scale precipitation scheme
34.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name of the large scale precipitation parameterisation scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 34.2. Hydrometeors
Is Required: TRUE Type: ENUM Cardinality: 1.N
Precipitating hydrometeors taken into account in the large scale precipitation scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Properties of the large scale cloud microphysics scheme
35.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name of the microphysics parameterisation scheme used for large scale clouds.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 35.2. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Large scale cloud microphysics processes
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 36. Cloud Scheme
Characteristics of the cloud scheme
36.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of the atmosphere cloud scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 36.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the cloud scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
Explanation: 36.3. Atmos Coupling
Is Required: FALSE Type: ENUM Cardinality: 0.N
Atmosphere components that are linked to the cloud scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 36.4. Uses Separate Treatment
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Different cloud schemes for the different types of clouds (convective, stratiform and boundary layer)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 36.5. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Processes included in the cloud scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 36.6. Prognostic Scheme
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the cloud scheme a prognostic scheme?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 36.7. Diagnostic Scheme
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the cloud scheme a diagnostic scheme?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 36.8. Prognostic Variables
Is Required: FALSE Type: ENUM Cardinality: 0.N
List the prognostic variables used by the cloud scheme, if applicable.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 37. Cloud Scheme --> Optical Cloud Properties
Optical cloud properties
37.1. Cloud Overlap Method
Is Required: FALSE Type: ENUM Cardinality: 0.1
Method for taking into account overlapping of cloud layers
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 37.2. Cloud Inhomogeneity
Is Required: FALSE Type: STRING Cardinality: 0.1
Method for taking into account cloud inhomogeneity
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
Explanation: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Sub-grid scale water distribution
38.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sub-grid scale water distribution type
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 38.2. Function Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Sub-grid scale water distribution function name
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 38.3. Function Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Sub-grid scale water distribution function order
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
Explanation: 38.4. Convection Coupling
Is Required: TRUE Type: ENUM Cardinality: 1.N
Sub-grid scale water distribution coupling with convection
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
Explanation: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Sub-grid scale ice distribution
39.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sub-grid scale ice distribution type
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 39.2. Function Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Sub-grid scale ice distribution function name
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 39.3. Function Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Sub-grid scale ice distribution function order
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
Explanation: 39.4. Convection Coupling
Is Required: TRUE Type: ENUM Cardinality: 1.N
Sub-grid scale ice distribution coupling with convection
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 40. Observation Simulation
Characteristics of observation simulation
40.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of observation simulator characteristics
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 41. Observation Simulation --> Isscp Attributes
ISSCP Characteristics
41.1. Top Height Estimation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Cloud simulator ISSCP top height estimation method
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 41.2. Top Height Direction
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator ISSCP top height direction
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 42. Observation Simulation --> Cosp Attributes
CFMIP Observational Simulator Package attributes
42.1. Run Configuration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator COSP run configuration
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 42.2. Number Of Grid Points
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of grid points
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 42.3. Number Of Sub Columns
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of sub-columns used to simulate sub-grid variability
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 42.4. Number Of Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of levels
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 43. Observation Simulation --> Radar Inputs
Characteristics of the cloud radar simulator
43.1. Frequency
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Cloud simulator radar frequency (Hz)
End of explanation
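A FLOAT property likewise takes an unquoted number; the frequency below (94 GHz, a band commonly used by cloud-profiling radars) is offered only as a plausible placeholder in the requested units of Hz, not as the configuration of any specific simulator.
# Placeholder frequency in Hz - replace with the cloud radar simulator's actual frequency
DOC.set_value(94.0e9)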
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 43.2. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator radar type
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 43.3. Gas Absorption
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Cloud simulator radar uses gas absorption
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 43.4. Effective Radius
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Cloud simulator radar uses effective radius
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 44. Observation Simulation --> Lidar Inputs
Characteristics of the cloud lidar simulator
44.1. Ice Types
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator lidar ice type
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 44.2. Overlap
Is Required: TRUE Type: ENUM Cardinality: 1.N
Cloud simulator lidar overlap
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 45. Gravity Waves
Characteristics of the parameterised gravity waves in the atmosphere, whether from orography or other sources.
45.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of gravity wave parameterisation in the atmosphere
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 45.2. Sponge Layer
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sponge layer in the upper levels in order to avoid gravity wave reflection at the top.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 45.3. Background
Is Required: TRUE Type: ENUM Cardinality: 1.1
Background wave distribution
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 45.4. Subgrid Scale Orography
Is Required: TRUE Type: ENUM Cardinality: 1.N
Subgrid scale orography effects taken into account.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 46. Gravity Waves --> Orographic Gravity Waves
Gravity waves generated due to the presence of orography
46.1. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the orographic gravity wave scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 46.2. Source Mechanisms
Is Required: TRUE Type: ENUM Cardinality: 1.N
Orographic gravity wave source mechanisms
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 46.3. Calculation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Orographic gravity wave calculation method
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 46.4. Propagation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Orographic gravity wave propagation scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 46.5. Dissipation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Orographic gravity wave dissipation scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 47. Gravity Waves --> Non Orographic Gravity Waves
Gravity waves generated by non-orographic processes.
47.1. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the non-orographic gravity wave scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 47.2. Source Mechanisms
Is Required: TRUE Type: ENUM Cardinality: 1.N
Non-orographic gravity wave source mechanisms
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
Explanation: 47.3. Calculation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Non-orographic gravity wave calculation method
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 47.4. Propagation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Non-orographic gravity wave propagation scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 47.5. Dissipation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Non-orographic gravity wave dissipation scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 48. Solar
Top of atmosphere solar insolation characteristics
48.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of solar insolation of the atmosphere
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 49. Solar --> Solar Pathways
Pathways for solar forcing of the atmosphere
49.1. Pathways
Is Required: TRUE Type: ENUM Cardinality: 1.N
Pathways for the solar forcing of the atmosphere model domain
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
Explanation: 50. Solar --> Solar Constant
Solar constant and top of atmosphere insolation characteristics
50.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of the solar constant.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 50.2. Fixed Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If the solar constant is fixed, enter the value of the solar constant (W m-2).
End of explanation
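For instance, a model run with a fixed solar constant might document it as below; 1361.0 W m-2 is only an illustrative figure, so substitute the value actually prescribed in your configuration.
# Hypothetical example only -- use your model's prescribed solar constant.
DOC.set_value(1361.0)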
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 50.3. Transient Characteristics
Is Required: TRUE Type: STRING Cardinality: 1.1
solar constant transient characteristics (W m-2)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
Explanation: 51. Solar --> Orbital Parameters
Orbital parameters and top of atmosphere insolation characteristics
51.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of orbital parameters
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 51.2. Fixed Reference Date
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Reference date for fixed orbital parameters (yyyy)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 51.3. Transient Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Description of transient orbital parameters
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 51.4. Computation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method used for computing orbital parameters.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 52. Solar --> Insolation Ozone
Impact of solar insolation on stratospheric ozone
52.1. Solar Ozone Impact
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does top of atmosphere insolation impact on stratospheric ozone?
End of explanation
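Since this is a BOOLEAN property, the answer is simply True or False; a hypothetical completed cell might be:
# Hypothetical example only.
DOC.set_value(True)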
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 53. Volcanos
Characteristics of the implementation of volcanoes
53.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of the implementation of volcanic effects in the atmosphere
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 54. Volcanos --> Volcanoes Treatment
Treatment of volcanoes in the atmosphere
54.1. Volcanoes Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How volcanic effects are modeled in the atmosphere.
End of explanation |
14,532 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Saving Data
Once you request data, Hydrofunctions can automatically save the JSON in a compact zip file. The next time that you re-run your request, the data are retrieved automatically from the local file. Using a data cache like this saves on internet traffic, speeds up your code, and prevents spamming the NWIS just because you are making minor changes to your code. As an alternative to zipped JSON, Hydrofunctions also makes it easy to use Parquet, a compact file format for storing large datasets. Parquet is efficient
Step1: Automatic file reading & writing
The first time that you make the request, hydrofunctions will save the incoming data into a new file, and you will get a message, Saving data to filename.
The second time that you make the request, hydrofunctions will read the data from the file instead of requesting it, and you will get a message, Reading data from filename.
Step2: In effect, the local file will act as a cache for your data, reducing your network traffic.
Manual file reading & writing
It is also possible to force hydrofunctions to read or write a file by using the NWIS.read() and NWIS.save() methods. | Python Code:
import hydrofunctions as hf
new = hf.NWIS('01585200', 'dv', start_date='2018-01-01', end_date='2019-01-01', file='save_example.json.gz')
new
Explanation: Saving Data
Once you request data, Hydrofunctions can automatically save the JSON in a compact zip file. The next time that you re-run your request, the data are retrieved automatically from the local file. Using a data cache like this saves on internet traffic, speeds up your code, and prevents spamming the NWIS just because you are making minor changes to your code. As an alternative to zipped JSON, Hydrofunctions also makes it easy to use Parquet, a compact file format for storing large datasets. Parquet is efficient: file sizes are small and can be read quickly. Parquet is great for large datasets, because it is possible to access parts of the file without reading the entire file.
To save your data, simply provide a filename as a parameter to the NWIS object. If you supply a .parquet file extension, Hydrofunctions will save a parquet file; otherwise it will supply a .json.gz extension and save it in that format.
End of explanation
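As a minimal sketch of the Parquet option described above (reusing the same site and period as the request above), passing a filename ending in .parquet to the file parameter caches the data in Parquet instead of zipped JSON:
# Sketch: identical request, but cached as Parquet rather than .json.gz.
pq_cached = hf.NWIS('01585200', 'dv', start_date='2018-01-01', end_date='2019-01-01', file='save_example.parquet')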
new = hf.NWIS('01585200', 'dv', start_date='2018-01-01', end_date='2019-01-01', file='save_example.json.gz')
new
Explanation: Automatic file reading & writing
The first time that you make the request, hydrofunctions will save the incoming data into a new file, and you will get a message, Saving data to filename.
The second time that you make the request, hydrofunctions will read the data from the file instead of requesting it, and you will get a message, Reading data from filename.
End of explanation
new.save('save_example.parquet')
new.read('save_example.parquet')
new
Explanation: In effect, the local file will act as a cache for your data, reducing your network traffic.
Manual file reading & writing
It is also possible to force hydrofunctions to read or write a file by using the NWIS.read() and NWIS.save() methods.
End of explanation |
14,533 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Word 2 Vec
Step1: Install necessary NL modules.
Step2: Load data for Natural Language processing.
Step3: Load both labelled and unlabelled train datasets.
Step4: Define preprocessors
Step5: Cut reviews into sentences.
Step6: Cut each review into sentences.
Step7: Get the vector representation of words using word2vec in gensim module.
Step8: Let's see how well the trained model performs over the google analogical proportions dataset.
Step9: The reason why thiese results are so poor is that the reviews database is not a language corpus, it is does not provide enough coverage of the natural language variety (English), it is topically biased, and, since it is mainly user generated content, it is stylistically more colloquial.
Let's see how exactly the IMDB reviews fails as a corpus for the Google's analogical proportion test
Step10: Not unexpectedly, the reviews do not cover geographical terms relations well enough.
Step11: The most similar terms to "king" are the name of the Dinsey animation "Lion King", a fictional beast "King Kong" and the author of many a horror and supertnatural fiction novel "Stephen King". This document set is no good for general language semantics testing. Aladdin is no king.
Step12: One would expect to see at least one reference to architecutral style, but the reviews are mostly focused on genres and movies.
Step13: The model, trained on IMDB reviews, cannot correctly identify three cardinal directions out of 4.
* South is to north as east is to coast, -- brilliant!
Step14: LDA
Implement a lemmatizer based on WordNet relationship data and sentences of reivews.
Step15: Collect lemmatized reviews into one "corpus"
Step16: Import gensim toolkit
Step17: Construct the term vocabulary
Step18: Ditch too frequent or too rare terms.
Step19: Transform the document words into word ID vectors
Step20: Train a Latent Dirichlet Allocation model.
Step21: What is the LDA model? Basically the setting is as follows
Step22: Sadly, they do readily lend themselves as topic keywords. | Python Code:
import numpy as np, sklearn as sk, pandas as pd
from bs4 import BeautifulSoup as bs
import matplotlib.pyplot as plt
import time as tm, os, regex as re
%matplotlib inline
import warnings
warnings.filterwarnings("ignore")
DATAPATH = os.path.realpath( os.path.join( ".", "data", "imdb" ) )
Explanation: Word 2 Vec
End of explanation
import nltk
assert( nltk.download( [ "stopwords", "wordnet", "wordnet_ic", "punkt" ] ) )
Explanation: Install necessary NL modules.
End of explanation
from nltk.corpus import stopwords as nl_sw
import nltk.data
english_stopwords = set( nl_sw.words( "english" ) )
english_tokenizer = nltk.data.load( "tokenizers/punkt/english.pickle" )
Explanation: Load data for Natural Language processing.
End of explanation
# Read data from files
unlabelled_train_data = pd.read_csv( os.path.join( DATAPATH, 'unlabeledTrainData.tsv' ),
sep = "\t", header = 0, quoting = 3, encoding="utf-8" )
labelled_train_data = pd.read_csv( os.path.join( DATAPATH, 'labeledTrainData.tsv' ),
sep = "\t", header = 0, quoting = 3, encoding="utf-8" )
Explanation: Load both labelled and unlabelled train datasets.
End of explanation
def __wordlist( text, stops = None ) :
letters_only = re.sub("[^a-zA-Z]", " ", bs( text ).get_text( ) )
words = letters_only.lower( ).split()
if stops is not None :
return [ w for w in words if not w in stops ]
return words
Explanation: Define preprocessors
End of explanation
def __sentences( text, tokenizer = None, stops = None ):
raw_sentences = tokenizer.tokenize( text.strip( ) )
return [ __wordlist( s, stops = stops )
for s in raw_sentences if len( s ) > 0 ]
Explanation: Cut reviews into sentences.
End of explanation
train_sentences = list( )
if not os.path.exists( os.path.join( DATAPATH, 'imdb_review_train_sentences.txt' ) ) :
print "Cutting reviews into sentences."
## Begin time
tock = tm.time( )
## Convert reviews into sentences
print "Labelled train dataset..."
for r in labelled_train_data.review :
train_sentences.extend( __sentences( r, english_tokenizer, stops = None ) )
print "Unabelled train dataset..."
for r in unlabelled_train_data.review :
train_sentences.extend( __sentences( r, english_tokenizer, stops = None ) )
## End time
tick = tm.time( )
## Report
print "Preprocessing took %.1f sec." % ( tick - tock, )
print "Caching..."
## Store the processed sentences in a UTF-8 text file
with open( os.path.join( DATAPATH, 'imdb_review_train_sentences.txt' ), 'wb' ) as cache :
cache.writelines( "\t".join( s ).encode( 'utf8' ) + "\n" for s in train_sentences )
## Final time
tock = tm.time( )
else :
print "Loading cached sentences..."
## Begin time
tick = tm.time( )
with open( os.path.join( DATAPATH, 'imdb_review_train_sentences.txt' ), 'rb' ) as cache :
train_sentences.extend( l.decode( 'utf8' ).strip( ).split( '\t' ) for l in cache.readlines( ) )
## End time
tock = tm.time( )
## Report
print "Loaded sentences in %.1f sec." % ( tock - tick, )
Explanation: Cut each review into sentences.
End of explanation
import gensim.models, time as tm
# Initialize the model
model = gensim.models.Word2Vec(
workers = 7, # Number of threads to run in parallel
size = 300, # Word vector dimensionality
min_count = 40, # Minimum word count for pruning the internal dictionary
window = 10, # Context sindow size
sample = 1e-3 ) # Downsample setting for frequent words
model_cache_name = "W2V_%d-%d-%d.mdl" % ( model.layer1_size, model.min_count, model.window , )
if not os.path.exists( os.path.join( DATAPATH, model_cache_name ) ) :
## Begin time
tock = tm.time( )
## First pass -- building the vocabulary
model.build_vocab( train_sentences )
## Second pass -- training the neural net
model.train( train_sentences )
## End time
tick = tm.time( )
## Report
print "Training word2vec took %.1f sec." % ( tick - tock, )
# If you don't plan to train the model any further, calling
# init_sims will make the model much more memory-efficient.
model.init_sims( replace = True )
# It can be helpful to create a meaningful model name and
# save the model for later use. You can load it later using Word2Vec.load()
model.save( os.path.join( DATAPATH, model_cache_name ) )
## End time
tock = tm.time( )
else :
## Begin time
tick = tm.time( )
## Load the model from the blob
model = gensim.models.Word2Vec.load( os.path.join( DATAPATH, model_cache_name ) )
## End time
tock = tm.time( )
## Report
print "Model loaded in %.1f sec." % ( tock - tick, )
Explanation: Get the vector representation of words using word2vec in gensim module.
End of explanation
print "Testing Google's analogical proportions..."
tick = tm.time( )
## test model accuracy against the Google dataset
google_dataset_accuracy = model.accuracy( os.path.join( DATAPATH, 'questions-words.txt' ) )
tock = tm.time( )
print "Completed in %.1f sec." % ( tock - tick, )
print "####\tCORRECT\tTOTAL\tSECTION"
for i, s in enumerate( google_dataset_accuracy, 0 ) :
total = len( s['correct'] ) + len( s['incorrect'] )
print "%4d\t%4d\t%5d\t%s." % ( i, len( s['correct'] ), total, s['section'], )
Explanation: Let's see how well the trained model performs over the google analogical proportions dataset.
End of explanation
for A, B, C, expected in google_dataset_accuracy[1]["incorrect"][:10] :
predictions = [ p for p, s in model.most_similar( positive=[ B, C ], negative=[ A ], topn = 5 ) ]
if expected not in predictions :
print "%s - %s : %s - %s " % ( A,B,C, expected, ) , predictions
else :
pass
Explanation: The reason why these results are so poor is that the reviews database is not a language corpus: it does not provide enough coverage of the natural language variety (English), it is topically biased, and, since it is mainly user generated content, it is stylistically more colloquial.
Let's see how exactly the IMDB reviews fail as a corpus for Google's analogical proportion test :
word A is to B as C is to D <!-- B / A = C / D -->
End of explanation
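To make the A : B :: C : D pattern concrete, a single analogy can also be queried directly; this is only a sketch, and the neighbours returned depend entirely on the trained model.
# Sketch: "man is to king as woman is to ?", i.e. A=man, B=king, C=woman.
analogy_candidates = model.most_similar( positive = [ "king", "woman" ], negative = [ "man" ], topn = 3 )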
model.most_similar( "king" )
Explanation: Not unexpectedly, the reviews do not cover geographical terms relations well enough.
End of explanation
model.most_similar( "gothic" )
Explanation: The most similar terms to "king" are the name of the Disney animation "Lion King", a fictional beast "King Kong" and the author of many a horror and supernatural fiction novel "Stephen King". This document set is no good for general language semantics testing. Aladdin is no king.
End of explanation
print "west\t - ", [ d for d, s in model.most_similar( [ "south", "west" ], [ "north" ], topn = 5 ) ]
print "east\t - ", [ d for d, s in model.most_similar( [ "south", "east" ], [ "north" ], topn = 5 ) ]
print "north\t - ", [ d for d, s in model.most_similar( [ "west", "north" ], [ "east" ], topn = 5 ) ]
print "south\t - ", [ d for d, s in model.most_similar( [ "west", "south" ], [ "east" ], topn = 5 ) ]
Explanation: One would expect to see at least one reference to architectural style, but the reviews are mostly focused on genres and movies.
End of explanation
print model.doesnt_match("sea ocean lake river".split())
print model.doesnt_match( "good bad ugly horrible".split( ) )
print model.most_similar( positive=['woman', 'king'], negative=['man'], topn=1)
print model.doesnt_match("breakfast cereal dinner milk".split())
print model.similarity('woman', 'man')
vocab = np.asarray( model.vocab.keys(), dtype = np.str)
# vocab[ np.argmax( np.abs(model.syn0), axis = 0 ) ]
vocab
Explanation: The model, trained on IMDB reviews, cannot correctly identify three cardinal directions out of 4.
* South is to north as east is to coast, -- brilliant!
End of explanation
wnl = nltk.WordNetLemmatizer( )
def __lemmatize( text, lemmatizer, tokenizer ) :
processed_text = re.sub( "\"", "", bs( text ).get_text( ) )
raw_sentences = tokenizer.tokenize( processed_text.strip( ).lower( ) )
return [ lemmatizer.lemmatize( w )
for s in raw_sentences for w in re.sub( r"\p{Punctuation}+", " ", s ).split( ) ]
Explanation: LDA
Implement a lemmatizer based on WordNet relationship data and sentences of reviews.
End of explanation
lemmatized_reviews = list( )
print "Cutting reviews into sentences."
## Begin time
tock = tm.time( )
## Convert reviews into sentences
print "Labelled train dataset..."
for r in labelled_train_data.review :
lemmatized_reviews.append( __lemmatize( r, wnl, english_tokenizer ) )
print "Unabelled train dataset..."
for r in unlabelled_train_data.review :
lemmatized_reviews.append( __lemmatize( r, wnl, english_tokenizer ) )
## End time
tick = tm.time( )
## Report
print "Preprocessing took %.1f sec." % ( tick - tock, )
Explanation: Collect lemmatized reviews into one "corpus"
End of explanation
from gensim import corpora, models, similarities
Explanation: Import gensim toolkit
End of explanation
if not os.path.exists( os.path.join( DATAPATH, 'LDA_vocabulary.dct' ) ) :
vocabulary = corpora.Dictionary( lemmatized_reviews )
Explanation: Construct the term vocabulary
End of explanation
if not os.path.exists( os.path.join( DATAPATH, 'LDA_vocabulary.dct' ) ) :
vocabulary.filter_extremes( no_below = 5, no_above = 0.5, keep_n = None )
vocabulary.save( os.path.join( DATAPATH, 'LDA_vocabulary.dct' ) )
vocabulary
Explanation: Ditch too frequent or too rare terms.
End of explanation
corpus = [ vocabulary.doc2bow( text ) for text in lemmatized_reviews ]
corpora.MmCorpus.serialize( os.path.join( DATAPATH, 'LDA_bow.mm' ), corpus ) # store on disc
Explanation: Transform the document words into word ID vectors: bag-of-terms.
End of explanation
## Begin time
tick = tm.time( )
## Fit the LDA model
model = models.ldamodel.LdaModel(
corpus, id2word = vocabulary, num_topics = 100, chunksize = 50, update_every = 1, passes = 2 )
## End time
tock = tm.time()
print "Estimating LDA model took %.3f sec."%( tock - tick, )
Explanation: Train a Latent Dirichlet Allocation model.
End of explanation
for p in range( 10 ) :
for t in range( 20, 25 ) :
print model.show_topic(t)[ p ][ 1 ].center( 20, ' ' ),
print
Explanation: What is the LDA model? Basically the setting is as follows:
* there is an active vocabulary $W$ of terms, -- basically a finite "alphabet" where each word-term is a letter ;
* there is a collection of possible topics $T$, each characterized by a particular distribution of term-frequencies $(\theta_t)_{t\in T} \in [0,1]^W$ ;
* in a collection of documents $D$ each document (being just an ordered tuple of terms) has its own distribution of topics $(\phi_d)_{d\in D} \in [0,1]^T$ ;
* now, each word $w_{di}$, $i\in d$ of a particular document $d\in D$ is assumed to have its intrinsic topic $z_{di}$ determined by the distribution of topics $\phi_d$ within that document ;
* in turn, the word $w_{di}$ conditionally on its topic is believed to have distribution $\theta_{z_{di}}$.
Formally, the model is as follows: given a set of documents $D$ and words $W$
* $ \bigl(\theta_t\bigr)_{t\in T} \sim \text{Dir}_W(\alpha)\,;$
* $ \bigl(\phi_d\bigr)_{d\in D} \sim \text{Dir}_T(\beta)\,;$
* $ \bigl( z_{di} \bigr)_{d\in D,i\in d} \sim \text{Cat}_T( \phi_d )\,;$
* $ \bigl( w_{di} \bigr)_{d\in D,i\in d} \sim \text{Cat}_W( \theta_{z_{di}} )\,;$
where $\text{Dir}_F(\alpha)$ is the Dirichlet Distribution on the simplex $S^\circ_F = \{ x\in [0,1]^F \big| \sum_{i\in F} x_i = 1 \}$ with parameter $\alpha > 0$ and density for any $x\in [0,1]^F$
$$ \text{Dir}_F\bigl( x;\alpha \bigr)
    = \frac{\Gamma(\sum_{i\in F} \alpha_i)}{\prod_{i\in F} \Gamma(\alpha_i)} 1_{x\in S^\circ_F }
        \prod_{i\in F} x_i^{\alpha_i-1}\,, $$
and $\text{Cat}_F(\theta)$ is the categorical distribution on $F$ with parameter $\theta$ and density
$$ \text{Cat}_F(x;\theta) = \theta_x = \prod_{i\in F} \theta_i^{1_{x=i}}\,, $$
which is the distribution of a discrete random variable with values in $F$.
Let $w_d = \bigl( w_{di} \bigr)_{i\in d}$ for any $d\in D$. Then the log-likelihood of the model is
$$ L( D |\alpha, \beta )
    = \log \prod_{d\in D} p_d( w_d |\alpha, \beta )
= \sum_{d\in D} \sum_{i\in d} \log p_d( w_{di} |\alpha, \beta )\,, $$
where
$$ p_d\bigl( w | \alpha, \beta \bigr)
        = \mathbb{E}_{(\theta,\phi) \sim \text{Dir}_W(\alpha) \times \text{Dir}_T(\beta)}
p_d\bigl( w, \theta, \phi|\alpha, \beta \bigr)
= \iint p_d\bigl( w | \theta, \phi \bigr) \text{Dir}_W(\theta; \alpha) \times \text{Dir}_T(\phi; \beta) d\theta d\phi\,, $$
and
$$ p_d\bigl( w | \theta, \phi \bigr)
    = \sum_{z \in T} p_d( w, z |\theta, \phi )
= \sum_{z \in T} p_d( w | z, \theta, \phi ) p_d( z | \theta, \phi )
= \sum_{z \in T} \theta_{zw} p_d( z | \phi )
= \sum_{z \in T} \theta_{zw} \phi_{dz} \,, $$
for $\theta=(\theta_t)_{t\in T}$ and $\phi = (\phi_d)_{d\in D}$.
In Latent Semantic Analysis
$$ L( D |\theta, \phi ) = \prod_{d\in D} \prod_{i\in d} p_d( w_{di} |\theta,\phi ) \,, $$
with $p_d(\cdot)$ being the terms distribution in a particular document $d\in D$. The log-likelihood is
$$ l(D|\theta,\phi) = \sum_{d\in D} \sum_{i\in d} \log \sum_{z_{di}\in T} p_d( w_{di}, z_{di} |\theta,\phi ) \,,$$
since each word comes from a mixture of topic distributions, with the mixture component determined by $z_{di}$.
If the latent topic of each word were known, then the log-likelihood would be:
$$ l(D, Z|\theta,\phi) = \sum_{d\in D} \sum_{i\in d} \log \theta_{z_{di}w_{di}} + \sum_{d\in D} \sum_{i\in d} \log \phi_{d\,z_{di}} \,,$$
which in a more analytically-friendly notation would look like:
$$ l(D, Z|\theta,\phi) = \sum_{d\in D} \sum_{i\in d} \sum_{t\in T} \sum_{v\in W} 1_{t=z_{di}} 1_{v=w_{di}} \log \theta_{tv} + \sum_{d\in D} \sum_{i\in d} \sum_{t\in T} 1_{t=z_{di}} \log \phi_{dt} \,,$$
whence
$$ l(D, Z|\theta,\phi) = \sum_{t\in T} \sum_{v\in W} \log \theta_{tv} \sum_{d\in D} \sum_{i\in d} 1_{t=z_{di}} 1_{v=w_{di}} + \sum_{t\in T} \sum_{c\in D} \log \phi_{ct} \sum_{d\in D} \sum_{i\in d} 1_{t=z_{di}} 1_{c=d} \,. $$
where using Bayes formula
$$ p_d(z|w)
= \frac{p_d(w,z)}{ p_d(w) }
= \frac{p_d(w,z)}{ \sum_{z\in T} p_d(w,z) }
= \frac{p_d(w|z)p_d(z)}{ \sum_{z\in T} p_d(w,z) }
= \frac{ \theta_{zw} \phi_{dz}}{ \sum_{t\in T} \theta_{tw} \phi_{dt} }\,, $$
Let's have a look at the topics uncovered by the LDA represented by the most likely words.
End of explanation
model.show_topic(1)
Explanation: Sadly, they do readily lend themselves as topic keywords.
End of explanation |
14,534 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Training a classification model for wine production quality
Objective
Step1: The Wine Quality Dataset
The dataset is available in the UCI Machine Learning Repository.
Get the data
There is a copy of the White Wine dataset available on Google Cloud Storage (GCS).
The cell below shows the location of the CSV file.
Step2: To visualize and manipulate the data, we will use pandas.
First step is to import the data. We should list the columns that will be used to train our model. These column names will define what data will compose the dataframe object in pandas.
Step3: Clean the data
Datasets sometimes can have null values. Running the next cell counts how many null values exist on each one of the columns.
Note
Step4: We can see that, on this dataset, there are no null values.
If there were any, we could run dataframe = dataframe.dropna() to drop them and make this tutorial simpler.
Inspect the data
Let's take a look at the dataframe content. The tail() method, when ran on a dataframe, shows the last n roles (n is 5 by default).
Step5: Have a quick look at the joint distribution of a few pairs of columns from the training set
Step6: --- Some considerations ---
Did you notice anything when looking at the stats table?
One useful piece of information we can get from those are, for example, min and max values. This allows us to understand ranges in which these features fall in.
Based on the description of the dataset and the task we are trying to achieve, do you see any issues with the examples we have available to train on?
Did you notice that the ratings on the dataset range from 3 to 9? In this dataset, there is no wine rated with a 10 or a 0 - 2 rating. This will likely produce a poor model that is not able to generalize well to examples of fantastic tasting wine (nor to the ones that taste pretty bad!).
One way to fix this is to make sure your dataset represents all possible classes well. Another analysis, that we do not do on this exercise, is check if the data is balanced. Having a balanced dataset produces fair model, and that is always a good thing!
Split the data into train, validation and test
Now split the dataset into a training, validation, and test set.
Test sets are used for a final evaluation of the trained model.
There are more sophisticated ways to make sure that your splitting methods are repeatable. Ideally, the sets would always be the same after splitting to avoid randomic results, which makes experimentation difficult.
Step7: Use the tf.data.Dataset
The tf.data.Dataset allows for writing descriptive and efficient input pipelines. Dataset usage follows a common pattern
Step8: Next step is to create batches from train, validation and test datasets that we split earlier. Let's use a batch size of 5 for demonstration purposes.
Step9: Let's look at one batch of the data. The example below prints the content of a batch (column names, elements from the citric_acid column and elements from the quality label.
Step10: Create feature columns
TensorFlow provides many types of feature columns. In this exercise, all the feature columns are of type numeric. If there were any text or categorical values, transformations would need to take place to make the input all numeric.
However, you often don't want to feed a number directly into the model, but instead split its value into different categories based on numerical ranges. To do this, use the bucketized_column method of feature columns. This allows for the network to represent discretized dense input bucketed by boundaries.
Feature columns are the object type used to create feature layers, which we will feed to the Keras model.
Step11: Define, compile and train the Keras model
We will be using the Keras Sequential API to create the logistic regression model for the classification of the wine quality.
The model will be composed of the input layer (feature_layer created above), a single dense layer with two neural nodes, and the output layer, which will allow the model to predict the rating (1 - 10) of each instance being inferred.
When compiling the model, we define a loss function, an optimizer and which metrics to use to evaluate the model. CategoricalCrossentropy is a type of loss used in classification tasks. Losses are a mathematical way of measuring how wrong the model predictions are.
Optimizers tie together the loss function and model parameters by updating the model in response to the output of the loss function. In simpler terms, optimizers shape and mold your model into its most accurate possible form by playing with the weights. The loss function is the guide to the terrain, telling the optimizer when it’s moving in the right or wrong direction. We will use Adam as our optimizer for this exercise. Adam is an optimization algorithm that can be used instead of the classical stochastic gradient descent procedure to update network weights iterative based in training data.
There are many types of optimizers one can chose from. Ideally, when creating an ML model, try and identify an optimizer that has been empirically adopted on similar tasks. | Python Code:
# Ensure the right version of Tensorflow is installed.
!pip freeze | grep tensorflow==2.1 || pip install tensorflow==2.1
from __future__ import absolute_import, division, print_function, unicode_literals
import pathlib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
print(tf.__version__)
Explanation: Training a classification model for wine production quality
Objective:
In this lab, you will use the Keras Sequential API to create a classification model. You will learn how to use the tf.data API for creating input pipelines and use feature columns to prepare the data to be consumed by a neural network.
Lab Scope:
This lab does not cover how to make predictions on the model or deploy it to Cloud AI Platform.
Learning objectives:
Apply techniques to clean and inspect data.
Split dataset into training, validation and test datasets.
Use the tf.data.Dataset to create an input pipeline.
Use feature columns to prepare the data to be tained by a neural network.
Define, compile and train a model using the Keras Sequential API.
In a classification problem, we aim to select the output from a limited set of discrete values, like a category or a class. Contrast this with a regression problem, where we aim to predict a value from a continuos range of values.
This notebook uses the Wine Production Quality Dataset and builds a model to predict the production quality of wine given a set of attributes such as its citric acidity, density, and others.
To do this, we'll provide the model with examples of different wines produced, that received a rating from an evaluator. The ratings are provided by the numbers 0 - 10 (0 being of very poor quality and 10 being of great quality). We will then try and use this model to predict the rate a new wine will receive by infering towards the trained model.
Since we are learning how to use the Tensorflow 2.x API, this example uses the tf.keras API. Please see this guide for details.
End of explanation
dataset_path = "gs://cloud-training-demos/wine_quality/winequality-white.csv"
Explanation: The Wine Quality Dataset
The dataset is available in the UCI Machine Learning Repository.
Get the data
There is a copy of the White Wine dataset available on Google Cloud Storage (GCS).
The cell below shows the location of the CSV file.
End of explanation
column_names = ['fixed_acidity','volatile_acidity','citric_acid','residual_sugar',
'chlorides','free_sulfur_dioxide','total_sulfur_dioxide','density',
'pH','sulphates','alcohol','quality']
raw_dataframe = pd.read_csv(dataset_path, names=column_names, header = 0,
na_values = " ", comment='\t',
sep=";", skipinitialspace=True)
raw_dataframe = raw_dataframe.astype(float)
raw_dataframe['quality'] = raw_dataframe['quality'].astype(int)
dataframe= raw_dataframe.copy()
Explanation: To visualize and manipulate the data, we will use pandas.
First step is to import the data. We should list the columns that will be used to train our model. These column names will define what data will compose the dataframe object in pandas.
End of explanation
dataframe.isna().sum()
Explanation: Clean the data
Datasets sometimes can have null values. Running the next cell counts how many null values exist on each one of the columns.
Note: There are many other steps to make sure the data is clean, but this is out of the scope of this exercise.
End of explanation
dataframe.tail()
data_stats = dataframe.describe()
data_stats = data_stats.transpose()
data_stats
Explanation: We can see that, on this dataset, there are no null values.
If there were any, we could run dataframe = dataframe.dropna() to drop them and make this tutorial simpler.
Inspect the data
Let's take a look at the dataframe content. The tail() method, when ran on a dataframe, shows the last n roles (n is 5 by default).
End of explanation
import seaborn as sns
sns.pairplot(dataframe[["quality", "citric_acid", "residual_sugar", "alcohol"]], diag_kind="kde")
Explanation: Have a quick look at the joint distribution of a few pairs of columns from the training set:
End of explanation
from sklearn.model_selection import train_test_split
train, test = train_test_split(dataframe, test_size=0.2)
train, val = train_test_split(train, test_size=0.2)
print(len(train), 'train examples')
print(len(val), 'validation examples')
print(len(test), 'test examples')
Explanation: --- Some considerations ---
Did you notice anything when looking at the stats table?
One useful piece of information we can get from those is, for example, the min and max values. This allows us to understand the ranges in which these features fall.
Based on the description of the dataset and the task we are trying to achieve, do you see any issues with the examples we have available to train on?
Did you notice that the ratings on the dataset range from 3 to 9? In this dataset, there is no wine rated with a 10 or a 0 - 2 rating. This will likely produce a poor model that is not able to generalize well to examples of fantastic tasting wine (nor to the ones that taste pretty bad!).
One way to fix this is to make sure your dataset represents all possible classes well. Another analysis, that we do not do on this exercise, is to check if the data is balanced. Having a balanced dataset produces a fair model, and that is always a good thing!
Split the data into train, validation and test
Now split the dataset into a training, validation, and test set.
Test sets are used for a final evaluation of the trained model.
There are more sophisticated ways to make sure that your splitting methods are repeatable. Ideally, the sets would always be the same after splitting to avoid randomic results, which makes experimentation difficult.
End of explanation
def df_to_dataset(dataframe, epochs=10, shuffle=True, batch_size=64):
dataframe = dataframe.copy()
labels = tf.keras.utils.to_categorical(dataframe.pop('quality'), num_classes=11) #extracting the column which contains the training label
ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels))
if shuffle:
ds = ds.shuffle(buffer_size=len(dataframe))
ds = ds.repeat(epochs).batch(batch_size)
return ds
Explanation: Use the tf.data.Dataset
The tf.data.Dataset allows for writing descriptive and efficient input pipelines. Dataset usage follows a common pattern:
Create a source dataset from your input data.
Apply dataset transformations to preprocess the data.
Iterate over the dataset and process the elements.
Iteration happens in a streaming fashion, so the full dataset does not need to fit into memory.
The df_to_dataset method below creates a dataset object from a pandas dataframe.
End of explanation
train_ds = df_to_dataset(train)
val_ds = df_to_dataset(val, shuffle=False)
test_ds = df_to_dataset(test, shuffle=False)
Explanation: Next step is to create batches from the train, validation and test datasets that we split earlier. Here we rely on the default batch size of 64 defined in df_to_dataset; a smaller batch can be requested explicitly, as sketched below.
End of explanation
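If you do want to inspect a small batch while experimenting, the batch size can be overridden explicitly through the df_to_dataset parameter defined above (a sketch):
# Sketch: a small batch makes it easier to read individual examples.
small_train_ds = df_to_dataset(train, batch_size=5)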
for feature_batch, label_batch in train_ds.take(1):
print('Every feature:', list(feature_batch.keys()))
print('A batch of citric acid:', feature_batch['citric_acid'])
print('A batch of quality:', label_batch )
Explanation: Let's look at one batch of the data. The example below prints the content of a batch (column names, elements from the citric_acid column and elements from the quality label.
End of explanation
from tensorflow import feature_column
feature_columns = []
fixed_acidity = tf.feature_column.numeric_column('fixed_acidity')
bucketized_fixed_acidity = tf.feature_column.bucketized_column(
fixed_acidity, boundaries=[3., 5., 7., 9., 11., 13., 14.])
feature_columns.append(bucketized_fixed_acidity)
volatile_acidity = tf.feature_column.numeric_column('volatile_acidity')
bucketized_volatile_acidity = tf.feature_column.bucketized_column(
volatile_acidity, boundaries=[0., 0.2, 0.4, 0.6, 0.8, 1.])
feature_columns.append(bucketized_volatile_acidity)
citric_acid = tf.feature_column.numeric_column('citric_acid')
bucketized_citric_acid = tf.feature_column.bucketized_column(
citric_acid, boundaries=[0., 0.4, 0.7, 1.0, 1.3, 1.8])
feature_columns.append(bucketized_citric_acid)
residual_sugar = tf.feature_column.numeric_column('residual_sugar')
bucketized_residual_sugar = tf.feature_column.bucketized_column(
residual_sugar, boundaries=[0.6, 10., 20., 30., 40., 50., 60., 70.])
feature_columns.append(bucketized_residual_sugar)
chlorides = tf.feature_column.numeric_column('chlorides')
bucketized_chlorides = tf.feature_column.bucketized_column(
chlorides, boundaries=[0., 0.1, 0.2, 0.3, 0.4])
feature_columns.append(bucketized_chlorides)
free_sulfur_dioxide = tf.feature_column.numeric_column('free_sulfur_dioxide')
bucketized_free_sulfur_dioxide = tf.feature_column.bucketized_column(
free_sulfur_dioxide, boundaries=[1., 50., 100., 150., 200., 250., 300.])
feature_columns.append(bucketized_free_sulfur_dioxide)
total_sulfur_dioxide = tf.feature_column.numeric_column('total_sulfur_dioxide')
bucketized_total_sulfur_dioxide = tf.feature_column.bucketized_column(
total_sulfur_dioxide, boundaries=[9., 100., 200., 300., 400., 500.])
feature_columns.append(bucketized_total_sulfur_dioxide)
density = tf.feature_column.numeric_column('density')
bucketized_density = tf.feature_column.bucketized_column(
density, boundaries=[0.9, 1.0, 1.1])
feature_columns.append(bucketized_density)
pH = tf.feature_column.numeric_column('pH')
bucketized_pH = tf.feature_column.bucketized_column(
pH, boundaries=[2., 3., 4.])
feature_columns.append(bucketized_pH)
sulphates = tf.feature_column.numeric_column('sulphates')
bucketized_sulphates = tf.feature_column.bucketized_column(
sulphates, boundaries=[0.2, 0.4, 0.7, 1.0, 1.1])
feature_columns.append(bucketized_sulphates)
alcohol = tf.feature_column.numeric_column('alcohol')
bucketized_alcohol = tf.feature_column.bucketized_column(
alcohol, boundaries=[8., 9., 10., 11., 12., 13., 14.])
feature_columns.append(bucketized_alcohol)
feature_columns
# Create a feature layer from the feature columns
feature_layer = tf.keras.layers.DenseFeatures(feature_columns)
Explanation: Create feature columns
TensorFlow provides many types of feature columns. In this exercise, all the feature columns are of type numeric. If there were any text or categorical values, transformations would need to take place to make the input all numeric.
However, you often don't want to feed a number directly into the model, but instead split its value into different categories based on numerical ranges. To do this, use the bucketized_column method of feature columns. This allows for the network to represent discretized dense input bucketed by boundaries.
Feature columns are the object type used to create feature layers, which we will feed to the Keras model.
End of explanation
model = tf.keras.Sequential([
feature_layer,
layers.Dense(8, activation='relu'),
layers.Dense(8, activation='relu'),
layers.Dense(11, activation='softmax')
])
model.compile(optimizer='adam',
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),
metrics=['accuracy'])
model.fit(train_ds,
validation_data=val_ds,
epochs=5)
Explanation: Define, compile and train the Keras model
We will be using the Keras Sequential API to create the logistic regression model for the classification of the wine quality.
The model will be composed of the input layer (feature_layer created above), a single dense layer with two neural nodes, and the output layer, which will allow the model to predict the rating (1 - 10) of each instance being inferred.
When compiling the model, we define a loss function, an optimizer and which metrics to use to evaluate the model. CategoricalCrossentropy is a type of loss used in classification tasks. Losses are a mathematical way of measuring how wrong the model predictions are.
Optimizers tie together the loss function and model parameters by updating the model in response to the output of the loss function. In simpler terms, optimizers shape and mold your model into its most accurate possible form by playing with the weights. The loss function is the guide to the terrain, telling the optimizer when it’s moving in the right or wrong direction. We will use Adam as our optimizer for this exercise. Adam is an optimization algorithm that can be used instead of the classical stochastic gradient descent procedure to update network weights iterative based in training data.
There are many types of optimizers one can chose from. Ideally, when creating an ML model, try and identify an optimizer that has been empirically adopted on similar tasks.
End of explanation |
14,535 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<img src="images/logo.jpg" style="display
Step1: <p style="text-align
Step2: <p style="text-align
Step3: <div class="align-center" style="display
Step4: <p style="text-align
Step5: <p style="text-align
Step6: <p style="text-align
Step7: <p style="text-align
Step8: <p style="text-align
Step9: <p style="text-align
Step10: <p style="text-align
Step11: <p style="text-align
Step12: <p style="text-align
Step13: <p style="text-align
Step14: <p style="text-align
Step15: <p style="text-align
Step16: <p style="text-align
Step17: <p style="text-align
Step18: <p style="text-align
Step21: <p style="text-align
Step22: <ol style="text-align
Step23: <p style="text-align
Step24: <p style="text-align
Step25: <p style="text-align
Step26: <p style="text-align | Python Code:
counter = 0
while counter < 10
print("Stop it!")
counter += 1
Explanation: <img src="images/logo.jpg" style="display: block; margin-left: auto; margin-right: auto;" alt="לוגו של מיזם לימוד הפייתון. נחש מצויר בצבעי צהוב וכחול, הנע בין האותיות של שם הקורס: לומדים פייתון. הסלוגן המופיע מעל לשם הקורס הוא מיזם חינמי ללימוד תכנות בעברית.">
<span style="text-align: right; direction: rtl; float: right;">חריגות</span>
<span style="text-align: right; direction: rtl; float: right; clear: both;">הקדמה</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
עוד בשבוע הראשון, כשרק התחלתם את הקורס, הזהרנו אתכם שהמחשב עלול להיות עמית קשוח לעבודה.<br>
הוא תמיד מבצע בדיוק את מה שהוריתם לו לעשות, ולא מסוגל להתגבר לבד גם על הקלה שבטעויות.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
במהלך הקורס נתקלתם פעמים רבות בחריגות ("שגיאות") שפייתון התריעה לכם עליהן.<br>
חלק מההתרעות על חריגות התרחשו בגלל טעויות בסיסיות בקוד, כמו נקודתיים חסרות בסוף שורת <code>if</code>,<br>
וחלק מהן התרחשו בגלל בעיות שהתגלו מאוחר יותר – כמו קובץ שניסיתם לפתוח אבל לא היה קיים במחשב.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
בפרק זה ניכנס בעובי הקורה בכל הנוגע לחריגות.<br>
נבין אילו סוגי חריגות יש, איך מפענחים אותן ואיך הן בנויות בפייתון, איך מטפלים בהן ואיך יוצרים חריגות בעצמנו.
</p>
<span style="text-align: right; direction: rtl; float: right; clear: both;">הגדרה</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
<dfn>חריגה</dfn> (<dfn>Exception</dfn>) מייצגת כשל שהתרחש בזמן שפייתון ניסתה לפענח את הקוד שלנו או להריץ אותו.<br>
כשהקוד קורס ומוצגת לנו הודעת שגיאה, אפשר להגיד שפייתון <dfn>מתריעה על חריגה</dfn> (<dfn>raise an exception</dfn>).
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
נבדיל בין שני סוגי חריגות: שגיאות תחביר וחריגות כלליות.
</p>
<span style="text-align: right; direction: rtl; float: right; clear: both;">שגיאת תחביר</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
פייתון תתריע לפנינו על <dfn>שגיאת תחביר</dfn> (<dfn>syntax error</dfn>) כשנכתוב קוד שהיא לא מסוגלת לפענח.<br>
לרוב זה יתרחש כשלא עמדנו בכללי התחביר של פייתון ושכחנו תו מסוים, בין אם זה סגירת סוגריים, גרשיים או נקודתיים בסוף שורה.<br>
ודאי נתקלתם בשגיאה דומה בעבר:
</p>
End of explanation
names = (
"John Cleese",
"Terry Gilliam",
"Eric Idle",
"Michael Palin",
"Graham Chapman",
"Terry Jones",
for name in names:
print(name)
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
פייתון משתדלת לספק לנו כמה שיותר מידע על מקור השגיאה:<br>
</p>
<ol style="text-align: right; direction: rtl; float: right; clear: both;">
<li>בשורה הראשונה נראה מידע על מיקום השגיאה: קובץ (אם יש כזה) ומספר השורה שבה נמצאה השגיאה.</li>
<li>בשורה השנייה את הקוד שבו פייתון מצאה את השגיאה.</li>
<li>בשורה השלישית חץ שמצביע חזותית למקום שבו נמצאה השגיאה.</li>
<li>בשורה הרביעית פייתון מסבירה לנו מה התרחש ומספקת הסבר קצר על השגיאה. במקרה הזה – <code>SyntaxError</code>.</li>
</ol>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
כשמדובר בשגיאות תחביר כדאי להסתכל על מיקום השגיאה בעין ביקורתית.<br>
בחלק מהפעמים, פייתון תכלול בשגיאה מידע לא מדויק על מיקום השגיאה:
</p>
End of explanation
a = int(input("Please enter the first number: "))
b = int(input("Please enter the second number: "))
print(a // b)
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
אפשר לראות בקוד שלמעלה ששכחנו לסגור את הסוגריים שפתחנו בשורה הראשונה.<br>
הודעת השגיאה שפייתון תציג לנו מצביעה על הלולאה כמקור לבעיה, כאשר הבעיה האמיתית היא בבירור אי סגירת הסוגריים.<br>
ההודעות הלא מדויקות של פייתון בהקשר של שגיאות תחביר מבלבלות לעיתים קרובות מתכנתים מתחילים.<br>
המלצתנו, אם הסתבכתם עם שגיאה כזו - בדקו אם מקור השגיאה הוא בסביבה, ולאו דווקא במקום שפייתון מורה עליו.
</p>
<span style="text-align: right; direction: rtl; float: right; clear: both;">חריגה כללית</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
גם כשהקוד שלכם תואם את כללי התחביר של פייתון, לפעמים עלולות להתגלות בעיות בזמן הרצת הקוד.<br>
כפי שוודאי כבר חוויתם, מנעד הבעיות האפשריות הוא רחב מאוד – החל בחלוקה באפס, עבור לטעות בשם המשתנה וכלה בניסיון לפתיחת קובץ לא קיים.<br>
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
בניגוד להתרעה על חריגה במצב של שגיאות תחביר, פייתון תתריע על חריגות אחרות רק כשהיא תגיע להריץ את הקוד שגורם לחריגה.<br>
נראה דוגמה לחריגה שכזו:
</p>
End of explanation
a = 5
b = 0
print(a // b)
Explanation: <div class="align-center" style="display: flex; text-align: right; direction: rtl; clear: both;">
<div style="display: flex; width: 10%; float: right; clear: both;">
<img src="images/exercise.svg" style="height: 50px !important;" alt="תרגול">
</div>
<div style="width: 70%">
<p style="text-align: right; direction: rtl; float: right; clear: both;">
חשבו על כל החריגות שמשתמש שובב יכול לסחוט מהקוד שבתא למעלה.
</p>
</div>
<div style="display: flex; width: 20%; border-right: 0.1rem solid #A5A5A5; padding: 1rem 2rem;">
<p style="text-align: center; direction: rtl; justify-content: center; align-items: center; clear: both;">
<strong>חשוב!</strong><br>
פתרו לפני שתמשיכו!
</p>
</div>
</div>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
אחת החריגות הראשונות שנחשוב עליהן היא חלוקה באפס, שלא מוגדרת חשבונית.<br>
נראה מה יקרה כשננסה לבצע השמה של הערך 0 למשתנה <var>b</var>:
</p>
End of explanation
a = int("5")
b = int("a")
print(a // b)
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
פייתון התריעה בפנינו על <var>ZeroDivisionError</var> בשורה 3, וגם הפעם היא פלטה לנו הודעה מפורטת יחסית.<br>
בדיוק כמו בשגיאת התחביר, השורה האחרונה היא הודעה שמספרת לנו מה קרה בפועל: חילקתם באפס וזה אסור.<br>
באותה שורה נראה גם את <dfn>סוג החריגה</dfn> (<var>ZeroDivisionError</var>) – הקטגוריה הכללית שאליה החריגה משתייכת.<br>
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
נוכל לראות סוג אחר של חריגה אם נעביר לאחד המשתנים אות במקום מספר:
</p>
End of explanation
a = int("5")
b = int("a")
print(a // b)
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
גם הפעם פייתון התריעה בפנינו על חריגה, אבל מסוג שונה.<br>
חריגה מסוג <var>ValueError</var> מעידה שהערך שהעברנו הוא מהסוג (טיפוס) הנכון, אבל הוא לא התאים לביטוי שביקשנו מפייתון להריץ.<br>
ההודעה שפייתון הציגה מסבירה לנו שאי אפשר להמיר את המחרוזת "a" למספר עשרוני.
</p>
<span style="text-align: right; direction: rtl; float: right; clear: both;">קריאת הודעת השגיאה</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
ניקח לדוגמה את ההתרעה האחרונה שקיבלנו על חריגה:
</p>
End of explanation
def division(a, b):
return int(a) // int(b)
division("meow", 5)
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
ננסה להבין לעומק את החלקים השונים של ההודעה:
</p>
<figure>
<img src="images/exception_parts.svg?v=3" style="margin-right: auto; margin-left: auto; text-align: center;" alt="תמונה הממחישה את החלקים השונים בהתרעת החריגה. חלקי הודעת השגיאה, לפי הסדר: שם הקובץ שבו נמצאה החריגה, מיקום החריגה, חץ המצביע למיקום החריגה, השורה שבה זוהתה החריגה, הקטגוריה של החריגה והסיבה להתרחשות החריגה. ויזואלית, בצד ימין יש קופסאות בצבעים עם תיאור חלקי השגיאה, ובצד שמאל יש את הודעת השגיאה כאשר כל חלק בה צבוע בצבעים המופיעים בצד ימין."/>
<figcaption style="margin-top: 2rem; text-align: center; direction: rtl;">
איור המבאר את חלקי ההודעה המוצגת במקרים של התרעה על חריגה.
</figcaption>
</figure>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
במקרה של התרעה על חריגה שהתרחשה בתוך פונקציה, ההודעה תציג מעקב אחר שרשרת הקריאות שגרמו לה:
</p>
End of explanation
def get_file_content(filepath):
with open(filepath) as file_handler:
return file_handler.read()
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
ההודעה הזו מכילה <dfn>Traceback</dfn> – מעין סיפור שמטרתו לעזור לנו להבין מדוע התרחשה החריגה.<br>
ב־Traceback נראה את השורה שבה התרחשה החריגה, ומעליה את שרשרת הקריאות לפונקציות שגרמו לשורה הזו לרוץ.<br>
כדי להבין טוב יותר את ה־Traceback, נהוג לקרוא אותו מהסוף להתחלה.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
תחילה, נביט בשורה האחרונה ונקרא מה הייתה הסיבה שבגינה פייתון התריעה לנו על החריגה.<br>
ההודעה היא <q dir="ltr">invalid literal for int() with base 10: 'meow'</q> – ניסינו להמיר את המחרוזת "meow" למספר שלם, וזה לא תקין.<br>
כדאי להסתכל גם על סוג החריגה (<var>ValueError</var>) כדי לקבל מושג כללי על היצור שאנחנו מתעסקים איתו.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
נמשיך ל־Traceback.<br>
בפסקה שמעל השורה שבה מוצגת הודעת השגיאה, נסתכל על שורת הקוד שגרמה להתרעה על חריגה: <code>return int(a) // int(b)</code>.<br>
בשלב זה יש בידינו די נתונים לצורך פענוח ההתרעה על החריגה: ניסינו לבצע המרה לא חוקית של המחרוזת "meow" למספר שלם בשורה 2.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
אם עדיין לא נחה דעתכם וקשה לכם להבין מאיפה הגיעה ההתרעה על החריגה, תוכלו להמשיך ולטפס במעלה ה־Traceback.<br>
נעבור לקוד שגרם לשורה <code dir="ltr">return int(a) // int(b)</code> לרוץ: <code dir="ltr">division("meow", 5)</code>.<br>
נוכל לראות שהקוד הזה מעביר לפרמטר הראשון של הפונקציה <var>division</var> את הערך "meow", שאותו היא מנסה להמיר למספר שלם.<br>
עכשיו ברור לחלוטין מאיפה מגיעה ההתרעה על החריגה.
</p>
<span style="text-align: right; direction: rtl; float: right; clear: both;">טיפול בחריגות</span>
<span style="text-align: right; direction: rtl; float: right; clear: both;">הרעיון</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
לפעמים אנחנו יודעים מראש על שורת קוד שכתבנו שעלולה לגרום להתרעה על חריגה.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
פתיחת קובץ לקריאה, לדוגמה, עלולה להיכשל אם הנתיב לקובץ לא קיים במחשב.<br>
נכתוב פונקציה שמקבלת נתיב לקובץ ומחזירה את התוכן שלו כדי להדגים את הרעיון:
</p>
End of explanation
princess_location = get_file_content('castle.txt')
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
ננסה לאחזר את התוכן של הקובץ castle.txt:
</p>
End of explanation
def get_file_content(filepath):
try: # נסה לבצע את השורות הבאות
with open(filepath) as file_handler:
return file_handler.read()
except FileNotFoundError: # ...אם נכשלת בגלל סוג החריגה הזה, נסה לבצע במקום
print(f"Couldn't open the file: {filepath}.")
return ""
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
במקרה שלמעלה ניסינו לפתוח קובץ שלא באמת קיים במחשב, ופייתון התריעה לנו על חריגת <var>FileNotFoundError</var>.<br>
מכאן, שהפונקציה <var>get_file_content</var> עשויה לגרום להתרעה על חריגה מסוג <var>FileNotFoundError</var> בכל פעם שיועבר לה נתיב שאינו קיים.<br>
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
כמתכנתים אחראים, חשוב לנו שהתוכנה לא תקרוס בכל פעם שהמשתמש מספק נתיב שגוי לקובץ.<br>
פייתון מאפשרת לנו להגדיר מראש כיצד לטפל במקרים שבהם אנחנו צופים שהיא תתריע על חריגה, ובכך למנוע את קריסת התוכנית.<br>
נעשה זאת בעזרת מילות המפתח <code>try</code> ו־<code>except</code>.<br>
</p>
<span style="text-align: right; direction: rtl; float: right; clear: both;">התחביר הבסיסי</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
לפני שנצלול לקוד, נבין מהו הרעיון הכללי של <code>try</code> ושל <code>except</code>.<br>
המטרה שלנו היא לספק התנהגות חלופית לקוד שעשוי להיכשל בגלל התרעה על חריגה מסוימת שחזינו שעשויה לקרות.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
שימוש ב־<code>try</code> וב־<code>except</code> בפייתון נראה פחות או יותר כך:
</p>
<ol style="text-align: right; direction: rtl; float: right; clear: both;">
<li>נסה לבצע את שורות הקוד הבאות.</li>
<li>אם לא הצלחת כיוון שהייתה התרעה על חריגה מסוג <em>כך וכך</em>, בצע במקומן את השורות החלופיות הבאות.</li>
</ol>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
נממש בקוד:
</p>
End of explanation
princess_location = get_file_content("castle.txt")
princess_location
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
וננסה לאחזר שוב את התוכן של הקובץ castle.txt:
</p>
End of explanation
princess_location = get_file_content("?")
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
כפי שאפשר לראות בדוגמה, הפונקציה לא התריעה על חריגת <var>FileNotFoundError</var>, אלא הדפיסה לנו הודעה והחזירה מחרוזת ריקה.<br>
זה קרה כיוון שעטפנו את הקוד שעלול להתריע על חריגה ב־<code>try</code>,
והגדרנו לפייתון בתוך ה־<code>except</code> מה לבצע במקרה של כישלון.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
התחביר של <code>try</code> ... <code>except</code> הוא כדלהלן:<br>
</p>
<ol style="text-align: right; direction: rtl; float: right; clear: both;">
<li>נתחיל עם שורה שבה כתוב אך ורק <code dir="ltr">try:</code>.</li>
<li>בהזחה, נכתוב את כל מה שאנחנו רוצים לנסות לבצע ועלול לגרום להתרעה על חריגה.</li>
<li>בשורה הבאה, נצא מההזחה ונכתוב <code dir="ltr">except ExceptionType:</code>, כאשר <var>ExceptionType</var> הוא סוג החריגה שנרצה לתפוס.</li>
<li>בהזחה (שוב), נכתוב קוד שנרצה לבצע אם פייתון התריעה על חריגה מסוג <var>ExceptionType</var> בזמן שהקוד המוזח תחת ה־<code>try</code> רץ.</li>
</ol>
<figure>
<img src="images/try_except_syntax.svg?v=2" style="width: 800px; margin-right: auto; margin-left: auto; text-align: center;" alt="בתמונה יש את הקוד מהדוגמה האחרונה, שלידו כתוב בעברית מה כל חלק עושה. ליד ה־try כתוב 'נסה לבצע...'. ליד שתי השורות המוזחות בתוכו כתוב 'קוד שעשוי לגרום להתרעה על חריגה'. ליד ה־except FileNotFoundError כתוב 'אם הייתה התרעה על חריגה מסוג FileNotFoundError', וליד הקוד המוזח בתוכו כתוב בצע במקום את הפעולות הבאות. הכתוביות מופיעות משמאל לקוד, ולידן קו שמסמן לאיזה חלק בקוד הן שייכות."/>
<figcaption style="margin-top: 2rem; text-align: center; direction: rtl;">
התחביר של <code>try</code> ... <code>except</code>
</figcaption>
</figure>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
השורות שב־<code>try</code> ירוצו כרגיל.<br>
אם לא תהיה התרעה על חריגה, פייתון תתעלם ממה שכתוב בתוך ה־<code>except</code>.<br>
אם אחת השורות בתוך ה־<code>try</code> גרמה להתרעה על חריגה מהסוג שכתוב בשורת ה־<code>except</code>,<br>
פייתון תפסיק מייד לבצע את הקוד שכתוב ב־<code>try</code>, ותעבור להריץ את הקוד המוזח בתוך ה־<code>except</code>.<br>
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
ניקח את דוגמת הקוד שלמעלה, וננסה להבין כיצד פייתון קוראת אותה.<br>
פייתון תתחיל בהרצת השורה <code dir="ltr">with open("castle.txt") as file_handler:</code> ותתריע על חריגה, משום שהקובץ castle.txt לא נמצא.<br>
כיוון שהחריגה היא מסוג <code>FileNotFoundError</code>, היא תחפש את המילים <code dir="ltr">except FileNotFoundError:</code> מייד בסיום ההזחה.<br>
הביטוי הזה קיים בדוגמה שלנו, ולכן פייתון תבצע את מה שכתוב בהזחה שאחריו במקום להתריע על חריגה.
</p>
<figure>
<img src="images/try_except_flow.svg?v=1" style="width: 700px; margin-right: auto; margin-left: auto; text-align: center;" alt="בתמונה יש תרשים זרימה המציג כיצד פייתון קוראת את הקוד במבנה try-except. התרשים בסגנון קומיקסי עם אימוג'ים. החץ ממנו נכנסים לתרשים הוא 'התחל ב־try' עם סמלון של דגל מרוצים, שמוביל לעיגול שבו כתוב 'הרץ את השורה המוזחת הבאה בתוך ה־try'. מתוך עיגול זה יש חץ לעצמו, שבו כתוב 'אין התראה על חריגה' עם סמלון של וי ירוק, וחץ נוסף שבו כתוב 'אין שורות נוספות ב־try' עם סמלון של דגל מרוצים. החץ האחרון מוביל לעיגול ללא מוצא שבו כתוב 'סיימנו! המשך לקוד שאחרי ה־try וה־except'. מהעיגול הראשון יוצא גם חץ שעליו כתוב 'התרעה על חריגה' עם סמלון של פיצוץ, ומוביל לעיגול שבו כתוב 'חפש except עם סוג החריגה'. מעיגול זה יוצאים שני חצים: הראשון 'לא קיים', עם סמלון של איקס אדום שמוביל לעיגול ללא מוצא בו כתוב 'זרוק התרעה על חריגה'. השני 'קיים' עם סמלון של וי ירוק שמוביל לעיגול 'הרץ את השורות המוזחות בתוך ה־except'. מעיגול זה עצמו יוצא חץ לעיגול עליו סופר מקודם, 'סיימנו! המשך לקוד שאחרי ה־try וה־except' שתואר קודם."/>
<figcaption style="margin-top: 2rem; text-align: center; direction: rtl;">
תרשים זרימה המציג כיצד פייתון קוראת את הקוד במבנה <code>try</code> ... <code>except</code>
</figcaption>
</figure>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
היכולת החדשה שקיבלנו נקראת "<dfn>לתפוס חריגות</dfn>", או "<dfn>לטפל בחריגות</dfn>".<br>
היא מאפשרת לנו לתכנן קוד שיגיב לבעיות שעלולות להתעורר במהלך ריצת הקוד שלנו.
</p>
<div class="align-center" style="display: flex; text-align: right; direction: rtl; clear: both;">
<div style="display: flex; width: 10%; float: right; clear: both;">
<img src="images/exercise.svg" style="height: 50px !important;" alt="תרגול">
</div>
<div style="width: 70%">
<p style="text-align: right; direction: rtl; float: right; clear: both;">
כתבו פונקציה שמקבלת שני מספרים, ומחלקת את המספר הראשון בשני.<br>
אם המספר השני הוא אפס, החזירו <samp>0</samp> בתור התוצאה.<br>
השתמשו בחריגות.
</p>
</div>
<div style="display: flex; width: 20%; border-right: 0.1rem solid #A5A5A5; padding: 1rem 2rem;">
<p style="text-align: center; direction: rtl; justify-content: center; align-items: center; clear: both;">
<strong>חשוב!</strong><br>
פתרו לפני שתמשיכו!
</p>
</div>
</div>
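One possible solution sketch (the function name here is illustrative, not part of the exercise):
def divide_or_zero(a, b):
    try:
        return a / b
    except ZeroDivisionError:
        return 0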
<span style="text-align: right; direction: rtl; float: right; clear: both;">סוגים מרובים של חריגות</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
משתמשים פרחחים במיוחד לא יעצרו כאן.<br>
הפונקציה <var>get_file_content</var> מוגנת מניסיון לאחזר קבצים לא קיימים, זה נכון,<br>
אך משתמש שובב מהרגיל עשוי לנסות להעביר לפונקציה מחרוזות עם תווים שאסור לנתיבים להכיל:
</p>
End of explanation
def get_file_content(filepath):
try:
with open(filepath) as file_handler:
return file_handler.read()
except FileNotFoundError:
print(f"Couldn't open the file: {filepath}.")
return ""
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
נביט בחריגה ובקוד המקורי, ונגלה שבאמת לא ביקשנו לתפוס בשום מקום חריגה מסוג <var>OSError</var>.
</p>
End of explanation
def get_file_content(filepath):
try:
with open(filepath) as file_handler:
return file_handler.read()
except (FileNotFoundError, OSError):
print(f"Couldn't open the file: {filepath}.")
return ""
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
מכאן, נוכל לבחור לתקן את הקוד באחת משתי דרכים.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
הדרך הראשונה היא להשתמש בקוד שכבר יצרנו לטיפול בחריגות מסוג <var>FileNotFoundError</var>.<br>
במקרה כזה, נצטרך לשנות את מה שכתוב אחרי ה־<code>except</code> ל־tuple שאיבריו הם כל סוגי השגיאות שבהן נרצה לטפל:
</p>
End of explanation
def get_file_content(filepath):
try:
with open(filepath) as file_handler:
return file_handler.read()
except FileNotFoundError:
print(f"Couldn't open the file: {filepath}.")
return ""
except OSError:
print(f"The path '{filepath}' is invalid.")
return ""
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
בקוד שלמעלה גרמנו לכך, שהן חריגות מסוג <var>FileNotFoundError</var> והן חריגות מסוג <var>OSError</var> יטופלו באותה הצורה.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
אבל מה אם נרצה שחריגות <var>OSError</var> תטופלנה בצורה שונה מחריגות <var>FileNotFoundError</var>?<br>
במקרה הזה נפנה לדרך השנייה, שמימושה פשוט למדי – נוסף לקוד הקיים, נכתוב פסקת קוד חדשה שעושה שימוש ב־<code>except</code>:
</p>
End of explanation
princess_location = get_file_content("?")
princess_location
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
בקוד שלמעלה הוספנו ל־<var>get_file_content</var> קטע קוד נוסף.<br>
ה־<code>except</code> שהוספנו מאפשר לפייתון לטפל בחריגות מסוג <var>OSError</var>, כמו החריגה שקפצה לנו כשהכנסנו תווים בלתי חוקיים לנתיב הקובץ.<br>
נראה את הקוד בפעולה:
</p>
End of explanation
def a():
print("Dividing by zero...")
return 1 / 0
print("End of a.")
def b():
print("Calling a...")
a()
print("End of b.")
def c():
print("Calling b...")
b()
print("End of c.")
print("Start.")
print("Calling c...")
c()
print("Stop.")
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
לשמחתנו, אנחנו לא מוגבלים במספר ה־<code>except</code>־ים שאפשר להוסיף אחרי ה־<code>try</code>.
</p>
<span style="text-align: right; direction: rtl; float: right; clear: both;">זיהוי סוג החריגה</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
בכל פעם שפייתון מתריעה על חריגה, היא גם מציגה את הקטגוריה שאליה שייכת אותה חריגה.<br>
כפי שכבר ראינו, שגיאות תחביר שייכות לקטגוריה <var>SyntaxError</var>, וחריגות שנובעות מערך שגוי שייכות לקטגוריה <var>ValueError</var>.<br>
למדנו גם להכיר שגיאות <var>FileNotFoundError</var> ושגיאות <var>OSError</var>, וללא ספק נתקלתם בחריגות שונות ומשונות בעצמכם במהלך הקורס.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
אפשר להגיד, אם כך, שבפייתון יש סוגים רבים של חריגות שהם חלק מהשפה.<br>
מרוב סוגי חריגות, לפעמים קל לאבד את הידיים והרגליים ונעשה לא פשוט להבין על איזו חריגה פייתון עשויה להתריע.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
נציע כמה דרכים מועילות להתמודד עם הבעיה, ולמצוא מהם סוגי החריגות שעשויים להיווצר בעקבות קוד שכתבתם:
</p>
<ol style="text-align: right; direction: rtl; float: right; clear: both;">
<li><em>תיעוד</em> – בשימוש בפונקציה או בפעולה מסוימת, קראו בתיעוד שלה על אילו חריגות עלולה פייתון להתריע בעקבות הפעלת הפונקציה או הפעולה.</li>
<li><em>חיפוש</em> – השתמשו במנוע חיפוש כדי לשאול אילו חריגות עלולות לקפוץ בעקבות פעולה כללית שביצעתם. נניח: <q>python exceptions read file</q>.</li>
<li><em>נסו בעצמכם</em> – אם ברצונכם לברר על סוג חריגה במקרה מסוים – הריצו את המקרה במחברת ובדקו על איזה סוג חריגה פייתון מתריעה.</li>
</ol>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
לנוחיותכם, בתיעוד של פייתון ישנו עמוד שמסביר על <a href="https://docs.python.org/3/library/exceptions.html">כל סוגי החריגות שפייתון מגדירה</a>.
</p>
<div class="align-center" style="display: flex; text-align: right; direction: rtl; clear: both;">
<div style="display: flex; width: 10%; float: right; clear: both;">
<img src="images/exercise.svg" style="height: 50px !important;" alt="תרגול">
</div>
<div style="width: 70%">
<p style="text-align: right; direction: rtl; float: right; clear: both;">
על איזה סוג חריגה תתריע פייתון כאשר ניגש לרשימה במיקום שאינו קיים?<br>
מה בנוגע לגישה לרשימה במיקום שהוא מחרוזת?<br>
מהן סוגי החריגות שעליהם עלולה פייתון להתריע בעקבות הרצת הפעולה <code>index</code> על רשימה?
</p>
</div>
<div style="display: flex; width: 20%; border-right: 0.1rem solid #A5A5A5; padding: 1rem 2rem;">
<p style="text-align: center; direction: rtl; justify-content: center; align-items: center; clear: both;">
<strong>חשוב!</strong><br>
פתרו לפני שתמשיכו!
</p>
</div>
</div>
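One quick way to check these in a notebook cell (a small exploration sketch; the broad except Exception is used here only to print each exception's type):
items = [1, 2, 3]
for attempt in (lambda: items[10], lambda: items["a"], lambda: items.index(99)):
    try:
        attempt()
    except Exception as error:
        print(type(error).__name__, "-", error)
# Prints IndexError, TypeError and ValueError, respectively.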
<span style="text-align: right; direction: rtl; float: right; clear: both;">תרגיל ביניים: תוכנית החלוקה</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
כתבו פונקציה בשם <var>super_division</var> שמקבלת מספר בלתי מוגבל של פרמטרים מספריים.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
הפונקציה תבצע חלוקה של המספר הראשון במספר השני.<br>
את התוצאה היא תחלק במספר השלישי לכדי תוצאה חדשה, את התוצאה החדשה היא תחלק במספר הרביעי וכן הלאה.<br>
לדוגמה: עבור הקריאה <code dir="ltr">super_division(100, 10, 5, 2)</code> הפונקציה תחזיר 1, כיוון שתוצאת הביטוי $100 / 10 / 5 / 2$ היא 1.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
עשו שימוש בטיפול בחריגות, ונסו לתפוס כמה שיותר מקרים של משתמשים שובבים.
</p>
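A minimal sketch of one possible solution (the exact messages are illustrative):
def super_division(*numbers):
    try:
        result = numbers[0]
        for number in numbers[1:]:
            result = result / number
        return result
    except IndexError:
        print("At least one number is required.")
    except ZeroDivisionError:
        print("Dividing by zero is not allowed.")
    except TypeError:
        print("All of the arguments must be numbers.")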
<span style="text-align: right; direction: rtl; float: right; clear: both;">פעפוע של חריגות במעלה שרשרת הקריאות</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
התרעה על חריגה גורמת לריצת התוכנית להתנהג בצורה שונה מעט ממה שהכרנו עד כה.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
במהלך המחברת נתקלנו בשני מקרים אפשריים:
</p>
<ol style="text-align: right; direction: rtl; float: right; clear: both;">
<li>או שהשורה שגרמה להתרעה על החריגה נמצאת ישירות תחת <code>try-except</code> שתואם לסוג החריגה, ואז החריגה נתפסת.</li>
<li>או שהשורה הזו אינה נמצאת ישירות תחת <code>try-except</code> ואז החריגה מקריסה את התוכנית. במקרה שכזה, מוצג לנו Traceback.</li>
</ol>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
אבל מתברר שהסיפור מאחורי הקלעים הוא מורכב מעט יותר.<br>
אם פונקציה מסוימת לא יודעת כיצד לטפל בהתרעה על חריגה, היא מבקשת עזרה מהפונקציה שקראה לה.<br>
זה קצת כמו לבקש סוכר מהשכנים, גרסת החריגות והפונקציות.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
נניח שבפונקציה <var>A</var> ישנה שורה שגרמה להתרעה על חריגה, והיא לא עטופה ב־<code>try-except</code>.<br>
לפני שהתוכנית תקרוס, ההתרעה על החריגה תִּשָּׁלַח לפונקציה הקוראת, <var>B</var>, זו שהפעילה את פונקציה <var>A</var> שבה התרחשה ההתרעה על החריגה.<br>
בשלב הזה פייתון נותנת לנו הזדמנות נוספת לתפוס את החריגה.<br>
אם בתוך פונקציה <var>B</var> השורה שקראה לפונקציה <var>A</var> עטופה ב־<code>try-except</code> שתופס את סוג החריגה הנכונה, החריגה תטופל.<br>
אם לא, החריגה תועבר לפונקציה <var>C</var> שקראה לפונקציה <var>B</var>, וכך הלאה, עד שנגיע לראש שרשרת הקריאות.<br>
אם אף אחד במעלה שרשרת הקריאות לא תפס את החריגה, התוכנית תקרוס ויוצג לנו Traceback.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
ננסה להדגים באמצעות קטע קוד לא מתוחכם במיוחד.<br>
הנה האפשרות השנייה שדיברנו עליה – אף אחד בשרשרת הקריאות לא תופס את החריגה, התוכנה קורסת ומודפס Traceback:
</p>
End of explanation
def a():
print("Dividing by zero...")
try:
return 1 / 0
except ZeroDivisionError:
print("Never Dare Anyone to Divide By Zero!")
print("https://reddit.com/2rkuek/")
print("End of a.")
def b():
print("Calling a...")
a()
print("End of b.")
def c():
print("Calling b...")
b()
print("End of c.")
print("Start.")
print("Calling c...")
c()
print("Stop.")
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
והנה דוגמה לאפשרות הראשונה – שבה אנחנו תופסים את החריגה מייד כשהיא מתרחשת:
</p>
End of explanation
def a():
print("Dividing by zero...")
return 1 / 0
print("End of a.")
def b():
print("Calling a...")
a()
print("End of b.")
def c():
print("Calling b...")
try:
b()
except ZeroDivisionError:
print("Never Dare Anyone to Divide By Zero!")
print("https://reddit.com/2rkuek/")
print("End of c.")
print("Start.")
print("Calling c...")
c()
print("Stop.")
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
האפשרות המעניינת היא האפשרות השלישית.<br>
מה קורה אם מישהו במעלה שרשרת הקריאות, נניח הפונקציה <var>c</var>, היא זו שמחליטה לתפוס את החריגה:
</p>
End of explanation
MONTHS = [
"January", "February", "March", "April", "May", "June",
"July", "August", "September", "October", "November", "December",
]
def get_month_name(index):
    """Return the month name given its number."""
return MONTHS[index - 1]
def get_month_number(name):
    """Return the month number given its name."""
return MONTHS.index(name) + 1
def is_same_month(index, name):
return (
get_month_name(index) == name
and get_month_number(name) == index
)
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
שימו לב שבמקרה הזה דילגנו על השורות שמדפיסות את ההודעה על סיום ריצתן של הפונקציות <var>a</var> ו־<var>b</var>.<br>
בשורה מספר 3 התבצעה חלוקה לא חוקית ב־0 שגרמה להתרעה על חריגה מסוג <var>ZeroDivisionError</var>.<br>
כיוון שהקוד לא היה עטוף ב־<code>try-except</code>, ההתרעה על החריגה פעפעה לשורה <code dir="ltr">a()</code> שנמצאת בפונקציה <var>b</var>.<br>
גם שם אף אחד לא טיפל בחריגה באמצעות <code>try-except</code>, ולכן ההתרעה על החריגה המשיכה לפעפע לפונקציה שקראה ל־<var>b</var>, הלא היא <var>c</var>.<br>
ב־<var>c</var> סוף כל סוף הקריאה ל־<var>b</var> הייתה עטופה ב־<code>try-except</code>, ושם התבצע הטיפול בחריגה.<br>
מאותה נקודה שבה טופלה החריגה, התוכנית המשיכה לרוץ כרגיל.
</p>
<figure>
<img src="images/exception_propogation.svg?v=3" style="margin-right: auto; margin-left: auto; text-align: center;" alt="באיור 6 עמודות, כל אחת מייצגת שלב ומורכבת מ־4 מלבנים. אל המלבן התחתון בכל שלב מצביע חץ, וכתוב בו 'התוכנית'. ממנו יוצא חץ למלבן 'פונקציה c', ממנו יוצא חץ למלבן 'פונקציה b' וממנו יוצא חץ למלבן 'פונקציה a'. מעל השלב הראשון נכתב 'התוכנית קוראת לפונקציה c, שקוראת ל־b, שקוראת ל־a.'. בשלב 2 כתוב תחת הכותרת 'פייתון מתריעה על חריגה בפונקציה a. פונקציה a אינה מטפלת בחריגה.'. ליד המלבן של פונקציה a מופיע סמליל בו כתוב BOOM המסמן את ההתרעה על החריגה שפייתון תייצר. בשלב 3 כתוב 'החריגה, שלא נתפסה בפונקציה a, מפעפעת חזרה ל־b שקראה לה.'. על המלבן של פונקציה a מופיע סימן STOP שמייצג את זה שריצת הפונקציה הפסיקה. על המלבן של הפונקציה b מופיע סימן פיצוץ עם הכיתוב BOOM שמייצג את ההתרעה על החריגה. שנמצאת כרגע בפונקציה b. מהמלבן של פונקציה a בשלב 2 יוצא חץ למלבן של פונקציה b בשלב 3, שמסמן את פעפוע ההתרעה על השגיאה במעלה שרשרת הקריאות. מתחת לכותרת של שלב 4 כתוב 'החריגה, שלא נתפסה בפונקציה b, מפעפעת חזרה ל־c שקראה לה.'. ליד המלבן של הפונקציות a ו־b מופיע הסמליל STOP, וליד המלבן של הפונקציה c מופיע סמליל של BOOM, אליו מצביע חץ שיוצא מה־BOOM בשלב 3. מתחת לכותרת של שלב 5 כתוב 'פונקציה c תופסת את החריגה ומצילה את התוכנית מקריסה.'. הסימן ליד המלבן של פונקציה c משתנה מ־BOOM לכפפת בייסבול, שמסמלת את תפיסת ההודעה על החריגה. מתחת לכותרת של שלב 6 כתוב 'התוכנית ממשיכה את ריצתה כרגיל ממקום התפיסה בפונקציה c.'. הסמלילים נשארו כשהיו בשלב 5, אך מהמלבן של פונקציה c נוסף חץ למלבן של 'התוכנית', ומהמלבן של 'התוכנית' נוסף חץ היוצא כלפי חוץ."/>
<figcaption style="margin-top: 2rem; text-align: center; direction: rtl;">
איור שממחיש כיצד התרעה על חריגה מפעפעת בשרשרת הקריאות.
</figcaption>
</figure>
<span style="text-align: right; direction: rtl; float: right; clear: both;">תרגיל ביניים: החמישי זה נובמבר?</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
קראו את הקוד הבא, ופתרו את הסעיפים שאחריו.
</p>
End of explanation
try:
1 / 0
except ZeroDivisionError as err:
print(type(err))
print('-' * 40)
print(dir(err))
Explanation: <ol style="text-align: right; direction: rtl; float: right; clear: both;">
<li>כתבו שתי שורות הקוראות ל־<code>is_same_month</code>, שאחת מהן מחזירה <code>True</code> והשנייה מחזירה <code>False</code>.</li>
<li>על אילו חריגות פייתון עלולה להתריע בשתי הפונקציות הראשונות? תפסו אותן בפונקציות הרלוונטיות. החזירו <code>None</code> אם התרחשה התרעה על חריגה.</li>
<li>בונוס: האם תצליחו לחשוב על דרך לגרום לפונקציה להתרסק בכל זאת? אם כן, תקנו אותה כך שתחזיר <code>None</code> במקרה שכזה.</li>
<li>בונוס: האם תוכלו ליצור קריאה ל־<code>is_same_month</code> שתחזיר <code>True</code> בזמן שהיא אמורה להחזיר <code>False</code>?</li>
<li>הבה נשנה גישה: תפסו את החריגות ברמת <code>is_same_month</code> במקום בפונקציות שהתריעו על חריגה. החזירו <code>False</code> אם התרחשה התרעה על חריגה.</li>
</ol>
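One possible sketch for item 2 (returning None whenever the lookup raises an exception):
def get_month_name(index):
    try:
        return MONTHS[index - 1]
    except (IndexError, TypeError):
        return None
def get_month_number(name):
    try:
        return MONTHS.index(name) + 1
    except ValueError:
        return None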
<span style="text-align: right; direction: rtl; float: right; clear: both;">הקשר בין חריגות למחלקות</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
כל התרעה על חריגה מיוצגת בפייתון באמצעות מופע.<br>
בעזרת מילת המפתח <code>as</code>, נוכל ליצור משתנה שיצביע למופע הזה.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
נכתוב אחרי ה־<code>except</code> ולפני הנקודתיים את הביטוי <code>as VarName</code>, כאשר <var>VarName</var> הוא שם משתנה חדש שיצביע למופע של ההתרעה על החריגה.<br>
ביטוי זה יאפשר לנו לגשת למשתנה <var>VarName</var> שכולל את הפרטים על החריגה, מכל שורה שמוזחת תחת ה־<code>except</code>:
</p>
End of explanation
try:
1 / 0
except ZeroDivisionError as err:
print(f"The error is '{err}'.")
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
היופי במשתנה החדש שנוצר, <var>err</var>, זה שהוא מופע פייתוני שנוצר מתוך המחלקה <var>ZeroDivisionError</var>.<br>
<var>ZeroDivisionError</var>, אם כך, היא מחלקה לכל דבר: יש לה <code>__init__</code>, פעולות ותכונות, כמו שאפשר לראות בדוגמת הקוד שלמעלה.</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
נבדוק אם יש לה <code>__str__</code> מועיל:
</p>
End of explanation
ZeroDivisionError.mro()
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
כמה נוח!<br>
זו דרך ממש טובה להדפיס למשתמש הודעת שגיאה המתארת בדיוק מה הגורם לשגיאה שחווה.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
זה זמן טוב לעצור רגע ולחשוב.<br>
ישנם סוגי חריגות רבים, ולכל סוג חריגה יש מחלקה שמייצגת אותו.<br>
האם זה אומר שכולן יורשות מאיזו מחלקת "חריגה" מופשטת כלשהי?<br>
נבדוק באמצעות גישה ל־<var>Method Resolution Order</var> של המחלקה.
</p>
End of explanation
try:
1 / 0
except Exception as err:
print(f"The error is '{err}'.")
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
וואו! זו שרשרת ירושות באורך שלא היה מבייש את שושלת המלוכה הבריטית.<br>
אז נראה שהחריגה של חלוקה באפס (<var>ZeroDivisionError</var>) היא מקרה מיוחד של חריגה חשבונית.<br>
חריגה חשבונית (<var>ArithmeticError</var>), בתורה, יורשת מ־<var>Exception</var>, שהיא עצמה יורשת מ־<var>BaseException</var>.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
נביט ב<a href="https://docs.python.org/3/library/exceptions.html#exception-hierarchy">מדרג הירושה המלא</a> המוצג בתיעוד של פייתון כדי להתרשם:
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
כיוון שאנחנו כבר מנוסים יחסית בענייני ירושה, בשלב הזה נוכל לפתח כמה רעיונות מעניינים.<br>
האם עצם זה ש־<var>ZeroDivisionError</var> היא תת־מחלקה של <var>Exception</var>, גורם לכך שנוכל לתפוס אותה בעזרת <var>Exception</var>?<br>
אם יתברר שכן, נוכל לתפוס מספר גדול מאוד של סוגי התרעות על חריגות בצורה הזו.<br>
נבדוק!
</p>
End of explanation
search_in_directory(r"C:\Projects\Notebooks\week08", ["class", "int"])
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
ובכן, כן.<br>
אנחנו יכולים לכתוב בצורה הזו קוד שיתפוס את מרב סוגי ההתרעות על חריגות.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
בשלב זה נציין שבניגוד לאינטואיציה, חשוב שנהיה ממוקדים בטיפול שלנו בחריגות.<br>
חניכים שזה עתה למדו על הרעיון של טיפול בחריגות מקיפים לפעמים את כל הקוד שלהם ב־<code>try-except</code>. מדובר ברעיון רע למדי.<br>
כלל האצבע שלנו מעתה יהיה זה:
</p>
<blockquote dir="rtl" style="direction: rtl; text-align: right; float: right; border-right: 5px solid rgba(0,0,0,.05); border-left: 0;">
בכל שימוש ב־<code>try-except</code>, צמצמו את כמות הקוד שנמצאת ב־<code>try</code>, וטפלו בחריגה כמה שיותר ספציפית.
</blockquote>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
טיפול בחריגות הוא מנגנון שבהינתן התרעה על חריגה, מאפשר לנו להריץ קוד חלופי או קוד שיטפל בבעיה שנוצרה.<br>
אם אנחנו לא יודעים בדיוק מה הבעיה, או לא מתכוונים לטפל בה בדרך הגיונית – עדיף שלא נתפוס אותה.<br>
טיפול בחריגות שלא לצורך עלול ליצור "תקלים שקטים" בתוכנה שלנו, שאותם יהיה לנו קשה מאוד לאתר לאחר מכן.
</p>
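A short illustration of the difference (the input value is arbitrary):
user_input = "meow"  # imagine this string came from the user
# Too broad - any bug inside the block is silently swallowed:
try:
    age = int(user_input)
    print(100 / age)
except Exception:
    print("Something went wrong.")
# Focused - only the risky line is wrapped, and only the expected exception is caught:
try:
    age = int(user_input)
except ValueError:
    print("Please enter a whole number.")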
<span style="text-align: right; direction: rtl; float: right; clear: both;">סיכום</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
במחברת זו למדנו מהן חריגות, כיצד לקרוא הודעות שגיאה של פייתון, וכיצד לטפל בהתרעות על חריגות שנוצרות עקב כשל בריצת התוכנית.<br>
ראינו כיצד חריגות מיוצגות בפייתון, איך הן פועלות מאחורי הקלעים ואיך לגלות אילו חריגות עלולות לצוץ בזמן ריצת הקוד שלנו.<br>
למדנו גם שמוטב לתפוס שגיאה באופן נקודתי עד כמה שאפשר, ורק כשאנחנו יודעים כיצד לטפל בה בדרך הגיונית.
</p>
<span style="text-align: right; direction: rtl; float: right; clear: both;">מונחים</span>
<dl style="text-align: right; direction: rtl; float: right; clear: both;">
<dt>חריגה (exception)</dt>
<dd>
מופע המייצג מצב לא סדיר, לרוב בעייתי, שזוהה במהלך הרצת הקוד שבתוכנית.<br>
לכל חריגה יש סוג, וכל סוג חריגה מיוצג באמצעות מחלקה פייתונית.
</dd>
<dt>התרעה על חריגה (raise of an exception)</dt>
<dd>
מצב שבו פייתון מודיעה על שגיאה או על מצב לא סדיר בריצת התוכנית.<br>
התרעה על חריגה משנה את זרימת הריצה של התוכנית ועלולה לגרום לקריסתה.
</dd>
<dt>Traceback</dt>
<dd>
שרשרת הקריאות שהובילו להפעלתה של הפונקציה שבה אנחנו נמצאים ברגע מסוים.<br>
בהקשר של חריגות, מדובר בשרשרת הקריאות שהובילה להתרעה על החריגה.<br>
שרשרת הקריאות הזו מופיעה גם בהודעת השגיאה שמוצגת כאשר פייתון מתריעה על חריגה.
</dd>
<dt>טיפול בחריגה (exception handling)</dt>
<dd>
נקרא גם <dfn>תפיסת חריגה</dfn> (<dfn>catching an exception</dfn>).<br>
בעת התרעה על חריגה, התנהגות ברירת המחדל של התוכנה היא קריסה.<br>
מתכנת יכול להגדיר מראש מה הוא רוצה שיקרה במקרה של התרעה על חריגה.<br>
במקרה כזה, קריסת התוכנית תימנע.
</dd>
</dl>
<span style="text-align: right; direction: rtl; float: right; clear: both;">תרגילים</span>
<span style="text-align: right; direction: rtl; float: right; clear: both;">מחשבון</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
כתבו פונקציה בשם <var>calc</var> שמקבלת כפרמטרים שני מספרים וסימן של פעולה חשבונית, בסדר הזה.<br>
הסימן יכול להיות אחד מאלה: <code>+</code>, <code>-</code>, <code>*</code> או <code>/</code>.<br>
מטרת הפונקציה היא להחזיר את תוצאת הפעולה החשבונית שהופעלה על שני המספרים.<br>
בפתרונכם, ודאו שאתם מטפלים בכל ההתרעות על חריגות שיכולות לצוץ בעקבות קלט מאתגר שהזין המשתמש.
</p>
<span style="text-align: right; direction: rtl; float: right; clear: both;">מנסה להבין איפה הסדר כאן</span>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
כתבו פונקציה בשם <var>search_in_directory</var> שמקבלת נתיב, ורשימה של מילות מפתח.<br>
התוכנה תנסה לפתוח את כל הקבצים הנמצאים בנתיב, ותדפיס עבור כל מילת מפתח את כל הקבצים שבהם היא נמצאת.<br>
התוכנה תרוץ גם על תתי־התיקיות שנמצאות בנתיב שסופק (ועל תתי־תתי התיקיות, וכן הלאה), אם יש כאלו.<br>
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
לדוגמה:
</p>
End of explanation |
14,536 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Week 3
Step1: (1) We will fit the data contained within Fig. 3B. Plot this data and describe the relationship you see between Kx, Kd, and valency.
Step2: (2) First, to do so, we'll need a function that takes the model predictions, scales them to the units of the actual measurements, and finds the predictions for each condition. Define a scaling parameter and a function that takes it along with the other parameters to make predictions about the experiment.
Use the fit parameters shown in Table 1 (row 2) and overlay with the measurements to ensure your function is working. (Scale = 1 for now.)
Step3: (2) Now use scipy.optimize.least_squares to find the least squares solution.
Step4: (3) Using leave-one-out crossvalidation, does this model predict the data? Plot the measured vs. predicted data.
Step5: (4) Using bootstrap estimation, plot the confidence interval of the model along with the data points.
(5) Now, we will perform a local sensitivity analysis to look at the dependence of the model results on each parameter. Vary each parameter up and down by 10-fold while holding the others constant, and plot the sum of squared error.
Which paramter influences the fit the most? Which one the least?
Step6: (6) While easier to perform, a local sensitivity analysis ignores codependency between the parameters. Do you anticipate your predictions of the parameter values will be more or less specified with a global analysis?
Step7: (7) Now, vary each parameter from the optimal solution, allowing the other parameters to vary. Was your prediction true? How might the other parameters be varying when Kd increases?
Hint
Step8: (8) At the same time as providing the number of multimerized receptors, the model also infers the quantities of other properties, such as the amount of ligand and receptor bound. Using the bootstrap estimates, plot the confidence in these other parameters. Are these more or less exactly specified than Rmulti? What can you say about which quantities will be most exactly predicted? | Python Code:
% matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from scipy.special import binom
from scipy.optimize import brentq
np.seterr(over='raise')
def StoneMod(Rtot, Kd, v, Kx, L0):
'''
    Returns the number of multivalent ligand bound to a cell with Rtot
receptors, granted each epitope of the ligand binds to the receptor
kind in question with dissociation constant Kd and cross-links with
other receptors with crosslinking constant Kx. All eq derived from Stone et al. (2001).
'''
v = np.int_(v)
# Mass balance for receptor species, to identify the amount of free receptor
diffFunAnon = lambda x: Rtot-x*(1+v*L0*(1/Kd)*(1+Kx*x)**(v-1))
# Check that there is a solution
if diffFunAnon(0) * diffFunAnon(Rtot) > 0:
raise RuntimeError("There is no solution with these parameters. Are your inputs correct?")
    # Solve the mass balance numerically for the free receptor concentration, Req
Req = brentq(diffFunAnon, 0, Rtot, disp=False)
# Calculate vieq from equation 1
vieq = L0*(1/Kd)*Req*(binom(v, np.arange(1, v + 1))) * np.power(Kx*Req, np.arange(v))
# Calculate L, according to equation 7
Lbound = np.sum(vieq)
# Calculate Rmulti from equation 5
Rmulti = np.sum(np.multiply(vieq[1:], np.arange(2, v + 1, dtype=np.float)))
# Calculate Rbound
Rbnd = np.sum(np.multiply(vieq, np.arange(1, v + 1, dtype=np.float)))
# Calculate numXlinks from equation 4
nXlink = np.sum(np.multiply(vieq[1:], np.arange(1, v, dtype=np.float)))
return (Lbound, Rbnd, Rmulti, nXlink)
data = np.loadtxt("./data/wk3-stone.csv", delimiter=",")
# Vector of the ligand concentrations, cell response (proportional to Rmulti), valencies
Xs, Ys, Vs = np.hsplit(data, 3)
Xs = np.squeeze(Xs)
Ys = np.squeeze(Ys)
Vs = np.squeeze(Vs)
Explanation: Week 3: Fitting
In many different cases, we might have a model for how a system works, and want to fit that model to a set of observations.
We're going to investigate the process of fitting using a classic paper that proposed a model for the T cell receptor. Here, the authors develop a mathematical model for how binding occurs and then have observations of how much binding occurs under specific conditions. Identifying whether and how this model fits has led to a better understanding of how our immune system utilizes antibodes, and efforts to design antibodies that function more potently.
End of explanation
plt.semilogx(Xs, Ys, '.');
plt.xlabel('Concentration');
plt.ylabel('CD3 (1/cell)');
Explanation: (1) We will fit the data contained within Fig. 3B. Plot this data and describe the relationship you see between Kx, Kd, and valency.
End of explanation
XsSim = np.repeat(np.logspace(-11, -5), 3)
VsSim = np.tile(np.array([2, 3, 4]), 50)
def predict(Rtot, Kd, Kx, Vs, Ls, scale):
pred = np.zeros(Ls.shape)
for ii in range(Ls.size):
pred[ii] = StoneMod(Rtot, Kd, Vs[ii], Kx, Ls[ii])[2]
return pred * scale
Rtot = 24000
ss = predict(Rtot, 1.7E-6, 3.15E-4, VsSim, XsSim, 1.0)
plt.semilogx(XsSim, ss, '.');
plt.semilogx(Xs, Ys, '.');
plt.xlabel('Concentration');
plt.ylabel('CD3 (1/cell)');
Explanation: (2) First, to do so, we'll need a function that takes the model predictions, scales them to the units of the actual measurements, and finds the predictions for each condition. Define a scaling parameter and a function that takes it along with the other parameters to make predictions about the experiment.
Use the fit parameters shown in Table 1 (row 2) and overlay with the measurements to ensure your function is working. (Scale = 1 for now.)
End of explanation
Ypred = lambda x: predict(Rtot, x[0], x[1], Vs, Xs, x[2]) - Ys
from scipy.optimize import least_squares
sol = least_squares(Ypred, [1.7E-6, 3.15E-4, 1.0])
best_x = sol.x
Explanation: (2) Now use scipy.optimize.least_squares to find the least squares solution.
End of explanation
# Answer
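# One possible leave-one-out sketch: hold each measurement out in turn, refit the
# remaining points, and predict the held-out point with the refit parameters.
loo_pred = np.zeros(Ys.shape)
for ii in range(Ys.size):
    train = np.arange(Ys.size) != ii
    resid = lambda x: predict(Rtot, x[0], x[1], Vs[train], Xs[train], x[2]) - Ys[train]
    fit_x = least_squares(resid, best_x).x
    loo_pred[ii] = predict(Rtot, fit_x[0], fit_x[1], Vs[ii:ii + 1], Xs[ii:ii + 1], fit_x[2])[0]
plt.plot(Ys, loo_pred, '.')
plt.xlabel('Measured')
plt.ylabel('LOO prediction');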
Explanation: (3) Using leave-one-out crossvalidation, does this model predict the data? Plot the measured vs. predicted data.
End of explanation
ssePred = lambda x: np.sum(np.square(Ypred(x)))
a = np.logspace(-1, 1, num = 41)
b = np.stack((a, a, a))
for ii in range(b.shape[0]):
for jj in range(b.shape[1]):
temp = best_x.copy()
temp[ii] = temp[ii] * a[jj]
b[ii, jj] = ssePred(temp)
b = b / np.min(np.min(b))
plt.loglog(a, b[0, :]);
plt.loglog(a, b[1, :]);
plt.loglog(a, b[2, :]);
Explanation: (4) Using bootstrap estimation, plot the confidence interval of the model along with the data points.
(5) Now, we will perform a local sensitivity analysis to look at the dependence of the model results on each parameter. Vary each parameter up and down by 10-fold while holding the others constant, and plot the sum of squared error.
Which parameter influences the fit the most? Which one the least?
End of explanation
# Answer.
Explanation: (6) While easier to perform, a local sensitivity analysis ignores codependency between the parameters. Do you anticipate your predictions of the parameter values will be more or less specified with a global analysis?
End of explanation
bglobal = np.stack((a, a, a))
for ii in range(bglobal.shape[0]):
for jj in range(bglobal.shape[1]):
temp = best_x.copy()
temp[ii] = temp[ii] * a[jj]
lb = np.array([-np.inf, -np.inf, -np.inf])
ub = -lb
lb[ii] = temp[ii] - 1.0E-12
ub[ii] = temp[ii] + 1.0E-12
bndtemp = (lb, ub)
x0 = [1.7E-6, 3.15E-4, 1.0]
x0[ii] = temp[ii]
bglobal[ii, jj] = least_squares(Ypred, x0, bounds = bndtemp).cost
bglobal = bglobal / np.min(np.min(bglobal))
for ii in range(3):
plt.loglog(a, bglobal[ii, :]);
Explanation: (7) Now, vary each parameter from the optimal solution, allowing the other parameters to vary. Was your prediction true? How might the other parameters be varying when Kd increases?
Hint: Probably the easiest way to do this is to mess with the bounds of the least squares solver.
End of explanation
# Answer
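# One possible sketch: bootstrap the measurements, refit, and record the predicted
# amount of ligand bound (Lbound) and receptor bound (Rbnd) across concentrations.
n_boot = 100
Lbnd_boot = np.zeros((n_boot, XsSim.size))
Rbnd_boot = np.zeros((n_boot, XsSim.size))
for bb in range(n_boot):
    idx = np.random.randint(0, Ys.size, Ys.size)
    resid = lambda x: predict(Rtot, x[0], x[1], Vs[idx], Xs[idx], x[2]) - Ys[idx]
    fit_x = least_squares(resid, best_x).x
    for jj in range(XsSim.size):
        Lb, Rb, _, _ = StoneMod(Rtot, fit_x[0], VsSim[jj], fit_x[1], XsSim[jj])
        Lbnd_boot[bb, jj] = Lb
        Rbnd_boot[bb, jj] = Rb
# 95% percentile bands, shown here for the trivalent ligand:
sel = VsSim == 3
for quantity, name in ((Lbnd_boot, 'Lbound'), (Rbnd_boot, 'Rbound')):
    plt.fill_between(XsSim[sel], np.percentile(quantity[:, sel], 2.5, axis=0),
                     np.percentile(quantity[:, sel], 97.5, axis=0), alpha=0.4, label=name)
plt.xscale('log')
plt.legend();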
Explanation: (8) At the same time as providing the number of multimerized receptors, the model also infers the quantities of other properties, such as the amount of ligand and receptor bound. Using the bootstrap estimates, plot the confidence in these other parameters. Are these more or less exactly specified than Rmulti? What can you say about which quantities will be most exactly predicted?
End of explanation |
14,537 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Find all leaves up to length maximum_superleave_length (takes a couple of minutes for length of 6)
Step1: Load in current file with ev
Step2: Function below calculates a "pseudo-superleave" based on the values of the tiles and the synergy between each pair of tiles.
Gives unreasonable results in cases with lots of multiples, so I put an artificial floor of -30 (where -35 would be a score of 0).
Step3: Quick proof of concept below that the pseudo-superleaves are at least getting us somewhat close. | Python Code:
import time
from itertools import combinations
import pandas as pd
# `tilebag` (the full multiset of tiles that leaves are drawn from) is assumed to be
# defined in an earlier cell of the original notebook.
t0 = time.time()
maximum_superleave_length = 6
leaves = {i:sorted(list(set(list(combinations(tilebag,i))))) for i in
range(1,maximum_superleave_length+1)}
for i in range(1,maximum_superleave_length+1):
leaves[i] = [''.join(leave) for leave in leaves[i]]
t1 = time.time()
print('Calculated superleaves up to length {} in {} seconds'.format(
maximum_superleave_length,t1-t0))
all_leaves = []
for i in range(1,maximum_superleave_length+1):
all_leaves += leaves[i]
Explanation: Find all leaves up to length maximum_superleave_length (takes a couple of minutes for length of 6)
End of explanation
leaves_file = 'leave_values_010619_v3.csv'
df = pd.read_csv(leaves_file)
df = df.rename(columns={'Unnamed: 0':'leave'})
df = df.set_index('leave')
ev_dict = df['ev'].to_dict()
synergy_dict = df['synergy'].to_dict()
Explanation: Load in current file with ev
End of explanation
def calculate_pseudo_ev(leave,ev,synergy):
raw_values = sum([ev[c] for c in leave])
synergies = sum([synergy[''.join(combo)] for combo in list(combinations(leave,2))])
return max(raw_values+synergies,-30)
pseudo_ev_series = pd.Series({leave: calculate_pseudo_ev(leave,ev_dict,synergy_dict)
for leave in all_leaves},name='pseudo_ev')
pseudo_ev_series.to_csv('pseudo_superleaves_010819.csv')
Explanation: Function below calculates a "pseudo-superleave" based on the values of the tiles and the synergy between each pair of tiles.
Gives unreasonable results in cases with lots of multiples, so I put an artificial floor of -30 (where -35 would be a score of 0).
End of explanation
df['leave'] = df.index
df['pseudo_ev'] = df['leave'].apply(lambda x: calculate_pseudo_ev(x,ev_dict,synergy_dict))
df['ev_offset'] = df['pseudo_ev']-df['ev']
df['ev_offset'].hist(bins=50)
Explanation: Quick proof of concept below that the pseudo-superleaves are at least getting us somewhat close.
End of explanation |
14,538 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Using a pre-trained convnet
This notebook contains the code sample found in Chapter 5, Section 3 of Deep Learning with Python. Note that the original text features far more content, in particular further explanations and figures
Step1: We passed three arguments to the constructor
Step2: The final feature map has shape (4, 4, 512). That's the feature on top of which we will stick a densely-connected classifier.
At this point, there are two ways we could proceed
Step3: The extracted features are currently of shape (samples, 4, 4, 512). We will feed them to a densely-connected classifier, so first we must
flatten them to (samples, 8192)
Step4: At this point, we can define our densely-connected classifier (note the use of dropout for regularization), and train it on the data and
labels that we just recorded
Step5: Training is very fast, since we only have to deal with two Dense layers -- an epoch takes less than one second even on CPU.
Let's take a look at the loss and accuracy curves during training
Step6: We reach a validation accuracy of about 90%, much better than what we could achieve in the previous section with our small model trained from
scratch. However, our plots also indicate that we are overfitting almost from the start -- despite using dropout with a fairly large rate.
This is because this technique does not leverage data augmentation, which is essential to preventing overfitting with small image datasets.
Now, let's review the second technique we mentioned for doing feature extraction, which is much slower and more expensive, but which allows
us to leverage data augmentation during training
Step7: This is what our model looks like now
Step8: As you can see, the convolutional base of VGG16 has 14,714,688 parameters, which is very large. The classifier we are adding on top has 2
million parameters.
Before we compile and train our model, a very important thing to do is to freeze the convolutional base. "Freezing" a layer or set of
layers means preventing their weights from getting updated during training. If we don't do this, then the representations that were
previously learned by the convolutional base would get modified during training. Since the Dense layers on top are randomly initialized,
very large weight updates would be propagated through the network, effectively destroying the representations previously learned.
In Keras, freezing a network is done by setting its trainable attribute to False
Step9: With this setup, only the weights from the two Dense layers that we added will be trained. That's a total of four weight tensors
Step10: Let's plot our results again
Step11: As you can see, we reach a validation accuracy of about 96%. This is much better than our small convnet trained from scratch.
Fine-tuning
Another widely used technique for model reuse, complementary to feature extraction, is fine-tuning.
Fine-tuning consists in unfreezing a few of the top layers
of a frozen model base used for feature extraction, and jointly training both the newly added part of the model (in our case, the
fully-connected classifier) and these top layers. This is called "fine-tuning" because it slightly adjusts the more abstract
representations of the model being reused, in order to make them more relevant for the problem at hand.
We have stated before that it was necessary to freeze the convolution base of VGG16 in order to be able to train a randomly initialized
classifier on top. For the same reason, it is only possible to fine-tune the top layers of the convolutional base once the classifier on
top has already been trained. If the classified wasn't already trained, then the error signal propagating through the network during
training would be too large, and the representations previously learned by the layers being fine-tuned would be destroyed. Thus the steps
for fine-tuning a network are as follow
Step12: We will fine-tune the last 3 convolutional layers, which means that all layers up until block4_pool should be frozen, and the layers
block5_conv1, block5_conv2 and block5_conv3 should be trainable.
Why not fine-tune more layers? Why not fine-tune the entire convolutional base? We could. However, we need to consider that
Step13: Now we can start fine-tuning our network. We will do this with the RMSprop optimizer, using a very low learning rate. The reason for using
a low learning rate is that we want to limit the magnitude of the modifications we make to the representations of the 3 layers that we are
fine-tuning. Updates that are too large may harm these representations.
Now let's proceed with fine-tuning
Step14: Let's plot our results using the same plotting code as before
Step15: These curves look very noisy. To make them more readable, we can smooth them by replacing every loss and accuracy with exponential moving
averages of these quantities. Here's a trivial utility function to do this
Step16: These curves look much cleaner and more stable. We are seeing a nice 1% absolute improvement.
Note that the loss curve does not show any real improvement (in fact, it is deteriorating). You may wonder, how could accuracy improve if the
loss isn't decreasing? The answer is simple | Python Code:
from keras.applications import VGG16
conv_base = VGG16(weights='imagenet',
include_top=False,
input_shape=(150, 150, 3))
Explanation: Using a pre-trained convnet
This notebook contains the code sample found in Chapter 5, Section 3 of Deep Learning with Python. Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.
A common and highly effective approach to deep learning on small image datasets is to leverage a pre-trained network. A pre-trained network
is simply a saved network previously trained on a large dataset, typically on a large-scale image classification task. If this original
dataset is large enough and general enough, then the spatial feature hierarchy learned by the pre-trained network can effectively act as a
generic model of our visual world, and hence its features can prove useful for many different computer vision problems, even though these
new problems might involve completely different classes from those of the original task. For instance, one might train a network on
ImageNet (where classes are mostly animals and everyday objects) and then re-purpose this trained network for something as remote as
identifying furniture items in images. Such portability of learned features across different problems is a key advantage of deep learning
compared to many older shallow learning approaches, and it makes deep learning very effective for small-data problems.
In our case, we will consider a large convnet trained on the ImageNet dataset (1.4 million labeled images and 1000 different classes).
ImageNet contains many animal classes, including different species of cats and dogs, and we can thus expect to perform very well on our cat
vs. dog classification problem.
We will use the VGG16 architecture, developed by Karen Simonyan and Andrew Zisserman in 2014, a simple and widely used convnet architecture
for ImageNet. Although it is a bit of an older model, far from the current state of the art and somewhat heavier than many other recent
models, we chose it because its architecture is similar to what you are already familiar with, and easy to understand without introducing
any new concepts. This may be your first encounter with one of these cutesie model names -- VGG, ResNet, Inception, Inception-ResNet,
Xception... you will get used to them, as they will come up frequently if you keep doing deep learning for computer vision.
There are two ways to leverage a pre-trained network: feature extraction and fine-tuning. We will cover both of them. Let's start with
feature extraction.
Feature extraction
Feature extraction consists of using the representations learned by a previous network to extract interesting features from new samples.
These features are then run through a new classifier, which is trained from scratch.
As we saw previously, convnets used for image classification comprise two parts: they start with a series of pooling and convolution
layers, and they end with a densely-connected classifier. The first part is called the "convolutional base" of the model. In the case of
convnets, "feature extraction" will simply consist of taking the convolutional base of a previously-trained network, running the new data
through it, and training a new classifier on top of the output.
Why only reuse the convolutional base? Could we reuse the densely-connected classifier as well? In general, it should be avoided. The
reason is simply that the representations learned by the convolutional base are likely to be more generic and therefore more reusable: the
feature maps of a convnet are presence maps of generic concepts over a picture, which is likely to be useful regardless of the computer
vision problem at hand. On the other end, the representations learned by the classifier will necessarily be very specific to the set of
classes that the model was trained on -- they will only contain information about the presence probability of this or that class in the
entire picture. Additionally, representations found in densely-connected layers no longer contain any information about where objects are
located in the input image: these layers get rid of the notion of space, whereas the object location is still described by convolutional
feature maps. For problems where object location matters, densely-connected features would be largely useless.
Note that the level of generality (and therefore reusability) of the representations extracted by specific convolution layers depends on
the depth of the layer in the model. Layers that come earlier in the model extract local, highly generic feature maps (such as visual
edges, colors, and textures), while layers higher-up extract more abstract concepts (such as "cat ear" or "dog eye"). So if your new
dataset differs a lot from the dataset that the original model was trained on, you may be better off using only the first few layers of the
model to do feature extraction, rather than using the entire convolutional base.
In our case, since the ImageNet class set did contain multiple dog and cat classes, it is likely that it would be beneficial to reuse the
information contained in the densely-connected layers of the original model. However, we will chose not to, in order to cover the more
general case where the class set of the new problem does not overlap with the class set of the original model.
Let's put this in practice by using the convolutional base of the VGG16 network, trained on ImageNet, to extract interesting features from
our cat and dog images, and then training a cat vs. dog classifier on top of these features.
The VGG16 model, among others, comes pre-packaged with Keras. You can import it from the keras.applications module. Here's the list of
image classification models (all pre-trained on the ImageNet dataset) that are available as part of keras.applications:
Xception
InceptionV3
ResNet50
VGG16
VGG19
MobileNet
Let's instantiate the VGG16 model:
End of explanation
conv_base.summary()
Explanation: We passed three arguments to the constructor:
weights, to specify which weight checkpoint to initialize the model from
include_top, which refers to including or not the densely-connected classifier on top of the network. By default, this
densely-connected classifier would correspond to the 1000 classes from ImageNet. Since we intend to use our own densely-connected
classifier (with only two classes, cat and dog), we don't need to include it.
input_shape, the shape of the image tensors that we will feed to the network. This argument is purely optional: if we don't pass it,
then the network will be able to process inputs of any size.
Here's the detail of the architecture of the VGG16 convolutional base: it's very similar to the simple convnets that you are already
familiar with.
End of explanation
import os
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
base_dir = '/Users/fchollet/Downloads/cats_and_dogs_small'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')
datagen = ImageDataGenerator(rescale=1./255)
batch_size = 20
def extract_features(directory, sample_count):
features = np.zeros(shape=(sample_count, 4, 4, 512))
labels = np.zeros(shape=(sample_count))
generator = datagen.flow_from_directory(
directory,
target_size=(150, 150),
batch_size=batch_size,
class_mode='binary')
i = 0
for inputs_batch, labels_batch in generator:
features_batch = conv_base.predict(inputs_batch)
features[i * batch_size : (i + 1) * batch_size] = features_batch
labels[i * batch_size : (i + 1) * batch_size] = labels_batch
i += 1
if i * batch_size >= sample_count:
# Note that since generators yield data indefinitely in a loop,
# we must `break` after every image has been seen once.
break
return features, labels
train_features, train_labels = extract_features(train_dir, 2000)
validation_features, validation_labels = extract_features(validation_dir, 1000)
test_features, test_labels = extract_features(test_dir, 1000)
Explanation: The final feature map has shape (4, 4, 512). That's the feature on top of which we will stick a densely-connected classifier.
At this point, there are two ways we could proceed:
Running the convolutional base over our dataset, recording its output to a Numpy array on disk, then using this data as input to a
standalone densely-connected classifier similar to those you have seen in the first chapters of this book. This solution is very fast and
cheap to run, because it only requires running the convolutional base once for every input image, and the convolutional base is by far the
most expensive part of the pipeline. However, for the exact same reason, this technique would not allow us to leverage data augmentation at
all.
Extending the model we have (conv_base) by adding Dense layers on top, and running the whole thing end-to-end on the input data. This
allows us to use data augmentation, because every input image is going through the convolutional base every time it is seen by the model.
However, for this same reason, this technique is far more expensive than the first one.
We will cover both techniques. Let's walk through the code required to set-up the first one: recording the output of conv_base on our
data and using these outputs as inputs to a new model.
We will start by simply running instances of the previously-introduced ImageDataGenerator to extract images as Numpy arrays as well as
their labels. We will extract features from these images simply by calling the predict method of the conv_base model.
End of explanation
train_features = np.reshape(train_features, (2000, 4 * 4 * 512))
validation_features = np.reshape(validation_features, (1000, 4 * 4 * 512))
test_features = np.reshape(test_features, (1000, 4 * 4 * 512))
Explanation: The extracted features are currently of shape (samples, 4, 4, 512). We will feed them to a densely-connected classifier, so first we must
flatten them to (samples, 8192):
End of explanation
from keras import models
from keras import layers
from keras import optimizers
model = models.Sequential()
model.add(layers.Dense(256, activation='relu', input_dim=4 * 4 * 512))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer=optimizers.RMSprop(lr=2e-5),
loss='binary_crossentropy',
metrics=['acc'])
history = model.fit(train_features, train_labels,
epochs=30,
batch_size=20,
validation_data=(validation_features, validation_labels))
Explanation: At this point, we can define our densely-connected classifier (note the use of dropout for regularization), and train it on the data and
labels that we just recorded:
End of explanation
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
Explanation: Training is very fast, since we only have to deal with two Dense layers -- an epoch takes less than one second even on CPU.
Let's take a look at the loss and accuracy curves during training:
End of explanation
from keras import models
from keras import layers
model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
Explanation: We reach a validation accuracy of about 90%, much better than what we could achieve in the previous section with our small model trained from
scratch. However, our plots also indicate that we are overfitting almost from the start -- despite using dropout with a fairly large rate.
This is because this technique does not leverage data augmentation, which is essential to preventing overfitting with small image datasets.
Now, let's review the second technique we mentioned for doing feature extraction, which is much slower and more expensive, but which allows
us to leverage data augmentation during training: extending the conv_base model and running it end-to-end on the inputs. Note that this
technique is in fact so expensive that you should only attempt it if you have access to a GPU: it is absolutely intractable on CPU. If you
cannot run your code on GPU, then the previous technique is the way to go.
Because models behave just like layers, you can add a model (like our conv_base) to a Sequential model just like you would add a layer.
So you can do the following:
End of explanation
model.summary()
Explanation: This is what our model looks like now:
End of explanation
print('This is the number of trainable weights '
'before freezing the conv base:', len(model.trainable_weights))
conv_base.trainable = False
print('This is the number of trainable weights '
'after freezing the conv base:', len(model.trainable_weights))
Explanation: As you can see, the convolutional base of VGG16 has 14,714,688 parameters, which is very large. The classifier we are adding on top has 2
million parameters.
Before we compile and train our model, a very important thing to do is to freeze the convolutional base. "Freezing" a layer or set of
layers means preventing their weights from getting updated during training. If we don't do this, then the representations that were
previously learned by the convolutional base would get modified during training. Since the Dense layers on top are randomly initialized,
very large weight updates would be propagated through the network, effectively destroying the representations previously learned.
In Keras, freezing a network is done by setting its trainable attribute to False:
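As a side note, the same effect can be achieved one layer at a time (a small illustration; this notebook sticks with the single conv_base.trainable attribute):
for layer in conv_base.layers:
    layer.trainable = False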
End of explanation
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
# Note that the validation data should not be augmented!
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
# This is the target directory
train_dir,
# All images will be resized to 150x150
target_size=(150, 150),
batch_size=20,
# Since we use binary_crossentropy loss, we need binary labels
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary')
model.compile(loss='binary_crossentropy',
optimizer=optimizers.RMSprop(lr=2e-5),
metrics=['acc'])
history = model.fit_generator(
train_generator,
steps_per_epoch=100,
epochs=30,
validation_data=validation_generator,
validation_steps=50,
verbose=2)
model.save('cats_and_dogs_small_3.h5')
Explanation: With this setup, only the weights from the two Dense layers that we added will be trained. That's a total of four weight tensors: two per
layer (the main weight matrix and the bias vector). Note that in order for these changes to take effect, we must first compile the model.
If you ever modify weight trainability after compilation, you should then re-compile the model, or these changes would be ignored.
Now we can start training our model, with the same data augmentation configuration that we used in our previous example:
End of explanation
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
Explanation: Let's plot our results again:
End of explanation
conv_base.summary()
Explanation: As you can see, we reach a validation accuracy of about 96%. This is much better than our small convnet trained from scratch.
Fine-tuning
Another widely used technique for model reuse, complementary to feature extraction, is fine-tuning.
Fine-tuning consists in unfreezing a few of the top layers
of a frozen model base used for feature extraction, and jointly training both the newly added part of the model (in our case, the
fully-connected classifier) and these top layers. This is called "fine-tuning" because it slightly adjusts the more abstract
representations of the model being reused, in order to make them more relevant for the problem at hand.
We have stated before that it was necessary to freeze the convolution base of VGG16 in order to be able to train a randomly initialized
classifier on top. For the same reason, it is only possible to fine-tune the top layers of the convolutional base once the classifier on
top has already been trained. If the classifier wasn't already trained, then the error signal propagating through the network during
training would be too large, and the representations previously learned by the layers being fine-tuned would be destroyed. Thus the steps
for fine-tuning a network are as follows:
1) Add your custom network on top of an already trained base network.
2) Freeze the base network.
3) Train the part you added.
4) Unfreeze some layers in the base network.
5) Jointly train both these layers and the part you added.
We have already completed the first 3 steps when doing feature extraction. Let's proceed with the 4th step: we will unfreeze our conv_base,
and then freeze individual layers inside of it.
As a reminder, this is what our convolutional base looks like:
End of explanation
conv_base.trainable = True
set_trainable = False
for layer in conv_base.layers:
if layer.name == 'block5_conv1':
set_trainable = True
if set_trainable:
layer.trainable = True
else:
layer.trainable = False
Explanation: We will fine-tune the last 3 convolutional layers, which means that all layers up until block4_pool should be frozen, and the layers
block5_conv1, block5_conv2 and block5_conv3 should be trainable.
Why not fine-tune more layers? Why not fine-tune the entire convolutional base? We could. However, we need to consider that:
Earlier layers in the convolutional base encode more generic, reusable features, while layers higher up encode more specialized features. It is
more useful to fine-tune the more specialized features, as these are the ones that need to be repurposed on our new problem. There would
be fast-decreasing returns in fine-tuning lower layers.
The more parameters we are training, the more we are at risk of overfitting. The convolutional base has 15M parameters, so it would be
risky to attempt to train it on our small dataset.
Thus, in our situation, it is a good strategy to only fine-tune the top 2 to 3 layers in the convolutional base.
Let's set this up, starting from where we left off in the previous example:
End of explanation
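# Optional sanity check (added; not part of the original text): list which layers of the
# convolutional base will actually be updated during fine-tuning.
for layer in conv_base.layers:
    print(layer.name, layer.trainable)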
model.compile(loss='binary_crossentropy',
optimizer=optimizers.RMSprop(lr=1e-5),
metrics=['acc'])
history = model.fit_generator(
train_generator,
steps_per_epoch=100,
epochs=100,
validation_data=validation_generator,
validation_steps=50)
model.save('cats_and_dogs_small_4.h5')
Explanation: Now we can start fine-tuning our network. We will do this with the RMSprop optimizer, using a very low learning rate. The reason for using
a low learning rate is that we want to limit the magnitude of the modifications we make to the representations of the 3 layers that we are
fine-tuning. Updates that are too large may harm these representations.
Now let's proceed with fine-tuning:
End of explanation
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
Explanation: Let's plot our results using the same plotting code as before:
End of explanation
def smooth_curve(points, factor=0.8):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
plt.plot(epochs,
smooth_curve(acc), 'bo', label='Smoothed training acc')
plt.plot(epochs,
smooth_curve(val_acc), 'b', label='Smoothed validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs,
smooth_curve(loss), 'bo', label='Smoothed training loss')
plt.plot(epochs,
smooth_curve(val_loss), 'b', label='Smoothed validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
Explanation: These curves look very noisy. To make them more readable, we can smooth them by replacing every loss and accuracy with exponential moving
averages of these quantities. Here's a trivial utility function to do this:
End of explanation
test_generator = test_datagen.flow_from_directory(
test_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary')
test_loss, test_acc = model.evaluate_generator(test_generator, steps=50)
print('test acc:', test_acc)
Explanation: These curves look much cleaner and more stable. We are seeing a nice 1% absolute improvement.
Note that the loss curve does not show any real improvement (in fact, it is deteriorating). You may wonder, how could accuracy improve if the
loss isn't decreasing? The answer is simple: what we display is an average of pointwise loss values, but what actually matters for accuracy
is the distribution of the loss values, not their average, since accuracy is the result of a binary thresholding of the class probability
predicted by the model. The model may still be improving even if this isn't reflected in the average loss.
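As a tiny illustration of this point (added here, not from the original text; it assumes binary labels and a 0.5 decision threshold), compare a model that is confidently wrong on one sample with a model that is barely right on every sample:
import numpy as np
y = np.array([1., 1., 1., 0.])
p_a = np.array([0.45, 0.95, 0.95, 0.05])   # confident, but one sample lands on the wrong side of 0.5
p_b = np.array([0.51, 0.51, 0.51, 0.49])   # barely on the right side of 0.5 for every sample
logloss = lambda p: -np.mean(y * np.log(p) + (1 - y) * np.log(1 - p))
accuracy = lambda p: np.mean((p > 0.5) == y)
print(logloss(p_a), accuracy(p_a))   # lower average loss, 75% accuracy
print(logloss(p_b), accuracy(p_b))   # higher average loss, 100% accuracy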
We can now finally evaluate this model on the test data:
End of explanation |
14,539 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Setup
We're going to download the collected works of Nietzsche to use as our data for this class.
Step1: Sometimes it's useful to have a zero value in the dataset, e.g. for padding
Step2: Map from chars to indices and back again
Step3: idx will be the data we use from now on - it simply converts all the characters to their index (based on the mapping above)
Step4: 3 char model
Create inputs
Create a list of every 4th character, starting at the 0th, 1st, 2nd, then 3rd characters
Step5: Our inputs
Step6: Our output
Step7: The first 4 inputs and outputs
Step8: The number of latent factors to create (i.e. the size of the embedding matrix)
Step9: Create inputs and embedding outputs for each of our 3 character inputs
Step10: Create and train model
Pick a size for our hidden state
Step11: This is the 'green arrow' from our diagram - the layer operation from input to hidden.
Step12: Our first hidden activation is simply this function applied to the result of the embedding of the first character.
Step13: This is the 'orange arrow' from our diagram - the layer operation from hidden to hidden.
Step14: Our second and third hidden activations add the previous hidden state (after applying dense_hidden) to the new input state.
Step15: This is the 'blue arrow' from our diagram - the layer operation from hidden to output.
Step16: The third hidden state is the input to our output layer.
Step17: Test model
Step18: Our first RNN!
Create inputs
This is the size of our unrolled RNN.
Step19: For each of 0 through 7, create a list of every 8th character with that starting point. These will be the 8 inputs to our model.
Step20: Then create a list of the next character in each of these series. These will be the labels for our model.
Step21: So each column below is one series of 8 characters from the text.
Step22: ...and this is the next character after each sequence.
Step23: Create and train model
Step24: The first character of each sequence goes through dense_in(), to create our first hidden activations.
Step25: Then for each successive layer we combine the output of dense_in() on the next character with the output of dense_hidden() on the current hidden state, to create the new hidden state.
Step26: Putting the final hidden state through dense_out() gives us our output.
Step27: So now we can create our model.
Step28: Test model
Step29: Our first RNN with keras!
Step30: This is nearly exactly equivalent to the RNN we built ourselves in the previous section.
Step31: Returning sequences
Create inputs
To use a sequence model, we can leave our input unchanged - but we have to change our output to a sequence (of course!)
Here, c_out_dat is identical to c_in_dat, but moved across 1 character.
Step32: Reading down each column shows one set of inputs and outputs.
Step33: Create and train model
Step34: We're going to pass a vector of all zeros as our starting point - here are our input layers for that
Step35: Test model
Step36: Sequence model with keras
Step37: To convert our previous keras model into a sequence model, simply add the 'return_sequences=True' parameter, and add TimeDistributed() around our dense layer.
Step38: One-hot sequence model with keras
This is the keras version of the theano model that we're about to create.
Step39: Stateful model with keras
Step40: A stateful model is easy to create (just add "stateful=True") but harder to train. We had to add batchnorm and use LSTM to get reasonable results.
When using stateful in keras, you have to also add 'batch_input_shape' to the first layer, and fix the batch size there.
Step41: Since we're using a fixed batch shape, we have to ensure our inputs and outputs are an even multiple of the batch size.
Step42: Theano RNN
Step43: Using raw theano, we have to create our weight matrices and bias vectors ourselves - here are the functions we'll use to do so (using glorot initialization).
The return values are wrapped in shared(), which is how we tell theano that it can manage this data (copying it to and from the GPU as necessary).
Step44: We return the weights and biases together as a tuple. For the hidden weights, we'll use an identity initialization (as recommended by Hinton.)
Step45: Theano doesn't actually do any computations until we explicitly compile and evaluate the function (at which point it'll be turned into CUDA code and sent off to the GPU). So our job is to describe the computations that we'll want theano to do - the first step is to tell theano what inputs we'll be providing to our computation
Step46: Now we're ready to create our initial weight matrices.
Step47: Theano handles looping by using the GPU scan operation. We have to tell theano what to do at each step through the scan - this is the function we'll use, which does a single forward pass for one character
Step48: Now we can provide everything necessary for the scan operation, so we can set that up - we have to pass in the function to call at each step, the sequence to step through, the initial values of the outputs, and any other arguments to pass to the step function.
Step49: We can now calculate our loss function, and all of our gradients, with just a couple of lines of code!
Step50: We even have to show theano how to do SGD - so we set up this dictionary of updates to complete after every forward pass, which applies the standard SGD update rule to every weight.
Step51: We're finally ready to compile the function!
Step52: To use it, we simply loop through our input data, calling the function compiled above, and printing our progress from time to time.
Step53: Pure python RNN!
Set up basic functions
Now we're going to try to repeat the above theano RNN, using just pure python (and numpy). Which means, we have to do everything ourselves, including defining the basic functions of a neural net! Below are all of the definitions, along with tests to check that they give the same answers as theano. The functions ending in _d are the derivatives of each function.
Step54: We also have to define our own scan function. Since we're not worrying about running things in parallel, it's very simple to implement
Step55: ...for instance, scan on + is the cumulative sum.
Step56: Set up training
Let's now build the functions to do the forward and backward passes of our RNN. First, define our data and shape.
Step57: Here's the function to do a single forward pass of an RNN, for a single character.
Step58: We use scan to apply the above to a whole sequence of characters.
Step59: Now we can define the backward step. We use a loop to go through every element of the sequence. The derivatives are computed by applying the chain rule at each step and accumulating the gradients across the sequence.
Step60: Now we can set up our initial weight matrices. Note that we're not using bias at all in this example, in order to keep things simpler.
Step61: Our loop looks much like the theano loop in the previous section, except that we have to call the backwards step ourselves.
Step62: Keras GRU
Identical to the last keras rnn, but a GRU!
Step63: Theano GRU
Separate weights
The theano GRU looks just like the simple theano RNN, except for the use of the reset and update gates. Each of these gates requires its own hidden and input weights, so we add those to our weight matrices.
Step64: Here's the definition of a gate - it's just a sigmoid applied to the addition of the dot products of the input vectors.
Step65: Our step is nearly identical to before, except that we multiply our hidden state by our reset gate, and we update our hidden state based on the update gate.
Step66: Everything from here on is identical to our simple RNN in theano.
Step67: Combined weights
We can make the previous section simpler and faster by concatenating the hidden and input matrices and inputs together. We're not going to step through this cell by cell - you'll see it's identical to the previous section except for this concatenation. | Python Code:
path = get_file('nietzsche.txt', origin="https://s3.amazonaws.com/text-datasets/nietzsche.txt")
text = open(path).read()
print('corpus length:', len(text))
chars = sorted(list(set(text)))
vocab_size = len(chars)+1
print('total chars:', vocab_size)
Explanation: Setup
We're going to download the collected works of Nietzsche to use as our data for this class.
End of explanation
chars.insert(0, "\0")
''.join(chars[1:-6])
Explanation: Sometimes it's useful to have a zero value in the dataset, e.g. for padding
End of explanation
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
Explanation: Map from chars to indices and back again
End of explanation
idx = [char_indices[c] for c in text]
idx[:10]
''.join(indices_char[i] for i in idx[:70])
Explanation: idx will be the data we use from now on - it simply converts all the characters to their index (based on the mapping above)
End of explanation
cs=3
c1_dat = [idx[i] for i in xrange(0, len(idx)-1-cs, cs)]
c2_dat = [idx[i+1] for i in xrange(0, len(idx)-1-cs, cs)]
c3_dat = [idx[i+2] for i in xrange(0, len(idx)-1-cs, cs)]
c4_dat = [idx[i+3] for i in xrange(0, len(idx)-1-cs, cs)]
Explanation: 3 char model
Create inputs
Create a list of every 4th character, starting at the 0th, 1st, 2nd, then 3rd characters
End of explanation
x1 = np.stack(c1_dat[:-2])
x2 = np.stack(c2_dat[:-2])
x3 = np.stack(c3_dat[:-2])
Explanation: Our inputs
End of explanation
y = np.stack(c4_dat[:-2])
Explanation: Our output
End of explanation
x1[:4], x2[:4], x3[:4]
y[:4]
x1.shape, y.shape
Explanation: The first 4 inputs and outputs
End of explanation
n_fac = 42
Explanation: The number of latent factors to create (i.e. the size of the embedding matrix)
End of explanation
def embedding_input(name, n_in, n_out):
inp = Input(shape=(1,), dtype='int64', name=name)
emb = Embedding(n_in, n_out, input_length=1)(inp)
return inp, Flatten()(emb)
c1_in, c1 = embedding_input('c1', vocab_size, n_fac)
c2_in, c2 = embedding_input('c2', vocab_size, n_fac)
c3_in, c3 = embedding_input('c3', vocab_size, n_fac)
Explanation: Create inputs and embedding outputs for each of our 3 character inputs
End of explanation
n_hidden = 256
Explanation: Create and train model
Pick a size for our hidden state
End of explanation
dense_in = Dense(n_hidden, activation='relu')
Explanation: This is the 'green arrow' from our diagram - the layer operation from input to hidden.
End of explanation
c1_hidden = dense_in(c1)
Explanation: Our first hidden activation is simply this function applied to the result of the embedding of the first character.
End of explanation
dense_hidden = Dense(n_hidden, activation='tanh')
Explanation: This is the 'orange arrow' from our diagram - the layer operation from hidden to hidden.
End of explanation
c2_dense = dense_in(c2)
hidden_2 = dense_hidden(c1_hidden)
c2_hidden = merge([c2_dense, hidden_2])
c3_dense = dense_in(c3)
hidden_3 = dense_hidden(c2_hidden)
c3_hidden = merge([c3_dense, hidden_3])
Explanation: Our second and third hidden activations add the previous hidden state (after applying dense_hidden) to the new input state.
End of explanation
dense_out = Dense(vocab_size, activation='softmax')
Explanation: This is the 'blue arrow' from our diagram - the layer operation from hidden to output.
End of explanation
c4_out = dense_out(c3_hidden)
model = Model([c1_in, c2_in, c3_in], c4_out)
model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam())
model.optimizer.lr=0.000001
model.fit([x1, x2, x3], y, batch_size=64, nb_epoch=4)
model.optimizer.lr=0.01
model.fit([x1, x2, x3], y, batch_size=64, nb_epoch=4)
model.optimizer.lr.set_value(0.000001)
model.fit([x1, x2, x3], y, batch_size=64, nb_epoch=4)
model.optimizer.lr.set_value(0.01)
model.fit([x1, x2, x3], y, batch_size=64, nb_epoch=4)
Explanation: The third hidden state is the input to our output layer.
End of explanation
def get_next(inp):
idxs = [char_indices[c] for c in inp]
arrs = [np.array(i)[np.newaxis] for i in idxs]
p = model.predict(arrs)
i = np.argmax(p)
return chars[i]
get_next('phi')
get_next(' th')
get_next(' an')
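# Added usage sketch (not in the original notebook): chain predictions to generate a longer
# sample by repeatedly feeding the last 3 characters back in. The seed must be at least 3 chars.
def sample_chars(seed, n=20):
    s = seed
    for _ in range(n):
        s += get_next(s[-3:])
    return s
sample_chars(' th')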
Explanation: Test model
End of explanation
cs=8
Explanation: Our first RNN!
Create inputs
This is the size of our unrolled RNN.
End of explanation
c_in_dat = [[idx[i+n] for i in xrange(0, len(idx)-1-cs, cs)]
for n in range(cs)]
Explanation: For each of 0 through 7, create a list of every 8th character with that starting point. These will be the 8 inputs to our model.
End of explanation
c_out_dat = [idx[i+cs] for i in xrange(0, len(idx)-1-cs, cs)]
xs = [np.stack(c[:-2]) for c in c_in_dat]
len(xs), xs[0].shape
y = np.stack(c_out_dat[:-2])
Explanation: Then create a list of the next character in each of these series. These will be the labels for our model.
End of explanation
[xs[n][:cs] for n in range(cs)]
Explanation: So each column below is one series of 8 characters from the text.
End of explanation
y[:cs]
n_fac = 42
Explanation: ...and this is the next character after each sequence.
End of explanation
def embedding_input(name, n_in, n_out):
inp = Input(shape=(1,), dtype='int64', name=name+'_in')
emb = Embedding(n_in, n_out, input_length=1, name=name+'_emb')(inp)
return inp, Flatten()(emb)
c_ins = [embedding_input('c'+str(n), vocab_size, n_fac) for n in range(cs)]
n_hidden = 256
dense_in = Dense(n_hidden, activation='relu')
dense_hidden = Dense(n_hidden, activation='relu', init='identity')
dense_out = Dense(vocab_size, activation='softmax')
Explanation: Create and train model
End of explanation
hidden = dense_in(c_ins[0][1])
Explanation: The first character of each sequence goes through dense_in(), to create our first hidden activations.
End of explanation
for i in range(1,cs):
c_dense = dense_in(c_ins[i][1])
hidden = dense_hidden(hidden)
hidden = merge([c_dense, hidden])
Explanation: Then for each successive layer we combine the output of dense_in() on the next character with the output of dense_hidden() on the current hidden state, to create the new hidden state.
End of explanation
c_out = dense_out(hidden)
Explanation: Putting the final hidden state through dense_out() gives us our output.
End of explanation
model = Model([c[0] for c in c_ins], c_out)
model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam())
model.fit(xs, y, batch_size=64, nb_epoch=12)
Explanation: So now we can create our model.
End of explanation
def get_next(inp):
idxs = [np.array(char_indices[c])[np.newaxis] for c in inp]
p = model.predict(idxs)
return chars[np.argmax(p)]
get_next('for thos')
get_next('part of ')
get_next('queens a')
Explanation: Test model
End of explanation
n_hidden, n_fac, cs, vocab_size = (256, 42, 8, 86)
Explanation: Our first RNN with keras!
End of explanation
model=Sequential([
Embedding(vocab_size, n_fac, input_length=cs),
SimpleRNN(n_hidden, activation='relu', inner_init='identity'),
Dense(vocab_size, activation='softmax')
])
model.summary()
model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam())
model.fit(np.concatenate(xs,axis=1), y, batch_size=64, nb_epoch=8)
def get_next_keras(inp):
idxs = [char_indices[c] for c in inp]
arrs = np.array(idxs)[np.newaxis,:]
p = model.predict(arrs)[0]
return chars[np.argmax(p)]
get_next_keras('this is ')
get_next_keras('part of ')
get_next_keras('queens a')
Explanation: This is nearly exactly equivalent to the RNN we built ourselves in the previous section.
End of explanation
#c_in_dat = [[idx[i+n] for i in xrange(0, len(idx)-1-cs, cs)]
# for n in range(cs)]
c_out_dat = [[idx[i+n] for i in xrange(1, len(idx)-cs, cs)]
for n in range(cs)]
ys = [np.stack(c[:-2]) for c in c_out_dat]
Explanation: Returning sequences
Create inputs
To use a sequence model, we can leave our input unchanged - but we have to change our output to a sequence (of course!)
Here, c_out_dat is identical to c_in_dat, but moved across 1 character.
End of explanation
[xs[n][:cs] for n in range(cs)]
[ys[n][:cs] for n in range(cs)]
Explanation: Reading down each column shows one set of inputs and outputs.
End of explanation
dense_in = Dense(n_hidden, activation='relu')
dense_hidden = Dense(n_hidden, activation='relu', init='identity')
dense_out = Dense(vocab_size, activation='softmax', name='output')
Explanation: Create and train model
End of explanation
inp1 = Input(shape=(n_fac,), name='zeros')
hidden = dense_in(inp1)
outs = []
for i in range(cs):
c_dense = dense_in(c_ins[i][1])
hidden = dense_hidden(hidden)
hidden = merge([c_dense, hidden], mode='sum')
# every layer now has an output
outs.append(dense_out(hidden))
model = Model([inp1] + [c[0] for c in c_ins], outs)
model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam())
zeros = np.tile(np.zeros(n_fac), (len(xs[0]),1))
zeros.shape
model.fit([zeros]+xs, ys, batch_size=64, nb_epoch=12)
Explanation: We're going to pass a vector of all zeros as our starting point - here are our input layers for that:
End of explanation
def get_nexts(inp):
idxs = [char_indices[c] for c in inp]
arrs = [np.array(i)[np.newaxis] for i in idxs]
p = model.predict([np.zeros(n_fac)[np.newaxis,:]] + arrs)
print(list(inp))
return [chars[np.argmax(o)] for o in p]
get_nexts(' this is')
get_nexts(' part of')
Explanation: Test model
End of explanation
n_hidden, n_fac, cs, vocab_size
Explanation: Sequence model with keras
End of explanation
model=Sequential([
Embedding(vocab_size, n_fac, input_length=cs),
SimpleRNN(n_hidden, return_sequences=True, activation='relu', inner_init='identity'),
TimeDistributed(Dense(vocab_size, activation='softmax')),
])
model.summary()
model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam())
xs[0].shape
x_rnn=np.stack(xs, axis=1)
y_rnn=np.expand_dims(np.stack(ys, axis=1), -1)
x_rnn.shape, y_rnn.shape
model.fit(x_rnn, y_rnn, batch_size=64, nb_epoch=8)
def get_nexts_keras(inp):
idxs = [char_indices[c] for c in inp]
arr = np.array(idxs)[np.newaxis,:]
p = model.predict(arr)[0]
print(list(inp))
return [chars[np.argmax(o)] for o in p]
get_nexts_keras(' this is')
Explanation: To convert our previous keras model into a sequence model, simply add the 'return_sequences=True' parameter, and add TimeDistributed() around our dense layer.
End of explanation
model=Sequential([
SimpleRNN(n_hidden, return_sequences=True, input_shape=(cs, vocab_size),
activation='relu', inner_init='identity'),
TimeDistributed(Dense(vocab_size, activation='softmax')),
])
model.compile(loss='categorical_crossentropy', optimizer=Adam())
oh_ys = [to_categorical(o, vocab_size) for o in ys]
oh_y_rnn=np.stack(oh_ys, axis=1)
oh_xs = [to_categorical(o, vocab_size) for o in xs]
oh_x_rnn=np.stack(oh_xs, axis=1)
oh_x_rnn.shape, oh_y_rnn.shape
model.fit(oh_x_rnn, oh_y_rnn, batch_size=64, nb_epoch=8)
def get_nexts_oh(inp):
idxs = np.array([char_indices[c] for c in inp])
arr = to_categorical(idxs, vocab_size)
p = model.predict(arr[np.newaxis,:])[0]
print(list(inp))
return [chars[np.argmax(o)] for o in p]
get_nexts_oh(' this is')
Explanation: One-hot sequence model with keras
This is the keras version of the theano model that we're about to create.
End of explanation
bs=64
Explanation: Stateful model with keras
End of explanation
model=Sequential([
Embedding(vocab_size, n_fac, input_length=cs, batch_input_shape=(bs,8)),
BatchNormalization(),
LSTM(n_hidden, return_sequences=True, stateful=True),
TimeDistributed(Dense(vocab_size, activation='softmax')),
])
model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam())
Explanation: A stateful model is easy to create (just add "stateful=True") but harder to train. We had to add batchnorm and use LSTM to get reasonable results.
When using stateful in keras, you have to also add 'batch_input_shape' to the first layer, and fix the batch size there.
End of explanation
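# Added note (not in the original notebook): because the hidden state persists across batches
# in a stateful model, it is usual to clear it explicitly between passes over the data.
model.reset_states()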
mx = len(x_rnn)//bs*bs
model.fit(x_rnn[:mx], y_rnn[:mx], batch_size=bs, nb_epoch=4, shuffle=False)
model.optimizer.lr=1e-4
model.fit(x_rnn[:mx], y_rnn[:mx], batch_size=bs, nb_epoch=4, shuffle=False)
model.fit(x_rnn[:mx], y_rnn[:mx], batch_size=bs, nb_epoch=4, shuffle=False)
Explanation: Since we're using a fixed batch shape, we have to ensure our inputs and outputs are an even multiple of the batch size.
End of explanation
n_input = vocab_size
n_output = vocab_size
Explanation: Theano RNN
End of explanation
def init_wgts(rows, cols):
scale = math.sqrt(2/rows)
return shared(normal(scale=scale, size=(rows, cols)).astype(np.float32))
def init_bias(rows):
return shared(np.zeros(rows, dtype=np.float32))
Explanation: Using raw theano, we have to create our weight matrices and bias vectors ourselves - here are the functions we'll use to do so (using glorot initialization).
The return values are wrapped in shared(), which is how we tell theano that it can manage this data (copying it to and from the GPU as necessary).
End of explanation
def wgts_and_bias(n_in, n_out):
return init_wgts(n_in, n_out), init_bias(n_out)
def id_and_bias(n):
return shared(np.eye(n, dtype=np.float32)), init_bias(n)
Explanation: We return the weights and biases together as a tuple. For the hidden weights, we'll use an identity initialization (as recommended by Hinton.)
End of explanation
t_inp = T.matrix('inp')
t_outp = T.matrix('outp')
t_h0 = T.vector('h0')
lr = T.scalar('lr')
all_args = [t_h0, t_inp, t_outp, lr]
Explanation: Theano doesn't actually do any computations until we explicitly compile and evaluate the function (at which point it'll be turned into CUDA code and sent off to the GPU). So our job is to describe the computations that we'll want theano to do - the first step is to tell theano what inputs we'll be providing to our computation:
End of explanation
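# Tiny side example (added; t_a and f_double are illustrative names, not from the notebook):
# nothing is computed until we compile a theano function and call it with concrete values.
t_a = T.scalar('a')
f_double = theano.function([t_a], t_a * 2, allow_input_downcast=True)
f_double(3)   # returns array(6.0)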
W_h = id_and_bias(n_hidden)
W_x = wgts_and_bias(n_input, n_hidden)
W_y = wgts_and_bias(n_hidden, n_output)
w_all = list(chain.from_iterable([W_h, W_x, W_y]))
Explanation: Now we're ready to create our initial weight matrices.
End of explanation
def step(x, h, W_h, b_h, W_x, b_x, W_y, b_y):
# Calculate the hidden activations
h = nnet.relu(T.dot(x, W_x) + b_x + T.dot(h, W_h) + b_h)
# Calculate the output activations
y = nnet.softmax(T.dot(h, W_y) + b_y)
# Return both (the 'Flatten()' is to work around a theano bug)
return h, T.flatten(y, 1)
Explanation: Theano handles looping by using the GPU scan operation. We have to tell theano what to do at each step through the scan - this is the function we'll use, which does a single forward pass for one character:
End of explanation
[v_h, v_y], _ = theano.scan(step, sequences=t_inp,
outputs_info=[t_h0, None], non_sequences=w_all)
Explanation: Now we can provide everything necessary for the scan operation, so we can set that up - we have to pass in the function to call at each step, the sequence to step through, the initial values of the outputs, and any other arguments to pass to the step function.
End of explanation
error = nnet.categorical_crossentropy(v_y, t_outp).sum()
g_all = T.grad(error, w_all)
Explanation: We can now calculate our loss function, and all of our gradients, with just a couple of lines of code!
End of explanation
def upd_dict(wgts, grads, lr):
return OrderedDict({w: w-g*lr for (w,g) in zip(wgts,grads)})
upd = upd_dict(w_all, g_all, lr)
Explanation: We even have to show theano how to do SGD - so we set up this dictionary of updates to complete after every forward pass, which applies the standard SGD update rule to every weight.
End of explanation
fn = theano.function(all_args, error, updates=upd, allow_input_downcast=True)
X = oh_x_rnn
Y = oh_y_rnn
X.shape, Y.shape
Explanation: We're finally ready to compile the function!
End of explanation
err=0.0; l_rate=0.01
for i in range(len(X)):
err+=fn(np.zeros(n_hidden), X[i], Y[i], l_rate)
if i % 1000 == 999:
print ("Error:{:.3f}".format(err/1000))
err=0.0
f_y = theano.function([t_h0, t_inp], v_y, allow_input_downcast=True)
pred = np.argmax(f_y(np.zeros(n_hidden), X[6]), axis=1)
act = np.argmax(X[6], axis=1)
[indices_char[o] for o in act]
[indices_char[o] for o in pred]
Explanation: To use it, we simply loop through our input data, calling the function compiled above, and printing our progress from time to time.
End of explanation
def sigmoid(x): return 1/(1+np.exp(-x))
def sigmoid_d(x):
output = sigmoid(x)
return output*(1-output)
def relu(x): return np.maximum(0., x)
def relu_d(x): return (x > 0.)*1.
relu(np.array([3.,-3.])), relu_d(np.array([3.,-3.]))
def dist(a,b): return pow(a-b,2)
def dist_d(a,b): return 2*(a-b)
import pdb
eps = 1e-7
def x_entropy(pred, actual):
return -np.sum(actual * np.log(np.clip(pred, eps, 1-eps)))
def x_entropy_d(pred, actual): return -actual/pred
def softmax(x): return np.exp(x)/np.exp(x).sum()
def softmax_d(x):
sm = softmax(x)
res = np.expand_dims(-sm,-1)*sm
res[np.diag_indices_from(res)] = sm*(1-sm)
return res
test_preds = np.array([0.2,0.7,0.1])
test_actuals = np.array([0.,1.,0.])
nnet.categorical_crossentropy(test_preds, test_actuals).eval()
x_entropy(test_preds, test_actuals)
test_inp = T.dvector()
test_out = nnet.categorical_crossentropy(test_inp, test_actuals)
test_grad = theano.function([test_inp], T.grad(test_out, test_inp))
test_grad(test_preds)
x_entropy_d(test_preds, test_actuals)
pre_pred = random(oh_x_rnn[0][0].shape)
preds = softmax(pre_pred)
actual = oh_x_rnn[0][0]
np.allclose(softmax_d(pre_pred).dot(loss_d(preds,actual)), preds-actual)
softmax(test_preds)
nnet.softmax(test_preds).eval()
test_out = T.flatten(nnet.softmax(test_inp))
test_grad = theano.function([test_inp], theano.gradient.jacobian(test_out, test_inp))
test_grad(test_preds)
softmax_d(test_preds)
act=relu
act_d = relu_d
loss=x_entropy
loss_d=x_entropy_d
Explanation: Pure python RNN!
Set up basic functions
Now we're going to try to repeat the above theano RNN, using just pure python (and numpy). Which means, we have to do everything ourselves, including defining the basic functions of a neural net! Below are all of the definitions, along with tests to check that they give the same answers as theano. The functions ending in _d are the derivatives of each function.
End of explanation
def scan(fn, start, seq):
res = []
prev = start
for s in seq:
app = fn(prev, s)
res.append(app)
prev = app
return res
Explanation: We also have to define our own scan function. Since we're not worrying about running things in parallel, it's very simple to implement:
End of explanation
scan(lambda prev,curr: prev+curr, 0, range(5))
Explanation: ...for instance, scan on + is the cumulative sum.
End of explanation
inp = oh_x_rnn
outp = oh_y_rnn
n_input = vocab_size
n_output = vocab_size
inp.shape, outp.shape
Explanation: Set up training
Let's now build the functions to do the forward and backward passes of our RNN. First, define our data and shape.
End of explanation
def one_char(prev, item):
# Previous state
tot_loss, pre_hidden, pre_pred, hidden, ypred = prev
# Current inputs and output
x, y = item
pre_hidden = np.dot(x,w_x) + np.dot(hidden,w_h)
hidden = act(pre_hidden)
pre_pred = np.dot(hidden,w_y)
ypred = softmax(pre_pred)
return (
# Keep track of loss so we can report it
tot_loss+loss(ypred, y),
# Used in backprop
pre_hidden, pre_pred,
# Used in next iteration
hidden,
# To provide predictions
ypred)
Explanation: Here's the function to do a single forward pass of an RNN, for a single character.
End of explanation
def get_chars(n): return zip(inp[n], outp[n])
def one_fwd(n): return scan(one_char, (0,0,0,np.zeros(n_hidden),0), get_chars(n))
Explanation: We use scan to apply the above to a whole sequence of characters.
End of explanation
# "Columnify" a vector
def col(x): return x[:,newaxis]
def one_bkwd(args, n):
global w_x,w_y,w_h
i=inp[n] # 8x86
o=outp[n] # 8x86
d_pre_hidden = np.zeros(n_hidden) # 256
for p in reversed(range(len(i))):
totloss, pre_hidden, pre_pred, hidden, ypred = args[p]
x=i[p] # 86
y=o[p] # 86
d_pre_pred = softmax_d(pre_pred).dot(loss_d(ypred,y)) # 86
d_pre_hidden = (np.dot(d_pre_hidden, w_h.T)
+ np.dot(d_pre_pred,w_y.T)) * act_d(pre_hidden) # 256
# d(loss)/d(w_y) = d(loss)/d(pre_pred) * d(pre_pred)/d(w_y)
w_y -= col(hidden) * d_pre_pred * alpha
# d(loss)/d(w_h) = d(loss)/d(pre_hidden[p-1]) * d(pre_hidden[p-1])/d(w_h)
if (p>0): w_h -= args[p-1][3].dot(d_pre_hidden) * alpha
w_x -= col(x)*d_pre_hidden * alpha
return d_pre_hidden
Explanation: Now we can define the backward step. We use a loop to go through every element of the sequence. The derivatives are computed by applying the chain rule at each step and accumulating the gradients across the sequence.
End of explanation
scale=math.sqrt(2./n_input)
w_x = normal(scale=scale, size=(n_input,n_hidden))
w_y = normal(scale=scale, size=(n_hidden, n_output))
w_h = np.eye(n_hidden, dtype=np.float32)
Explanation: Now we can set up our initial weight matrices. Note that we're not using bias at all in this example, in order to keep things simpler.
End of explanation
overallError=0
alpha=0.0001
for n in range(10000):
res = one_fwd(n)
overallError+=res[-1][0]
deriv = one_bkwd(res, n)
if(n % 1000 == 999):
print ("Error:{:.4f}; Gradient:{:.5f}".format(
overallError/1000, np.linalg.norm(deriv)))
overallError=0
Explanation: Our loop looks much like the theano loop in the previous section, except that we have to call the backwards step ourselves.
End of explanation
model=Sequential([
GRU(n_hidden, return_sequences=True, input_shape=(cs, vocab_size),
activation='relu', inner_init='identity'),
TimeDistributed(Dense(vocab_size, activation='softmax')),
])
model.compile(loss='categorical_crossentropy', optimizer=Adam())
model.fit(oh_x_rnn, oh_y_rnn, batch_size=64, nb_epoch=8)
get_nexts_oh(' this is')
Explanation: Keras GRU
Identical to the last keras rnn, but a GRU!
End of explanation
W_h = id_and_bias(n_hidden)
W_x = init_wgts(n_input, n_hidden)
W_y = wgts_and_bias(n_hidden, n_output)
rW_h = init_wgts(n_hidden, n_hidden)
rW_x = wgts_and_bias(n_input, n_hidden)
uW_h = init_wgts(n_hidden, n_hidden)
uW_x = wgts_and_bias(n_input, n_hidden)
w_all = list(chain.from_iterable([W_h, W_y, uW_x, rW_x]))
w_all.extend([W_x, uW_h, rW_h])
Explanation: Theano GRU
Separate weights
The theano GRU looks just like the simple theano RNN, except for the use of the reset and update gates. Each of these gates requires its own hidden and input weights, so we add those to our weight matrices.
End of explanation
def gate(x, h, W_h, W_x, b_x):
return nnet.sigmoid(T.dot(x, W_x) + b_x + T.dot(h, W_h))
Explanation: Here's the definition of a gate - it's just a sigmoid applied to the addition of the dot products of the input vectors.
End of explanation
def step(x, h, W_h, b_h, W_y, b_y, uW_x, ub_x, rW_x, rb_x, W_x, uW_h, rW_h):
reset = gate(x, h, rW_h, rW_x, rb_x)
update = gate(x, h, uW_h, uW_x, ub_x)
h_new = gate(x, h * reset, W_h, W_x, b_h)
h = update*h + (1-update)*h_new
y = nnet.softmax(T.dot(h, W_y) + b_y)
return h, T.flatten(y, 1)
Explanation: Our step is nearly identical to before, except that we multiply our hidden state by our reset gate, and we update our hidden state based on the update gate.
End of explanation
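# For reference (added): the update computed by step() above, written out as equations.
#   r_t  = sigmoid(x_t . rW_x + rb_x + h_{t-1} . rW_h)          # reset gate
#   z_t  = sigmoid(x_t . uW_x + ub_x + h_{t-1} . uW_h)          # update gate
#   h~_t = sigmoid(x_t . W_x  + b_h  + (r_t * h_{t-1}) . W_h)   # candidate state (a standard GRU uses tanh here)
#   h_t  = z_t * h_{t-1} + (1 - z_t) * h~_t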
[v_h, v_y], _ = theano.scan(step, sequences=t_inp,
outputs_info=[t_h0, None], non_sequences=w_all)
error = nnet.categorical_crossentropy(v_y, t_outp).sum()
g_all = T.grad(error, w_all)
upd = upd_dict(w_all, g_all, lr)
fn = theano.function(all_args, error, updates=upd, allow_input_downcast=True)
err=0.0; l_rate=0.1
for i in range(len(X)):
err+=fn(np.zeros(n_hidden), X[i], Y[i], l_rate)
if i % 1000 == 999:
l_rate *= 0.95
print ("Error:{:.2f}".format(err/1000))
err=0.0
Explanation: Everything from here on is identical to our simple RNN in theano.
End of explanation
W = (shared(np.concatenate([np.eye(n_hidden), normal(size=(n_input, n_hidden))])
.astype(np.float32)), init_bias(n_hidden))
rW = wgts_and_bias(n_input+n_hidden, n_hidden)
uW = wgts_and_bias(n_input+n_hidden, n_hidden)
W_y = wgts_and_bias(n_hidden, n_output)
w_all = list(chain.from_iterable([W, W_y, uW, rW]))
def gate(m, W, b): return nnet.sigmoid(T.dot(m, W) + b)
def step(x, h, W, b, W_y, b_y, uW, ub, rW, rb):
m = T.concatenate([h, x])
reset = gate(m, rW, rb)
update = gate(m, uW, ub)
m = T.concatenate([h*reset, x])
h_new = gate(m, W, b)
h = update*h + (1-update)*h_new
y = nnet.softmax(T.dot(h, W_y) + b_y)
return h, T.flatten(y, 1)
[v_h, v_y], _ = theano.scan(step, sequences=t_inp,
outputs_info=[t_h0, None], non_sequences=w_all)
def upd_dict(wgts, grads, lr):
return OrderedDict({w: w-g*lr for (w,g) in zip(wgts,grads)})
error = nnet.categorical_crossentropy(v_y, t_outp).sum()
g_all = T.grad(error, w_all)
upd = upd_dict(w_all, g_all, lr)
fn = theano.function(all_args, error, updates=upd, allow_input_downcast=True)
err=0.0; l_rate=0.01
for i in range(len(X)):
err+=fn(np.zeros(n_hidden), X[i], Y[i], l_rate)
if i % 1000 == 999:
print ("Error:{:.2f}".format(err/1000))
err=0.0
Explanation: Combined weights
We can make the previous section simpler and faster by concatenating the hidden and input matrices and inputs together. We're not going to step through this cell by cell - you'll see it's identical to the previous section except for this concatenation.
End of explanation |
14,540 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Step1: IPython magic functions for Pyspark
Examples of shortcuts for executing SQL in Spark
Step2: Define test tables
Step3: Examples of how to use %SQL magic functions with Spark
Use %sql to run SQL and return a DataFrame, lazy evaluation
Step4: Use %sql_show to run SQL and show the top lines of the result set
Step5: Example of cell magic to run SQL spanning multiple lines
Step6: Use %sql_display to run SQL and display the results as a HTML table
Example of cell magic that runs SQL and then transforms it to Pandas. This will display the output as a HTML table in Jupyter notebooks
Step7: Use %sql_explain to display the execution plan | Python Code:
#
# IPython magic functions to use with Pyspark and Spark SQL
# The following code is intended as examples of shortcuts to simplify the use of SQL in pyspark
# The defined functions are:
#
# %sql <statement> - return a Spark DataFrame for lazy evaluation of the SQL
# %sql_show <statement> - run the SQL statement and show max_show_lines (50) lines
# %sql_display <statement> - run the SQL statement and display the results using a HTML table
# - this is implemented passing via Pandas and displays up to max_show_lines (50)
# %sql_explain <statement> - display the execution plan of the SQL statement
#
# Use: %<magic> for line magic or %%<magic> for cell magic.
#
# Author: [email protected]
# September 2016
#
from IPython.core.magic import register_line_cell_magic
# Configuration parameters
max_show_lines = 50 # Limit on the number of lines to show with %sql_show and %sql_display
detailed_explain = True # Set to False if you want to see only the physical plan when running explain
@register_line_cell_magic
def sql(line, cell=None):
"Return a Spark DataFrame for lazy evaluation of the sql. Use: %sql or %%sql"
val = cell if cell is not None else line
return spark.sql(val)
@register_line_cell_magic
def sql_show(line, cell=None):
"Execute sql and show the first max_show_lines lines. Use: %sql_show or %%sql_show"
val = cell if cell is not None else line
return spark.sql(val).show(max_show_lines)
@register_line_cell_magic
def sql_display(line, cell=None):
Execute sql and convert results to Pandas DataFrame for pretty display or further processing.
Use: %sql_display or %%sql_display
val = cell if cell is not None else line
return spark.sql(val).limit(max_show_lines).toPandas()
@register_line_cell_magic
def sql_explain(line, cell=None):
"Display the execution plan of the sql. Use: %sql_explain or %%sql_explain"
val = cell if cell is not None else line
return spark.sql(val).explain(detailed_explain)
Explanation: IPython magic functions for Pyspark
Examples of shortcuts for executing SQL in Spark
End of explanation
# Define test data and register it as tables
# This is a classic example of employee and department relational tables
# Test data will be used in the examples later in this notebook
from pyspark.sql import Row
Employee = Row("id", "name", "email", "manager_id", "dep_id")
df_emp = sqlContext.createDataFrame([
Employee(1234, 'John', '[email protected]', 1236, 10),
Employee(1235, 'Mike', '[email protected]', 1237, 10),
Employee(1236, 'Pat', '[email protected]', 1237, 20),
Employee(1237, 'Claire', '[email protected]', None, 20),
Employee(1238, 'Jim', '[email protected]', 1236, 30)
])
df_emp.registerTempTable("employee")
Department = Row("dep_id", "dep_name")
df_dep = sqlContext.createDataFrame([
Department(10, 'Engineering'),
Department(20, 'Head Quarter'),
Department(30, 'Human resources')
])
df_dep.registerTempTable("department")
Explanation: Define test tables
End of explanation
# Example of line magic, a shortcut to run SQL in pyspark
# Pyspark has lazy evaluation, so the query is not executed in this example
df = %sql select * from employee
df
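# Added illustration: the DataFrame above is lazy, so an action such as count() or show()
# is what actually triggers execution of the SQL.
df.count()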
Explanation: Examples of how to use %SQL magic functions with Spark
Use %sql to run SQL and return a DataFrame, lazy evaluation
End of explanation
# Example of line magic, the SQL is executed and the result is displayed
# the maximum number of displayed lines is configurable (max_show_lines)
%sql_show select * from employee
Explanation: Use %sql_show to run SQL and show the top lines of the result set
End of explanation
%%sql_show
select emp.id, emp.name, emp.email, emp.manager_id, dep.dep_name
from employee emp, department dep
where emp.dep_id=dep.dep_id
Explanation: Example of cell magic to run SQL spanning multiple lines
End of explanation
%%sql_display
select emp.id, emp.name, emp.email, emp2.name as manager_name, dep.dep_name
from employee emp
left outer join employee emp2 on emp2.id=emp.manager_id
join department dep on emp.dep_id=dep.dep_id
Explanation: Use %sql_display to run SQL and display the results as a HTML table
Example of cell magic that runs SQL and then transforms it to Pandas. This will display the output as a HTML table in Jupyter notebooks
End of explanation
%%sql_explain
select emp.id, emp.name, emp.email, emp2.name as manager_name, dep.dep_name
from employee emp
left outer join employee emp2 on emp2.id=emp.manager_id
join department dep on emp.dep_id=dep.dep_id
Explanation: Use %sql_explain to display the execution plan
End of explanation |
14,541 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Machine Learning Engineer Nanodegree
Introduction and Foundations
Project 0
Step1: From a sample of the RMS Titanic data, we can see the various features present for each passenger on the ship
Step3: The very same sample of the RMS Titanic data now shows the Survived feature removed from the DataFrame. Note that data (the passenger data) and outcomes (the outcomes of survival) are now paired. That means for any passenger data.loc[i], they have the survival outcome outcome[i].
To measure the performance of our predictions, we need a metric to score our predictions against the true outcomes of survival. Since we are interested in how accurate our predictions are, we will calculate the proportion of passengers where our prediction of their survival is correct. Run the code cell below to create our accuracy_score function and test a prediction on the first five passengers.
Think
Step5: Tip
Step6: Question 1
Using the RMS Titanic data, how accurate would a prediction be that none of the passengers survived?
Hint
Step7: Answer
Step9: Examining the survival statistics, a large majority of males did not survive the ship sinking. However, a majority of females did survive the ship sinking. Let's build on our previous prediction
Step10: Question 2
How accurate would a prediction be that all female passengers survived and the remaining passengers did not survive?
Hint
Step11: Answer
Step13: Examining the survival statistics, the majority of males younger than 10 survived the ship sinking, whereas most males age 10 or older did not survive the ship sinking. Let's continue to build on our previous prediction
Step14: Question 3
How accurate would a prediction be that all female passengers and all male passengers younger than 10 survived?
Hint
Step15: Answer
Step17: After exploring the survival statistics visualization, fill in the missing code below so that the function will make your prediction.
Make sure to keep track of the various features and conditions you tried before arriving at your final prediction model.
Hint
Step18: Question 4
Describe the steps you took to implement the final prediction model so that it got an accuracy of at least 80%. What features did you look at? Were certain features more informative than others? Which conditions did you use to split the survival outcomes in the data? How accurate are your predictions?
Hint | Python Code:
import numpy as np
import pandas as pd
# RMS Titanic data visualization code
from titanic_visualizations import survival_stats
from IPython.display import display
%matplotlib inline
# Load the dataset
in_file = 'titanic_data.csv'
full_data = pd.read_csv(in_file)
# Print the first few entries of the RMS Titanic data
display(full_data.head())
Explanation: Machine Learning Engineer Nanodegree
Introduction and Foundations
Project 0: Titanic Survival Exploration
In 1912, the ship RMS Titanic struck an iceberg on its maiden voyage and sank, resulting in the deaths of most of its passengers and crew. In this introductory project, we will explore a subset of the RMS Titanic passenger manifest to determine which features best predict whether someone survived or did not survive. To complete this project, you will need to implement several conditional predictions and answer the questions below. Your project submission will be evaluated based on the completion of the code and your responses to the questions.
Tip: Quoted sections like this will provide helpful instructions on how to navigate and use an iPython notebook.
Getting Started
To begin working with the RMS Titanic passenger data, we'll first need to import the functionality we need, and load our data into a pandas DataFrame.
Run the code cell below to load our data and display the first few entries (passengers) for examination using the .head() function.
Tip: You can run a code cell by clicking on the cell and using the keyboard shortcut Shift + Enter or Shift + Return. Alternatively, a code cell can be executed using the Play button in the hotbar after selecting it. Markdown cells (text cells like this one) can be edited by double-clicking, and saved using these same shortcuts. Markdown allows you to write easy-to-read plain text that can be converted to HTML.
End of explanation
# Store the 'Survived' feature in a new variable and remove it from the dataset
outcomes = full_data['Survived']
data = full_data.drop('Survived', axis = 1)
# Show the new dataset with 'Survived' removed
display(data.head())
Explanation: From a sample of the RMS Titanic data, we can see the various features present for each passenger on the ship:
- Survived: Outcome of survival (0 = No; 1 = Yes)
- Pclass: Socio-economic class (1 = Upper class; 2 = Middle class; 3 = Lower class)
- Name: Name of passenger
- Sex: Sex of the passenger
- Age: Age of the passenger (Some entries contain NaN)
- SibSp: Number of siblings and spouses of the passenger aboard
- Parch: Number of parents and children of the passenger aboard
- Ticket: Ticket number of the passenger
- Fare: Fare paid by the passenger
- Cabin Cabin number of the passenger (Some entries contain NaN)
- Embarked: Port of embarkation of the passenger (C = Cherbourg; Q = Queenstown; S = Southampton)
Since we're interested in the outcome of survival for each passenger or crew member, we can remove the Survived feature from this dataset and store it as its own separate variable outcomes. We will use these outcomes as our prediction targets.
Run the code block cell to remove Survived as a feature of the dataset and store it in outcomes.
End of explanation
def accuracy_score(truth, pred):
Returns accuracy score for input truth and predictions.
# Ensure that the number of predictions matches number of outcomes
if len(truth) == len(pred):
# Calculate and return the accuracy as a percent
return "Predictions have an accuracy of {:.2f}%.".format((truth == pred).mean()*100)
else:
return "Number of predictions does not match number of outcomes!"
# Test the 'accuracy_score' function
predictions = pd.Series(np.ones(5, dtype = int))
print accuracy_score(predictions, outcomes[:5])
Explanation: The very same sample of the RMS Titanic data now shows the Survived feature removed from the DataFrame. Note that data (the passenger data) and outcomes (the outcomes of survival) are now paired. That means for any passenger data.loc[i], they have the survival outcome outcome[i].
To measure the performance of our predictions, we need a metric to score our predictions against the true outcomes of survival. Since we are interested in how accurate our predictions are, we will calculate the proportion of passengers where our prediction of their survival is correct. Run the code cell below to create our accuracy_score function and test a prediction on the first five passengers.
Think: Out of the first five passengers, if we predict that all of them survived, what would you expect the accuracy of our predictions to be?
End of explanation
def predictions_0(data):
Model with no features. Always predicts a passenger did not survive.
predictions = []
for _, passenger in data.iterrows():
# Predict the survival of 'passenger'
predictions.append(0)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_0(data)
Explanation: Tip: If you save an iPython Notebook, the output from running code blocks will also be saved. However, the state of your workspace will be reset once a new session is started. Make sure that you run all of the code blocks from your previous session to reestablish variables and functions before picking up where you last left off.
Making Predictions
If we were told to make a prediction about any passenger aboard the RMS Titanic who we did not know anything about, then the best prediction we could make would be that they did not survive. This is because we can assume that a majority of the passengers as a whole did not survive the ship sinking.
The function below will always predict that a passenger did not survive.
End of explanation
print accuracy_score(outcomes, predictions)
Explanation: Question 1
Using the RMS Titanic data, how accurate would a prediction be that none of the passengers survived?
Hint: Run the code cell below to see the accuracy of this prediction.
End of explanation
survival_stats(data, outcomes, 'Sex')
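# Quick numeric cross-check (added; not part of the original project): survival rate by sex,
# computed directly from the full dataset with pandas.
display(full_data.groupby('Sex')['Survived'].mean())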
Explanation: Answer: 61.62%.
Let's take a look at whether the feature Sex has any indication of survival rates among passengers using the survival_stats function. This function is defined in the titanic_visualizations.py Python script included with this project. The first two parameters passed to the function are the RMS Titanic data and passenger survival outcomes, respectively. The third parameter indicates which feature we want to plot survival statistics across.
Run the code cell below to plot the survival outcomes of passengers based on their sex.
End of explanation
def predictions_1(data):
Model with one feature:
- Predict a passenger survived if they are female.
predictions = []
for _, passenger in data.iterrows():
# Remove the 'pass' statement below
# and write your prediction conditions here
if passenger['Sex'] == 'male':
predictions.append(0)
elif passenger['Sex'] == 'female':
predictions.append(1)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_1(data)
Explanation: Examining the survival statistics, a large majority of males did not survive the ship sinking. However, a majority of females did survive the ship sinking. Let's build on our previous prediction: If a passenger was female, then we will predict that they survived. Otherwise, we will predict the passenger did not survive.
Fill in the missing code below so that the function will make this prediction.
Hint: You can access the values of each feature for a passenger like a dictionary. For example, passenger['Sex'] is the sex of the passenger.
End of explanation
print accuracy_score(outcomes, predictions)
Explanation: Question 2
How accurate would a prediction be that all female passengers survived and the remaining passengers did not survive?
Hint: Run the code cell below to see the accuracy of this prediction.
End of explanation
survival_stats(data, outcomes, 'Age', ["Sex == 'male'"])
Explanation: Answer: 78.68%.
Using just the Sex feature for each passenger, we are able to increase the accuracy of our predictions by a significant margin. Now, let's consider using an additional feature to see if we can further improve our predictions. Consider, for example, all of the male passengers aboard the RMS Titanic: Can we find a subset of those passengers that had a higher rate of survival? Let's start by looking at the Age of each male, by again using the survival_stats function. This time, we'll use a fourth parameter to filter out the data so that only passengers with the Sex 'male' will be included.
Run the code cell below to plot the survival outcomes of male passengers based on their age.
End of explanation
def predictions_2(data):
    """Model with two features:
    - Predict a passenger survived if they are female.
    - Predict a passenger survived if they are male and younger than 10.
    """
predictions = []
for _, passenger in data.iterrows():
# Remove the 'pass' statement below
# and write your prediction conditions here
if passenger['Sex'] == 'male':
if passenger['Age'] < 10:
predictions.append(1)
else:
predictions.append(0)
elif passenger['Sex'] == 'female':
predictions.append(1)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_2(data)
Explanation: Examining the survival statistics, the majority of males younger than 10 survived the ship sinking, whereas most males aged 10 or older did not survive the ship sinking. Let's continue to build on our previous prediction: If a passenger was female, then we will predict they survive. If a passenger was male and younger than 10, then we will also predict they survive. Otherwise, we will predict they do not survive.
Fill in the missing code below so that the function will make this prediction.
Hint: You can start your implementation of this function using the prediction code you wrote earlier from predictions_1.
End of explanation
print(accuracy_score(outcomes, predictions))
Explanation: Question 3
How accurate would a prediction be that all female passengers and all male passengers younger than 10 survived?
Hint: Run the code cell below to see the accuracy of this prediction.
End of explanation
survival_stats(data, outcomes, 'Pclass', ["Sex == 'female'", "SibSp == 0"])
survival_stats(data, outcomes, 'Pclass', ["Sex == 'female'", "SibSp > 0"])
Explanation: Answer: 79.35%.
Adding the feature Age as a condition in conjunction with Sex improves the accuracy by a small margin more than with simply using the feature Sex alone. Now it's your turn: Find a series of features and conditions to split the data on to obtain an outcome prediction accuracy of at least 80%. This may require multiple features and multiple levels of conditional statements to succeed. You can use the same feature multiple times with different conditions.
Pclass, Sex, Age, SibSp, and Parch are some suggested features to try.
Use the survival_stats function below to examine various survival statistics.
Hint: To use multiple filter conditions, put each condition in the list passed as the last argument. Example: ["Sex == 'male'", "Age < 18"]
End of explanation
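# A few more exploratory calls of the kind used while searching for a good
# split. These particular filters are only illustrative suggestions; the exact
# combinations you inspect may differ.
survival_stats(data, outcomes, 'Age', ["Sex == 'male'", "Pclass == 1"])
survival_stats(data, outcomes, 'Parch', ["Sex == 'female'", "Pclass == 3"])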
def predictions_3(data):
    """Model with multiple features. Makes a prediction with an accuracy of at least 80%."""
predictions = []
for _, passenger in data.iterrows():
# Remove the 'pass' statement below
# and write your prediction conditions here
if passenger['Sex'] == 'male':
if passenger['Age'] < 10:
predictions.append(1)
else:
if passenger['Pclass'] == 1:
if passenger['SibSp'] == 0:
if passenger['Age'] >= 10 and passenger['Age'] <= 40:
predictions.append(1)
else:
predictions.append(0)
elif passenger['SibSp'] > 0:
if passenger['Age'] >= 20 and passenger['Age'] <= 50:
predictions.append(1)
else:
predictions.append(0)
else:
predictions.append(0)
elif passenger['Sex'] == 'female':
if passenger['Pclass'] == 3:
if passenger['SibSp'] > 0:
predictions.append(0)
else:
predictions.append(1)
else:
predictions.append(1)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_3(data)
Explanation: After exploring the survival statistics visualization, fill in the missing code below so that the function will make your prediction.
Make sure to keep track of the various features and conditions you tried before arriving at your final prediction model.
Hint: You can start your implementation of this function using the prediction code you wrote earlier from predictions_2.
End of explanation
print(accuracy_score(outcomes, predictions))
Explanation: Question 4
Describe the steps you took to implement the final prediction model so that it got an accuracy of at least 80%. What features did you look at? Were certain features more informative than others? Which conditions did you use to split the survival outcomes in the data? How accurate are your predictions?
Hint: Run the code cell below to see the accuracy of your predictions.
End of explanation |
14,542 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Compute induced power in the source space with dSPM
Returns STC files, i.e. source estimates of induced power,
for different bands in the source space. The inverse method
is linear, based on the dSPM inverse operator.
Step1: Set parameters
Step2: plot mean power | Python Code:
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, source_band_induced_power
print(__doc__)
Explanation: Compute induced power in the source space with dSPM
Returns STC files, i.e. source estimates of induced power,
for different bands in the source space. The inverse method
is linear, based on the dSPM inverse operator.
End of explanation
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
tmin, tmax, event_id = -0.2, 0.5, 1
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, include=include, exclude='bads')
# Load condition 1
event_id = 1
events = events[:10] # take 10 events to keep the computation time low
# Use linear detrend to reduce any edge artifacts
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6),
preload=True, detrend=1)
# Compute a source estimate per frequency band
bands = dict(alpha=[9, 11], beta=[18, 22])
stcs = source_band_induced_power(epochs, inverse_operator, bands, n_cycles=2,
use_fft=False, n_jobs=1)
for b, stc in stcs.items():
stc.save('induced_power_%s' % b)
Explanation: Set parameters
End of explanation
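# Optional sanity check (a sketch): the loop above wrote one pair of STC files
# per band into the working directory, and they can be read back with
# mne.read_source_estimate before plotting.
stc_alpha = mne.read_source_estimate('induced_power_alpha')
print(stc_alpha)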
plt.plot(stcs['alpha'].times, stcs['alpha'].data.mean(axis=0), label='Alpha')
plt.plot(stcs['beta'].times, stcs['beta'].data.mean(axis=0), label='Beta')
plt.xlabel('Time (ms)')
plt.ylabel('Power')
plt.legend()
plt.title('Mean source induced power')
plt.show()
Explanation: plot mean power
End of explanation |
14,543 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Execution Configuration Options
Nipype gives you many liberties on how to create workflows, but the execution of them uses a lot of default parameters. But you have of course all the freedom to change them as you like.
Nipype looks for the configuration options in the local folder under the name nipype.cfg and in ~/.nipype/nipype.cfg (in this order). It can be divided into Logging and Execution options. A few of the possible options are the following
Step1: At the workflow level
Step2: At the node level | Python Code:
from nipype import config, logging
import os
os.makedirs('/output/log_folder', exist_ok=True)
os.makedirs('/output/crash_folder', exist_ok=True)
config_dict={'execution': {'remove_unnecessary_outputs': 'true',
'keep_inputs': 'false',
'poll_sleep_duration': '60',
'stop_on_first_rerun': 'false',
'hash_method': 'timestamp',
'local_hash_check': 'true',
'create_report': 'true',
'crashdump_dir': '/output/crash_folder',
'use_relative_paths': 'false',
'job_finished_timeout': '5'},
'logging': {'workflow_level': 'INFO',
'filemanip_level': 'INFO',
'interface_level': 'INFO',
'log_directory': '/output/log_folder',
'log_to_file': 'true'}}
config.update_config(config_dict)
logging.update_logging(config)
Explanation: Execution Configuration Options
Nipype gives you many liberties on how to create workflows, but the execution of them uses a lot of default parameters. But you have of course all the freedom to change them as you like.
Nipype looks for the configuration options in the local folder under the name nipype.cfg and in ~/.nipype/nipype.cfg (in this order). It can be divided into Logging and Execution options. A few of the possible options are the following:
Logging
workflow_level: How detailed the logs regarding workflow should be
log_to_file: Indicates whether logging should also send the output to a file
Execution
stop_on_first_crash: Should the workflow stop upon first node crashing or try to execute as many nodes as possible?
remove_unnecessary_outputs: This will remove any interface outputs not needed by the workflow. If the required outputs from a node changes, rerunning the workflow will rerun the node. Outputs of leaf nodes (nodes whose outputs are not connected to any other nodes) will never be deleted independent of this parameter.
use_relative_paths: Should the paths stored in results (and used to look for inputs) be relative or absolute. Relative paths allow moving the whole working directory around but may cause problems with symlinks.
job_finished_timeout: When batch jobs are submitted through SGE/PBS/Condor, they could be killed externally. Nipype checks to see if a results file exists to determine if the node has completed. This timeout determines for how long this check is done after a job finish is detected. (float in seconds; default value: 5)
poll_sleep_duration: This controls how long the job submission loop will sleep between submitting all pending jobs and checking for job completion. To be nice to cluster schedulers, the default is set to 2 seconds.
For the full list, see Configuration File.
Global, workflow & node level
The configuration options can be changed globally (i.e. for all workflows), for just a workflow, or for just a node. The implementations look as follows (note that you should first create directories if you want to change crashdump_dir and log_directory):
At the global level:
End of explanation
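# The same options can also be kept in a nipype.cfg file, as mentioned above.
# A minimal sketch of writing such a file (the paths and values here are
# illustrative assumptions, not required settings):
import os
cfg_text = (
    "[logging]\n"
    "workflow_level = INFO\n"
    "log_to_file = true\n"
    "log_directory = /output/log_folder\n"
    "\n"
    "[execution]\n"
    "stop_on_first_crash = true\n"
    "crashdump_dir = /output/crash_folder\n"
)
os.makedirs(os.path.expanduser('~/.nipype'), exist_ok=True)
with open(os.path.expanduser('~/.nipype/nipype.cfg'), 'w') as f:
    f.write(cfg_text)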
from nipype import Workflow
wf = Workflow(name="config_test")
# Change execution parameters
wf.config['execution']['stop_on_first_crash'] = 'true'
# Change logging parameters
wf.config['logging'] = {'workflow_level' : 'DEBUG',
'filemanip_level' : 'DEBUG',
'interface_level' : 'DEBUG',
'log_to_file' : 'True',
'log_directory' : '/output/log_folder'}
Explanation: At the workflow level:
End of explanation
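# The workflow-level settings above apply only to this workflow and take
# precedence over the global configuration when it runs. wf.config is a plain
# dictionary, so the effective settings can simply be printed (quick sketch):
print(wf.config)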
from nipype import Node
from nipype.interfaces.fsl import BET
bet = Node(BET(), name="config_test")
bet.config = {'execution': {'keep_unnecessary_outputs': 'false'}}
Explanation: At the node level:
End of explanation |
14,544 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Let's say we want to prepare data and try some scalers and classifiers for prediction in a classification problem. We will tune the parameters of the classifiers using the grid search technique.
Data preparing
Step1: Setting steps for our pipelines and parameters for grid search
Step2: Setting a cross-validation for grid searching of hyperparameters and for evaluation of models with obtained hyperparameters.
Step3: Creating a plan of our research
Step4: To tune the parameters of the models and evaluate them, run | Python Code:
from sklearn.datasets import make_classification
X, y = make_classification()
Explanation: Let's say we want to prepare data and try some scalers and classifiers for prediction in a classification problem. We will tune the parameters of the classifiers using the grid search technique.
Data preparing:
End of explanation
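# Quick sanity check of the toy data (make_classification() defaults to
# 100 samples, 20 features and a binary target).
print(X.shape, y.shape)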
from reskit.core import Pipeliner
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
classifiers = [('LR', LogisticRegression()),
('SVC', SVC())]
scalers = [('standard', StandardScaler()),
('minmax', MinMaxScaler())]
steps = [('scaler', scalers),
('classifier', classifiers)]
param_grid = {'LR': {'penalty': ['l1', 'l2']},
'SVC': {'kernel': ['linear', 'poly', 'rbf', 'sigmoid']}}
Explanation: Setting steps for our pipelines and parameters for grid search:
End of explanation
from sklearn.model_selection import StratifiedKFold
grid_cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
eval_cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)
Explanation: Setting a cross-validation for grid searching of hyperparameters and for evaluation of models with obtained hyperparameters.
End of explanation
pipe = Pipeliner(steps=steps, grid_cv=grid_cv, eval_cv=eval_cv, param_grid=param_grid)
pipe.plan_table
Explanation: Creating a plan of our research:
End of explanation
pipe.get_results(X, y, scoring=['roc_auc'])
Explanation: To tune the parameters of the models and evaluate them, run:
End of explanation |
14,545 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
ES-DOC CMIP6 Model Properties - Atmoschem
MIP Era
Step1: Document Authors
Set document authors
Step2: Document Contributors
Specify document contributors
Step3: Document Publication
Specify document publication status
Step4: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Key Properties --> Timestep Framework
4. Key Properties --> Timestep Framework --> Split Operator Order
5. Key Properties --> Tuning Applied
6. Grid
7. Grid --> Resolution
8. Transport
9. Emissions Concentrations
10. Emissions Concentrations --> Surface Emissions
11. Emissions Concentrations --> Atmospheric Emissions
12. Emissions Concentrations --> Concentrations
13. Gas Phase Chemistry
14. Stratospheric Heterogeneous Chemistry
15. Tropospheric Heterogeneous Chemistry
16. Photo Chemistry
17. Photo Chemistry --> Photolysis
1. Key Properties
Key properties of the atmospheric chemistry
1.1. Model Overview
Is Required
Step5: 1.2. Model Name
Is Required
Step6: 1.3. Chemistry Scheme Scope
Is Required
Step7: 1.4. Basic Approximations
Is Required
Step8: 1.5. Prognostic Variables Form
Is Required
Step9: 1.6. Number Of Tracers
Is Required
Step10: 1.7. Family Approach
Is Required
Step11: 1.8. Coupling With Chemical Reactivity
Is Required
Step12: 2. Key Properties --> Software Properties
Software properties of aerosol code
2.1. Repository
Is Required
Step13: 2.2. Code Version
Is Required
Step14: 2.3. Code Languages
Is Required
Step15: 3. Key Properties --> Timestep Framework
Timestepping in the atmospheric chemistry model
3.1. Method
Is Required
Step16: 3.2. Split Operator Advection Timestep
Is Required
Step17: 3.3. Split Operator Physical Timestep
Is Required
Step18: 3.4. Split Operator Chemistry Timestep
Is Required
Step19: 3.5. Split Operator Alternate Order
Is Required
Step20: 3.6. Integrated Timestep
Is Required
Step21: 3.7. Integrated Scheme Type
Is Required
Step22: 4. Key Properties --> Timestep Framework --> Split Operator Order
**
4.1. Turbulence
Is Required
Step23: 4.2. Convection
Is Required
Step24: 4.3. Precipitation
Is Required
Step25: 4.4. Emissions
Is Required
Step26: 4.5. Deposition
Is Required
Step27: 4.6. Gas Phase Chemistry
Is Required
Step28: 4.7. Tropospheric Heterogeneous Phase Chemistry
Is Required
Step29: 4.8. Stratospheric Heterogeneous Phase Chemistry
Is Required
Step30: 4.9. Photo Chemistry
Is Required
Step31: 4.10. Aerosols
Is Required
Step32: 5. Key Properties --> Tuning Applied
Tuning methodology for atmospheric chemistry component
5.1. Description
Is Required
Step33: 5.2. Global Mean Metrics Used
Is Required
Step34: 5.3. Regional Metrics Used
Is Required
Step35: 5.4. Trend Metrics Used
Is Required
Step36: 6. Grid
Atmospheric chemistry grid
6.1. Overview
Is Required
Step37: 6.2. Matches Atmosphere Grid
Is Required
Step38: 7. Grid --> Resolution
Resolution in the atmospheric chemistry grid
7.1. Name
Is Required
Step39: 7.2. Canonical Horizontal Resolution
Is Required
Step40: 7.3. Number Of Horizontal Gridpoints
Is Required
Step41: 7.4. Number Of Vertical Levels
Is Required
Step42: 7.5. Is Adaptive Grid
Is Required
Step43: 8. Transport
Atmospheric chemistry transport
8.1. Overview
Is Required
Step44: 8.2. Use Atmospheric Transport
Is Required
Step45: 8.3. Transport Details
Is Required
Step46: 9. Emissions Concentrations
Atmospheric chemistry emissions
9.1. Overview
Is Required
Step47: 10. Emissions Concentrations --> Surface Emissions
**
10.1. Sources
Is Required
Step48: 10.2. Method
Is Required
Step49: 10.3. Prescribed Climatology Emitted Species
Is Required
Step50: 10.4. Prescribed Spatially Uniform Emitted Species
Is Required
Step51: 10.5. Interactive Emitted Species
Is Required
Step52: 10.6. Other Emitted Species
Is Required
Step53: 11. Emissions Concentrations --> Atmospheric Emissions
TO DO
11.1. Sources
Is Required
Step54: 11.2. Method
Is Required
Step55: 11.3. Prescribed Climatology Emitted Species
Is Required
Step56: 11.4. Prescribed Spatially Uniform Emitted Species
Is Required
Step57: 11.5. Interactive Emitted Species
Is Required
Step58: 11.6. Other Emitted Species
Is Required
Step59: 12. Emissions Concentrations --> Concentrations
TO DO
12.1. Prescribed Lower Boundary
Is Required
Step60: 12.2. Prescribed Upper Boundary
Is Required
Step61: 13. Gas Phase Chemistry
Atmospheric chemistry transport
13.1. Overview
Is Required
Step62: 13.2. Species
Is Required
Step63: 13.3. Number Of Bimolecular Reactions
Is Required
Step64: 13.4. Number Of Termolecular Reactions
Is Required
Step65: 13.5. Number Of Tropospheric Heterogenous Reactions
Is Required
Step66: 13.6. Number Of Stratospheric Heterogenous Reactions
Is Required
Step67: 13.7. Number Of Advected Species
Is Required
Step68: 13.8. Number Of Steady State Species
Is Required
Step69: 13.9. Interactive Dry Deposition
Is Required
Step70: 13.10. Wet Deposition
Is Required
Step71: 13.11. Wet Oxidation
Is Required
Step72: 14. Stratospheric Heterogeneous Chemistry
Atmospheric chemistry startospheric heterogeneous chemistry
14.1. Overview
Is Required
Step73: 14.2. Gas Phase Species
Is Required
Step74: 14.3. Aerosol Species
Is Required
Step75: 14.4. Number Of Steady State Species
Is Required
Step76: 14.5. Sedimentation
Is Required
Step77: 14.6. Coagulation
Is Required
Step78: 15. Tropospheric Heterogeneous Chemistry
Atmospheric chemistry tropospheric heterogeneous chemistry
15.1. Overview
Is Required
Step79: 15.2. Gas Phase Species
Is Required
Step80: 15.3. Aerosol Species
Is Required
Step81: 15.4. Number Of Steady State Species
Is Required
Step82: 15.5. Interactive Dry Deposition
Is Required
Step83: 15.6. Coagulation
Is Required
Step84: 16. Photo Chemistry
Atmospheric chemistry photo chemistry
16.1. Overview
Is Required
Step85: 16.2. Number Of Reactions
Is Required
Step86: 17. Photo Chemistry --> Photolysis
Photolysis scheme
17.1. Method
Is Required
Step87: 17.2. Environmental Conditions
Is Required | Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ipsl', 'sandbox-2', 'atmoschem')
Explanation: ES-DOC CMIP6 Model Properties - Atmoschem
MIP Era: CMIP6
Institute: IPSL
Source ID: SANDBOX-2
Topic: Atmoschem
Sub-Topics: Transport, Emissions Concentrations, Gas Phase Chemistry, Stratospheric Heterogeneous Chemistry, Tropospheric Heterogeneous Chemistry, Photo Chemistry.
Properties: 84 (39 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-20 15:02:45
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
Explanation: Document Authors
Set document authors
End of explanation
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
Explanation: Document Contributors
Specify document contributors
End of explanation
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
Explanation: Document Publication
Specify document publication status
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Key Properties --> Timestep Framework
4. Key Properties --> Timestep Framework --> Split Operator Order
5. Key Properties --> Tuning Applied
6. Grid
7. Grid --> Resolution
8. Transport
9. Emissions Concentrations
10. Emissions Concentrations --> Surface Emissions
11. Emissions Concentrations --> Atmospheric Emissions
12. Emissions Concentrations --> Concentrations
13. Gas Phase Chemistry
14. Stratospheric Heterogeneous Chemistry
15. Tropospheric Heterogeneous Chemistry
16. Photo Chemistry
17. Photo Chemistry --> Photolysis
1. Key Properties
Key properties of the atmospheric chemistry
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of atmospheric chemistry model.
End of explanation
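# Illustrative note (not an actual model answer): a STRING property such as 1.1
# is completed by replacing the placeholder above with free text, e.g.
#     DOC.set_value("Free-text overview of the atmospheric chemistry scheme.")
# ENUM properties further below must instead be given one of the choices listed
# verbatim in their own cell.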
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of atmospheric chemistry model code.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 1.3. Chemistry Scheme Scope
Is Required: TRUE Type: ENUM Cardinality: 1.N
Atmospheric domains covered by the atmospheric chemistry model
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: STRING Cardinality: 1.1
Basic approximations made in the atmospheric chemistry model
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 1.5. Prognostic Variables Form
Is Required: TRUE Type: ENUM Cardinality: 1.N
Form of prognostic variables in the atmospheric chemistry component.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 1.6. Number Of Tracers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of advected tracers in the atmospheric chemistry model
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 1.7. Family Approach
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Atmospheric chemistry calculations (not advection) generalized into families of species?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 1.8. Coupling With Chemical Reactivity
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Atmospheric chemistry transport scheme turbulence is couple with chemical reactivity?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 2. Key Properties --> Software Properties
Software properties of aerosol code
2.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 2.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 2.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 3. Key Properties --> Timestep Framework
Timestepping in the atmospheric chemistry model
3.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Mathematical method deployed to solve the evolution of a given variable
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 3.2. Split Operator Advection Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for chemical species advection (in seconds)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 3.3. Split Operator Physical Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for physics (in seconds).
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 3.4. Split Operator Chemistry Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for chemistry (in seconds).
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 3.5. Split Operator Alternate Order
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 3.6. Integrated Timestep
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Timestep for the atmospheric chemistry model (in seconds)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 3.7. Integrated Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the type of timestep scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 4. Key Properties --> Timestep Framework --> Split Operator Order
**
4.1. Turbulence
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for turbulence scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 4.2. Convection
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for convection scheme This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 4.3. Precipitation
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for precipitation scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 4.4. Emissions
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for emissions scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 4.5. Deposition
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for deposition scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 4.6. Gas Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for gas phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 4.7. Tropospheric Heterogeneous Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for tropospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 4.8. Stratospheric Heterogeneous Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for stratospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 4.9. Photo Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for photo chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 4.10. Aerosols
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for aerosols scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 5. Key Properties --> Tuning Applied
Tuning methodology for atmospheric chemistry component
5.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 5.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics of the global mean state used in tuning model/component
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 5.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics of mean state used in tuning model/component
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 5.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics used in tuning model/component
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 6. Grid
Atmospheric chemistry grid
6.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the atmopsheric chemistry grid
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 6.2. Matches Atmosphere Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the atmospheric chemistry grid match the atmosphere grid?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 7. Grid --> Resolution
Resolution in the atmospheric chemistry grid
7.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 7.2. Canonical Horizontal Resolution
Is Required: FALSE Type: STRING Cardinality: 0.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 7.3. Number Of Horizontal Gridpoints
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 7.4. Number Of Vertical Levels
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Number of vertical levels resolved on computational grid.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 7.5. Is Adaptive Grid
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Default is False. Set true if grid resolution changes during execution.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 8. Transport
Atmospheric chemistry transport
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview of transport implementation
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 8.2. Use Atmospheric Transport
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is transport handled by the atmosphere, rather than within atmospheric chemistry?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 8.3. Transport Details
Is Required: FALSE Type: STRING Cardinality: 0.1
If transport is handled within the atmospheric chemistry scheme, describe it.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 9. Emissions Concentrations
Atmospheric chemistry emissions
9.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview atmospheric chemistry emissions
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 10. Emissions Concentrations --> Surface Emissions
**
10.1. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of the chemical species emitted at the surface that are taken into account in the emissions scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 10.2. Method
Is Required: FALSE Type: ENUM Cardinality: 0.N
Methods used to define chemical species emitted directly into model layers above the surface (several methods allowed because the different species may not use the same method).
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 10.3. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and prescribed via a climatology, and the nature of the climatology (E.g. CO (monthly), C2H6 (constant))
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 10.4. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and prescribed as spatially uniform
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 10.5. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and specified via an interactive method
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 10.6. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and specified via any other method
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 11. Emissions Concentrations --> Atmospheric Emissions
TO DO
11.1. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of chemical species emitted in the atmosphere that are taken into account in the emissions scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 11.2. Method
Is Required: FALSE Type: ENUM Cardinality: 0.N
Methods used to define the chemical species emitted in the atmosphere (several methods allowed because the different species may not use the same method).
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 11.3. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and prescribed via a climatology (E.g. CO (monthly), C2H6 (constant))
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 11.4. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and prescribed as spatially uniform
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 11.5. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and specified via an interactive method
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 11.6. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and specified via an "other method"
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 12. Emissions Concentrations --> Concentrations
TO DO
12.1. Prescribed Lower Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the lower boundary.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 12.2. Prescribed Upper Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the upper boundary.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 13. Gas Phase Chemistry
Atmospheric chemistry transport
13.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview gas phase atmospheric chemistry
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 13.2. Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Species included in the gas phase chemistry scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 13.3. Number Of Bimolecular Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of bi-molecular reactions in the gas phase chemistry scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 13.4. Number Of Termolecular Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of ter-molecular reactions in the gas phase chemistry scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 13.5. Number Of Tropospheric Heterogenous Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the tropospheric heterogeneous chemistry scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 13.6. Number Of Stratospheric Heterogenous Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the stratospheric heterogeneous chemistry scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 13.7. Number Of Advected Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of advected species in the gas phase chemistry scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 13.8. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of gas phase species for which the concentration is updated in the chemical solver assuming photochemical steady state
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 13.9. Interactive Dry Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 13.10. Wet Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is wet deposition included? Wet deposition describes the moist processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 13.11. Wet Oxidation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is wet oxidation included? Oxidation describes the loss of electrons or an increase in oxidation state by a molecule
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 14. Stratospheric Heterogeneous Chemistry
Atmospheric chemistry startospheric heterogeneous chemistry
14.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview stratospheric heterogenous atmospheric chemistry
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
Explanation: 14.2. Gas Phase Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Gas phase species included in the stratospheric heterogeneous chemistry scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
Explanation: 14.3. Aerosol Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Aerosol species included in the stratospheric heterogeneous chemistry scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 14.4. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of steady state species in the stratospheric heterogeneous chemistry scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 14.5. Sedimentation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is sedimentation included in the stratospheric heterogeneous chemistry scheme or not?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 14.6. Coagulation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is coagulation included in the stratospheric heterogeneous chemistry scheme or not?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 15. Tropospheric Heterogeneous Chemistry
Atmospheric chemistry tropospheric heterogeneous chemistry
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview tropospheric heterogenous atmospheric chemistry
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 15.2. Gas Phase Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of gas phase species included in the tropospheric heterogeneous chemistry scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
Explanation: 15.3. Aerosol Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Aerosol species included in the tropospheric heterogeneous chemistry scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 15.4. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of steady state species in the tropospheric heterogeneous chemistry scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 15.5. Interactive Dry Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 15.6. Coagulation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is coagulation included in the tropospheric heterogeneous chemistry scheme or not?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 16. Photo Chemistry
Atmospheric chemistry photo chemistry
16.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview atmospheric photo chemistry
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 16.2. Number Of Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the photo-chemistry scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
Explanation: 17. Photo Chemistry --> Photolysis
Photolysis scheme
17.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Photolysis scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 17.2. Environmental Conditions
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any environmental conditions taken into account by the photolysis scheme (e.g. whether pressure- and temperature-sensitive cross-sections and quantum yields in the photolysis calculations are modified to reflect the modelled conditions).
End of explanation |
14,546 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Deep learning for Natural Language Processing
Simple text representations, bag of words
Word embedding and... not just another word2vec this time
1-dimensional convolutions for text
Aggregating several data sources "the hard way"
Solving ~somewhat~ real ML problem with ~almost~ end-to-end deep learning
Special thanks to Irina Golzmann for help with technical part.
NLTK
You will require nltk v3.2 to solve this assignment
It is really important that the version is 3.2, otherwize russian tokenizer might not work
Install/update
* sudo pip install --upgrade nltk==3.2
* If you don't remember when was the last pip upgrade, sudo pip install --upgrade pip
If for some reason you can't or won't switch to nltk v3.2, just make sure that russian words are tokenized properly with RegeExpTokenizer.
For students with low-RAM machines
This assignment can be accomplished with even the low-tier hardware (<= 4Gb RAM)
If that is the case, turn flag "low_RAM_mode" below to True
If you have around 8GB memory, it is unlikely that you will feel constrained by memory.
In case you are using a PC from last millenia, consider setting very_low_RAM=True
Step1: Dataset
Ex-kaggle-competition on prohibited content detection
There goes the description - https
Step2:
Step3: Balance-out the classes
Vast majority of data samples are non-prohibited
250k banned out of 4kk
Let's just downsample random 250k legal samples to make further steps less computationally demanding
If you aim for high Kaggle score, consider a smarter approach to that.
Step4: Tokenizing
First, we create a dictionary of all existing words.
Assign each word a number - it's Id
Step5: Remove rare tokens
We are unlikely to make use of words that are only seen a few times throughout the corpora.
Again, if you want to beat Kaggle competition metrics, consider doing something better.
Step6: Replace words with IDs
Set a maximum length for titles and descriptions.
* If string is longer that that limit - crop it, if less - pad with zeros.
* Thus we obtain a matrix of size [n_samples]x[max_length]
* Element at i,j - is an identifier of word j within sample i
Step7: Data format examples
Step8: As you can see, our preprocessing is somewhat crude. Let us see if that is enough for our network
Non-sequences
Some data features are not text samples. E.g. price, # urls, category, etc
They require a separate preprocessing.
Step9: Split data into training and test
Step10: Save preprocessed data [optional]
The next tab can be used to stash all the essential data matrices and get rid of the rest of the data.
Highly recommended if you have less than 1.5GB RAM left
To do that, you need to first run it with save_prepared_data=True, then restart the notebook and only run this tab with read_prepared_data=True.
Step11: Train the monster
Since we have several data sources, our neural network may differ from what you used to work with.
Separate input for titles
cnn+global max or RNN
Separate input for description
cnn+global max or RNN
Separate input for categorical features
regular dense (fully-connected) layers, or any tricks you like
These three inputs must be blended somehow - concatenated or added.
Output
Step12: NN architecture
Step13: Loss function
The standard way
Step14: Deterministic prediction
In case we use stochastic elements, e.g. dropout or noise
Compile a separate set of functions with deterministic prediction (deterministic = True)
Unless you think there's no need for dropout there ofc. Btw is there?
Step15: Coffee-lation
Step16: Training loop
The regular way with loops over minibatches
Since the dataset is huge, we define an epoch as some fixed amount of samples instead of the whole dataset
Step17: Tweaking guide
batch_size - how many samples are processed per function call
optimization gets slower, but more stable, as you increase it.
May consider increasing it halfway through training
minibatches_per_epoch - max amount of minibatches per epoch
Does not affect training. Lesser value means more frequent and less stable printing
Setting it to less than 10 is only meaningful if you want to make sure your NN does not break down after one epoch
n_epochs - total amount of epochs to train for
n_epochs = 10**10 and manual interrupting is still an option
Tips
Step18: Final evaluation
Evaluate network over the entire test set | Python Code:
low_RAM_mode = True
very_low_RAM = False #If you have <3GB RAM, set BOTH to true
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
Explanation: Deep learning for Natural Language Processing
Simple text representations, bag of words
Word embedding and... not just another word2vec this time
1-dimensional convolutions for text
Aggregating several data sources "the hard way"
Solving ~somewhat~ real ML problem with ~almost~ end-to-end deep learning
Special thanks to Irina Golzmann for help with technical part.
NLTK
You will require nltk v3.2 to solve this assignment
It is really important that the version is 3.2, otherwise the Russian tokenizer might not work
Install/update
* sudo pip install --upgrade nltk==3.2
* If you don't remember when was the last pip upgrade, sudo pip install --upgrade pip
If for some reason you can't or won't switch to nltk v3.2, just make sure that Russian words are tokenized properly with RegexpTokenizer.
For students with low-RAM machines
This assignment can be accomplished even with low-tier hardware (<= 4GB RAM)
If that is the case, turn flag "low_RAM_mode" below to True
If you have around 8GB memory, it is unlikely that you will feel constrained by memory.
In case you are using a PC from the last millennium, consider setting very_low_RAM=True
End of explanation
if not low_RAM_mode:
# a lot of ram
df = pd.read_csv("avito_train.tsv",sep='\t')
else:
#around 4GB RAM
df = pd.read_csv("avito_train_1kk.tsv",sep='\t')
print df.shape, df.is_blocked.mean()
df[:5]
Explanation: Dataset
Ex-kaggle-competition on prohibited content detection
There goes the description - https://www.kaggle.com/c/avito-prohibited-content
Download
High-RAM mode,
* Download avito_train.tsv from competition data files
Low-RAM-mode,
* Download downsampled dataset from here
* archive https://yadi.sk/d/l0p4lameqw3W8
* raw https://yadi.sk/d/I1v7mZ6Sqw2WK (in case you feel masochistic)
What's inside
Different kinds of features:
* 2 text fields - title and description
* Special features - price, number of e-mails, phones, etc
* Category and subcategory - unsurprisingly, categorical features
* Attributes - more factors
Only 1 binary target whether or not such advertisement contains prohibited materials
* criminal, misleading, human reproduction-related, etc
* diving into the data may result in prolonged sleep disorders
End of explanation
print "Blocked ratio",df.is_blocked.mean()
print "Count:",len(df)
Explanation:
End of explanation
#downsample
< downsample data so that both classes have approximately equal ratios>
df = <downsampled dataset>
print "Blocked ratio:",df.is_blocked.mean()
print "Count:",len(df)
assert df.is_blocked.mean() < 0.51
assert df.is_blocked.mean() > 0.49
assert len(df) <= 560000
print "All tests passed"
#In case your RAM-o-meter is in the red
if very_low_RAM:
    df = df[::2]
Explanation: Balance-out the classes
Vast majority of data samples are non-prohibited
250k banned out of 4kk
Let's just downsample random 250k legal samples to make further steps less computationally demanding
If you aim for high Kaggle score, consider a smarter approach to that.
End of explanation
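One possible way to fill in the downsampling placeholder above is sketched below; it is only one option (column names follow the cell above, random_state is arbitrary):
blocked = df[df.is_blocked == 1]
legal = df[df.is_blocked == 0].sample(n=len(blocked), random_state=42)
df = pd.concat([blocked, legal]).sample(frac=1, random_state=42)  # shuffle the rows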
from nltk.tokenize import RegexpTokenizer
from collections import Counter,defaultdict
tokenizer = RegexpTokenizer(r"\w+")
#Dictionary of tokens
token_counts = Counter()
#All texts
all_texts = np.hstack([df.description.values,df.title.values])
#Compute token frequencies
for s in all_texts:
if type(s) is not str:
continue
s = s.decode('utf8').lower()
tokens = tokenizer.tokenize(s)
for token in tokens:
token_counts[token] +=1
Explanation: Tokenizing
First, we create a dictionary of all existing words.
Assign each word a number - its ID
End of explanation
#Word frequency distribution, just for kicks
_=plt.hist(token_counts.values(),range=[0,50],bins=50)
#Select only the tokens that had at least 10 occurrences in the corpora.
#Use token_counts.
min_count = 10
tokens = <tokens from token_counts keys that had at least min_count occurences throughout the dataset>
token_to_id = {t:i+1 for i,t in enumerate(tokens)}
null_token = "NULL"
token_to_id[null_token] = 0
print "# Tokens:",len(token_to_id)
if len(token_to_id) < 30000:
print "Alarm! It seems like there are too few tokens. Make sure you updated NLTK and applied correct thresholds -- unless you now what you're doing, ofc"
if len(token_to_id) > 1000000:
print "Alarm! Too many tokens. You might have messed up when pruning rare ones -- unless you know what you're doin' ofc"
Explanation: Remove rare tokens
We are unlikely to make use of words that are only seen a few times throughout the corpora.
Again, if you want to beat Kaggle competition metrics, consider doing something better.
End of explanation
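A minimal sketch for the token-filtering placeholder above (any equivalent filter works):
tokens = [t for t, c in token_counts.items() if c >= min_count]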
def vectorize(strings, token_to_id, max_len=150):
token_matrix = []
for s in strings:
if type(s) is not str:
token_matrix.append([0]*max_len)
continue
s = s.decode('utf8').lower()
tokens = tokenizer.tokenize(s)
token_ids = map(lambda token: token_to_id.get(token,0), tokens)[:max_len]
token_ids += [0]*(max_len - len(token_ids))
token_matrix.append(token_ids)
return np.array(token_matrix)
desc_tokens = vectorize(df.description.values,token_to_id,max_len = 150)
title_tokens = vectorize(df.title.values,token_to_id,max_len = 15)
Explanation: Replace words with IDs
Set a maximum length for titles and descriptions.
* If a string is longer than that limit - crop it, if less - pad with zeros.
* Thus we obtain a matrix of size [n_samples]x[max_length]
* Element at i,j - is an identifier of word j within sample i
End of explanation
print "Размер матрицы:",title_tokens.shape
for title, tokens in zip(df.title.values[:3],title_tokens[:3]):
print title,'->', tokens[:10],'...'
Explanation: Data format examples
End of explanation
#All numeric features
df_numerical_features = df[["phones_cnt","emails_cnt","urls_cnt","price"]]
#One-hot-encoded category and subcategory
from sklearn.feature_extraction import DictVectorizer
categories = []
data_cat_subcat = df[["category","subcategory"]].values
categories = [A list of dictionaries {"category":category_name, "subcategory":subcategory_name} for each data sample]
vectorizer = DictVectorizer(sparse=False)
cat_one_hot = vectorizer.fit_transform(categories)
cat_one_hot = pd.DataFrame(cat_one_hot,columns=vectorizer.feature_names_)
df_non_text = pd.merge(
df_numerical_features,cat_one_hot,on = np.arange(len(cat_one_hot))
)
del df_non_text["key_0"]
Explanation: As you can see, our preprocessing is somewhat crude. Let us see if that is enough for our network
Non-sequences
Some data features are not text samples. E.g. price, # urls, category, etc
They require a separate preprocessing.
End of explanation
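The categories placeholder in the cell above can be filled, for example, from the data_cat_subcat array defined right before it (a sketch, not the only valid approach):
categories = [{"category": cat, "subcategory": subcat} for cat, subcat in data_cat_subcat]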
#Target variable - whether or not sample contains prohibited material
target = df.is_blocked.values.astype('int32')
#Preprocessed titles
title_tokens = title_tokens.astype('int32')
#Preprocessed tokens
desc_tokens = desc_tokens.astype('int32')
#Non-sequences
df_non_text = df_non_text.astype('float32')
#Split into training and test set.
#Difficulty selector:
#Easy: split randomly
#Medium: select test set items that have item_ids strictly above that of training set
#Hard: do whatever you want, but score yourself using kaggle private leaderboard
title_tr,title_ts,desc_tr,desc_ts,nontext_tr,nontext_ts,target_tr,target_ts = <define_these_variables>
Explanation: Split data into training and test
End of explanation
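For the "easy" random split, one hedged option is sklearn's train_test_split; the import path below is the old sklearn.cross_validation location used at the time, newer versions use sklearn.model_selection instead:
from sklearn.cross_validation import train_test_split
title_tr, title_ts, desc_tr, desc_ts, nontext_tr, nontext_ts, target_tr, target_ts = train_test_split(
    title_tokens, desc_tokens, df_non_text.values, target, test_size=0.25, random_state=42)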
save_prepared_data = True #save
read_prepared_data = False #load
#but not both at once
assert not (save_prepared_data and read_prepared_data)
if save_prepared_data:
print "Saving preprocessed data (may take up to 3 minutes)"
import pickle
with open("preprocessed_data.pcl",'w') as fout:
pickle.dump(data_tuple,fout)
with open("token_to_id.pcl",'w') as fout:
pickle.dump(token_to_id,fout)
print "готово"
elif read_prepared_data:
print "Reading saved data..."
import pickle
with open("preprocessed_data.pcl",'r') as fin:
data_tuple = pickle.load(fin)
title_tr,title_ts,desc_tr,desc_ts,nontext_tr,nontext_ts,target_tr,target_ts = data_tuple
with open("token_to_id.pcl",'r') as fin:
token_to_id = pickle.load(fin)
#Re-importing libraries to allow starting the notebook from here
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
print "done"
Explanation: Save preprocessed data [optional]
The next tab can be used to stash all the essential data matrices and get rid of the rest of the data.
Highly recommended if you have less than 1.5GB RAM left
To do that, you need to first run it with save_prepared_data=True, then restart the notebook and only run this tab with read_prepared_data=True.
End of explanation
#libraries
import lasagne
from theano import tensor as T
import theano
#3 inputs and a reference output
title_token_ids = T.matrix("title_token_ids",dtype='int32')
desc_token_ids = T.matrix("desc_token_ids",dtype='int32')
categories = T.matrix("categories",dtype='float32')
target_y = T.ivector("is_blocked")
Explanation: Train the monster
Since we have several data sources, our neural network may differ from what you used to work with.
Separate input for titles
cnn+global max or RNN
Separate input for description
cnn+global max or RNN
Separate input for categorical features
regular dense (fully-connected) layers, or any tricks you like
These three inputs must be blended somehow - concatenated or added.
Output: a simple binary classification
1 sigmoidal with binary_crossentropy
2 softmax with categorical_crossentropy - essentially the same as previous one
1 neuron without nonlinearity (lambda x: x) + hinge loss
End of explanation
title_inp = lasagne.layers.InputLayer((None,title_tr.shape[1]),input_var=title_token_ids)
descr_inp = lasagne.layers.InputLayer((None,desc_tr.shape[1]),input_var=desc_token_ids)
cat_inp = lasagne.layers.InputLayer((None,nontext_tr.shape[1]), input_var=categories)
# Descriptions
#word-wise embedding. We recommend starting from around 64 units and improving once you are certain it works.
descr_nn = lasagne.layers.EmbeddingLayer(descr_inp,
input_size=len(token_to_id)+1,
output_size=?)
#reshape from [batch, time, unit] to [batch,unit,time] to allow 1d convolution over time
descr_nn = lasagne.layers.DimshuffleLayer(descr_nn, [0,2,1])
descr_nn = 1D convolution over embedding, maybe several ones in a stack
#pool over time
descr_nn = lasagne.layers.GlobalPoolLayer(descr_nn,T.max)
#Possible improvements here are adding several parallel convs with different filter sizes or stacking them the usual way
#1dconv -> 1d max pool ->1dconv and finally global pool
# Titles
title_nn = <Process titles somehow (title_inp)>
# Non-sequences
cat_nn = <Process non-sequences(cat_inp)>
nn = <merge three layers into one (e.g. lasagne.layers.concat) >
nn = lasagne.layers.DenseLayer(nn,your_lucky_number)
nn = lasagne.layers.DropoutLayer(nn,p=maybe_use_me)
nn = lasagne.layers.DenseLayer(nn,1,nonlinearity=lasagne.nonlinearities.linear)
Explanation: NN architecture
End of explanation
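One hedged way to fill in the remaining placeholders of the architecture above; layer sizes and the filter size are arbitrary illustrations, not tuned values:
# Titles: same embedding -> 1D convolution -> global max pooling pattern as the descriptions
title_nn = lasagne.layers.EmbeddingLayer(title_inp, input_size=len(token_to_id)+1, output_size=64)
title_nn = lasagne.layers.DimshuffleLayer(title_nn, [0, 2, 1])
title_nn = lasagne.layers.Conv1DLayer(title_nn, num_filters=64, filter_size=3)
title_nn = lasagne.layers.GlobalPoolLayer(title_nn, T.max)
# Non-sequences: a small dense layer is usually enough
cat_nn = lasagne.layers.DenseLayer(cat_inp, 64)
# Blend the three branches by concatenation
nn = lasagne.layers.ConcatLayer([descr_nn, title_nn, cat_nn])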
#All trainable params
weights = lasagne.layers.get_all_params(nn,trainable=True)
#Simple NN prediction
prediction = lasagne.layers.get_output(nn)[:,0]
#Hinge loss
loss = lasagne.objectives.binary_hinge_loss(prediction,target_y,delta = what_do_you_think).mean()
#Weight optimization step
updates = <your favorite optimizer>
Explanation: Loss function
The standard way:
prediction
loss
updates
training and evaluation functions
Hinge loss
$ L_i = \max(0, \delta - t_i p_i) $
delta is a tunable parameter: how far should a neuron be in the positive margin area for us to stop bothering about it
Function description may mention some +-1 limitations - this is not necessary, at least as long as hinge loss has a default flag binary = True
End of explanation
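A hedged example for the two placeholders in the cell above; delta and the optimizer are both free choices, and adam with a modest learning rate is simply a reasonable starting point:
loss = lasagne.objectives.binary_hinge_loss(prediction, target_y, delta=1.0).mean()
updates = lasagne.updates.adam(loss, weights, learning_rate=1e-3)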
#deterministic version
det_prediction = lasagne.layers.get_output(nn,deterministic=True)[:,0]
#equivalent loss function
det_loss = <an exercise in copy-pasting and editing>
Explanation: Deterministic prediction
In case we use stochastic elements, e.g. dropout or noise
Compile a separate set of functions with deterministic prediction (deterministic = True)
Unless you think there's no need for dropout there ofc. Btw is there?
End of explanation
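The deterministic loss placeholder is just the same hinge loss computed on det_prediction; the delta shown here is an arbitrary illustration and should match whatever you used above:
det_loss = lasagne.objectives.binary_hinge_loss(det_prediction, target_y, delta=1.0).mean()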
train_fun = theano.function([desc_token_ids,title_token_ids,categories,target_y],[loss,prediction],updates = updates)
eval_fun = theano.function([desc_token_ids,title_token_ids,categories,target_y],[det_loss,det_prediction])
Explanation: Coffee-lation
End of explanation
#average precision at K
from oracle import APatK, score
# Our good old minibatch iterator now supports an arbitrary amount of arrays (X,y,z)
def iterate_minibatches(*arrays,**kwargs):
batchsize=kwargs.get("batchsize",100)
shuffle = kwargs.get("shuffle",True)
if shuffle:
indices = np.arange(len(arrays[0]))
np.random.shuffle(indices)
for start_idx in range(0, len(arrays[0]) - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield [arr[excerpt] for arr in arrays]
Explanation: Training loop
The regular way with loops over minibatches
Since the dataset is huge, we define an epoch as some fixed amount of samples instead of the whole dataset
End of explanation
from sklearn.metrics import roc_auc_score, accuracy_score
n_epochs = 100
batch_size = 100
minibatches_per_epoch = 100
for i in range(n_epochs):
#training
epoch_y_true = []
epoch_y_pred = []
b_c = b_loss = 0
for j, (b_desc,b_title,b_cat, b_y) in enumerate(
iterate_minibatches(desc_tr,title_tr,nontext_tr,target_tr,batchsize=batch_size,shuffle=True)):
if j > minibatches_per_epoch:break
loss,pred_probas = train_fun(b_desc,b_title,b_cat,b_y)
b_loss += loss
b_c +=1
epoch_y_true.append(b_y)
epoch_y_pred.append(pred_probas)
epoch_y_true = np.concatenate(epoch_y_true)
epoch_y_pred = np.concatenate(epoch_y_pred)
print "Train:"
print '\tloss:',b_loss/b_c
print '\tacc:',accuracy_score(epoch_y_true,epoch_y_pred>0.)
print '\tauc:',roc_auc_score(epoch_y_true,epoch_y_pred)
print '\tap@k:',APatK(epoch_y_true,epoch_y_pred,K = int(len(epoch_y_pred)*0.025)+1)
#evaluation
epoch_y_true = []
epoch_y_pred = []
b_c = b_loss = 0
for j, (b_desc,b_title,b_cat, b_y) in enumerate(
iterate_minibatches(desc_ts,title_ts,nontext_ts,target_ts,batchsize=batch_size,shuffle=True)):
if j > minibatches_per_epoch: break
loss,pred_probas = eval_fun(b_desc,b_title,b_cat,b_y)
b_loss += loss
b_c +=1
epoch_y_true.append(b_y)
epoch_y_pred.append(pred_probas)
epoch_y_true = np.concatenate(epoch_y_true)
epoch_y_pred = np.concatenate(epoch_y_pred)
print "Val:"
print '\tloss:',b_loss/b_c
print '\tacc:',accuracy_score(epoch_y_true,epoch_y_pred>0.)
print '\tauc:',roc_auc_score(epoch_y_true,epoch_y_pred)
print '\tap@k:',APatK(epoch_y_true,epoch_y_pred,K = int(len(epoch_y_pred)*0.025)+1)
print "If you are seeing this, it's time to backup your notebook. No, really, 'tis too easy to mess up everything without noticing. "
Explanation: Tweaking guide
batch_size - how many samples are processed per function call
optimization gets slower, but more stable, as you increase it.
May consider increasing it halfway through training
minibatches_per_epoch - max amount of minibatches per epoch
Does not affect training. Lesser value means more frequent and less stable printing
Setting it to less than 10 is only meaningful if you want to make sure your NN does not break down after one epoch
n_epochs - total amount of epochs to train for
n_epochs = 10**10 and manual interrupting is still an option
Tips:
With small minibatches_per_epoch, network quality may jump around 0.5 for several epochs
AUC is the most stable of all three metrics
Average Precision at top 2.5% (APatK) - is the least stable. If batch_size*minibatches_per_epoch < 10k, it behaves as a uniform random variable.
Plotting metrics over training time may be a good way to analyze which architectures work better.
Once you are sure your network ain't gonna crash, it's worth letting it train for a few hours of an average laptop's time to see its true potential
End of explanation
#evaluation
epoch_y_true = []
epoch_y_pred = []
b_c = b_loss = 0
for j, (b_desc,b_title,b_cat, b_y) in enumerate(
iterate_minibatches(desc_ts,title_ts,nontext_ts,target_ts,batchsize=batch_size,shuffle=True)):
loss,pred_probas = eval_fun(b_desc,b_title,b_cat,b_y)
b_loss += loss
b_c +=1
epoch_y_true.append(b_y)
epoch_y_pred.append(pred_probas)
epoch_y_true = np.concatenate(epoch_y_true)
epoch_y_pred = np.concatenate(epoch_y_pred)
final_accuracy = accuracy_score(epoch_y_true,epoch_y_pred>0)
final_auc = roc_auc_score(epoch_y_true,epoch_y_pred)
final_apatk = APatK(epoch_y_true,epoch_y_pred,K = int(len(epoch_y_pred)*0.025)+1)
print "Scores:"
print '\tloss:',b_loss/b_c
print '\tacc:',final_accuracy
print '\tauc:',final_auc
print '\tap@k:',final_apatk
score(final_accuracy,final_auc,final_apatk)
Explanation: Final evaluation
Evaluate network over the entire test set
End of explanation |
14,547 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Matrix multiplication tutorial
This tutorial demonstrates how to use Kernel Tuner to test and tune kernels, using matrix multiplication as an example.
Matrix multiplication is one of the most well-known and widely-used linear algebra operations, and is frequently used to demonstrate the high-performance computing capabilities of GPUs. As such, matrix multiplication presents a familiar starting point for many GPU programmers.
<div class="alert alert-info">**Note
Step1: This kernel assumes that the width and height of the matrices A, B, and C is equal to WIDTH, which is known at compile time. Of course, you'll want a more flexible solution in reality, but this is just an example kernel to demonstrate how to use Kernel Tuner.
There are two more contants in the code that are currently undefined. These are block_size_x and block_size_y, these are the names that Kernel Tuner uses by default for denoting the thread block dimensions in x and y. The actual values used for these constants at compile time can be any sensible value for thread block dimensions. As long as we create enough threads to compute all elements in $C$, the output will not be affected by the value of block_size_x and block_size_y. Parameters in the code that have this property are called tunable parameters.
Because we can pick any value for these parameters, we can use auto-tuning to automatically find the best performing combination of parameters. That's exactly what we're going to do in this tutorial!
Tuning a naive kernel
Now we will have a look at how to use Kernel Tuner to find the best performing combination of tunable parameters for our naive matrix multiplication kernel. We'll go over the process of creating an auto-tuning script step-by-step.
Because the tuner will need to execute the kernel, we start with creating some input data.
Step2: In the above Python code, we've specified the size of matrices and generated some random data for matrix $A$ and $B$, and a zeroed matrix $C$. We've also created a list named args that contains the matrices C, A, and B, which will be used as the argument list by the tuner to call the kernel and measure its performance.
The next step is specifying to the tuner what values can be used for the thread block dimensions in x and y. In other words, we specify the tunable parameters and the possible values they can take.
Step3: We are creating a dictionary to hold the tunable parameters. The name of the parameter is used the key, and the list
of possible values for this parameter is the value for this key in the dictionary. We are using a small set of possible values here, but you are free to specify any values that you like. In general, we try to keep the total number of threads in a thread block as a multiple of the warpsize (32) on the GPU.
Also, to keep our kernel clean and simple, we did not include any bounds checking in the kernel code. This means that, for the kernel to run correctly, we need to make sure that the number of threads used in a particular dimension divides the size of the matrix in that dimension. By using 4096 as the width and height of our matrix and using only powers of two for our thread block dimensions we can avoid memory errors.
Before we start tuning, we will also tell Kernel Tuner how to compute a metric that we commonly use to express the compute performance of GPU kernels, namelijk GFLOP/s, which stands for giga floating-point operations per second. User-defined metrics are specified using the metrics option and should be supplied using an ordered dictionary, because metrics are composable.
Step4: Now that we've specified the input, the tunable parameters, and a user-defined metric, we are ready to call Kernel Tuner's tune_kernel method to start auto-tuning our kernel.
Step5: Before looking at the result, we'll explain briefly how we called tune_kernel. The first argument is the name of the kernel that we want to tune. The second argument is a string that contains the filename of the kernel. It is also possible to directly pass a string that contains the code, or to pass a Python function that generates the kernel code. The tuner will figure out which language (CUDA or OpenCL) is being used in the kernel code. The third argument to tune_kernel is the problem size, which is used by the tuner to compute the grid dimensions for our kernel. To compute the grid dimensions the tuner needs to know the thread block dimensions, which we have specified using the tunable parameters (fifth argument). The fourth argument is the argument list that the tuner will need to actually call the kernel.
As we can see the execution times printed by tune_kernel already vary quite dramatically between the different values for block_size_x and block_size_y. However, even with the best thread block dimensions our kernel is still not very efficient.
Therefore, we'll have a look at the Nvidia Visual Profiler to find that the utilization of our kernel is actually pretty low
Step6: As you can see the simple for loop inside our kernel has been replaced with a blocked version. The blocked loop consists of two for-loop constructs. The outer loop iterates with steps of size block_size_x over the WIDTH of the matrix.
Within each iteration of the outer loop two things happen. First the threads within this thread block fill shared memory with the submatrices needed for all the computations performed by the thread block. The actual computation happens in the inner loop and only uses data in shared memory. The __syncthreads() statements are needed to avoid race conditions on data in shared memory.
The above kernel does come with one restriction, it can only be executed correctly when the area operated on by the thread block as a whole is a square. This means that the number of threads we use in the x and y dimensions will have to be equal. We can specify this restriction to the tuner using the restrictions= option of tune_kernel().
There are multiple ways to define restrictions in Kernel Tuner. You can specify a list of conditions that need to evaluate to True before a kernel configurations is considered to be part of the parameter space of our kernel. In the code below we create such a restrictions list and call the tuner again for our kernel that uses shared memory.
Step7: Since we have only one restriction, our list only contains a single expression. The tunable parameter values will be inserted in this expression before it is evaluated. Another way to specify restrictions is with a function. This function takes a dictionary with the tunable parameter values of the kernel configuration and should return True if the configuration is part of the search space.
Step8: This kernel drastically reduces memory bandwidth consumption. Compared to our naive kernel, it is about three times faster now, which comes from the highly increased memory utilization
Step9: First of all we'll need to expand our tune_params dictionary to include our newly introduced tunable parameters. We'll choose a couple of small values for the tiling factors in both the x and y-dimension, to keep the search space manageable.
Step10: As explained in the text above, the tiling factors will reduce the number of thread blocks needed in their respective dimensions with a factor of N, where N is the tiling factor in that dimension. This is something that we will need to tell the tuner, otherwise it may execute the kernel with too many thread blocks.
We can tell the tuner how the grid dimensions need to be computed. So far, we've only used the default behavior of computing the grid dimensions by dividing the problem size with the thread block size in each dimension. However, the tuner now also needs to take the tiling factor into account. We specify this by setting up grid divisor lists, that will contain the names of all the tunable parameters that divide the grid in a particular dimension. These grid divisor lists will be passed as optional arguments when we call tune_kernel.
Step11: Remember that the area operated on by the thread block should be a square. In this kernel however, we allow block_size_x and block_size_y to vary independently, while tile_size_y increases the amount of work per thread in the y-direction within the thread block. This yields a discontinuous search space in which only part of the configurations are actually valid. Therefore, we again use the restrictions= option of tune_kernel. After this, we are ready to call tune_kernel again. | Python Code:
%%writefile matmul_naive.cu
#define WIDTH 4096
__global__ void matmul_kernel(float *C, float *A, float *B) {
int x = blockIdx.x * block_size_x + threadIdx.x;
int y = blockIdx.y * block_size_y + threadIdx.y;
float sum = 0.0;
for (int k=0; k<WIDTH; k++) {
sum += A[y*WIDTH+k] * B[k*WIDTH+x];
}
C[y*WIDTH+x] = sum;
}
Explanation: Matrix multiplication tutorial
This tutorial demonstrates how to use Kernel Tuner to test and tune kernels, using matrix multiplication as an example.
Matrix multiplication is one of the most well-known and widely-used linear algebra operations, and is frequently used to demonstrate the high-performance computing capabilities of GPUs. As such, matrix multiplication presents a familiar starting point for many GPU programmers.
<div class="alert alert-info">**Note:** If you are reading this tutorial on the Kernel Tuner's documentation pages, note that you can actually run this tutorial as a Jupyter Notebook. Just clone the Kernel Tuner's [GitHub repository](http://github.com/benvanwerkhoven/kernel_tuner). Install using *pip install .[tutorial,cuda]* and you're ready to go! You can start the tutorial by typing "jupyter notebook" in the "kernel_tuner/tutorial" directory.
</div>
Make sure to execute all the code cells you come across in this tutorial by selecting them and pressing shift+enter.
Naive CUDA kernel
We'll start with a very simple kernel for performing a matrix multiplication in CUDA. The idea is that this kernel is executed with one thread per element in the output matrix. As such, each thread $(i,j)$ iterates over the entire row $i$ in matrix $A$, and column $j$ in matrix $B$.
To keep the code clean and simple, we'll assume that we only work with square matrices. Execute the following cell to write our naive matrix multiplication kernel to a file named "matmul_naive.cu" by pressing shift+enter.
End of explanation
import numpy as np
problem_size = (4096, 4096)
A = np.random.randn(*problem_size).astype(np.float32)
B = np.random.randn(*problem_size).astype(np.float32)
C = np.zeros_like(A)
args = [C, A, B]
Explanation: This kernel assumes that the width and height of the matrices A, B, and C are equal to WIDTH, which is known at compile time. Of course, you'll want a more flexible solution in reality, but this is just an example kernel to demonstrate how to use Kernel Tuner.
There are two more constants in the code that are currently undefined. These are block_size_x and block_size_y, which are the names that Kernel Tuner uses by default for denoting the thread block dimensions in x and y. The actual values used for these constants at compile time can be any sensible value for thread block dimensions. As long as we create enough threads to compute all elements in $C$, the output will not be affected by the value of block_size_x and block_size_y. Parameters in the code that have this property are called tunable parameters.
Because we can pick any value for these parameters, we can use auto-tuning to automatically find the best performing combination of parameters. That's exactly what we're going to do in this tutorial!
Tuning a naive kernel
Now we will have a look at how to use Kernel Tuner to find the best performing combination of tunable parameters for our naive matrix multiplication kernel. We'll go over the process of creating an auto-tuning script step-by-step.
Because the tuner will need to execute the kernel, we start with creating some input data.
End of explanation
from collections import OrderedDict
tune_params = OrderedDict()
tune_params["block_size_x"] = [16, 32, 64]
tune_params["block_size_y"] = [1, 2, 4, 8, 16, 32]
Explanation: In the above Python code, we've specified the size of matrices and generated some random data for matrix $A$ and $B$, and a zeroed matrix $C$. We've also created a list named args that contains the matrices C, A, and B, which will be used as the argument list by the tuner to call the kernel and measure its performance.
The next step is specifying to the tuner what values can be used for the thread block dimensions in x and y. In other words, we specify the tunable parameters and the possible values they can take.
End of explanation
from collections import OrderedDict
metrics = OrderedDict()
metrics["GFLOP/s"] = lambda p : (2*problem_size[0]**3/1e9)/(p["time"]/1e3)
Explanation: We are creating a dictionary to hold the tunable parameters. The name of the parameter is used as the key, and the list
of possible values for this parameter is the value for this key in the dictionary. We are using a small set of possible values here, but you are free to specify any values that you like. In general, we try to keep the total number of threads in a thread block as a multiple of the warpsize (32) on the GPU.
Also, to keep our kernel clean and simple, we did not include any bounds checking in the kernel code. This means that, for the kernel to run correctly, we need to make sure that the number of threads used in a particular dimension divides the size of the matrix in that dimension. By using 4096 as the width and height of our matrix and using only powers of two for our thread block dimensions we can avoid memory errors.
Before we start tuning, we will also tell Kernel Tuner how to compute a metric that we commonly use to express the compute performance of GPU kernels, namely GFLOP/s, which stands for giga floating-point operations per second. User-defined metrics are specified using the metrics option and should be supplied using an ordered dictionary, because metrics are composable.
End of explanation
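As a quick sanity check of that metric, the arithmetic can be done by hand (plain Python, same formula as the lambda above):
flops = 2 * 4096**3              # floating-point operations for one 4096x4096 matrix multiplication
print(flops / 1e9)               # ~137.4 GFLOP per kernel launch
print((flops / 1e9) / 0.1)       # ~1374 GFLOP/s if the kernel were to take 100 ms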
from kernel_tuner import tune_kernel
results = tune_kernel("matmul_kernel", "matmul_naive.cu", problem_size, args, tune_params, metrics=metrics)
Explanation: Now that we've specified the input, the tunable parameters, and a user-defined metric, we are ready to call Kernel Tuner's tune_kernel method to start auto-tuning our kernel.
End of explanation
%%writefile matmul_shared.cu
#define WIDTH 4096
__global__ void matmul_kernel(float *C, float *A, float *B) {
__shared__ float sA[block_size_y][block_size_x];
__shared__ float sB[block_size_y][block_size_x];
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = blockIdx.x * block_size_x + tx;
int y = blockIdx.y * block_size_y + ty;
float sum = 0.0;
int k,kb;
for (k=0; k<WIDTH; k+=block_size_x) {
__syncthreads();
sA[ty][tx] = A[y*WIDTH+k+tx];
sB[ty][tx] = B[(k+ty)*WIDTH+x];
__syncthreads();
for (kb=0; kb<block_size_x; kb++) {
sum += sA[ty][kb] * sB[kb][tx];
}
}
C[y*WIDTH+x] = sum;
}
Explanation: Before looking at the result, we'll explain briefly how we called tune_kernel. The first argument is the name of the kernel that we want to tune. The second argument is a string that contains the filename of the kernel. It is also possible to directly pass a string that contains the code, or to pass a Python function that generates the kernel code. The tuner will figure out which language (CUDA or OpenCL) is being used in the kernel code. The third argument to tune_kernel is the problem size, which is used by the tuner to compute the grid dimensions for our kernel. To compute the grid dimensions the tuner needs to know the thread block dimensions, which we have specified using the tunable parameters (fifth argument). The fourth argument is the argument list that the tuner will need to actually call the kernel.
As we can see the execution times printed by tune_kernel already vary quite dramatically between the different values for block_size_x and block_size_y. However, even with the best thread block dimensions our kernel is still not very efficient.
Therefore, we'll have a look at the Nvidia Visual Profiler to find that the utilization of our kernel is actually pretty low:
There is, however, a lot of opportunity for data reuse, which is realized by making the threads in a thread block collaborate.
Using shared memory
We can increase the utilization of memory bandwidth with a technique called cache-blocking or loop-tiling.
To this end, we define two square data structures in shared memory, which will be used for storing square parts of matrix $A$ and $B$. The threads in a thread block will collaboratively fill these two submatrices, and then proceed to perform all the computations that need this data, before moving to the next blocked iteration.
The code required to do this is a little bit more complex:
End of explanation
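A rough feel for the resource cost of this kernel can be had with a couple of lines of plain Python; this is only an estimate and ignores any per-block overhead:
block_size = 32                                    # assumes block_size_x == block_size_y == 32
shared_bytes = 2 * block_size * block_size * 4     # sA + sB, 4 bytes per float
print(shared_bytes / 1024.0, "KiB of shared memory per thread block")   # 8.0 KiB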
restrict = ["block_size_x==block_size_y"]
Explanation: As you can see, the simple for loop inside our kernel has been replaced with a blocked version. The blocked loop consists of two for-loop constructs. The outer loop iterates with steps of size block_size_x over the WIDTH of the matrix.
Within each iteration of the outer loop two things happen. First the threads within this thread block fill shared memory with the submatrices needed for all the computations performed by the thread block. The actual computation happens in the inner loop and only uses data in shared memory. The __syncthreads() statements are needed to avoid race conditions on data in shared memory.
The above kernel does come with one restriction, it can only be executed correctly when the area operated on by the thread block as a whole is a square. This means that the number of threads we use in the x and y dimensions will have to be equal. We can specify this restriction to the tuner using the restrictions= option of tune_kernel().
There are multiple ways to define restrictions in Kernel Tuner. You can specify a list of conditions that need to evaluate to True before a kernel configuration is considered to be part of the parameter space of our kernel. In the code below we create such a restrictions list and call the tuner again for our kernel that uses shared memory.
End of explanation
restrict = lambda p: p["block_size_x"]==p["block_size_y"]
results = tune_kernel("matmul_kernel", "matmul_shared.cu",
problem_size, args, tune_params, metrics=metrics,
restrictions=restrict)
Explanation: Since we have only one restriction, our list only contains a single expression. The tunable parameter values will be inserted in this expression before it is evaluated. Another way to specify restrictions is with a function. This function takes a dictionary with the tunable parameter values of the kernel configuration and should return True if the configuration is part of the search space.
End of explanation
%%writefile matmul.cu
#define WIDTH 4096
__global__ void matmul_kernel(float *C, float *A, float *B) {
__shared__ float sA[block_size_y*tile_size_y][block_size_x];
__shared__ float sB[block_size_y*tile_size_y][block_size_x * tile_size_x];
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = blockIdx.x * block_size_x * tile_size_x + threadIdx.x;
int y = blockIdx.y * block_size_y * tile_size_y + threadIdx.y;
int k, kb;
float sum[tile_size_y][tile_size_x];
//zero the per-thread accumulators before the blocked loop
#pragma unroll
for (int i = 0; i < tile_size_y; i++) {
#pragma unroll
for (int j = 0; j < tile_size_x; j++) {
sum[i][j] = 0.0f;
}
}
for (k = 0; k < WIDTH; k += block_size_x) {
__syncthreads ();
#pragma unroll
for (int i = 0; i < tile_size_y; i++) {
sA[ty + block_size_y * i][tx] = A[y * WIDTH + block_size_y * i * WIDTH + k + tx];
#pragma unroll
for (int j = 0; j < tile_size_x; j++) {
sB[ty + block_size_y * i][tx + j * block_size_x] =
B[(k + ty + block_size_y * i) * WIDTH + x + j * block_size_x];
}
}
__syncthreads ();
//compute
#pragma unroll
for (kb = 0; kb < block_size_x; kb++) {
#pragma unroll
for (int i = 0; i < tile_size_y; i++) {
#pragma unroll
for (int j = 0; j < tile_size_x; j++) {
sum[i][j] += sA[ty + block_size_y * i][kb] * sB[kb][tx + j * block_size_x];
}
}
}
}
//store result
#pragma unroll
for (int i = 0; i < tile_size_y; i++) {
#pragma unroll
for (int j = 0; j < tile_size_x; j++) {
C[y * WIDTH + x + block_size_y * i * WIDTH + j * block_size_x] = sum[i][j];
}
}
}
Explanation: This kernel drastically reduces memory bandwidth consumption. Compared to our naive kernel, it is about three times faster now, which comes from the highly increased memory utilization:
The compute utilization has actually decreased slightly, which is due to the synchronization overhead, because __syncthreads() is called frequently.
The restriction we have introduced has limited the number of kernel configurations benchmarked by the tuner significantly. Because the thread block size needs to be a square, there are only a handful of configurations we can try. Fortunately, we can add several more optimizations to the code that also open up the parameter space for tuning.
Increase work per thread
A commonly used code optimization in GPU programming is to increase the amount of work performed by each thread. This optimization has several benefits. It increases data reuse within the thread block and reduces the number of redundant instructions executed by distinct threads. This code optimization is typically called 1xN Tiling or thread-block-merge. We will use two different forms of 1xN tiling in this example:
First of all, in the x-direction we will use tiling in a way that is similar to the convolution example (used as part of the 'Getting Started' tutorial). The area of output data that is processed by a single thread block is increased by a factor of N, and as such shared memory usage also increases by a factor $N$. This means that the number of thread blocks needed to execute the kernel for this problem size is also reduced by a factor of $N$. While this may reduce occupancy due to increased shared memory and register usage, this optimization drastically reduces the number of redundant instructions that were previously distributed across multiple thread blocks.
Secondly, in the y-direction we will use a different form of 1xN tiling, where we tile within the thread block. This too means that threads will compute multiple elements, but in this case, rather than reducing the total number of thread blocks, the number of threads per block goes down.
Note that these two different forms of tiling could have been combined in different or even multiple ways to increase the tuning parameter space even further. However, for the purposes of this tutorial, the resulting kernel is already complex enough:
End of explanation
tune_params["tile_size_x"] = [1, 2, 4]
tune_params["tile_size_y"] = [1, 2, 4]
Explanation: First of all we'll need to expand our tune_params dictionary to include our newly introduced tunable parameters. We'll choose a couple of small values for the tiling factors in both the x and y-dimension, to keep the search space manageable.
End of explanation
grid_div_x = ["block_size_x", "tile_size_x"]
grid_div_y = ["block_size_y", "tile_size_y"]
Explanation: As explained in the text above, the tiling factors will reduce the number of thread blocks needed in their respective dimensions with a factor of N, where N is the tiling factor in that dimension. This is something that we will need to tell the tuner, otherwise it may execute the kernel with too many thread blocks.
We can tell the tuner how the grid dimensions need to be computed. So far, we've only used the default behavior of computing the grid dimensions by dividing the problem size with the thread block size in each dimension. However, the tuner now also needs to take the tiling factor into account. We specify this by setting up grid divisor lists, that will contain the names of all the tunable parameters that divide the grid in a particular dimension. These grid divisor lists will be passed as optional arguments when we call tune_kernel.
End of explanation
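To make the effect of the grid divisor lists concrete, this is roughly how the grid dimensions come out for one hypothetical configuration (Kernel Tuner performs this computation internally; the snippet is purely illustrative):
block_size_x, tile_size_x = 32, 2      # hypothetical values from the search space
block_size_y, tile_size_y = 8, 4
grid_x = 4096 // (block_size_x * tile_size_x)    # 64 thread blocks in x
grid_y = 4096 // (block_size_y * tile_size_y)    # 128 thread blocks in y
print(grid_x, grid_y)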
restrict = ["block_size_x==block_size_y*tile_size_y"]
results = tune_kernel("matmul_kernel", "matmul/matmul.cu", problem_size, args, tune_params,
grid_div_y=grid_div_y, grid_div_x=grid_div_x, metrics=metrics,
restrictions=restrict)
Explanation: Remember that the area operated on by the thread block should be a square. In this kernel however, we allow block_size_x and block_size_y to vary independently, while tile_size_y increases the amount of work per thread in the y-direction within the thread block. This yields a discontinuous search space in which only part of the configurations are actually valid. Therefore, we again use the restrictions= option of tune_kernel. After this, we are ready to call tune_kernel again.
End of explanation |
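After tuning finishes, the best configuration can be pulled out of the returned value; this assumes the usual (list of benchmark records, environment) tuple returned by tune_kernel:
records, env = results
best = min(records, key=lambda r: r["time"])
print(best)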
14,548 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Critical Radii
Step1: As always, let's do imports and initialize a logger and a new Bundle.
Step2: Contact Systems
Contact systems are created by passing contact_binary=True to phoebe.default_binary() or by manually adding an envelope and setting the hierarchy correctly.
By default, requiv@primary is the free Parameter, with requiv@secondary, pot@contact_envelope, and fillout_factor@contact_envelope constrained such that there is one single surface defining the envelope.
Step3: In order to pass the system checks, these values must be between their minimum and maximum value ensuring the system is not underflowing (in which case it should be detached or semi-detached) or overflowing and losing mass.
These limiting values are constrained parameters, allowing us to see the allowed range for any parameterization.
Step4: Changing Parameterization
It is possible to change this parameterization to allow any one of the four parameters (requiv@primary, requiv@secondary, pot@contact_envelope, fillout_factor@contact_envelope) to be adjustable and the other two to be constrained. Doing so requires flipping one or two constraints via b.flip_constraint().
Let's first flip the constraints so that we can provide the potential of the envelope.
Step5: Or we could instead flip two constraints to have fillout factor of the envelope as the adjustable parameter (we'll start with a fresh bundle just to avoid confusion with the flipping we just did)
Step6: Or instead we could allow providing the equivalent radius of the secondary star. | Python Code:
#!pip install -I "phoebe>=2.3,<2.4"
Explanation: Critical Radii: Contact Systems
Setup
Let's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab).
End of explanation
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary(contact_binary=True)
Explanation: As always, let's do imports and initialize a logger and a new Bundle.
End of explanation
print(b.filter(qualifier=['requiv', 'pot', 'fillout_factor']))
Explanation: Contact Systems
Contact systems are created by passing contact_binary=True to phoebe.default_binary() or by manually adding an envelope and setting the hierarchy correctly.
By default, requiv@primary is the free Parameter, with requiv@secondary, pot@contact_envelope, and fillout_factor@contact_envelope constrained such that there is one single surface defining the envelope.
End of explanation
print(b.filter(qualifier=['requiv_max', 'requiv_min', 'pot_max', 'pot_min']))
Explanation: In order to pass the system checks, these values must be between their minimum and maximum value ensuring the system is not underflowing (in which case it should be detached or semi-detached) or overflowing and losing mass.
These limiting values are constrained parameters, allowing us to see the allowed range for any parameterization.
End of explanation
b.flip_constraint('pot', solve_for='requiv@primary')
print(b.filter(qualifier=['requiv', 'pot', 'fillout_factor']))
Explanation: Changing Parameterization
It is possible to change this parameterization to allow any one of the four parameters (requiv@primary, requiv@secondary, pot@contact_envelope, fillout_factor@contact_envelope) to be adjustable and the other two to be constrained. Doing so requires flipping one or two constraints via b.flip_constraint().
Let's first flip the constraints so that we can provide the potential of the envelope.
End of explanation
b = phoebe.default_binary(contact_binary=True)
b.flip_constraint('pot', solve_for='requiv@primary')
b.flip_constraint('fillout_factor', solve_for='pot')
print(b.filter(qualifier=['requiv', 'pot', 'fillout_factor']))
Explanation: Or we could instead flip two constraints to have fillout factor of the envelope as the adjustable parameter (we'll start with a fresh bundle just to avoid confusion with the flipping we just did):
End of explanation
b = phoebe.default_binary(contact_binary=True)
b.flip_constraint('pot', solve_for='requiv@primary')
b.flip_constraint('requiv@secondary', solve_for='pot')
print(b.filter(qualifier=['requiv', 'pot', 'fillout_factor']))
Explanation: Or instead we could allow providing the equivalent radius of the secondary star.
End of explanation |
14,549 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
K Nearest Neighbours is a algorithim for finding out the similarity or distance b/w two things, to find out how alike/different they are.
Say we have a bunch of fruit, KNN will classify them into clusters by using what we know - with fruit this would be shape, size, weight, color, etc.
Anyways, lets start with the Iris dataset, which has 150 measurements of flowers
Step1: Now I'm sampling 5 random flowers from this data set so we can use our fancy new KNN algo to determine what kind of flower they are later on
Step2: And here I am deleting the sampled flowers from the iris dataset to make sure our algo hasn't seem the test flowers
Step3: Now to look at the data visually
Step4: Looking at petal length variation across species
Step6: Now to actually write the algorithim and figure out what species the flowers in the test data set belong to.
First, a helper function to calculate the distance b/w points
Step8: lets look at the values of the first flower in our test data and see if we can figure out what it is by using KNN | Python Code:
import numpy as np
import seaborn as sns
from collections import Counter
iris = sns.load_dataset("iris")
print(f"Iris dataset shape: {iris.shape}")
iris.head()
Explanation: K Nearest Neighbours is an algorithm for finding out the similarity or distance b/w two things, to find out how alike/different they are.
Say we have a bunch of fruit, KNN will classify them into clusters by using what we know - with fruit this would be shape, size, weight, color, etc.
Anyway, let's start with the Iris dataset, which has 150 measurements of flowers:
End of explanation
test = iris.sample(n=5)
test
Explanation: Now I'm sampling 5 random flowers from this data set so we can use our fancy new KNN algo to determine what kind of flower they are later on:
End of explanation
iris.drop(test.index, inplace=True)
print(iris.shape)
iris.head()
Explanation: And here I am deleting the sampled flowers from the iris dataset to make sure our algo hasn't seen the test flowers:
End of explanation
sns.pairplot(data=iris, hue="species")
Explanation: Now to look at the data visually:
It's pretty clear that the species are different, though there is some overlap at the boundaries:
End of explanation
sns.boxplot(x="species", y="petal_length", data=iris);
Explanation: Looking at petal length variation across species:
End of explanation
def distance(x, y):
returns distance b/w two points x and y
assert len(x) == len(y)
inner = 0
for a, b in zip(x,y):
inner += (a - b)**2
return np.sqrt(inner)
distance((1,5),[5,5])
Explanation: Now to actually write the algorithm and figure out what species the flowers in the test data set belong to.
First, a helper function to calculate the distance b/w points:
End of explanation
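For what it's worth, the same Euclidean distance can be checked against numpy directly (assuming numpy is imported as np):
np.linalg.norm(np.subtract((1, 5), (5, 5)))   # 4.0, matching distance((1,5),[5,5])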
test.iloc[2]
def knn(item, data, n=3):
takes in an item to check and a dataset, of size 4 features each
returns the most common species among the n nearest neighbours
dist = []
for i, row in data.iterrows():
dist.append((i, distance(row[:4], item)))
nearest = sorted(dist, key=lambda x: x[1])[:n]
species = [data.loc[i[0]]["species"] for i in nearest]
return Counter(species).most_common()[0][0]
knn(test.iloc[2][:4], iris)
knn_species = []
for i, row in test.iterrows():
knn_species.append(knn(row[:4], iris))
knn_species
test["knn"] = knn_species
test
Explanation: Let's look at the values of the first flower in our test data and see if we can figure out what it is by using KNN:
End of explanation |
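A quick sanity check on how well the predictions match the true labels - a simple accuracy over the 5 sampled test flowers:
accuracy = (test["species"] == test["knn"]).mean()
print(f"KNN got {accuracy:.0%} of the test flowers right")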
14,550 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Cell Composition Adjustment
One very important step to note is that we are adjusting each patient's beta-values by the expected value given their cell composition. We are including this step to reduce the variability in methylation levels that is purely resulting from differing cellular compositions. We are performing this step after quantile normalization, but before the BMIQ step. This could be done in other sections of the pipeline but should not have too big of an impact on downstream results.
Step1: Hannum
Read in quantile-normalized beta values and cell counts from the MINFI pipeline.
Step2: Read in cell compositions from probe annotations.
Step3: Adjust data to account for differences in cellular composition.
Step4: Here s1 is the Hannum data, which we used to train our model. We are normalizeing all of our data with the median value of the Hannum cohort as our reference for each probe.
Step5: EPIC
Step6: UCSD HIV Cohort | Python Code:
cd /cellar/users/agross/TCGA_Code/Methlation/
import NotebookImport
from Setup.Imports import *
Explanation: Cell Composition Adjustement
One very important step to note is that we are adjusting each patient's beta-values by the expected value given their cell composition. We are including this step to reduce the variablility in methylation levels that is purely resulting from differing cellular compositions. We are preforming this step after quantile normalization, but before the BMIQ step. This could be done in other sections of the pipeline but should not have too big of an impact on downstream results.
End of explanation
betas = pd.read_hdf(HDFS_DIR + 'dx_methylation.h5', 'betas')
betas = betas['s1']
betas = betas.groupby(axis=0, level=0).first()
cell_counts = pd.read_hdf(HDFS_DIR + 'dx_methylation.h5', 'cell_counts')
cell_counts = cell_counts.groupby(level=0).first()
cell_counts = cell_counts.ix[betas.columns]
Explanation: Hannum
Read in quantile-normalized beta values and cell counts from the MINFI pipeline.
End of explanation
flow_sorted_data = pd.read_hdf(HDFS_DIR + 'methylation_annotation.h5','flow_sorted_data')
flow_sorted_data = flow_sorted_data.groupby(level=0).first()
flow_sorted_data = flow_sorted_data.ix[betas.index]
cell_type = pd.read_hdf(HDFS_DIR + 'methylation_annotation.h5', 'label_map')
n2 = flow_sorted_data.groupby(cell_type, axis=1).mean()
avg = n2[cell_counts.columns].dot(cell_counts.T)
Explanation: Read in cell compositions from probe annotations.
End of explanation
cc = avg.mean(1)
adj = (betas - avg).add(cc, axis=0)
adj = adj.dropna(how='all', axis=1)
Explanation: Adjust data to account for differences in cellular composition.
End of explanation
gold_standard_ah = adj.median(1)
store = pd.HDFStore(HDFS_DIR + 'methylation_norm.h5')
store['Hannum_gold_standard'] = gold_standard_ah
store.append('Hannum_adj', adj)
store.create_table_index('Hannum_adj', optlevel=9, kind='full')
Explanation: Here s1 is the Hannum data, which we used to train our model. We are normalizeing all of our data with the median value of the Hannum cohort as our reference for each probe.
End of explanation
betas = pd.read_hdf(HDFS_DIR + 'dx_methylation.h5', 'betas')
betas = betas['s3']
betas = betas.groupby(axis=0, level=0).first()
cell_counts = pd.read_hdf(HDFS_DIR + 'dx_methylation.h5', 'cell_counts')
cell_counts = cell_counts.groupby(level=0).first()
cell_counts = cell_counts.ix[betas.columns]
flow_sorted_data = pd.read_hdf(HDFS_DIR + 'methylation_annotation.h5','flow_sorted_data')
flow_sorted_data = flow_sorted_data.groupby(level=0).first()
flow_sorted_data = flow_sorted_data.ix[betas.index]
cell_type = pd.read_hdf(HDFS_DIR + 'methylation_annotation.h5', 'label_map')
n2 = flow_sorted_data.groupby(cell_type, axis=1).mean()
avg = n2[cell_counts.columns].dot(cell_counts.T)
cc = avg.mean(1)
adj = (betas - avg).add(cc, axis=0)
adj = adj.dropna(how='all', axis=1)
store.append('EPIC_adj', adj)
store.create_table_index('EPIC_adj', optlevel=9, kind='full')
Explanation: EPIC
End of explanation
betas = pd.read_hdf(HDFS_DIR + 'dx_methylation.h5', 'betas')
betas = betas['s2']
betas = betas.groupby(axis=0, level=0).first()
cell_counts = pd.read_hdf(HDFS_DIR + 'dx_methylation.h5', 'cell_counts')
cell_counts = cell_counts.groupby(level=0).first()
cell_counts = cell_counts.ix[betas.columns]
flow_sorted_data = pd.read_hdf(HDFS_DIR + 'methylation_annotation.h5','flow_sorted_data')
flow_sorted_data = flow_sorted_data.groupby(level=0).first()
flow_sorted_data = flow_sorted_data.ix[betas.index]
cell_type = pd.read_hdf(HDFS_DIR + 'methylation_annotation.h5', 'label_map')
n2 = flow_sorted_data.groupby(cell_type, axis=1).mean()
avg = n2[cell_counts.columns].dot(cell_counts.T)
cc = avg.mean(1)
adj = (betas - avg).add(cc, axis=0)
adj = adj.dropna(how='all', axis=1)
store.append('HIV_adj', adj)
store.create_table_index('HIV_adj', optlevel=9, kind='full')
del betas, adj, avg
s1 = pd.read_hdf(HDFS_DIR + 'methylation_norm.h5', 'Hannum_adj')
s2 = pd.read_hdf(HDFS_DIR + 'methylation_norm.h5', 'HIV_adj')
s3 = pd.read_hdf(HDFS_DIR + 'methylation_norm.h5', 'EPIC_adj')
sc = pd.concat([s1, s2, s3], axis=1)
del s1, s2, s3
store.append('betas_adj', sc)
store.create_table_index('betas_adj', optlevel=9, kind='full')
store.close()
Explanation: UCSD HIV Cohort
End of explanation |
14,551 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Watch your tail!
Allen Downey 2019
MIT License
Step1: Loading historical data from the S&P 500
Step2: One day rally after the 2008 crash
Step3: Black Monday
Step13: To compare data to a distribution, I like to look at CDFs
Step14: Bayesian analysis adapted from
https | Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from empiricaldist import Pmf
from utils import decorate
Explanation: Watch your tail!
Allen Downey 2019
MIT License
End of explanation
# https://finance.yahoo.com/quote/%5EGSPC/history?period1=-630961200&period2=1565150400&interval=1d&filter=history&frequency=1d
df = pd.read_csv('yahoo/yahoo_sp500.csv', index_col=0, parse_dates=True)
df.shape
df.head()
change = df['Open'].diff() / df['Open'] * 100
change = change.shift(-1).dropna()
change.shape
change.head()
change.tail()
Explanation: Loading historical data from the S&P 500:
End of explanation
change.max(), change.idxmax()
Explanation: One day rally after the 2008 crash
End of explanation
change.min(), change.idxmin()
change['2008-10-29']
df.loc['2008-10-29']
from empiricaldist import Cdf
cdf = Cdf.from_seq(change)
cdf.plot(label='data')
decorate(xlabel='Daily change (percent point)',
ylabel='CDF',
title='Distribution of daily changes')
Explanation: Black Monday
End of explanation
from scipy.stats import norm
def make_model(sample, size=201):
Estimate the parameters of a Gaussian model.
mu = np.mean(sample)
sigma = np.std(sample)
model = norm(mu, sigma)
xs = np.linspace(np.min(sample), np.max(sample), size)
ys = model.cdf(xs)
return xs, ys
xs, ys = make_model(change)
plt.plot(xs, ys, color='gray', label='Gaussian')
cdf.plot(label='data')
decorate(xlabel='Daily change (percent point)',
ylabel='CDF',
title='Distribution of daily changes')
def plot_middle(sample):
Plot the CDF between -3 and 3 percentage points.
xs, ys = make_model(sample)
plt.plot(xs, ys, color='gray', label='Gaussian')
cdf = Cdf.from_seq(sample)
cdf.plot(label='data')
decorate(xlim=[-3, 3],
xlabel='Daily change (percent point)',
ylabel='CDF',
title='Distribution of daily changes')
plot_middle(change)
def make_normal_prob_plot(sample):
Plot a normal probablity plot.
xs = norm.rvs(size=len(sample))
xs = np.sort(xs)
ys = np.sort(sample)
span = min(xs), max(xs)
plt.plot(span, span, color='gray', alpha=0.5)
plt.plot(xs, ys)
decorate(xlabel='Standard deviations from the mean',
ylabel='Daily change (percent point)',
title='Normal probability plot')
make_normal_prob_plot(change)
from empiricaldist import Surv
def tail_plot(sample):
Plot the CCDF on a log-log scale.
xs, ys = make_model(sample)
plt.plot(xs, 1-ys, color='gray', label='Gaussian')
# when we plot y on a log scale, we lose the
# most extreme value
surv = Surv.from_seq(sample)
surv.replace(0, np.nan, inplace=True)
surv.plot(label='data')
decorate(xscale='log',
yscale='log',
xlabel='Daily change (percent point)',
ylabel='CCDF (log)')
return surv
def resample(sample, ncols=101):
Generate bootstrap samples.
nrows = len(sample)
array = np.random.choice(sample, (nrows, ncols))
return pd.DataFrame(array)
def plot_surv_confidence(index, samples, **options):
Plot a 90% confidence interval for a survival curve.
df = pd.DataFrame(index=index, columns=samples.columns)
for i in samples.columns:
surv = Surv.from_seq(samples[i])
surv.replace(0, np.nan, inplace=True)
df[i] = surv(index)
df.fillna(method='ffill', inplace=True)
df.values.sort()
nrows, ncols = df.shape
low = int(ncols * 0.05)
high = int(ncols * 0.95)
plt.fill_between(df.index, df[low], df[high], **options)
from matplotlib.ticker import NullFormatter
def set_xticks(locs, labels):
Put tick labels at the given locations.
ax = plt.gca()
ax.xaxis.set_major_formatter(NullFormatter())
ax.xaxis.set_minor_formatter(NullFormatter())
plt.xticks(locs, labels)
def plot_right(sample):
Plot the right tail.
shift = np.min(sample)
right_tail = sample - shift
surv = tail_plot(right_tail)
samples = resample(right_tail, 101)
plot_surv_confidence(surv.index, samples, alpha=0.2)
decorate(title='Right tail of daily changes',
xlim=[20, 40],
ylim=[1e-5, 1.5])
labels = np.array([1, 3, 5, 7, 10])
locs = labels - shift
set_xticks(locs, labels)
return surv
plot_right(change);
def plot_left(sample):
Plot the left tail.
shift = np.max(sample)
left_tail = shift - sample
surv = tail_plot(left_tail)
samples = resample(left_tail, 101)
plot_surv_confidence(surv.index, samples, alpha=0.2)
decorate(title='Left tail of daily changes',
xlim=[7, 22],
ylim=[1e-5, 1.5])
plt.gca().invert_xaxis()
labels = np.array([-1, -3, -5, -7, -10])
locs = shift - labels
set_xticks(locs, labels)
return surv
plot_left(change);
plt.figure(figsize=(12, 4))
plt.subplot(1, 3, 1)
plot_left(change)
plt.subplot(1, 3, 2)
plot_middle(change)
plt.subplot(1, 3, 3)
plot_right(change)
plt.savefig('sp550.1.png', dpi=150)
Explanation: To compare data to a distribution, I like to look at CDFs
End of explanation
import pymc3 as pm
# Normal model
with pm.Model() as model:
μ = pm.Uniform('μ', lower=0, upper=10)
σ = pm.HalfNormal('σ', sd=10)
y = pm.Normal('y', mu=μ, sd=σ, observed=change)
trace = pm.sample(1000, tune=1000)
import pymc3 as pm
# Student T model
with pm.Model() as model:
μ = pm.Uniform('μ', lower=0, upper=60)
s = pm.HalfNormal('s', sd=10)
ν = pm.Exponential('ν', 1/1)
y = pm.StudentT('y', mu=μ, sd=s, nu=ν, observed=change)
trace = pm.sample(1000, tune=1000)
import arviz as az
az.plot_trace(trace[:1000], var_names = ['μ']);
az.plot_trace(trace[:1000], var_names = ['s']);
az.plot_trace(trace[:1000], var_names = ['ν']);
ppc = pm.sample_posterior_predictive(trace, samples=101, model=model)
samples = pd.DataFrame(np.transpose(ppc['y']))
samples.shape
plot_middle(change)
cdf_y = Cdf.from_seq(samples[0])
cdf_y.plot(label='Student t')
plt.legend();
surv = plot_right(change)
shift = np.min(change)
right_samples = pd.DataFrame(np.transpose(ppc['y'])) - shift
plot_surv_confidence(surv.index, right_samples,
alpha=0.2,
label='Student t')
plt.legend();
surv = plot_left(change)
shift = np.max(change)
left_samples = shift - pd.DataFrame(np.transpose(ppc['y']))
plot_surv_confidence(surv.index, left_samples,
alpha=0.2,
label='Student t')
plt.legend();
plt.figure(figsize=(12, 4))
plt.subplot(1, 3, 1)
surv = plot_left(change)
plot_surv_confidence(surv.index, left_samples,
alpha=0.2,
label='Student t')
plt.legend()
plt.subplot(1, 3, 2)
plot_middle(change)
cdf_y.plot(label='Student t')
plt.legend()
plt.subplot(1, 3, 3)
surv = plot_right(change)
plot_surv_confidence(surv.index, right_samples,
alpha=0.2,
label='Student t')
plt.legend()
plt.savefig('sp550.2.png', dpi=150)
Explanation: Bayesian analysis adapted from
https://towardsdatascience.com/bayesian-modeling-airlines-customer-service-twitter-response-time-74af893f02c0
End of explanation |
14,552 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Assignment 2
Create two models for the relationship between height and weight based on gender
Modify the code in Assignment 1 to ask for a person's gender as well as their height to produce an estimate of a person's weight using the models you created
Find the weights and use those in your function (i.e. don't generate a model each time)
Step1: For Female
Step2: For Male
Step3: Function | Python Code:
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
%matplotlib inline
import statsmodels.formula.api as smf
df = pd.read_csv('heights_weights_genders.csv')
df.head(3)
female_df = df[df['Gender'] == 'Female']
male_df = df[df['Gender'] == 'Male']
Explanation: Assignment 2
Create two models for the relationship between height and weight based on gender
Modify the code in Assignment 1 to ask for a person's gender as well as their height to produce an estimate of a person's weight using the models you created
Find the weights and use those in your function (i.e. don't generate a model each time)
End of explanation
female_df.describe()
lm = smf.ols(formula="Weight~Height",data=female_df).fit()
lm.params
female_intercept, female_slope = lm.params
female_df.plot(kind="scatter",x="Height",y="Weight")
plt.plot(female_df["Height"],female_slope*female_df["Height"]+female_intercept,"-",color="darkgrey")
plt.title('Correlation between height and weight for females')
plt.xlabel('Height (inches)')
plt.ylabel('Weight (lbs)')
Explanation: For Female:
End of explanation
male_df.describe()
lm = smf.ols(formula="Weight~Height",data=male_df).fit()
lm.params
male_intercept, male_slope = lm.params
male_df.plot(kind="scatter",x="Height",y="Weight")
plt.plot(male_df["Height"],male_slope*male_df["Height"]+male_intercept,"-",color="darkgrey")
plt.title('Correlation between height and weight for males')
plt.xlabel('Height (inches)')
plt.ylabel('Weight (lbs)')
Explanation: For Male:
End of explanation
gender = input('Male or female? ')
if gender == 'female' or gender == 'Female':
height = int(input('Height (in inches): '))
weight = female_slope * height + female_intercept
print('If a person is female and ' + str(height) + ' inches tall, they probably weigh ' + str(round(weight,2)) + ' pounds.')
elif gender == 'male' or gender == 'Male':
height = int(input('Height (in inches): '))
weight = male_slope * height + male_intercept
print('If a person is male and ' + str(height) + ' inches tall, they probably weigh ' + str(round(weight,2)) + ' pounds.')
else:
print("Input error.")
gender = input('Male or female? ')
if gender == 'female' or gender == 'Female':
height = int(input('Height (in inches): '))
weight = 5.994047 * height - 246.013266
print('If a person is female and ' + str(height) + ' inches tall, they probably weigh ' + str(round(weight,2)) + ' pounds.')
elif gender == 'male' or gender == 'Male':
height = int(input('Height (in inches): '))
weight = 5.961774 * height - 224.498841
print('If a person is male and ' + str(height) + ' inches tall, they probably weigh ' + str(round(weight,2)) + ' pounds.')
else:
print("Input error.")
Explanation: Function:
End of explanation |
14,553 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Create a Vertex AI Feature Store Using the SDK
Learning objectives
In this notebook, you learn how to
Step1: Restart the kernel
After you install the SDK, you need to restart the notebook kernel so it can find the packages. You can restart kernel from Kernel -> Restart Kernel, or running the following
Step2: Set up your Google Cloud project
The following steps are required, regardless of your notebook environment.
Enable the Vertex AI API and Compute Engine API.
Enter your project ID in the cell below. Then run the cell to make sure the
Cloud SDK uses the right project for all the commands in this notebook.
Note
Step3: Otherwise, set your project ID here.
Step4: Import libraries and define constants
Step5: Terminology and Concept
Featurestore Data model
Vertex AI Feature Store organizes data with the following 3 important hierarchical concepts
Step6: Use the function call below to retrieve a Featurestore and check that it has been created.
Step7: Create Entity Type
Entity types can be created within the Featurestore class. Below, create the Users entity type and Movies entity type. A process log will be printed out.
Step8: To retrieve an entity type or check that it has been created use the get_entity_type or list_entity_types methods on the Featurestore object.
Step9: Create Feature
Features can be created within each entity type. Add defining features to the Users entity type and Movies entity type by using the create_feature method.
Step10: Use the list_features method to list all the features of a given entity type.
Step11: Search created features
While the list_features method allows you to easily view all features of a single
entity type, the search method in the Feature class searches across all featurestores and entity types in a given location (such as us-central1), and returns a list of features. This can help you discover features that were created by someone else.
You can query based on feature properties including feature ID, entity type ID, and feature description. You can also limit results by filtering on a specific featurestore, feature value type, and/or labels. Some search examples are shown below.
Search for all features within a featurestore with the code snippet below.
Step12: Now, narrow down the search to features that are of type DOUBLE.
Step13: Or, limit the search results to features with specific keywords in their ID and type.
Step14: Import Feature Values
You need to import feature values before you can use them for online/offline serving. In this step, you learn how to import feature values by ingesting the values from GCS (Google Cloud Storage). You can also import feature values from BigQuery or a Pandas dataframe.
Source Data Format and Layout
BigQuery table/Avro/CSV are supported as input data types. No matter what format you are using, each imported entity must have an ID; also, each entity can optionally have a timestamp, specifying when the feature values are generated. This notebook uses Avro as an input, located at this public bucket. The Avro schemas are as follows
Step15: Import feature values for Movies entity type
Similarly, import feature values for the Movies entity type into the featurestore.
Step16: Get online predictions from your model
Online serving
lets you serve feature values for small batches of entities. It's designed for latency-sensitive service, such as online model prediction. For example, for a movie service, you might want to quickly show movies that the current user would most likely watch.
Read one entity per request
With the Python SDK, it is easy to read feature values of one entity. By default, the SDK will return the latest value of each feature, meaning the feature values with the most recent timestamp.
To read feature values, specify the entity type ID and features to read. By default all the features of an entity type will be selected. The response will output and display the selected entity type ID and the selected feature values as a Pandas dataframe.
Step17: Read multiple entities per request
To read feature values from multiple entities, specify the different entity type IDs. By default all the features of an entity type will be selected. Note that fetching only a small number of entities is recommended when using this SDK due to its latency-sensitive nature.
Step18: Now that you have learned how to fetch imported feature values for online serving, the next step is learning how to use imported feature values for offline use cases.
Get batch predictions from your model
Batch serving is used to fetch a large batch of feature values for high-throughput, and is typically used for training a model or batch prediction. In this section, you learn how to prepare for training examples by using the Featurestore's batch serve function.
Use case
The task is to prepare a training dataset to train a model, which predicts if a given user will watch a given movie. To achieve this, you need 2 sets of input
Step19: Batch Read Feature Values
Assemble the request which specify the following info
Step20: After the LRO finishes, you should be able to see the result in the BigQuery console, as a new table under the BigQuery dataset created earlier.
Cleaning up
To clean up all Google Cloud resources used in this project, you can delete the Google Cloud
project you used for the tutorial.
You can also keep the project but delete the featurestore and the BigQuery dataset by running the code below | Python Code:
import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
# Google Cloud Notebook requires dependencies to be installed with '--user'
USER_FLAG = ""
if IS_GOOGLE_CLOUD_NOTEBOOK:
USER_FLAG = "--user"
# Install necessary dependencies
! pip install {USER_FLAG} --upgrade google-cloud-aiplatform
Explanation: Create a Vertex AI Feature Store Using the SDK
Learning objectives
In this notebook, you learn how to:
Create feature store, entity type, and feature resources.
Import your features into Vertex AI Feature Store.
Serve online prediction requests using the imported features.
Access imported features in offline jobs, such as training jobs.
Overview
This notebook introduces Vertex AI Feature Store, a managed cloud service for machine learning engineers and data scientists to store, serve, manage and share machine learning features at a large scale.
This notebook assumes that you understand basic Google Cloud concepts such as Project, Storage and Vertex AI. Some machine learning knowledge is also helpful but not required.
Dataset
This notebook uses a movie recommendation dataset as an example throughout all the sessions. The task is to train a model to predict if a user is going to watch a movie and serve this model online.
Each learning objective will correspond to a #TODO in this student lab notebook -- try to complete this notebook first and then review the solution notebook
Before you begin
Install additional packages
For this notebook, you need the Vertex SDK for Python.
End of explanation
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
Explanation: Restart the kernel
After you install the SDK, you need to restart the notebook kernel so it can find the packages. You can restart kernel from Kernel -> Restart Kernel, or running the following:
End of explanation
import os
PROJECT_ID = "qwiklabs-gcp-01-5dbc4e7474d8" # Replace this with your Project ID
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
Explanation: Set up your Google Cloud project
The following steps are required, regardless of your notebook environment.
Enable the Vertex AI API and Compute Engine API.
Enter your project ID in the cell below. Then run the cell to make sure the
Cloud SDK uses the right project for all the commands in this notebook.
Note: Jupyter runs lines prefixed with ! as shell commands, and it interpolates Python variables prefixed with $ into these commands.
Set your project ID
If you don't know your project ID, you may be able to get your project ID using gcloud.
End of explanation
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "qwiklabs-gcp-01-5dbc4e7474d8" # Replace this with your Project ID
print("Project ID: ", PROJECT_ID)
Explanation: Otherwise, set your project ID here.
End of explanation
# Import necessary libraries and define required constants
from google.cloud import aiplatform
from google.cloud.aiplatform import Feature, Featurestore
REGION = "us-central1" # Replace this with your region
if REGION == "[your-region]":
REGION = "us-central1"
FEATURESTORE_ID = "movie_prediction"
INPUT_CSV_FILE = "gs://cloud-samples-data-us-central1/vertex-ai/feature-store/datasets/movie_prediction.csv"
ONLINE_STORE_FIXED_NODE_COUNT = 1
aiplatform.init(project=PROJECT_ID, location=REGION)
Explanation: Import libraries and define constants
End of explanation
# Create the Featurestore
fs = # TODO 1a: Your code goes here(
featurestore_id=FEATURESTORE_ID,
online_store_fixed_node_count=ONLINE_STORE_FIXED_NODE_COUNT,
project=PROJECT_ID,
location=REGION,
sync=True,
)
Explanation: Terminology and Concept
Featurestore Data model
Vertex AI Feature Store organizes data with the following 3 important hierarchical concepts:
Featurestore -> Entity type -> Feature
* Featurestore: the place to store your features
* Entity type: under a Featurestore, an Entity type describes an object to be modeled, real one or virtual one.
* Feature: under an Entity type, a Feature describes an attribute of the Entity type
In the movie prediction example, you will create a featurestore called movie_prediction. This store has 2 entity types: users and movies. The users entity type has the age, gender, and liked_genres features. The movies entity type has the titles, genres, and average rating features.
Create Featurestore and Define Schemas
Create Featurestore
The method to create a Featurestore returns a
long-running operation (LRO). An LRO starts an asynchronous job. LROs are returned for other API
methods too, such as updating or deleting a featurestore. Running the code cell will create a featurestore and print the process log.
End of explanation
fs = Featurestore(
featurestore_name=FEATURESTORE_ID,
project=PROJECT_ID,
location=REGION,
)
print(fs.gca_resource)
Explanation: Use the function call below to retrieve a Featurestore and check that it has been created.
End of explanation
# Create users entity type
users_entity_type = fs.create_entity_type(
entity_type_id="users",
description="Users entity",
)
# Create movies entity type
movies_entity_type = fs.create_entity_type(
entity_type_id="movies",
description="Movies entity",
)
Explanation: Create Entity Type
Entity types can be created within the Featurestore class. Below, create the Users entity type and Movies entity type. A process log will be printed out.
End of explanation
users_entity_type = fs.get_entity_type(entity_type_id="users")
movies_entity_type = fs.get_entity_type(entity_type_id="movies")
print(users_entity_type)
print(movies_entity_type)
fs.list_entity_types()
Explanation: To retrieve an entity type or check that it has been created use the get_entity_type or list_entity_types methods on the Featurestore object.
End of explanation
# to create features one at a time use
users_feature_age = users_entity_type.create_feature(
feature_id="age",
value_type="INT64",
description="User age",
)
users_feature_gender = users_entity_type.create_feature(
feature_id="gender",
value_type="STRING",
description="User gender",
)
users_feature_liked_genres = users_entity_type.create_feature(
feature_id="liked_genres",
value_type="STRING_ARRAY",
description="An array of genres this user liked",
)
Explanation: Create Feature
Features can be created within each entity type. Add defining features to the Users entity type and Movies entity type by using the create_feature method.
End of explanation
users_entity_type.list_features()
movies_feature_configs = {
"title": {
"value_type": "STRING",
"description": "The title of the movie",
},
"genres": {
"value_type": "STRING",
"description": "The genre of the movie",
},
"average_rating": {
"value_type": "DOUBLE",
"description": "The average rating for the movie, range is [1.0-5.0]",
},
}
# Create Features
movie_features = # TODO 1b: Your code goes here(
feature_configs=movies_feature_configs,
)
Explanation: Use the list_features method to list all the features of a given entity type.
End of explanation
my_features = Feature.search(query="featurestore_id={}".format(FEATURESTORE_ID))
my_features
Explanation: Search created features
While the list_features method allows you to easily view all features of a single
entity type, the search method in the Feature class searches across all featurestores and entity types in a given location (such as us-central1), and returns a list of features. This can help you discover features that were created by someone else.
You can query based on feature properties including feature ID, entity type ID, and feature description. You can also limit results by filtering on a specific featurestore, feature value type, and/or labels. Some search examples are shown below.
Search for all features within a featurestore with the code snippet below.
End of explanation
double_features = Feature.search(
query="value_type=DOUBLE AND featurestore_id={}".format(FEATURESTORE_ID)
)
double_features[0].gca_resource
Explanation: Now, narrow down the search to features that are of type DOUBLE.
End of explanation
title_features = Feature.search(
query="feature_id:title AND value_type=STRING AND featurestore_id={}".format(
FEATURESTORE_ID
)
)
title_features[0].gca_resource
Explanation: Or, limit the search results to features with specific keywords in their ID and type.
End of explanation
# Specify the required details of users entity
USERS_FEATURES_IDS = [feature.name for feature in users_entity_type.list_features()]
USERS_FEATURE_TIME = "update_time"
USERS_ENTITY_ID_FIELD = "user_id"
USERS_GCS_SOURCE_URI = (
"gs://cloud-samples-data-us-central1/vertex-ai/feature-store/datasets/users.avro"
)
GCS_SOURCE_TYPE = "avro"
WORKER_COUNT = 1
print(USERS_FEATURES_IDS)
# Import feature values for the users entity type
users_entity_type.ingest_from_gcs(
feature_ids=USERS_FEATURES_IDS,
feature_time=USERS_FEATURE_TIME,
entity_id_field=USERS_ENTITY_ID_FIELD,
gcs_source_uris=USERS_GCS_SOURCE_URI,
gcs_source_type=GCS_SOURCE_TYPE,
worker_count=WORKER_COUNT,
sync=False,
)
Explanation: Import Feature Values
You need to import feature values before you can use them for online/offline serving. In this step, you learn how to import feature values by ingesting the values from GCS (Google Cloud Storage). You can also import feature values from BigQuery or a Pandas dataframe.
Source Data Format and Layout
BigQuery table/Avro/CSV are supported as input data types. No matter what format you are using, each imported entity must have an ID; also, each entity can optionally have a timestamp, specifying when the feature values are generated. This notebook uses Avro as an input, located at this public bucket. The Avro schemas are as follows:
For the Users entity:
schema = {
"type": "record",
"name": "User",
"fields": [
{
"name":"user_id",
"type":["null","string"]
},
{
"name":"age",
"type":["null","long"]
},
{
"name":"gender",
"type":["null","string"]
},
{
"name":"liked_genres",
"type":{"type":"array","items":"string"}
},
{
"name":"update_time",
"type":["null",{"type":"long","logicalType":"timestamp-micros"}]
},
]
}
For the Movies entity:
schema = {
"type": "record",
"name": "Movie",
"fields": [
{
"name":"movie_id",
"type":["null","string"]
},
{
"name":"average_rating",
"type":["null","double"]
},
{
"name":"title",
"type":["null","string"]
},
{
"name":"genres",
"type":["null","string"]
},
{
"name":"update_time",
"type":["null",{"type":"long","logicalType":"timestamp-micros"}]
},
]
}
Import feature values for Users entity type
When importing, specify the following in your request:
IDs of the features to import
Data source URI
Data source format: BigQuery Table/Avro/CSV
End of explanation
# Specify the required details of movies entity
MOVIES_FEATURES_IDS = [feature.name for feature in movies_entity_type.list_features()]
MOVIES_FEATURE_TIME = "update_time"
MOVIES_ENTITY_ID_FIELD = "movie_id"
MOVIES_GCS_SOURCE_URI = (
"gs://cloud-samples-data-us-central1/vertex-ai/feature-store/datasets/movies.avro"
)
GCS_SOURCE_TYPE = "avro"
WORKER_COUNT = 1
print(MOVIES_FEATURES_IDS)
# Import feature values for the Movies entity type
# TODO 2: Your code goes here(
feature_ids=MOVIES_FEATURES_IDS,
feature_time=MOVIES_FEATURE_TIME,
entity_id_field=MOVIES_ENTITY_ID_FIELD,
gcs_source_uris=MOVIES_GCS_SOURCE_URI,
gcs_source_type=GCS_SOURCE_TYPE,
worker_count=WORKER_COUNT,
sync=False,
)
Explanation: Import feature values for Movies entity type
Similarly, import feature values for the Movies entity type into the featurestore.
End of explanation
# Read feature value of user entity by using entity ID
users_entity_type.read(entity_ids="bob")
# Read feature value of movies entity by specifying the entity type ID and features ID
# TODO 3: Your code goes here
Explanation: Get online predictions from your model
Online serving
lets you serve feature values for small batches of entities. It's designed for latency-sensitive service, such as online model prediction. For example, for a movie service, you might want to quickly show movies that the current user would most likely watch.
Read one entity per request
With the Python SDK, it is easy to read feature values of one entity. By default, the SDK will return the latest value of each feature, meaning the feature values with the most recent timestamp.
To read feature values, specify the entity type ID and features to read. By default all the features of an entity type will be selected. The response will output and display the selected entity type ID and the selected feature values as a Pandas dataframe.
End of explanation
users_entity_type.read(entity_ids=["bob", "alice"])
movies_entity_type.read(
entity_ids=["movie_02", "movie_03", "movie_04"], feature_ids=["title, genres"]
)
Explanation: Read multiple entities per request
To read feature values from multiple entities, specify the different entity type IDs. By default all the features of an entity type will be selected. Note that fetching only a small number of entities is recommended when using this SDK due to its latency-sensitive nature.
End of explanation
# Import necessary libraries
from datetime import datetime
from google.cloud import bigquery
# Output dataset
DESTINATION_DATA_SET = "movie_predictions" # @param {type:"string"}
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
DESTINATION_DATA_SET = "{prefix}_{timestamp}".format(
prefix=DESTINATION_DATA_SET, timestamp=TIMESTAMP
)
# Output table. Make sure that the table does NOT already exist; the BatchReadFeatureValues API cannot overwrite an existing table
DESTINATION_TABLE_NAME = "training_data" # @param {type:"string"}
DESTINATION_PATTERN = "bq://{project}.{dataset}.{table}"
DESTINATION_TABLE_URI = DESTINATION_PATTERN.format(
project=PROJECT_ID, dataset=DESTINATION_DATA_SET, table=DESTINATION_TABLE_NAME
)
# Create dataset
client = bigquery.Client(project=PROJECT_ID)
dataset_id = "{}.{}".format(client.project, DESTINATION_DATA_SET)
dataset = bigquery.Dataset(dataset_id)
dataset.location = REGION
dataset = client.create_dataset(dataset)
print("Created dataset {}.{}".format(client.project, dataset.dataset_id))
Explanation: Now that you have learned how to fetch imported feature values for online serving, the next step is learning how to use imported feature values for offline use cases.
Get batch predictions from your model
Batch serving is used to fetch a large batch of feature values for high-throughput, and is typically used for training a model or batch prediction. In this section, you learn how to prepare for training examples by using the Featurestore's batch serve function.
Use case
The task is to prepare a training dataset to train a model, which predicts if a given user will watch a given movie. To achieve this, you need 2 sets of input:
Features: you already imported into the featurestore.
Labels: the ground-truth data recorded that user X has watched movie Y.
To be more specific, the ground-truth observation is described in Table 1 and the desired training dataset is described in Table 2. Each row in Table 2 is a result of joining the imported feature values from Vertex AI Feature Store according to the entity IDs and timestamps in Table 1. In this example, the age, gender and liked_genres features from users and
the titles, genres and average_rating features from movies are chosen to train the model. Note that only positive examples are shown in these 2 tables, i.e., you can imagine there is a label column whose values are all True.
batch_serve_to_bq takes Table 1 as
input, joins all required feature values from the featurestore, and returns Table 2 for training.
<h4 align="center">Table 1. Ground-truth data</h4>
users | movies | timestamp
----- | -------- | --------------------
alice | Cinema Paradiso | 2019-11-01T00:00:00Z
bob | The Shining | 2019-11-15T18:09:43Z
... | ... | ...
<h4 align="center">Table 2. Expected training data generated by using batch serve</h4>
timestamp | entity_type_users | age | gender | liked_genres | entity_type_movies | title | genre | average_rating
-------------------- | ----------------- | --------------- | ---------------- | -------------------- | - | -------- | --------- | -----
2019-11-01T00:00:00Z | bob | 35 | M | [Action, Crime] | movie_02 | The Shining | Horror | 4.8
2019-11-01T00:00:00Z | alice | 55 | F | [Drama, Comedy] | movie_03 | Cinema Paradiso | Romance | 4.5 |
... | ... | ... | ... | ... | ... | ... | ... | ...
Why timestamp?
Note that there is a timestamp column in Table 2. This indicates the time when the ground-truth was observed. This is to avoid data inconsistency.
For example, the 2nd row of Table 2 indicates that user alice watched movie Cinema Paradiso on 2019-11-01T00:00:00Z. The featurestore keeps feature values for all timestamps but fetches feature values only at the given timestamp during batch serving. On that day, Alice might have been 54 years old, but now Alice might be 56; featurestore returns age=54 as Alice's age, instead of age=56, because that is the value of the feature at the observation time. Similarly, other features might be time-variant as well, such as liked_genres.
Create BigQuery dataset for output
You need a BigQuery dataset to host the output data in us-central1. Input the name of the dataset you want to create and specify the name of the table you want to store the output created later. These will be used in the next section.
Make sure that the table name does NOT already exist.
End of explanation
SERVING_FEATURE_IDS = {
# to choose all the features use 'entity_type_id: ['*']'
"users": ["age", "gender", "liked_genres"],
"movies": ["title", "average_rating", "genres"],
}
# Batch read the feature values
# TODO 4: Your code goes here(
bq_destination_output_uri=DESTINATION_TABLE_URI,
serving_feature_ids=SERVING_FEATURE_IDS,
read_instances_uri=INPUT_CSV_FILE,
)
Explanation: Batch Read Feature Values
Assemble the request which specify the following info:
Where is the label data, i.e., Table 1.
Which features are read, i.e., the column names in Table 2.
The output is stored in the BigQuery table.
End of explanation
# Delete Featurestore
fs.delete(force=True)
# Delete BigQuery dataset
client = bigquery.Client(project=PROJECT_ID)
client.delete_dataset(
DESTINATION_DATA_SET, delete_contents=True, not_found_ok=True
) # Make an API request.
print("Deleted dataset '{}'.".format(DESTINATION_DATA_SET))
Explanation: After the LRO finishes, you should be able to see the result in the BigQuery console, as a new table under the BigQuery dataset created earlier.
Cleaning up
To clean up all Google Cloud resources used in this project, you can delete the Google Cloud
project you used for the tutorial.
You can also keep the project but delete the featurestore and the BigQuery dataset by running the code below:
End of explanation |
14,554 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<img src="fuellogo.svg" style="float
Step1: declare constants
Step2: declare free variables
Step3: Check the vector constraints
Step4: Form the optimization problem
In the 3-element vector variables, indices 0, 1, and 2 are the outbound, return and sprint flights.
Step5: Design an airplane
Step6: The "local model" is the power-law tangent to the Pareto frontier, gleaned from sensitivities.
Step7: plot design frontiers
Step10: Interactive analysis
Let's investigate it with the cadtoons library. Running cadtoon.py flightconditions.svg in this folder creates an interactive SVG graphic for us.
First, import the functions to display HTML in iPython Notebook, and the ractivejs library.
Step12: This concludes the aircraft example. Try playing around with the sliders up above until you're bored; then check out one of the other examples. Thanks for reading!
Import CSS for nbviewer
If you have a local iPython stylesheet installed, this will add it to the iPython Notebook | Python Code:
import numpy as np
from gpkit.shortcuts import *
import gpkit.interactive
%matplotlib inline
Explanation: <img src="fuellogo.svg" style="float:left; padding-right:1em;" width=150 />
AIRPLANE FUEL
Minimize fuel burn for a plane that can sprint and land quickly.
Set up the modelling environment
First we'll import GPkit and turn on $\LaTeX$ printing for GPkit variables and equations.
End of explanation
N_lift = Var("N_{lift}", 6.0, "-", "Wing loading multiplier")
pi = Var("\\pi", np.pi, "-", "Half of the circle constant")
sigma_max = Var("\\sigma_{max}", 250e6, "Pa", "Allowable stress, 6061-T6")
sigma_maxshear = Var("\\sigma_{max,shear}", 167e6, "Pa", "Allowable shear stress")
g = Var("g", 9.8, "m/s^2", "Gravitational constant")
w = Var("w", 0.5, "-", "Wing-box width/chord")
r_h = Var("r_h", 0.75, "-", "Wing strut taper parameter")
f_wadd = Var("f_{wadd}", 2, "-", "Wing added weight fraction")
W_fixed = Var("W_{fixed}", 14.7e3, "N", "Fixed weight")
C_Lmax = Var("C_{L,max}", 1.5, "-", "Maximum C_L, flaps down")
rho = Var("\\rho", 0.91, "kg/m^3", "Air density, 3000m")
rho_sl = Var("\\rho_{sl}", 1.23, "kg/m^3", "Air density, sea level")
rho_alum = Var("\\rho_{alum}", 2700, "kg/m^3", "Density of aluminum")
mu = Var("\\mu", 1.69e-5, "kg/m/s", "Dynamic viscosity, 3000m")
e = Var("e", 0.95, "-", "Wing spanwise efficiency")
A_prop = Var("A_{prop}", 0.785, "m^2", "Propeller disk area")
eta_eng = Var("\\eta_{eng}", 0.35, "-", "Engine efficiency")
eta_v = Var("\\eta_v", 0.85, "-", "Propeller viscous efficiency")
h_fuel = Var("h_{fuel}", 42e6, "J/kg", "fuel heating value")
V_sprint_reqt = Var("V_{sprintreqt}", 150, "m/s", "sprint speed requirement")
W_pay = Var("W_{pay}", 500*9.81, "N")
R_min = Var("R_{min}", 1e6, "m", "Minimum airplane range")
V_stallmax = Var("V_{stall,max}", 40, "m/s", "Stall speed")
# sweep variables
R_min = Var("R_{min}", 5e6, "m", "Minimum airplane range")
V_stallmax = Var("V_{stall,max}", 40, "m/s", "Stall speed")
Explanation: declare constants
End of explanation
V = Vec(3, "V", "m/s", "Flight speed")
C_L = Vec(3, "C_L", "-", "Wing lift coefficent")
C_D = Vec(3, "C_D", "-", "Wing drag coefficent")
C_Dfuse = Vec(3, "C_{D_{fuse}}", "-", "Fuselage drag coefficent")
C_Dp = Vec(3, "C_{D_p}", "-", "drag model parameter")
C_Di = Vec(3, "C_{D_i}", "-", "drag model parameter")
T = Vec(3, "T", "N", "Thrust force")
Re = Vec(3, "Re", "-", "Reynold's number")
W = Vec(3, "W", "N", "Aircraft weight")
eta_i = Vec(3, "\\eta_i", "-", "Aircraft efficiency")
eta_prop = Vec(3, "\\eta_{prop}", "-")
eta_0 = Vec(3, "\\eta_0", "-")
W_fuel = Vec(2, "W_{fuel}", "N", "Fuel weight")
z_bre = Vec(2, "z_{bre}", "-")
S = Var("S", "m^2", "Wing area")
R = Var("R", "m", "Airplane range")
A = Var("A", "-", "Aspect Ratio")
I_cap = Var("I_{cap}", "m^4", "Spar cap area moment of inertia per unit chord")
M_rbar = Var("\\bar{M}_r", "-")
P_max = Var("P_{max}", "W")
V_stall = Var("V_{stall}", "m/s")
nu = Var("\\nu", "-")
p = Var("p", "-")
q = Var("q", "-")
tau = Var("\\tau", "-")
t_cap = Var("t_{cap}", "-")
t_web = Var("t_{web}", "-")
W_cap = Var("W_{cap}", "N")
W_zfw = Var("W_{zfw}", "N", "Zero fuel weight")
W_eng = Var("W_{eng}", "N")
W_mto = Var("W_{mto}", "N", "Maximum takeoff weight")
W_pay = Var("W_{pay}", "N")
W_tw = Var("W_{tw}", "N")
W_web = Var("W_{web}", "N")
W_wing = Var("W_{wing}", "N")
Explanation: declare free variables
End of explanation
W == 0.5*rho*C_L*S*V**2
Explanation: Check the vector constraints:
End of explanation
steady_level_flight = (W == 0.5*rho*C_L*S*V**2,
T >= 0.5*rho*C_D*S*V**2,
Re == (rho/mu)*V*(S/A)**0.5)
landing_fc = (W_mto <= 0.5*rho_sl*V_stall**2*C_Lmax*S,
V_stall <= V_stallmax)
sprint_fc = (P_max >= T[2]*V[2]/eta_0[2],
V[2] >= V_sprint_reqt)
drag_model = (C_D >= (0.05/S)*gpkit.units.m**2 +C_Dp + C_L**2/(pi*e*A),
1 >= (2.56*C_L**5.88/(Re**1.54*tau**3.32*C_Dp**2.62) +
3.8e-9*tau**6.23/(C_L**0.92*Re**1.38*C_Dp**9.57) +
2.2e-3*Re**0.14*tau**0.033/(C_L**0.01*C_Dp**0.73) +
1.19e4*C_L**9.78*tau**1.76/(Re*C_Dp**0.91) +
6.14e-6*C_L**6.53/(Re**0.99*tau**0.52*C_Dp**5.19)))
propulsive_efficiency = (eta_0 <= eta_eng*eta_prop,
eta_prop <= eta_i*eta_v,
4*eta_i + T*eta_i**2/(0.5*rho*V**2*A_prop) <= 4)
# 4th order taylor approximation for e^x
z_bre_sum = 0
for i in range(1,5):
z_bre_sum += z_bre**i/np.math.factorial(i)
range_constraints = (R >= R_min,
z_bre >= g*R*T[:2]/(h_fuel*eta_0[:2]*W[:2]),
W_fuel/W[:2] >= z_bre_sum)
punits = gpkit.units.parse_expression('N/W^0.8083')
weight_relations = (W_pay >= 500*g*gpkit.units.kg,
W_tw >= W_fixed + W_pay + W_eng,
W_zfw >= W_tw + W_wing,
W_eng >= 0.0372*P_max**0.8083 * punits,
W_wing/f_wadd >= W_cap + W_web,
W[0] >= W_zfw + W_fuel[1],
W[1] >= W_zfw,
W_mto >= W[0] + W_fuel[0],
W[2] == W[0])
wunits = gpkit.units.m**-4
munits = gpkit.units.parse_expression('Pa*m**6')
wing_structural_model = (2*q >= 1 + p,
p >= 2.2,
tau <= 0.25,
M_rbar >= W_tw*A*p/(24*gpkit.units.N),
.92**2/2.*w*tau**2*t_cap >= I_cap * wunits + .92*w*tau*t_cap**2,
8 >= N_lift*M_rbar*A*q**2*tau/S/I_cap/sigma_max * munits,
12 >= A*W_tw*N_lift*q**2/tau/S/t_web/sigma_maxshear,
nu**3.94 >= .86*p**-2.38 + .14*p**0.56,
W_cap >= 8*rho_alum*g*w*t_cap*S**1.5*nu/3/A**.5,
W_web >= 8*rho_alum*g*r_h*tau*t_web*S**1.5*nu/3/A**.5
)
eqns = (weight_relations + range_constraints + propulsive_efficiency
+ drag_model + steady_level_flight + landing_fc + sprint_fc + wing_structural_model)
m = gpkit.Model(W_fuel.sum(), eqns)
Explanation: Form the optimization problem
In the 3-element vector variables, indices 0, 1, and 2 are the outbound, return and sprint flights.
End of explanation
m.interact()
Explanation: Design an airplane
End of explanation
m.solution["localmodel"]
Explanation: The "local model" is the power-law tangent to the Pareto frontier, gleaned from sensitivities.
End of explanation
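# Note (an interpretation, not stated in the original notebook): the local model is a
# monomial approximation of the optimal cost in the fixed constants, roughly
#     cost ~ c * prod_i(u_i ** s_i),
# where each exponent s_i is the log-log sensitivity of the optimum to the constant u_i.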
from gpkit.interactive.plotting import sensitivity_plot
_ = sensitivity_plot(m)
Explanation: plot design frontiers
End of explanation
from string import Template
fuelupdate_js = Template(
var W_eng = $W_eng,
lam = $lam
fuel.shearinner.scalex = 1-$tcap*10
fuel.shearinner.scaley = 1-$tweb*100
fuel.airfoil.scaley = $tau/0.13
fuel.fuse.scalex = $W_fus/24000
fuel.wing.scalex = $b/2/14
fuel.wing.scaley = $cr*1.21
)
def fuelupdate_py(sol):
varstrs = ("p", "S", "A", "t_{cap}", "t_{web}", "w",
"\\tau", "W_{eng}", "W_{mto}", "W_{wing}")
p, S, A, t_cap, t_web, w, tau, W_eng, W_mto, W_wing = sol.getvars(*varstrs)
lam = 0.5*(p-1)
return fuelupdate_js.substitute(lam = lam,
b = (S*A)**0.5,
cr = 2/(1+lam)*(S/A)**0.5,
tcap = t_cap/tau,
tweb = t_web/w,
tau = tau,
W_eng = W_eng,
W_fus = W_mto - W_wing - W_eng)
fuelconstraint_js =
fuel.engine1.scale = Math.pow(W_eng/3000, 2/3)
fuel.engine2.scale = Math.pow(W_eng/3000, 2/3)
fuel.engine1.y = 6*lam
fuel.engine2.y = 6*lam
fuel.wingrect.scaley = 1-lam
fuel.wingrect.y = -6 + 5*lam
fuel.wingtaper.scaley = lam
fuel.wingtaper.y = 5*lam
gpkit.interactive.showcadtoon("fuel",
"position:absolute; height:0; right:0; top:24em;")
gpkit.interactive.ractorpy(m, fuelupdate_py,
{"V_{stall,max}": (20, 50, 0.3),
"R_{min}": (1e6, 1e7, 1e5),
"V_{sprintreqt}": (100, 200, 1)},
fuelconstraint_js)
gpkit.interactive.ractorjs("fuel", m, fuelupdate_py,
{"V_{stall,max}": (20, 50, 3),
"R_{min}": (1e6, 1e7, 2e6),
"V_{sprintreqt}": (100, 200, 20)},
fuelconstraint_js)
Explanation: Interactive analysis
Let's investigate it with the cadtoons library. Running cadtoon.py flightconditions.svg in this folder creates an interactive SVG graphic for us.
First, import the functions to display HTML in iPython Notebook, and the ractivejs library.
End of explanation
from IPython import utils
from IPython.core.display import HTML
import os
def css_styling():
Load default custom.css file from ipython profile
base = utils.path.get_ipython_dir()
csspath = os.path.join(base,'profile_default/static/custom/custom.css')
styles = "<style>\n%s\n</style>" % (open(csspath,'r').read())
return HTML(styles)
css_styling()
Explanation: This concludes the aircraft example. Try playing around with the sliders up above until you're bored; then check out one of the other examples. Thanks for reading!
Import CSS for nbviewer
If you have a local iPython stylesheet installed, this will add it to the iPython Notebook:
End of explanation |
14,555 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
1. Bitcoin and Cryptocurrencies
Step1: 2. Discard the cryptocurrencies without a market capitalization
<p>Why do the <code>count()</code> for <code>id</code> and <code>market_cap_usd</code> differ above? It is because some cryptocurrencies listed in coinmarketcap.com have no known market capitalization, this is represented by <code>NaN</code> in the data, and <code>NaN</code>s are not counted by <code>count()</code>. These cryptocurrencies are of little interest to us in this analysis, so they are safe to remove.</p>
Step2: 3. How big is Bitcoin compared with the rest of the cryptocurrencies?
<p>At the time of writing, Bitcoin is under serious competition from other projects, but it is still dominant in market capitalization. Let's plot the market capitalization for the top 10 coins as a barplot to better visualize this.</p>
Step3: 4. Making the plot easier to read and more informative
<p>While the plot above is informative enough, it can be improved. Bitcoin is too big, and the other coins are hard to distinguish because of this. Instead of the percentage, let's use a log<sup>10</sup> scale of the "raw" capitalization. Plus, let's use color to group similar coins and make the plot more informative<sup>1</sup>. </p>
<p>For the colors rationale
Step4: 5. What is going on?! Volatility in cryptocurrencies
<p>The cryptocurrencies market has been spectacularly volatile since the first exchange opened. This notebook didn't start with a big, bold warning for nothing. Let's explore this volatility a bit more! We will begin by selecting and plotting the 24 hours and 7 days percentage change, which we already have available.</p>
Step5: 6. Well, we can already see that things are a bit crazy
<p>It seems you can lose a lot of money quickly on cryptocurrencies. Let's plot the top 10 biggest gainers and top 10 losers in market capitalization.</p>
Step6: 7. Ok, those are... interesting. Let's check the weekly Series too.
<p>800% daily increase?! Why are we doing this tutorial and not buying random coins?<sup>1</sup></p>
<p>After calming down, let's reuse the function defined above to see what is going weekly instead of daily.</p>
<p><em><sup>1</sup> Please take a moment to understand the implications of the red plots on how much value some cryptocurrencies lose in such short periods of time</em></p>
Step7: 8. How small is small?
<p>The names of the cryptocurrencies above are quite unknown, and there is a considerable fluctuation between the 1 and 7 days percentage changes. As with stocks, and many other financial products, the smaller the capitalization, the bigger the risk and reward. Smaller cryptocurrencies are less stable projects in general, and therefore even riskier investments than the bigger ones<sup>1</sup>. Let's classify our dataset based on Investopedia's capitalization <a href="https
Step8: 9. Most coins are tiny
<p>Note that many coins are not comparable to large companies in market cap, so let's divert from the original Investopedia definition by merging categories.</p>
<p><em>This is all for now. Thanks for completing this project!</em></p> | Python Code:
# Importing pandas
import pandas as pd
# Importing matplotlib and setting aesthetics for plotting later.
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
plt.style.use('fivethirtyeight')
# Reading datasets/coinmarketcap_06122017.csv into pandas
dec6 = pd.read_csv("datasets/coinmarketcap_06122017.csv")
# Selecting the 'id' and the 'market_cap_usd' columns
market_cap_raw = dec6[["id", "market_cap_usd"]]
# Counting the number of values
market_cap_raw.count()
Explanation: 1. Bitcoin and Cryptocurrencies: Full dataset, filtering, and reproducibility
<p>Since the <a href="https://newfronttest.bitcoin.com/bitcoin.pdf">launch of Bitcoin in 2008</a>, hundreds of similar projects based on the blockchain technology have emerged. We call these cryptocurrencies (also coins or cryptos in the Internet slang). Some are extremely valuable nowadays, and others may have the potential to become extremely valuable in the future<sup>1</sup>. In fact, on the 6th of December of 2017, Bitcoin has a <a href="https://en.wikipedia.org/wiki/Market_capitalization">market capitalization</a> above $200 billion. </p>
<p><center>
<img src="https://assets.datacamp.com/production/project_82/img/bitcoint_market_cap_2017.png" style="width:500px"> <br>
<em>The astonishing increase of Bitcoin market capitalization in 2017.</em></center></p>
<p>*<sup>1</sup> <strong>WARNING</strong>: The cryptocurrency market is exceptionally volatile<sup>2</sup> and any money you put in might disappear into thin air. Cryptocurrencies mentioned here <strong>might be scams</strong> similar to <a href="https://en.wikipedia.org/wiki/Ponzi_scheme">Ponzi Schemes</a> or have many other issues (overvaluation, technical, etc.). <strong>Please do not mistake this for investment advice</strong>. *</p>
<p><em><sup>2</sup> <strong>Update on March 2020</strong>: Well, it turned out to be volatile indeed :D</em></p>
<p>That said, let's get to business. We will start with a CSV we conveniently downloaded on the 6th of December of 2017 using the coinmarketcap API (NOTE: The public API went private in 2020 and is no longer available) named <code>datasets/coinmarketcap_06122017.csv</code>. </p>
End of explanation
# Filtering out rows without a market capitalization
cap = market_cap_raw.query('market_cap_usd > 0')
# Counting the number of values again
cap.count()
Explanation: 2. Discard the cryptocurrencies without a market capitalization
<p>Why do the <code>count()</code> for <code>id</code> and <code>market_cap_usd</code> differ above? It is because some cryptocurrencies listed in coinmarketcap.com have no known market capitalization, this is represented by <code>NaN</code> in the data, and <code>NaN</code>s are not counted by <code>count()</code>. These cryptocurrencies are of little interest to us in this analysis, so they are safe to remove.</p>
End of explanation
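# Quick illustration (toy data, not from the original notebook): count() skips NaN,
# which is why the 'id' and 'market_cap_usd' counts differ for the raw data.
import numpy as np
import pandas as pd
toy = pd.DataFrame({"id": ["a", "b", "c"], "market_cap_usd": [1.0, np.nan, 3.0]})
toy.count() # id -> 3, market_cap_usd -> 2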
#Declaring these now for later use in the plots
TOP_CAP_TITLE = 'Top 10 market capitalization'
TOP_CAP_YLABEL = '% of total cap'
# Selecting the first 10 rows and setting the index
cap10 = cap.head(10).set_index(cap.id[:10])
# Calculating market_cap_perc
cap10 = cap10.assign(market_cap_perc = lambda x: (x.market_cap_usd / cap.market_cap_usd.sum()) * 100)
# Plotting the barplot with the title defined above
ax = cap10.plot.bar(x='id',y='market_cap_perc',title=TOP_CAP_TITLE)
# Annotating the y axis with the label defined above
ax.set_ylabel(TOP_CAP_YLABEL)
Explanation: 3. How big is Bitcoin compared with the rest of the cryptocurrencies?
<p>At the time of writing, Bitcoin is under serious competition from other projects, but it is still dominant in market capitalization. Let's plot the market capitalization for the top 10 coins as a barplot to better visualize this.</p>
End of explanation
# Colors for the bar plot
COLORS = ['orange', 'green', 'orange', 'cyan', 'cyan', 'blue', 'silver', 'orange', 'red', 'green']
# Plotting market_cap_usd as before but adding the colors and scaling the y-axis
ax = cap10.plot.bar(x='id',y='market_cap_perc',title=TOP_CAP_TITLE, color=COLORS, log=True)
# Annotating the y axis with 'USD'
ax.set_ylabel("USD")
# Final touch! Removing the xlabel as it is not very informative
ax.set_xlabel('')
Explanation: 4. Making the plot easier to read and more informative
<p>While the plot above is informative enough, it can be improved. Bitcoin is too big, and the other coins are hard to distinguish because of this. Instead of the percentage, let's use a log<sup>10</sup> scale of the "raw" capitalization. Plus, let's use color to group similar coins and make the plot more informative<sup>1</sup>. </p>
<p>For the colors rationale: bitcoin-cash and bitcoin-gold are forks of the bitcoin <a href="https://en.wikipedia.org/wiki/Blockchain">blockchain</a><sup>2</sup>. Ethereum and Cardano both offer Turing Complete <a href="https://en.wikipedia.org/wiki/Smart_contract">smart contracts</a>. Iota and Ripple are not minable. Dash, Litecoin, and Monero get their own color.</p>
<p><sup>1</sup> <em>This coloring is a simplification. There are more differences and similarities that are not being represented here.</em></p>
<p><sup>2</sup> <em>The bitcoin forks are actually <strong>very</strong> different, but it is out of scope to talk about them here. Please see the warning above and do your own research.</em></p>
End of explanation
# Selecting the id, percent_change_24h and percent_change_7d columns
volatility = dec6[['id', 'percent_change_24h', 'percent_change_7d']]
# Setting the index to 'id' and dropping all NaN rows
volatility = volatility.set_index('id').dropna()
# Sorting the DataFrame by percent_change_24h in ascending order
volatility = volatility.sort_values('percent_change_24h', ascending=True)
# Checking the first few rows
volatility.head()
Explanation: 5. What is going on?! Volatility in cryptocurrencies
<p>The cryptocurrency market has been spectacularly volatile since the first exchange opened. This notebook didn't start with a big, bold warning for nothing. Let's explore this volatility a bit more! We will begin by selecting and plotting the 24-hour and 7-day percentage changes, which we already have available.</p>
End of explanation
#Defining a function with 2 parameters, the series to plot and the title
def top10_subplot(volatility_series, title):
# Making the subplot and the figure for two side by side plots
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 6))
# Plotting with pandas the barchart for the top 10 losers
ax = (volatility_series[:10].plot.bar(color='darkred', ax=axes[0]))
# Setting the figure's main title to the text passed as parameter
fig.suptitle(title)
# Setting the ylabel to '% change'
ax.set_ylabel('% change')
# Same as above, but for the top 10 winners
ax = (volatility_series[-10:].plot.bar(color='darkblue', ax=axes[1]))
# Returning this for good practice, might use later
return fig, ax
DTITLE = "24 hours top losers and winners"
# Calling the function above with the 24 hours period series and title DTITLE
fig, ax = top10_subplot(volatility.percent_change_24h,DTITLE)
Explanation: 6. Well, we can already see that things are a bit crazy
<p>It seems you can lose a lot of money quickly on cryptocurrencies. Let's plot the top 10 biggest gainers and top 10 losers in market capitalization.</p>
End of explanation
# Sorting in ascending order
volatility7d = volatility.sort_values('percent_change_7d', ascending=True)
WTITLE = "Weekly top losers and winners"
# Calling the top10_subplot function
fig, ax = top10_subplot(volatility7d.percent_change_7d, WTITLE)
Explanation: 7. Ok, those are... interesting. Let's check the weekly Series too.
<p>800% daily increase?! Why are we doing this tutorial and not buying random coins?<sup>1</sup></p>
<p>After calming down, let's reuse the function defined above to see what is going on weekly instead of daily.</p>
<p><em><sup>1</sup> Please take a moment to understand the implications of the red plots on how much value some cryptocurrencies lose in such short periods of time</em></p>
End of explanation
# Selecting everything bigger than 10 billion
largecaps = cap.query('market_cap_usd>1E+10')
# Printing out largecaps
largecaps.head()
Explanation: 8. How small is small?
<p>The names of the cryptocurrencies above are quite unknown, and there is a considerable fluctuation between the 1 and 7 days percentage changes. As with stocks, and many other financial products, the smaller the capitalization, the bigger the risk and reward. Smaller cryptocurrencies are less stable projects in general, and therefore even riskier investments than the bigger ones<sup>1</sup>. Let's classify our dataset based on Investopedia's capitalization <a href="https://www.investopedia.com/video/play/large-cap/">definitions</a> for company stocks. </p>
<p><sup>1</sup> <em>Cryptocurrencies are a new asset class, so they are not directly comparable to stocks. Furthermore, there are no limits set in stone for what a "small" or "large" stock is. Finally, some investors argue that bitcoin is similar to gold, this would make them more comparable to a <a href="https://www.investopedia.com/terms/c/commodity.asp">commodity</a> instead.</em></p>
End of explanation
# Making a nice function for counting different marketcaps from the
# "cap" DataFrame. Returns an int.
# INSTRUCTORS NOTE: Since you made it to the end, consider it a gift :D
def capcount(query_string):
return cap.query(query_string).count().id
# Labels for the plot
LABELS = ["biggish", "micro", "nano"]
# Using capcount count the biggish cryptos
biggish = ...
# Same as above for micro ...
micro = ...
# ... and for nano
nano = ...
# Making a list with the 3 counts
values = ...
# Plotting them with matplotlib
# ... YOUR CODE FOR TASK 10 ...
Explanation: 9. Most coins are tiny
<p>Note that many coins are not comparable to large companies in market cap, so let's divert from the original Investopedia definition by merging categories.</p>
<p><em>This is all for now. Thanks for completing this project!</em></p>
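<p><em>One possible way to fill in the placeholders above (a hedged sketch; the exact capitalization thresholds are an assumption loosely based on the Investopedia definitions linked earlier, not values given in this notebook):</em></p>
# Assumed thresholds: 'biggish' > $300M, 'micro' between $50M and $300M, 'nano' < $50M
import matplotlib.pyplot as plt
biggish = capcount('market_cap_usd > 3E+8')
micro = capcount('market_cap_usd >= 5E+7 and market_cap_usd < 3E+8')
nano = capcount('market_cap_usd < 5E+7')
values = [biggish, micro, nano]
# A simple bar plot of the three counts
plt.bar(range(len(values)), values, tick_label=LABELS)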
End of explanation |
14,556 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Identify Factors that Predict Intro CS Experience Based on Gender
Step1: Problem Statement
I am interested in identify the leading indicators of experience broken down by gender in introductory CS at an elite research university like Berkeley. In short, I want to find the attributes that split the dataset as purely as possible into male and female.
To solve this problem, I will undertake the following course of action
Step3: Load Data
Step4: 3. Modeling and Validating
Algorithms and Techniques
For the problem of determining the factors that predict intro CS experience based on gender, I experimented with four different classifiers, a decision tree classifier, two ensemble methods and a support vector machine
Step5: Prediction and Expected Value
Step6: 4. Select Best Classifier
Benchmark
Before I start selecting which classifier I want to proceed with, I need a baseline score on which I can evaluate the practical value of datamining for this problem. Since this project is applying machine learning to a novel dataset, I do not have standard benchmarks I can measure against. As such, I have decided to use a simple majority classifier which always selects the majority class of the training set.
|Confusion Matrices|
|---|---|
| <img src="report/figures/XGBoost.png" alt="Drawing" style="width
Step7: 4. Tune Xgboost Model
I am going to tune my model based on some heuristics about the kinds of value ranges that are suitable for the hyper-parameters I want to learn.
Ideal choices of parameters as taken from Xgboost With Python ebook
Number of Trees (n_estimators) set to a fixed value between 100 and 1000, depending on the dataset size.
Learning Rate (learning_rate) simplified to the ratio: [2 to 10]/trees, depending on the number of trees.
Step8: Results of tuning
Once I performed the search through the hyper-parameter space to find the combination of hyper-parameters that maximized the performance of the selected classifier, I was able to improve the previous F_1 score by 2.82%, to achieve a prediction score of 73.17%.
- You can see that the false negative count for the female class has gone from 40 down to 35. This decision cost us a very small increase in the false positive count of the male class from 19 to 20. This is not too bad, so I will stick with this improved model.
| Base Model | Tuned Model |
|---|---|
| <img src="report/figures/XGBoost.png" alt="Drawing" style="width
Step9: 5(b). Feature Importance
Step10: 5(c). Feature Importance | Python Code:
from IPython.display import display
from IPython.display import HTML
import IPython.core.display as di # Example: di.display_html('<h3>%s:</h3>' % str, raw=True)
# This line will hide code by default when the notebook is exported as HTML
di.display_html('<script>jQuery(function() {if (jQuery("body.notebook_app").length == 0) { jQuery(".input_area").toggle(); jQuery(".prompt").toggle();}});</script>', raw=True)
# This line will add a button to toggle visibility of code blocks, for use with the HTML export version
di.display_html('''<button onclick="jQuery('.input_area').toggle(); jQuery('.prompt').toggle();">Toggle code</button>''', raw=True)
Explanation: Identify Factors that Predict Intro CS Experience Based on Gender: Part Two
End of explanation
%pylab inline
# Import libraries
from __future__ import division
import sys
sys.path.append('tools/')
import numpy as np
import pandas as pd
import pickle
import tools
# Graphing Libraries
import matplotlib.pyplot as pyplt
import seaborn as sns
sns.set_style("white")
Explanation: Problem Statement
I am interested in identifying the leading indicators of experience broken down by gender in introductory CS at an elite research university like Berkeley. In short, I want to find the attributes that split the dataset as purely as possible into male and female.
To solve this problem, I will undertake the following course of action:
1. Explore the dataset
- Explore the dataset to ensure its integrity and understand the context.
2. Identify features that may be used.
- If possible, engineer features that might provide greater discrimination.
3. With the understanding that this a classification task, explore a couple of classifiers that might be well suited for the problem at hand.
- Random Forest classifier
- eXtreme Gradient Boosted (XGBoost) trees classifier
- Support Vector Machine (SVM)
- Decision Tree classifier
4. Select appropriate classifier based on evaluation metric and tune it for optimality.
5. Extract top features responsible for discriminating the data.
I have already completed steps one and two in this notebook. Now I will focus on steps three through five.
Preliminaries
End of explanation
X = pd.read_pickle('data/features.pickle.dat')
y = pd.read_pickle('data/labels.pickle.dat')
# First, decide how many training vs test samples you want
num_all = X.shape[0] # same as len(student_data)
num_train = 662 # about 75% of the data
num_test = num_all - num_train
from sklearn import cross_validation
def shuffle_split_data(X, y):
    """Shuffles and splits data into 75% training and 25% testing subsets,
    then returns the training and testing subsets."""
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y,
train_size=num_train, random_state=42)
# Return the training and testing data subsets
return X_train, y_train, X_test, y_test
# Split the data into training and testing sets
try:
X_train, y_train, X_test, y_test = shuffle_split_data(X, y)
print "Successfully shuffled and split the data!"
except:
print "Something went wrong with shuffling and splitting the data."
print "Training set: {} samples".format(X_train.shape[0])
print "Test set: {} samples".format(X_test.shape[0])
Explanation: Load Data
End of explanation
X = X_train # Training data
seed = 342 # For reproducability
folds = 10
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score
from sklearn.cross_validation import StratifiedKFold
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.metrics import confusion_matrix, roc_curve, auc
from sklearn import metrics
models = {
'XGBoost': XGBClassifier(),
'DecisionTree': DecisionTreeClassifier(),
'SVC': svm.SVC(),
'RandomForest': RandomForestClassifier()
}
print "CLASSIFICATION RESULTS OF BASELINE CLASSIFIERS\n"
print"{:20}{:^15}{:^10}".format('CLASSIFIER', 'MEAN SCORE %', 'STD DEV %')
for model_name, model in models.iteritems():
kfold = StratifiedKFold(y_train, n_folds=folds, random_state=np.random.seed(seed))
results = cross_val_score(model, X, y_train, cv=kfold, scoring='f1')
print"{:20}{:^15.2f}{:^10.2f}".format(model_name, results.mean()*100, results.std()*100)
Features_test = X_test
pred_results = {}
for model_name, model in models.iteritems():
# make predictions for test data
model.fit(X, y_train)
y_predictions = model.predict(Features_test)
predictions = [round(value) for value in y_predictions]
# evaluate predictions
C = confusion_matrix(y_test, predictions)
pred_results[model_name] = dict(score=metrics.f1_score(y_test, predictions) * 100,
C=C, expected_value=0, rates=[[0,0],[0,0]])
# ROC Curve
fpr, tpr, _ = roc_curve(y_test, predictions)
roc_auc = auc(fpr, tpr)
pred_results[model_name]['fpr'] = fpr
pred_results[model_name]['tpr'] = tpr
pred_results[model_name]['roc_auc'] = roc_auc
Explanation: 3. Modeling and Validating
Algorithms and Techniques
For the problem of determining the factors that predict intro CS experience based on gender, I experimented with four different classifiers, a decision tree classifier, two ensemble methods and a support vector machine:
I selected a Random Forest classifier because it is considered one of the best off-the-shelf learning algorithm, and requires almost no tuning.
I selected an eXtreme Gradient Boosted (XGBoost) trees classifier; which is an advanced implementation of the gradient boosting algorithm. From reading literature on machine learning in practice, the XGBoost classifier has differentiated itself as a classifier that has successfully demonstrated its performance in a wide range of problems. For example, "among the 29 challenge winning solutions published at Kaggle's blog during 2015, 17 solutions used XGBoost."
I selected a Support Vector Machine (SVMs) because they are very robust classifiers and more importantly, they have a method to correct for class imbalances.
Finally I selected a Decision Tree classifier because it lends itself to interpretability. For this problem domain, it is not just satisfactory for me to discriminate between male and female students, what I ultimately want is to gain insights into what the salient factors around the experience of intro CS are, based on gender.
I implemented the four learning algorithms. For each of the learners I implemented the baseline algorithm using a stratified shuffle split cross validation with 10 folds and calculated the F1 scores and looked at the confusion matrices respectively.
Train Classifiers
End of explanation
# calculate expected rates
for key in pred_results:
rates = tools.expected_rates(pred_results[key]['C'])
pred_results[key]['rates'] = [[1-rates['fpr'], rates['fpr']],[1-rates['tpr'], rates['tpr']]]
cost_benefit_matrix = [[1, -1],[-2, 5]]
# calculate expected value
for key in pred_results:
pred_results[key]['expected_value'] = 0
for i in range(2):
for j in range(2):
pred_results[key]['expected_value'] += pred_results[key]['rates'][i][j] * cost_benefit_matrix[i][j]
print "{:^50}".format("PREDICTION RESULTS OF BASELINE CLASSIFIERS\n")
print"{:20}{:^15}{:^15}".format('CLASSIFIER', 'SCORE %', 'EXPECTED VALUE')
print"{:20}{:^15.2f}{:^15.2f}".format('Majority', y_train.tolist().count(0) / len(y_train) * 100, 0)
for key in pred_results:
print"{:20}{:^15.2f}{:^15.2f}".format(key, pred_results[key]['score'], pred_results[key]['expected_value'])
sns.set(font_scale = 1)
for key in pred_results:
tools.show_confusion_matrix(pred_results[key]['C'],
key,'report/figures/'+key+'.png', ['Class Male', 'Class Female'])
i, lw = 0, 2
colors = ["salmon", "k", "m", "r", "c"]
for key in pred_results:
pyplt.plot(pred_results[key]['fpr'], pred_results[key]['tpr'], color=colors[i], lw=lw,
label= key+' curve %0.2f' % pred_results[key]['roc_auc'])
i += 1
pyplt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
pyplt.xlim([0.0, 1.0])
pyplt.ylim([0.0, 1.05])
pyplt.title("ROC Curves")
pyplt.xlabel('False Positive Rate')
pyplt.ylabel('True Positive Rate')
pyplt.legend(loc="lower right");
pyplt.savefig('report/figures/rocCurve.png', format='png', dpi=200)
pyplt.close()
Explanation: Prediction and Expected Value
End of explanation
model = XGBClassifier()
model.fit(X, y_train)
g = xgb.to_graphviz(model, num_trees=1, rankdir='TB')
g.format = 'png'
g.render('report/figures/firstXGraph', view=False)
g = xgb.to_graphviz(model, num_trees=2, rankdir='TB')
g.format = 'png'
g.render('report/figures/SecondXGraph', view=False)
sns.set(font_scale = 1.5)
importances = model.booster().get_fscore()
importance_frame = pd.DataFrame({'Importance': list(importances.values()), 'Feature': list(importances.keys())})
importance_frame.sort_values(by = 'Importance', inplace = True)
importance_frame.plot(kind = 'barh', x = 'Feature', figsize = (8,14), color = 'orange');
pyplt.title('XGBOOST FEATURE IMPORTANCE')
pyplt.savefig('report/figures/featureImportance.png', dpi=200, bbox_inches='tight')
pyplt.close()
Explanation: 4. Select Best Classifier
Benchmark
Before I start selecting which classifier I want to proceed with, I need a baseline score on which I can evaluate the practical value of datamining for this problem. Since this project is applying machine learning to a novel dataset, I do not have standard benchmarks I can measure against. As such, I have decided to use a simple majority classifier which always selects the majority class of the training set.
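A minimal sketch of such a majority-class baseline (an illustration only; scikit-learn's DummyClassifier is assumed to be acceptable here, while the notebook itself simply reports the training-set class ratio):
from sklearn.dummy import DummyClassifier
majority = DummyClassifier(strategy='most_frequent')   # always predicts the majority class
majority.fit(X, y_train)
print("Majority-class F1 on the test set: %.2f%%" % (f1_score(y_test, majority.predict(X_test)) * 100))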
|Confusion Matrices|
|---|---|
| <img src="report/figures/XGBoost.png" alt="Drawing" style="width: 350px;"/> | <img src="report/figures/SVC.png" alt="Drawing" style="width: 350px;"/> |
| <img src="report/figures/DecisionTree.png" alt="Drawing" style="width: 350px;"/> | <img src="report/figures/RandomForest.png" alt="Drawing" style="width: 350px;"/> |
Expected Value
To evaluate the performance of the classifiers I use the expected value framework. The expected value of a classifier is the expected rates multiplied by the cost-benefit of each entry in the confusion matrix, weighted by the class priors. I invented a cost and benefits value associated with entry of the confusion matrix based on domain knowledge. My goal is to reward correct female class classification while penalizing false classifications. My choices for these values can be see in table below.
|Cost and Benefits|Value|
|---|---|
|Benefit of correctly identifying a female student | 5 |
|Benefit of correctly identifying a male student | 1 |
|Cost of misclassifying a female student | -2 |
|Cost of misclassifying a male student | 1 |
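As a small illustration of how such an expected value falls out of a confusion matrix (a sketch mirroring the calculation done in the code above, with a hypothetical confusion matrix):
import numpy as np
cost_benefit = np.array([[1, -1], [-2, 5]])   # same cost-benefit matrix as the table above
C = np.array([[150, 20], [35, 45]])           # hypothetical confusion matrix: [[tn, fp], [fn, tp]]
rates = C / C.sum(axis=1, keepdims=True)      # per-class rates: [[1-fpr, fpr], [1-tpr, tpr]]
print((rates * cost_benefit).sum())           # expected value of this classifier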
From running these baseline classifiers, I selected the xgboost classifier as the best classifier based on my evaluation criteria. On this problem, Random Forest classifier and the Support Vector Machine did not give a better performance than the majority classifier. While the Decision Tree did well, it was not as robust as the XGBoost classifier.
ROC Curve
<img src="report/figures/rocCurve.png" alt="Drawing" style="width: 500px;"/>
It's interesting to note that the Decision Tree classifier had the highest true positive rate at 0.63; however, its false positive rate was a staggering 0.38! This means that it cannot find a meaningful set of conditions for separating males from females in the dataset. The Support Vector Machine had the lowest false positive rate but did not beat the majority classifier because its true positive rate was abysmal, whereas XGBoost does satisfactorily on both fronts. You can really see this by looking at the ROC curves of the classifiers.
End of explanation
from sklearn.grid_search import GridSearchCV
# Build a stratified shuffle object because of unbalanced data
ssscv = StratifiedShuffleSplit(y_train, folds, random_state=np.random.seed(seed))
num_trees = range(200, 1100, 100)
params_grid = {
'learning_rate': [x/y for x in range(2, 3, 1) for y in num_trees],
'max_depth': [4, 6, 8, 10, 12],
'n_estimators': num_trees,
'colsample_bytree': [0.4, 0.6, 0.8, 1.0],
'subsample':[0.7]
}
params_fixed = {
'objective': 'binary:logistic',
'silent': 1
}
# Load and use already tuned classifier, else tune classifier
tune_flag = False
model_filename = "data/fixedDepthTree.pickle.dat"
hyper_params = ['n_estimators', 'subsample', 'learning_rate', 'colsample_bytree', 'max_depth']
if tune_flag:
grid = GridSearchCV(estimator=XGBClassifier(**params_fixed),
param_grid=params_grid,
cv=ssscv,
scoring='f1')
grid.fit(X, y_train)
print "Best accuracy obtained: {0}".format(grid.best_score_)
print "Parameters:"
for key, value in grid.best_params_.items():
print "\t{}: {}".format(key, value)
model = grid.best_estimator_
# save model to file
pickle.dump(model, open(model_filename, "wb"))
print "MODEL HYPER-PARAMETERS\n"
for item in hyper_params:
print "{:16}: {:<10.2f}".format(item, model.get_params()[item])
else:
model = tools.load_model(model_filename)
print "TUNED MODEL HYPER-PARAMETERS\n"
for item in hyper_params:
print "{:16}: {:<10.2f}".format(item, model.get_params()[item])
Features_test = X_test
# make predictions for test data
model.fit(X, y_train)
ypred = model.predict(Features_test)
predictions = ypred
# evaluate predictions
xgb_tuned_prediction_score = f1_score(y_test, predictions) * 100
print "Prediction of tuned classifier: %.2f%%"%(xgb_tuned_prediction_score)
print "Tuned Model improvement over baseline classifier: %.2f%%"%(xgb_tuned_prediction_score -
pred_results['XGBoost']['score'])
sns.set(font_scale = 1)
model_name = model.__class__.__name__
C = confusion_matrix(y_test, ypred)
tools.show_confusion_matrix(C, model_name, 'report/figures/tuned_model_CM.png', ['Class Male', 'Class Female'])
sns.set(font_scale = 0.5)
C = array(cost_benefit_matrix)
class_labels = ['Class Male', 'Class Female']
model_name = 'COST BENEFIT MATRIX'
# true negative, false positive, etc...
tn = C[0,0]; fp = C[0,1]; fn = C[1,0]; tp = C[1,1];
NP = fn+tp # Num positive examples
NN = tn+fp # Num negative examples
N = NP+NN
fig = plt.figure(figsize=(3.75,2.5), dpi=300)
ax = fig.add_subplot(111)
ax.imshow(C, interpolation='nearest', cmap=plt.cm.Blues)
# Draw the grid boxes
ax.set_xlim(-0.5,0.5)
ax.set_ylim(0.5,-0.5)
# Set xlabels
ax.set_xlabel(model_name+'\n\nPredicted Label', fontsize=10)
ax.set_xticks([0,1,1.5])
ax.set_xticklabels(class_labels + [''])
ax.xaxis.set_label_position('top')
ax.xaxis.tick_top()
# These coordinate might require some tinkering. Ditto for y, below.
ax.xaxis.set_label_coords(0.5,1.10)
# Set ylabels
ax.set_ylabel('True Label', fontsize=10, rotation=90)
ax.set_yticklabels(class_labels + [''],rotation=90)
ax.set_yticks([0,1,1.5])
ax.yaxis.set_label_coords(-0.09,0.65)
# Fill in initial metrics: tp, tn, etc...
ax.text(0,0,
'b(Y,p): %d'%(tn),
va='center',
ha='center',
bbox=dict(fc='w',boxstyle='round,pad=1'))
ax.text(0,1,
'c(N,p): %d'%fn,
va='center',
ha='center',
bbox=dict(fc='w',boxstyle='round,pad=1'))
ax.text(1,0,
'c(Y,n): %d'%fp,
va='center',
ha='center',
bbox=dict(fc='w',boxstyle='round,pad=1'))
ax.text(1,1,
'b(N,n): %d\n'%(tp),
va='center',
ha='center',
bbox=dict(fc='w',boxstyle='round,pad=1'))
plt.tight_layout()
plt.savefig('report/figures/cost_benefit_matrix.png', format='png', dpi=200)
#plt.close()
model.fit(X, y_train)
g = xgb.to_graphviz(model, num_trees=1, rankdir='TB')
g.format = 'png'
g.render('report/figures/first_tuned_model_graph', view=False)
g = xgb.to_graphviz(model, num_trees=2, rankdir='TB')
g.format = 'png'
g.render('report/figures/second_tuned_model_graph', view=False)
Explanation: 4. Tune Xgboost Model
I am going to tune my model based on some heuristics about the kinds of value ranges that are suitable for the hyper-parameters I want to learn.
Ideal choices of parameters as taken from Xgboost With Python ebook
Number of Trees (n_estimators) set to a fixed value between 100 and 1000, depending on the dataset size.
Learning Rate (learning_rate) simplified to the ratio: [2 to 10]/trees, depending on the number of trees.
Row Sampling (subsample) grid searched values in the range [0.5, 0.75, 1.0].
Column Sampling (colsample bytree and maybe colsample bylevel) grid searched values in the range [0.4, 0.6, 0.8, 1.0].
Min Leaf Weight (min_child_weight) simplified to the ratio 3/rare_events , where rare events rare events is the percentage of rare event observations in the dataset.
Tree Size (max_depth) grid searched values in the rage [4, 6, 8, 10].
Min Split Gain (gamma) fixed with a value of zero.
End of explanation
params_dict = model.get_params()
xgdmat = xgb.DMatrix(X, y_train) # Create our DMatrix to make XGBoost more efficient
testdmat = xgb.DMatrix(X_test)
cv_xgb = xgb.cv(params = params_dict, dtrain = xgdmat, num_boost_round = 3000, nfold = folds,
metrics =['map'],
early_stopping_rounds = 100) # Look for early stopping that minimizes error
bst = xgb.train(params_dict, xgdmat, num_boost_round = 251)
y_pred = bst.predict(testdmat,ntree_limit=bst.best_ntree_limit)
# evaluate predictions
y_pred[y_pred > 0.5] = 1
y_pred[y_pred <= 0.5] = 0
sns.set(font_scale = 1.5)
importances = bst.get_fscore()
df_1 = pd.DataFrame({'Importance': list(importances.values()), 'Feature': list(importances.keys())})
df_1.sort_values(by = 'Importance', inplace = True)
df_1.plot(kind = 'barh', x = 'Feature', figsize = (8,14));
pyplt.title('XGBOOST TUNED FEATURE IMPORTANCE\n')
pyplt.savefig('report/figures/featureImportance_tuned.png', dpi=200, bbox_inches='tight')
pyplt.close()
Explanation: Results of tuning
Once I performed the search through the hyper-parameter space to find the combination of hyper-parameters that maximized the performance of the selected classifier, I was able to improve the previous F_1 score by 2.82%, to achieve a prediction score of 73.17%.
- You can see that the false negative count for the female class has gone from 40 down to 35. This decision cost us a very small increase in the false positive count of the male class from 19 to 20. This is not too bad, so I will stick with this improved model.
| Base Model | Tuned Model |
|---|---|
| <img src="report/figures/XGBoost.png" alt="Drawing" style="width: 350px;"/> | <img src="report/figures/tuned_model_CM.png" alt="Drawing" style="width: 350px;"/> |
Identifying the important factors
To identify factors that predict experience based on gender, I will then extract the top features responsible for discriminating the data and then expand the final step to:
- Explore the various parameters around feature splitting
- Xgboost algorithm feature importance score
- Information gain
- Cover
Feature Importance: Xgboost
There are two things that need consideration when using xgBoost for understanding feature importance: the features that are doing the most work in splitting the data, and the automatically generated feature importance ranking that is done in xgBoost.
Base Model Trees
I plotted some estimators in the xgboost learners to see which features are doing the most work in splitting the data. I chose to focus on the first and second tree in the ensemble. On simple models, the first two trees may be enough to gain a strong understanding. This model has three levels and eight distinct types.
| First tree in the ensemble | Second tree in the ensemble |
|---|---|
| <img src="report/figures/firstXGraph.png" alt="Drawing" style="width: 500px;"/> | <img src="report/figures/secondXGraph.png" alt="Drawing" style="width: 500px;"/> |
Tuned Model Trees
The tuned model has a more complex tree that goes down six levels, for each of its estimators. This model segmented the data into 36 distinct types; you can see this by counting the number of leaf nodes.
First tree in the esemble
Second tree in the esemble
5(a). Feature Importance: Tuned Model
End of explanation
tools.create_feature_map(list(importances.keys()),'data/xgb_model_feature_map.txt')
information_gain_list = tools.order_features_by_gains(bst, 'data/xgb_model_feature_map.txt')
importances_gain = {}
for i in range(len(information_gain_list)):
feat, info = information_gain_list[i]
importances_gain[feat] = round(info['gain'])
sns.set(font_scale = 1.5)
df_2 = pd.DataFrame({'Gain': list(importances_gain.values()), 'Feature': list(importances_gain.keys())})
df_2.sort_values(by = 'Gain', inplace = True)
df_2.plot(kind = 'barh', x = 'Feature', figsize = (8,14), color = 'green');
pyplt.title('INFORMATION-GAIN BASED FEATURE IMPORTANCE\n')
pyplt.savefig('report/figures/featureImportance_informationGain.png', dpi=200, bbox_inches='tight')
pyplt.close()
Explanation: 5(b). Feature Importance: Information Gain based Ranking
One can think on information gain as a measurement of informativeness of a feature with respect to the target class.
End of explanation
importances_cover = {}
for i in range(len(information_gain_list)):
feat, info = information_gain_list[i]
importances_cover[feat] = round(info['cover'])
sns.set(font_scale = 1.5)
df_3 = pd.DataFrame({'Cover': list(importances_cover.values()), 'Feature': list(importances_cover.keys())})
df_3.sort_values(by = 'Cover', inplace = True)
df_3.plot(kind = 'barh', x = 'Feature', figsize = (8,14), color = 'grey');
pyplt.title('COVER BASED FEATURE IMPORTANCE\n')
pyplt.savefig('report/figures/featureImportance_cover.png', dpi=200, bbox_inches='tight')
pyplt.close()
threshold = 20
dfd_1 = df_1[threshold:]
dfd_2 = df_2[threshold:]
dfd_3 = df_3[threshold:]
df = pd.merge(dfd_1, dfd_2, on='Feature')
df = pd.merge(df, dfd_3, on='Feature')
df.sort_values(['Importance'], ascending=False, inplace = True)
df = df.reset_index(drop=True)
df.sort_values(by = 'Importance', inplace = True)
df.plot(kind = 'barh', x = 'Feature', figsize = (8,14));
pyplt.title('FEATURE IMPORTANCE\n')
pyplt.savefig('report/figures/final_importance.png', dpi=200, bbox_inches='tight')
pyplt.close()
Explanation: 5(c). Feature Importance: Cover based Ranking
The cover is the sum of the second-order gradients in each node; intuitively, it represents the number of data points affected by the split.
End of explanation |
14,557 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Lesson 7
Python Basic, Lesson 5, v1.0.1, 2016.12 by David.Yi
Python Basic, Lesson 5, v1.0.2, 2017.03 modified by Yimeng.Zhang
v1.1, 2020.4 2020.5 edit by David Yi
本次内容要点
文件和目录操作之一:文件和目录操作
文件和目录操作之二:读写文本文件
思考:搜索电脑上指定路径指定类型的文件
文件和目录操作之一
Python 的 os 库有很多和文件、路径和执行系统命令相关的函数。
os 库常用函数
os.sep 可以取代操作系统特定的路径分割符
os.name 字符串指示你正在使用的平台。比如对于Windows,它是'nt',而对于Linux/Unix用户,它是'posix'
os.getcwd() 函数得到当前工作目录,即当前Python脚本工作的目录路径
os.chdir(dirname) 改变工作目录到dirname
os.getenv() 用来读取环境变量
os.putenv() 用来设置环境变量
os.listdir() 返回指定目录下的所有文件名和目录名
os.remove() 删除一个文件
os.system() 运行shell命令
os.linesep 字符串给出当前平台使用的行终止符。例如,Windows使用'/r/n',Mac使用'\n'。
os.mkdir() 建立路径
os.rmdir() 删除路径
不同操作系统在路径和文件处理上有一定差异,这里的举例在 Windows 和 macOS 下都测试过
关于文件系统的延展阅读
文件系统介绍 https
Step1: os.path 常用函数
os.path.isdir() 检查给出的路径是否是一个目录
os.path.isfile() 检查给出的路径是否一个文件
os.path.exists() 检查给出的路径或者文件是否存在
os.path.getsize() 获得路径或者文件的大小
os.path.getatime() 返回所指向的文件或者目录的最后存取时间
os.path.getmtime() 返回所指向的文件或者目录的最后修改时间
os.path.split() 返回一个路径的目录名和文件名
os.path.abspath() 返回规范化的绝对路径
os.path.isabs() 如果输入是绝对路径,返回True
os.path.split() 将路径分割成目录和文件名的二元素元组
os.path.splitdrive() 返回(drivername,fpath)元组
os.path.dirname() 返回路径的目录,其实就是 os.path.split(path)的第一个元素
os.path.basename() 返回路径最后的文件名,其实就是 os.path.split(path)的第二个元素
os.path.splitext() 分离文件名与扩展名,返回(fname,fextension)元组
os.path.join() 将多个路径组合后返回,第一个绝对路径之前的参数将被忽略
os.path.commonprefix(list) 返回list中,所有路径共有的最长的路径
Step2: 文件和目录操作之二
读写文件是最常见的IO操作。Python内置了读写文件的函数。
读写文件前,我们先必须了解一下,在磁盘上读写文件的功能都是由操作系统提供的,现代操作系统不允许普通的程序直接操作磁盘,所以,读写文件就是请求操作系统打开一个文件对象,然后,通过操作系统提供的接口从这个文件对象中读取数据,或者把数据写入这个文件对象。
读文件
函数 open() 返回 文件对象,通常的用法需要两个参数:open(filename, mode)。分别是文件名和打开模式
在做下面的例子前,我们要创建一个 test.txt 文件,并且保证其中的内容是如下样式,包含三行内容:
hello
hi
byebye
文件保存在可以访问的目录,我们用到的文件都保存在 notebook 下面的 files 目录;
使用 jupyter 可以直接新建 Text File,来完成建立和编辑文本文件
Step3: 写文件
写文件和读文件是一样的,唯一区别是调用 open() 函数时,传入标识符 'w' 或者 'wb' 表示写文本文件或写二进制文件。
r 以读方式打开
w 以写方式打开
a 以追加模式打开(必要时候创建新文件)
Step4: 操作系统和文件系统差异处理
如果要写一个 windows 和 macOS 都能用的文件处理软件,很多时候要考虑操作系统带来的差异
linesep 文件中分隔行的字符串;
path.sep 分割文件路径名的字符串;
curdir 当前工作目录的字符串;
pardir 当前工作目录的父目录字符串;
使用 glob 包查找文件
glob 是 python 自己带的一个文件操作相关模块,很简洁,用它可以查找符合自己目的的文件,就类似于 Windows 下的文件搜索,而且也支持通配符
Step6: 我们在 fishbase 的 fish_file 包内,也实现了一个搜索文件的功能,也使用了 python 自带的 pathlib 函数包。 | Python Code:
import os
# 操作系统路径分隔符
print(os.sep)
# 操作系统平台名称
print(os.name)
# 获取当前路径
os.getcwd()
# 记录一下这是 zhang yimeng 当时执行后的结果:'C:\\Users\\yimeng.zhang\\Desktop\\Class\\python基础\\python_basic'
# 这是我现在在 windows 电脑上执行的结果:'C:\\dev_python\\python_study\\python_study_basic_notebook'
# 切换路径
# os.chdir('/Users/david.yi')
# 切换路径大家要参考上面获取的当前路径,根据自己的电脑做适当调整,替换下面 yijeng.zhang 为自己电脑上的用户名
os.chdir('C:\\Users\\yimeng.zhang\\Desktop\\Class')
os.getcwd()
# 返回指定的文件夹包含的文件或文件夹的名字的列表。这个列表以字母顺序。
# 不包括 '.' 和'..' 即使它在文件夹中。
import os
# os.chdir('C:\\Users\\yimeng.zhang\\Desktop\\Class\\python基础\\python_basic')
os.listdir()
# 注意返回的数据类型是什么,是一个列表
print(type(os.listdir()))
# 计算目录下有多少文件,因为返回结果是 list,因此各类计算都比较方便
a = os.listdir()
print(len(a))
# 可以指定路径参数,来列出该目录下所有文件
# list_a = os.listdir('/Users/david.yi')
list_a = os.listdir('C:\\dev_python\\python_study\\python_study_basic_notebook')
# 可以判断各类情况,比如第一个是 P 字母
for i in list_a:
if i[0] == 'P':
print(i)
# 操作系统换行符
# 在一些文本文件处理中有用
os.linesep
# 建立路径
# 切换到当前路径
os.getcwd()
os.mkdir('test')
print('ok')
Explanation: Lesson 7
Python Basic, Lesson 5, v1.0.1, 2016.12 by David.Yi
Python Basic, Lesson 5, v1.0.2, 2017.03 modified by Yimeng.Zhang
v1.1, 2020.4 2020.5 edit by David Yi
Topics for this session
File and directory operations, part one: working with files and directories
File and directory operations, part two: reading and writing text files
Exercise: search for files of a given type under a given path on your computer
File and directory operations, part one
Python's os library provides many functions related to files, paths and running system commands.
Commonly used os functions
os.sep the path separator specific to the operating system
os.name a string identifying the platform, e.g. 'nt' for Windows and 'posix' for Linux/Unix users
os.getcwd() returns the current working directory, i.e. the directory the Python script is running in
os.chdir(dirname) changes the working directory to dirname
os.getenv() reads an environment variable
os.putenv() sets an environment variable
os.listdir() returns the names of all files and directories in the given directory
os.remove() deletes a file
os.system() runs a shell command
os.linesep the line terminator used on the current platform, e.g. '\r\n' on Windows and '\n' on macOS
os.mkdir() creates a directory
os.rmdir() removes a directory
Operating systems differ somewhat in how they handle paths and files; the examples here have been tested on both Windows and macOS
Further reading on file systems
Introduction to file systems: https://zh.wikipedia.org/wiki/%E6%96%87%E4%BB%B6%E7%B3%BB%E7%BB%9F
Windows file systems (FAT, FAT32, NTFS): https://support.microsoft.com/zh-cn/kb/100108
Linux file systems: http://cn.linux.vbird.org/linux_basic/0230filesystem.php
The Python community now increasingly recommends the pathlib package for file and directory access. Like some other parts of the standard library, file and directory handling has accumulated several overlapping modules over time; to understand how Python deals with files and directories we keep using the classic functions here, while pathlib, introduced in Python 3.4 and steadily improved since, will be covered in a later session.
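A tiny pathlib sketch of the same ideas (an illustration only, not part of the original lesson):
from pathlib import Path
p = Path.cwd()                      # current working directory as a Path object
print(p, p.is_dir())
for child in p.iterdir():           # roughly the pathlib counterpart of os.listdir()
    print(child.name, child.suffix)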
End of explanation
# 检查给出的路径是否是一个存在的目录,存在
# 确保执行这些测试代码的时候先设定路径到当前 notebook 的路径,或者设定的某个路径
# 进行路径、文件操作时候,还是要谨慎小心一些
os.chdir('C:\\dev_python\\python_study\\python_study_basic_notebook')
s_dir = os.getcwd()
print(s_dir)
print(os.path.isdir(s_dir))
print(os.path.isdir('C:\\Users'))
print(os.path.isdir('C:\\Users222'))
# 检查给出的路径是否是一个存在的目录
# 下面再当前路径下加了个字母,当然是不存在的
os.path.isdir(s_dir + 's')
# 文件不是路径,即便文件存在,也返回 False
os.path.isdir(s_dir + 'test.txt')
# 检查给出的路径是否一个文件,存在
import os
s_dir = os.getcwd()
s_file = os.path.join(s_dir, 'files/test.txt')
print(s_file)
os.path.isfile(s_file)
# 检查给出的路径是否一个文件,不存在
s_dir = os.getcwd()
s_file = os.path.join(s_dir, 'test222.txt')
os.path.isfile(s_file)
# 路径不是文件,所以返回 False
s_dir = os.getcwd()
os.path.isfile(s_dir)
# 对路径和文件都通用的检查方式
s_dir = os.getcwd()
s_file = os.path.join(s_dir, 'test.txt')
print(os.path.exists(s_dir))
print(os.path.exists(s_file))
# 获得路径或者文件的大小
s_dir = os.getcwd()
s_file = os.path.join(s_dir, 'test.txt')
os.path.getsize(s_file)
# 获得路径或者文件的大小
os.path.getsize(s_dir)
# 返回所指向的文件或者目录的最后存取时间
# 返回的时间格式可能和大家想象的不太一样
s_dir = os.getcwd()
s_file = os.path.join(s_dir, 'files/test.txt')
os.path.getatime(s_file)
# 返回所指向的文件或者目录的最后存取时间
import os
import time
# 将日期格式化
dt = time.localtime(os.path.getatime(s_dir))
# print(dt)
print(time.strftime('%Y-%m-%d %H:%M:%S', dt))
# 返回所指向的文件或者目录的最后修改时间
s_file = os.path.join(s_dir, 'files/test.txt')
os.path.getmtime(s_file)
# 返回所指向的文件或者目录的最后修改时间
# 使用 time.ctime() 方法来格式化日期
import time, os
s_file = os.path.join(s_dir, 'files/test.txt')
time.ctime(os.path.getmtime(s_file))
# 返回规范化的绝对路径
# 会自动补齐完整路径,不管文件是否存在
os.path.abspath('tt1211.txt')
# 如果输入是绝对路径,返回True
print(os.path.isabs('test.txt'))
print(os.path.isabs('/Users/yijun/test.txt'))
# 返回一个路径的目录名和文件名
# os.chdir('/Users/david.yi/Documents/dev/python_study/python_basic')
# os.chdir('C:\\Users\\yimeng.zhang\\Desktop\\Class\\python基础\\python_basic')
s_dir = os.getcwd()
s_file = os.path.join(s_dir, 'test.txt')
print(s_file)
# 分拆路径和文件名
os.path.split(s_file)
# 返回路径的目录,其实就是 os.path.split(path)的第一个元素
os.path.dirname('/Users/yijun/test.txt')
# 返回路径最后的文件名,其实就是 os.path.split(path)的第二个元素
os.path.basename(s_file)
# 分离文件名与扩展名,返回(fname,fextension)元组
os.path.splitext(s_file)
# 将多个路径组合后返回,第一个绝对路径之前的参数将被忽略
# os.path.join('/Users/yijun', 'test.txt')
os.path.join('C:\\dev_python\\python_study\\python_study_basic_notebook\\files', 'test.txt')
# 返回list中,所有路径共有的最长的路径
l = ['/Users/yijun/test.txt', '/Users/yijun/test/aaa.txt', '/Users/yijun/bbb.txt']
os.path.commonprefix(l)
# 遍历一个目录下的所有文件
import os
def list_dir(root_dir):
for lists in os.listdir(root_dir):
path = os.path.join(root_dir, lists)
print(path)
if os.path.isdir(path):
list_dir(path)
# 注意不要挑选目录下过多文件的,否则会耗费电脑资源
list_dir(os.getcwd())
# 遍历一个目录下的所有文件
# 显示文件的字节数,用 getsize()
import os
def list_dir(root_dir):
for lists in os.listdir(root_dir):
path = os.path.join(root_dir, lists)
if lists[0:1] != '.':
filesize = os.path.getsize(path)
print(path, ' ', filesize)
if os.path.isdir(path):
list_dir(path)
# 注意不要挑选目录下过多文件的,否则会耗费电脑资源
#list_dir('/Users/david.yi/Documents/dev/dig/doc')
list_dir(os.getcwd())
# 遍历一个目录下的所有文件
# 过滤 . 开头的文件,一般是系统文件
# 显示文件的字节数
# 显示指定后缀 ipynb 的文件,引入 endswith 用法
import os
def list_dir(root_dir):
for lists in os.listdir(root_dir):
path = os.path.join(root_dir, lists)
if lists[0:1] != '.' and lists.endswith('.ipynb'):
filesize = os.path.getsize(path)
print(path, ' ', filesize)
if os.path.isdir(path):
list_dir(path)
# 注意不要挑选目录下过多文件的,否则会耗费电脑资源
# list_dir('/Users/david.yi/Documents/dev/dig/n_query')
list_dir(os.getcwd())
# 写一个可以搜索硬盘上指定路径指定类型的文件
# os.walk() 返回一个三元tuple(root, dirnames, filenames)
# 第一个为起始路径,String
# 第二个为起始路径下的文件夹, List
# 第三个是起始路径下的文件. List
# fnmatch python自带的文件名模式匹配包
# https://docs.python.org/zh-tw/3/library/fnmatch.html
import fnmatch
import os
images = ['*.jpg', '*.jpeg', '*.png', '*.tif', '*.tiff']
matches = []
# for root, dirnames, filenames in os.walk('/Users/david.yi/Documents/dev/'):
for root, dirnames, filenames in os.walk(os.getcwd()):
for extensions in images:
for filename in fnmatch.filter(filenames, extensions):
matches.append(os.path.join(root, filename))
print(matches)
# import os
# for root, dirnames, filenames in os.walk('C:\\Users\\yimeng.zhang\\Desktop\\Class\\python基础\\python_basic'):
# print(filenames)
Explanation: os.path 常用函数
os.path.isdir() 检查给出的路径是否是一个目录
os.path.isfile() 检查给出的路径是否一个文件
os.path.exists() 检查给出的路径或者文件是否存在
os.path.getsize() 获得路径或者文件的大小
os.path.getatime() 返回所指向的文件或者目录的最后存取时间
os.path.getmtime() 返回所指向的文件或者目录的最后修改时间
os.path.split() 返回一个路径的目录名和文件名
os.path.abspath() 返回规范化的绝对路径
os.path.isabs() 如果输入是绝对路径,返回True
os.path.split() 将路径分割成目录和文件名的二元素元组
os.path.splitdrive() 返回(drivername,fpath)元组
os.path.dirname() 返回路径的目录,其实就是 os.path.split(path)的第一个元素
os.path.basename() 返回路径最后的文件名,其实就是 os.path.split(path)的第二个元素
os.path.splitext() 分离文件名与扩展名,返回(fname,fextension)元组
os.path.join() 将多个路径组合后返回,第一个绝对路径之前的参数将被忽略
os.path.commonprefix(list) 返回list中,所有路径共有的最长的路径
End of explanation
import os
# 获得当前路径
s_dir = os.getcwd()
print(s_dir)
# 拼接完整文件名
filename = os.path.join(s_dir, 'files/test.txt')
print(filename)
# 和读写文件打交道是一件需要小心的事情,这里用了 try-finally 的方式来避免出错
try:
# 打开文件
f = open(filename, 'r')
print(f.read())
finally:
if f:
f.close()
# 简化调用方式
# 省却了 try...finally,会有 with 来自动控制保证可以关闭文件等
with open(filename, 'r') as f:
print(f.read())
# 读入文件所有的内容
# 这样操作对于一般的文件没啥问题,太大的文件不能这样读,内存会不够
with open(filename, 'r') as f:
lines = f.readlines()
print(type(lines))
print(lines)
# 把读入的文件显示出来
for i in lines:
print(i)
# 更简单的按行读取文件内容方法
with open(filename, 'r') as f:
for eachline in f:
print(eachline)
Explanation: File and directory operations, part two
Reading and writing files is the most common kind of IO, and Python has built-in functions for it.
Before reading or writing, keep in mind that reading and writing files on disk is a service provided by the operating system; modern operating systems do not let ordinary programs touch the disk directly. Reading or writing a file therefore means asking the OS to open a file object and then reading data from, or writing data to, that object through the interfaces the OS provides.
Reading a file
The open() function returns a file object and is normally called with two arguments, open(filename, mode): the file name and the open mode.
Before running the examples below, create a test.txt file whose content is exactly these three lines:
hello
hi
byebye
Save the file somewhere accessible; all files used here live in the files directory next to this notebook.
In Jupyter you can create and edit such a text file directly via New -> Text File.
End of explanation
# 写文件
import os
# 获得当前路径
s_dir = os.getcwd()
# 拼接完整文件名
filename= os.path.join(s_dir, 'files/test2.txt')
print(filename)
# 换行符
br = os.linesep
# 写文件
with open(filename, 'w') as f:
f.write('Hello, World!' + br)
f.write('Hello, Shanghai!' + br)
f.write('Hello, CHINA!' + br)
f.close()
with open(filename, 'r') as f:
print(f.read())
Explanation: Writing files
Writing a file works just like reading one; the only difference is that open() is called with the 'w' (or 'wb') flag to write a text (or binary) file.
r open for reading
w open for writing
a open in append mode (creating the file if necessary); a short sketch follows this list
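A minimal append-mode sketch (an illustration; it reuses the files/test2.txt file written above):
import os
filename = os.path.join(os.getcwd(), 'files/test2.txt')
with open(filename, 'a') as f:          # 'a' appends instead of overwriting
    f.write('Hello again!' + os.linesep)
with open(filename, 'r') as f:
    print(f.read())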
End of explanation
import pathlib
list(pathlib.Path('.').glob('**/*.ipynb'))
Explanation: Handling operating-system and file-system differences
If you want file-handling code that works on both Windows and macOS, you often have to account for differences between the operating systems
linesep  the string that separates lines in a file;
path.sep the string that separates components of a file path;
curdir   the string for the current working directory;
pardir   the string for the parent of the current working directory;
Finding files with the glob package
glob is a small file-related module that ships with Python; you can use it to find files matching a pattern, much like the file search in Windows, and it supports the wildcards *, ? and []: * matches zero or more characters, ? matches a single character, and [] matches a character in the given range, e.g. [0-9] matches a digit.
glob's main function is also called glob; it takes a single path pattern string and returns the list of all matching file paths (a short glob sketch follows below).
Before 2020-05-10 this tutorial recommended the glob module for finding files; as Python has evolved we now recommend the pathlib package for better compatibility and performance. pathlib has a glob function of its own, and the accompanying cell briefly demonstrates using pathlib's glob to search for files.
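A minimal sketch with the classic glob module (an illustration complementing the pathlib version shown in the code above):
import glob
# All notebooks in the current directory, using a wildcard
print(glob.glob('*.ipynb'))
# Recursive search needs recursive=True together with the ** pattern
print(glob.glob('**/*.ipynb', recursive=True))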
End of explanation
# v1.0.14 edit by Hu Jun, edit by Jia Chunying, #38
# v1.0.17 edit by Hu Jun, #212
# v1.3 edit by David Yi, #272
def find_files(path, exts=None):
    """Walk a path and return a list of files, optionally filtered by extension.

    :param:
        * path: (string) the path to search
        * exts: (list) list of file extensions to keep, defaults to None

    :return:
        * files_list: (list) list of matching file paths

    Example::

        print('--- find_files demo ---')
        path1 = '/root/fishbase_issue'
        all_files = find_files(path1)
        print(all_files)
        exts_files = find_files(path1, exts=['.png', '.py'])
        print(exts_files)
        print('---')

    Output::

        --- find_files demo ---
        ['/root/fishbase_issue/test.png', '/root/fishbase_issue/head.jpg', '/root/fishbase_issue/py/man.png']
        ['/root/fishbase_issue/test.png', '/root/fishbase_issue/py/man.png']
        ---
    """
files_list = []
for root, dirs, files in os.walk(path):
for name in files:
files_list.append(os.path.join(root, name))
if exts is not None:
return [file for file in files_list if pathlib.Path(file).suffix in exts]
return files_list
Explanation: In the fishbase library's fish_file module we also implemented a file-search helper of this kind, likewise built on Python's built-in pathlib package.
End of explanation |
14,558 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
PoS tagging en Español
En este ejercicio vamos a jugar con uno de los corpus en español que está disponible desde NLTK
Step1: Fíjate que las etiquetas que se usan en el treebank español son diferentes a las etiquetas que habíamos visto en inglés. Para empezar, el español es una lengua con una morfología más rica
Step2: Las etiquetas morfológicas que hemos visto son bastante complejas, ya que incorporan los rasgos de la flexión del español. Afortunadamente, NLTK permite cargar los corpus etiquetados con un conjunto de etiquetas universal y simplificado (todos los detalles en el paper) utilizando la opcion tagset='universal'. Para ello, asegúrate de que has almacenado dentro de tu directorio de recursos de nltk el mapeo de etiquetas originales del corpus con su versión simplificada. Este fichero se llama universal_tagset-ES.map y lo tienes en la carpeta data del respositorio. Es recomendable renombrarlo, por ejemplo
Step3: Después, ejecuta la siguiente celda y fíjate cómo hemos cargado una lista de oraciones etiquetadas con esta nueva versión de las etiquetas.
Step5: Estas etiquetas son más sencillas, ¿verdad? Básicamente tenemos DET para determinante, NOUN para nombre, VERB para verbo, ADJ para adjetivo, ADP para preposición, etc.
Vamos a utilizar este corpus para entrenar varios etiquetadores basados en ngramas, tal y como hicimos en clase y se explica en la presentación nltk-pos.
Construye de manera incremental cuatro etiquetadores.
un etiquetador que por defecto que asuma que una palabra desconocida es un nombre común en masculino singular y asigne la etiqueta correspondiente a todas las palabras.
un etiquetador basado en unigramas que aprenda a partir de la lista oraciones y utilice en etiquetador anterior como respaldo.
un etiquetador basado en bigramas que aprenda a partir de la lista oraciones y utilice en etiquetador anterior como respaldo.
un etiquetador basado en trigramas que aprenda a partir de la lista oraciones y utilice en etiquetador anterior como respaldo. | Python Code:
import nltk
from nltk.corpus import cess_esp
cess_esp = cess_esp.tagged_sents()
print(cess_esp[5])
Explanation: PoS tagging in Spanish
In this exercise we will play with one of the Spanish corpora available from NLTK: CESS_ESP, a treebank annotated from a collection of news texts in Spanish.
This corpus is now part of a larger resource, the AnCora corpus developed at the Universitat de Barcelona. For more information you can read the paper by M. Taulé, M. A. Martí and M. Recasens, "AnCora: Multilevel Annotated Corpora for Catalan and Spanish", Proceedings of the 6th International Conference on Language Resources and Evaluation (LREC 2008), 2008, Marrakesh (Morocco).
Before anything else, run the following cell to get access to the corpus and the other tools we will use in this exercise.
End of explanation
# write your code here
# the tag for 3rd person plural, simple past (preterite) indicative is: vmis3p0
verbos_en_pasado = []
total = 0
for oracion in cess_esp:
total += len(oracion)
for item in oracion:
if item[1] == 'vmis3p0':
verbos_en_pasado.append(item[0])
print('Tengo', len(verbos_en_pasado), 'verbos en pasado y', total, 'palabras en total')
print('El porcentaje de verbos es', len(verbos_en_pasado)/total)
Explanation: Note that the tags used in the Spanish treebank are different from the tags we saw for English. To begin with, Spanish is a language with much richer morphology: if we want to capture the gender and number of adjectives, for example, a plain JJ tag is not enough.
Have a look at the morphological tags and try to work out what they mean. Among these first 50 words we find:
da0ms0: article determiner, masculine singular
ncms000: common noun, masculine singular
aq0cs0: qualifying adjective, common gender, singular
np00000: proper noun
sps00: preposition
vmis3s0: main verb, indicative, past, 3rd person singular
Here you have the explanation of the tags and the full catalogue of features used for Spanish tagging in this corpus. Based on what you learn from that link:
Print only the words tagged as verb forms in the 3rd person plural of the simple past (pretérito perfecto simple) indicative.
Compute what percentage of the whole CESS_ESP corpus is made up of words tagged as verb forms in the 3rd person plural of the simple past indicative.
End of explanation
!cp ../data/universal_tagset-ES.map ~/nltk_data/taggers/universal_tagset/es-ancora.map
Explanation: The morphological tags we have seen are fairly complex, since they encode the features of Spanish inflection. Fortunately, NLTK can load tagged corpora with a simplified, universal tag set (all the details are in the paper) by passing the option tagset='universal'. For this to work, make sure you have stored, inside your nltk resources directory, the mapping from the corpus' original tags to their simplified version. The file is called universal_tagset-ES.map and you can find it in the repository's data folder. It is advisable to rename it, for example:
End of explanation
from nltk.corpus import cess_esp
cess_esp._tagset = 'es-ancora'
oraciones = cess_esp.tagged_sents(tagset='universal')
print(oraciones[0])
Explanation: Then run the following cell and note how we have loaded a list of sentences tagged with this new version of the tags.
End of explanation
# write your code here
defaultTagger = nltk.DefaultTagger('NOUN')
unigramTagger = nltk.UnigramTagger(oraciones, backoff=defaultTagger)
bigramTagger = nltk.BigramTagger(oraciones, backoff=unigramTagger)
trigramTagger = nltk.TrigramTagger(oraciones, backoff=bigramTagger)
# try your trigram-based tagger on the following sentences, which
# are almost certainly not in the corpus
print(trigramTagger.tag("Este banco está ocupado por un padre y por un hijo. El padre se llama Juan y el hijo ya te lo he dicho".split()))
print(trigramTagger.tag("El presidente del gobierno por fin ha dado la cara para anunciar aumentos de presupuesto en Educación y Sanidad a costa de dejar de subvencionar las empresas de los amigotes.".split()))
print(trigramTagger.tag("El cacique corrupto y la tonadillera se comerán el turrón en prisión.".split()))
Explanation: These tags are much simpler, aren't they? Basically we have DET for determiner, NOUN for noun, VERB for verb, ADJ for adjective, ADP for adposition (preposition), etc.
We are going to use this corpus to train several n-gram-based taggers, just as we did in class and as explained in the nltk-pos slides.
Build, incrementally, four taggers (a small evaluation sketch follows this list):
a default tagger that assumes any unknown word is a masculine singular common noun and assigns the corresponding tag to every word.
a unigram tagger that learns from the list oraciones and uses the previous tagger as backoff.
a bigram tagger that learns from the list oraciones and uses the previous tagger as backoff.
a trigram tagger that learns from the list oraciones and uses the previous tagger as backoff.
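A small, hedged sketch of how such a backoff chain could be evaluated on held-out sentences (the 90/10 split is an assumption for illustration; the notebook itself trains on the full list oraciones):
# Hold out the last 10% of sentences for evaluation (assumed split, not part of the original exercise)
train_sents = oraciones[:int(len(oraciones) * 0.9)]
test_sents = oraciones[int(len(oraciones) * 0.9):]
t0 = nltk.DefaultTagger('NOUN')
t1 = nltk.UnigramTagger(train_sents, backoff=t0)
t2 = nltk.BigramTagger(train_sents, backoff=t1)
t3 = nltk.TrigramTagger(train_sents, backoff=t2)
print(t3.evaluate(test_sents))   # overall tagging accuracy on the held-out sentences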
End of explanation |
14,559 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Outline
Glossary
Positional Astronomy
Previous
Step1: Import section specific modules
Step2: Direction Cosine Coordinates
There is another useful astronomical coordinate system that we ought to introduce at this juncture, namely the direction cosine coordinate system. The direction cosine coordinate system is quite powerful and allows us to redefine the fundamental reference point on the celestial sphere, from which we measure all other celestial objects, to an arbitrary location (i.e. we can make local sky-maps around our own chosen reference point; the vernal equinox need not be our fundamental reference point). Usually this arbitrary location is chosen to be the celestial source that we are interested in observing. We generally refer to this arbitrary location as the field centre or phase centre.
<div class=advice>
<b>Note
Step3: Recall that we can use <a class='pos_eq_convertlmnradec_dir'></a><!--\label{pos
Step4: Plotting the result. | Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import HTML
HTML('../style/course.css') #apply general CSS
Explanation: Outline
Glossary
Positional Astronomy
Previous: Horizontal Coordinates
Next: Further Reading
Import standard modules:
End of explanation
from IPython.display import HTML
HTML('../style/code_toggle.html')
Explanation: Import section specific modules:
End of explanation
RA_rad = (np.pi/12) * np.array([5. + 30./60, 5 + 32./60 + 0.4/3600, 5 + 36./60 + 12.8/3600, 5 + 40./60 + 45.5/3600])
DEC_rad = (np.pi/180)*np.array([60., 60. + 17.0/60 + 57./3600, 61. + 12./60 + 6.9/3600, 61 + 56./60 + 34./3600])
Flux_sources_labels = np.array(["", "1 Jy", "0.5 Jy", "0.2 Jy"])
Flux_sources = np.array([1., 0.5, 0.1]) #in Janskys
print "RA (rad) of Sources and Field Center = ", RA_rad
print "DEC (rad) of Sources = ", DEC_rad
Explanation: Direction Cosine Coordinates
There is another useful astronomical coordinate system that we ought to introduce at this juncture, namely the direction cosine coordinate system. The direction cosine coordinate system is quite powerful and allows us to redefine the fundamental reference point on the celestial sphere, from which we measure all other celestial objects, to an arbitrary location (i.e. we can make local sky-maps around our own chosen reference point; the vernal equinox need not be our fundamental reference point). Usually this arbitrary location is chosen to be the celestial source that we are interested in observing. We generally refer to this arbitrary location as the field centre or phase centre.
<div class=advice>
<b>Note:</b> The direction cosine coordinate system is useful for another reason, when we use
it to image interferometric data, then it becomes evident that there exists a Fourier relationship between the sky brightness function and the measurements that an interferometer makes (see <a href='../4_Visibility_Space/4_0_introduction.ipynb'>Chapter 4 ➞</a>).
</div>
<br>
We use three coordinates in the direction cosine coordinate system, namely $l$, $m$ and $n$. The coordinates $l$, $m$ and $n$ are dimensionless direction cosines, i.e.
\begin{eqnarray}
l &=& \cos(\alpha) = \frac{a_1}{|\mathbf{a}|}\
m &=& \cos(\beta) = \frac{a_2}{|\mathbf{a}|}\
n &=& \cos(\gamma) = \frac{a_3}{|\mathbf{a}|}
\end{eqnarray}
<a id='pos:fig:cosines'></a> <!--\label{pos:fig:cosines}--><img src='figures/cosine.svg' width=35%>
Figure 3.4.1: Definition of direction cosines.
The quantities $\alpha$, $\beta$, $\gamma$, $a_1$, $a_2$, $a_3$ and $\mathbf{a}$ are all defined in <a class='pos_fig_cos_dir'></a> <!--\ref{pos:fig:cos}-->. Moreover, $|\cdot|$ denotes the magnitude of its operand. The definitions above also imply that $l^2+m^2+n^2 = 1$. When $|\mathbf{a}|=1$ then we may simply interpret $l$, $m$ and $n$ as Cartesian coordinates, i.e. we may simply relabel the axes $x$, $y$ and $z$ (in <a class='pos_fig_cos_dir'></a><!--\ref{pos:fig:cos}-->) to
$l$, $m$ and $n$.
So the question now arises, how do we use $l$, $m$ and $n$ to uniquely identify a location on the celestial sphere? The direction cosine coordinate system (and the relationship between it and the celestial coordinate sytem) is depicted in <a class='pos_fig_dirconversion_dir'></a><!--\ref{pos:fig:dirconversion}-->. Note that the $n$-axis points toward the field center (which is denoted by $\boldsymbol{s}_c$ in <a class='pos_fig_dirconversion_dir'></a><!--\ref{pos:fig:dirconversion}-->. It should be clear from <a class='pos_fig_dirconversion_dir'></a><!--\ref{pos:fig:dirconversion}--> that we can use $\mathbf{s} = (l,m,n)$ to uniquely idnetify any location on the celestial sphere.
<a id='pos:fig:convert_lmn_ra_dec'></a> <!--\label{pos:fig:convert_lmn_ra_dec}--><img src='figures/conversion2.svg' width=40%>
Figure 3.4.2: The source-celestial pole-field center triangle; which enables us to derive the conversion equations between direction cosine and equatorial coordinates. The red plane represents the fundamental plane of the equatorial coordinate system, while the blue plane represents the fundamental plane of the direction cosine coordinate system. We are able to label the orthogonal fundamental axes of the direction cosine coordinate system $l$,$m$ and $n$, since the radius of the celestial sphere is equal to one.
We use the following equations to convert between the equatorial and direction cosine coordinate systems:
<p class=conclusion>
<font size=4><b>Converting between the equatorial and direction cosine coordinates (3.1)</b></font>
<br>
<br>
\begin{eqnarray}
l &=& \sin \theta \sin \psi = \cos \delta \sin \Delta \alpha \nonumber\\
m &=& \sin \theta \cos \psi = \sin \delta \cos \delta_0 - \cos \delta \sin \delta_0 \cos\Delta \alpha \nonumber\\
\delta &=& \sin^{-1}(m\cos \delta_0 + \sin \delta_0\sqrt{1-l^2-m^2})\nonumber\\
\alpha &=& \alpha_0 + \tan^{-1}\bigg(\frac{l}{\cos\delta_0\sqrt{1-l^2-m^2}-m\sin\delta_0}\bigg)\nonumber
\end{eqnarray}
</p>
<a id='pos_eq_convertlmnradec'></a><!--\label{pos:eq:convertlmnradec}-->
<div class=advice>
<b>Note:</b> See <a href='../0_Introduction/2_Appendix.ipynb'>Appendix ➞</a> for the derivation of the above relations.
</div>
We can obtain the conversion relations above by applying the spherical trigonemetric identities in <a href='../2_Mathematical_Groundwork/2_13_spherical_trigonometry.ipynb'>$\S$ 2.13 ➞</a> to the triangle depicted in <a class='pos_fig_dirconversion_dir'></a><!--\ref{pos:fig:dirconversion}--> (the one formed by the source the field center and the NCP).
There is another important interpretation of direction cosine coordinates we should
be cognisant of. If we project the direction cosine position vector $\mathbf{s}$ of a celestial body onto the $lm$-plane, its projected length will be equal to $\sin \theta$, where $\theta$ is the angular distance between your field center $\mathbf{s}_c$ and $\mathbf{s}$ measured along the surface of the celestial sphere. If $\theta$ is small we may use the small angle approximation, i.e. $\sin \theta \approx \theta$. The projected length of $\mathbf{s}$ is also equal to $\sqrt{l^2+m^2}$, implying that $l^2+m^2 \approx \theta^2$. We may therefore loosely interpret $\sqrt{l^2+m^2}$ as the angular distance between the source at $\mathbf{s}$ and the field center $\mathbf{s}_c$ measured along the surface of the celestial sphere, i.e. we may measure $l$ and $m$ in $^{\circ}$.
The explanation above is graphically illustrated in <a class='pos_fig_proj_dir'></a> <!--\ref{pos:fig:proj}-->.
<a id='pos:fig:understand_lm'></a> <!--\label{pos:fig:understand_lm}--><img src='figures/conversion2b.svg' width=40%>
Figure 3.4.3: Why do we measure $l$ and $m$ in degrees?
<p class=conclusion>
<font size=4><b>Three interpretations of direction cosine coordinates</b></font>
<br>
<br>
• <b>Direction cosines</b>: $l$,$m$ and $n$ are direction cosines<br><br>
• <b>Cartesian coordinates</b>: $l$,$m$ and $n$ are Cartesian coordinates if we work on the
unit sphere<br><br>
• <b>Angular distance</b>: $\sqrt{l^2+m^2}$ denotes the angular distance $\theta$, $(l,m,n)$ is from the field center (if $\theta$ is sufficiently small).
</p>
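Before working through the example below, the third interpretation is easy to verify numerically. The following is a small sketch of our own (the 1$^{\circ}$ offset and the field center used here are arbitrary illustrative values): it computes $\sqrt{l^2+m^2}$ for a source a small angular distance from the field center and compares it with the exact separation obtained from the spherical cosine rule.
import numpy as np
dec0, ra0 = np.deg2rad(60.0), 0.0       # arbitrary field center
dec, ra = dec0 + np.deg2rad(1.0), ra0   # source offset by 1 degree in declination
l = np.cos(dec) * np.sin(ra - ra0)
m = np.sin(dec) * np.cos(dec0) - np.cos(dec) * np.sin(dec0) * np.cos(ra - ra0)
theta = np.arccos(np.sin(dec) * np.sin(dec0) + np.cos(dec) * np.cos(dec0) * np.cos(ra - ra0))
print(np.rad2deg(np.sqrt(l**2 + m**2)), np.rad2deg(theta))  # ~0.99995 vs 1.0 degrees
For offsets of a few degrees the two numbers agree to well within a percent, which is why $l$ and $m$ are routinely quoted in degrees (or arcminutes and arcseconds) near the field center.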
Example
Here we have a couple of sources given in RA ($\alpha$) and DEC ($\delta$):
* Source 1: (5h 32m 0.4s, 60$^{\circ}$ 17' 57'') - 1 Jy
* Source 2: (5h 36m 12.8s, 61$^{\circ}$ 12' 6.9'') - 0.5 Jy
* Source 3: (5h 40m 45.5s, 61$^{\circ}$ 56' 34'') - 0.2 Jy
The field center is located at $(\alpha_0,\delta_0) = $ (5h 30m,60$^{\circ}$). The first step is to convert right ascension and declination into radians with
\begin{eqnarray}
\alpha_{\textrm{rad}} &=& \frac{\pi}{12} \bigg(h + \frac{m}{60} + \frac{s}{3600}\bigg)\nonumber\\
\delta_{\textrm{rad}} &=& \frac{\pi}{180} \bigg(d + \frac{m_{\textrm{arcmin}}}{60}+\frac{s_{\textrm{arcsec}}}{3600}\bigg)\nonumber
\end{eqnarray}
In the above equations $h,~m,~s,~d,~m_{\textrm{arcmin}}$ and $s_{\textrm{arcsec}}$ respectively denote hours, minutes, seconds, degrees, arcminutes and arcseconds. If we apply the above to our three sources we obtain
End of explanation
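# The cell that defined RA_rad and DEC_rad is not shown above, so the following is a
# minimal reconstruction (a sketch, not the original cell). It assumes the arrays start
# with the field center and are followed by sources 1-3, and that Flux_sources_labels
# holds the labels used when annotating the plot further down.
import numpy as np
h2rad = lambda h, m, s: (np.pi / 12.0) * (h + m / 60.0 + s / 3600.0)
d2rad = lambda d, m, s: (np.pi / 180.0) * (d + m / 60.0 + s / 3600.0)
RA_rad = np.array([h2rad(5, 30, 0),      # field center
                   h2rad(5, 32, 0.4),    # source 1
                   h2rad(5, 36, 12.8),   # source 2
                   h2rad(5, 40, 45.5)])  # source 3
DEC_rad = np.array([d2rad(60, 0, 0),
                    d2rad(60, 17, 57),
                    d2rad(61, 12, 6.9),
                    d2rad(61, 56, 34)])
Flux_sources_labels = np.array(["", "1 Jy", "0.5 Jy", "0.2 Jy"])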
RA_delta_rad = RA_rad-RA_rad[0] #calculating delta alpha
l = np.cos(DEC_rad) * np.sin(RA_delta_rad)
m = (np.sin(DEC_rad) * np.cos(DEC_rad[0]) - np.cos(DEC_rad) * np.sin(DEC_rad[0]) * np.cos(RA_delta_rad))
print "l (degrees) = ", l*(180./np.pi)
print "m (degrees) = ", m*(180./np.pi)
Explanation: Recall that we can use <a class='pos_eq_convertlmnradec_dir'></a><!--\label{pos:eq:convertlmnradec}--> to convert between equatorial and direction cosine coordinates, in terms of the current example this translates into the python code below. Note that before we can do the conversion we first need to calculate $\Delta \alpha$.
End of explanation
fig = plt.figure()
ax = fig.add_subplot(111)
plt.xlim([-4., 4.])
plt.ylim([-4., 4.])
plt.xlabel("$l$ [degrees]")
plt.ylabel("$m$ [degrees]")
plt.plot(l[0], m[0], "bx")
plt.hold("on")
plt.plot(l[1:]*(180/np.pi), m[1:]*(180/np.pi), "ro")
counter = 1
for xy in zip(l[1:]*(180/np.pi)+0.25, m[1:]*(180/np.pi)+0.25):
ax.annotate(Flux_sources_labels[counter], xy=xy, textcoords='offset points',horizontalalignment='right',
verticalalignment='bottom')
counter = counter + 1
plt.grid()
Explanation: Plotting the result.
End of explanation |
14,560 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<header class="w3-container w3-teal">
<img src="images/utfsm.png" alt="" height="100px" align="left"/>
<img src="images/mat.png" alt="" height="100px" align="right"/>
</header>
<br/><br/><br/><br/><br/>
MAT281
Laboratorio Aplicaciones de la Matemática en la Ingeniería
Clustering
INSTRUCCIONES
Anoten su nombre y rol en la celda siguiente.
Desarrollen los problemas de manera secuencial.
Guarden constantemente con Ctr-S para evitar sorpresas.
Reemplacen en las celdas de código donde diga #FIX_ME por el código correspondiente.
Ejecuten cada celda de código utilizando Ctr-Enter
Step1: Observación
Este laboratorio utiliza la librería sklearn (oficialmente llamada scikit learn), puesto que buscamos aplicar la técnica del clustering a datos tal como se haría en una aplicación real. El código a proveer en este laboratorio es reducido, y la nota se basará mayoritariamente en la calidad de las respuestas entregadas en los comentarios.
Problema
Step2: Desafío 1 (10%)
El siguiente código permite leer los datos desde el archivo data/wine_data.txt y cargarlos en un numpy.array. Complete la preparación de los datos, separando los datos en el arreglo X (datos a utilizar para clustering) y true_labels (etiquetas verdaderas para cada dato de X).
OBS
Step3: Desafío 2 (10%)
Utilizando el vector true_labels definido anteriormente, complete el código para conocer cuántas muestras son de tipo 0, de tipo 1 y de tipo 2.
Step4: 2. Exploración de valores
Antes de realizar el clustering, deseamos revisar los datos. El siguiente código permite conocer la distribución de las mediciones para las muestras.
Step5: Desafío 3 (10%)
En base a la exploración de valores, ¿que resulta más razonable? ¿Porqué?
1. Aplicar el algoritmo de clustering directamente.
2. Realizar algún tipo de normalización a los datos, y luego aplicar el algoritmo de clustering.
Justifique su respuesta
Step6: Desafío 4 (10%)
Ejecute el código anterior y comente los resultados. ¿Permite el clustering recobrar el agrupamiento natural de los datos? ¿Si no, porqué?
Respuesta
FIX ME. (Editar con doble click, y luego mostrar con Ctrl+Enter)
3. Normalización de los datos
Sabemos que los algoritmos suelen funcionar mejor con los datos normalizados, como se explicó en la clase de Regresión Lineal.
Note que en el caso de los algoritmos de clustering, sólo es necesario normalizar la matrix X, ¡las etiquetas no necesitan normalizarse!
Desafío 5 (20%)
Normalice los datos utilizando para obtener una nueva matriz X_mod_1, cuyas columnas tengan sus datos en el rango [0,1].
¿Porqué normalizamos por columna y no por fila?
Reutilice el código anteriormente provisto para realizar el clustering en los datos normalizados y comente los resultados obtenidos.
Step7: Comentario a los resultados obtenidos.
Respuesta
FIX ME. (Editar con doble click, y luego mostrar con Ctrl+Enter)
Desafío 6 (20%)
Estandarice los datos para obtener una nueva matriz X_mod_2, de manera que X_mod_2 posea media 0 y desviación estándar 1 para cada una de sus columnas.
Reutilice el código anteriormente provisto para realizar el clustering en los datos estandarizados y comente los resultados obtenidos.
Step8: Comentario a los resultados obtenidos.
Respuesta
FIX ME. (Editar con doble click, y luego mostrar con Ctrl+Enter)
Desafío 7 (10%)
¿Cuál de las 3 versiones aplicadas de clustering funcionó mejor? ¿Porqué cree que sea así?
Respuesta
FIX ME. (Editar con doble click, y luego mostrar con Ctrl+Enter)
Bonus Track | Python Code:
# Configuracion para recargar módulos y librerías
%reload_ext autoreload
%autoreload 2
%matplotlib inline
from IPython.core.display import HTML
HTML(open("style/mat281.css", "r").read())
from mat281_code.lab import greetings
alumno_1 = ("Sebastian Flores", "2004001-7")
alumno_2 = ("Maria Jose Vargas", "2004007-8")
HTML(greetings(alumno_1, alumno_2))
Explanation: <header class="w3-container w3-teal">
<img src="images/utfsm.png" alt="" height="100px" align="left"/>
<img src="images/mat.png" alt="" height="100px" align="right"/>
</header>
<br/><br/><br/><br/><br/>
MAT281
Laboratorio Aplicaciones de la Matemática en la Ingeniería
Clustering
INSTRUCCIONES
Anoten su nombre y rol en la celda siguiente.
Desarrollen los problemas de manera secuencial.
Guarden constantemente con Ctr-S para evitar sorpresas.
Reemplacen en las celdas de código donde diga #FIX_ME por el código correspondiente.
Ejecuten cada celda de código utilizando Ctr-Enter
End of explanation
%%bash
head data/wine_data.txt
Explanation: Observación
Este laboratorio utiliza la librería sklearn (oficialmente llamada scikit learn), puesto que buscamos aplicar la técnica del clustering a datos tal como se haría en una aplicación real. El código a proveer en este laboratorio es reducido, y la nota se basará mayoritariamente en la calidad de las respuestas entregadas en los comentarios.
Problema: Wine Dataset
Los datos del Wine Dataset son un conjunto de datos clásicos para verificar los algoritmos de clustering.
<img src="images/wine.jpg" alt="" width="600px" align="middle"/>
Los datos corresponden a 3 cultivos diferentes de vinos de la misma región de Italia, y que han sido identificados con las etiquetas 1, 2 y 3. Para cada tipo de vino se realizado 13 análisis químicos:
Alcohol
Malic acid
Ash
Alcalinity of ash
Magnesium
Total phenols
Flavanoids
Nonflavanoid phenols
Proanthocyanins
Color intensity
Hue
OD280/OD315 of diluted wines
Proline
La base de datos contiene 178 muestras distintas en total.
1. Lectura de datos
Esta vez los datos del wine dataset ya se encuentan en la carpeta Lab04/data/.
Existen 2 archivos de interés:
* wine_data.txt : Datos de interés.
* wine_names.txt : Explicación de los datos.
End of explanation
import numpy as np
data = np.loadtxt("data/wine_data.txt", delimiter=",")
names = ["Alcohol", "Malic acid", "Ash", "Alcalinity of ash", "Magnesium", "Total phenols",
"Flavanoids", "Nonflavanoid phenols", "Proanthocyanins", "Color intensity",
"Hue", "OD280/OD315", "Proline",
]
X = data[:,:] # FIX ME ¿que columnas tomar?
true_labels = data[:,:] # FIX ME ¿que columna tomar?
Explanation: Desafío 1 (10%)
El siguiente código permite leer los datos desde el archivo data/wine_data.txt y cargarlos en un numpy.array. Complete la preparación de los datos, separando los datos en el arreglo X (datos a utilizar para clustering) y true_labels (etiquetas verdaderas para cada dato de X).
OBS: Las etiquetas verdaderas deben modificarse para que sean 0, 1 y 2 (en vez de 1, 2 y 3 como vienen en el archivo).
End of explanation
tipo_0 = len(true_labels[:]) # FIX ME. ¿Como seleccionar una clase en particular?
tipo_1 = 42 # FIX ME ¿Como seleccionar una clase en particular?
tipo_2 = 0 # FIX ME ¿Como seleccionar una clase en particular?
print "Hay %d muestras de tipo 0" %tipo_0
print "Hay %d muestras de tipo 1" %tipo_1
print "Hay %d muestras de tipo 2" %tipo_2
esta_correcto = ( tipo_0+tipo_1+tipo_2==len(true_labels) )
print "Check: %d + %d + %d = %d? %s" %(tipo_0, tipo_1, tipo_2, len(true_labels), esta_correcto)
Explanation: Desafío 2 (10%)
Utilizando el vector true_labels definido anteriormente, complete el código para conocer cuántas muestras son de tipo 0, de tipo 1 y de tipo 2.
End of explanation
from matplotlib import pyplot as plt
rows, cols = 5, 3
fig, axes = plt.subplots(rows, cols, figsize=(16,16))
for i in range(rows):
for j in range(cols):
n = i*cols + j
if n<13:
ax = axes[i][j]
ax.hist(X[:,n], alpha=0.75)
ax.set_title(names[n])
fig.tight_layout()
plt.show()
Explanation: 2. Exploración de valores
Antes de realizar el clustering, deseamos revisar los datos. El siguiente código permite conocer la distribución de las mediciones para las muestras.
End of explanation
from sklearn.cluster import KMeans
from sklearn.metrics import confusion_matrix
# Parameters
n_clusters = 3
# Running the algorithm
kmeans = KMeans(n_clusters)
kmeans.fit(X)
pred_labels = kmeans.labels_
cm = confusion_matrix(true_labels, pred_labels)
print(cm)
Explanation: Desafío 3 (10%)
En base a la exploración de valores, ¿que resulta más razonable? ¿Porqué?
1. Aplicar el algoritmo de clustering directamente.
2. Realizar algún tipo de normalización a los datos, y luego aplicar el algoritmo de clustering.
Justifique su respuesta: piense en cómo funciona K-Means.
Respuesta
FIX ME. (Editar con doble click, y luego mostrar con Ctrl+Enter)
3. Clustering Directo
A continuación se provee el código para realizar el clustering de los datos de manera directa (sin normalizar).
End of explanation
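# Ilustración rápida (boceto propio; no forma parte de la pauta del laboratorio):
# K-Means usa distancias euclidianas, por lo que una columna con una escala mucho
# mayor domina el agrupamiento. Un ejemplo de juguete con datos sintéticos:
import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
rng = np.random.RandomState(0)
col_a = np.concatenate([rng.normal(0, 1, 50), rng.normal(5, 1, 50)])  # dos grupos, escala ~1
col_b = rng.normal(0, 1000, 100)                                      # ruido, escala ~1000
X_toy = np.column_stack([col_a, col_b])
labels_sin_escalar = KMeans(2).fit(X_toy).labels_
labels_escalado = KMeans(2).fit(MinMaxScaler().fit_transform(X_toy)).labels_
# labels_sin_escalar tiende a seguir el ruido de col_b; labels_escalado típicamente
# recupera los dos grupos de col_a.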
X_mod_1 = X # FIX ME
# AGREGAR CODIGO PARA REALIZAR CLUSTERING EN X_mod_1
Explanation: Desafío 4 (10%)
Ejecute el código anterior y comente los resultados. ¿Permite el clustering recobrar el agrupamiento natural de los datos? ¿Si no, porqué?
Respuesta
FIX ME. (Editar con doble click, y luego mostrar con Ctrl+Enter)
3. Normalización de los datos
Sabemos que los algoritmos suelen funcionar mejor con los datos normalizados, como se explicó en la clase de Regresión Lineal.
Note que en el caso de los algoritmos de clustering, sólo es necesario normalizar la matrix X, ¡las etiquetas no necesitan normalizarse!
Desafío 5 (20%)
Normalice los datos utilizando para obtener una nueva matriz X_mod_1, cuyas columnas tengan sus datos en el rango [0,1].
¿Porqué normalizamos por columna y no por fila?
Reutilice el código anteriormente provisto para realizar el clustering en los datos normalizados y comente los resultados obtenidos.
End of explanation
X_mod_2 = X # FIX ME
# AGREGAR CODIGO PARA REALIZAR CLUSTERING EN X_mod_1
Explanation: Comentario a los resultados obtenidos.
Respuesta
FIX ME. (Editar con doble click, y luego mostrar con Ctrl+Enter)
Desafío 6 (20%)
Estandarice los datos para obtener una nueva matriz X_mod_2, de manera que X_mod_2 posea media 0 y desviación estándar 1 para cada una de sus columnas.
Reutilice el código anteriormente provisto para realizar el clustering en los datos estandarizados y comente los resultados obtenidos.
End of explanation
from sklearn.cluster import KMeans
X_mod = np.loadtxt("data/X_estandarized.txt")
clusters = range(2,20)
total_distance = []
for n_clusters in clusters:
kmeans = KMeans(n_clusters)
kmeans.fit(X_mod)
pred_labels = kmeans.labels_
centroids = kmeans.cluster_centers_
# Get the distances
distance_for_n = 0
for k in range(n_clusters):
points = X_mod[pred_labels==k]
aux = (points - centroids[k,:])**2
distance_for_n += (aux.sum(axis=1)**0.5).sum()
total_distance.append(distance_for_n)
fig = plt.figure(figsize=(16,8))
plt.plot(clusters, total_distance, 'rs')
plt.xlim(min(clusters)-1, max(clusters)+1)
#plt.ylim(0, max(total_distance)*1.1)
plt.show()
Explanation: Comentario a los resultados obtenidos.
Respuesta
FIX ME. (Editar con doble click, y luego mostrar con Ctrl+Enter)
Desafío 7 (10%)
¿Cuál de las 3 versiones aplicadas de clustering funcionó mejor? ¿Porqué cree que sea así?
Respuesta
FIX ME. (Editar con doble click, y luego mostrar con Ctrl+Enter)
Bonus Track: Regla del codo
En todos los casos hemos utilizado que el número de clusters es igual a 3. En caso que no conociéramos este dato, deberíamos graficar la suma de las distancias a los clusters para cada punto, en función del número de clusters.
A continuación se provee el código para el caso de clustering sobre los datos estandarizados, leídos directamente de un archivo preparado especialmente.
End of explanation |
14,561 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Poincare surface of section
This example uses rebound to create a Poincare surface of section of the restricted circular three body problem (RC3BP). First, a series of RC3BP simulations with test particles at different semi-major axes are initialized at a fixed value of the Jacobi constant $C_J$. Then, each simulation is integrated and the state of the test particle is recorded whenever the test particle and perturber are at opposition, i.e., $\lambda - \lambda_\mathrm{p} = \pi$ where $\lambda$ and $\lambda_\mathrm{p}$ are the mean longitudes of the test particle and massive perturber, respectively. Finally, a surface of section showing the particles' periods versus mean anomalies is plotted. Numerous resonant islands are visible at period ratios corresponding to mean motion resonances between the particle and perturber.
Step1: Calculate the synodic angle, $\psi = \lambda - \lambda_p$, at a specified time T from a simulation, sim.
Step2: Calculate the Jacobi constant of the test particle,
$$
C_J = 2n_p l_z - |\pmb{v}|^2 + \frac{2Gm_{\star}}{|\pmb{r}-\pmb{r}_{\star}|} + \frac{2Gm_p}{|\pmb{r}-\pmb{r}_p|}
$$
where $l_z$ is the component of the test particle's specific angular momentum aligned with perturber's orbit normal.
Step3: Run simulations
Set the parameters of the simulations
Step4: Given a semi-major axis a, we solve for the eccentricity such that the Jacobi constant is equal to the user-specified value CJ. The eccentricity solution is assumed to lie in the interval specified by e_bracket. After finding a solution, we initialize and return a simulation with a test particle with the desired semi-major axis/eccentricity combination.
Step5: The surface of section points are plotted below, along with the values of the test particles' eccentricities over the range of period ratio displayed in the surface of section. | Python Code:
import rebound
import numpy as np
import matplotlib.pyplot as plt
def get_sim(m_pert,n_pert,a_tp,l_pert,l_tp,e_tp,pomega_tp):
sim = rebound.Simulation()
sim.add(m=1)
P_pert = 2 * np.pi / n_pert
sim.add(m=m_pert,P=P_pert,l=l_pert)
sim.add(m=0.,a = a_tp,l=l_tp,e=e_tp,pomega=pomega_tp)
sim.move_to_com()
return sim
Explanation: Poincare surface of section
This example uses rebound to create a Poincare surface of section of the restricted circular three body problem (RC3BP). First, a series of RC3BP simulations with test particles at different semi-major axes are initialized at a fixed value of the Jacobi constant $C_J$. Then, each simulation is integrated and the state of the test particle is recorded whenever the test particle and perturber are at opposition, i.e., $\lambda - \lambda_\mathrm{p} = \pi$ where $\lambda$ and $\lambda_\mathrm{p}$ are the mean longitudes of the test particle and massive perturber, respectively. Finally, a surface of section showing the particles' periods versus mean anomalies is plotted. Numerous resonant islands are visible at period ratios corresponding to mean motion resonances between the particle and perturber.
End of explanation
def get_psi(T,sim):
ps = sim.particles
sim.integrate(T)
return np.mod(ps[1].l - ps[2].l ,2*np.pi)
Explanation: Calculate the synodic angle, $\psi = \lambda - \lambda_p$, at a specified time T from a simulation, sim.
End of explanation
def get_jacobi_const(sim):
ps = sim.particles
star = ps[0]
planet = ps[1]
particle = ps[2]
rstar = np.array(star.xyz)
rplanet = np.array(planet.xyz)
r = np.array(particle.xyz)
v = np.array(particle.vxyz)
KE = 0.5 * v@v # test particle kinetic energy
mu1 = sim.G * star.m
mu2 = sim.G * planet.m
r1 = r-rstar
r2 = r-rplanet
PE = -1*mu1/np.sqrt(r1@r1) - mu2/np.sqrt(r2@r2) # test particle potential energy
lz = np.cross(r,v)[-1]
CJ = 2 * planet.n * lz - 2 * (KE + PE) # jacobi constant
return CJ
Explanation: Calculate the Jacobi constant of the test particle,
$$
C_J = 2n_p l_z - |\pmb{v}|^2 + \frac{2Gm_{\star}}{|\pmb{r}-\pmb{r}_{\star}|} + \frac{2Gm_p}{|\pmb{r}-\pmb{r}_p|}
$$
where $l_z$ is the component of the test particle's specific angular momentum aligned with perturber's orbit normal.
End of explanation
m_pert = 3e-5
n_pert = 5/4 * (1+0.05)
e_tp = 0.0
l_tp = 0
l_pert = 0
pomega_tp = 0.5 * np.pi
Explanation: Run simulations
Set the parameters of the simulations
End of explanation
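# Quick sanity check (a sketch of our own, not part of the original example):
# get_jacobi_const should return a conserved quantity, so re-evaluating it after
# integrating a test simulation for a while should give (nearly) the same value.
sim_check = get_sim(m_pert, n_pert, 1.0, l_pert, l_tp, e_tp, pomega_tp)
CJ_start = get_jacobi_const(sim_check)
sim_check.integrate(50 * 2 * np.pi)  # ~50 reference orbits
CJ_end = get_jacobi_const(sim_check)
print("relative drift in C_J: {:.2e}".format(abs(CJ_end - CJ_start) / abs(CJ_start)))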
from scipy.optimize import root_scalar
def get_sim_at_fixed_CJ(a,CJ,e_bracket):
get_sim_fn = lambda e,a: get_sim(m_pert,n_pert,a,l_pert,l_tp,e,pomega_tp)
root_fn = lambda e,a: get_jacobi_const(get_sim_fn(e,a)) - CJ
root = root_scalar(root_fn,args=(a,),bracket=e_bracket)
assert root.converged, "Root-finding failed to converge for a={:.1f}, CJ={:.1f}".format(a,CJ)
return get_sim_fn(root.root,a)
def get_sos_data(sim,Npts,psi_section = np.pi):
ps = sim.particles
n_syn = ps[1].n - ps[2].n
Tsyn = 2 * np.pi / n_syn
n,e,M = np.zeros((3,Npts))
for i in range(Npts):
try:
rt=root_scalar(lambda t: get_psi(t,sim) - psi_section , bracket=[sim.t + 0.8*Tsyn,sim.t + 1.2*Tsyn])
except:
# re-compute Tsyn
n_syn = ps[1].n - ps[2].n
Tsyn = 2*np.pi/n_syn
rt=root_scalar(lambda t: get_psi(t,sim) - psi_section , bracket=[sim.t + 0.8*Tsyn,sim.t + 1.2*Tsyn])
n[i] = ps[2].n
e[i] = ps[2].e
M[i] = ps[2].M
return n,e,M
a_tp0 = 1
sim0 = get_sim(m_pert,n_pert,a_tp0,l_pert,l_tp,e_tp,pomega_tp)
CJ0 = get_jacobi_const(sim0)
Nsims = 24 # Number of simulations to plot on surface of section
Npts = 100 # Number of points to plot per simulation
da_vals = np.linspace(0,0.1,Nsims)
sims = [get_sim_at_fixed_CJ(a_tp0 + da,CJ0,[0,0.3]) for da in da_vals]
all_pts = np.array([get_sos_data(sim,Npts) for sim in sims])
Explanation: Given a semi-major axis a, we solve for the eccentricity such that the Jacobi constant is equal to the user-specified value CJ. The eccentricity solution is assumed to lie in the interval specified by e_bracket. After finding a solution, we initialize and return a simulation with a test particle with the desired semi-major axis/eccentricity combination.
End of explanation
fig,ax = plt.subplots(1,2,sharey=True,figsize=(12,5))
min_pratio=0
max_pratio=np.inf
for n,e,M in all_pts:
n_tp = n
alpha = (n/n_pert)**(2/3)
ecross = 1-alpha
ax[0].plot(M / np.pi, n/n_pert,'.')
ax[1].plot(e, n/n_pert,'.')
# plot the orbit-crossing eccentricity for reference
# versus period ratio
pratios = np.linspace(*ax[0].get_ylim())
alpha = pratios**(2/3)
ecross=1-alpha
ax[1].plot(ecross,pratios,'k-',lw=3,label="orbit crossing")
ax[1].legend()
ax[0].set_ylabel(r"$P_\mathrm{pert}/P$")
ax[0].set_xlabel(r"$M/\pi$")
ax[1].set_xlabel(r"$e$")
# plot the location of some resonances
for a in ax:
a.axhline(3/4,ls='-',color='k',lw=2) # 1st order mmr
a.axhline(8/11,ls='-.',color='k') # 3rd order mmr
a.axhline(5/7,ls='--',color='k') # 2nd order mmr
a.axhline(7/10,ls='-.',color='k') # 3rd order mmr
a.axhline(2/3,ls='-',color='k') # first order mmr
Explanation: The surface of section points are plotted below, along with the values of the test particles' eccentricities over the range of period ratio displayed in the surface of section.
End of explanation |
14,562 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Copyright 2018 The TensorFlow Hub Authors.
Licensed under the Apache License, Version 2.0 (the "License");
Step1: Universal Sentence Encoder-Lite 데모
<table class="tfo-notebook-buttons" align="left">
<td> <a target="_blank" href="https
Step2: TF-Hub에서 모듈 로드하기
Step3: TF-Hub 모듈에서 SentencePiece 모델 로드하기
SentencePiece 모델은 모듈의 자산 내에 편리하게 저장됩니다. 프로세서를 초기화하려면 이 모델을 로드해야 합니다.
Step4: 몇 가지 예를 통해 모듈 테스트하기
Step5: 의미론적 텍스트 유사성(STS) 작업 예제
Universal Sentence Encoder에 의해 생성된 임베딩은 대략적으로 정규화됩니다. 두 문장의 의미론적 유사성은 인코딩의 내적으로 간편하게 계산될 수 있습니다.
Step6: 시각화된 유사성
여기서는 히트 맵으로 유사성을 나타냅니다. 최종 그래프는 9x9 행렬이며, 각 항 [i, j]는 문장 i 및 j에 대한 인코딩의 내적을 바탕으로 색상이 지정됩니다.
Step7: 평가
Step8: 평가 그래프 빌드하기
Step10: 문장 임베딩 평가하기 | Python Code:
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
Explanation: Copyright 2018 The TensorFlow Hub Authors.
Licensed under the Apache License, Version 2.0 (the "License");
End of explanation
# Install seaborn for pretty visualizations
!pip3 install --quiet seaborn
# Install SentencePiece package
# SentencePiece package is needed for Universal Sentence Encoder Lite. We'll
# use it for all the text processing and sentence feature ID lookup.
!pip3 install --quiet sentencepiece
from absl import logging
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import tensorflow_hub as hub
import sentencepiece as spm
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
import seaborn as sns
Explanation: Universal Sentence Encoder-Lite 데모
<table class="tfo-notebook-buttons" align="left">
<td> <a target="_blank" href="https://www.tensorflow.org/hub/tutorials/semantic_similarity_with_tf_hub_universal_encoder_lite"><img src="https://www.tensorflow.org/images/tf_logo_32px.png">TensorFlow.org에서 보기</a> </td>
<td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/hub/tutorials/semantic_similarity_with_tf_hub_universal_encoder_lite.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png">Google Colab에서 실행</a> </td>
<td> <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/hub/tutorials/semantic_similarity_with_tf_hub_universal_encoder_lite.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">GitHub에서 보기</a> </td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/hub/tutorials/semantic_similarity_with_tf_hub_universal_encoder_lite.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">노트북 다운로드</a></td>
<td><a href="https://tfhub.dev/google/universal-sentence-encoder-lite/2"><img src="https://www.tensorflow.org/images/hub_logo_32px.png">TF Hub 모델 보기</a></td>
</table>
이 Colab은 문장 유사성 작업에 Universal Sentence Encoder-Lite를 사용하는 방법을 보여줍니다. 이 모듈은 입력 문장에서 SentencePiece 처리를 실행해야 한다는 점만 제외하면 Universal Sentence Encoder와 매우 유사합니다.
Universal Sentence Encoder를 사용하면 기존에 개별 단어에 대한 임베딩을 조회하는 것처럼 쉽게 문장 수준 임베딩을 얻을 수 있습니다. 그러면 문장 임베딩을 간단히 사용하여 문장 수준의 의미론적 유사성을 계산할 수 있을 뿐만 아니라 감독되지 않은 더 적은 훈련 데이터를 사용하여 다운스트림 분류 작업의 성능을 높일 수 있습니다.
시작하기
설정
End of explanation
module = hub.Module("https://tfhub.dev/google/universal-sentence-encoder-lite/2")
input_placeholder = tf.sparse_placeholder(tf.int64, shape=[None, None])
encodings = module(
inputs=dict(
values=input_placeholder.values,
indices=input_placeholder.indices,
dense_shape=input_placeholder.dense_shape))
Explanation: TF-Hub에서 모듈 로드하기
End of explanation
with tf.Session() as sess:
spm_path = sess.run(module(signature="spm_path"))
sp = spm.SentencePieceProcessor()
with tf.io.gfile.GFile(spm_path, mode="rb") as f:
sp.LoadFromSerializedProto(f.read())
print("SentencePiece model loaded at {}.".format(spm_path))
def process_to_IDs_in_sparse_format(sp, sentences):
# An utility method that processes sentences with the sentence piece processor
# 'sp' and returns the results in tf.SparseTensor-similar format:
# (values, indices, dense_shape)
ids = [sp.EncodeAsIds(x) for x in sentences]
max_len = max(len(x) for x in ids)
dense_shape=(len(ids), max_len)
values=[item for sublist in ids for item in sublist]
indices=[[row,col] for row in range(len(ids)) for col in range(len(ids[row]))]
return (values, indices, dense_shape)
Explanation: TF-Hub 모듈에서 SentencePiece 모델 로드하기
SentencePiece 모델은 모듈의 자산 내에 편리하게 저장됩니다. 프로세서를 초기화하려면 이 모델을 로드해야 합니다.
End of explanation
# Compute a representation for each message, showing various lengths supported.
word = "Elephant"
sentence = "I am a sentence for which I would like to get its embedding."
paragraph = (
"Universal Sentence Encoder embeddings also support short paragraphs. "
"There is no hard limit on how long the paragraph is. Roughly, the longer "
"the more 'diluted' the embedding will be.")
messages = [word, sentence, paragraph]
values, indices, dense_shape = process_to_IDs_in_sparse_format(sp, messages)
# Reduce logging output.
logging.set_verbosity(logging.ERROR)
with tf.Session() as session:
session.run([tf.global_variables_initializer(), tf.tables_initializer()])
message_embeddings = session.run(
encodings,
feed_dict={input_placeholder.values: values,
input_placeholder.indices: indices,
input_placeholder.dense_shape: dense_shape})
for i, message_embedding in enumerate(np.array(message_embeddings).tolist()):
print("Message: {}".format(messages[i]))
print("Embedding size: {}".format(len(message_embedding)))
message_embedding_snippet = ", ".join(
(str(x) for x in message_embedding[:3]))
print("Embedding: [{}, ...]\n".format(message_embedding_snippet))
Explanation: 몇 가지 예를 통해 모듈 테스트하기
End of explanation
def plot_similarity(labels, features, rotation):
corr = np.inner(features, features)
sns.set(font_scale=1.2)
g = sns.heatmap(
corr,
xticklabels=labels,
yticklabels=labels,
vmin=0,
vmax=1,
cmap="YlOrRd")
g.set_xticklabels(labels, rotation=rotation)
g.set_title("Semantic Textual Similarity")
def run_and_plot(session, input_placeholder, messages):
values, indices, dense_shape = process_to_IDs_in_sparse_format(sp,messages)
message_embeddings = session.run(
encodings,
feed_dict={input_placeholder.values: values,
input_placeholder.indices: indices,
input_placeholder.dense_shape: dense_shape})
plot_similarity(messages, message_embeddings, 90)
Explanation: 의미론적 텍스트 유사성(STS) 작업 예제
Universal Sentence Encoder에 의해 생성된 임베딩은 대략적으로 정규화됩니다. 두 문장의 의미론적 유사성은 인코딩의 내적으로 간편하게 계산될 수 있습니다.
End of explanation
messages = [
# Smartphones
"I like my phone",
"My phone is not good.",
"Your cellphone looks great.",
# Weather
"Will it snow tomorrow?",
"Recently a lot of hurricanes have hit the US",
"Global warming is real",
# Food and health
"An apple a day, keeps the doctors away",
"Eating strawberries is healthy",
"Is paleo better than keto?",
# Asking about age
"How old are you?",
"what is your age?",
]
with tf.Session() as session:
session.run(tf.global_variables_initializer())
session.run(tf.tables_initializer())
run_and_plot(session, input_placeholder, messages)
Explanation: 시각화된 유사성
여기서는 히트 맵으로 유사성을 나타냅니다. 최종 그래프는 9x9 행렬이며, 각 항 [i, j]는 문장 i 및 j에 대한 인코딩의 내적을 바탕으로 색상이 지정됩니다.
End of explanation
import pandas
import scipy.stats
import math
def load_sts_dataset(filename):
# Loads a subset of the STS dataset into a DataFrame. In particular both
# sentences and their human rated similarity score.
sent_pairs = []
with tf.gfile.GFile(filename, "r") as f:
for line in f:
ts = line.strip().split("\t")
# (sent_1, sent_2, similarity_score)
sent_pairs.append((ts[5], ts[6], float(ts[4])))
return pandas.DataFrame(sent_pairs, columns=["sent_1", "sent_2", "sim"])
def download_and_load_sts_data():
sts_dataset = tf.keras.utils.get_file(
fname="Stsbenchmark.tar.gz",
origin="http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz",
extract=True)
sts_dev = load_sts_dataset(
os.path.join(os.path.dirname(sts_dataset), "stsbenchmark", "sts-dev.csv"))
sts_test = load_sts_dataset(
os.path.join(
os.path.dirname(sts_dataset), "stsbenchmark", "sts-test.csv"))
return sts_dev, sts_test
sts_dev, sts_test = download_and_load_sts_data()
Explanation: 평가: 의미론적 텍스트 유사성(STS) 벤치마크
STS 벤치마크는 문장 임베딩을 사용하여 계산된 유사성 점수가 사람의 판단과 일치하는 정도에 대한 내재적 평가를 제공합니다. 벤치마크를 위해 시스템이 다양한 문장 쌍 선택에 대한 유사성 점수를 반환해야 합니다. 그런 다음 Pearson 상관 관계를 사용하여 사람의 판단에 대한 머신 유사성 점수의 품질을 평가합니다.
데이터 다운로드하기
End of explanation
sts_input1 = tf.sparse_placeholder(tf.int64, shape=(None, None))
sts_input2 = tf.sparse_placeholder(tf.int64, shape=(None, None))
# For evaluation we use exactly normalized rather than
# approximately normalized.
sts_encode1 = tf.nn.l2_normalize(
module(
inputs=dict(values=sts_input1.values,
indices=sts_input1.indices,
dense_shape=sts_input1.dense_shape)),
axis=1)
sts_encode2 = tf.nn.l2_normalize(
module(
inputs=dict(values=sts_input2.values,
indices=sts_input2.indices,
dense_shape=sts_input2.dense_shape)),
axis=1)
sim_scores = -tf.acos(tf.clip_by_value(tf.reduce_sum(tf.multiply(sts_encode1, sts_encode2), axis=1), -1.0, 1.0))  # clip keeps acos numerically safe
Explanation: 평가 그래프 빌드하기
End of explanation
#@title Choose dataset for benchmark
dataset = sts_dev #@param ["sts_dev", "sts_test"] {type:"raw"}
values1, indices1, dense_shape1 = process_to_IDs_in_sparse_format(sp, dataset['sent_1'].tolist())
values2, indices2, dense_shape2 = process_to_IDs_in_sparse_format(sp, dataset['sent_2'].tolist())
similarity_scores = dataset['sim'].tolist()
def run_sts_benchmark(session):
Returns the similarity scores
scores = session.run(
sim_scores,
feed_dict={
sts_input1.values: values1,
sts_input1.indices: indices1,
sts_input1.dense_shape: dense_shape1,
sts_input2.values: values2,
sts_input2.indices: indices2,
sts_input2.dense_shape: dense_shape2,
})
return scores
with tf.Session() as session:
session.run(tf.global_variables_initializer())
session.run(tf.tables_initializer())
scores = run_sts_benchmark(session)
pearson_correlation = scipy.stats.pearsonr(scores, similarity_scores)
print('Pearson correlation coefficient = {0}\np-value = {1}'.format(
pearson_correlation[0], pearson_correlation[1]))
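# Optional extra metric (our own addition, not part of the original tutorial): the
# rank-based Spearman correlation is often reported alongside Pearson for STS benchmarks.
spearman_correlation = scipy.stats.spearmanr(scores, similarity_scores)
print('Spearman correlation coefficient = {0}'.format(spearman_correlation[0]))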
Explanation: 문장 임베딩 평가하기
End of explanation |
14,563 | Given the following text problem statement, write Python code to implement the functionality described below in problem statement
Problem:
Lists have a very simple method to insert elements: | Problem:
import numpy as np
a = np.asarray([1,2,3,4])
pos = 2
element = 66
a = np.insert(a, pos, element) |
14,564 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
ES-DOC CMIP6 Model Properties - Landice
MIP Era
Step1: Document Authors
Set document authors
Step2: Document Contributors
Specify document contributors
Step3: Document Publication
Specify document publication status
Step4: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Grid
4. Glaciers
5. Ice
6. Ice --> Mass Balance
7. Ice --> Mass Balance --> Basal
8. Ice --> Mass Balance --> Frontal
9. Ice --> Dynamics
1. Key Properties
Land ice key properties
1.1. Overview
Is Required
Step5: 1.2. Model Name
Is Required
Step6: 1.3. Ice Albedo
Is Required
Step7: 1.4. Atmospheric Coupling Variables
Is Required
Step8: 1.5. Oceanic Coupling Variables
Is Required
Step9: 1.6. Prognostic Variables
Is Required
Step10: 2. Key Properties --> Software Properties
Software properties of land ice code
2.1. Repository
Is Required
Step11: 2.2. Code Version
Is Required
Step12: 2.3. Code Languages
Is Required
Step13: 3. Grid
Land ice grid
3.1. Overview
Is Required
Step14: 3.2. Adaptive Grid
Is Required
Step15: 3.3. Base Resolution
Is Required
Step16: 3.4. Resolution Limit
Is Required
Step17: 3.5. Projection
Is Required
Step18: 4. Glaciers
Land ice glaciers
4.1. Overview
Is Required
Step19: 4.2. Description
Is Required
Step20: 4.3. Dynamic Areal Extent
Is Required
Step21: 5. Ice
Ice sheet and ice shelf
5.1. Overview
Is Required
Step22: 5.2. Grounding Line Method
Is Required
Step23: 5.3. Ice Sheet
Is Required
Step24: 5.4. Ice Shelf
Is Required
Step25: 6. Ice --> Mass Balance
Description of the surface mass balance treatment
6.1. Surface Mass Balance
Is Required
Step26: 7. Ice --> Mass Balance --> Basal
Description of basal melting
7.1. Bedrock
Is Required
Step27: 7.2. Ocean
Is Required
Step28: 8. Ice --> Mass Balance --> Frontal
Description of claving/melting from the ice shelf front
8.1. Calving
Is Required
Step29: 8.2. Melting
Is Required
Step30: 9. Ice --> Dynamics
**
9.1. Description
Is Required
Step31: 9.2. Approximation
Is Required
Step32: 9.3. Adaptive Timestep
Is Required
Step33: 9.4. Timestep
Is Required | Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cnrm-cerfacs', 'sandbox-1', 'landice')
Explanation: ES-DOC CMIP6 Model Properties - Landice
MIP Era: CMIP6
Institute: CNRM-CERFACS
Source ID: SANDBOX-1
Topic: Landice
Sub-Topics: Glaciers, Ice.
Properties: 30 (21 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:53:52
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
Explanation: Document Authors
Set document authors
End of explanation
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
Explanation: Document Contributors
Specify document contributors
End of explanation
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
Explanation: Document Publication
Specify document publication status
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Grid
4. Glaciers
5. Ice
6. Ice --> Mass Balance
7. Ice --> Mass Balance --> Basal
8. Ice --> Mass Balance --> Frontal
9. Ice --> Dynamics
1. Key Properties
Land ice key properties
1.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of land surface model.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of land surface model code
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 1.3. Ice Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify how ice albedo is modelled
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 1.4. Atmospheric Coupling Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
Which variables are passed between the atmosphere and ice (e.g. orography, ice mass)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 1.5. Oceanic Coupling Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
Which variables are passed between the ocean and ice
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 1.6. Prognostic Variables
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which variables are prognostically calculated in the ice model
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 2. Key Properties --> Software Properties
Software properties of land ice code
2.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 2.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 2.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 3. Grid
Land ice grid
3.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the grid in the land ice scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 3.2. Adaptive Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is an adative grid being used?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 3.3. Base Resolution
Is Required: TRUE Type: FLOAT Cardinality: 1.1
The base resolution (in metres), before any adaption
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 3.4. Resolution Limit
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If an adaptive grid is being used, what is the limit of the resolution (in metres)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 3.5. Projection
Is Required: TRUE Type: STRING Cardinality: 1.1
The projection of the land ice grid (e.g. albers_equal_area)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 4. Glaciers
Land ice glaciers
4.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of glaciers in the land ice scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 4.2. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of glaciers, if any
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 4.3. Dynamic Areal Extent
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Does the model include a dynamic glacial extent?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 5. Ice
Ice sheet and ice shelf
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the ice sheet and ice shelf in the land ice scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 5.2. Grounding Line Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the technique used for modelling the grounding line in the ice sheet-ice shelf coupling
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 5.3. Ice Sheet
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are ice sheets simulated?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 5.4. Ice Shelf
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are ice shelves simulated?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 6. Ice --> Mass Balance
Description of the surface mass balance treatment
6.1. Surface Mass Balance
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how and where the surface mass balance (SMB) is calculated. Include the temporal coupling frequency from the atmosphere, whether or not a separate SMB model is used, and if so details of this model, such as its resolution
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 7. Ice --> Mass Balance --> Basal
Description of basal melting
7.1. Bedrock
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of basal melting over bedrock
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 7.2. Ocean
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of basal melting over the ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 8. Ice --> Mass Balance --> Frontal
Description of claving/melting from the ice shelf front
8.1. Calving
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of calving from the front of the ice shelf
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 8.2. Melting
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of melting from the front of the ice shelf
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 9. Ice --> Dynamics
**
9.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description if ice sheet and ice shelf dynamics
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 9.2. Approximation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Approximation type used in modelling ice dynamics
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 9.3. Adaptive Timestep
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there an adaptive time scheme for the ice scheme?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 9.4. Timestep
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Timestep (in seconds) of the ice scheme. If the timestep is adaptive, then state a representative timestep.
End of explanation |
14,565 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Skip-gram word2vec
In this notebook, I'll lead you through using TensorFlow to implement the word2vec algorithm using the skip-gram architecture. By implementing this, you'll learn about embedding words for use in natural language processing. This will come in handy when dealing with things like translations.
Readings
Here are the resources I used to build this notebook. I suggest reading these either beforehand or while you're working on this material.
A really good conceptual overview of word2vec from Chris McCormick
First word2vec paper from Mikolov et al.
NIPS paper with improvements for word2vec also from Mikolov et al.
An implementation of word2vec from Thushan Ganegedara
TensorFlow word2vec tutorial
Word embeddings
When you're dealing with language and words, you end up with tens of thousands of classes to predict, one for each word. Trying to one-hot encode these words is massively inefficient, you'll have one element set to 1 and the other 50,000 set to 0. The word2vec algorithm finds much more efficient representations by finding vectors that represent the words. These vectors also contain semantic information about the words. Words that show up in similar contexts, such as "black", "white", and "red" will have vectors near each other. There are two architectures for implementing word2vec, CBOW (Continuous Bag-Of-Words) and Skip-gram.
<img src="assets/word2vec_architectures.png" width="500">
In this implementation, we'll be using the skip-gram architecture because it performs better than CBOW. Here, we pass in a word and try to predict the words surrounding it in the text. In this way, we can train the network to learn representations for words that show up in similar contexts.
First up, importing packages.
Step1: Load the text8 dataset, a file of cleaned up Wikipedia articles from Matt Mahoney. The next cell will download the data set to the data folder. Then you can extract it and delete the archive file to save storage space.
Step2: Preprocessing
Here I'm fixing up the text to make training easier. This comes from the utils module I wrote. The preprocess function converts any punctuation into tokens, so a period is changed to <PERIOD>. In this data set, there aren't any periods, but it will help in other NLP problems. I'm also removing all words that show up five or fewer times in the dataset. This will greatly reduce issues due to noise in the data and improve the quality of the vector representations. If you want to write your own functions for this stuff, go for it.
Step3: And here I'm creating dictionaries to convert words to integers and backwards, integers to words. The integers are assigned in descending frequency order, so the most frequent word ("the") is given the integer 0 and the next most frequent is 1 and so on. The words are converted to integers and stored in the list int_words.
Step4: Subsampling
Words that show up often such as "the", "of", and "for" don't provide much context to the nearby words. If we discard some of them, we can remove some of the noise from our data and in return get faster training and better representations. This process is called subsampling by Mikolov. For each word $w_i$ in the training set, we'll discard it with probability given by
$$ P(w_i) = 1 - \sqrt{\frac{t}{f(w_i)}} $$
where $t$ is a threshold parameter and $f(w_i)$ is the frequency of word $w_i$ in the total dataset.
I'm going to leave this up to you as an exercise. This is more of a programming challenge, than about deep learning specifically. But, being able to prepare your data for your network is an important skill to have. Check out my solution to see how I did it.
Exercise
Step5: Making batches
Now that our data is in good shape, we need to get it into the proper form to pass it into our network. With the skip-gram architecture, for each word in the text, we want to grab all the words in a window around that word, with size $C$.
From Mikolov et al.
Step6: Here's a function that returns batches for our network. The idea is that it grabs batch_size words from a words list. Then for each of those words, it gets the target words in the window. I haven't found a way to pass in a random number of target words and get it to work with the architecture, so I make one row per input-target pair. This is a generator function by the way, helps save memory.
Step7: Building the graph
From Chris McCormick's blog, we can see the general structure of our network.
The input words are passed in as one-hot encoded vectors. This will go into a hidden layer of linear units, then into a softmax layer. We'll use the softmax layer to make a prediction like normal.
The idea here is to train the hidden layer weight matrix to find efficient representations for our words. This weight matrix is usually called the embedding matrix or embedding look-up table. We can discard the softmax layer becuase we don't really care about making predictions with this network. We just want the embedding matrix so we can use it in other networks we build from the dataset.
I'm going to have you build the graph in stages now. First off, creating the inputs and labels placeholders like normal.
Exercise
Step8: Embedding
The embedding matrix has a size of the number of words by the number of neurons in the hidden layer. So, if you have 10,000 words and 300 hidden units, the matrix will have size $10,000 \times 300$. Remember that we're using one-hot encoded vectors for our inputs. When you do the matrix multiplication of the one-hot vector with the embedding matrix, you end up selecting only one row out of the entire matrix
Step9: Negative sampling
For every example we give the network, we train it using the output from the softmax layer. That means for each input, we're making very small changes to millions of weights even though we only have one true example. This makes training the network very inefficient. We can approximate the loss from the softmax layer by only updating a small subset of all the weights at once. We'll update the weights for the correct label, but only a small number of incorrect labels. This is called "negative sampling". Tensorflow has a convenient function to do this, tf.nn.sampled_softmax_loss.
Exercise
Step10: Validation
This code is from Thushan Ganegedara's implementation. Here we're going to choose a few common words and few uncommon words. Then, we'll print out the closest words to them. It's a nice way to check that our embedding table is grouping together words with similar semantic meanings.
Step11: Training
Below is the code to train the network. Every 100 batches it reports the training loss. Every 1000 batches, it'll print out the validation words.
Step12: Restore the trained network if you need to
Step13: Visualizing the word vectors
Below we'll use T-SNE to visualize how our high-dimensional word vectors cluster together. T-SNE is used to project these vectors into two dimensions while preserving local stucture. Check out this post from Christopher Olah to learn more about T-SNE and other ways to visualize high-dimensional data. | Python Code:
import time
import numpy as np
import tensorflow as tf
import utils
Explanation: Skip-gram word2vec
In this notebook, I'll lead you through using TensorFlow to implement the word2vec algorithm using the skip-gram architecture. By implementing this, you'll learn about embedding words for use in natural language processing. This will come in handy when dealing with things like translations.
Readings
Here are the resources I used to build this notebook. I suggest reading these either beforehand or while you're working on this material.
A really good conceptual overview of word2vec from Chris McCormick
First word2vec paper from Mikolov et al.
NIPS paper with improvements for word2vec also from Mikolov et al.
An implementation of word2vec from Thushan Ganegedara
TensorFlow word2vec tutorial
Word embeddings
When you're dealing with language and words, you end up with tens of thousands of classes to predict, one for each word. Trying to one-hot encode these words is massively inefficient, you'll have one element set to 1 and the other 50,000 set to 0. The word2vec algorithm finds much more efficient representations by finding vectors that represent the words. These vectors also contain semantic information about the words. Words that show up in similar contexts, such as "black", "white", and "red" will have vectors near each other. There are two architectures for implementing word2vec, CBOW (Continuous Bag-Of-Words) and Skip-gram.
<img src="assets/word2vec_architectures.png" width="500">
In this implementation, we'll be using the skip-gram architecture because it performs better than CBOW. Here, we pass in a word and try to predict the words surrounding it in the text. In this way, we can train the network to learn representations for words that show up in similar contexts.
First up, importing packages.
End of explanation
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import zipfile
dataset_folder_path = 'data'
dataset_filename = 'text8.zip'
dataset_name = 'Text8 Dataset'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(dataset_filename):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset_name) as pbar:
urlretrieve(
'http://mattmahoney.net/dc/text8.zip',
dataset_filename,
pbar.hook)
if not isdir(dataset_folder_path):
with zipfile.ZipFile(dataset_filename) as zip_ref:
zip_ref.extractall(dataset_folder_path)
with open('data/text8') as f:
text = f.read()
Explanation: Load the text8 dataset, a file of cleaned up Wikipedia articles from Matt Mahoney. The next cell will download the data set to the data folder. Then you can extract it and delete the archive file to save storage space.
End of explanation
words = utils.preprocess(text)
print(words[:30])
print("Total words: {}".format(len(words)))
print("Unique words: {}".format(len(set(words))))
Explanation: Preprocessing
Here I'm fixing up the text to make training easier. This comes from the utils module I wrote. The preprocess function converts any punctuation into tokens, so a period is changed to <PERIOD>. In this data set, there aren't any periods, but it will help in other NLP problems. I'm also removing all words that show up five or fewer times in the dataset. This will greatly reduce issues due to noise in the data and improve the quality of the vector representations. If you want to write your own functions for this stuff, go for it.
End of explanation
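For reference, here is a rough sketch of what a preprocess helper like this might look like. This is an assumption about utils.preprocess, not the author's exact code; the token names and the threshold are illustrative only.

```python
from collections import Counter

def preprocess_sketch(text, min_count=5):
    # replace a few punctuation marks with tokens, then tokenize on whitespace
    text = text.lower()
    text = text.replace('.', ' <PERIOD> ')
    text = text.replace(',', ' <COMMA> ')
    text = text.replace('"', ' <QUOTATION_MARK> ')
    words = text.split()
    # drop words that appear min_count times or fewer
    counts = Counter(words)
    return [word for word in words if counts[word] > min_count]
```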
vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)
int_words = [vocab_to_int[word] for word in words]
Explanation: And here I'm creating dictionaries to convert words to integers and backwards, integers to words. The integers are assigned in descending frequency order, so the most frequent word ("the") is given the integer 0 and the next most frequent is 1 and so on. The words are converted to integers and stored in the list int_words.
End of explanation
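A possible implementation of create_lookup_tables, shown only as a sketch (the real helper lives in utils and may differ): sort the vocabulary by descending frequency and enumerate it.

```python
from collections import Counter

def create_lookup_tables_sketch(words):
    counts = Counter(words)
    sorted_vocab = sorted(counts, key=counts.get, reverse=True)
    int_to_vocab = {ii: word for ii, word in enumerate(sorted_vocab)}
    vocab_to_int = {word: ii for ii, word in int_to_vocab.items()}
    return vocab_to_int, int_to_vocab
```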
## Your code here
from collections import Counter
counter = Counter(int_words)
train_words = []
t = 1e-5
total_words = len(int_words)
rands = np.power(np.random.uniform(size=len(int_words)), 2.0)
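# Note: comparing t/f(w) against u**2 (with u uniform on [0, 1]) is equivalent to
# keeping each word with probability min(1, sqrt(t/f(w))), i.e. discarding it with
# the probability P(w_i) given above.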
for i, word in enumerate(int_words):
if t / counter[word] * total_words > rands[i]:
train_words.append(word)
print(len(train_words))
Explanation: Subsampling
Words that show up often such as "the", "of", and "for" don't provide much context to the nearby words. If we discard some of them, we can remove some of the noise from our data and in return get faster training and better representations. This process is called subsampling by Mikolov. For each word $w_i$ in the training set, we'll discard it with probability given by
$$ P(w_i) = 1 - \sqrt{\frac{t}{f(w_i)}} $$
where $t$ is a threshold parameter and $f(w_i)$ is the frequency of word $w_i$ in the total dataset.
I'm going to leave this up to you as an exercise. This is more of a programming challenge, than about deep learning specifically. But, being able to prepare your data for your network is an important skill to have. Check out my solution to see how I did it.
Exercise: Implement subsampling for the words in int_words. That is, go through int_words and discard each word given the probability $P(w_i)$ shown above. Note that $P(w_i)$ is the probability that a word is discarded. Assign the subsampled data to train_words.
End of explanation
def get_target(words, idx, window_size=5):
''' Get a list of words in a window around an index. '''
r = np.random.randint(1, window_size + 1)
start = max(idx - r, 0)
stop = idx + r
return words[start: idx] + words[idx + 1: stop + 1]
Explanation: Making batches
Now that our data is in good shape, we need to get it into the proper form to pass it into our network. With the skip-gram architecture, for each word in the text, we want to grab all the words in a window around that word, with size $C$.
From Mikolov et al.:
"Since the more distant words are usually less related to the current word than those close to it, we give less weight to the distant words by sampling less from those words in our training examples... If we choose $C = 5$, for each training word we will select randomly a number $R$ in range $< 1; C >$, and then use $R$ words from history and $R$ words from the future of the current word as correct labels."
Exercise: Implement a function get_target that receives a list of words, an index, and a window size, then returns a list of words in the window around the index. Make sure to use the algorithm described above, where you choose a random number of words from the window.
End of explanation
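As a quick, illustrative sanity check (not part of the original notebook), get_target can be called on a toy sequence:

```python
# with idx=5 and window_size=2, R is drawn from {1, 2}, so this prints
# either [4, 6] or [3, 4, 6, 7]
print(get_target(list(range(10)), idx=5, window_size=2))
```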
def get_batches(words, batch_size, window_size=5):
''' Create a generator of word batches as a tuple (inputs, targets) '''
n_batches = len(words) // batch_size
# only full batches
words = words[:n_batches*batch_size]
for idx in range(0, len(words), batch_size):
x, y = [], []
batch = words[idx:idx+batch_size]
for ii in range(len(batch)):
batch_x = batch[ii]
batch_y = get_target(batch, ii, window_size)
y.extend(batch_y)
x.extend([batch_x]*len(batch_y))
yield x, y
Explanation: Here's a function that returns batches for our network. The idea is that it grabs batch_size words from a words list. Then for each of those words, it gets the target words in the window. I haven't found a way to pass in a random number of target words and get it to work with the architecture, so I make one row per input-target pair. This is a generator function by the way, helps save memory.
End of explanation
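Another quick illustration (again, not part of the original notebook): peeking at the first batch shows that each input id is repeated once per target word.

```python
batch_x, batch_y = next(get_batches(train_words, batch_size=4, window_size=5))
print(batch_x)
print(batch_y)
```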
tf.reset_default_graph()
train_graph = tf.Graph()
with train_graph.as_default():
inputs = tf.placeholder(tf.int32, [None], name="inputs")
labels = tf.placeholder(tf.int32, [None, None], name="labels")
Explanation: Building the graph
From Chris McCormick's blog, we can see the general structure of our network.
The input words are passed in as one-hot encoded vectors. This will go into a hidden layer of linear units, then into a softmax layer. We'll use the softmax layer to make a prediction like normal.
The idea here is to train the hidden layer weight matrix to find efficient representations for our words. This weight matrix is usually called the embedding matrix or embedding look-up table. We can discard the softmax layer because we don't really care about making predictions with this network. We just want the embedding matrix so we can use it in other networks we build from the dataset.
I'm going to have you build the graph in stages now. First off, creating the inputs and labels placeholders like normal.
Exercise: Assign inputs and labels using tf.placeholder. We're going to be passing in integers, so set the data types to tf.int32. The batches we're passing in will have varying sizes, so set the batch sizes to [None]. To make things work later, you'll need to set the second dimension of labels to None or 1.
End of explanation
n_vocab = len(int_to_vocab)
n_embedding = 400 # Number of embedding features
with train_graph.as_default():
embedding = tf.Variable(tf.random_uniform([n_vocab, n_embedding], -1, 1), name="embedding")
embed = tf.nn.embedding_lookup(embedding, inputs) # use tf.nn.embedding_lookup to get the hidden layer output
Explanation: Embedding
The embedding matrix has a size of the number of words by the number of neurons in the hidden layer. So, if you have 10,000 words and 300 hidden units, the matrix will have size $10,000 \times 300$. Remember that we're using one-hot encoded vectors for our inputs. When you do the matrix multiplication of the one-hot vector with the embedding matrix, you end up selecting only one row out of the entire matrix:
You don't actually need to do the matrix multiplication, you just need to select the row in the embedding matrix that corresponds to the input word. Then, the embedding matrix becomes a lookup table, you're looking up a vector the size of the hidden layer that represents the input word.
<img src="assets/word2vec_weight_matrix_lookup_table.png" width=500>
Exercise: Tensorflow provides a convenient function tf.nn.embedding_lookup that does this lookup for us. You pass in the embedding matrix and a tensor of integers, then it returns rows in the matrix corresponding to those integers. Below, set the number of embedding features you'll use (200 is a good start), create the embedding matrix variable, and use tf.nn.embedding_lookup to get the embedding tensors. For the embedding matrix, I suggest you initialize it with a uniform random numbers between -1 and 1 using tf.random_uniform. This TensorFlow tutorial will help if you get stuck.
End of explanation
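A tiny NumPy illustration (not in the original notebook) of why the lookup works: multiplying a one-hot vector by the embedding matrix selects a single row, and tf.nn.embedding_lookup returns that row without doing the multiplication.

```python
toy_embedding = np.random.rand(5, 3)   # 5 "words", 3 embedding features
one_hot = np.zeros(5)
one_hot[2] = 1                         # one-hot vector for word id 2
print(np.allclose(one_hot @ toy_embedding, toy_embedding[2]))  # True
```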
# Number of negative labels to sample
n_sampled = 100
with train_graph.as_default():
# create softmax weight matrix here
softmax_w = tf.Variable(
tf.truncated_normal([n_vocab, n_embedding])
)
# create softmax biases here
softmax_b = tf.Variable(tf.zeros([n_vocab]))
# Calculate the loss using negative sampling
loss = tf.nn.sampled_softmax_loss(
softmax_w, softmax_b,
labels, embed,
n_sampled, n_vocab
)
cost = tf.reduce_mean(loss)
optimizer = tf.train.AdamOptimizer().minimize(cost)
Explanation: Negative sampling
For every example we give the network, we train it using the output from the softmax layer. That means for each input, we're making very small changes to millions of weights even though we only have one true example. This makes training the network very inefficient. We can approximate the loss from the softmax layer by only updating a small subset of all the weights at once. We'll update the weights for the correct label, but only a small number of incorrect labels. This is called "negative sampling". Tensorflow has a convenient function to do this, tf.nn.sampled_softmax_loss.
Exercise: Below, create weights and biases for the softmax layer. Then, use tf.nn.sampled_softmax_loss to calculate the loss. Be sure to read the documentation to figure out how it works.
End of explanation
import random
with train_graph.as_default():
## From Thushan Ganegedara's implementation
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100
# pick 8 samples from (0,100) and (1000,1100) each ranges. lower id implies more frequent
valid_examples = np.array(random.sample(range(valid_window), valid_size//2))
valid_examples = np.append(valid_examples,
random.sample(range(1000,1000+valid_window), valid_size//2))
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# We use the cosine distance:
norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))
normalized_embedding = embedding / norm
valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset)
similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))
# If the checkpoints directory doesn't exist:
!mkdir -p checkpoints
Explanation: Validation
This code is from Thushan Ganegedara's implementation. Here we're going to choose a few common words and few uncommon words. Then, we'll print out the closest words to them. It's a nice way to check that our embedding table is grouping together words with similar semantic meanings.
End of explanation
epochs = 10
batch_size = 1000
window_size = 10
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
iteration = 1
loss = 0
sess.run(tf.global_variables_initializer())
for e in range(1, epochs+1):
batches = get_batches(train_words, batch_size, window_size)
start = time.time()
for x, y in batches:
feed = {inputs: x,
labels: np.array(y)[:, None]}
train_loss, _ = sess.run([cost, optimizer], feed_dict=feed)
loss += train_loss
if iteration % 100 == 0:
end = time.time()
print("Epoch {}/{}".format(e, epochs),
"Iteration: {}".format(iteration),
"Avg. Training loss: {:.4f}".format(loss/100),
"{:.4f} sec/batch".format((end-start)/100))
loss = 0
start = time.time()
if iteration % 1000 == 0:
## From Thushan Ganegedara's implementation
# note that this is expensive (~20% slowdown if computed every 500 steps)
sim = similarity.eval()
for i in range(valid_size):
valid_word = int_to_vocab[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = int_to_vocab[nearest[k]]
log = '%s %s,' % (log, close_word)
print(log)
iteration += 1
save_path = saver.save(sess, "checkpoints/text8.ckpt")
embed_mat = sess.run(normalized_embedding)
Explanation: Training
Below is the code to train the network. Every 100 batches it reports the training loss. Every 1000 batches, it'll print out the validation words.
End of explanation
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
embed_mat = sess.run(embedding)
Explanation: Restore the trained network if you need to:
End of explanation
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
viz_words = 500
tsne = TSNE()
embed_tsne = tsne.fit_transform(embed_mat[:viz_words, :])
fig, ax = plt.subplots(figsize=(14, 14))
for idx in range(viz_words):
plt.scatter(*embed_tsne[idx, :], color='steelblue')
plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)
Explanation: Visualizing the word vectors
Below we'll use T-SNE to visualize how our high-dimensional word vectors cluster together. T-SNE is used to project these vectors into two dimensions while preserving local structure. Check out this post from Christopher Olah to learn more about T-SNE and other ways to visualize high-dimensional data.
End of explanation |
14,566 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<a href="https
Step1: TensorFlow 2 quickstart for experts
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https
Step2: Load and prepare the MNIST dataset.
Step3: Use tf.data to batch and shuffle the dataset
Step4: Build the tf.keras model using the Keras model subclassing API
Step5: Choose an optimizer and loss function for training
Step6: Select metrics to measure the loss and the accuracy of the model. These metrics accumulate the values over epochs and then print the overall result.
Step7: Use tf.GradientTape to train the model
Step8: Test the model | Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Explanation: <a href="https://colab.research.google.com/github/bklooste/ANNe/blob/master/advanced_rot.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Copyright 2019 The TensorFlow Authors.
End of explanation
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, Conv2D
from tensorflow.keras import Model
Explanation: TensorFlow 2 quickstart for experts
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/quickstart/advanced"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/quickstart/advanced.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/quickstart/advanced.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/quickstart/advanced.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
This is a Google Colaboratory notebook file. Python programs are run directly in the browser—a great way to learn and use TensorFlow. To follow this tutorial, run the notebook in Google Colab by clicking the button at the top of this page.
In Colab, connect to a Python runtime: At the top-right of the menu bar, select CONNECT.
Run all the notebook code cells: Select Runtime > Run all.
Download and install TensorFlow 2. Import TensorFlow into your program:
Note: Upgrade pip to install the TensorFlow 2 package. See the install guide for details.
Import TensorFlow into your program:
End of explanation
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
# Add a channels dimension
x_train = x_train[..., tf.newaxis]
x_test = x_test[..., tf.newaxis]
Explanation: Load and prepare the MNIST dataset.
End of explanation
train_ds = tf.data.Dataset.from_tensor_slices(
(x_train, y_train)).shuffle(10000).batch(32)
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)
Explanation: Use tf.data to batch and shuffle the dataset:
End of explanation
class MyModel(Model):
def __init__(self):
super(MyModel, self).__init__()
self.conv1 = Conv2D(32, 3, activation='relu')
self.flatten = Flatten()
self.d1 = Dense(128, activation='relu')
self.d2 = Dense(10)
def call(self, x):
x = self.conv1(x)
x = self.flatten(x)
x = self.d1(x)
return self.d2(x)
# Create an instance of the model
model = MyModel()
Explanation: Build the tf.keras model using the Keras model subclassing API:
End of explanation
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.Adam()
Explanation: Choose an optimizer and loss function for training:
End of explanation
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
Explanation: Select metrics to measure the loss and the accuracy of the model. These metrics accumulate the values over epochs and then print the overall result.
End of explanation
@tf.function
def train_step(images, labels):
with tf.GradientTape() as tape:
# training=True is only needed if there are layers with different
# behavior during training versus inference (e.g. Dropout).
predictions = model(images, training=True)
loss = loss_object(labels, predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
train_loss(loss)
train_accuracy(labels, predictions)
Explanation: Use tf.GradientTape to train the model:
End of explanation
@tf.function
def test_step(images, labels):
# training=False is only needed if there are layers with different
# behavior during training versus inference (e.g. Dropout).
predictions = model(images, training=False)
t_loss = loss_object(labels, predictions)
test_loss(t_loss)
test_accuracy(labels, predictions)
EPOCHS = 5
tf.keras.backend.set_floatx('float64')
for epoch in range(EPOCHS):
# Reset the metrics at the start of the next epoch
train_loss.reset_states()
train_accuracy.reset_states()
test_loss.reset_states()
test_accuracy.reset_states()
for images, labels in train_ds:
train_step(images, labels)
for test_images, test_labels in test_ds:
test_step(test_images, test_labels)
template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
print(template.format(epoch+1,
train_loss.result(),
train_accuracy.result()*100,
test_loss.result(),
test_accuracy.result()*100))
Explanation: Test the model:
End of explanation |
14,567 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
(ADBLUCO)=
3.2 Algoritmos de descenso y búsqueda de línea en Unconstrained Convex Optimization (UCO)
```{admonition} Notas para contenedor de docker
Step1: Los candidatos a ser mínimos los encontramos al calcular el gradiente de $f_o$ que podemos calcular con SymPy
Step2: y plantear
Step3: Gráfica de las curvas de nivel de $f_o$
Step4: Resolvamos con scipy.optimize.fsolve la ecuación no lineal $\nabla f_o(x) = 0$
Step5: ```{margin}
Elegimos diferentes puntos iniciales.
```
Step6: ```{margin}
Obsérvese que los puntos root1, root2, root3 satisfacen la relación $x_2 = -x_1^2$.
```
Step7: Al evaluar el gradiente en cada punto obtenemos cero (o cercano a cero)
Step8: ```{margin}
Los puntos root1, root2 y root3 resuelven la ecuación no lineal $\nabla f(x) = 0$ .
```
Step9: ¿Cómo podemos identificar si son mínimos? ... usamos la Hessiana de $f_o$
Step10: y revisamos eigenvalores de la Hessiana evaluada en los puntos root1, root2, root3
Step11: ```{margin}
La Hessiana en root1 es semidefinida positiva por lo que no podemos concluir que sea mínimo local de $f_o$.
```
Step12: ```{margin}
La Hessiana en root2 es semidefinida positiva por lo que no podemos concluir que sea mínimo local de $f_o$.
```
Step13: ```{margin}
La Hessiana en root3 es semidefinida positiva por lo que no podemos concluir que sea mínimo local de $f_o$.
```
Step14: Tenemos un criterio para $2$ dimensiones
Step15: ```{margin}
Los tres puntos root1, root2 y root3 satisfacen $x_2 = -x_1^2$.
```
El cual se anula justo en los puntos que cumplen
Step16: ```{margin}
El punto res_fmin satisface $\nabla f_o(x) = 0$ y la relación $x_2 = -x_1^2$.
```
Step17: ```{margin}
La Hessiana en res_fmin es semidefinida positiva por lo que no podemos concluir que sea mínimo local de $f_o$.
```
Step18: Grafiquemos los puntos que cumplen $x_2=-x_1^2$
Step19: Evaluemos en un rango más amplio la función objetivo $f_o$ y realicemos una gráfica
Step20: El mínimo valor de $f_o$ es $0$ por lo que tenemos un conjunto de mínimos dados por la curva $x_2=-x_1^2$.
¿Por qué fue un poco complicado determinar el conjunto de mínimos de $f_o$?
$f_o(x)=x_1^4+2x_1^2x_2+x_2^2$ no es una función convexa en su $\text{dom}f_o$, $\mathbb{R}^2$, esto es, no satisface la desigualdad
Step21: ```{margin}
Se tiene $f(\theta x + (1-\theta)y) > \theta f(x) + (1-\theta)f(y)$ con la elección $\theta=0.5$, $x=[-0.5, -1.5]^T$, $y=[0.5, -1.5]^T$ lo que indica que $f$ no es convexa sino cóncava para esta elección de puntos $x,y$.
```
Step22: ```{admonition} Observación
Step23: se reescribe el problema de optimización como
Step24: y la Hessiana de $f_o$ es
Step25: El gradiente de $f_o$ es
Step26: El mínimo debe satisfacer la ecuación lineal con dos variables y dos ecuaciones
$$\nabla f_o(x) = Px +q =\left [ \begin{array}{cc}
5 & 4\
4 & 5
\end{array}
\right ]
\left [ \begin{array}{c}
x_1\
x_2
\end{array}
\right ]
+ \left [ \begin{array}{c}
-1\
1
\end{array}
\right ]=
\left [ \begin{array}{cc}
5x_1+4x_2-1\
4x_1+5x_2+1
\end{array}
\right ]
=0
$$
```{admonition} Observación
Step27: El problema anterior también lo podemos resolver con cvxpy pues es un problema convexo.
```{margin}
Definición de variables y función objetivo
Step28: ````{admonition} Observaciones
Step29: Como $f_o$ es una función convexa (de hecho estrictamente convexa) en su dominio $\mathbb{R}^4$, se tiene que su óptimo se obtiene igualando y resolviendo la ecuación no lineal $\nabla f_o(x) = 0$
Step30: ```{margin}
Definición de $f_o$
```
Step31: ```{margin}
Evaluando $f_o$ en $x^{(0)}$.
```
Step32: ```{margin}
Evaluando el gradiente en $x^{(0)}$
Step33: ```{margin}
Verificando que es dirección de descenso
Step34: Primera iteración
```{margin}
Esquema iterativo
Step35: ```{margin}
Evaluando $f_o$ en $x^{(1)}$ se observa que $f_o$ decreció muy poco, de hecho $f_o(x^{(0)}) \approx f_o(x^{(1)})$.
```
Step36: Obsérvese que la aproximación a primer orden por Taylor no es correcta
Step37: Segunda iteración
```{margin}
Evaluando el gradiente en $x^{(1)}$
Step38: ```{margin}
Verificando que es dirección de descenso
Step39: ```{margin}
Esquema iterativo
Step40: ```{margin}
Evaluando $f_o$ en $x^{(1)}$ se observa que $f_o$ decreció muy poco, de hecho $f_o(x^{(2)}) \approx f_o(x^{(1)})$.
```
Step41: Obsérvese que la aproximación a primer orden por Taylor no es correcta
Step42: Tercera iteración
```{margin}
Esquema iterativo
Step43: ```{margin}
Obsérvese que $x_3 \approx x_1$.
```
Step44: Cuarta iteración
```{margin}
Esquema iterativo
Step45: ```{margin}
Obsérvese que $x_4 \approx x_2$.
```
Step46: y así nos quedaremos ciclando por muchas iteraciones...
```{admonition} Comentario
El método de descenso en gradiente para el ejemplo anterior no convergerá al óptimo $x^* = \left [ \begin{array}{c} 2 \ 2 \0 \ 0 \ \end{array} \right ]$
```
```{admonition} Ejercicio
Step47: ```{margin}
Evaluando la Hessiana en $x^{(0)}$
Step48: ```{margin}
Evaluando el gradiente en $x^{(0)}$
Step49: El sistema de ecuaciones lineales a resolver es
Step50: ```{margin}
Aquí convertimos de un objeto de SymPy a un array de NumPy pues si bien podríamos resolver el sistema con SymPy es menos costoso utilizar arrays de NumPy.
```
Step51: ```{margin}
Resolvemos $\nabla ^2 f_o \left ( x^{(0)} \right ) \Delta x = - \nabla f_o \left(x^{(0)}\right )$ para obtener $\Delta x$ dirección de Newton.
```
Step52: ```{margin}
Verificando que es dirección de descenso
Step53: Primera iteración
```{margin}
Esquema iterativo
Step54: Recuérdese que siempre es útil monitorear el número de condición de la matriz del sistema de ecuaciones lineales que en este caso es la Hessiana de $f_o$ en $x^{(0)}$ para confiabilidad de nuestros cálculos al resolver el sistema de ecuaciones lineales asociado, ver {ref}Número de condición de una matriz <NCM>
Step55: ```{margin}
Evaluando $f_o$ en $x^{(1)}$ se observa que $f_o$ sí decrece $f_o(x^{(1)}) < f_o(x^{(0)})$.
```
Step56: Obsérvese que la aproximación a segundo orden por Taylor es cercana
Step57: Segunda iteración
```{margin}
Evaluando la Hessiana en $x^{(1)}$
Step58: ```{margin}
Verificando que es dirección de descenso
Step59: ```{margin}
Evaluando $f_o$ en $x^{(2)}$ se observa que $f_o$ sí decrece $f_o(x^{(2)}) < f_o(x^{(1)})$.
```
Step60: Obsérvese que la aproximación a segundo orden por Taylor es cercana
Step61: ```{margin}
Número de condición de la Hessiana de $f_o$ en $x^{(1)}$.
```
Step62: Tercera iteración
```{margin}
Evaluando la Hessiana en $x^{(2)}$
Step63: ```{margin}
Verificando que es dirección de descenso
Step64: ```{margin}
Evaluando $f_o$ en $x^{(3)}$ se observa que $f_o$ sí decrece $f_o(x^{(3)}) < f_o(x^{(2)})$.
```
Step65: Obsérvese que la aproximación a segundo orden por Taylor es cercana
Step66: ```{margin}
Número de condición de la Hessiana de $f_o$ en $x^{(2)}$.
```
Step67: Cuarta iteración
```{margin}
Evaluando la Hessiana en $x^{(3)}$
Step68: ```{margin}
Verificando que es dirección de descenso
Step69: ```{margin}
Evaluando $f_o$ en $x^{(4)}$ se observa que $f_o$ sí decrece $f_o(x^{(4)}) < f_o(x^{(3)})$.
```
Step70: Obsérvese que la aproximación a segundo orden por Taylor es cercana
Step71: ```{margin}
Obsérvese cómo va aumentando el número de condición de la Hessiana conforme nos aproximamos a la solución, en este paso se ha calculado la Hessiana de $f_o$ en $x^{(3)}$.
```
Step72: ```{admonition} Comentario
El método por dirección de Newton sí convergerá al óptimo $x^* = \left [ \begin{array}{c} 2 \ 2 \0 \ 0 \ \end{array} \right ]$ pero la convergencia será lenta.
```
Si hubiéramos elegido como punto inicial $x^{(0)} = \left [ \begin{array}{c} 5 \ 5 \1 \ 0 \ \end{array} \right ]$
Step73: ```{margin}
Evaluando la Hessiana en $x^{(0)}$
Step74: ```{margin}
Evaluando el gradiente en $x^{(0)}$
Step75: El sistema de ecuaciones lineales a resolver es
Step76: ```{margin}
SymPy nos permite obtener soluciones a sistemas de ecuaciones lineales que tienen un renglón y columna de ceros.
```
Step77: ```{margin}
Esquema iterativo
Step78: ```{margin}
$x_1$ es el óptimo del problema.
```
Step79: ```{margin}
El número de condición es $\infty$, con el print de SymPy se ve así.
```
Step80: ```{admonition} Comentarios
De acuerdo al ejemplo anterior
Step81: Tamaño o longitud de paso
En el ejemplo anterior en el que se aproximó al mínimo del siguiente problema con el método de Newton
$$\displaystyle \min_{x \in \mathbb{R}^4} (x_1-2)^2+(2-x_2)^2+x_3^2+x_4^4$$
se concluyó que tal método converge de forma lenta y el método de descenso en gradiente no converge para el punto inicial elegido en ambos métodos. La pequeña reducción que se obtenía en $f_o$ por cada iteración fue la razón de tal situación en el caso del descenso en gradiente. Una metodología que resuelve la no convergencia del método de descenso en gradiente utiliza el siguiente esquema iterativo
Step82: por lo que llegamos a aproximar al óptimo en una iteración.
```{admonition} Observación | Python Code:
import numpy as np
import sympy
from sympy.tensor.array import derive_by_array
import matplotlib.pyplot as plt
from scipy.optimize import fsolve
from scipy.optimize import fmin
import pandas as pd
import cvxpy as cp
from pytest import approx
np.set_printoptions(precision=3, suppress=True)
Explanation: (ADBLUCO)=
3.2 Algoritmos de descenso y búsqueda de línea en Unconstrained Convex Optimization (UCO)
```{admonition} Notas para contenedor de docker:
Comando de docker para ejecución de la nota de forma local:
nota: cambiar <ruta a mi directorio> por la ruta de directorio que se desea mapear a /datos dentro del contenedor de docker y <versión imagen de docker> por la versión más actualizada que se presenta en la documentación.
docker run --rm -v <ruta a mi directorio>:/datos --name jupyterlab_optimizacion -p 8888:8888 -d palmoreck/jupyterlab_optimizacion:<versión imagen de docker>
password para jupyterlab: qwerty
Detener el contenedor de docker:
docker stop jupyterlab_optimizacion
Documentación de la imagen de docker palmoreck/jupyterlab_optimizacion:<versión imagen de docker> en liga.
```
Nota generada a partir de liga.
```{admonition} Al final de esta nota la comunidad lectora:
:class: tip
Comprenderá el uso de la información de primer y segundo orden para resolver problemas small scale de minimización de funciones convexas mediante los algoritmos general de descenso y de búsqueda de línea por backtracking.
Aprenderá la importancia y relación con ecuaciones no lineales al resolver los problemas que involucran aproximar mínimos locales de funciones.
```
En esta nota consideramos problemas de optimización small scale sin restricciones. Aunque el término small scale es ambiguo pues depende la máquina en la que se realice el cómputo e involucra el número de variables o parámetros y cantidad de almacenamiento para datos, tomamos como small scale aquel problema de optimización sin restricciones en el que se tiene un número de variables del orden menor o igual a $10^3$.
Los problemas de optimización sin restricciones, Unconstrained Optimization (UO), son de la forma:
$$\min f_o(x)$$
con $f_o:\mathbb{R}^n \rightarrow \mathbb{R}$ función objetivo.
```{admonition} Comentario
Si la función objetivo es convexa se le nombra problema de optimización convexa sin restricciones,Unconstrained Convex Optimization (UCO).
```
Ejemplos de problemas de optimización small scale
En optimización la búsqueda del (o los) óptimo(s) involucran el cálculo de información de primer o segundo orden, ver {ref}Definición de función, continuidad y derivada <FCD>, de la función $f_o$ de acuerdo a lo revisado en los {ref}resultados útiles<RESUT>. Tal información para problemas small scale es calculada utilizando todos los datos en un enfoque por batch o lote.
Ejemplo
$$\displaystyle \min_{x \in \mathbb{R}^2} x_1^4+2x_1^2x_2+x_2^2$$
End of explanation
x1, x2 = sympy.symbols("x1, x2")
fo_sympy = x1**4+2*x1**2*x2+x2**2
sympy.pprint(fo_sympy)
gf = derive_by_array(fo_sympy, (x1, x2))
sympy.pprint(gf)
Explanation: Los candidatos a ser mínimos los encontramos al calcular el gradiente de $f_o$ que podemos calcular con SymPy
End of explanation
def fo_numpy(x):
return x[0]**4 + 2*x[0]**2*x[1] + x[1]**2
x1_plot,x2_plot = np.meshgrid(np.linspace(-2,2,100), np.linspace(-2,2,100))
z_plot = x1_plot**4 + 2*x1_plot**2*x2_plot + x2_plot**2
x1_np = 0
x2_np = 0
z_np = fo_numpy([x1_np, x2_np])
point = (x1_np, x2_np, z_np)
print(point)
# Create the figure
fig = plt.figure()
# Add an axes
ax = fig.gca(projection='3d')
ax.plot_surface(x1_plot, x2_plot, z_plot, alpha=0.2)
ax.scatter(point[0], point[1], point[2], color='green')
plt.title("$f_o(x) = x_1^4+2x_1^2x_2+x_2^2$")
plt.show()
Explanation: y plantear:
$$
\nabla f_o(x) =
\left [
\begin{array}{c}
4x_1^3+4x_1x_2\
2x_1^2+2x_2
\end{array}
\right ]=0
$$
la cual es una ecuación de dos variables y dos incógnitas no lineal. Resolviendo para $x_2$ se obtiene la relación: $x_2 = -x_1^2$. Entonces todos los puntos con coordenadas $x = (x_1, x_2)$ que satisfacen tal relación cumplen $\nabla f_o(x) = 0$. ¿Todos serán mínimos locales?
Gráfica de la superficie $f_o$
End of explanation
x1_plot,x2_plot = np.meshgrid(np.linspace(-2,2,100), np.linspace(-4, 1,100))
z_plot = x1_plot**4 + 2*x1_plot**2*x2_plot + x2_plot**2
plt.contour(x1_plot,x2_plot,z_plot)
plt.scatter(point[0], point[1], color="green")
plt.title("Curvas de nivel de $f_o$")
plt.show()
Explanation: Gráfica de las curvas de nivel de $f_o$
End of explanation
def eqn(x):
x1,x2=x
return [4*x1**3+4*x1*x2, 2*x1**2+2*x2]
Explanation: Resolvamos con scipy.optimize.fsolve la ecuación no lineal $\nabla f_o(x) = 0$
End of explanation
root1 = fsolve(eqn, (1, 1))
root2 = fsolve(eqn, (-1, 1))
root3 = fsolve(eqn, (2, 0))
dic_roots = {"root1": root1,
"root2": root2,
"root3": root3}
Explanation: ```{margin}
Elegimos diferentes puntos iniciales.
```
End of explanation
print(pd.DataFrame(dic_roots))
Explanation: ```{margin}
Obsérvese que los puntos root1, root2, root3 satisfacen la relación $x_2 = -x_1^2$.
```
End of explanation
gf_eval = lambda x: np.array([partial_derivative.subs({"x1": x[0],
"x2": x[1]}) for partial_derivative in gf],
dtype=float)
dic = {"root1": gf_eval(root1),
"root2": gf_eval(root2),
"root3": gf_eval(root3)}
Explanation: Al evaluar el gradiente en cada punto obtenemos cero (o cercano a cero):
End of explanation
print(pd.DataFrame(dic).round(3))
Explanation: ```{margin}
Los puntos root1, root2 y root3 resuelven la ecuación no lineal $\nabla f(x) = 0$ .
```
End of explanation
Hf = derive_by_array(gf, (x1, x2))
sympy.pprint(Hf)
Explanation: ¿Cómo podemos identificar si son mínimos? ... usamos la Hessiana de $f_o$
End of explanation
Hf_eval = lambda x: np.array([second_partial_derivative.subs({"x1": x[0],
"x2": x[1]}) for second_partial_derivative in Hf],
dtype=float)
Hf_root1 = Hf_eval(root1)
Hf_root2 = Hf_eval(root2)
Hf_root3 = Hf_eval(root3)
Explanation: y revisamos eigenvalores de la Hessiana evaluada en los puntos root1, root2, root3
End of explanation
print(np.linalg.eigvals(Hf_root1))
Explanation: ```{margin}
La Hessiana en root1 es semidefinida positiva por lo que no podemos concluir que sea mínimo local de $f_o$.
```
End of explanation
print(np.linalg.eigvals(Hf_root2))
Explanation: ```{margin}
La Hessiana en root2 es semidefinida positiva por lo que no podemos concluir que sea mínimo local de $f_o$.
```
End of explanation
print(np.linalg.eigvals(Hf_root3))
Explanation: ```{margin}
La Hessiana en root3 es semidefinida positiva por lo que no podemos concluir que sea mínimo local de $f_o$.
```
End of explanation
sympy.pprint(sympy.Matrix(Hf).det())
Explanation: Tenemos un criterio para $2$ dimensiones:
```{admonition} Comentario
Sea $f \in \mathcal{C}^2(\text{intdom}f)$, $\det(\nabla^2 f(x))$ determinante de la Hessiana y $x \in \mathbb{R}^2$ un punto crítico o estacionario de $f$:
Si $\frac{\partial^2f(x)}{\partial x_1^2} < 0$ y $\det(\nabla^2 f(x)) >0$ entonces $f$ tiene un máximo local en $x$.
Si $\frac{\partial^2f(x)}{\partial x_1^2} > 0$ y $\det(\nabla^2 f(x)) >0$ entonces $f$ tiene un mínimo local en $x$.
Si $\det(\nabla^2 f(x)) < 0$ entonces $f$ tiene un punto silla o saddle point en $x$.
Si $\det(\nabla^2 f(x)) = 0$ no podemos concluir si $x$ es extremo.
```
```{admonition} Observaciones
:class: tip
Al determinante de la Hessiana de $f$ se le nombra Hessiano de $f$.
Lo anterior es un caso particular de los resultados descritos en {ref}sobre puntos críticos <SPCRITICOS>.
```
En el ejemplo el Hessiano es:
End of explanation
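As a small illustration (this helper is not part of the original note; its name and tolerance are assumptions), the two-dimensional criterion above can be wrapped in a function. Applied to any point on the curve $x_2=-x_1^2$ it returns the inconclusive case, since the Hessian determinant vanishes there:

```python
def classify_critical_point_2d(f, x_sym, y_sym, point, tol=1e-8):
    subs_dict = {x_sym: point[0], y_sym: point[1]}
    fxx = float(sympy.diff(f, x_sym, 2).subs(subs_dict))
    fyy = float(sympy.diff(f, y_sym, 2).subs(subs_dict))
    fxy = float(sympy.diff(f, x_sym, y_sym).subs(subs_dict))
    det = fxx*fyy - fxy**2          # Hessian determinant ("Hessiano")
    if abs(det) < tol:
        return "inconclusive"
    if det < 0:
        return "saddle point"
    return "local minimum" if fxx > 0 else "local maximum"

print(classify_critical_point_2d(fo_sympy, x1, x2, (1, -1)))  # inconclusive
```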
res_fmin = fmin(fo_numpy, [1,1])
Explanation: ```{margin}
Los tres puntos root1, root2 y root3 satisfacen $x_2 = -x_1^2$.
```
El cual se anula justo en los puntos que cumplen: $x_2 = -x_1^2$
$8x_1^2 + 8x_2 = 8 x_1^2 + 8(-x_1^2) = 0$
por lo que no podemos concluir...
Usemos una función de SciPy scipy.optimize.fmin
```{margin}
Elegimos un punto inicial.
```
End of explanation
print(res_fmin)
print(gf_eval(res_fmin))
Hf_fmin = Hf_eval(res_fmin)
Explanation: ```{margin}
El punto res_fmin satisface $\nabla f_o(x) = 0$ y la relación $x_2 = -x_1^2$.
```
End of explanation
print(np.linalg.eigvals(Hf_fmin))
Explanation: ```{margin}
La Hessiana en res_fmin es semidefinida positiva por lo que no podemos concluir que sea mínimo local de $f_o$.
```
End of explanation
x1_plot,x2_plot = np.meshgrid(np.linspace(-2,2,100), np.linspace(-4,2,100))
z_plot = x1_plot**4 + 2*x1_plot**2*x2_plot + x2_plot**2
point1 = (root1[0], root1[1], fo_numpy(root1))
point2 = (root2[0], root2[1], fo_numpy(root2))
point3 = (root3[0], root3[1], fo_numpy(root3))
#another point:
point4 = (-2, -4, 0)
x1_plot2 = np.linspace(-2,2,100)
# Create the figure
fig = plt.figure()
# Add an axes
ax = fig.gca(projection='3d')
ax.plot_surface(x1_plot, x2_plot, z_plot, alpha=0.2)
ax.scatter(point[0], point[1], point[2], color='green')
ax.scatter(point1[0], point1[1], point1[2], color='green')
ax.scatter(point2[0], point2[1], point2[2], color='green')
ax.scatter(point3[0], point3[1], point3[2], color='green')
ax.scatter(point4[0], point4[1], point4[2], color='green')
ax.plot(x1_plot2, -x1_plot2**2, color="red")
plt.title("$f_o(x) = x_1^4+2x_1^2x_2+x_2^2$")
plt.show()
Explanation: Grafiquemos los puntos que cumplen $x_2=-x_1^2$
End of explanation
x1_plot,x2_plot = np.meshgrid(np.linspace(-100,100,100), np.linspace(-100,100,100))
z_plot = x1_plot**4 + 2*x1_plot**2*x2_plot + x2_plot**2
# Create the figure
fig = plt.figure()
# Add an axes
ax = fig.gca(projection='3d')
ax.plot_surface(x1_plot, x2_plot, z_plot, alpha=0.2)
plt.title("$f_o(x) = x_1^4+2x_1^2x_2+x_2^2$")
plt.show()
np.sum(z_plot < 0)
Explanation: Evaluemos en un rango más amplio la función objetivo $f_o$ y realicemos una gráfica
End of explanation
pointx = np.array([-.5, -1.5])
pointy = np.array([.5, -1.5])
theta = 1/2
point_convex_combination = theta*pointx + (1-theta)*pointy
print(fo_numpy(point_convex_combination))
Explanation: El mínimo valor de $f_o$ es $0$ por lo que tenemos un conjunto de mínimos dados por la curva $x_2=-x_1^2$.
¿Por qué fue un poco complicado determinar el conjunto de mínimos de $f_o$?
$f_o(x)=x_1^4+2x_1^2x_2+x_2^2$ no es una función convexa en su $\text{dom}f_o$, $\mathbb{R}^2$, esto es, no satisface la desigualdad:
$$f_o(\theta x + (1-\theta) y) \leq \theta f_o(x) + (1-\theta)f_o(y)$$
$\forall x,y$ en su dominio y $\forall \theta \in [0,1]$.
End of explanation
print(theta*fo_numpy(pointx) + (1-theta)*fo_numpy(pointy))
Explanation: ```{margin}
Se tiene $f(\theta x + (1-\theta)y) > \theta f(x) + (1-\theta)f(y)$ con la elección $\theta=0.5$, $x=[-0.5, -1.5]^T$, $y=[0.5, -1.5]^T$ lo que indica que $f$ no es convexa sino cóncava para esta elección de puntos $x,y$.
```
End of explanation
P = sympy.Matrix([[5, 4],
[4, 5]])
x = sympy.Matrix(sympy.symbols("x1, x2"))
q = sympy.Matrix([-1,1])
r = 3
fo_sympy = (1/2*x.T*P*x + q.T*x)[0] + r
sympy.pprint(fo_sympy.expand())
Explanation: ```{admonition} Observación
:class: tip
Recordar que si $f_o$ es una función convexa, el gradiente de $f_o$ nos ayuda a determinar si un punto es un mínimo local de forma necesaria y suficiente.
```
```{admonition} Ejercicio
:class: tip
Realizar un análisis similar al anterior para la función $f_o(x) = x_2^4+2x_2^2x_1^2+x_1^2$.
```
Ejemplo importante
$$\displaystyle \min_{x \in \mathbb{R}^2} \frac{1}{2}x^TPx+q^Tx+r$$
donde: $P=\left [\begin{array}{cc} 5 & 4\ 4 & 5 \end{array} \right ]$, $q=\left [\begin{array}{c} -1\ 1 \end{array} \right]$, $r=3$.
Haciendo las multiplicaciones de matriz-vector y productos punto
End of explanation
sympy.pprint(P.eigenvals())
Explanation: se reescribe el problema de optimización como:
$$\displaystyle \min_{x \in \mathbb{R}^2} \frac{5}{2}x_1^2 + \frac{5}{2}x_2^2+4x_1x_2 -x_1 + x_2+3$$
La función objetivo es una función estrictamente convexa en $\mathbb{R}^2$ (de hecho fuertemente convexa) pues:
```{margin}
Los eigenvalores de $P$ son $1$ y $9$ de multiplicidad simple cada uno.
```
End of explanation
sympy.pprint(derive_by_array(derive_by_array(fo_sympy, (x1,x2)),
(x1,x2))
)
Explanation: y la Hessiana de $f_o$ es:
End of explanation
sympy.pprint(fo_sympy.diff(x))
Explanation: El gradiente de $f_o$ es:
End of explanation
P = np.array([[5,4],[4,5]])
q = np.array([-1,1])
print(np.linalg.solve(P,-q))
Explanation: El mínimo debe satisfacer la ecuación lineal con dos variables y dos ecuaciones
$$\nabla f_o(x) = Px +q =\left [ \begin{array}{cc}
5 & 4\
4 & 5
\end{array}
\right ]
\left [ \begin{array}{c}
x_1\
x_2
\end{array}
\right ]
+ \left [ \begin{array}{c}
-1\
1
\end{array}
\right ]=
\left [ \begin{array}{cc}
5x_1+4x_2-1\
4x_1+5x_2+1
\end{array}
\right ]
=0
$$
```{admonition} Observación
:class: tip
En algunos casos especiales es posible resolver la ecuación no lineal $\nabla f_o(x) = 0$ para $x$ de forma analítica o cerrada. Este es el caso de este ejemplo cuya solución está dada por $x^* = -P^{-1}q$.
```
End of explanation
n = 2 #number of variables
x = cp.Variable(n) #optimization variable
fo_cvxpy = (1/2)*cp.quad_form(x, P) + q.T @ x + r#objective function
opt_objective = cp.Minimize(fo_cvxpy) #optimization objective
prob = cp.Problem(opt_objective) #optimization problem
print(prob.solve())
print("status:", prob.status)
print("optimal value", prob.value)
print("optimal var", x.value)
Explanation: El problema anterior también lo podemos resolver con cvxpy pues es un problema convexo.
```{margin}
Definición de variables y función objetivo: $\frac{1}{2}x^TPx+q^Tx+r$
```
End of explanation
x1, x2, x3, x4 = sympy.symbols("x1, x2, x3, x4")
fo_sympy = (x1-2)**2 + (2-x2)**2 + x3**2 + x4**4
gf = derive_by_array(fo_sympy, (x1, x2, x3, x4))
sympy.pprint(gf)
Hf = derive_by_array(gf, (x1, x2, x3, x4))
sympy.pprint(Hf)
Explanation: ````{admonition} Observaciones
:class: tip
El paquete cvxpy requiere que se especifique el problema de optimización a resolver siguiendo reglas establecidas en Disciplined Convex Programming. En el ejemplo anterior resulta en un error si se ejecutan las líneas siguientes:
```python
x1 = cp.Variable()
x2 = cp.Variable()
fo = 2.5*x1**2 + 4*x1*x2 - x1 + 2.5*x2**2 + x2 + 3
obj = cp.Minimize(fo)
prob = cp.Problem(obj)
prob.solve()
```
La última línea produce
Problem does not follow DCP rules. Specifically: The objective is not DCP.
En la liga de ejemplos hay muchos problemas típicos en optimización convexa y en Atomic Functions ejemplos de funciones atómicas que pueden aplicarse a expresiones de CVXPY.
````
```{admonition} Ejercicio
:class: tip
Utilizando las herramientas (teóricas y prácticas) del ejemplo anterior, resolver el problema de optimización:
$$\min_{x \in \mathbb{R}^2} ||Ax-b||_2^2$$
con $A=\left [ \begin{array}{ccc} 1 & 6 & 2.5\ 1 & 2 & 8 \ 1 & 10 & -1\ 1 & -9 & 3\ 1 & -1 & 2 \end{array} \right ]$, $b=\left [ \begin{array}{c} -1 \ 0 \ 2 \ 3.5 \ -1.7 \end{array} \right ]$.
```
Métodos de descenso
Los ejemplos anteriores mostraron la importancia de la información de primer y segundo orden de la función objetivo $f_o$ y las ecuaciones no lineales para resolver el problema de optimización. El primer ejemplo utilizó las funciones scipy.optimize.fsolve y scipy.optimize.fmin para este propósito. Tales funciones utilizan métodos iterativos para resolver ecuaciones no lineales y aproximar un mínimo local respectivamente en un esquema que para el caso de minimización satisface:
$$f_o(x^{(0)}) > f_o(x^{(1)}) > f_o(x^{(2)}) > \cdots > f_o(x^{(k)}) > \cdots$$
con $x^{(0)}$ punto inicial.
Los métodos que utilizan esquemas iterativos para calcular una secuencia de minimización de puntos $x^{(0)}, x^{(1)}, \dots \in \text{dom}f_o$ con la característica $f_o(x^{(k)}) \rightarrow p^*$ si $k \rightarrow \infty$ se conocen con el nombre de *métodos de descenso*.
```{admonition} Definición
Un método de descenso es aquel que genera la secuencia de minimización $x^{(0)}, x^{(1)}, \dots \in \text{dom}f_o$ la cual cumple con la desigualdad: $f_o(x^{(k+1)}) < f_o(x^{(k)})$ excepto para $x^{(k)}$ óptimo y $f_o(x^{(k)}) \rightarrow p^*$ si $k \rightarrow \infty$.
```
La secuencia de minimización en un método de descenso se genera con la expresión:
$$x^{(k+1)} = x^{(k)} + \Delta x$$
donde: $\Delta x$ es dirección de búsqueda que es de descenso.
```{admonition} Observaciones
:class: tip
La definición de una dirección de descenso anterior aplica para funciones en general, no es necesario que $f_o$ sea convexa.
En la práctica la secuencia de minimización involucra calcular un tamaño o longitud de paso que ayuda a la convergencia del método. Se tienen diferentes metodologías para su cálculo y más adelante se describe una de ellas.
```
```{margin}
Recuérdese que si $f_o$ es fuertemente convexa en el conjunto $\mathcal{S}$ entonces $\nabla^2 f_o (x) \in \mathbb{S}^n_{++}$ y $\text{cond}(\nabla ^2 f_o(x))$ está acotado por arriba por una constante para $x \in \mathcal{S}$.
```
```{admonition} Comentario
Asumiendo que la función $f_o$ es convexa, típicamente se asume lo siguiente para tener métodos iterativos confiables y exactos:
Los puntos iniciales $x^{(0)}$ están en $\text{dom}f_o$.
Que el conjunto $f_o(x^{(0)})$-subnivel sea cerrado pues así se garantiza que la secuencia de minimización está en el conjunto $f_o(x^{(0)})$-subnivel para todas las iteraciones.
$f_o$ fuertemente convexa en el conjunto $f_o(x^{(0)})$-subnivel para tener propiedades dadas en los {ref}resultados que son posibles probar para funciones fuertemente convexas <RESFFUERTCON>.
```
En lo siguiente se asume que $f_o$ cumple $f_o \in \mathcal{C}^2(\text{dom}f_o)$ y es convexa en un conjunto convexo y cerrado que contiene a $x^*$. Ver {ref}conjunto abierto, cerrado, cerradura e interior <CACCI> para definición de conjunto cerrado.
Ejemplo de función objetivo convexa
Encontrar el mínimo del siguiente problema con un método iterativo.
$$\displaystyle \min_{x \in \mathbb{R}^4} (x_1-2)^2+(2-x_2)^2+x_3^2+x_4^4$$
End of explanation
x_0 = np.array([5,5,1,0.1])
Explanation: Como $f_o$ es una función convexa (de hecho estrictamente convexa) en su dominio $\mathbb{R}^4$, se tiene que su óptimo se obtiene igualando y resolviendo la ecuación no lineal $\nabla f_o(x) = 0$ :
$$\nabla f_o(x) =
\left[ \begin{array}{c}
2x_1-4 \
2x_2-4\
2x_3\
4x_4^3
\end{array}
\right]
= 0
$$
El óptimo $x^* \in \mathbb{R}^4$ está dado por:
$$x^*=
\left[ \begin{array}{c}
2\
2\
0\
0
\end{array}
\right]
$$
¿Cómo encontramos numéricamente el óptimo con un método iterativo?
Condición para que un paso o dirección de búsqueda sea de descenso
La idea de los métodos de optimización es calcular direcciones $\Delta x$ de búsqueda que sean de descenso, esto es, que al movernos de un punto a otro en tal dirección, el valor de $f_o$ decrece. Existen muchas direcciones de descenso (de hecho infinitas) una que se muestra en el dibujo siguiente es la dirección de descenso de Newton $\Delta x_{nt}$:
<img src="https://dl.dropboxusercontent.com/s/25bmebx645howjw/direccion_de_descenso_de_Newton_1d.png?dl=0" heigth="600" width="600">
En el dibujo $f = f_o$ y $\hat{f}$ es un modelo cuadrático. Del punto $(x,f(x))$ nos debemos mover al punto $(x+\Delta x_{nt}, f(x + \Delta x_{nt}))$ para llegar al óptimo. En tal dirección $f$ decrece: $f(x+\Delta x_{nt}) < f(x)$ y obsérvese que $\Delta x_{nt}$ es mínimo de $\hat{f}$.
```{margin}
Ver {ref}teorema de Taylor para una función de varias variables <TEOTAYLORNVARIABLES>
```
```{admonition} Comentario
El modelo cuadrático del dibujo anterior está dado por la aproximación de segundo orden a la función $f_o$ por el teorema de Taylor con centro en $x$:
$$m(x + v) = \hat{f}_o(x + v) = f_o(x) + \nabla f_o(x)^T v + \frac{1}{2} v^T \nabla^2f_o(x)v$$
con único mínimo si $\nabla ^2 f_o(x) \in \mathbb{S}^n_{++}$ dado por $v^* = \Delta x_{nt}$ y $\Delta x_{nt}$ dirección de Newton cuya expresión está más adelante. En cada iteración se construye un modelo cuadrático:
$$m(x^{(k)} + v) = \hat{f}_o(x^{(k)} + v) = f_o(x^{(k)}) + \nabla f_o(x^{(k)})^T v + \frac{1}{2} v^T \nabla^2f_o(x^{(k)})v$$
```
Geométricamente las direcciones de descenso forman un ángulo agudo con $-\nabla f_o(x)$:
<img src="https://dl.dropboxusercontent.com/s/eednhn6lj1rag1j/zone_for_descent_directions.png?dl=0" heigth="350" width="350">
En el dibujo $f = f_o$.
```{admonition} Observación
:class: tip
Aunque se tienen una cantidad infinita de direcciones de descenso, las direcciones de descenso que típicamente son elegidas no son cercanas a ser ortogonales con el gradiente de $f_o$.
```
Tenemos una condición para garantizar que una dirección sea de descenso:
```{admonition} Definición
Si el paso o dirección de búsqueda satisface: $\nabla f_o(x)^T\Delta x < 0$ se le nombra dirección de descenso.
```
```{margin}
Recuérdese que el teorema de Taylor nos ayuda a aproximar a una función de forma local.
```
```{admonition} Comentarios
Recuérdese que $\nabla f_o(x)^T \Delta x$ es una derivada direccional de $f_o$ en $x$ en la dirección $\Delta x$, ver {ref}ejemplo función restringida a una línea <EJRestriccionALinea>.
La definición anterior se justifica pues recuérdese que por la aproximación del teorema de Taylor a primer orden se tiene:
$$f_o(x + \Delta x) \approx f_o(x) + \nabla f_o(x) ^T \Delta x$$
y si $\Delta x$ es dirección de descenso entonces: $f_o(x) + \nabla f_o(x) ^T \Delta x < f_o(x)$.
```
```{admonition} Observación
:class: tip
Obsérvese que si $x^$ es mínimo local entonces $\nabla f_o(x^) = 0$ (condición necesaria de primer orden) por lo que no existen direcciones de descenso.
```
Ejemplos de direcciones de descenso
```{sidebar} La dirección del gradiente...
Una forma de obtener la dirección de Newton es encontrando el mínimo del modelo cuadrático referido anteriormente asumiendo $\nabla ^2 f_o(x) \in \mathbb{S}^n_{++}$. La dirección del gradiente se obtiene al resolver el problema de optimización con restricciones siguiente:
$$\min_{v \in \mathbb{R}^n} \nabla f_o(x)^T v$$
$$\text{sujeto a:} ||v|| = 1$$
para la norma $2$. Se utiliza una restricción del tipo normalización pues la función objetivo involucra un producto punto (derivada direccional) que es dependiente o proporcional a la longitud de $v$.
```
$\Delta x = - \nabla f_o \left (x^{(k)} \right )$ que da lugar al método de descenso en gradiente para $x^{(k)}$ no óptimo.
$\Delta x = - \nabla^2 f_o \left (x^{(k)} \right )^{-1} \nabla f_o\left(x^{(k)} \right)$ que da lugar al método de descenso por Newton con $\nabla^2 f_o \left (x^{(k)} \right ) \in \mathbb{S}^n_{++}$ y $x^{(k)}$ no óptimo.
$\Delta x = - H_k ^{-1} \nabla f_o\left(x^{(k)}\right)$ con $H_k$ aproximación a la Hessiana de $f_o$ con $\nabla^2 f_o \left (x^{(k)} \right ) \in \mathbb{S}^n_{++}$ y $x^{(k)}$ no óptimo.
```{admonition} Observaciones
:class: tip
La definición de una dirección de descenso aplica para funciones en general, no es necesario que $f_o$ sea convexa.
Para funciones en general, la dirección de Newton es de descenso si la Hessiana es definida positiva y análogamente para las direcciones en las que se utilicen aproximaciones a la Hessiana. Esto asegura que el modelo cuadrático tenga un único mínimo y que $f_o$ decrezca su valor en tal dirección.
Comúnmente los métodos que utilizan aproximaciones a la Hessiana se conocen con el nombre de métodos Cuasi-Newton, ver Quasi-Newton_method.
```
Continuando con el ejemplo anterior
Encontrar el mínimo del siguiente problema con un método iterativo.
$$\displaystyle \min_{x \in \mathbb{R}^4} (x_1-2)^2+(2-x_2)^2+x_3^2+x_4^4$$
Opción descenso en gradiente: usando la dirección del gradiente de $f_o$ se tiene:
$$x^{(k+1)} = x^{(k)} - \nabla f_o(x^{(k)})$$
Tomando $x^{(0)} = \left [ \begin{array}{c} 5 \ 5 \1 \ 0.1 \ \end{array} \right ]$ como punto inicial:
End of explanation
f_o_np = lambda x: (x[0]-2)**2 + (2-x[1])**2 + x[2]**2 + x[3]**4  # x4 enters to the fourth power, matching fo_sympy above
Explanation: ```{margin}
Definición de $f_o$
```
End of explanation
print(f_o_np(x_0))
gf_eval = lambda x: np.array([partial_derivative.subs({"x1": x[0],
"x2": x[1],
"x3": x[2],
"x4": x[3]}) for partial_derivative in gf],
dtype=float)
Explanation: ```{margin}
Evaluando $f_o$ en $x^{(0)}$.
```
End of explanation
print(gf_eval(x_0))
Explanation: ```{margin}
Evaluando el gradiente en $x^{(0)}$: $\nabla f_o(x^{(0)})$.
```
End of explanation
print(gf_eval(x_0).dot(-gf_eval(x_0)))
Explanation: ```{margin}
Verificando que es dirección de descenso: $\nabla f_o \left (x^{(0)} \right )^T \Delta x < 0$ con $\Delta x = -\nabla f_o \left (x^{(0)} \right )$.
```
End of explanation
x_1 = x_0 - gf_eval(x_0)
print(x_1)
Explanation: Primera iteración
```{margin}
Esquema iterativo: $x_1 = x_0 + \Delta x = x_0 - \nabla f_o(x^{(0)})$.
```
End of explanation
print(f_o_np(x_0))
print(f_o_np(x_1))
Explanation: ```{margin}
Evaluando $f_o$ en $x^{(1)}$ se observa que $f_o$ decreció muy poco, de hecho $f_o(x^{(0)}) \approx f_o(x^{(1)})$.
```
End of explanation
print(f_o_np(x_0) + gf_eval(x_0).dot(-gf_eval(x_0)))
Explanation: Obsérvese que la aproximación a primer orden por Taylor no es correcta: $f_o(x_0 + \Delta x) \neq f_o(x_0) + \nabla f_o(x_0)^T \Delta x$.
End of explanation
print(gf_eval(x_1))
Explanation: Segunda iteración
```{margin}
Evaluando el gradiente en $x^{(1)}$: $\nabla f_o(x^{(1)})$.
```
End of explanation
print(gf_eval(x_1).dot(-gf_eval(x_1)))
Explanation: ```{margin}
Verificando que es dirección de descenso: $\nabla f_o \left (x^{(1)} \right )^T \Delta x < 0$ con $\Delta x = -\nabla f_o \left (x^{(1)} \right )$.
```
End of explanation
x_2 = x_1 - gf_eval(x_1)
print(x_2)
Explanation: ```{margin}
Esquema iterativo: $x_2 = x_1 + \Delta x = x_1 - \nabla f_o(x^{(1)})$.
```
End of explanation
print(f_o_np(x_1))
print(f_o_np(x_2))
Explanation: ```{margin}
Evaluando $f_o$ en $x^{(1)}$ se observa que $f_o$ decreció muy poco, de hecho $f_o(x^{(2)}) \approx f_o(x^{(1)})$.
```
End of explanation
print(f_o_np(x_1) + gf_eval(x_1).dot(-gf_eval(x_1)))
Explanation: Obsérvese que la aproximación a primer orden por Taylor no es correcta: $f_o(x_1 + \Delta x) \neq f_o(x_1) + \nabla f_o(x_1)^T \Delta x$.
End of explanation
x_3 = x_2 - gf_eval(x_2)
Explanation: Tercera iteración
```{margin}
Esquema iterativo: $x_3 = x_2 + \Delta x = x_2 - \nabla f_o(x^{(2)})$.
```
End of explanation
print(x_1)
print(x_3)
Explanation: ```{margin}
Obsérvese que $x_3 \approx x_1$.
```
End of explanation
x_4 = x_3 - gf_eval(x_3)
Explanation: Cuarta iteración
```{margin}
Esquema iterativo: $x_4 = x_3 + \Delta x = x_3 - \nabla f_o(x^{(3)})$.
```
End of explanation
print(x_2)
print(x_4)
Explanation: ```{margin}
Obsérvese que $x_4 \approx x_2$.
```
End of explanation
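Before switching to the Newton direction it is worth previewing the remedy that this note introduces at the end: a step size $t_k$ chosen by backtracking. The loop below is only a sketch (it reuses f_o_np, gf_eval and x_0 defined above; the constants alpha, beta and the number of iterations are assumptions) and shows that, while the fixed step $t=1$ cycles as seen above, gradient descent with backtracking does decrease $f_o$ and approaches $x^*$:

```python
x_k = x_0.copy()
alpha, beta, maxiter = 0.1, 0.5, 10
for k in range(maxiter):
    g = gf_eval(x_k)
    t = 1.0
    # backtracking: shrink t until the sufficient decrease condition holds
    while f_o_np(x_k - t*g) > f_o_np(x_k) - alpha*t*g.dot(g):
        t *= beta
    x_k = x_k - t*g
    print(k, t, f_o_np(x_k))
print(x_k)
```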
x_0 = np.array([5,5,1,0.1])
Hf_eval = lambda x: np.array([second_partial_derivative.subs({"x1": x[0],
"x2": x[1],
"x3": x[2],
"x4": x[3]}) for second_partial_derivative in Hf],
dtype=float)
Explanation: y así nos quedaremos ciclando por muchas iteraciones...
```{admonition} Comentario
El método de descenso en gradiente para el ejemplo anterior no convergerá al óptimo $x^* = \left [ \begin{array}{c} 2 \ 2 \0 \ 0 \ \end{array} \right ]$
```
```{admonition} Ejercicio
:class: tip
¿Converge el método de descenso en gradiente al óptimo para un punto inicial $x^{(0)} = \left [ \begin{array}{c} 2.5 \ 2.5 \0.001 \ 0.001 \ \end{array} \right ]$?
```
Opción descenso por dirección de Newton: usando la dirección de descenso de Newton de $f_o$ se tiene:
$$x^{(k+1)} = x^{(k)} - \nabla^2 f_o \left (x^{(k)} \right )^{-1} \nabla f_o\left(x^{(k)} \right)$$
Con
$$
\nabla ^2 f_o(x) =
\left [
\begin{array}{cccc}
2 & 0 & 0 & 0 \
0 & 2 & 0 & 0 \
0 & 0 & 2 & 0 \
0 & 0 & 0 & 12x_4^2
\end{array}
\right ]
$$
Tomando $x^{(0)} = \left [ \begin{array}{c} 5 \ 5 \1 \ 0.1 \ \end{array} \right ]$ como punto inicial y no calculando la inversa de la Hessiana pues en su lugar resolvemos el sistema de ecuaciones lineales $\nabla ^2 f_o \left ( x^{(k)} \right ) \Delta x = - \nabla f_o \left(x^{(k)}\right )$ resulta
End of explanation
Hf_sympy_eval = sympy.Matrix(Hf_eval(x_0))
sympy.pprint(Hf_sympy_eval)
Explanation: ```{margin}
Evaluando la Hessiana en $x^{(0)}$: $\nabla^2f_o(x^{(0)})$.
```
End of explanation
gf_sympy_eval = gf_eval(x_0)
sympy.pprint(gf_sympy_eval)
Explanation: ```{margin}
Evaluando el gradiente en $x^{(0)}$: $\nabla f_o(x^{(0)})$.
```
End of explanation
Hf_np_eval = np.array(Hf_sympy_eval, dtype=float)
_, n = Hf_np_eval.shape
Explanation: El sistema de ecuaciones lineales a resolver es:
$$
\left [
\begin{array}{cccc}
2 & 0 & 0 & 0 \
0 & 2 & 0 & 0 \
0 & 0 & 2 & 0 \
0 & 0 & 0 & 0.12
\end{array}
\right ]
\Delta x =
-\left [
\begin{array}{c}
6 \
6 \
2 \
0.004
\end{array}
\right ]
$$
Resolviendo con NumPy el sistema de ecuaciones lineales:
```{margin}
Aquí convertimos de un objeto de SymPy a un array de NumPy pues si bien podríamos resolver el sistema con SymPy es menos costoso utilizar arrays de NumPy.
```
End of explanation
gf_np_eval = np.array(gf_sympy_eval, dtype = float)
gf_np_eval
Explanation: ```{margin}
Here we convert from a SymPy object to a NumPy array: although we could solve the system with SymPy, it is cheaper to use NumPy arrays.
```
End of explanation
dir_Newton = np.linalg.solve(Hf_np_eval, -gf_np_eval)
print(dir_Newton)
Explanation: ```{margin}
We solve $\nabla ^2 f_o \left ( x^{(0)} \right ) \Delta x = - \nabla f_o \left(x^{(0)}\right )$ to obtain the Newton direction $\Delta x$.
```
End of explanation
print(gf_np_eval.dot(dir_Newton))
Explanation: ```{margin}
Verifying that it is a descent direction: $\nabla f_o \left (x^{(0)} \right )^T \Delta x < 0$ with $\Delta x = -\nabla ^2f_o(x^{(0)})^{-1} \nabla f_o(x^{(0)})$.
```
End of explanation
x_1 = x_0 + dir_Newton
print(x_1)
Explanation: First iteration
```{margin}
Iterative scheme: $x_1 = x_0 + \Delta x = x_0 - \nabla ^2f_o(x^{(0)})^{-1} \nabla f_o(x^{(0)})$
```
End of explanation
print(np.linalg.cond(Hf_np_eval))
Explanation: Recall that it is always useful to monitor the condition number of the matrix of the linear system — in this case the Hessian of $f_o$ at $x^{(0)}$ — to judge how reliable our computations are when solving the associated linear system; see {ref}Número de condición de una matriz <NCM>. (A tiny illustration of well- versus badly-conditioned matrices follows right after this cell.)
End of explanation
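As a quick illustration of the scale of condition numbers (the two matrices below are arbitrary examples, not taken from this problem):
```python
import numpy as np

A_good = np.eye(3)                        # perfectly conditioned: cond = 1
A_bad = np.array([[1.0, 1.0],
                  [1.0, 1.0 + 1e-10]])    # nearly singular: enormous cond
print(np.linalg.cond(A_good))
print(np.linalg.cond(A_bad))
```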
print(f_o_np(x_0))
print(f_o_np(x_1))
Explanation: ```{margin}
Evaluating $f_o$ at $x^{(1)}$ shows that $f_o$ does decrease: $f_o(x^{(1)}) < f_o(x^{(0)})$.
```
End of explanation
print(f_o_np(x_0) + gf_np_eval.dot(dir_Newton) + 1/2*dir_Newton.dot(Hf_np_eval@dir_Newton))
Explanation: Note that the second-order Taylor approximation is close: $f_o(x_0 + \Delta x) \approx f_o(x_0) + \nabla f_o(x_0)^T \Delta x + \frac{1}{2}\Delta x ^T\nabla^2 f_o(x_0) \Delta x$.
End of explanation
Hf_sympy_eval = sympy.Matrix(Hf_eval(x_1))
gf_sympy_eval = gf_eval(x_1)
Hf_np_eval = np.array(Hf_sympy_eval, dtype=float)
gf_np_eval = np.array(gf_sympy_eval, dtype = float)
dir_Newton = np.linalg.solve(Hf_np_eval, -gf_np_eval)
x_2 = x_1 + dir_Newton
print(x_2)
Explanation: Second iteration
```{margin}
Evaluating the Hessian at $x^{(1)}$: $\nabla^2f_o(x^{(1)})$, evaluating the gradient at $x^{(1)}$: $\nabla f_o(x^{(1)})$, and solving the system $\nabla ^2 f_o \left ( x^{(1)} \right ) \Delta x = - \nabla f_o \left(x^{(1)}\right )$.
```
```{margin}
Iterative scheme: $x_2 = x_1 + \Delta x = x_1 - \nabla ^2f_o(x^{(1)})^{-1} \nabla f_o(x^{(1)})$
```
End of explanation
print(gf_np_eval.dot(dir_Newton))
Explanation: ```{margin}
Verifying that it is a descent direction: $\nabla f_o \left (x^{(1)} \right )^T \Delta x < 0$ with $\Delta x = -\nabla ^2f_o(x^{(1)})^{-1} \nabla f_o(x^{(1)})$.
```
End of explanation
print(f_o_np(x_1))
print(f_o_np(x_2))
Explanation: ```{margin}
Evaluating $f_o$ at $x^{(2)}$ shows that $f_o$ does decrease: $f_o(x^{(2)}) < f_o(x^{(1)})$.
```
End of explanation
print(f_o_np(x_1) + gf_np_eval.dot(dir_Newton) + 1/2*dir_Newton.dot(Hf_np_eval@dir_Newton))
Explanation: Note that the second-order Taylor approximation is close: $f_o(x_1 + \Delta x) \approx f_o(x_1) + \nabla f_o(x_1)^T \Delta x + \frac{1}{2}\Delta x ^T\nabla^2 f_o(x_1) \Delta x$.
End of explanation
print(np.linalg.cond(Hf_np_eval))
Explanation: ```{margin}
Condition number of the Hessian of $f_o$ at $x^{(1)}$.
```
End of explanation
Hf_sympy_eval = sympy.Matrix(Hf_eval(x_2))
gf_sympy_eval = gf_eval(x_2)
Hf_np_eval = np.array(Hf_sympy_eval, dtype=float)
gf_np_eval = np.array(gf_sympy_eval, dtype = float)
dir_Newton = np.linalg.solve(Hf_np_eval, -gf_np_eval)
x_3 = x_2 + dir_Newton
print(x_3)
Explanation: Third iteration
```{margin}
Evaluating the Hessian at $x^{(2)}$: $\nabla^2f_o(x^{(2)})$, evaluating the gradient at $x^{(2)}$: $\nabla f_o(x^{(2)})$, and solving the system $\nabla ^2 f_o \left ( x^{(2)} \right ) \Delta x = - \nabla f_o \left(x^{(2)}\right )$.
```
```{margin}
Iterative scheme: $x_3 = x_2 + \Delta x = x_2 - \nabla ^2f_o(x^{(2)})^{-1} \nabla f_o(x^{(2)})$
```
End of explanation
print(gf_np_eval.dot(dir_Newton))
Explanation: ```{margin}
Verifying that it is a descent direction: $\nabla f_o \left (x^{(2)} \right )^T \Delta x < 0$ with $\Delta x = -\nabla ^2f_o(x^{(2)})^{-1} \nabla f_o(x^{(2)})$.
```
End of explanation
print(f_o_np(x_2))
print(f_o_np(x_3))
Explanation: ```{margin}
Evaluating $f_o$ at $x^{(3)}$ shows that $f_o$ does decrease: $f_o(x^{(3)}) < f_o(x^{(2)})$.
```
End of explanation
print(f_o_np(x_2) + gf_np_eval.dot(dir_Newton) + 1/2*dir_Newton.dot(Hf_np_eval@dir_Newton))
Explanation: Note that the second-order Taylor approximation is close: $f_o(x_2 + \Delta x) \approx f_o(x_2) + \nabla f_o(x_2)^T \Delta x + \frac{1}{2}\Delta x ^T\nabla^2 f_o(x_2) \Delta x$.
End of explanation
print(np.linalg.cond(Hf_np_eval))
Explanation: ```{margin}
Condition number of the Hessian of $f_o$ at $x^{(2)}$.
```
End of explanation
Hf_sympy_eval = sympy.Matrix(Hf_eval(x_3))
gf_sympy_eval = gf_eval(x_3)
Hf_np_eval = np.array(Hf_sympy_eval, dtype=float)
gf_np_eval = np.array(gf_sympy_eval, dtype = float)
dir_Newton = np.linalg.solve(Hf_np_eval, -gf_np_eval)
x_4 = x_3 + dir_Newton
print(x_4)
Explanation: Fourth iteration
```{margin}
Evaluating the Hessian at $x^{(3)}$: $\nabla^2f_o(x^{(3)})$, evaluating the gradient at $x^{(3)}$: $\nabla f_o(x^{(3)})$, and solving the system $\nabla ^2 f_o \left ( x^{(3)} \right ) \Delta x = - \nabla f_o \left(x^{(3)}\right )$.
```
```{margin}
Iterative scheme: $x_4 = x_3 + \Delta x = x_3 - \nabla ^2f_o(x^{(3)})^{-1} \nabla f_o(x^{(3)})$
```
End of explanation
print(gf_np_eval.dot(dir_Newton))
Explanation: ```{margin}
Verifying that it is a descent direction: $\nabla f_o \left (x^{(3)} \right )^T \Delta x < 0$ with $\Delta x = -\nabla ^2f_o(x^{(3)})^{-1} \nabla f_o(x^{(3)})$.
```
```{margin}
Note that the gradient and the Newton direction are close to being orthogonal.
```
End of explanation
print(f_o_np(x_3))
print(f_o_np(x_4))
Explanation: ```{margin}
Evaluating $f_o$ at $x^{(4)}$ shows that $f_o$ does decrease: $f_o(x^{(4)}) < f_o(x^{(3)})$.
```
End of explanation
print(f_o_np(x_3) + gf_np_eval.dot(dir_Newton) + 1/2*dir_Newton.dot(Hf_np_eval@dir_Newton))
Explanation: Note that the second-order Taylor approximation is close: $f_o(x_3 + \Delta x) \approx f_o(x_3) + \nabla f_o(x_3)^T \Delta x + \frac{1}{2}\Delta x ^T\nabla^2 f_o(x_3) \Delta x$.
End of explanation
print(np.linalg.cond(Hf_np_eval))
print(x_4 == approx(np.array([2,2,0,0.0]), abs=1e-1, rel=1e-1))
Explanation: ```{margin}
Note how the condition number of the Hessian grows as we approach the solution; in this step the Hessian of $f_o$ has been computed at $x^{(3)}$.
```
End of explanation
x_0 = np.array([5, 5, 1, 0])
Explanation: ```{admonition} Comment
The Newton-direction method does converge to the optimum $x^* = \left [ \begin{array}{c} 2 \\ 2 \\ 0 \\ 0 \end{array} \right ]$, but the convergence is slow.
```
If we had chosen $x^{(0)} = \left [ \begin{array}{c} 5 \\ 5 \\ 1 \\ 0 \end{array} \right ]$ as the initial point
End of explanation
Hf_sympy_eval = sympy.Matrix(Hf_eval(x_0))
sympy.pprint(Hf_sympy_eval)
Explanation: ```{margin}
Evaluating the Hessian at $x^{(0)}$: $\nabla^2f_o(x^{(0)})$.
```
End of explanation
gf_sympy_eval = sympy.Matrix(gf_eval(x_0))
sympy.pprint(gf_sympy_eval)
Explanation: ```{margin}
Evaluating the gradient at $x^{(0)}$: $\nabla f_o(x^{(0)})$.
```
End of explanation
x = sympy.Matrix([x1, x2, x3, x4])
Explanation: The linear system of equations to solve is:
$$
\left [
\begin{array}{cccc}
2 & 0 & 0 & 0 \\
0 & 2 & 0 & 0 \\
0 & 0 & 2 & 0 \\
0 & 0 & 0 & 0
\end{array}
\right ]
\Delta x =
-\left [
\begin{array}{c}
6 \\
6 \\
2 \\
0
\end{array}
\right ]
$$
Note that the last equation is redundant, so one solution of the system above is obtained by taking $x_4=0$ and solving:
$$
\left [
\begin{array}{ccc}
2 & 0 & 0 \\
0 & 2 & 0 \\
0 & 0 & 2
\end{array}
\right ]
\Delta x =
-\left [
\begin{array}{c}
6 \\
6 \\
2
\end{array}
\right ]
$$
End of explanation
sympy.pprint(sympy.solve(Hf_sympy_eval*x - (-gf_sympy_eval), x))
Explanation: ```{margin}
SymPy lets us obtain solutions of linear systems that have a row and column of zeros.
```
End of explanation
x_1 = x_0 + np.array([-3, -3, -1, 0])
Explanation: ```{margin}
Iterative scheme: $x_1 = x_0 + \Delta x = x_0 - \nabla ^2f_o(x^{(0)})^{-1} \nabla f_o(x^{(0)})$
```
End of explanation
print(x_1)
Explanation: ```{margin}
$x_1$ is the optimum of the problem.
```
End of explanation
sympy.pprint(Hf_sympy_eval.condition_number())
print(x_1 == approx(np.array([2,2,0,0.0]), abs=1e-4, rel=1e-4))
Explanation: ```{margin}
The condition number is $\infty$; this is how it looks with SymPy's print.
```
End of explanation
x1 = cp.Variable()
x2 = cp.Variable()
x3 = cp.Variable()
x4 = cp.Variable()
fo_cvxpy = (x1 -2)**2 + (2-x2)**2 + x3**2 + x4**2
obj = cp.Minimize(fo_cvxpy)
prob = cp.Problem(obj)
print(prob.solve())
print("status:", prob.status)
print("optimal value", prob.value)
print("optimal var", x1.value, x2.value, x3.value, x4.value)
Explanation: ```{admonition} Comments
According to the example above:
Using first- or second-order information helps us find optima of functions.
The gradient-descent method does not converge for the chosen initial point. Newton's method does converge, but convergence is slow if the last entry of the initial point is a number close to $0$.
One advantage of using the gradient direction over the Newton direction is that the gradient requires less memory than storing the Hessian: $\mathcal{O}(n)$ vs $\mathcal{O}(n^2)$.
```
Solving the problem with CVXPY
End of explanation
x_0 = np.array([5,5,1,0.1])
t_0=0.5
x_1 = x_0 - t_0*gf_eval(x_0)
print(x_1)
print(x_1 == approx(np.array([2,2,0,0.0]), abs=1e-1, rel=1e-1))
Explanation: Step size (step length)
In the previous example, where the minimum of the following problem was approximated with Newton's method
$$\displaystyle \min_{x \in \mathbb{R}^4} (x_1-2)^2+(2-x_2)^2+x_3^2+x_4^4$$
we concluded that Newton's method converges slowly and that gradient descent does not converge for the initial point chosen for both methods. In the gradient-descent case, the reason was the tiny reduction obtained in $f_o$ at each iteration. A methodology that fixes the non-convergence of gradient descent uses the following iterative scheme:
$$x^{(k+1)} = x^{(k)} + t^{(k)}\Delta x$$
with $t^{(k)}>0$.
```{admonition} Comment
For Newton's method, quadratic convergence additionally depends on choosing suitable step sizes so that:
the iterates keep approaching $x^*$,
the objective function is twice differentiable and its Hessian is positive definite on an open set containing $x^*$ and is Lipschitz continuous on that set (see Lipschitz_continuity), which helps bound the difference between $f_o$ and the quadratic model $m$.
```
Continuing with the previous example
Using the gradient-descent method for $f_o$ and the iterative scheme:
$$x^{(k+1)} = x^{(k)} - t^{(k)} \nabla f_o(x^{(k)})$$
with $t^{(0)} = 0.5$ and initial point $x^{(0)} = \left [ \begin{array}{c} 5 \\ 5 \\ 1 \\ 0.1 \end{array} \right ]$, we obtain:
End of explanation
f_o = lambda x:x**2
n = 10
def minimization_sequence():
for k in range(n):
yield (-1)**k*(1+2**(-k))
t = np.linspace(-2.3, 2.3, 100)
plt.plot(t, f_o(t))
[plt.scatter(s, f_o(s)) for s in minimization_sequence()]
plt.title("Secuencia de minimización que no converge al mínimo de $f_o$")
plt.annotate('$f(x^{(0)})$',(2, f_o(2)),fontsize=12)
plt.annotate('$f(x^{(1)})$',(-1.5, f_o(-1.5)),fontsize=12)
plt.annotate('$f(x^{(2)})$',(1.25, f_o(1.25)),fontsize=12)
plt.show()
Explanation: so we approximate the optimum in a single iteration.
```{admonition} Observation
:class: tip
For small-scale problems, Newton or quasi-Newton methods are far superior to gradient descent. For large-scale problems, however, gradient descent is widely used in machine-learning applications.
```
```{admonition} Definition
The scalar $t^{(k)}$ is called the step size (or step length); it is always positive except when $x^{(k)}$ is already optimal.
```
The value $t^{(k)}$ is computed with methodologies such as line search or trust regions (see line search, trust region); in this note we review line search with backtracking (a minimal backtracking sketch is given at the end of this example).
Why does the above work?
The condition that a direction produce a decrease in $f_o$, $f_o(x^{(k+1)}) < f_o(x^{(k)})$, is not by itself enough for the minimization sequence to converge to a minimum of $f_o$. The following example shows this behaviour for a problem in the plane.
Consider $f_o(x) = x^2$, $x^{(0)} = 2$ and steps given by the sequence $(-1)^k(1+2^{-k})$. Then:
End of explanation |
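A minimal backtracking (Armijo) line-search sketch. It is only an illustration: it reuses the f_o_np and gf_eval helpers from this notebook, and the parameters alpha and beta below are common illustrative choices, not prescribed values.
```python
import numpy as np

def backtracking_step(f, g, x, dx, alpha=0.3, beta=0.5):
    """Shrink t until the Armijo sufficient-decrease condition holds:
    f(x + t*dx) <= f(x) + alpha * t * grad_f(x)^T dx."""
    t = 1.0
    while f(x + t * dx) > f(x) + alpha * t * g(x).dot(dx):
        t *= beta
    return t

x_k = np.array([5.0, 5.0, 1.0, 0.1])
dx = -gf_eval(x_k)                                   # steepest-descent direction
t_k = backtracking_step(f_o_np, gf_eval, x_k, dx)
print(t_k, x_k + t_k * dx)
```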
14,568 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Regularization
Welcome to the second assignment of this week. Deep Learning models have so much flexibility and capacity that overfitting can be a serious problem, if the training dataset is not big enough. Sure it does well on the training set, but the learned network doesn't generalize to new examples that it has never seen!
You will learn to
Step1: Problem Statement
Step3: Each dot corresponds to a position on the football field where a football player has hit the ball with his/her head after the French goal keeper has shot the ball from the left side of the football field.
- If the dot is blue, it means the French player managed to hit the ball with his/her head
- If the dot is red, it means the other team's player hit the ball with their head
Your goal
Step4: Let's train the model without any regularization, and observe the accuracy on the train/test sets.
Step5: The train accuracy is 94.8% while the test accuracy is 91.5%. This is the baseline model (you will observe the impact of regularization on this model). Run the following code to plot the decision boundary of your model.
Step7: The non-regularized model is obviously overfitting the training set. It is fitting the noisy points! Lets now look at two techniques to reduce overfitting.
2 - L2 Regularization
The standard way to avoid overfitting is called L2 regularization. It consists of appropriately modifying your cost function, from
Step9: Expected Output
Step10: Expected Output
Step11: Congrats, the test set accuracy increased to 93%. You have saved the French football team!
You are not overfitting the training data anymore. Let's plot the decision boundary.
Step13: Observations
Step15: Expected Output
Step16: Expected Output
Step17: Dropout works great! The test accuracy has increased again (to 95%)! Your model is not overfitting the training set and does a great job on the test set. The French football team will be forever grateful to you!
Run the code below to plot the decision boundary. | Python Code:
# import packages
import numpy as np
import matplotlib.pyplot as plt
from reg_utils import sigmoid, relu, plot_decision_boundary, initialize_parameters, load_2D_dataset, predict_dec
from reg_utils import compute_cost, predict, forward_propagation, backward_propagation, update_parameters
import sklearn
import sklearn.datasets
import scipy.io
from testCases import *
%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
Explanation: Regularization
Welcome to the second assignment of this week. Deep Learning models have so much flexibility and capacity that overfitting can be a serious problem, if the training dataset is not big enough. Sure it does well on the training set, but the learned network doesn't generalize to new examples that it has never seen!
You will learn to: Use regularization in your deep learning models.
Let's first import the packages you are going to use.
End of explanation
train_X, train_Y, test_X, test_Y = load_2D_dataset()
Explanation: Problem Statement: You have just been hired as an AI expert by the French Football Corporation. They would like you to recommend positions where France's goal keeper should kick the ball so that the French team's players can then hit it with their head.
<img src="images/field_kiank.png" style="width:600px;height:350px;">
<caption><center> <u> Figure 1 </u>: Football field<br> The goal keeper kicks the ball in the air, the players of each team are fighting to hit the ball with their head </center></caption>
They give you the following 2D dataset from France's past 10 games.
End of explanation
def model(X, Y, learning_rate = 0.3, num_iterations = 30000, print_cost = True, lambd = 0, keep_prob = 1):
Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.
Arguments:
X -- input data, of shape (input size, number of examples)
Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (output size, number of examples)
learning_rate -- learning rate of the optimization
num_iterations -- number of iterations of the optimization loop
print_cost -- If True, print the cost every 10000 iterations
lambd -- regularization hyperparameter, scalar
keep_prob - probability of keeping a neuron active during drop-out, scalar.
Returns:
parameters -- parameters learned by the model. They can then be used to predict.
grads = {}
costs = [] # to keep track of the cost
m = X.shape[1] # number of examples
layers_dims = [X.shape[0], 20, 3, 1]
# Initialize parameters dictionary.
parameters = initialize_parameters(layers_dims)
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
if keep_prob == 1:
a3, cache = forward_propagation(X, parameters)
elif keep_prob < 1:
a3, cache = forward_propagation_with_dropout(X, parameters, keep_prob)
# Cost function
if lambd == 0:
cost = compute_cost(a3, Y)
else:
cost = compute_cost_with_regularization(a3, Y, parameters, lambd)
# Backward propagation.
assert(lambd==0 or keep_prob==1) # it is possible to use both L2 regularization and dropout,
# but this assignment will only explore one at a time
if lambd == 0 and keep_prob == 1:
grads = backward_propagation(X, Y, cache)
elif lambd != 0:
grads = backward_propagation_with_regularization(X, Y, cache, lambd)
elif keep_prob < 1:
grads = backward_propagation_with_dropout(X, Y, cache, keep_prob)
# Update parameters.
parameters = update_parameters(parameters, grads, learning_rate)
# Print the loss every 10000 iterations
if print_cost and i % 10000 == 0:
print("Cost after iteration {}: {}".format(i, cost))
if print_cost and i % 1000 == 0:
costs.append(cost)
# plot the cost
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (x1,000)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
Explanation: Each dot corresponds to a position on the football field where a football player has hit the ball with his/her head after the French goal keeper has shot the ball from the left side of the football field.
- If the dot is blue, it means the French player managed to hit the ball with his/her head
- If the dot is red, it means the other team's player hit the ball with their head
Your goal: Use a deep learning model to find the positions on the field where the goalkeeper should kick the ball.
Analysis of the dataset: This dataset is a little noisy, but it looks like a diagonal line separating the upper left half (blue) from the lower right half (red) would work well.
You will first try a non-regularized model. Then you'll learn how to regularize it and decide which model you will choose to solve the French Football Corporation's problem.
1 - Non-regularized model
You will use the following neural network (already implemented for you below). This model can be used:
- in regularization mode -- by setting the lambd input to a non-zero value. We use "lambd" instead of "lambda" because "lambda" is a reserved keyword in Python.
- in dropout mode -- by setting the keep_prob to a value less than one
You will first try the model without any regularization. Then, you will implement:
- L2 regularization -- functions: "compute_cost_with_regularization()" and "backward_propagation_with_regularization()"
- Dropout -- functions: "forward_propagation_with_dropout()" and "backward_propagation_with_dropout()"
In each part, you will run this model with the correct inputs so that it calls the functions you've implemented. Take a look at the code below to familiarize yourself with the model.
End of explanation
parameters = model(train_X, train_Y)
print ("On the training set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
Explanation: Let's train the model without any regularization, and observe the accuracy on the train/test sets.
End of explanation
plt.title("Model without regularization")
axes = plt.gca()
axes.set_xlim([-0.75,0.40])
axes.set_ylim([-0.75,0.65])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
Explanation: The train accuracy is 94.8% while the test accuracy is 91.5%. This is the baseline model (you will observe the impact of regularization on this model). Run the following code to plot the decision boundary of your model.
End of explanation
# GRADED FUNCTION: compute_cost_with_regularization
def compute_cost_with_regularization(A3, Y, parameters, lambd):
Implement the cost function with L2 regularization. See formula (2) above.
Arguments:
A3 -- post-activation, output of forward propagation, of shape (output size, number of examples)
Y -- "true" labels vector, of shape (output size, number of examples)
parameters -- python dictionary containing parameters of the model
Returns:
cost - value of the regularized loss function (formula (2))
m = Y.shape[1]
W1 = parameters["W1"]
W2 = parameters["W2"]
W3 = parameters["W3"]
cross_entropy_cost = compute_cost(A3, Y) # This gives you the cross-entropy part of the cost
### START CODE HERE ### (approx. 1 line)
L2_regularization_cost = (1 / m) * (lambd / 2) * (np.sum(np.square(W1)) + np.sum(np.square(W2)) + np.sum(np.square(W3)))
### END CODE HERE ###
cost = cross_entropy_cost + L2_regularization_cost
return cost
A3, Y_assess, parameters = compute_cost_with_regularization_test_case()
print("cost = " + str(compute_cost_with_regularization(A3, Y_assess, parameters, lambd = 0.1)))
Explanation: The non-regularized model is obviously overfitting the training set. It is fitting the noisy points! Let's now look at two techniques to reduce overfitting.
2 - L2 Regularization
The standard way to avoid overfitting is called L2 regularization. It consists of appropriately modifying your cost function, from:
$$J = -\frac{1}{m} \sum\limits_{i = 1}^{m} \large{(}\small y^{(i)}\log\left(a^{L}\right) + (1-y^{(i)})\log\left(1- a^{L}\right) \large{)} \tag{1}$$
To:
$$J_{regularized} = \small \underbrace{-\frac{1}{m} \sum\limits_{i = 1}^{m} \large{(}\small y^{(i)}\log\left(a^{L}\right) + (1-y^{(i)})\log\left(1- a^{L}\right) \large{)} }_\text{cross-entropy cost} + \underbrace{\frac{1}{m} \frac{\lambda}{2} \sum\limits_l\sum\limits_k\sum\limits_j W_{k,j}^{[l]2} }_\text{L2 regularization cost} \tag{2}$$
Let's modify your cost and observe the consequences.
Exercise: Implement compute_cost_with_regularization() which computes the cost given by formula (2). To calculate $\sum\limits_k\sum\limits_j W_{k,j}^{[l]2}$ , use :
python
np.sum(np.square(Wl))
Note that you have to do this for $W^{[1]}$, $W^{[2]}$ and $W^{[3]}$, then sum the three terms and multiply by $ \frac{1}{m} \frac{\lambda}{2} $. (A tiny standalone numeric check of this term is sketched right after this cell.)
End of explanation
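A tiny standalone numeric check of the L2 term. The 2×2 weight matrices, lambd and m below are made-up illustrative values, not the assignment's graded parameters.
```python
import numpy as np

W1 = np.array([[1.0, 2.0], [3.0, 4.0]])
W2 = np.array([[0.5, -0.5], [1.0, 0.0]])
lambd, m = 0.1, 4
l2_term = (1 / m) * (lambd / 2) * (np.sum(np.square(W1)) + np.sum(np.square(W2)))
print(l2_term)  # (0.1 / 8) * (30 + 1.5) = 0.39375
```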
# GRADED FUNCTION: backward_propagation_with_regularization
def backward_propagation_with_regularization(X, Y, cache, lambd):
Implements the backward propagation of our baseline model to which we added an L2 regularization.
Arguments:
X -- input dataset, of shape (input size, number of examples)
Y -- "true" labels vector, of shape (output size, number of examples)
cache -- cache output from forward_propagation()
lambd -- regularization hyperparameter, scalar
Returns:
gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables
m = X.shape[1]
(Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache
dZ3 = A3 - Y
### START CODE HERE ### (approx. 1 line)
dW3 = 1./m * np.dot(dZ3, A2.T) + (lambd / m) * W3
### END CODE HERE ###
db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)
dA2 = np.dot(W3.T, dZ3)
dZ2 = np.multiply(dA2, np.int64(A2 > 0))
### START CODE HERE ### (approx. 1 line)
dW2 = 1./m * np.dot(dZ2, A1.T) + (lambd / m) * W2
### END CODE HERE ###
db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)
dA1 = np.dot(W2.T, dZ2)
dZ1 = np.multiply(dA1, np.int64(A1 > 0))
### START CODE HERE ### (approx. 1 line)
dW1 = 1./m * np.dot(dZ1, X.T) + (lambd / m) * W1
### END CODE HERE ###
db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True)
gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,"dA2": dA2,
"dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1,
"dZ1": dZ1, "dW1": dW1, "db1": db1}
return gradients
X_assess, Y_assess, cache = backward_propagation_with_regularization_test_case()
grads = backward_propagation_with_regularization(X_assess, Y_assess, cache, lambd = 0.7)
print ("dW1 = "+ str(grads["dW1"]))
print ("dW2 = "+ str(grads["dW2"]))
print ("dW3 = "+ str(grads["dW3"]))
Explanation: Expected Output:
<table>
<tr>
<td>
**cost**
</td>
<td>
1.78648594516
</td>
</tr>
</table>
Of course, because you changed the cost, you have to change backward propagation as well! All the gradients have to be computed with respect to this new cost.
Exercise: Implement the changes needed in backward propagation to take into account regularization. The changes only concern dW1, dW2 and dW3. For each, you have to add the regularization term's gradient ($\frac{d}{dW} ( \frac{1}{2}\frac{\lambda}{m} W^2) = \frac{\lambda}{m} W$).
End of explanation
parameters = model(train_X, train_Y, lambd = 0.7)
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
Explanation: Expected Output:
<table>
<tr>
<td>
**dW1**
</td>
<td>
[[-0.25604646 0.12298827 -0.28297129]
[-0.17706303 0.34536094 -0.4410571 ]]
</td>
</tr>
<tr>
<td>
**dW2**
</td>
<td>
[[ 0.79276486 0.85133918]
[-0.0957219 -0.01720463]
[-0.13100772 -0.03750433]]
</td>
</tr>
<tr>
<td>
**dW3**
</td>
<td>
[[-1.77691347 -0.11832879 -0.09397446]]
</td>
</tr>
</table>
Let's now run the model with L2 regularization $(\lambda = 0.7)$. The model() function will call:
- compute_cost_with_regularization instead of compute_cost
- backward_propagation_with_regularization instead of backward_propagation
End of explanation
plt.title("Model with L2-regularization")
axes = plt.gca()
axes.set_xlim([-0.75,0.40])
axes.set_ylim([-0.75,0.65])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
Explanation: Congrats, the test set accuracy increased to 93%. You have saved the French football team!
You are not overfitting the training data anymore. Let's plot the decision boundary.
End of explanation
# GRADED FUNCTION: forward_propagation_with_dropout
def forward_propagation_with_dropout(X, parameters, keep_prob = 0.5):
Implements the forward propagation: LINEAR -> RELU + DROPOUT -> LINEAR -> RELU + DROPOUT -> LINEAR -> SIGMOID.
Arguments:
X -- input dataset, of shape (2, number of examples)
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
W1 -- weight matrix of shape (20, 2)
b1 -- bias vector of shape (20, 1)
W2 -- weight matrix of shape (3, 20)
b2 -- bias vector of shape (3, 1)
W3 -- weight matrix of shape (1, 3)
b3 -- bias vector of shape (1, 1)
keep_prob - probability of keeping a neuron active during drop-out, scalar
Returns:
A3 -- last activation value, output of the forward propagation, of shape (1,1)
cache -- tuple, information stored for computing the backward propagation
np.random.seed(1)
# retrieve parameters
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
W3 = parameters["W3"]
b3 = parameters["b3"]
# LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
Z1 = np.dot(W1, X) + b1
A1 = relu(Z1)
### START CODE HERE ### (approx. 4 lines) # Steps 1-4 below correspond to the Steps 1-4 described above.
D1 = np.random.rand(A1.shape[0], A1.shape[1]) # Step 1: initialize matrix D1 = np.random.rand(..., ...)
D1 = (D1 < keep_prob) # Step 2: convert entries of D1 to 0 or 1 (using keep_prob as the threshold)
A1 = A1 * D1 # Step 3: shut down some neurons of A1
A1 = A1 / keep_prob # Step 4: scale the value of neurons that haven't been shut down
### END CODE HERE ###
Z2 = np.dot(W2, A1) + b2
A2 = relu(Z2)
### START CODE HERE ### (approx. 4 lines)
D2 = np.random.rand(A2.shape[0], A2.shape[1]) # Step 1: initialize matrix D2 = np.random.rand(..., ...)
D2 = (D2 < keep_prob) # Step 2: convert entries of D2 to 0 or 1 (using keep_prob as the threshold)
A2 = A2 * D2 # Step 3: shut down some neurons of A2
A2 = A2 / keep_prob # Step 4: scale the value of neurons that haven't been shut down
### END CODE HERE ###
Z3 = np.dot(W3, A2) + b3
A3 = sigmoid(Z3)
cache = (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3)
return A3, cache
X_assess, parameters = forward_propagation_with_dropout_test_case()
A3, cache = forward_propagation_with_dropout(X_assess, parameters, keep_prob = 0.7)
print ("A3 = " + str(A3))
Explanation: Observations:
- The value of $\lambda$ is a hyperparameter that you can tune using a dev set.
- L2 regularization makes your decision boundary smoother. If $\lambda$ is too large, it is also possible to "oversmooth", resulting in a model with high bias.
What is L2-regularization actually doing?:
L2-regularization relies on the assumption that a model with small weights is simpler than a model with large weights. Thus, by penalizing the square values of the weights in the cost function you drive all the weights to smaller values. It becomes too costly for the cost to have large weights! This leads to a smoother model in which the output changes more slowly as the input changes.
<font color='blue'>
What you should remember -- the implications of L2-regularization on:
- The cost computation:
- A regularization term is added to the cost
- The backpropagation function:
- There are extra terms in the gradients with respect to weight matrices
- Weights end up smaller ("weight decay"):
- Weights are pushed to smaller values.
3 - Dropout
Finally, dropout is a widely used regularization technique that is specific to deep learning.
It randomly shuts down some neurons in each iteration. Watch these two videos to see what this means!
<!--
To understand drop-out, consider this conversation with a friend:
- Friend: "Why do you need all these neurons to train your network and classify images?".
- You: "Because each neuron contains a weight and can learn specific features/details/shape of an image. The more neurons I have, the more featurse my model learns!"
- Friend: "I see, but are you sure that your neurons are learning different features and not all the same features?"
- You: "Good point... Neurons in the same layer actually don't talk to each other. It should be definitly possible that they learn the same image features/shapes/forms/details... which would be redundant. There should be a solution."
!-->
<center>
<video width="620" height="440" src="images/dropout1_kiank.mp4" type="video/mp4" controls>
</video>
</center>
<br>
<caption><center> <u> Figure 2 </u>: Drop-out on the second hidden layer. <br> At each iteration, you shut down (= set to zero) each neuron of a layer with probability $1 - keep_prob$ or keep it with probability $keep_prob$ (50% here). The dropped neurons don't contribute to the training in both the forward and backward propagations of the iteration. </center></caption>
<center>
<video width="620" height="440" src="images/dropout2_kiank.mp4" type="video/mp4" controls>
</video>
</center>
<caption><center> <u> Figure 3 </u>: Drop-out on the first and third hidden layers. <br> $1^{st}$ layer: we shut down on average 40% of the neurons. $3^{rd}$ layer: we shut down on average 20% of the neurons. </center></caption>
When you shut some neurons down, you actually modify your model. The idea behind drop-out is that at each iteration, you train a different model that uses only a subset of your neurons. With dropout, your neurons thus become less sensitive to the activation of one other specific neuron, because that other neuron might be shut down at any time.
3.1 - Forward propagation with dropout
Exercise: Implement the forward propagation with dropout. You are using a 3 layer neural network, and will add dropout to the first and second hidden layers. We will not apply dropout to the input layer or output layer.
Instructions:
You would like to shut down some neurons in the first and second layers. To do that, you are going to carry out 4 Steps:
1. In lecture, we discussed creating a variable $d^{[1]}$ with the same shape as $a^{[1]}$ using np.random.rand() to randomly get numbers between 0 and 1. Here, you will use a vectorized implementation, so create a random matrix $D^{[1]} = [d^{[1](1)} d^{[1](2)} ... d^{[1](m)}]$ of the same dimension as $A^{[1]}$.
2. Set each entry of $D^{[1]}$ to be 0 with probability (1-keep_prob) or 1 with probability (keep_prob), by thresholding values in $D^{[1]}$ appropriately. Hint: to set all the entries of a matrix X to 0 (if entry is less than 0.5) or 1 (if entry is more than 0.5) you would do: X = (X < 0.5). Note that 0 and 1 are respectively equivalent to False and True.
3. Set $A^{[1]}$ to $A^{[1]} * D^{[1]}$. (You are shutting down some neurons). You can think of $D^{[1]}$ as a mask, so that when it is multiplied with another matrix, it shuts down some of the values.
4. Divide $A^{[1]}$ by keep_prob. By doing this you are ensuring that the result of the cost will still have the same expected value as without drop-out. (This technique is also called inverted dropout; a small standalone demo of these four steps is sketched right after this cell.)
End of explanation
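A small standalone demo of the four inverted-dropout steps on a toy activation matrix. The array values and keep_prob below are illustrative only.
```python
import numpy as np

np.random.seed(0)
A1 = np.ones((3, 4))                            # toy activations
keep_prob = 0.8
D1 = np.random.rand(A1.shape[0], A1.shape[1])   # Step 1: random matrix
D1 = (D1 < keep_prob)                           # Step 2: threshold to 0/1
A1 = A1 * D1                                    # Step 3: shut some neurons down
A1 = A1 / keep_prob                             # Step 4: rescale (inverted dropout)
print(A1)
print(A1.mean())                                # close to 1 in expectation
```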
# GRADED FUNCTION: backward_propagation_with_dropout
def backward_propagation_with_dropout(X, Y, cache, keep_prob):
Implements the backward propagation of our baseline model to which we added dropout.
Arguments:
X -- input dataset, of shape (2, number of examples)
Y -- "true" labels vector, of shape (output size, number of examples)
cache -- cache output from forward_propagation_with_dropout()
keep_prob - probability of keeping a neuron active during drop-out, scalar
Returns:
gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables
m = X.shape[1]
(Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3) = cache
dZ3 = A3 - Y
dW3 = 1./m * np.dot(dZ3, A2.T)
db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)
dA2 = np.dot(W3.T, dZ3)
### START CODE HERE ### (≈ 2 lines of code)
dA2 = dA2 * D2 # Step 1: Apply mask D2 to shut down the same neurons as during the forward propagation
dA2 = dA2 / keep_prob # Step 2: Scale the value of neurons that haven't been shut down
### END CODE HERE ###
dZ2 = np.multiply(dA2, np.int64(A2 > 0))
dW2 = 1./m * np.dot(dZ2, A1.T)
db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)
dA1 = np.dot(W2.T, dZ2)
### START CODE HERE ### (≈ 2 lines of code)
dA1 = dA1 * D1 # Step 1: Apply mask D1 to shut down the same neurons as during the forward propagation
dA1 = dA1 / keep_prob # Step 2: Scale the value of neurons that haven't been shut down
### END CODE HERE ###
dZ1 = np.multiply(dA1, np.int64(A1 > 0))
dW1 = 1./m * np.dot(dZ1, X.T)
db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True)
gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,"dA2": dA2,
"dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1,
"dZ1": dZ1, "dW1": dW1, "db1": db1}
return gradients
X_assess, Y_assess, cache = backward_propagation_with_dropout_test_case()
gradients = backward_propagation_with_dropout(X_assess, Y_assess, cache, keep_prob = 0.8)
print ("dA1 = " + str(gradients["dA1"]))
print ("dA2 = " + str(gradients["dA2"]))
Explanation: Expected Output:
<table>
<tr>
<td>
**A3**
</td>
<td>
[[ 0.36974721 0.00305176 0.04565099 0.49683389 0.36974721]]
</td>
</tr>
</table>
3.2 - Backward propagation with dropout
Exercise: Implement the backward propagation with dropout. As before, you are training a 3 layer network. Add dropout to the first and second hidden layers, using the masks $D^{[1]}$ and $D^{[2]}$ stored in the cache.
Instruction:
Backpropagation with dropout is actually quite easy. You will have to carry out 2 Steps:
1. You had previously shut down some neurons during forward propagation, by applying a mask $D^{[1]}$ to A1. In backpropagation, you will have to shut down the same neurons, by reapplying the same mask $D^{[1]}$ to dA1.
2. During forward propagation, you had divided A1 by keep_prob. In backpropagation, you'll therefore have to divide dA1 by keep_prob again (the calculus interpretation is that if $A^{[1]}$ is scaled by keep_prob, then its derivative $dA^{[1]}$ is also scaled by the same keep_prob).
End of explanation
parameters = model(train_X, train_Y, keep_prob = 0.86, learning_rate = 0.3)
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
Explanation: Expected Output:
<table>
<tr>
<td>
**dA1**
</td>
<td>
[[ 0.36544439 0. -0.00188233 0. -0.17408748]
[ 0.65515713 0. -0.00337459 0. -0. ]]
</td>
</tr>
<tr>
<td>
**dA2**
</td>
<td>
[[ 0.58180856 0. -0.00299679 0. -0.27715731]
[ 0. 0.53159854 -0. 0.53159854 -0.34089673]
[ 0. 0. -0.00292733 0. -0. ]]
</td>
</tr>
</table>
Let's now run the model with dropout (keep_prob = 0.86). It means at every iteration you shut down each neurons of layer 1 and 2 with 24% probability. The function model() will now call:
- forward_propagation_with_dropout instead of forward_propagation.
- backward_propagation_with_dropout instead of backward_propagation.
End of explanation
plt.title("Model with dropout")
axes = plt.gca()
axes.set_xlim([-0.75,0.40])
axes.set_ylim([-0.75,0.65])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
Explanation: Dropout works great! The test accuracy has increased again (to 95%)! Your model is not overfitting the training set and does a great job on the test set. The French football team will be forever grateful to you!
Run the code below to plot the decision boundary.
End of explanation |
14,569 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Fit a simple poisson/gaussian model to IPNO test bench data
calin/examples/calib/ipno spe fits poisson gaussian.ipynb - Stephen Fegan - 2017-04-21
Copyright 2017, Stephen Fegan sfegan@llr.in2p3.fr
Laboratoire Leprince-Ringuet, CNRS/IN2P3, Ecole Polytechnique, Institut Polytechnique de Paris
This file is part of "calin". "calin" is free software
Step1: Load the IPNO SPE and pedestal data files as histograms
Apply cuts on the minimum value to filter out ADC values of zero.
Step2: Plot the histograms on linear and logarithmic scales
Step3: Set up the model and cost function
There are four components
Step4: Print list of cost function axes with intrinsic limits
Step5: Create optimizer, define problem space and run optimization
Choose optimizer akgorithm
Step6: Plot solution over the data
Step7: Plot the Single electron spectrum
Step8: Calculate the PMT gain, resolution and ENF
Step9: Use "robust" MLE cost function (experimental!)
This section shows how deweighting points in the the tails of the model can improve the fit. It should be considered experimental. The basic approach is to modify the log(probability) for each bin that is summed in the likelihood using a function rho which is linear at low values becoming asymptotically constant. This is a cost function that comes under the umbrella of being an M-Estimate, an it effectively "Winsorizes" the data.
More description is needed!
Step10: Calculate the PMT gain, resolution and ENF | Python Code:
%pylab inline
import calin.diagnostics.functional
import calin.io.sql_transceiver
import calin.calib.spe_fit
import calin.math.histogram
import calin.math.optimizer
import calin.math.pdf_1d
import calin.iact_data.ipno
import calin.plotting
import calin.math.data_modeling
Explanation: Fit a simple poisson/gaussian model to IPNO test bench data
calin/examples/calib/ipno spe fits poisson gaussian.ipynb - Stephen Fegan - 2017-04-21
Copyright 2017, Stephen Fegan sfegan@llr.in2p3.fr
Laboratoire Leprince-Ringuet, CNRS/IN2P3, Ecole Polytechnique, Institut Polytechnique de Paris
This file is part of "calin". "calin" is free software: you can redistribute it
and/or modify it under the terms of the GNU General Public License version 2 or later,
as published by the Free Software Foundation. "calin" is distributed in the hope
that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
Introduction
This notebook loads IPNO single photo-electron test-bench data and fits it using a Poisson/Gaussian model.
First, import the required packages.
End of explanation
mes_hist = calin.iact_data.ipno.make_ipno_adc_hist('/CTA/IPNO/2017-04-06-B/SET_1/5_LEDs_8.5V.bin', value_min=10)
ped_hist = calin.iact_data.ipno.make_ipno_adc_hist('/CTA/IPNO/2017-04-06-B/SET_1/pedestal_3.bin', value_min=10)
Explanation: Load the IPNO SPE and pedestal data files as histograms
Apply cuts on the minimum value to filter out ADC values of zero.
End of explanation
calin.plotting.plot_histogram(mes_hist,label="SPE")
calin.plotting.plot_histogram(ped_hist,label="Ped")
legend()
xlabel('Charge [ADC samples]')
ylabel('Number of events')
calin.plotting.plot_histogram(mes_hist,label="SPE")
calin.plotting.plot_histogram(ped_hist,label="Ped")
legend()
xlabel('Charge [ADC samples]')
ylabel('Number of events')
gca().set_yscale('log')
Explanation: Plot the histograms on linear and logarithmic scales
End of explanation
ped_gauss_pdf = calin.math.pdf_1d.BinnedGaussianPDF(mes_hist.dxval())
ses_g_pdf = calin.math.pdf_1d.LimitedGaussianPDF(0,numpy.inf,mes_hist.dxval())
xleft = min(mes_hist.xval_left(0), ped_hist.xval_left(0))
#xleft = mes_hist.xval_left(0)
mes_model_g = calin.calib.spe_fit.GeneralPoissonMES(xleft, mes_hist.dxval(),\
mes_hist.size()*2 + int((mes_hist.xval_left(0)-xleft)/mes_hist.dxval()), \
ses_g_pdf, ped_gauss_pdf)
# Uncomment first version to ONLY fit MES. Uncomment second version to fit both MES and Ped
#like_g = calin.calib.spe_fit.SPELikelihood(mes_model_g, mes_hist)
like_g = calin.calib.spe_fit.SPELikelihood(mes_model_g, mes_hist, ped_hist)
Explanation: Set up the model and cost function
There are four components:
1. the model for the pedestal, a binned Gaussian with bin width derived from histogram
2. the model of the single electron spectrum (SES), a binned Gaussian cut off at negative values, since the charge from the PMT cannot be negative
3. model for the multi electron spectrum (MES) that combines the pedestal and SES models with a Poisson model for the number of PEs, calculating the resultant MES shape through convolutions.
4. the cost function that computes the (negative of the) log likelihood of the measured MES and pedestal runs.
The configuration of the MES model is somewhat tricky, as the bounds of the FFT must be specified. Here the left bound of the FFT is taken to be the lowest bin of the MES and pedestal histograms. The number of bins in the FFT is basically twice the number in the MES histogram, adjusted for the left bound. This may be automated in future versions.
End of explanation
for iax, ax in enumerate(like_g.domain_axes()):
print('%-2d %-30s %-15.6g %-15.6g'%(iax,ax.name,ax.lo_bound,ax.hi_bound))
Explanation: Print list of cost function axes with intrinsic limits
End of explanation
opt_g = calin.math.optimizer.NLOptOptimizer("LD_LBFGS", like_g)
opt_g.set_verbosity_level(calin.math.optimizer.OptimizerVerbosityLevel_MAX);
opt_g.set_abs_tolerance(0.0001);
opt_g.set_initial_values([0.5, 30.0, 0.5, 8.0, 0.5]);
opt_g.set_limits_lo([0.01, 20.0, 0.25, 4.0, 0.25])
opt_g.set_limits_hi([3.0, 50.0, 2.00, 20.0, 5.0])
status, xopt_g, fval_g = opt_g.minimize()
print(status, xopt_g, fval_g)
status, err_mat_g = opt_g.calc_error_matrix()
xerr_g = sqrt(err_mat_g.diagonal())
print(xerr_g)
Explanation: Create optimizer, define problem space and run optimization
Choose optimizer algorithm: LBFGS, which makes use of the analytic gradient
Set maximum verbosity which shows all evaluations of cost function
Set initial point for each parameter
Set low and high bounds on parameter space. Best results with smallest space!
Run optimizer
Print status, coordinates of the solution found, and the cost function there
Calculate error matrix and print errors (from diagonals of matrix)
End of explanation
calin.plotting.plot_histogram(mes_hist)
xlabel('Signal [DC]')
ylabel('Events per %d DC bin'%mes_hist.dxval())
ihist = range(0,mes_hist.nbin());
xhist = mes_hist.all_xval_center()
mes_model_g.set_parameter_values(xopt_g)
ymodel_g = \
list(map(lambda x: mes_hist.sum_w()*mes_hist.dxval()*mes_model_g.pdf_mes(x),xhist))
plot(xhist,ymodel_g,lw=1.5, label='Gaussian')
ax = gca()
#ax.set_yscale('log')
Explanation: Plot solution over the data
End of explanation
ses_x = arange(0,20,0.1);
ses_y = fromiter(map(ses_g_pdf.value_1d, ses_x),dtype='float')
plot(ses_x, ses_y)
#axis([0,250,0,0.02])
xlabel('Signal [ADC]')
ylabel('Probability density [1/ADC]')
#gcf().savefig('icrr_ses.pdf')
Explanation: Plot the Single electron spectrum
End of explanation
qscale = 1
gunits = "ADC"
ses_norm = sum(ses_y*qscale)
ses_mean = sum(ses_y*ses_x*qscale)/ses_norm
ses_rms = sqrt(sum(ses_y*ses_x**2*qscale)/ses_norm - ses_mean**2)
print("Norm, mean, RMS [1,%s,%s]: "%(gunits,gunits),ses_norm, ses_mean, ses_rms)
print("Gain [%s]: "%gunits,ses_mean)
print("Light intensity [PE/pulse]: ",xopt_g[0])
print("Resolution: ",ses_rms/ses_mean)
print("ENF: ",sqrt(1+(ses_rms/ses_mean)**2))
Explanation: Calculate the PMT gain, resolution and ENF
End of explanation
rho = calin.math.data_modeling.ModifiedHyperbolicLikelihoodRhoFunction(7.0,2.0)
#like_mest = calin.calib.spe_fit.SPERobust(mes_model_g, mes_hist, rho) # ped_hist, rho)
like_mest = calin.calib.spe_fit.SPERobust(mes_model_g, mes_hist, ped_hist, rho)
opt_mest = calin.math.optimizer.NLOptOptimizer("LD_LBFGS", like_mest)
opt_mest.set_verbosity_level(calin.math.optimizer.OptimizerVerbosityLevel_MAX);
opt_mest.set_abs_tolerance(0.0001);
opt_mest.set_initial_values(xopt_g);
opt_mest.set_limits_lo([0.5, 20.0, 0.5, 3.0, 0.5])
opt_mest.set_limits_hi([3.0, 50.0, 2.0, 15.0, 3.0])
status, xopt_mest, fval_mest = opt_mest.minimize()
print(status, xopt_mest, fval_mest)
status, err_mat_mest = opt_mest.calc_error_matrix()
xerr_mest = sqrt(err_mat_mest.diagonal())
print(xerr_mest)
calin.plotting.plot_histogram(mes_hist)
xlabel('Signal [DC]')
ylabel('Events per %d DC bin'%mes_hist.dxval())
ihist = range(0,mes_hist.nbin());
xhist = mes_hist.all_xval_center()
mes_model_g.set_parameter_values(xopt_mest)
ymodel_mest = \
list(map(lambda x: mes_hist.sum_w()*mes_hist.dxval()*mes_model_g.pdf_mes(x),xhist))
plot(xhist,ymodel_mest,lw=1.5, label='Gaussian')
ax = gca()
#ax.set_yscale('log')
Explanation: Use "robust" MLE cost function (experimental!)
This section shows how deweighting points in the tails of the model can improve the fit. It should be considered experimental. The basic approach is to modify the log(probability) for each bin that is summed in the likelihood using a function rho which is linear at low values and becomes asymptotically constant. This is a cost function that comes under the umbrella of being an M-estimate, and it effectively "Winsorizes" the data. (A rough numpy sketch of one such saturating rho is given right after this cell.)
More description is needed!
End of explanation
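As a rough illustration of such a saturating rho function. This is only a stand-in with a tanh shape; it is not the exact functional form implemented by calin's ModifiedHyperbolicLikelihoodRhoFunction used below.
```python
import numpy as np
import matplotlib.pyplot as plt

def rho_saturating(u, c=7.0):
    # ~u for small u, asymptotically constant (-> c) for large u
    return c * np.tanh(np.asarray(u, dtype=float) / c)

u = np.linspace(0, 40, 200)
plt.plot(u, u, '--', label='identity (ordinary MLE)')
plt.plot(u, rho_saturating(u), label='saturating rho (robust)')
plt.xlabel('-log probability per bin')
plt.ylabel('contribution to cost')
plt.legend()
plt.show()
```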
ses_x = arange(0,20,0.1);
ses_y = fromiter(map(ses_g_pdf.value_1d, ses_x),dtype='float')
qscale = 1
gunits = "ADC"
ses_norm = sum(ses_y*qscale)
ses_mean = sum(ses_y*ses_x*qscale)/ses_norm
ses_rms = sqrt(sum(ses_y*ses_x**2*qscale)/ses_norm - ses_mean**2)
print("Norm, mean, RMS [1,%s,%s]: "%(gunits,gunits),ses_norm, ses_mean, ses_rms)
print("Gain [%s]: "%gunits,ses_mean)
print("Light intensity [PE/pulse]: ",xopt_g[0])
print("Resolution: ",ses_rms/ses_mean)
print("ENF: ",sqrt(1+(ses_rms/ses_mean)**2))
Explanation: Calculate the PMT gain, resolution and ENF
End of explanation |
14,570 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Copyright 2019 The TensorFlow Authors.
Step1: 분산 전략을 사용한 모델 저장 및 불러오기
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https
Step2: tf.distribute.Strategy를 사용하는 모델과 데이터 준비하기
Step3: 모델 훈련시키기
Step4: 모델 저장하고 불러오기
이제 사용할 모델을 가지고 있으므로 API를 이용해 모델을 저장하고 불러오는 방법에 대해 살펴봅시다.
두 가지 API를 사용 할 수 있습니다
Step5: tf.distribute.Strategy없이 모델 복원시키기
Step6: 모델을 복원시킨 후에는 compile()이 이미 저장되기 전에 컴파일 되기 때문에, compile()을 다시 호출하지 않고도 모델 훈련을 계속 할 수 있습니다. 그 모델은 텐서플로 표준 SavedModel의 프로토 타입에 저장됩니다. 더 많은 정보를 원한다면, the guide to saved_model format를 참고하세요.
tf.distribute.strategy의 범위를 벗어나서 model.save() 방법을 호출하는 것은 중요합니다. 범위 안에서 호출하는 것은 지원하지 않습니다.
이제 모델을 불러와서 tf.distribute.Strategy를 사용해 훈련시킵니다
Step7: 위에서 볼 수 있듯이, 불러오기는 tf.distribute.Strategy에서 예상한대로 작동합니다. 여기서 사용된 전략은 이전에 사용된 전략과 같지 않아도 됩니다.
tf.saved_model 형 API
이제 저수준 API에 대해서 살펴봅시다. 모델을 저장하는 것은 케라스 API와 비슷합니다
Step8: tf.saved_model.load()로 불러올 수 있습니다. 그러나 저수준 단계의 API이기 때문에 (따라서 더 넓은 사용범위를 갖습니다), 케라스 모델을 반환하지 않습니다. 대신, 추론하기 위해 사용될 수 있는 기능들을 포함한 객체를 반환합니다. 예를 들어
Step9: 불러와진 객체는 각각 키와 관련된 채, 여러 기능을 포함할 수 있습니다. "serving_default"는 저장된 케라스 모델이 있는 추론 기능을 위한 기본 키입니다. 이 기능을 이용하여 추론합니다
Step10: 또한 분산방식으로 불러오고 추론할 수 있습니다
Step11: 복원된 기능을 호출하는 것은 단지 저장된 모델로의 정방향 패쓰입니다(예상하기에). 만약 계속해서 불러온 기능을 훈련시키고 싶다면 어떻게 하실건가요? 불러온 기능을 더 큰 모델에 내장시킬 건가요? 일반적인 방법은 이 불러온 객체를 케라스 층에 싸서(wrap) 달성하는 것입니다. 다행히도, TF Hub는 이 목적을 위해 hub.KerasLayer을 갖고 있으며, 다음과 같습니다.
Step12: 볼 수 있듯이, hub.KerasLayer은 tf.saved_model.load()로부터 불려온 결과를 또 다른 모델을 만드는데 사용될 수 있는 케라스 층으로 포장(wrap)합니다. 이것은 학습에 매우 유용합니다.
어떤 API를 사용해야 할까요?
저장에 관해서, 케라스 모델을 사용하는 경우, 케라스의 model.save() API를 사용하는 것을 권장합니다. 저장하려는 모델이 케라스 모델이 아닌 경우, 더 낮은 단계의 API를 선택해야 합니다.
모델을 불러옴에 있어서, 어떤 API를 사용하느냐는 로딩 API에서 얻고자 하는 내용에 따라 결정됩니다. 케라스 모델을 가져올 수 없으면(또는 가져오고 싶지 않다면), tf.saved_model.load()를 사용합니다. 그 외의 경우에는, tf.keras.models.load_model()을 사용합니다. 케라스 모델을 저장한 경우에만 케라스 모델을 반환 받을 수 있다는 점을 유의하세요.
API들을 목적에 따라 혼합하고 짜 맞추는 것이 가능합니다. 케라스 모델을 model.save와 함께 저장할 수 있고, 저수준 API인, tf.saved_model.load로 케라스가 아닌 모델을 불러올 수 있습니다.
Step13: 주의사항
특별한 경우는 잘 정의되지 않은 입력을 갖는 케라스 모델을 갖고 있는 경우입니다. 예를 들어, 순차 모델은 입력 형태(Sequential([Dense(3), ...]) 없이 만들 수 있습니다. 하위 분류된 모델들 또한 초기화 후에 잘 정의된 입력을 갖고 있지 않습니다. 이 경우 모델을 저장하고 불러올 시 저수준 API를 사용해야 하며, 그렇지 않으면 오류가 발생할 수 있습니다.
모델이 잘 정의된 입력을 갖는지 확인하려면, model.inputs 이 None인지 확인합니다. None이 아니라면 잘 정의된 입력입니다. 입력 형태들은 모델이 .fit, .evaluate, .predict에서 쓰이거나 모델을 호출 (model(inputs)) 할 때 자동으로 정의됩니다.
예시를 살펴봅시다 | Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Explanation: Copyright 2019 The TensorFlow Authors.
End of explanation
!pip install tf-nightly
import tensorflow_datasets as tfds
import tensorflow as tf
tfds.disable_progress_bar()
Explanation: Saving and loading a model with a distribution strategy
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/distribute/save_and_load"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />TensorFlow.org에서 보기</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/tutorials/distribute/save_and_load.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Colab)에서 실행하기</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/tutorials/distribute/save_and_load.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />깃허브(GitHub)소스 보기</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/tutorials/distribute/save_and_load.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />노트북 다운로드 하기</a>
</td>
</table>
Note: This document was translated by the TensorFlow community. Because community translations are best-effort, there is no guarantee that they exactly match the official English documentation or stay fully up to date. If you can improve this translation, please send a pull request to the tensorflow/docs-l10n GitHub repository. To volunteer to write or review translations, contact [email protected].
Overview
It is common to save and load a model during training. There are two sets of APIs for saving and loading a Keras model: a high-level API and a low-level API. This tutorial shows how you can use the SavedModel APIs when using tf.distribute.Strategy. To learn about SavedModel and serialization in general, we recommend reading the saved model guide and the Keras model serialization guide. Let's start with a simple example:
Import the required packages:
End of explanation
mirrored_strategy = tf.distribute.MirroredStrategy()
def get_data():
datasets, ds_info = tfds.load(name='mnist', with_info=True, as_supervised=True)
mnist_train, mnist_test = datasets['train'], datasets['test']
BUFFER_SIZE = 10000
BATCH_SIZE_PER_REPLICA = 64
BATCH_SIZE = BATCH_SIZE_PER_REPLICA * mirrored_strategy.num_replicas_in_sync
def scale(image, label):
image = tf.cast(image, tf.float32)
image /= 255
return image, label
train_dataset = mnist_train.map(scale).cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
eval_dataset = mnist_test.map(scale).batch(BATCH_SIZE)
return train_dataset, eval_dataset
def get_model():
with mirrored_strategy.scope():
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(10)
])
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.Adam(),
metrics=['accuracy'])
return model
Explanation: Prepare the model and data with tf.distribute.Strategy:
End of explanation
model = get_model()
train_dataset, eval_dataset = get_data()
model.fit(train_dataset, epochs=2)
Explanation: Train the model:
End of explanation
keras_model_path = "/tmp/keras_save"
model.save(keras_model_path) # save() must be called outside the strategy scope
Explanation: Saving and loading the model
Now that you have a model to work with, let's look at how to save and load it using the APIs.
Two sets of APIs are available:
High-level Keras model.save and tf.keras.models.load_model
Low-level tf.saved_model.save and tf.saved_model.load
Keras API
Here is an example of saving and loading a model with the Keras APIs.
End of explanation
restored_keras_model = tf.keras.models.load_model(keras_model_path)
restored_keras_model.fit(train_dataset, epochs=2)
Explanation: Restore the model without tf.distribute.Strategy:
End of explanation
another_strategy = tf.distribute.OneDeviceStrategy("/cpu:0")
with another_strategy.scope():
restored_keras_model_ds = tf.keras.models.load_model(keras_model_path)
restored_keras_model_ds.fit(train_dataset, epochs=2)
Explanation: After restoring the model you can continue training it without calling compile() again, because it was already compiled before being saved. The model is saved in TensorFlow's standard SavedModel proto format. For more information, see the guide to saved_model format.
It is important to call model.save() outside of the tf.distribute.Strategy scope; calling it inside the scope is not supported.
Now load the model and train it using tf.distribute.Strategy:
End of explanation
model = get_model() # 새 모델 얻기
saved_model_path = "/tmp/tf_save"
tf.saved_model.save(model, saved_model_path)
Explanation: As you can see above, loading works as expected with tf.distribute.Strategy. The strategy used here does not have to be the same strategy that was used before saving.
The tf.saved_model API
Now let's look at the lower-level API. Saving the model is similar to the Keras API:
End of explanation
DEFAULT_FUNCTION_KEY = "serving_default"
loaded = tf.saved_model.load(saved_model_path)
inference_func = loaded.signatures[DEFAULT_FUNCTION_KEY]
Explanation: It can be loaded with tf.saved_model.load(). However, because it is a lower-level API (and therefore has a wider range of use cases), it does not return a Keras model. Instead, it returns an object that contains functions that can be used for inference. For example:
End of explanation
predict_dataset = eval_dataset.map(lambda image, label: image)
for batch in predict_dataset.take(1):
print(inference_func(batch))
Explanation: The loaded object may contain multiple functions, each associated with a key. "serving_default" is the default key for the inference function of a saved Keras model. Use this function to run inference:
End of explanation
another_strategy = tf.distribute.MirroredStrategy()
with another_strategy.scope():
loaded = tf.saved_model.load(saved_model_path)
inference_func = loaded.signatures[DEFAULT_FUNCTION_KEY]
dist_predict_dataset = another_strategy.experimental_distribute_dataset(
predict_dataset)
# Call the function in a distributed manner
for batch in dist_predict_dataset:
another_strategy.run(inference_func,args=(batch,))
Explanation: You can also load and run inference in a distributed manner:
End of explanation
import tensorflow_hub as hub
def build_model(loaded):
x = tf.keras.layers.Input(shape=(28, 28, 1), name='input_x')
# Wrap the loaded object in a KerasLayer
keras_layer = hub.KerasLayer(loaded, trainable=True)(x)
model = tf.keras.Model(x, keras_layer)
return model
another_strategy = tf.distribute.MirroredStrategy()
with another_strategy.scope():
loaded = tf.saved_model.load(saved_model_path)
model = build_model(loaded)
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.Adam(),
metrics=['accuracy'])
model.fit(train_dataset, epochs=2)
Explanation: Calling the restored function is just a forward pass on the saved model (a prediction). What if you want to keep training the loaded function, or embed it into a bigger model? A common way to achieve this is to wrap the loaded object in a Keras layer. Luckily, TF Hub has hub.KerasLayer for exactly this purpose, shown here.
End of explanation
model = get_model()
# Save the model using Keras's save() API
model.save(keras_model_path)
another_strategy = tf.distribute.MirroredStrategy()
# Load the model using the low-level API
with another_strategy.scope():
loaded = tf.saved_model.load(keras_model_path)
Explanation: As you can see, hub.KerasLayer wraps the result loaded back from tf.saved_model.load() into a Keras layer that can be used to build another model. This is very useful for further training.
Which API should you use?
For saving: if you are working with a Keras model, using the Keras model.save() API is recommended. If the model you are saving is not a Keras model, then the lower-level API is the only choice.
For loading: which API to use depends on what you want to get back from the loading API. If you cannot (or do not want to) get a Keras model, use tf.saved_model.load(); otherwise, use tf.keras.models.load_model(). Note that you can only get a Keras model back if a Keras model was saved.
It is possible to mix and match the APIs. You can save a Keras model with model.save, and then load it as a non-Keras model with the low-level API, tf.saved_model.load.
End of explanation
class SubclassedModel(tf.keras.Model):
output_name = 'output_layer'
def __init__(self):
super(SubclassedModel, self).__init__()
self._dense_layer = tf.keras.layers.Dense(
5, dtype=tf.dtypes.float32, name=self.output_name)
def call(self, inputs):
return self._dense_layer(inputs)
my_model = SubclassedModel()
# my_model.save(keras_model_path) # ERROR!
tf.saved_model.save(my_model, saved_model_path)
Explanation: Caveats
A special case is a Keras model that does not have well-defined inputs. For example, a Sequential model can be created without any input shapes (Sequential([Dense(3), ...])). Subclassed models also do not have well-defined inputs after initialization. In this case you should stick with the lower-level APIs for both saving and loading; otherwise you may get an error.
To check whether your model has well-defined inputs, just check whether model.inputs is None. If it is not None, the inputs are well defined. Input shapes are defined automatically when the model is used in .fit, .evaluate or .predict, or when the model is called directly (model(inputs)). A short sketch of this check follows.
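A minimal sketch of the check (not part of the original tutorial; it reuses the my_model, keras_model_path and saved_model_path variables from the surrounding cells):
if getattr(my_model, 'inputs', None) is None:
    # No well-defined inputs: stick with the low-level API
    tf.saved_model.save(my_model, saved_model_path)
else:
    my_model.save(keras_model_path)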
Here is an example:
End of explanation |
14,571 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
[Py-OO] Aula 01
Introdução a Orientação a Objetos em Python
O que você vai aprender nesta aula?
Após o término da aula você terá aprendido
Step1: O dicionários possui diversos métodos que usamos para alterar os objetos
Step2: Podemos usar a função dir() para inspecionar os métodos e atributos do dict notas
Step3: Aqui vemos vários métodos que o nome contém underscores no começo e fim como __len__, __getitem__, __setitem__. Esses métodos são chamados de métodos especiais que fazem parte do modelo de dados do Python. Esses métodos são chamados pelo interpretador quando uma sintaxe especial é acionada. Como, por exemplo, quando acessamos os itens do dicionário por sua chave o interpretador invoca a função dict.__getitem__()
Step4: O dict também possui atributos de dados especiais como __class__, que armazena o nome da classe do objeto, e __doc__ que retém a docstring do objeto
Step5: Para ver a docstring formatada para saída use a função print()
Step6: Números são objetos
Step7: Possuem métodos e atributos
Step8: Só lembrando que os métodos especiais não devem ser chamados diretamente, os exemplos anteriores só servem para ilustrar o funcionamento e existência desses métodos. Caso você queira consultar a documentação de um objeto use a função help()
Step10: Como explicado na [py-intro] Aula 05 funções também são objetos. Na terminologia utilizada pelos livros isso quer dizer que, em Python, as funções são objetos de primeira classe ou cidadãos de primeira classe.
Step11: Podemos a atribuir funções a variáveis
Step12: Acessar atributos
Step13: Podemos ver o bytecode que a função executa usando o módudlo dis (disassembly), enviando a função soma() como argumento da função dis.dis()
Step14: Tipagem dos objetos
Tipagem forte
Os objetos em Python possuem tipagem forte, isso quer dizer que dificilmente são feitas conversões de tipos implícitas na realização de operações. Vamos ver alguns exemplos que ilustram esse conceito
Step15: Tentamos concatenar o número 10 à string "1", porém uma exceção do tipo TypeError foi levantada dizendo que não foi possível converter um objeto int para str de forma implicita.
Em Javascript e PHP, linguagens que possuem tipagem fraca, não seria levantado uma exceção e o interpretador faria a conversão de um dos tipos. No Javascript (1.5) o resultado seria uma string "110" e no PHP (5.6) o número 11.
Aqui percebemos que a operação "1" + 10 pode produzir dois resultados
Step16: Tipagem dinâmica
Dizemos que uma linguagem possui tipagem dinâmica quando não é necessário especificar explicitamente os tipos das váriaveis. Os objetos possuem tipos, porém as variáveis podem referenciar objetos de quaisquer tipos. Verificações de tipos são feitas em tempo de execução e não durante a compilação.
Quando definimos uma função dobra(x) que retorna o valor recebido multiplicado por 2 podemos receber qualquer tipo de objeto como argumento
Step17: Podemos dobrar int
Step18: Dobrar float
Step19: strings
Step20: sequências
Step21: Tipos que não suportam multiplicação por inteiros levantarão exceção quando executados
Step22: A função type() nos permite verificar os tipos dos objetos
Step23: Mutabilidade
No Python existem objetos mutáveis e imutáveis, já vimos vários exemplos disso ao longo do curso. O estado (atributo) de objetos mutáveis podem ser alterados, já obetos imutáveis não podem ser alterados de forma alguma.
A tabela abaixo mostra a mutabilidade dos tipos embutidos do Python
Step24: Todo objeto python possui uma identidade, um número único que diferencia esse objeto. Podemos acessar a identidade de um objeto usando a função id()
Step25: Isso quer dizer que a identidade do objeto a é 10894368.
Agora vamos tentar mudar o valor de a
Step26: A identidade mudou, isso significa que a variável a está referenciando outro objeto que foi criado quando executamos a += b.
Vamos ver agora um exemplo de objeto mutável
Step27: Vamos verificar a identidade dessa lista
Step28: Mesmo modificando a lista através da inserção e remoção de valores sua identidade continua a mesma.
Strings também são imutáveis
Step29: Como vimos na aula dois do módulo de introdução strings são imutáveis e para alterar seu valor precisamos usar slicing
Step30: Comparando a identidade de s antes e depois da mudança vemos que trata-se de objetos diferentes.
Variáveis
Variáveis são apenas referências para objetos, assim como acontece em Java. Variáveis são apenas rótulos (ou post-its) associados a objetos. Diferentemente de C ou Pascal as variáveis em Python não são caixas que armazenam objetos ou valores.
Por exemplo
Step31: As variáveis a e b armazenam referências à mesma lista em vez de cópias.
É importante notar que objetos são criados antes da atribuição. A operação do lado direito de uma atribuição ocorre antes que a atribuição
Step32: Como não foi possível criar o número - por representar uma operação inválida (divisão por zero) para a linguagem - a variável c não foi atribuída a nenhum objeto
Step33: Como as variáveis são apenas rótulos a forma correta de falar sobre atribuição é "a variável x foi atribuída à (instância) lâmpada" e não "a lâmpada foi atribuída à variável x". Pois é como se colocássemos um "post-it" x em um objeto, e não guardássemos esse objeto em uma caixa x.
Por serem rótulos podemos atribuir diversos rótulos a um mesmo objeto. Isso faz com que apelidos (aliases) sejam criados
Step34: Vamos supor que exista um impostor - o João - que possua as mesmas credenciais que o José Silva. Suas credenciais são as mesmas, porém João não é José
Step35: O valor de seus dados (ou credenciais) são iguais, porém eles não são os mesmos
Step36: Nesse exemplo vimos o apelidamento (aliasing). josé e zé são apelidos (aliases)
Step37: Classes
Vamos ver como é a sintaxe de definição de classes no Python, para isso vamos fazer alguns exemplos.
Começaremos por criar uma classe que representa um cão. Armazenaremos
Step38: Na primeira linha definimos uma classe de nome Cão.
Da segunda até a quarta linha definimos os atributos de classe qtd_patas, carnívoro, nervoso. Os atributos de classe representam dados que aparecem em todas as classes.
Na sexta linha definimos o inicializador (também pode ser chamado de construtor) que deve receber o nome do Cão.
Na última linha criamos o atributo da instância nome e associamos à string enviada para o construtor.
Vamos agora criar uma instância de Cão
Step39: Vamos verificar seus atributos
Step40: Podemos também alterar esses atributos
Step41: Mudamos apenas o atributo nervoso da instância rex. O valor de Cão.nervoso continua o mesmo
Step42: Também podemos criar atributos dinamicamente para nossa instância rex
Step43: Lembrando mais uma vez que essas mudanças ocorrem somente na instância e não na classe
Step44: Classes também são objetos e podemos acessar seus atributos
Step45: Não podemos acessar o nome, pois nome é um atributo que é associado somente a instâncias da classe.
Step47: Os atributos de classe são usados para fornecerer valores padrão para dados que são compartilhados por todos os "cães" como, por exemplo, a quantidade de patas.
Agora vamos criar métodos (funções associadas a classes) para a classe Cão
Step49: Vamos brincar um pouco mais com o Cão e implementar ainda mais métodos
Step50: Métodos de instância, classe e estático
Por padrão os metódos de uma classe são métodos de instância (ou instance methods), isso significa que os métodos recebem, obrigatoriamente, uma instância da classe.
Como por exemplo
Step51: Não podemos chamar o método de instância somente com a classe
Step52: Precisamos criar uma instância para utilizá-lo
Step53: Já os métodos de classe (ou class methods) são métodos referentes à classe como um todo e recebem - não uma instância mas o - objeto da classe.
Para tornar o método de uma classe um classmethod usamos o decorador @classmethod. Decoradores são usados para "decorar" (ou marcar) funções e modificar seu comportamento de alguma maneira. Na Aula 05 deste módulo (de orientação a objetos em python) falaremos mais sobre decoradores.
Métodos de classe são definidos e utilizados assim
Step54: Podemos chamar o método usando o objeto de classe ExemploClasse
Step55: Também podemos chamar o método a partir de uma instância dessa classe. Por ser um classmethod o método continuará a receber como argumento o objeto da classe e não a instância
Step56: Por fim também temos os métodos estáticos que funcionam como funções simples agregadas a objetos ou classes. Eles não recebem argumentos de forma automática
Step57: Também podemos chamar o método estático a partir de uma instância | Python Code:
notas = {'bia': 10, 'pedro': 0, 'ana': 7}
notas
Explanation: [Py-OO] Lesson 01
Introduction to Object-Oriented Programming in Python
What will you learn in this lesson?
By the end of the lesson you will have learned:
Objects in Python
How they work
Typing
Mutability
How assignment and variables work
Classes
Basic syntax, creating instances, instance, class and static methods
Review of Object-Oriented concepts
Let's start with a quick review of Object-Oriented concepts and how they are applied in Python.
The goal of the object-oriented programming paradigm is to provide an abstraction of the real world and apply it to programming.
Objects are software components that include data and behaviour. For example, dogs have state (name, breed and colour) and behaviours (barking, wagging the tail and fetching objects), while bicycles have other state (model, current gear and current speed) and behaviours (changing gear, braking).
Another important concept is that of a class. Classes represent the structure of an object, for example: the recipe for a cake. In this example the recipe is the class, containing the instructions for creating the object as well as information about the instance (the cake).
In Python, objects have attributes, which can be either methods (functions bound to the object) or data attributes of the object. The latter are usually just called attributes.
It is important to know that in Python instances of classes are called exactly that: instances. In Java/C++ it is common to call "instances of classes" "objects of classes". That is not done in Python, because in this language everything is an object, so calling the instance of a class an object would be redundant.
Languages that implement the object-oriented paradigm must provide four basic concepts (a small sketch of the last two follows the list below):
- Abstraction: the ability to model characteristics of the real world.
- Encapsulation: protecting data while allowing internal operations to access that data.
- Inheritance: a mechanism that allows new objects to be created by modifying something that already exists, linking the new object to the old one.
- Polymorphism: the ability of a single unit to take on several forms.
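A minimal sketch of the last two concepts, inheritance and polymorphism (this example is not part of the original lesson):
class Animal:
    def falar(self):
        return '...'
class Cachorro(Animal):  # inheritance: Cachorro is built on top of Animal and stays linked to it
    def falar(self):  # polymorphism: the same call behaves differently in each subclass
        return 'Au!'
class Gato(Animal):
    def falar(self):
        return 'Miau!'
for bicho in (Cachorro(), Gato()):
    print(bicho.falar())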
Objects in Python
As said before, everything in Python is an object. Let's start by examining a dict object:
End of explanation
notas.keys()
notas.pop('bia')
notas
Explanation: Dictionaries have several methods that we use to modify these objects:
End of explanation
dir(notas)
Explanation: We can use the dir() function to inspect the methods and attributes of the dict notas:
End of explanation
notas
notas.__getitem__('ana')
notas['ana']
notas.__getitem__('joselito')
notas['joselito']
Explanation: Here we see several methods whose names have leading and trailing underscores, such as __len__, __getitem__, __setitem__. These are called special methods and are part of Python's data model. They are invoked by the interpreter when some special syntax is used. For example, when we access the items of the dictionary by key, the interpreter invokes dict.__getitem__(). A short sketch of how a user-defined class can hook into this machinery is given below.
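As an illustration (this class is not part of the original lesson), any class that defines these special methods gets the corresponding syntax for free:
class Boletim:
    def __init__(self):
        self._notas = {}
    def __setitem__(self, aluno, nota):  # enables boletim[aluno] = nota
        self._notas[aluno] = nota
    def __getitem__(self, aluno):  # enables boletim[aluno]
        return self._notas[aluno]
    def __len__(self):  # enables len(boletim)
        return len(self._notas)
boletim = Boletim()
boletim['ana'] = 7
print(boletim['ana'], len(boletim))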
End of explanation
notas.__class__
notas.__doc__
Explanation: The dict also has special data attributes such as __class__, which holds the object's class, and __doc__, which holds the object's docstring:
End of explanation
print(notas.__doc__)
Explanation: To see the docstring formatted for output, use the print() function:
End of explanation
3 + 4
Explanation: Numbers are objects:
End of explanation
print(3 .__doc__)
3 .__add__(4)
3 .__sub__(4)
Explanation: They have methods and attributes:
End of explanation
help(3)
Explanation: Just remember that special methods should not be called directly; the previous examples only illustrate how they work and that they exist. If you want to consult an object's documentation, use the help() function:
End of explanation
def soma(a, b):
retorna a + b
soma = a + b
return soma
soma(1, 2)
soma
Explanation: As explained in [py-intro] Lesson 05, functions are objects too. In the terminology used in the literature this means that, in Python, functions are first-class objects or first-class citizens.
End of explanation
adição = soma
adição
Explanation: We can assign functions to variables:
End of explanation
adição.__name__
adição.__doc__
Explanation: Access attributes:
End of explanation
import dis
dis.dis(soma)
Explanation: We can see the bytecode the function executes by using the dis (disassembly) module, passing the soma() function as an argument to dis.dis():
End of explanation
"1" + 10
Explanation: Typing of objects
Strong typing
Objects in Python are strongly typed, which means that implicit type conversions are rarely performed when carrying out operations. Let's look at some examples that illustrate this concept:
End of explanation
"1" + str(10)
int("1") + 10
Explanation: We tried to concatenate the number 10 to the string "1", but a TypeError exception was raised saying that an int object could not be converted to str implicitly.
In Javascript and PHP, which are weakly typed languages, no exception would be raised and the interpreter would convert one of the types. In Javascript (1.5) the result would be the string "110" and in PHP (5.6) the number 11.
Here we see that the operation "1" + 10 could produce two results: a string or a number. As the Zen of Python says: "In the face of ambiguity, refuse the temptation to guess", and that is exactly what Python does: the language refuses to guess the type of the result and raises an exception.
To make this example work we need to convert the types explicitly:
End of explanation
def dobra(x):
return x * 2
Explanation: Dynamic typing
We say a language is dynamically typed when it is not necessary to specify variable types explicitly. Objects have types, but variables can reference objects of any type. Type checks are performed at run time, not at compile time.
When we define a function dobra(x) that returns the received value multiplied by 2, it can receive any type of object as an argument:
End of explanation
dobra(2)
Explanation: We can double an int:
End of explanation
dobra(1.15)
Explanation: Double a float:
End of explanation
dobra('bo')
Explanation: strings:
End of explanation
dobra([1, 2, 3])
dobra((4, 5, 6))
Explanation: sequences:
End of explanation
dobra(None)
Explanation: Types that do not support multiplication by an integer will raise an exception when executed:
End of explanation
type(1)
type([1, 2, 3])
type((1, 2, 3))
type({})
type('lalala')
type(False)
Explanation: The type() function lets us check the types of objects:
End of explanation
a = 10
a
Explanation: Mutability
In Python there are mutable and immutable objects; we have already seen several examples of this throughout the course. The state (attributes) of mutable objects can be changed, whereas immutable objects cannot be changed in any way.
The table below shows the mutability of Python's built-in types:
<table>
<thead>
<th>Immutable</th>
<th>Mutable</th>
</thead>
<tbody>
<tr>
<td>tuple</td>
<td>list</td>
</tr>
<tr>
<td>numbers (int, float, complex)</td>
<td>dict</td>
</tr>
<tr>
<td>frozenset</td>
<td>set</td>
</tr>
<tr>
<td>str, bytes</td>
<td>objects that allow attribute changes via direct access, setters or methods</td>
</tr>
</tbody>
</table>
Let's look at some examples that demonstrate this:
End of explanation
id(a)
Explanation: Every Python object has an identity, a unique number that distinguishes that object. We can access an object's identity using the id() function:
End of explanation
b = 3
b
a += b
a
id(a)
Explanation: This means that the identity of the object a is 10894368.
Now let's try to change the value of a:
End of explanation
lista = [1, 2, 3, 4]
lista
Explanation: The identity changed, which means the variable a is now referencing another object, created when we executed a += b.
Now let's look at an example of a mutable object:
End of explanation
id(lista)
lista.append(10)
lista.remove(2)
lista += [-4, -3]
lista
id(lista)
Explanation: Let's check the identity of this list:
End of explanation
s = 'abcd'
id(s)
s[0] = 'z'
Explanation: Even after modifying the list by inserting and removing values, its identity stays the same.
Strings are also immutable:
End of explanation
s = 'z' + s[1:]
s
id(s)
Explanation: As we saw in lesson two of the introductory module, strings are immutable, and to change their value we need to use slicing:
End of explanation
a = [1, 2, 3]
a
b = a
a.append(4)
b
Explanation: Comparing the identity of s before and after the change, we see that they are different objects.
Variables
Variables are just references to objects, as in Java. Variables are just labels (or post-its) attached to objects. Unlike in C or Pascal, variables in Python are not boxes that store objects or values.
For example:
End of explanation
c = 1 / 0
Explanation: The variables a and b hold references to the same list rather than copies.
It is important to note that objects are created before assignment. The operation on the right-hand side of an assignment happens before the assignment itself:
End of explanation
c
Explanation: Since the number could not be created (because it represents an operation that is invalid for the language, division by zero), the variable c was not bound to any object:
End of explanation
josé = {'nome': 'José Silva', 'idade': 10}
zé = josé
zé is josé
id(zé), id(josé)
zé['ano_nascimento'] = 2006
josé
Explanation: Since variables are just labels, the correct way to talk about assignment is "the variable x was assigned to the lamp (instance)" and not "the lamp was assigned to the variable x". It is as if we stuck a post-it x on an object, rather than storing that object in a box x.
Because they are labels, we can attach several labels to the same object. This creates aliases:
End of explanation
joão = {'nome': 'José Silva', 'idade': 10, 'ano_nascimento': 2006}
joão == josé
Explanation: Let's suppose there is an impostor, João, who has the same credentials as José Silva. Their credentials are the same, but João is not José:
End of explanation
joão is josé
Explanation: The values of their data (or credentials) are equal, but they are not the same object:
End of explanation
a = 10
a is None
b = None
b is None
Explanation: In this example we saw aliasing. josé and zé are aliases: two variables bound to the same object. On the other hand, we saw that joão is not an alias of josé: these variables are bound to distinct objects. What happens is that joão and josé have the same value (which is what == compares) but different identities.
The == operator compares the values of objects (the data they hold), while is compares their identities. Comparing values is more common than comparing identities, which is why == appears more often than is in Python code. One case where is is widely used is comparison with None:
End of explanation
class Cão:
qtd_patas = 4
carnívoro = True
nervoso = False
def __init__(self, nome):
self.nome = nome
Explanation: Classes
Let's see what class definition syntax looks like in Python by working through some examples.
We will start by creating a class that represents a dog. We will store: the name, the number of legs, whether the dog is a carnivore and whether it is angry.
End of explanation
rex = Cão('Rex')
type(rex)
Explanation: On the first line we define a class named Cão.
From the second to the fourth line we define the class attributes qtd_patas, carnívoro, nervoso. Class attributes hold data shared by all instances of the class.
On the sixth line we define the initializer (which can also be called the constructor), which must receive the dog's name.
On the last line we create the instance attribute nome and bind it to the string passed to the constructor.
Now let's create an instance of Cão:
End of explanation
rex.qtd_patas
rex.carnívoro
rex.nervoso
rex.nome
Explanation: Let's check its attributes:
End of explanation
rex.nervoso = True
rex.nervoso
Explanation: We can also change these attributes:
End of explanation
Cão.nervoso
Explanation: We changed only the nervoso attribute of the rex instance. The value of Cão.nervoso stays the same:
End of explanation
rex.sujo = True
rex.sujo
rex.idade = 5
rex.idade
Explanation: We can also create attributes dynamically on our rex instance:
End of explanation
Cão.sujo
Cão.idade
Explanation: Remembering once more that these changes happen only on the instance and not on the class:
End of explanation
Cão.__name__
Cão.qtd_patas
Cão.nervoso
Cão.carnívoro
Cão.nome
Explanation: Classes are objects too, and we can access their attributes:
End of explanation
fido = Cão('Fido')
fido.nome
Explanation: We cannot access nome, because nome is an attribute bound only to instances of the class.
End of explanation
class Cão:
qtd_patas = 4
carnívoro = True
nervoso = False
def __init__(self, nome):
self.nome = nome
def latir(self, vezes=1):
Latir do cão. Quanto mais nervoso mais late.
vezes += self.nervoso * vezes
latido = 'Au! ' * vezes
print('{}: {}'.format(self.nome, latido))
rex = Cão('Rex')
rex.latir()
rex.nervoso = True
rex.latir()
rex.latir(10)
Explanation: Class attributes are used to provide default values for data shared by all "dogs", such as the number of legs.
Now let's create methods (functions bound to classes) for the Cão class:
End of explanation
class Cão:
qtd_patas = 4
carnívoro = True
nervoso = False
def __init__(self, nome, truques=None):
self.nome = nome
if not truques:
self.truques = []
else:
self.truques = list(truques)
def latir(self, vezes=1):
Latir do cão. Quanto mais nervoso mais late.
vezes += self.nervoso * vezes
latido = 'Au! ' * vezes
print('{}: {}'.format(self.nome, latido))
def ensina_truque(self, truque):
if truque not in self.truques:
self.truques.append(truque)
fido = Cão('Fido', truques=['Pegar'])
fido.truques
fido.ensina_truque('Rolar')
fido.truques
fido.ensina_truque('Pegar')
fido.truques
Explanation: Let's play with Cão a bit more and implement even more methods:
End of explanation
class ExemploInstancia:
def metodo_instancia(self):
print('Recebi {}'.format(self))
Explanation: Instance, class and static methods
By default, the methods of a class are instance methods, which means they necessarily receive an instance of the class.
For example:
End of explanation
ExemploInstancia.metodo_instancia()
Explanation: We cannot call an instance method with just the class:
End of explanation
inst = ExemploInstancia()
inst.metodo_instancia()
Explanation: We need to create an instance to use it:
End of explanation
class ExemploClasse:
@classmethod
def metodo_classe(cls):
print("Recebi {}".format(cls))
Explanation: Class methods, on the other hand, are methods that refer to the class as a whole and receive (not an instance but) the class object.
To turn a method of a class into a classmethod we use the @classmethod decorator. Decorators are used to "decorate" (or mark) functions and modify their behaviour in some way; Lesson 05 of this module (on object-oriented programming in Python) covers them in more detail, and a tiny illustrative sketch is given below.
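A tiny sketch of a decorator (illustrative only; not from the original lesson, which defers the topic to Lesson 05):
def anuncia(func):
    def wrapper(*args, **kwargs):
        print('calling', func.__name__)
        return func(*args, **kwargs)
    return wrapper
@anuncia
def quadrado(x):
    return x * x
quadrado(3)  # prints 'calling quadrado' and returns 9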
Class methods are defined and used like this:
End of explanation
ExemploClasse.metodo_classe()
Explanation: We can call the method using the class object ExemploClasse:
End of explanation
inst = ExemploClasse()
inst.metodo_classe()
Explanation: We can also call the method from an instance of that class. Because it is a classmethod, the method will still receive the class object as its argument, not the instance:
End of explanation
class Exemplo:
@staticmethod
def metodo_estático():
print('Sou estátio e não recebo nada')
Exemplo.metodo_estático()
Explanation: Finally, we also have static methods, which work like plain functions attached to objects or classes. They do not receive any arguments automatically:
End of explanation
inst = Exemplo()
inst.metodo_estático()
Explanation: We can also call the static method from an instance:
End of explanation |
14,572 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Step1: Gaussian Mixture Models and Expectation Maximisation in Shogun
By Heiko Strathmann - [email protected] - http
Step2: Set up the model in Shogun
Step3: Sampling from mixture models
Sampling is extremely easy since every instance of the <a href="http
Step4: Evaluating densities in mixture Models
Next, let us visualise the density of the joint model (which is a convex sum of the densities of the individual distributions). Note the similarity between the calls since all distributions implement the <a href="http
Step5: Density estimating with mixture models
Now let us draw samples from the mixture model itself rather than from individual components. This is the situation that usually occurs in practice
Step6: Imagine you did not know the true generating process of this data. What would you think just looking at it? There are clearly at least two components (or clusters) that might have generated this data, but three also looks reasonable. So let us try to learn a Gaussian mixture model on those.
Step7: So far so good, now lets plot the density of this GMM using the code from above
Step8: It is also possible to access the individual components of the mixture distribution. In our case, we can for example draw 95% ellipses for each of the Gaussians using the method from above. We will do this (and more) below.
On local minima of EM
It seems that three comonents give a density that is closest to the original one. While two components also do a reasonable job here, it might sometimes happen (<a href="http
Step9: Clustering with mixture models
Recall that our initial goal was not to visualise mixture models (although that is already pretty cool) but to find clusters in a given set of points. All we need to do for this is to evaluate the log-likelihood of every point under every learned component and then pick the largest one. Shogun can do both. Below, we will illustrate both cases, obtaining a cluster index, and evaluating the log-likelihood for every point under each component.
Step10: These are clusterings obtained via the true mixture model and the one learned via EM. There is a slight subtlety here
Step11: Note how the lower left and middle cluster are overlapping in the sense that points at their intersection have similar likelihoods. If you do not care at all about this and are just interested in a partitioning of the space, simply choose the maximum.
Below we plot the space partitioning for a hard clustering. | Python Code:
%pylab inline
%matplotlib inline
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
# import all Shogun classes
from shogun import *
from matplotlib.patches import Ellipse
# a tool for visualisation
def get_gaussian_ellipse_artist(mean, cov, nstd=1.96, color="red", linewidth=3):
Returns an ellipse artist for nstd times the standard deviation of this
Gaussian, specified by mean and covariance
# compute eigenvalues (ordered)
vals, vecs = eigh(cov)
order = vals.argsort()[::-1]
vals, vecs = vals[order], vecs[:, order]
theta = numpy.degrees(arctan2(*vecs[:, 0][::-1]))
# width and height are "full" widths, not radius
width, height = 2 * nstd * sqrt(vals)
e = Ellipse(xy=mean, width=width, height=height, angle=theta, \
edgecolor=color, fill=False, linewidth=linewidth)
return e
Explanation: Gaussian Mixture Models and Expectation Maximisation in Shogun
By Heiko Strathmann - [email protected] - http://github.com/karlnapf - http://herrstrathmann.de.
Based on the GMM framework of the Google summer of code 2011 project of Alesis Novik - https://github.com/alesis
This notebook is about learning and using Gaussian <a href="https://en.wikipedia.org/wiki/Mixture_model">Mixture Models</a> (GMM) in Shogun. Below, we demonstrate how to use them for sampling, for density estimation via <a href="https://en.wikipedia.org/wiki/Expectation-maximization_algorithm">Expectation Maximisation (EM)</a>, and for <a href="https://en.wikipedia.org/wiki/Data_clustering">clustering</a>.
Note that Shogun's interfaces for mixture models are deprecated and are soon to be replaced by more intuitive and efficient ones. This notebook contains some Python magic in some places to compensate for this. However, all computations are done within Shogun itself.
Finite Mixture Models (skip if you just want code examples)
We begin by giving some intuition about mixture models. Consider an unobserved (or latent) discrete random variable taking $k$ states $s$ with probabilities $\text{Pr}(s=i)=\pi_i$ for $1\leq i \leq k$, and $k$ random variables $x_i|s_i$ with arbitrary densities or distributions, which are conditionally independent of each other given the state of $s$. In the finite mixture model, we model the probability or density for a single point $x$ being generated by the weighted mixture of the $x_i|s_i$
$$
p(x)=\sum_{i=1}^k\text{Pr}(s=i)\,p(x|s=i)=\sum_{i=1}^k \pi_i\, p(x|s=i)
$$
which is simply the marginalisation over the latent variable $s$. Note that $\sum_{i=1}^k\pi_i=1$.
For example, for the Gaussian mixture model (GMM), we get (adding a collection of parameters $\theta:=\{\boldsymbol{\mu}_i, \Sigma_i\}_{i=1}^k$ that contains the $k$ mean and covariance parameters of the individual Gaussian distributions)
$$
p(x|\theta)=\sum_{i=1}^k \pi_i \mathcal{N}(\boldsymbol{\mu}_i,\Sigma_i)
$$
Note that any set of probability distributions on the same domain can be combined to such a mixture model. Note again that $s$ is an unobserved discrete random variable, i.e. we model data being generated from some weighted combination of baseline distributions. Interesting problems now are
Learning the weights $\text{Pr}(s=i)=\pi_i$ from data
Learning the parameters $\theta$ from data for a fixed family of $x_i|s_i$, for example for the GMM
Using the learned model (which is a density estimate) for clustering or classification
All of these problems are in the context of unsupervised learning since the algorithm only sees the plain data and no information on its structure.
Expectation Maximisation
<a href="https://en.wikipedia.org/wiki/Expectation-maximization_algorithm">Expectation Maximisation (EM)</a> is a powerful method to learn any form of latent models and can be applied to the Gaussian mixture model case. Standard methods such as Maximum Likelihood are not straightforward for latent models in general, while EM can almost always be applied. However, it might converge to local optima and does not guarantee globally optimal solutions (this can be dealt with with some tricks as we will see later). While the general idea in EM stays the same for all models it can be used on, the individual steps depend on the particular model that is being used.
The basic idea in EM is to maximise a lower bound, typically called the free energy, on the log-likelihood of the model. It does so by repeatedly performing two steps
The E-step optimises the free energy with respect to the latent variables $s_i$, holding the parameters $\theta$ fixed. This is done via setting the distribution over $s$ to the posterior given the used observations.
The M-step optimises the free energy with respect to the parameters $\theta$, holding the distribution over the $s_i$ fixed. This is done via maximum likelihood.
It can be shown that this procedure never decreases the likelihood and that its stationary points (i.e. where neither E-step nor M-step produce changes) correspond to local maxima of the model's likelihood. See the references for more details on the procedure, and on how to obtain a lower bound on the log-likelihood. There exist many different flavours of EM, including variants where only subsets of the model are iterated over at a time. There is no learning rate such as a step size or similar, which is both good and bad, since convergence can be slow.
Mixtures of Gaussians in Shogun
The main class for GMM in Shogun is <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CGMM.html">CGMM</a>, which contains an interface for setting up a model and sampling from it, but also to learn the model (the $\pi_i$ and parameters $\theta$) via EM. It inherits from the base class for distributions in Shogun, <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CDistribution.html">CDistribution</a>, and combines multiple single distribution instances to a mixture.
We start by creating a GMM instance, sampling from it, and computing the log-likelihood of the model for some points, and the log-likelihood of each individual component for some points. All these things are done in two dimensions to be able to plot them, but they generalise to higher (or lower) dimensions easily.
Let's sample, and illustrate the difference of knowing the latent variable indicating the component or not.
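To make the E-step and M-step concrete, here is a rough NumPy/SciPy sketch of EM for a GMM (an illustrative simplification, not the Shogun implementation used in this notebook; it assumes a data matrix X of shape (n, d) and k components):
import numpy as np
from scipy.stats import multivariate_normal
def em_gmm_sketch(X, k, n_iter=50, seed=0):
    n, d = X.shape
    rng = np.random.RandomState(seed)
    means = X[rng.choice(n, k, replace=False)]
    covs = np.array([np.cov(X.T) + 1e-6 * np.eye(d) for _ in range(k)])
    weights = np.ones(k) / k
    for _ in range(n_iter):
        # E-step: posterior (responsibility) of each component for every point
        resp = np.array([w * multivariate_normal.pdf(X, m, c)
                         for w, m, c in zip(weights, means, covs)]).T
        resp /= resp.sum(axis=1, keepdims=True)
        # M-step: maximum-likelihood updates of weights, means and covariances
        Nk = resp.sum(axis=0)
        weights = Nk / n
        means = (resp.T @ X) / Nk[:, None]
        for j in range(k):
            diff = X - means[j]
            covs[j] = (resp[:, j, None] * diff).T @ diff / Nk[j] + 1e-6 * np.eye(d)
    return weights, means, covs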
End of explanation
# create mixture of three Gaussians
num_components=3
num_max_samples=100
gmm=GMM(num_components)
dimension=2
# set means (TODO interface should be to construct mixture from individuals with set parameters)
means=zeros((num_components, dimension))
means[0]=[-5.0, -4.0]
means[1]=[7.0, 3.0]
means[2]=[0, 0.]
[gmm.set_nth_mean(means[i], i) for i in range(num_components)]
# set covariances
covs=zeros((num_components, dimension, dimension))
covs[0]=array([[2, 1.3],[.6, 3]])
covs[1]=array([[1.3, -0.8],[-0.8, 1.3]])
covs[2]=array([[2.5, .8],[0.8, 2.5]])
[gmm.set_nth_cov(covs[i],i) for i in range(num_components)]
# set mixture coefficients, these have to sum to one (TODO these should be initialised automatically)
weights=array([0.5, 0.3, 0.2])
gmm.set_coef(weights)
Explanation: Set up the model in Shogun
End of explanation
# now sample from each component separately first, then from the joint model
hold(True)
colors=["red", "green", "blue"]
for i in range(num_components):
# draw a number of samples from current component and plot
num_samples=int(rand()*num_max_samples)+1
# emulate sampling from one component (TODO fix interface of GMM to handle this)
w=zeros(num_components)
w[i]=1.
gmm.set_coef(w)
# sample and plot (TODO fix interface to have loop within)
X=array([gmm.sample() for _ in range(num_samples)])
plot(X[:,0], X[:,1], "o", color=colors[i])
# draw 95% ellipse for current component
gca().add_artist(get_gaussian_ellipse_artist(means[i], covs[i], color=colors[i]))
hold(False)
_=title("%dD Gaussian Mixture Model with %d components" % (dimension, num_components))
# since we used a hack to sample from each component
gmm.set_coef(weights)
Explanation: Sampling from mixture models
Sampling is extremely easy since every instance of the <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CDistribution.html">CDistribution</a> class in Shogun allows to sample from it (if implemented)
End of explanation
# generate a grid over the full space and evaluate components PDF
resolution=100
Xs=linspace(-10,10, resolution)
Ys=linspace(-8,6, resolution)
pairs=asarray([(x,y) for x in Xs for y in Ys])
D=asarray([gmm.cluster(pairs[i])[3] for i in range(len(pairs))]).reshape(resolution,resolution)
figure(figsize=(18,5))
subplot(1,2,1)
pcolor(Xs,Ys,D)
xlim([-10,10])
ylim([-8,6])
title("Log-Likelihood of GMM")
subplot(1,2,2)
pcolor(Xs,Ys,exp(D))
xlim([-10,10])
ylim([-8,6])
_=title("Likelihood of GMM")
Explanation: Evaluating densities in mixture Models
Next, let us visualise the density of the joint model (which is a convex sum of the densities of the individual distributions). Note the similarity between the calls since all distributions implement the <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CDistribution.html">CDistribution</a> interface, including the mixture.
End of explanation
# sample and plot (TODO fix interface to have loop within)
X=array([gmm.sample() for _ in range(num_max_samples)])
plot(X[:,0], X[:,1], "o")
_=title("Samples from GMM")
Explanation: Density estimating with mixture models
Now let us draw samples from the mixture model itself rather than from individual components. This is the situation that usually occurs in practice: someone gives you a bunch of data with no labels attached to it at all. Our job is now to find structure in the data, which we will do with a GMM.
End of explanation
def estimate_gmm(X, num_components):
# bring data into shogun representation (note that Shogun data is in column vector form, so transpose)
features=RealFeatures(X.T)
gmm_est=GMM(num_components)
gmm_est.set_features(features)
# learn GMM
gmm_est.train_em()
return gmm_est
Explanation: Imagine you did not know the true generating process of this data. What would you think just looking at it? There are clearly at least two components (or clusters) that might have generated this data, but three also looks reasonable. So let us try to learn a Gaussian mixture model on those.
End of explanation
component_numbers=[2,3]
# plot true likelihood
D_true=asarray([gmm.cluster(pairs[i])[num_components] for i in range(len(pairs))]).reshape(resolution,resolution)
figure(figsize=(18,5))
subplot(1,len(component_numbers)+1,1)
pcolor(Xs,Ys,exp(D_true))
xlim([-10,10])
ylim([-8,6])
title("True likelihood")
for n in range(len(component_numbers)):
# TODO get rid of these hacks and offer nice interface from Shogun
# learn GMM with EM
gmm_est=estimate_gmm(X, component_numbers[n])
# evaluate at a grid of points
D_est=asarray([gmm_est.cluster(pairs[i])[component_numbers[n]] for i in range(len(pairs))]).reshape(resolution,resolution)
# visualise densities
subplot(1,len(component_numbers)+1,n+2)
pcolor(Xs,Ys,exp(D_est))
xlim([-10,10])
ylim([-8,6])
_=title("Estimated likelihood for EM with %d components"%component_numbers[n])
Explanation: So far so good, now let's plot the density of this GMM using the code from above
End of explanation
# function to draw ellipses for all components of a GMM
def visualise_gmm(gmm, color="blue"):
for i in range(gmm.get_num_components()):
component=Gaussian.obtain_from_generic(gmm.get_component(i))
gca().add_artist(get_gaussian_ellipse_artist(component.get_mean(), component.get_cov(), color=color))
# multiple runs to illustrate random initialisation matters
for _ in range(3):
figure(figsize=(18,5))
subplot(1, len(component_numbers)+1, 1)
plot(X[:,0],X[:,1], 'o')
visualise_gmm(gmm_est, color="blue")
title("True components")
for i in range(len(component_numbers)):
gmm_est=estimate_gmm(X, component_numbers[i])
subplot(1, len(component_numbers)+1, i+2)
plot(X[:,0],X[:,1], 'o')
visualise_gmm(gmm_est, color=colors[i])
# TODO add a method to get likelihood of full model, retraining is inefficient
likelihood=gmm_est.train_em()
_=title("Estimated likelihood: %.2f (%d components)"%(likelihood,component_numbers[i]))
Explanation: It is also possible to access the individual components of the mixture distribution. In our case, we can for example draw 95% ellipses for each of the Gaussians using the method from above. We will do this (and more) below.
On local minima of EM
It seems that three components give a density that is closest to the original one. While two components also do a reasonable job here, it might sometimes happen (<a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CKMeans.html">KMeans</a> is used to initialise the cluster centres if not done by hand, using a random cluster initialisation) that the upper two Gaussians are grouped together; re-run the cell a couple of times to see this. This illustrates how EM might get stuck in a local minimum. We will do this below, where it might well happen that all runs produce the same or different results, no guarantees.
Note that it is easily possible to initialise EM via specifying the parameters of the mixture components, as we did to create the original model above.
One way to decide which of multiple converged EM instances to use is to simply compute many of them (with different initialisations) and then choose the one with the largest likelihood; a sketch of this is given below. WARNING: Do not select the number of components like this, as the model will overfit.
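A rough sketch of this selection strategy, reusing the estimate_gmm helper defined above (as in the plotting cell, train_em() is called once more just to query the attained likelihood, which is inefficient but matches the current interface):
best_likelihood = -inf
best_gmm = None
for run in range(10):
    candidate = estimate_gmm(X, num_components)
    likelihood = candidate.train_em()
    if likelihood > best_likelihood:
        best_likelihood = likelihood
        best_gmm = candidate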
End of explanation
def cluster_and_visualise(gmm_est):
# obtain cluster index for each point of the training data
# TODO another hack here: Shogun should allow to pass multiple points and only return the index
# as the likelihood can be done via the individual components
# In addition, argmax should be computed for us, although log-pdf for all components should also be possible
clusters=asarray([argmax(gmm_est.cluster(x)[:gmm.get_num_components()]) for x in X])
# visualise points by cluster
hold(True)
for i in range(gmm.get_num_components()):
indices=clusters==i
plot(X[indices,0],X[indices,1], 'o', color=colors[i])
hold(False)
# learn gmm again
gmm_est=estimate_gmm(X, num_components)
figure(figsize=(18,5))
subplot(121)
cluster_and_visualise(gmm)
title("Clustering under true GMM")
subplot(122)
cluster_and_visualise(gmm_est)
_=title("Clustering under estimated GMM")
Explanation: Clustering with mixture models
Recall that our initial goal was not to visualise mixture models (although that is already pretty cool) but to find clusters in a given set of points. All we need to do for this is to evaluate the log-likelihood of every point under every learned component and then pick the largest one. Shogun can do both. Below, we will illustrate both cases, obtaining a cluster index, and evaluating the log-likelihood for every point under each component.
End of explanation
figure(figsize=(18,5))
for comp_idx in range(num_components):
subplot(1,num_components,comp_idx+1)
# evaluated likelihood under current component
# TODO Shogun should do the loop and allow to specify component indices to evaluate pdf for
# TODO distribution interface should be the same everywhere
component=Gaussian.obtain_from_generic(gmm.get_component(comp_idx))
cluster_likelihoods=asarray([component.compute_PDF(X[i]) for i in range(len(X))])
# normalise
cluster_likelihoods-=cluster_likelihoods.min()
cluster_likelihoods/=cluster_likelihoods.max()
# plot, coloured by likelihood value
cm=get_cmap("jet")
hold(True)
for j in range(len(X)):
color = cm(cluster_likelihoods[j])
plot(X[j,0], X[j,1] ,"o", color=color)
hold(False)
title("Data coloured by likelihood for component %d" % comp_idx)
Explanation: These are clusterings obtained via the true mixture model and the one learned via EM. There is a slight subtlety here: even the model under which the data was generated will not cluster the data correctly if the data is overlapping. This is due to the fact that the cluster with the largest probability is chosen, which does not allow for any ambiguity. If you are interested in cases where data overlaps, you should always look at the log-likelihood of the point for each cluster and consider taking into account "draws" in the decision, i.e. cases where the probabilities for two different clusters are equally large.
Below we plot all points, coloured by their likelihood under each component.
End of explanation
# compute cluster index for every point in space
D_est=asarray([gmm_est.cluster(pairs[i])[:num_components].argmax() for i in range(len(pairs))]).reshape(resolution,resolution)
# visualise clustering
cluster_and_visualise(gmm_est)
# visualise space partitioning
hold(True)
pcolor(Xs,Ys,D_est)
hold(False)
Explanation: Note how the lower left and middle cluster are overlapping in the sense that points at their intersection have similar likelihoods. If you do not care at all about this and are just interested in a partitioning of the space, simply choose the maximum.
Below we plot the space partitioning for a hard clustering.
End of explanation |
14,573 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
ES-DOC CMIP6 Model Properties - Seaice
MIP Era
Step1: Document Authors
Set document authors
Step2: Document Contributors
Specify document contributors
Step3: Document Publication
Specify document publication status
Step4: Document Table of Contents
1. Key Properties --> Model
2. Key Properties --> Variables
3. Key Properties --> Seawater Properties
4. Key Properties --> Resolution
5. Key Properties --> Tuning Applied
6. Key Properties --> Key Parameter Values
7. Key Properties --> Assumptions
8. Key Properties --> Conservation
9. Grid --> Discretisation --> Horizontal
10. Grid --> Discretisation --> Vertical
11. Grid --> Seaice Categories
12. Grid --> Snow On Seaice
13. Dynamics
14. Thermodynamics --> Energy
15. Thermodynamics --> Mass
16. Thermodynamics --> Salt
17. Thermodynamics --> Salt --> Mass Transport
18. Thermodynamics --> Salt --> Thermodynamics
19. Thermodynamics --> Ice Thickness Distribution
20. Thermodynamics --> Ice Floe Size Distribution
21. Thermodynamics --> Melt Ponds
22. Thermodynamics --> Snow Processes
23. Radiative Processes
1. Key Properties --> Model
Name of seaice model used.
1.1. Model Overview
Is Required
Step5: 1.2. Model Name
Is Required
Step6: 2. Key Properties --> Variables
List of prognostic variable in the sea ice model.
2.1. Prognostic
Is Required
Step7: 3. Key Properties --> Seawater Properties
Properties of seawater relevant to sea ice
3.1. Ocean Freezing Point
Is Required
Step8: 3.2. Ocean Freezing Point Value
Is Required
Step9: 4. Key Properties --> Resolution
Resolution of the sea ice grid
4.1. Name
Is Required
Step10: 4.2. Canonical Horizontal Resolution
Is Required
Step11: 4.3. Number Of Horizontal Gridpoints
Is Required
Step12: 5. Key Properties --> Tuning Applied
Tuning applied to sea ice model component
5.1. Description
Is Required
Step13: 5.2. Target
Is Required
Step14: 5.3. Simulations
Is Required
Step15: 5.4. Metrics Used
Is Required
Step16: 5.5. Variables
Is Required
Step17: 6. Key Properties --> Key Parameter Values
Values of key parameters
6.1. Typical Parameters
Is Required
Step18: 6.2. Additional Parameters
Is Required
Step19: 7. Key Properties --> Assumptions
Assumptions made in the sea ice model
7.1. Description
Is Required
Step20: 7.2. On Diagnostic Variables
Is Required
Step21: 7.3. Missing Processes
Is Required
Step22: 8. Key Properties --> Conservation
Conservation in the sea ice component
8.1. Description
Is Required
Step23: 8.2. Properties
Is Required
Step24: 8.3. Budget
Is Required
Step25: 8.4. Was Flux Correction Used
Is Required
Step26: 8.5. Corrected Conserved Prognostic Variables
Is Required
Step27: 9. Grid --> Discretisation --> Horizontal
Sea ice discretisation in the horizontal
9.1. Grid
Is Required
Step28: 9.2. Grid Type
Is Required
Step29: 9.3. Scheme
Is Required
Step30: 9.4. Thermodynamics Time Step
Is Required
Step31: 9.5. Dynamics Time Step
Is Required
Step32: 9.6. Additional Details
Is Required
Step33: 10. Grid --> Discretisation --> Vertical
Sea ice vertical properties
10.1. Layering
Is Required
Step34: 10.2. Number Of Layers
Is Required
Step35: 10.3. Additional Details
Is Required
Step36: 11. Grid --> Seaice Categories
What method is used to represent sea ice categories ?
11.1. Has Mulitple Categories
Is Required
Step37: 11.2. Number Of Categories
Is Required
Step38: 11.3. Category Limits
Is Required
Step39: 11.4. Ice Thickness Distribution Scheme
Is Required
Step40: 11.5. Other
Is Required
Step41: 12. Grid --> Snow On Seaice
Snow on sea ice details
12.1. Has Snow On Ice
Is Required
Step42: 12.2. Number Of Snow Levels
Is Required
Step43: 12.3. Snow Fraction
Is Required
Step44: 12.4. Additional Details
Is Required
Step45: 13. Dynamics
Sea Ice Dynamics
13.1. Horizontal Transport
Is Required
Step46: 13.2. Transport In Thickness Space
Is Required
Step47: 13.3. Ice Strength Formulation
Is Required
Step48: 13.4. Redistribution
Is Required
Step49: 13.5. Rheology
Is Required
Step50: 14. Thermodynamics --> Energy
Processes related to energy in sea ice thermodynamics
14.1. Enthalpy Formulation
Is Required
Step51: 14.2. Thermal Conductivity
Is Required
Step52: 14.3. Heat Diffusion
Is Required
Step53: 14.4. Basal Heat Flux
Is Required
Step54: 14.5. Fixed Salinity Value
Is Required
Step55: 14.6. Heat Content Of Precipitation
Is Required
Step56: 14.7. Precipitation Effects On Salinity
Is Required
Step57: 15. Thermodynamics --> Mass
Processes related to mass in sea ice thermodynamics
15.1. New Ice Formation
Is Required
Step58: 15.2. Ice Vertical Growth And Melt
Is Required
Step59: 15.3. Ice Lateral Melting
Is Required
Step60: 15.4. Ice Surface Sublimation
Is Required
Step61: 15.5. Frazil Ice
Is Required
Step62: 16. Thermodynamics --> Salt
Processes related to salt in sea ice thermodynamics.
16.1. Has Multiple Sea Ice Salinities
Is Required
Step63: 16.2. Sea Ice Salinity Thermal Impacts
Is Required
Step64: 17. Thermodynamics --> Salt --> Mass Transport
Mass transport of salt
17.1. Salinity Type
Is Required
Step65: 17.2. Constant Salinity Value
Is Required
Step66: 17.3. Additional Details
Is Required
Step67: 18. Thermodynamics --> Salt --> Thermodynamics
Salt thermodynamics
18.1. Salinity Type
Is Required
Step68: 18.2. Constant Salinity Value
Is Required
Step69: 18.3. Additional Details
Is Required
Step70: 19. Thermodynamics --> Ice Thickness Distribution
Ice thickness distribution details.
19.1. Representation
Is Required
Step71: 20. Thermodynamics --> Ice Floe Size Distribution
Ice floe-size distribution details.
20.1. Representation
Is Required
Step72: 20.2. Additional Details
Is Required
Step73: 21. Thermodynamics --> Melt Ponds
Characteristics of melt ponds.
21.1. Are Included
Is Required
Step74: 21.2. Formulation
Is Required
Step75: 21.3. Impacts
Is Required
Step76: 22. Thermodynamics --> Snow Processes
Thermodynamic processes in snow on sea ice
22.1. Has Snow Aging
Is Required
Step77: 22.2. Snow Aging Scheme
Is Required
Step78: 22.3. Has Snow Ice Formation
Is Required
Step79: 22.4. Snow Ice Formation Scheme
Is Required
Step80: 22.5. Redistribution
Is Required
Step81: 22.6. Heat Diffusion
Is Required
Step82: 23. Radiative Processes
Sea Ice Radiative Processes
23.1. Surface Albedo
Is Required
Step83: 23.2. Ice Radiation Transmission
Is Required | Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cccr-iitm', 'sandbox-2', 'seaice')
Explanation: ES-DOC CMIP6 Model Properties - Seaice
MIP Era: CMIP6
Institute: CCCR-IITM
Source ID: SANDBOX-2
Topic: Seaice
Sub-Topics: Dynamics, Thermodynamics, Radiative Processes.
Properties: 80 (63 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:53:48
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
Explanation: Document Authors
Set document authors
End of explanation
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
Explanation: Document Contributors
Specify document contributors
End of explanation
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
Explanation: Document Publication
Specify document publication status
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: Document Table of Contents
1. Key Properties --> Model
2. Key Properties --> Variables
3. Key Properties --> Seawater Properties
4. Key Properties --> Resolution
5. Key Properties --> Tuning Applied
6. Key Properties --> Key Parameter Values
7. Key Properties --> Assumptions
8. Key Properties --> Conservation
9. Grid --> Discretisation --> Horizontal
10. Grid --> Discretisation --> Vertical
11. Grid --> Seaice Categories
12. Grid --> Snow On Seaice
13. Dynamics
14. Thermodynamics --> Energy
15. Thermodynamics --> Mass
16. Thermodynamics --> Salt
17. Thermodynamics --> Salt --> Mass Transport
18. Thermodynamics --> Salt --> Thermodynamics
19. Thermodynamics --> Ice Thickness Distribution
20. Thermodynamics --> Ice Floe Size Distribution
21. Thermodynamics --> Melt Ponds
22. Thermodynamics --> Snow Processes
23. Radiative Processes
1. Key Properties --> Model
Name of seaice model used.
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of sea ice model.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of sea ice model code (e.g. CICE 4.2, LIM 2.1, etc.)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 2. Key Properties --> Variables
List of prognostic variables in the sea ice model.
2.1. Prognostic
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of prognostic variables in the sea ice component.
End of explanation
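For illustration only: a hypothetical selection for this 1.N property, presumably one DOC.set_value call per chosen item from the valid choices listed above; the real model's choices are not recorded here.
# e.g. DOC.set_value("Sea ice temperature")
# e.g. DOC.set_value("Sea ice concentration")
# e.g. DOC.set_value("Sea ice thickness")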
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 3. Key Properties --> Seawater Properties
Properties of seawater relevant to sea ice
3.1. Ocean Freezing Point
Is Required: TRUE Type: ENUM Cardinality: 1.1
Equation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 3.2. Ocean Freezing Point Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If using a constant seawater freezing point, specify this value.
End of explanation
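For illustration only: -1.8 deg C is a typical constant seawater freezing point, used here purely as an example and not necessarily the value used by this model.
# e.g. DOC.set_value(-1.8)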
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 4. Key Properties --> Resolution
Resolution of the sea ice grid
4.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid e.g. N512L180, T512L70, ORCA025 etc.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 4.2. Canonical Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Expression quoted for gross comparisons of resolution, e.g. 50km or 0.1 degrees etc.
End of explanation
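For illustration only: a placeholder string in the format suggested above, not the actual model resolution.
# e.g. DOC.set_value("0.1 degrees")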
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 4.3. Number Of Horizontal Gridpoints
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 5. Key Properties --> Tuning Applied
Tuning applied to sea ice model component
5.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 5.2. Target
Is Required: TRUE Type: STRING Cardinality: 1.1
What was the aim of tuning, e.g. correct sea ice minima, correct seasonal cycle.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 5.3. Simulations
Is Required: TRUE Type: STRING Cardinality: 1.1
*Which simulations had tuning applied, e.g. all, not historical, only pi-control? *
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 5.4. Metrics Used
Is Required: TRUE Type: STRING Cardinality: 1.1
List any observed metrics used in tuning model/parameters
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 5.5. Variables
Is Required: FALSE Type: STRING Cardinality: 0.1
Which variables were changed during the tuning process?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 6. Key Properties --> Key Parameter Values
Values of key parameters
6.1. Typical Parameters
Is Required: FALSE Type: ENUM Cardinality: 0.N
What values were specified for the following parameters, if used?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 6.2. Additional Parameters
Is Required: FALSE Type: STRING Cardinality: 0.N
If you have any additional parameterised values that you have used (e.g. minimum open water fraction or bare ice albedo), please provide them here as a comma separated list
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 7. Key Properties --> Assumptions
Assumptions made in the sea ice model
7.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.N
General overview description of any key assumptions made in this model.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 7.2. On Diagnostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.N
Note any assumptions that specifically affect the CMIP6 diagnostic sea ice variables.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 7.3. Missing Processes
Is Required: TRUE Type: STRING Cardinality: 1.N
List any key processes missing in this model configuration? Provide full details where this affects the CMIP6 diagnostic sea ice variables?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 8. Key Properties --> Conservation
Conservation in the sea ice component
8.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Provide a general description of conservation methodology.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 8.2. Properties
Is Required: TRUE Type: ENUM Cardinality: 1.N
Properties conserved in sea ice by the numerical schemes.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 8.3. Budget
Is Required: TRUE Type: STRING Cardinality: 1.1
For each conserved property, specify the output variables which close the related budgets, as a comma separated list. For example: Conserved property, variable1, variable2, variable3
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 8.4. Was Flux Correction Used
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does conservation involve flux correction?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 8.5. Corrected Conserved Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List any variables which are conserved by more than the numerical scheme alone.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 9. Grid --> Discretisation --> Horizontal
Sea ice discretisation in the horizontal
9.1. Grid
Is Required: TRUE Type: ENUM Cardinality: 1.1
Grid on which sea ice is horizontally discretised?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 9.2. Grid Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the type of sea ice grid?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 9.3. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the advection scheme?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 9.4. Thermodynamics Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
What is the time step in the sea ice model thermodynamic component in seconds.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 9.5. Dynamics Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
What is the time step in the sea ice model dynamic component in seconds.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 9.6. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any additional horizontal discretisation details.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 10. Grid --> Discretisation --> Vertical
Sea ice vertical properties
10.1. Layering
Is Required: TRUE Type: ENUM Cardinality: 1.N
What type of sea ice vertical layers are implemented for purposes of thermodynamic calculations?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 10.2. Number Of Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
If using multi-layers specify how many.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 10.3. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any additional vertical grid details.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 11. Grid --> Seaice Categories
What method is used to represent sea ice categories?
11.1. Has Multiple Categories
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Set to true if the sea ice model has multiple sea ice categories.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 11.2. Number Of Categories
Is Required: TRUE Type: INTEGER Cardinality: 1.1
If using sea ice categories specify how many.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 11.3. Category Limits
Is Required: TRUE Type: STRING Cardinality: 1.1
If using sea ice categories specify each of the category limits.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 11.4. Ice Thickness Distribution Scheme
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the sea ice thickness distribution scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 11.5. Other
Is Required: FALSE Type: STRING Cardinality: 0.1
If the sea ice model does not use sea ice categories, specify any additional details. For example, models that parameterise the ice thickness distribution (ITD), i.e. there is no explicit ITD, but a distribution is assumed and fluxes are computed accordingly.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 12. Grid --> Snow On Seaice
Snow on sea ice details
12.1. Has Snow On Ice
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is snow on ice represented in this model?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 12.2. Number Of Snow Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of vertical levels of snow on ice?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 12.3. Snow Fraction
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how the snow fraction on sea ice is determined
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 12.4. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any additional details related to snow on ice.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 13. Dynamics
Sea Ice Dynamics
13.1. Horizontal Transport
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of horizontal advection of sea ice?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 13.2. Transport In Thickness Space
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of sea ice transport in thickness space (i.e. in thickness categories)?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 13.3. Ice Strength Formulation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Which method of sea ice strength formulation is used?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 13.4. Redistribution
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which processes can redistribute sea ice (including thickness)?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 13.5. Rheology
Is Required: TRUE Type: ENUM Cardinality: 1.1
Rheology, what is the ice deformation formulation?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 14. Thermodynamics --> Energy
Processes related to energy in sea ice thermodynamics
14.1. Enthalpy Formulation
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the energy formulation?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 14.2. Thermal Conductivity
Is Required: TRUE Type: ENUM Cardinality: 1.1
What type of thermal conductivity is used?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 14.3. Heat Diffusion
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of heat diffusion?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 14.4. Basal Heat Flux
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method by which basal ocean heat flux is handled?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 14.5. Fixed Salinity Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If you have selected {Thermal properties depend on S-T (with fixed salinity)}, supply fixed salinity value for each sea ice layer.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 14.6. Heat Content Of Precipitation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method by which the heat content of precipitation is handled.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 14.7. Precipitation Effects On Salinity
Is Required: FALSE Type: STRING Cardinality: 0.1
If precipitation (freshwater) that falls on sea ice affects the ocean surface salinity please provide further details.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 15. Thermodynamics --> Mass
Processes related to mass in sea ice thermodynamics
15.1. New Ice Formation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method by which new sea ice is formed in open water.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 15.2. Ice Vertical Growth And Melt
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method that governs the vertical growth and melt of sea ice.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 15.3. Ice Lateral Melting
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of sea ice lateral melting?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 15.4. Ice Surface Sublimation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method that governs sea ice surface sublimation.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 15.5. Frazil Ice
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method of frazil ice formation.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 16. Thermodynamics --> Salt
Processes related to salt in sea ice thermodynamics.
16.1. Has Multiple Sea Ice Salinities
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the sea ice model use two different salinities: one for thermodynamic calculations; and one for the salt budget?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 16.2. Sea Ice Salinity Thermal Impacts
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does sea ice salinity impact the thermal properties of sea ice?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 17. Thermodynamics --> Salt --> Mass Transport
Mass transport of salt
17.1. Salinity Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is salinity determined in the mass transport of salt calculation?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 17.2. Constant Salinity Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If using a constant salinity value specify this value in PSU?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 17.3. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the salinity profile used.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 18. Thermodynamics --> Salt --> Thermodynamics
Salt thermodynamics
18.1. Salinity Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is salinity determined in the thermodynamic calculation?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 18.2. Constant Salinity Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If using a constant salinity value specify this value in PSU?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 18.3. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the salinity profile used.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 19. Thermodynamics --> Ice Thickness Distribution
Ice thickness distribution details.
19.1. Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is the sea ice thickness distribution represented?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 20. Thermodynamics --> Ice Floe Size Distribution
Ice floe-size distribution details.
20.1. Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is the sea ice floe-size represented?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 20.2. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Please provide further details on any parameterisation of floe-size.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 21. Thermodynamics --> Melt Ponds
Characteristics of melt ponds.
21.1. Are Included
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are melt ponds included in the sea ice model?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 21.2. Formulation
Is Required: TRUE Type: ENUM Cardinality: 1.1
What method of melt pond formulation is used?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 21.3. Impacts
Is Required: TRUE Type: ENUM Cardinality: 1.N
What do melt ponds have an impact on?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 22. Thermodynamics --> Snow Processes
Thermodynamic processes in snow on sea ice
22.1. Has Snow Aging
Is Required: TRUE Type: BOOLEAN Cardinality: 1.N
Set to True if the sea ice model has a snow aging scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 22.2. Snow Aging Scheme
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow aging scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 22.3. Has Snow Ice Formation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.N
Set to True if the sea ice model has snow ice formation.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 22.4. Snow Ice Formation Scheme
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow ice formation scheme.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 22.5. Redistribution
Is Required: TRUE Type: STRING Cardinality: 1.1
What is the impact of ridging on snow cover?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 22.6. Heat Diffusion
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the heat diffusion through snow methodology in sea ice thermodynamics?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 23. Radiative Processes
Sea Ice Radiative Processes
23.1. Surface Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method used to handle surface albedo.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 23.2. Ice Radiation Transmission
Is Required: TRUE Type: ENUM Cardinality: 1.N
Method by which solar radiation through sea ice is handled.
End of explanation |
14,574 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Python Programming for Data Analysis
1. Setting up the environment for data analysis (including package installation)
Step1: The TicTaeToe game
Step2: A simple implementation of the TicTaeToe game in which the user moves first and plays against the computer.
In the future we plan to extend it with machine learning so it can improve its play. | Python Code:
# Operating system
!ver
# Current location and subdirectory structure
!dir
# Python version
!python --version
# virtualenv version
!virtualenv --version
# List of existing virtual environments
!workon
# Enter the virtual environment kookmin1
# workon kookmin1
# Packages installed in the virtual environment kookmin1
# Data analysis: numpy, pandas
# Visualization: matplotlib
!pip freeze
Explanation: Python Programming for Data Analysis
1. Setting up the environment for data analysis (including package installation)
End of explanation
from IPython.display import Image
Image(filename='images/TicTaeToe.png')
Explanation: The TicTaeToe game
End of explanation
# %load TicTaeToe.py
import sys
import random
# Explain how the game is played
print("Source: http://www.practicepython.org")
print("==================================")
print("Connect three of your marks in a")
print("row, column or diagonal to win.")
print("The user (U) and the Computer (C)")
print("take turns placing their marks.")
print("==================================\n")
# Declare storage for the 3 x 3 board state
# 0 means an empty cell (initial state)
# 1 means a cell chosen by the user
# 2 means a cell chosen by the computer
dim=3
list4 = [0,0,0,0,0,0,0,0,0]
# Draw the guide board and put a cell number in each box
def graph():
k = 1
for i in range(dim+1):
print(" ---"*dim)
for j in range(dim):
if (i < dim):
print("| "+str(k), end=" ")
k = k + 1
if (i != 3):
print("|")
# After each move by the user or the computer,
# check whether someone has won
def game_wins(list4):
#print(list4)
for i in range(dim):
#checks to see if you win in a column
if list4[i] == list4[i+3] == list4[i+6] == 1:
print("You Won")
elif list4[i] == list4[i+3] == list4[i+6] == 2:
print("You Lost")
#checks to see if you win in a row
if list4[dim*i] == list4[dim*i+1] == list4[dim*i+2] == 1:
print ("You Won")
elif list4[dim*i] == list4[dim*i+1] == list4[dim*i+2] == 2:
print("You Lost")
#checks to see if you win in a diagonal
if list4[0] == list4[4] == list4[8] == 1:
print ("You Won")
elif list4[0] == list4[4] == list4[8] == 2:
print("You Lost")
if list4[2] == list4[4] == list4[6] == 1:
print ("You Won")
elif list4[2] == list4[4] == list4[6] == 2:
print("You Lost")
# Draw the board, showing either the cell number or the move made in each box
def graph_pos(list4):
for idx in range(len(list4)):
if (idx % 3 == 0):
print(" ---"*dim)
if (list4[idx] == 0):
print("| "+str(idx+1), end=" ")
elif (list4[idx] == 1):
print("| "+"U", end=" ")
else:
print("| "+"C", end=" ")
if (idx % 3 == 2):
print("|")
print("\n")
# Start the game
go = input("Play TicTaeToe? Enter, or eXit?")
if (go == 'x' or go == 'X'):
sys.exit(0)
graph()
print("\n")
while(1): # repeat until the board game is decided
# choose an empty cell
pos = int(input("You : ")) - 1
while (pos < 0 or pos > 8 or list4[pos] != 0):
pos = int(input("Again : ")) - 1
list4[pos] = 1
# redraw the updated board and check for a winner
graph_pos(list4)
game_wins(list4)
# computer's turn: pick a random empty cell and record it in the list
pos = random.randrange(9)
while (list4[pos] != 0):
pos = random.randrange(9)
print("Computer : " + str(pos+1))
list4[pos] = 2
# redraw the updated board and check for a winner
graph_pos(list4)
game_wins(list4)
Explanation: A simple implementation of the TicTaeToe game in which the user moves first and plays against the computer.
In the future we plan to extend it with machine learning so it can improve its play.
End of explanation |
14,575 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Title
Step1: Load Iris Dataset
Step2: Create Decision Tree Using Gini Impurity
Step3: Train Model
Step4: Create Observation To Predict
Step5: Predict Observation
Step6: View Predicted Probabilities | Python Code:
# Load libraries
from sklearn.tree import DecisionTreeClassifier
from sklearn import datasets
Explanation: Title: Decision Tree Classifier
Slug: decision_tree_classifier
Summary: Training a decision tree classifier in scikit-learn.
Date: 2017-09-19 12:00
Category: Machine Learning
Tags: Trees And Forests
Authors: Chris Albon
<a alt="Gini Index" href="https://machinelearningflashcards.com">
<img src="decision_tree_classifier/Gini_Index_print.png" class="flashcard center-block">
</a>
Preliminaries
End of explanation
# Load data
iris = datasets.load_iris()
X = iris.data
y = iris.target
Explanation: Load Iris Dataset
End of explanation
# Create decision tree classifier object using gini
clf = DecisionTreeClassifier(criterion='gini', random_state=0)
Explanation: Create Decision Tree Using Gini Impurity
End of explanation
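For reference (an added note, not from the original): for a node whose samples have class proportions p_k, the Gini impurity is G = 1 - sum_k p_k**2, and the tree picks splits that minimise the weighted impurity of the child nodes. For the root of the balanced iris data (three classes of 1/3 each) this gives 1 - 3*(1/3)**2 = 2/3.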
# Train model
model = clf.fit(X, y)
Explanation: Train Model
End of explanation
# Make new observation
observation = [[ 5, 4, 3, 2]]
Explanation: Create Observation To Predict
End of explanation
# Predict observation's class
model.predict(observation)
Explanation: Predict Observation
End of explanation
# View predicted class probabilities for the three classes
model.predict_proba(observation)
Explanation: View Predicted Probabilities
End of explanation |
14,576 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<figure>
<IMG SRC="../../logo/logo.png" WIDTH=250 ALIGN="right">
</figure>
IHE Python course, 2017
List, dict and set comprehensions, list generators and filtering using a key function
T.N.Olsthoorn, Feb2017
List comprehensions (listcomps), dict comprehensions (dictcomps) and set comprehensions (setcomps) are a shortcut to construct lists, dicts and sets in a single line.
Step1: List comprehensions
Consider myList, a list of coordinate tuples, and the coordinates x0, y0. The idea is to compute the distance from the point (x0, y0) to each coordinate pair in myList.
Step2: We might compute the distance between the coordinates x0, y0 to each of the points implied by the coordinate tuples in myList using a for-loop as follows
Step3: Now the same thing, but with a list comprehension
Step4: When parentheses ( ) are used instead of square brackets, it is not a tuple that is generated; instead we create a generator object
Step5: r is a generator object that we can now use wherever we need the list that it will generate upon request
Step6: Syntax of a list comprehension
Step7: List comprehensions are especially useful for inspection of objects, to see their public attributes
Step8: We could also use it for better introspection of the methods of an object, like so. For this we can use the inspect module with the function getmembers. With a small list comprehension we can easily identify the type of the public attributes of the list
Step9: Set comprehensions
Set comprehensions work the same as list comprehensions, but curly braces { } are used instead.
Here we construct a set for the remainder of integer division by 5. We do this for numbers 0 to 50. The result is a set with the unique values only.
Step10: Dict comprehensions
Dict comprehensions are similar to list comprehensions, but two values [key, value] must be supplied.
For example, the list of tuples can be regarded as a list of x, y coordinates, and now we want to use the first value as the key and the second value as its value.
Step11: More advanced comprehensions will be shown shortly when we deal with world population data in an extended example.
Using key for filtering sequences of objects (lambda function)
Find the minimum tuple in the list of tuples, judging the minimum by the second item in each tuple.
Let's first generate a tuple of tuples, each with three numbers.
Step12: Then find the tuple for which the second field (that with index 1) is lowest.
Step13: Now a more elegant one using keyword key
Step14: Why does this work ?
In general with a list of arbitrary objects, comparing them is not defined, like in this case. We can, however, come around this, by defining how two of the objects in questions have to be compared to decide which of them is smallest. This comparison is then done using some value that is computed for each object. In this case it's the second value of the tuple that we compare between objects to decide which of them is smallest.
The function computes this value. This function is then passed to the min function as the argument of key.
What then happens, is that min runs along the list of tuples and for each of them computes the value for comparison using the passed function. These values are compared to decide which object is smallest. When done the smallest object is returned.
And the most concise way, using a lambda function | Python Code:
from pprint import pprint
import numpy as np
Explanation: <figure>
<IMG SRC="../../logo/logo.png" WIDTH=250 ALIGN="right">
</figure>
IHE Python course, 2017
List, dict and set comprehensions, list generators and filtering using a key function
T.N.Olsthoorn, Feb2017
List comprehensions (listcomps), dict comprehensions (dictcomps) and set comprehensions (setcomps) are a shortcut to construct lists, dicts and sets in a single line.
End of explanation
myList = [(3, 2), (40, 12), (-5, 4), (-6, -21), (-7, 23)]
x0 = 44
y0 = 13
Explanation: List comprehensions
Consider myList, a list of coordinate tuples, and the coordinates x0, y0. The idea is to compute the distance from the point (x0, y0) to each coordinate pair in myList.
End of explanation
r = []
for x, y in myList:
r.append(np.sqrt((x - x0)**2 + (y - y0)**2))
print(r)
Explanation: We might compute the distance between the coordinates x0, y0 to each of the points implied by the coordinate tuples in myList using a for-loop as follows:
End of explanation
r = [ np.sqrt((x - x0)**2 + (y - y0)**2 ) for x, y in myList]
print(type(r))
print(r)
Explanation: Now the same thing, but with a list comprehension:
End of explanation
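As an aside (an added sketch, not part of the original notebook): for purely numerical work the same distances can be computed in one vectorized step with numpy, reusing the myList, x0 and y0 defined above.
pts = np.array(myList)                                  # shape (5, 2)
d = np.sqrt((pts[:, 0] - x0)**2 + (pts[:, 1] - y0)**2)  # all five distances at once
print(d)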
r = (np.sqrt((x - x0)**2 + (y - y0)**2 ) for x, y in myList)
print(type(r))
Explanation: When parentheses ( ) are used instead of square brackets, it is not a tuple that is generated; instead we create a generator object:
End of explanation
r
Explanation: r is a generator object that we can now use wherever we need the list that it will generate upon request:
End of explanation
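A small added sketch of that behaviour, using a fresh generator expression (a generator can only be consumed once):
squares = (n * n for n in range(5))   # nothing is computed yet
print(list(squares))                  # consuming it yields [0, 1, 4, 9, 16]
print(list(squares))                  # already exhausted: prints []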
from random import shuffle
cards1 = ['Clubs', 'Diamonds', 'Hearts', 'Spades']
cards2 = ['Ace', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'Jack', 'Queen', 'King']
# generate the deck
cards = [c1 + '_' + c2 for c1 in cards1 for c2 in cards2]
shuffle(cards) # shuffle the cards
#pprint(cards) # show them
print("\nShuffled playing cards:\n")
for i in range(13):
for j in range(4):
print("{:20}".format(cards[4 * i + j]), end="")
print()
Explanation: Syntax of a list comprehension:
The syntax of a list comprehension is:
new = [expression for p in old_list if condition]
The if part is used to filter out values of p from the original list or tuple.
On the other hand, numerical stuff is mostly better done using numpy functionality such as numpy arrays.
Let's generate a deck of playing cards and shuffle them.
A deck of cards looks like this:
End of explanation
[p for p in dir(r) if not p.startswith('_')]
Explanation: List comprehensions are especially useful for inspection of objects, to see their public attributes:
End of explanation
from inspect import getmembers
[p for p in getmembers(myList) if not p[0].startswith('_')]
Explanation: We could also use it for better introspection of the methods of an object, like so. For this we can use the inspect module with the function getmembers. With a small list comprehension we can easily identify the type of the public attributes of the list:
End of explanation
myList = [p%5 for p in range(51)] # % computes remainder of a division
mySet ={p%5 for p in range(51)}
print(myList)
print()
print(mySet)
Explanation: Set comprehensions
Set comprehensions work the same as list comprehensions, but curly braces { } are used instead.
Here we construct a set for the remainder of integer division by 5. We do this for numbers 0 to 50. The result is a set with the unique values only.
End of explanation
myList = [(3, 2), (40, 12), (-5, 4), (-6, -21), (-7, 23)]
myDict1 = {key : value for key, value in myList}
myDict2 = {value : key for key, value in myList}
print(myDict1)
print(myDict2)
print()
pprint(myDict1) # sorts the keys
pprint(myDict2) # sorts the keys
Explanation: Dict comprehensions
Dict comprehensions are similar to list comprehensions, but two values [key, value] must be supplied.
For example, the list of tuples can be regarded as a list of x, y coordinates, and now we want to use the first value as the key and the second value as its value.
End of explanation
from numpy import random
myTuples = tuple([tuple(random.randint(-5, 5, 3)) for i in range(20)])
pprint(myTuples)
Explanation: More advanced comprehensions will be shown shortly when we deal with world population data in an extended example.
Using key for filtering sequences of objects (lambda function)
Find the minimum tuple in the list of tuples, judging the minimum by the second item in each tuple.
Let's first generate a tuple of tuples, each with three numbers.
End of explanation
import sys
m = myTuples[0] # initialize with the first tuple, any other would do as well
for tp in myTuples:
if tp[1] < m[1]: # compare the field with that of the current minimum tuple
m = tp # if true then replace the current minimum tuple
print(m) # show the updated minimum tuple
print("\nminimum in field 2 is: ",m)
Explanation: Then find the tuple for which the second field (that with index 1) is lowest.
End of explanation
def vcmp(tp):
x, y, z = tp
return y
min(myTuples, key=vcmp)
Explanation: Now a more elegant one using keyword key
End of explanation
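The same key mechanism works for other ordering functions such as sorted(); a small added example reusing the vcmp defined above:
sorted(myTuples, key=vcmp)[:3]   # the three tuples with the smallest second field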
min(myTuples, key = lambda x: x[1])
Explanation: Why does this work ?
In general with a list of arbitrary objects, comparing them is not defined, like in this case. We can, however, come around this, by defining how two of the objects in questions have to be compared to decide which of them is smallest. This comparison is then done using some value that is computed for each object. In this case it's the second value of the tuple that we compare between objects to decide which of them is smallest.
The function computes this value. This function is then passed to the min function as the argument of key.
What then happens, is that min runs along the list of tuples and for each of them computes the value for comparison using the passed function. These values are compared to decide which object is smallest. When done the smallest object is returned.
And the most concise way, using a lambda function:
End of explanation |
14,577 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Copyright 2021 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Step1: Function
Step2: Display Code | Python Code:
def char2vec(word):
from collections import Counter
from math import sqrt
# count the characters in word
cw = Counter(word)
# precomputes a set of the unique characters
sw = set(cw)
# precomputes the "length" of the word vector
lw = sqrt(sum(c*c for c in cw.values()))
# return a tuple
return cw, sw, lw
Explanation: Copyright 2021 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Character to Vector Example
Function: char2vec
This function: <br>
1) Counts the characters in the word <br>
2) Computes a set of unique characters <br>
3) Computes the length of the vector in "character space"
End of explanation
def cosdis(v1, v2):
# which characters are common to the two words?
common = v1[1].intersection(v2[1])
# by definition of cosine distance we have
return sum(v1[0][ch]*v2[0][ch] for ch in common)/v1[2]/v2[2]
Explanation: Function: cosdis
This function: Takes the cosine distance in character-space between two char2vec vectors.
End of explanation
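For reference (an added note, not from the original): cosdis implements the cosine similarity cos(theta) = sum over the common characters ch of v1[ch] * v2[ch], divided by |v1| * |v2|, so identical words score 1.0 and words sharing no characters score 0.0.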
#Our sample "words"
a = 'TheBrownFoxJumpsOverTheLazyDog'
b = 'TheWhiteFoxJumpsOverTheLazyDog'
c = 'SupermanJumpsOverTheTallBuilding'
wordlist = [a,b,c]
#For each of our words, create and print a dictionary describing the contained characters
#We use a dictionary instead of a sparse matrix to describe the characters, however the concept is identical
char2vecdict = {}
for word in wordlist:
char2vecdict[word] = char2vec(word)
print(word)
print(char2vecdict[word], end = '\n\n')
#Find the cosine distance between each of the 3 vectors created above
#Similar sentences will have higher scores, ranging from 0-1.
print ("\n\nCosine Distance between", a, "and", b,"=", cosdis(char2vecdict[a],char2vecdict[b]))
print ("\nCosine Distance between", b, "and", c,"=", cosdis(char2vecdict[b],char2vecdict[c]))
print ("\nCosine Distance between", c, "and", a,"=", cosdis(char2vecdict[c],char2vecdict[a]))
print ("\nCosine Distance between", a, "and", a,"=", cosdis(char2vecdict[a],char2vecdict[a]))
Explanation: Display Code
End of explanation |
14,578 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Visual mathhammer for 8th edition
Introduction to plots
The charts and numbers below visually present the distribution of total wounds lost for various attack situations under 8th edition rules. The plots allow to get an intuitive sense of the importance of statlines, and also report the "expected" number of wounds, the number you would get on average if you keep repeating this situation. Each bar shows the probability of the targeted unit losing a certain number of wounds, after all rolls and checks. The expectation should be interpreted with caution — an expectation of x does not, in general, mean that in 50% of the cases the attacker will deal x or more damage.
Let us start with a simple case to illustrate the approach: here we consider a squad of 10 Guardsmen firing 10 Lasgun shots into an identical counterpart. The chance to hit is 3+, or 1/2; the same holds for the chance of an S3 weapon wounding T3 models; and a 5+ save gives a probability of 4/6 that a wound is left standing.
Step1: As you can see, the squad has about 47% chance of scoring 1 or 0 hits and around 53% chance of scoring 2 or more hits. The expectation is 1.7, which is misleading
Step2: The distribution shifted to the right, the chance to score 2+ hits went up to around 62%, but really the effect is negligible, with the re-roll still just as likely to miss as it is to hit. Now let's look at 'First Rank Fire!, Second Rank Fire!', which turns Lasguns into Rapid Fire 2 weapons
Step3: A much stronger effect. You still cannot really rely on getting 3+ hits (~70% chance), but they will happen often enough and rarely would you get less than 2.
Now consider Veterans, who can shoot somewhat better and cost 6 points against 4 per a regular squaddie, what does that improvement in Ballistic Skill actually yield?
Step4: Maybe not as much as one would think, the chance to score 2+ has gone up to around 68%, compared to the troop squad's 53%. If you have 60 points to spend, you are probably better off with 15 regular shots | Python Code:
profiles[0] = {'shots': 10, 'p_hit': 1 / 2, 'p_wound': 1 / 2, 'p_unsaved': 4 / 6, 'damage': '1'}
profile_damage = damage_dealt(profiles[0])
wound_chart(profile_damage, profiles)
Explanation: Visual mathhammer for 8th edition
Introduction to plots
The charts and numbers below visually present the distribution of total wounds lost for various attack situations under 8th edition rules. The plots allow to get an intuitive sense of the importance of statlines, and also report the "expected" number of wounds, the number you would get on average if you keep repeating this situation. Each bar shows the probability of the targeted unit losing a certain number of wounds, after all rolls and checks. The expectation should be interpreted with caution — an expectation of x does not, in general, mean that in 50% of the cases the attacker will deal x or more damage.
Let us start with a simple case to illustrate the approach: here we consider a squad of 10 Guardsmen firing 10 Lasgun shots into an identical counterpart. The chance to hit is 3+, or 1/2; the same holds for the chance of an S3 weapon wounding T3 models; and a 5+ save gives a probability of 4/6 that a wound is left standing.
End of explanation
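A minimal added sketch of the arithmetic behind such a chart for damage-1 weapons, assuming independent rolls and that scipy is available; this is not the author's damage_dealt / wound_chart implementation, which is defined elsewhere.
from scipy.stats import binom
p = (1 / 2) * (1 / 2) * (4 / 6)                  # P(hit) * P(wound) * P(failed save)
pmf = [binom.pmf(k, 10, p) for k in range(11)]   # probability of exactly k wounds
expected = 10 * p                                # about 1.67, the expectation of 1.7 quoted in the text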
profiles[0]['p_hit'] = 0.583
wound_chart(damage_dealt(profiles[0]), profiles)
Explanation: As you can see, the squad has about 47% chance of scoring 1 or 0 hits and around 53% chance of scoring 2 or more hits. The expectation is 1.7, which is misleading: the squad actually has better than even odds of getting 2+ hits. The biggest takeaway is that you can only really count on getting 1 hit, and even then you will be frustrated every sixth roll.
Let us take a look at how orders affect things. First we put our squad under 'Take Aim!' increasing the to hit chance to 0.583:
End of explanation
profiles[0]['p_hit'] = 0.5
profiles[0]['shots'] = 20
wound_chart(damage_dealt(profiles[0]), profiles)
Explanation: The distribution shifted to the right, the chance to score 2+ hits went up to around 62%, but really the effect is negligible, with the re-roll still just as likely to miss as it is to hit. Now let's look at 'First Rank Fire!, Second Rank Fire!', which turns Lasguns into Rapid Fire 2 weapons:
End of explanation
profiles[0]['shots'] = 10
profiles[0]['p_hit'] = 2 / 3
wound_chart(damage_dealt(profiles[0]), profiles)
Explanation: A much stronger effect. You still cannot really rely on getting 3+ hits (~70% chance), but they will happen often enough and rarely would you get less than 2.
Now consider Veterans, who can shoot somewhat better and cost 6 points against 4 for a regular squaddie: what does that improvement in Ballistic Skill actually yield?
End of explanation
profiles[0]['shots'] = 15
profiles[0]['p_hit'] = 1 / 2
wound_chart(damage_dealt(profiles[0]), profiles)
Explanation: Maybe not as much as one would think, the chance to score 2+ has gone up to around 68%, compared to the troop squad's 53%. If you have 60 points to spend, you are probably better off with 15 regular shots:
End of explanation |
14,579 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Ordinal Regression
Some data are discrete but intrinsically ordered, these are called ordinal data. One example is the likert scale for questionairs ("this is an informative tutorial"
Step1: Data Generation
First, generate some data with ordinal structure
Step2: Improper Prior
We will model the outcomes Y as coming from an OrderedLogistic distribution, conditional on X. The OrderedLogistic distribution in numpyro requires ordered cutpoints. We can use the ImproperUniform distribution to introduce a parameter with an arbitrary support that is otherwise completely uninformative, and then add an ordered_vector constraint.
Step3: The ImproperUniform distribution allows us to use parameters with constraints on their domain, without adding any additional information e.g. about the location or scale of the prior distribution on that parameter.
If we want to incorporate such information, for instance that the values of the cut-points should not be too far from zero, we can add an additional sample statement that uses another prior, coupled with an obs argument. In the example below we first sample cutpoints c_y from the ImproperUniform distribution with constraints.ordered_vector as before, and then sample a dummy parameter from a Normal distribution while conditioning on c_y using obs=c_y. Effectively, we've created an improper / unnormalized prior that results from restricting the support of a Normal distribution to the ordered domain
Step4: Proper Prior
If having a proper prior for those cutpoints c_y is desirable (e.g. to sample from that prior and get prior predictive), we can use TransformedDistribution with an OrderedTransform transform as follows.
Step5: Principled prior with Dirichlet Distribution
It is non-trivial to apply our expertise over the cutpoints in latent space (even more so when we are having to provide a prior before applying the OrderedTransform).
A natural inclination would be to apply a Dirichlet prior model to the ordinal probabilities. We will follow the proposal by M. Betancourt ([1], Section 2.2) and use a Dirichlet prior model to induce cutpoints indirectly via SimplexToOrderedTransform.
This approach should be advantageous when there is a need for strong prior knowledge to be added to our ordinal model, e.g., when one of the categories is missing in our dataset or when some categories are strongly separated (leading to non-identifiability of the cutpoints). Moreover, such a parametrization allows us to sample our model and conduct prior predictive checks (unlike model1 with ImproperUniform).
We can sample cutpoints directly from TransformedDistribution(Dirichlet(concentration),transforms.SimplexToOrderedTransform(anchor_point)). However, if we use the Transform within the reparam handler context, we can capture not only the induced cutpoints, but also the sampled Ordinal probabilities implied by the concentration parameter. anchor_point is a nuisance parameter to improve identifiability of our transformation (for details please see [1], Section 2.2)
Please note that we cannot compare latent cutpoints or b_X_eta separately across the various models as they are inherently linked. | Python Code:
# !pip install -q numpyro@git+https://github.com/pyro-ppl/numpyro
from jax import numpy as np, random
import numpyro
from numpyro import sample, handlers
from numpyro.distributions import (
Categorical,
Dirichlet,
ImproperUniform,
Normal,
OrderedLogistic,
TransformedDistribution,
constraints,
transforms,
)
from numpyro.infer import MCMC, NUTS
from numpyro.infer.reparam import TransformReparam
import pandas as pd
import seaborn as sns
assert numpyro.__version__.startswith("0.9.2")
Explanation: Ordinal Regression
Some data are discrete but intrinsically ordered; these are called ordinal data. One example is the Likert scale for questionnaires ("this is an informative tutorial": 1. strongly disagree / 2. disagree / 3. neither agree nor disagree / 4. agree / 5. strongly agree). Ordinal data are also ubiquitous in the medical world (e.g. the Glasgow Coma Scale for measuring neurological dysfunction).
This poses a challenge for statistical modeling, as the data do not fit the most well-known modelling approaches (e.g. linear regression). Modeling the data as categorical is one possibility, but it disregards the inherent ordering in the data, and may be less statistically efficient. There are multiple approaches for modeling ordered data. Here we will show how to use the OrderedLogistic distribution with cutpoints that are sampled from improper priors, from a Normal distribution, or induced via the categories' probabilities from a Dirichlet distribution. For a more in-depth discussion of Bayesian modeling of ordinal data, see e.g. Michael Betancourt's Ordinal Regression case study
References:
1. Betancourt, M. (2019), “Ordinal Regression”, (https://betanalpha.github.io/assets/case_studies/ordinal_regression.html)
End of explanation
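For readers who want the likelihood spelled out, here is a rough numpy illustration of the standard ordered-logit construction that OrderedLogistic encodes (cumulative logistic probabilities at the cutpoints, differenced into category probabilities). This is only a sketch for intuition, not numpyro's actual implementation.
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def ordered_logistic_probs(eta, cutpoints):
    # P(Y <= k) = sigmoid(c_k - eta); category probabilities are the differences
    cdf = np.concatenate([sigmoid(np.asarray(cutpoints) - eta), [1.0]])
    return np.diff(np.concatenate([[0.0], cdf]))

print(ordered_logistic_probs(eta=0.5, cutpoints=[-1.0, 1.0]))  # three probabilities summing to 1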
simkeys = random.split(random.PRNGKey(1), 2)
nsim = 50
nclasses = 3
Y = Categorical(logits=np.zeros(nclasses)).sample(simkeys[0], sample_shape=(nsim,))
X = Normal().sample(simkeys[1], sample_shape=(nsim,))
X += Y
print("value counts of Y:")
df = pd.DataFrame({"X": X, "Y": Y})
print(df.Y.value_counts())
for i in range(nclasses):
print(f"mean(X) for Y == {i}: {X[np.where(Y==i)].mean():.3f}")
sns.violinplot(x="Y", y="X", data=df);
Explanation: Data Generation
First, generate some data with ordinal structure
End of explanation
def model1(X, Y, nclasses=3):
b_X_eta = sample("b_X_eta", Normal(0, 5))
c_y = sample(
"c_y",
ImproperUniform(
support=constraints.ordered_vector,
batch_shape=(),
event_shape=(nclasses - 1,),
),
)
with numpyro.plate("obs", X.shape[0]):
eta = X * b_X_eta
sample("Y", OrderedLogistic(eta, c_y), obs=Y)
mcmc_key = random.PRNGKey(1234)
kernel = NUTS(model1)
mcmc = MCMC(kernel, num_warmup=250, num_samples=750)
mcmc.run(mcmc_key, X, Y, nclasses)
mcmc.print_summary()
Explanation: Improper Prior
We will model the outcomes Y as coming from an OrderedLogistic distribution, conditional on X. The OrderedLogistic distribution in numpyro requires ordered cutpoints. We can use the ImproperUniform distribution to introduce a parameter with an arbitrary support that is otherwise completely uninformative, and then add an ordered_vector constraint.
End of explanation
def model2(X, Y, nclasses=3):
b_X_eta = sample("b_X_eta", Normal(0, 5))
c_y = sample(
"c_y",
ImproperUniform(
support=constraints.ordered_vector,
batch_shape=(),
event_shape=(nclasses - 1,),
),
)
sample("c_y_smp", Normal(0, 1), obs=c_y)
with numpyro.plate("obs", X.shape[0]):
eta = X * b_X_eta
sample("Y", OrderedLogistic(eta, c_y), obs=Y)
kernel = NUTS(model2)
mcmc = MCMC(kernel, num_warmup=250, num_samples=750)
mcmc.run(mcmc_key, X, Y, nclasses)
mcmc.print_summary()
Explanation: The ImproperUniform distribution allows us to use parameters with constraints on their domain, without adding any additional information e.g. about the location or scale of the prior distribution on that parameter.
If we want to incorporate such information, for instance that the values of the cut-points should not be too far from zero, we can add an additional sample statement that uses another prior, coupled with an obs argument. In the example below we first sample cutpoints c_y from the ImproperUniform distribution with constraints.ordered_vector as before, and then sample a dummy parameter from a Normal distribution while conditioning on c_y using obs=c_y. Effectively, we've created an improper / unnormalized prior that results from restricting the support of a Normal distribution to the ordered domain
End of explanation
def model3(X, Y, nclasses=3):
b_X_eta = sample("b_X_eta", Normal(0, 5))
c_y = sample(
"c_y",
TransformedDistribution(
Normal(0, 1).expand([nclasses - 1]), transforms.OrderedTransform()
),
)
with numpyro.plate("obs", X.shape[0]):
eta = X * b_X_eta
sample("Y", OrderedLogistic(eta, c_y), obs=Y)
kernel = NUTS(model3)
mcmc = MCMC(kernel, num_warmup=250, num_samples=750)
mcmc.run(mcmc_key, X, Y, nclasses)
mcmc.print_summary()
Explanation: Proper Prior
If having a proper prior for those cutpoints c_y is desirable (e.g. to sample from that prior and get prior predictive), we can use TransformedDistribution with an OrderedTransform transform as follows.
End of explanation
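As a side note, the ordered transform itself is easy to picture: the first coordinate passes through unchanged and each later coordinate adds a strictly positive increment. The sketch below is my reading of how such a transform behaves and is only meant for intuition; consult the numpyro documentation for the exact definition of OrderedTransform.
import numpy as np

def ordered_transform(x):
    x = np.asarray(x, dtype=float)
    return np.concatenate([x[:1], x[0] + np.cumsum(np.exp(x[1:]))])

print(ordered_transform([0.3, -2.0, 0.1]))  # strictly increasing output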
# We will apply a nudge towards equal probability for each category (corresponds to equal logits of the true data generating process)
concentration = np.ones((nclasses,)) * 10.0
def model4(X, Y, nclasses, concentration, anchor_point=0.0):
b_X_eta = sample("b_X_eta", Normal(0, 5))
with handlers.reparam(config={"c_y": TransformReparam()}):
c_y = sample(
"c_y",
TransformedDistribution(
Dirichlet(concentration),
transforms.SimplexToOrderedTransform(anchor_point),
),
)
with numpyro.plate("obs", X.shape[0]):
eta = X * b_X_eta
sample("Y", OrderedLogistic(eta, c_y), obs=Y)
kernel = NUTS(model4)
mcmc = MCMC(kernel, num_warmup=250, num_samples=750)
mcmc.run(mcmc_key, X, Y, nclasses, concentration)
# with exclude_deterministic=False, we will also show the ordinal probabilities sampled from Dirichlet (vis. `c_y_base`)
mcmc.print_summary(exclude_deterministic=False)
Explanation: Principled prior with Dirichlet Distribution
It is non-trivial to apply our expertise over the cutpoints in latent space (even more so when we are having to provide a prior before applying the OrderedTransform).
A natural inclination would be to apply a Dirichlet prior model to the ordinal probabilities. We will follow the proposal by M. Betancourt ([1], Section 2.2) and use a Dirichlet prior model to induce cutpoints indirectly via SimplexToOrderedTransform.
This approach should be advantageous when there is a need for strong prior knowledge to be added to our ordinal model, e.g., when one of the categories is missing in our dataset or when some categories are strongly separated (leading to non-identifiability of the cutpoints). Moreover, such a parametrization allows us to sample our model and conduct prior predictive checks (unlike model1 with ImproperUniform).
We can sample cutpoints directly from TransformedDistribution(Dirichlet(concentration),transforms.SimplexToOrderedTransform(anchor_point)). However, if we use the Transform within the reparam handler context, we can capture not only the induced cutpoints, but also the sampled Ordinal probabilities implied by the concentration parameter. anchor_point is a nuisance parameter to improve identifiability of our transformation (for details please see [1], Section 2.2)
Please note that we cannot compare latent cutpoints or b_X_eta separately across the various models as they are inherently linked.
End of explanation |
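To build intuition for the Dirichlet-induced cutpoints used in model4 above, here is a rough numpy sketch of the construction described in [1], Section 2.2: choose the cutpoints so that, at the anchor point, the implied ordinal probabilities equal the given simplex. This is an assumption-laden illustration, not numpyro's SimplexToOrderedTransform code.
import numpy as np

def simplex_to_ordered(probs, anchor_point=0.0):
    cum = np.cumsum(probs)[:-1]                      # cumulative probabilities of the first K-1 classes
    return anchor_point + np.log(cum / (1.0 - cum))  # logit of the cumulative probabilities

print(simplex_to_ordered([0.2, 0.5, 0.3]))           # two ordered cutpoints for three classes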
14,580 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Moving to the Cloud
Setup TensorFlow locally
Follow the instructions to install TensorFlow locally on your computer
Step1: Create VM
Prerequisite
Step2: Use your own data
Create a new storage bucket
https | Python Code:
import os
import tensorflow as tf
print('version={}, CUDA={}, GPU={}, TPU={}'.format(
tf.__version__, tf.test.is_built_with_cuda(),
# GPU attached? Note that you can "Runtime/Change runtime type..." in Colab.
len(tf.config.list_physical_devices('GPU')) > 0,
# TPU accessible? (only works on Colab)
'COLAB_TPU_ADDR' in os.environ))
Explanation: Moving to the Cloud
Setup TensorFlow locally
Follow the instructions to install TensorFlow locally on your computer:
https://www.tensorflow.org/install/
You can also skip this section and proceed with "Create VM" if you get stuck with the installation.
Test your installation by copying the following cell into a file and executing it on your computer:
End of explanation
import json
import tensorflow as tf
data_path = 'gs://amld-datasets/zoo_img'
batch_size = 100
labels = [label.strip() for label in
tf.io.gfile.GFile('{}/labels.txt'.format(data_path))]
counts = json.load(tf.io.gfile.GFile('{}/counts.json'.format(data_path)))
train_steps = counts['train'] // batch_size
eval_steps = counts['eval'] // batch_size
feature_spec = {
'label': tf.io.FixedLenFeature(shape=[1], dtype=tf.int64),
'img_64': tf.io.FixedLenFeature(shape=[64, 64], dtype=tf.int64),
}
def parse_example(serialized_example):
features = tf.io.parse_single_example(serialized_example, feature_spec)
label = tf.one_hot(tf.squeeze(features['label']), len(labels))
img_64 = tf.cast(features['img_64'], tf.float32) / 255.
return img_64, label
ds_train = tf.data.TFRecordDataset(
tf.io.gfile.glob('{}/train-*'.format(data_path))
).map(parse_example).batch(batch_size)
ds_eval = tf.data.TFRecordDataset(
tf.io.gfile.glob('{}/eval-*'.format(data_path))
).map(parse_example).batch(batch_size)
model = tf.keras.Sequential([
tf.keras.layers.Flatten(input_shape=(64, 64,)),
tf.keras.layers.Dense(len(labels), activation='softmax')
])
model.compile(
optimizer=tf.keras.optimizers.Adam(0.01),
loss='categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(ds_train, steps_per_epoch=train_steps, epochs=1)
print('eval: ', model.evaluate(ds_eval, steps=eval_steps))
model.save('linear.h5')
Explanation: Create VM
Prerequisite : Before using any Cloud services, you will need to set up a billing account (https://console.cloud.google.com/billing) and register a credit card. Once your credit card is validated (by charging and immediately reimbursing a small amount), you will get 300 USD credit for one year.
Participants of the workshop will get a 50 USD voucher and don't need to set up any billing.
Create a new virtual machine in Google Cloud Console. By far the easiest way to get this set up correctly is by copying the following command into the Cloud Shell :
$ gcloud compute instances create cpu-1 --zone=us-east1-b --image-project=deeplearning-platform-release --image-family=tf2-2-1-cpu
Once the VM named "cpu-1" has started (green icon), click on the "SSH" button in the "Connect" column - if you have copy'n'paste problems with the web terminal, try with another browser. You can also install your SSH key (make sure to specify the cloud username when copy'n'pasting) and then use your favorite terminal emulator to connect to the VM.
Check the installation
$ python3 -c 'import tensorflow as tf; print(tf.__version__)'
Tip: If you turn down the VM when you don't need it, your credits will last a lot longer :-)
Train as Python program
Below is a minimal program that trains a linear model using data from cloud. Copy it into a file and start it on your computer or cloud instance (don't forget to run the program with python3 on Cloud if you used the above commands).
End of explanation
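If it helps, connecting to the VM and copying the training script over can also be done from a local terminal with gcloud. The snippet below assumes the "cpu-1" VM and zone created above and uses a placeholder file name; the exact flags are worth double-checking against gcloud help.
$ gcloud compute ssh cpu-1 --zone=us-east1-b
$ gcloud compute scp train_linear.py cpu-1:~ --zone=us-east1-b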
data_src_directory = '/content/gdrive/My Drive/amld_data/zoo_img'
# YOUR ACTION REQUIRED:
# Change the bucket name to the bucket that you created and have write
# access to.
data_dst_bucket = 'gs://amld-datasets'
# Authenticate for Drive & Cloud.
from google.colab import drive
drive.mount('/content/gdrive')
from google.colab import auth
auth.authenticate_user()
!gsutil cp -R "$data_src_directory" "$data_dst_bucket"
# YOUR ACTION REQUIRED:
# Change the data_path in above training script to your own data and re-run.
# Note that you may have to update access rights accordingly.
Explanation: Use your own data
Create a new storage bucket
https://console.cloud.google.com/storage/browser
Upload data with commands below
Optionally set access permissions (e.g. give allUsers "Storage Object Viewer" access)
By the way: All workshop Colabs also work with paths that are inside cloud storage buckets!
End of explanation |
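For the bucket steps above, something along these lines should work from Cloud Shell; the bucket name is a placeholder and the role shorthand should be verified against gsutil help iam before use.
$ gsutil mb -l us-east1 gs://my-amld-bucket
$ gsutil iam ch allUsers:objectViewer gs://my-amld-bucket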
14,581 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<center>
<a href="http
Step1: 2.2 Vérification
Step2: 2.3 Tableau disjonctif
The documentation of the MCA function is nonexistent. In principle this function can analyse a DataFrame, but its handling of complex qualitative variables seems defective. It is preferable to anticipate this by building the indicator (dummy) variables ourselves.
Step3: 2.4 AFCM
Step4: Q De l'AFC de quel tableau sont ces valeurs propres? (comparer avec R).
Q Que sont les tableaux ci-dessous?
Step5: La fonction MCA calcule également des contributions, cosinus carrés et "parts de variance expliquée".
2.5 Graphiques de l'AFCM
Step6: 3 Données Titanic
3.1 Lecture des données
Les données sur le naufrage du Titanic sont décrites dans le calepin d'introduction à Python et consacré à pandas. Reconstruire la table de données en lisant le fichier .csv.
Step7: 3.2 Données manquantes
Vérifier que les données contiennent des valeurs manquantes, faire des imputations à la médiane d'une valeur quantitative manquante ou la modalité la plus fréquente d'une valeur qualitative absente.
Step8: Continuer en transformant les variables.
2.3 Transformations
The quantitative variables are binned into classes so that an MCA can be run on all the variables. The categories of the qualitative variables are renamed so that explicit plots can be built.
Step9: 2.4 Analyse multiple des correspondances
Les données "Titanic" regroupent des variables qualitatives et quantitatives. Après recodage en classes (discrétisation) des variables quantitatives, la table obtenue se prête à une analyse factorielle multiple des correspondances.
Step10: Calcul de l'AFCM et représentations graphiques. | Python Code:
# Librairies
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
# Lire les données avec plusieurs espaces comme séparateur
# oublier la première colonne et utilisaer la première ligne pour le nom des variables
datFic=pd.read_table('Data/afcfic.dat',header=0,sep='\s+',usecols=[1,2])
datFic
# Changer le type
datFic["csp"]=pd.Categorical(datFic["csp"],ordered=False)
datFic["sport"]=pd.Categorical(datFic["sport"],ordered=False)
datFic
Explanation: <center>
<a href="http://www.insa-toulouse.fr/" ><img src="http://www.math.univ-toulouse.fr/~besse/Wikistat/Images/logo-insa.jpg" style="float:left; max-width: 120px; display: inline" alt="INSA"/></a>
<a href="http://wikistat.fr/" ><img src="http://www.math.univ-toulouse.fr/~besse/Wikistat/Images/wikistat.jpg" style="float:right; max-width: 250px; display: inline" alt="Wikistat"/></a>
</center>
Scénarios d'Exploration Statistique
Analyse factorielle multiple des correspondances (AFCM) avec <a href="https://www.python.org/"><img src="https://upload.wikimedia.org/wikipedia/commons/thumb/f/f8/Python_logo_and_wordmark.svg/390px-Python_logo_and_wordmark.svg.png" style="max-width: 150px; display: inline" alt="Python"/></a>
Résumé: L'analyse factorielle multiple des correspondance (AFCM) n'est pas disponible dans la librairie scikit-learn ni dans tout autre librairie officielle. Elle est obtenue en utilisant celle plus fruste: MCA. Illustration sommaire de l'AFCM, sur des données "jouet" puis sur celles du naufrage du Titanic.
1 Disponibilité de l'AFCM
L'AFCM est une méthode proposée en France par Benzécri en 1982 avant d'être diffusée en langue anglaise de Leeuw (1985) puis par Greenacre à partir de 2005 sans pour autant être très utilisée et donc présente dans la plupart des logiciels (sauf SAS). Elle est très développée dans la librairie R factoMineR au sein de l'Agrocampus de Rennes mais absente des principales lbrairies en Python plus concernées par des données physiques, signaux ou images.
Il ne serait pas difficile de recontruire cette analyse en suivant le déroulement des calculs matriciels: SVD du tableau disjonctif complet. Néanmoins, ce qui reste le plus compliqué est la gestion des types possibles de données : table de contingence, tableau de Burt, tableau disjontif, en association ou nom à la classe DataFrame, ainsi que la production de graphiques de qualité. Il est aussi possible d'exploiter un embryon de programme: MCA en installant la librairie correspondante par la commande pip install --user mca selon l'installation, ou en chargeant le seul module mca.py dans le répertoire courant.
Comme pour l'ACP ou l'AFD, il est bienvenu de contrôler les résultats fournis sur un exemple jouet avant d'aborder des données plus complexes.
Il reste du travail pour: traiter directement un objet DataFrame, accepter des variables siupplémentaires, construire des fonction graphiques de qualité...
2 AFCM de données jouet
2.1 Lecture des données
End of explanation
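The text above notes that the whole analysis boils down to an SVD of the complete disjunctive (indicator) table. As a rough, self-contained numpy sketch of that principle (an illustration only, not a replacement for the MCA package used below):
import numpy as np
import pandas as pd

def mca_svd(indicator_df):
    Z = indicator_df.to_numpy(dtype=float)
    P = Z / Z.sum()                                        # correspondence matrix
    r, c = P.sum(axis=1), P.sum(axis=0)                    # row / column masses
    S = (P - np.outer(r, c)) / np.sqrt(np.outer(r, c))     # standardized residuals
    U, s, Vt = np.linalg.svd(S, full_matrices=False)
    row_coords = (U * s) / np.sqrt(r)[:, None]             # principal coordinates of rows
    col_coords = (Vt.T * s) / np.sqrt(c)[:, None]          # principal coordinates of columns
    return s ** 2, row_coords, col_coords                  # eigenvalues and coordinates

# Example with a tiny dummy disjunctive table (placeholder data):
dummy = pd.get_dummies(pd.DataFrame({"csp": list("aabb"), "sport": list("xyxy")}))
eigvals, rows, cols = mca_svd(dummy)
print(eigvals)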
datFic.describe()
pd.crosstab(datFic["csp"],datFic["sport"])
Explanation: 2.2 Verification
End of explanation
# Indicatrices
dc=pd.DataFrame(pd.get_dummies(datFic))
dc.head()
Explanation: 2.3 Disjunctive (indicator) table
The documentation of the MCA function is nonexistent. In principle this function can analyse a DataFrame, but its handling of complex qualitative variables seems defective. It is preferable to anticipate this by building the indicator (dummy) variables ourselves.
End of explanation
from mca import MCA
mcaFic=MCA(dc,benzecri=False)
# Valeurs singulières
print(mcaFic.L)
Explanation: 2.4 AFCM
End of explanation
print(mcaFic.fs_c())
print(mcaFic.fs_r())
Explanation: Q: These eigenvalues come from the CA of which table? (Compare with R.)
Q: What are the tables below?
End of explanation
plt.scatter(mcaFic.fs_c()[:, 0],mcaFic.fs_c()[:, 1])
for i, j, nom in zip(mcaFic.fs_c()[:, 0],mcaFic.fs_c()[:, 1], dc.columns):
plt.text(i, j, nom)
plt.show()
plt.scatter(mcaFic.fs_r()[:, 0],mcaFic.fs_r()[:, 1])
for i, j, nom in zip(mcaFic.fs_r()[:, 0],mcaFic.fs_r()[:, 1], dc.index):
plt.text(i, j, nom)
plt.show()
Explanation: The MCA function also computes contributions, squared cosines and "shares of explained variance".
2.5 MCA plots
End of explanation
# Lire les données
df=pd.read_csv('Data/titanic.csv',skiprows=1,header=None,usecols=[1,2,4,5,9,11],
names=["Surv","Classe","Genre","Age","Prix","Port"],dtype={"Surv":object,"Classe":object,"Genre":object,"Port":object})
df.head()
df.shape # dimensions
# Redéfinir les types
df["Surv"]=pd.Categorical(df["Surv"],ordered=False)
df["Classe"]=pd.Categorical(df["Classe"],ordered=False)
df["Genre"]=pd.Categorical(df["Genre"],ordered=False)
df["Port"]=pd.Categorical(df["Port"],ordered=False)
df.dtypes
Explanation: 3 Titanic data
3.1 Reading the data
The Titanic shipwreck data are described in the introductory Python notebook devoted to pandas. Rebuild the data table by reading the .csv file.
End of explanation
df.count()
# imputation des valeurs manquantes
df["Age"]=df["Age"].fillna(df["Age"].median())
df.Port=df["Port"].fillna("S")
Explanation: 3.2 Missing values
Check that the data contain missing values, then impute a missing quantitative value with the median and a missing qualitative value with the most frequent category.
End of explanation
# Discrétiser les variables quantitatives
df["AgeQ"]=pd.qcut(df.Age,3,labels=["Ag1","Ag2","Ag3"])
df["PrixQ"]=pd.qcut(df.Prix,3,labels=["Pr1","Pr2","Pr3"])
# redéfinir les noms des modalités
df["Surv"]=df["Surv"].cat.rename_categories(["Vnon","Voui"])
df["Classe"]=df["Classe"].cat.rename_categories(["Cl1","Cl2","Cl3"])
df["Genre"]=df["Genre"].cat.rename_categories(["Gfem","Gmas"])
df["Port"]=df["Port"].cat.rename_categories(["Pc","Pq","Ps"])
df.head()
Explanation: Continue by transforming the variables.
2.3 Transformations
The quantitative variables are binned into classes so that an MCA can be run on all the variables. The categories of the qualitative variables are renamed so that explicit plots can be built.
End of explanation
# Suppression des variables quantitatives
# pour l'AFCM
df_q=df.drop(["Age","Prix"],axis=1)
df_q.head()
# Indicatrices
dc=pd.DataFrame(pd.get_dummies(df_q[["Surv","Classe","Genre","Port","AgeQ","PrixQ"]]))
dc.head()
Explanation: 2.4 Multiple correspondence analysis
Les données "Titanic" regroupent des variables qualitatives et quantitatives. Après recodage en classes (discrétisation) des variables quantitatives, la table obtenue se prête à une analyse factorielle multiple des correspondances.
End of explanation
mca_df=MCA(dc,benzecri=False)
# Valeurs singulières
print(mca_df.L)
# Composantes principales des colonnes (modalités)
print(mca_df.fs_c())
# Premier plan principal
col=[1,1,2,2,2,3,3,5,5,5,6,6,6,7,7,7]
plt.scatter(mca_df.fs_c()[:, 0],mca_df.fs_c()[:, 1],c=col)
for i, j, nom in zip(mca_df.fs_c()[:, 0],mca_df.fs_c()[:, 1], dc.columns):
plt.text(i, j, nom)
plt.show()
Explanation: Computing the MCA and plotting the results.
End of explanation |
14,582 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<!--BOOK_INFORMATION-->
<a href="https
Step1: This will download the data from the Amazon Cloud (might take a while depending on
your internet connection) and automatically split the data into training and test sets.
This data comes in a format that we are already familiar with
Step2: We should take note that the labels come as integer values between zero and nine
(corresponding to the digits 0-9)
Step3: We can have a look at some example digits
Step4: In fact, the MNIST dataset is the successor to the NIST digits dataset provided by scikit-learn
that we used before (sklearn.datasets.load_digits; refer to Chapter 2, Working
with Data in OpenCV and Python).
Some notable differences are as follows
Step5: This will transform the labels of the training set from a <n_samples x 1> vector with
integers 0-9 into a <n_samples x 10> matrix with floating point numbers 0.0 or 1.0.
Analogously, we can transform y_test using the same procedure
Step6: In addition, we need to preprocess X_train and X_test for the purpose of working with
OpenCV. Currently, X_train and X_test are 3-D matrices <n_samples x 28 x 28>
with integer values between 0 and 255. Preferably, we want a 2-D matrix <n_samples x
n_features> with floating point numbers, where n_features is 784
Step7: Then we are ready to train the network.
Training an MLP using OpenCV
We can set up and train an MLP in OpenCV with the following recipe
Step8: Specify the size of every layer in the network. We are free to add as many layers as we want, but we need to make sure that the first layer has the same number of neurons as input features (784 in our case), and that the last layer has the same number of neurons as class labels (10 in our case)
Step9: Specify an activation function. Here we use the sigmoidal activation function
from before
Step10: Specify the training method. Here we use the backpropagation algorithm
described above. We also need to make sure that we choose a small enough
learning rate. Since we have on the order of $10^5$ training samples, it is a good idea
to set the learning rate to at most $10^{-5}$
Step11: Specify the termination criteria. Here we use the same criteria as above
Step12: Train the network on the training set (X_train_pre)
Step13: But, of course, what really counts is the accuracy score we get on the held-out test data | Python Code:
from keras.datasets import mnist
(X_train, y_train), (X_test, y_test) = mnist.load_data()
Explanation: <!--BOOK_INFORMATION-->
<a href="https://www.packtpub.com/big-data-and-business-intelligence/machine-learning-opencv" target="_blank"><img align="left" src="data/cover.jpg" style="width: 76px; height: 100px; background: white; padding: 1px; border: 1px solid black; margin-right:10px;"></a>
This notebook contains an excerpt from the book Machine Learning for OpenCV by Michael Beyeler.
The code is released under the MIT license,
and is available on GitHub.
Note that this excerpt contains only the raw code - the book is rich with additional explanations and illustrations.
If you find this content useful, please consider supporting the work by
buying the book!
<!--NAVIGATION-->
< Getting Acquainted with Deep Learning | Contents | Training a Deep Neural Net to Classify Handwritten Digits Using Keras >
https://github.com/fchollet/keras/blob/master/examples/mnist_cnn.py
Training an MLP in OpenCV to Classify Handwritten Digits
In this section, we will use an MLP in OpenCV to classify
handwritten digits from the popular MNIST dataset, which has been constructed by Yann
LeCun and colleagues and serves as a popular benchmark dataset for machine learning
algorithms.
Loading the MNIST dataset
The easiest way to obtain the MNIST dataset is using Keras:
End of explanation
X_train.shape, y_train.shape
Explanation: This will download the data from the Amazon Cloud (might take a while depending on
your internet connection) and automatically split the data into training and test sets.
This data comes in a format that we are already familiar with:
End of explanation
import numpy as np
np.unique(y_train)
Explanation: We should take note that the labels come as integer values between zero and nine
(corresponding to the digits 0-9):
End of explanation
import matplotlib.pyplot as plt
%matplotlib inline
plt.figure(figsize=(16, 6))
for i in range(10):
plt.subplot(2, 5, i + 1)
plt.imshow(X_train[i, :, :], cmap='gray')
plt.axis('off')
plt.savefig('mnist-examples.png')
Explanation: We can have a look at some example digits:
End of explanation
from sklearn.preprocessing import OneHotEncoder
enc = OneHotEncoder(sparse=False, dtype=np.float32)
y_train_pre = enc.fit_transform(y_train.reshape(-1, 1))
Explanation: In fact, the MNIST dataset is the successor to the NIST digits dataset provided by scikit-learn
that we used before (sklearn.datasets.load_digits; refer to Chapter 2, Working
with Data in OpenCV and Python).
Some notable differences are as follows:
- MNIST images are significantly larger (28x28 pixels) than NIST images (8x8 pixels), thus paying more attention to fine details, such as distortions and individual differences between images of the same digit
- The MNIST dataset is much larger than the NIST dataset, providing 60,000 training and 10,000 test samples (as compared to a total of 5,620 NIST images)
Preprocessing the MNIST dataset
As we learned in Chapter 4, Representing Data and Engineering Features, there are a number
of preprocessing steps we might like to apply here, such as centering, scaling, and representing categorical features.
The easiest way to transform y_train and y_test is by the one-hot encoder from scikit-learn:
End of explanation
y_test_pre = enc.fit_transform(y_test.reshape(-1, 1))
Explanation: This will transform the labels of the training set from a <n_samples x 1> vector with
integers 0-9 into a <n_samples x 10> matrix with floating point numbers 0.0 or 1.0.
Analogously, we can transform y_test using the same procedure:
End of explanation
X_train_pre = X_train.astype(np.float32) / 255.0
X_train_pre = X_train_pre.reshape((X_train.shape[0], -1))
X_test_pre = X_test.astype(np.float32) / 255.0
X_test_pre = X_test_pre.reshape((X_test.shape[0], -1))
Explanation: In addition, we need to preprocess X_train and X_test for the purpose of working with
OpenCV. Currently, X_train and X_test are 3-D matrices <n_samples x 28 x 28>
with integer values between 0 and 255. Preferably, we want a 2-D matrix <n_samples x
n_features> with floating point numbers, where n_features is 784:
End of explanation
import cv2
mlp = cv2.ml.ANN_MLP_create()
Explanation: Then we are ready to train the network.
Training an MLP using OpenCV
We can set up and train an MLP in OpenCV with the following recipe:
Instantiate a new MLP object:
End of explanation
mlp.setLayerSizes(np.array([784, 512, 512, 10]))
Explanation: Specify the size of every layer in the network. We are free to add as many layers as we want, but we need to make sure that the first layer has the same number of neurons as input features (784 in our case), and that the last layer has the same number of neurons as class labels (10 in our case):
End of explanation
mlp.setActivationFunction(cv2.ml.ANN_MLP_SIGMOID_SYM, 2.5, 1.0)
Explanation: Specify an activation function. Here we use the sigmoidal activation function
from before:
End of explanation
mlp.setTrainMethod(cv2.ml.ANN_MLP_BACKPROP)
mlp.setBackpropWeightScale(0.0001)
Explanation: Specify the training method. Here we use the backpropagation algorithm
described above. We also need to make sure that we choose a small enough
learning rate. Since we have on the order of $10^5$ training samples, it is a good idea
to set the learning rate to at most $10^{-5}$:
End of explanation
term_mode = cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS
term_max_iter = 10
term_eps = 0.01
mlp.setTermCriteria((term_mode, term_max_iter, term_eps))
Explanation: Specify the termination criteria. Here we use the same criteria as above: to run
training for ten iterations (term_max_iter) or until the error does no longer
decrease significantly (term_eps):
End of explanation
mlp.train(X_train_pre, cv2.ml.ROW_SAMPLE, y_train_pre)
_, y_hat_train = mlp.predict(X_train_pre)
from sklearn.metrics import accuracy_score
accuracy_score(y_hat_train.round(), y_train_pre)
Explanation: Train the network on the training set (X_train_pre):
When the training completes, we can calculate the accuracy score on the training set to see
how far we got:
End of explanation
_, y_hat_test = mlp.predict(X_test_pre)
accuracy_score(y_hat_test.round(), y_test_pre)
Explanation: But, of course, what really counts is the accuracy score we get on the held-out test data:
End of explanation |
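As a small follow-up (not part of the book's recipe), the one-hot outputs can be turned back into digit labels with argmax to inspect a few of the test images the network gets wrong; this assumes the arrays from the cells above are still in scope.
import numpy as np
import matplotlib.pyplot as plt

y_pred = y_hat_test.argmax(axis=1)            # predicted digit per test sample
wrong = np.where(y_pred != y_test)[0]
print('misclassified: %d of %d' % (len(wrong), len(y_test)))

plt.figure(figsize=(16, 3))
for col, idx in enumerate(wrong[:5]):
    plt.subplot(1, 5, col + 1)
    plt.imshow(X_test[idx], cmap='gray')
    plt.title('%d as %d' % (y_test[idx], y_pred[idx]))
    plt.axis('off')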
14,583 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Composing and fitting distributions
Gilles Louppe, January 2016.
This notebook introduces the carl.distributions module. It illustrates how distributions can be defined and composed, and how their parameters can be fit to data.
Disclaimer
Step1: Base API
All base distributions from carl.distributions implement the DistributionMixin interface, providing the following methods
Step2: Advanced API
As shown above for simple cases, parameters of a distribution can be specified as scalars. In more complex situations, arbitrary Theano symbolic expressions defined from shared variables or constants can also be used to specify parameters, as demonstrated below.
Step3: Note
Step4: By construction, p.mu is a Theano symbolic expression which depends on several inputs, in this case a and b. Accordingly, the actual variables (or hyper-parameters) that fully define p are the shared variables a (created explicitly) and sigma (created implicitly from the scalar 2.0). In particular, mu is not an hyper-parameter of p since it is itself defined from the variable a and the constant b.
In terms of API, inputs of all expressions specified at initialization form together the hyper-parameters of the distribution, provided they are Theano shared variables. Hyper-parameters are stored in p.parameters_, while constants are stored in p.constants_.
Step5: Additionally, parameter expressions can be defined in terms of free Theano variables that are not (yet) tied to any value. These auxiliary inputs will need to be passed at evaluation. All required extra inputs are stored in p.observeds_.
Step6: Composing mixtures
Mixtures distributions can be composed from a set of other distribution objects, as long as they implement the DistributionMixin API.
Weights assigned to components are themselves parameters, and can be provided either as a list of scalars or as a list of Theano expressions.
Step7: Note that weights are automatically normalized such that they sum to 1.
Fitting parameters to data
Finally, distribution objects also implement a fit method for fitting through maximum likelihood all parameters from p.parameters_
The Theano expression engine is used to symbolically derive the gradient of the objective function with respect to the parameters. This gradient function is then used to guide to underlying optimization algorithm. If needed, bounds and constraints can also be specified. | Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
import theano
import theano.tensor as T
Explanation: Composing and fitting distributions
Gilles Louppe, January 2016.
This notebook introduces the carl.distributions module. It illustrates how distributions can be defined and composed, and how their parameters can be fit to data.
Disclaimer: This module is meant to be a proof-of-concept that a full Python+Theano based RooFit equivalent could realistically be achieved. At the moment, the module remains very experimental and should not be used in production.
End of explanation
from carl.distributions import Normal
p = Normal(mu=0.0, sigma=1.0)
reals = np.linspace(-5, 5, num=1000)
pdf = p.pdf(X=reals.reshape(-1, 1)) # X is a 2D array of shape n_samples x n_features
print(pdf[:10])
plt.plot(reals, pdf,label="pdf(x)")
plt.legend(loc="best")
plt.show()
reals = np.linspace(-5, 5, num=1000)
plt.plot(reals, p.nll(reals.reshape(-1, 1)), label="-log(pdf(x))")
plt.legend(loc="best")
plt.show()
reals = np.linspace(-5, 5, num=1000)
plt.plot(reals, p.cdf(reals.reshape(-1, 1)), label="cdf(x)")
plt.legend(loc="best")
plt.show()
reals = np.linspace(0, 1, num=1000)
plt.plot(reals, p.ppf(reals.reshape(-1, 1)), label="ppf(x)")
plt.legend(loc="best")
plt.show()
p.rvs(n_samples=10000)
Explanation: Base API
All base distributions from carl.distributions implement the DistributionMixin interface, providing the following methods:
pdf(X) for computing the probability density at X
nllf(X) for computing -log(pdf(X))
rvs(n_samples) for sampling data
cdf(X) for computing the cumulative density at X (for 1D)
ppf(p) for computing the p-th quantile (for 1D)
Parameters of a distribution (e.g., the location mu of a normal distribution) are specified at initialization.
End of explanation
a = theano.shared(1.0, name="a")
b = T.constant(0.5, name="b")
p = Normal(mu=a * b, sigma=2.0)
Explanation: Advanced API
As shown above for simple cases, parameters of a distribution can be specified as scalars. In more complex situations, arbitrary Theano symbolic expressions defined from shared variables or constants can also be used to specify parameters, as demonstrated below.
End of explanation
# Parameters are Theano symbolic expressions
print(type(p.mu))
print(type(p.sigma)) # sigma=2.0 was embedded into a shared variable
Explanation: Note: Under the hood, if a parameter is specified as a scalar, then it is in fact automatically embedded into a Theano shared variable.
End of explanation
p.parameters_ # all input parameters (note that mu is not part of those!)
p.constants_ # all input constants
Explanation: By construction, p.mu is a Theano symbolic expression which depends on several inputs, in this case a and b. Accordingly, the actual variables (or hyper-parameters) that fully define p are the shared variables a (created explicitly) and sigma (created implicitly from the scalar 2.0). In particular, mu is not an hyper-parameter of p since it is itself defined from the variable a and the constant b.
In terms of API, inputs of all expressions specified at initialization form together the hyper-parameters of the distribution, provided they are Theano shared variables. Hyper-parameters are stored in p.parameters_, while constants are stored in p.constants_.
End of explanation
a = T.dmatrix(name="a") # free input to be specified at evaluation
b = theano.shared(-1.0, name="b")
c = theano.shared(1.0, name="c")
p = Normal(mu=a*b + c)
p.parameters_
p.constants_
p.observeds_
p.pdf(X=np.array([[0.0], [0.0]]),
a=np.array([[1.0], [2.0]])) # specify the auxiliary input `a` at evaluation
# Plot pdf(x, a)
import mpl_toolkits.mplot3d.axes3d as axes3d
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
Xs = np.arange(-5, 5, 0.25)
As = np.arange(-5, 5, 0.25)
Xs, As = np.meshgrid(Xs, As)
Ps = p.pdf(X=Xs.reshape(-1, 1),
a=As.reshape(-1, 1))
Ps = Ps.reshape(Xs.shape)
ax.plot_wireframe(Xs, As, Ps, rstride=4, cstride=4, alpha=0.5)
ax.set_xlabel("x")
ax.set_ylabel("a")
ax.set_zlabel("p(x, a)")
plt.show()
Explanation: Additionally, parameter expressions can be defined in terms of free Theano variables that are not (yet) tied to any value. These auxiliary inputs will need to be passed at evaluation. All required extra inputs are stored in p.observeds_.
End of explanation
from carl.distributions import Mixture
components = [
Normal(mu=-2.0, sigma=0.75), # c0
Normal(mu=0.0, sigma=2.0), # c1
Normal(mu=1.0, sigma=0.5) # c2 (bump)
]
g = theano.shared(0.05, name="g")
p = Mixture(components=components, weights=[0.5 - g / 2., 0.5 - g / 2., g])
p.parameters_ # union of all component parameters + g
reals = np.linspace(-5, 5, num=1000)
plt.plot(reals, p.pdf(reals.reshape(-1, 1)), label="pdf(x)")
plt.legend()
plt.show()
reals = np.linspace(-5, 5, num=1000)
plt.plot(reals, p.cdf(reals.reshape(-1, 1)), label="cdf(x)")
plt.legend()
plt.show()
p.weights
p.compute_weights()
Explanation: Composing mixtures
Mixtures distributions can be composed from a set of other distribution objects, as long as they implement the DistributionMixin API.
Weights assigned to components are themselves parameters, and can be provided either as a list of scalars or as a list of Theano expressions.
End of explanation
# Target distribution
p0 = Mixture(components=[Normal(mu=1.0, sigma=1.0), Normal(mu=4.0, sigma=1.0)],
weights=[0.7, 0.3])
# Fit components[0].mu and mixture weights, freeze all others
w = theano.shared(0.5, name="w")
p1 = Mixture(components=[Normal(mu=0.0, sigma=T.constant(1.0)),
Normal(mu=T.constant(4.0), sigma=T.constant(1.0))],
weights=[w, 1.0 - w])
p1.parameters_
X = p0.rvs(10000)
p1.fit(X, bounds=[{"param": w, "bounds": (0.5, 1.0)}], use_gradient=False)
p1.components[0].mu.eval()
p1.compute_weights()
reals = np.linspace(-5, 5, num=1000)
plt.hist(X.ravel(), bins=100, normed=1, alpha=0.5, label="x~p0")
plt.plot(reals, p1.pdf(reals.reshape(-1, 1)), label="p1(x)")
plt.legend()
plt.show()
Explanation: Note that weights are automatically normalized such that they sum to 1.
Fitting parameters to data
Finally, distribution objects also implement a fit method for fitting through maximum likelihood all parameters from p.parameters_
The Theano expression engine is used to symbolically derive the gradient of the objective function with respect to the parameters. This gradient function is then used to guide to underlying optimization algorithm. If needed, bounds and constraints can also be specified.
End of explanation |
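The symbolic gradient machinery the last paragraph refers to is plain Theano; a tiny stand-alone illustration (independent of carl):
import theano
import theano.tensor as T

x = T.dscalar("x")
loss = (x - 3.0) ** 2
grad = T.grad(loss, x)                 # symbolic derivative: 2 * (x - 3)
f = theano.function([x], [loss, grad])
print(f(1.0))                          # loss 4.0, gradient -4.0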
14,584 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
FDMS TME3
Kaggle How Much Did It Rain? II
Florian Toque & Paul Willot
Dear professor Denoyer...
Warning
This is an early version of our entry for the Kaggle challenge
It's still very messy and we send it because we forgot that we had to submit our progress step by step...
To summarize our goal, we plan to use a RNN to take advantage of the sequential data
Step1: 13.765.202 lines in train.csv
8.022.757 lines in test.csv
Reduced to
10.000
5.000
Step2: Get rid of Nan value for now
Step3: Forums indicate that a higher than 1m rainfall is probably an error. Which is quite understandable. We filter that out
Step5: Memento (mauri)
Step6: Submit
Step9: RNN | Python Code:
# from __future__ import exam_success
from __future__ import absolute_import
from __future__ import print_function
%matplotlib inline
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import random
import pandas as pd
# Sk cheats
from sklearn.cross_validation import cross_val_score # cross val
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.preprocessing import Imputer # get rid of nan
Explanation: FDMS TME3
Kaggle How Much Did It Rain? II
Florian Toque & Paul Willot
Dear professor Denoyer...
Warning
This is an early version of our entry for the Kaggle challenge
It's still very messy and we send it because we forgot that we had to submit our progress step by step...
To summarize our goal, we plan to use an RNN to take advantage of the sequential data
End of explanation
filename = "data/reduced_train_10000.csv"
train = pd.read_csv(filename)
train = train.set_index('Id')
train = train.dropna()
train.head()
train["Expected"].describe()
Explanation: 13.765.202 lines in train.csv
8.022.757 lines in test.csv
Reduced to
10.000
5.000
End of explanation
#train_clean = train[[not i for i in np.isnan(train["Ref_5x5_10th"])]]
Explanation: Get rid of Nan value for now
End of explanation
train = train[train['Expected'] < 1000]
train['Expected'].describe()
Explanation: Forums indicate that a higher than 1m rainfall is probably an error. Which is quite understandable. We filter that out
End of explanation
etreg = ExtraTreesRegressor(n_estimators=100, max_depth=None, min_samples_split=1, random_state=0)
columns = train.columns  # note: train_clean was commented out above; the cleaned data lives in train
columns = ["minutes_past","radardist_km","Ref","Ref_5x5_10th", "Ref_5x5_50th"]
columns = [u'Id', u'minutes_past', u'radardist_km', u'Ref', u'Ref_5x5_10th',
u'Ref_5x5_50th', u'Ref_5x5_90th', u'RefComposite',
u'RefComposite_5x5_10th', u'RefComposite_5x5_50th',
u'RefComposite_5x5_90th', u'RhoHV', u'RhoHV_5x5_10th',
u'RhoHV_5x5_50th', u'RhoHV_5x5_90th', u'Zdr', u'Zdr_5x5_10th',
u'Zdr_5x5_50th', u'Zdr_5x5_90th', u'Kdp', u'Kdp_5x5_10th',
u'Kdp_5x5_50th', u'Kdp_5x5_90th', u'Expected']
columns = [u'minutes_past', u'radardist_km', u'Ref', u'Ref_5x5_10th',
u'Ref_5x5_50th', u'Ref_5x5_90th', u'RefComposite',
u'RefComposite_5x5_10th', u'RefComposite_5x5_50th',
u'RefComposite_5x5_90th', u'RhoHV', u'RhoHV_5x5_10th',
u'RhoHV_5x5_50th', u'RhoHV_5x5_90th', u'Zdr', u'Zdr_5x5_10th',
u'Zdr_5x5_50th', u'Zdr_5x5_90th', u'Kdp', u'Kdp_5x5_10th',
u'Kdp_5x5_50th', u'Kdp_5x5_90th']
labels = train["Expected"].values   # train already holds the cleaned rows (dropna + < 1000 filter)
features = train[list(columns)].values
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
imp.fit(features)
features_trans = imp.transform(features)
ftrain = features_trans[:3000]
ltrain = labels[:3000]
ftest = features_trans[3000:]
ltest = labels[3000:]
%%time
etreg.fit(ftrain,ltrain)
def scorer(estimator, X, y):
return (estimator.predict(X[0])-y)**2
%%time
et_score = cross_val_score(etreg, features_trans, labels, cv=5)
print("Features: %s\nScore: %s\tMean: %.03f"%(columns, et_score,et_score.mean()))
r = random.randrange(len(ltrain))
print(r)
print(etreg.predict(ftrain[r]))
print(ltrain[r])
r = random.randrange(len(ltest))
print(r)
print(etreg.predict(ftest[r]))
print(ltest[r])
err = (etreg.predict(ftest)-ltest)**2
err.sum()/len(err)
Explanation: Memento (mauri)
End of explanation
filename = "data/reduced_test_5000.csv"
test = pd.read_csv(filename)
columns = [u'minutes_past', u'radardist_km', u'Ref', u'Ref_5x5_10th',
u'Ref_5x5_50th', u'Ref_5x5_90th', u'RefComposite',
u'RefComposite_5x5_10th', u'RefComposite_5x5_50th',
u'RefComposite_5x5_90th', u'RhoHV', u'RhoHV_5x5_10th',
u'RhoHV_5x5_50th', u'RhoHV_5x5_90th', u'Zdr', u'Zdr_5x5_10th',
u'Zdr_5x5_50th', u'Zdr_5x5_90th', u'Kdp', u'Kdp_5x5_10th',
u'Kdp_5x5_50th', u'Kdp_5x5_90th']
features = test[list(columns)].values
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
imp.fit(features)
features_trans = imp.transform(features)
fall = test[test.columns].values
fall[20]
features_trans[0]
i = 1
pred = 0
while fall[i][0] == 1:
#print(fall[i])
pred+=etreg.predict(features_trans[i])[0]
#print(etreg.predict(features_trans[i])[0])
i+=1
print(i)
fall[-1][0]
%%time
res=[]
i=0
while i<len(fall) and i < 10000:
pred = 0
lenn = 0
curr=fall[i][0]
while i<len(fall) and fall[i][0] == curr:
#print(fall[i])
pred+=etreg.predict(features_trans[i])[0]
#print(etreg.predict(features_trans[i])[0])
i+=1
lenn += 1
res.append((curr,pred/lenn))
#i+=1
#print(i)
len(res)
res[:10]
def myfunc(hour):
#rowid = hour['Id'].iloc[0]
# sort hour by minutes_past
hour = hour.sort('minutes_past', ascending=True)
#est = (hour['Id'],random.random())
est = random.random()
return est
def marshall_palmer(ref, minutes_past):
#print("Estimating rainfall from {0} observations".format(len(minutes_past)))
# how long is each observation valid?
valid_time = np.zeros_like(minutes_past)
valid_time[0] = minutes_past.iloc[0]
for n in xrange(1, len(minutes_past)):
valid_time[n] = minutes_past.iloc[n] - minutes_past.iloc[n-1]
valid_time[-1] = valid_time[-1] + 60 - np.sum(valid_time)
valid_time = valid_time / 60.0
# sum up rainrate * validtime
sum = 0
for dbz, hours in zip(ref, valid_time):
# See: https://en.wikipedia.org/wiki/DBZ_(meteorology)
if np.isfinite(dbz):
mmperhr = pow(pow(10, dbz/10)/200, 0.625)
sum = sum + mmperhr * hours
return sum
def simplesum(ref,hour):
hour.sum()
# each unique Id is an hour of data at some gauge
def myfunc(hour):
#rowid = hour['Id'].iloc[0]
# sort hour by minutes_past
hour = hour.sort('minutes_past', ascending=True)
est = marshall_palmer(hour['Ref'], hour['minutes_past'])
return est
estimates = train.groupby(train.index).apply(myfunc)
estimates.head(20)
train["Expected"].head(20)
res=[]
for i in fall:
pred = 0
curr=i[0]
while fall[i][0] == 1:
#print(fall[i])
pred+=etreg.predict(features_trans[i])[0]
#print(etreg.predict(features_trans[i])[0])
i+=1
print(i)
etreg.predict(features_trans[0])
def marshall_palmer(data):
res=[]
for n in data:
res.append(etreg.predict(n)[0])
return np.array(res).mean()
def simplesum(ref,hour):
hour.sum()
def myfunc(hour):
hour = hour.sort('minutes_past', ascending=True)
est = marshall_palmer(hour[train.columns])
return est
estimates = train.groupby(train.index).apply(myfunc)  # use the cleaned train frame
estimates.head(20)
Explanation: Submit
End of explanation
import pandas as pd
from random import random
flow = (list(range(1,10,1)) + list(range(10,1,-1)))*1000
pdata = pd.DataFrame({"a":flow, "b":flow})
pdata.b = pdata.b.shift(9)
data = pdata.iloc[10:] * random() # some noise
#columns = [u'minutes_past', u'radardist_km', u'Ref', u'Ref_5x5_10th',
# u'Ref_5x5_50th', u'Ref_5x5_90th', u'RefComposite',
# u'RefComposite_5x5_10th', u'RefComposite_5x5_50th',
# u'RefComposite_5x5_90th', u'RhoHV', u'RhoHV_5x5_10th',
# u'RhoHV_5x5_50th', u'RhoHV_5x5_90th', u'Zdr', u'Zdr_5x5_10th',
# u'Zdr_5x5_50th', u'Zdr_5x5_90th', u'Kdp', u'Kdp_5x5_10th',
# u'Kdp_5x5_50th', u'Kdp_5x5_90th']
columns = [u'radardist_km', u'Ref', u'Ref_5x5_10th']
nb_features = len(columns)
data = train[list(columns)]
data.head(10)
data.iloc[0].as_matrix()
train.head(5)
train.loc[11]
train.loc[11][:1]["Expected"].as_matrix
#train.index.unique()
def _load_data(data, n_prev = 100):
    """data should be pd.DataFrame()"""
docX, docY = [], []
for i in range(len(data)-n_prev):
docX.append(data.iloc[i:i+n_prev].as_matrix())
docY.append(data.iloc[i+n_prev].as_matrix())
alsX = np.array(docX)
alsY = np.array(docY)
return alsX, alsY
def train_test_split(df, test_size=0.1):
ntrn = round(len(df) * (1 - test_size))
X_train, y_train = _load_data(df.iloc[0:ntrn])
X_test, y_test = _load_data(df.iloc[ntrn:])
return (X_train, y_train), (X_test, y_test)
(X_train, y_train), (X_test, y_test) = train_test_split(data)
np.shape(X_train)
t = np.array([2,1])
t.shape = (1,2)
t.tolist()[0]
np.shape(t)
X_train[:2,:2]
XX[:2,:2]
XX[:2][:2]
np.shape(XX)
for i in XX:
print(np.shape(i))
np.shape(XX[0])
z = np.zeros([297,9,23])
np.shape(z)
np.shape(np.reshape(XX,(297,1)))
tl = train.loc[2][:1]["Expected"]
tl.as_blocks()
tl.as_matrix()
data.iloc[2:4].as_matrix()
train.loc[2].as_matrix()
m = data.loc[10].as_matrix()
pad = np.pad(m, ((0, max_padding -len(m) ),(0,0)), 'constant')
pad
train.index.unique()
max_padding = 20
%%time
docX, docY = [], []
for i in train.index.unique():
if isinstance(train.loc[i],pd.core.series.Series):
m = [data.loc[i].as_matrix()]
pad = np.pad(m, ((max_padding -len(m), 0),(0,0)), 'constant') # pre-padding
docX.append(pad)
docY.append(float(train.loc[i]["Expected"]))
else:
m = data.loc[i].as_matrix()
pad = np.pad(m, ((max_padding -len(m), 0),(0,0)), 'constant')
docX.append(pad)
docY.append(float(train.loc[i][:1]["Expected"]))
#docY.append(train.loc[i][:1]["Expected"].as_matrix)
XX = np.array(docX)
yy = np.array(docY)
np.shape(XX)
#from keras.preprocessing import sequence
#sequence.pad_sequences(X_train, maxlen=maxlen)
def _load_data(data):
    """data should be pd.DataFrame()"""
docX, docY = [], []
for i in data.index.unique():
#np.pad(tmp, ((0, max_padding -len(tmp) ),(0,0)), 'constant')
m = data.loc[i].as_matrix()
pad = np.pad(m, ((0, max_padding -len(m) ),(0,0)), 'constant')
docX.append(pad)
if isinstance(train.loc[i],pd.core.series.Series):
docY.append(float(train.loc[i]["Expected"]))
else:
docY.append(float(train.loc[i][:1]["Expected"]))
alsX = np.array(docX)
alsY = np.array(docY)
return alsX, alsY
def train_test_split(df, test_size=0.1):
ntrn = round(len(df) * (1 - test_size))
X_train, y_train = _load_data(df.iloc[0:ntrn])
X_test, y_test = _load_data(df.iloc[ntrn:])
return (X_train, y_train), (X_test, y_test)
(X_train, y_train), (X_test, y_test) = train_test_split(train)
len(X_train[0])
train.head()
X_train[0][:10]
yt = []
for i in y_train:
yt.append([i[0]])
yt[0]
X_train.shape
len(fea[0])
len(X_train[0][0])
f = np.array(fea)
f.shape()
XX[0]
#(X_train, y_train), (X_test, y_test) = train_test_split(data) # retrieve data
# and now train the model
# batch_size should be appropriate to your memory size
# number of epochs should be higher for real world problems
model.fit(X_train, yt, batch_size=450, nb_epoch=2, validation_split=0.05)
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.layers.embeddings import Embedding
%%time
input_dim = nb_features
out_dim = 1
hidden_dim = 200
model = Sequential()
#Embedding(input_dim, hidden_dim, mask_zero=True)
#model.add(LSTM(hidden_dim, hidden_dim, return_sequences=False))
model.add(LSTM(input_dim, hidden_dim, return_sequences=False))
model.add(Dropout(0.5))
model.add(Dense(hidden_dim, out_dim))
model.add(Activation("linear"))
model.compile(loss="mean_squared_error", optimizer="rmsprop")
model.fit(XX, yy, batch_size=10, nb_epoch=10, validation_split=0.1)
test = random.randint(0,len(XX))
print(model.predict(XX[test:test+1])[0][0])
print(yy[test])
Explanation: RNN
End of explanation |
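A hedged note on the padding step above: the helper hinted at in the commented-out keras.preprocessing.sequence import can replace the manual np.pad loop, since recent Keras versions keep any trailing feature dimensions. This is a toy sketch and worth checking against the installed Keras version before relying on it.
import numpy as np
from keras.preprocessing import sequence

seqs = [np.ones((3, 4)), np.ones((7, 4))]   # two gauge-hours with 3 and 7 radar scans, 4 features each
padded = sequence.pad_sequences(seqs, maxlen=20, dtype='float32', padding='pre')
print(padded.shape)                         # expected: (2, 20, 4)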
14,585 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<img src="../images/qiskit-heading.gif" alt="Note
Step2: Notice that each of the codes is represented by a bitstring of length 64. By comparing characters at the same position in the strings, we can see that Protozoan's is closer to Bacterial's than Yeast's.
Exploiting quantum superposition, we can create quantum states by using only 7 qubits such that each of the quantum states corresponds to the genetic code of Yeast, Protozoan, and Bacterial. We then compare the closeness of their genetic codes by comparing their quantum states, which is made possible by the reversibility of quantum circuit.
The reversibility of quantum circuit to test the similarity of quantum states works as follow. Assume that we can create a quantum superposition starting from all-zero states by a quantum circuit. Then by inverting the same quantum circuit and we give it the same quantum superposition as input, we will get exactly all-zero bits as the output. Now, when we give a similar quantum superposition as input to the inverted circuit, we can still get all-zero bits as the output with probability proportional to the similarity of the quantum states
Step3: We can now create quantum circuits to create the quantum states for the Yeast's, Protozoan's, and Bacterial's.
Step4: Inverting quantum circuit
We can easily invert a quantum circuit by inverse() function. These inversed circuits are desirable to compute the closeness of the quantum states.
Step5: Comparing bitsrings
We can now compare how close the starts of the genetic codes of Protozoan to Yeast's and Bacterial's by performing the test. | Python Code:
YEAST = "----------------------------------MM----------------------------"
PROTOZOAN = "--MM---------------M------------MMMM---------------M------------"
BACTERIAL = "---M---------------M------------MMMM---------------M------------"
Explanation: <img src="../images/qiskit-heading.gif" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="500 px" align="left">
Comparing Strings with Quantum Superposition
The latest version of this notebook is available on https://github.com/QISKit/qiskit-tutorial.
For more information about how to use the IBM Q Experience (QX), consult the tutorials, or check out the community.
Contributors
Rudy Raymond
Motivation
If we can use quantum states to represent genetic codes, we may be able to compare them, and/or find similar genetic codes quickly.
For example, according to this site the starts of the genetic codes for the Yeast Mitochondrial, Protozoan Mitochondrial, and Bacterial Code are respectively as follow.
End of explanation
import sys
import numpy as np
import math
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import CompositeGate
from qiskit import execute, register, available_backends
def encode_bitstring(bitstring, qr, cr, inverse=False):
create a circuit for constructing the quantum superposition of the bitstring
n = math.ceil(math.log2(len(bitstring))) + 1 #number of qubits
assert n > 2, "the length of bitstring must be at least 2"
qc = QuantumCircuit(qr, cr)
#the probability amplitude of the desired state
desired_vector = np.array([ 0.0 for i in range(2**n) ]) #initialize to zero
amplitude = np.sqrt(1.0/2**(n-1))
for i, b in enumerate(bitstring):
pos = i * 2
if b == "1" or b == "M":
pos += 1
desired_vector[pos] = amplitude
if not inverse:
qc.initialize(desired_vector, [ qr[i] for i in range(n) ] )
qc.barrier(qr)
else:
qc.initialize(desired_vector, [ qr[i] for i in range(n) ] ).inverse() #invert the circuit
for i in range(n):
qc.measure(qr[i], cr[i])
print()
return qc
Explanation: Notice that each of the codes is represented by a bitstring of length 64. By comparing characters at the same position in the strings, we can see that Protozoan's code is closer to Bacterial's than to Yeast's.
Exploiting quantum superposition, we can create quantum states using only 7 qubits such that each quantum state corresponds to the genetic code of Yeast, Protozoan, or Bacterial. We then compare the closeness of the genetic codes by comparing their quantum states, which is made possible by the reversibility of quantum circuits.
Using the reversibility of a quantum circuit to test the similarity of quantum states works as follows. Assume we can create a quantum superposition starting from the all-zero state with a quantum circuit. If we invert that circuit and give it the same superposition as input, we get exactly the all-zero bits as output. If we instead give the inverted circuit a similar superposition as input, we still obtain the all-zero bits as output with probability proportional to the similarity of the quantum states: the more similar they are, the more often we observe all-zero bits.
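One way to make this quantitative (a standard identity, stated here for clarity and not spelled out in the original): if $|\psi_a\rangle = U_a|0\ldots 0\rangle$ and $|\psi_b\rangle = U_b|0\ldots 0\rangle$, then measuring $U_b^{\dagger}U_a|0\ldots 0\rangle$ in the computational basis returns the all-zero string with probability
$$\left|\langle 0\ldots 0|U_b^{\dagger}U_a|0\ldots 0\rangle\right|^2 = \left|\langle \psi_b|\psi_a\rangle\right|^2,$$
i.e. the squared overlap of the two encoded states, which is what the similarity score below estimates from the measurement counts.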
Thus, to decide which code (Yeast's or Bacterial's) is the most similar to the Protozoan, we can do the following:
We first prepare the quantum state that encodes the Protozoan's
We then use the quantum state as inputs to the inverted circuits that each prepare the quantum state of Yeast's and Bacterial's. Run and measure the circuits
Output the name of the inverted circuit whose measurements result in more frequent measurements of all-zero bits.
Quantum Superposition for Bitstrings
A qubit can be in a superposition of two basis states: "0" and "1" at the same time. Going further, two qubits can be in a superposition of four basis states: "00", "01", "10", and "11". In general, $n$ qubits can be in a superposition of $2^n$ (exponential in the number of qubits!) basis states.
Here, we show a simple example of creating quantum superpositions for bitstrings and using them to compare the similarity between two bitstrings. This tutorial makes use of the quantum state initialization function and circuit inversion. It also illustrates the power of loading data into quantum states.
Comparing bitstrings of length 64 with 7 qubits
Let's say we have the three genetic codes above.
YEAST = "----------------------------------MM----------------------------"
PROTOZOAN = "--MM---------------M------------MMMM---------------M------------"
BACTERIAL = "---M---------------M------------MMMM---------------M------------"
Let's use 7 qubits to encode the above codes: the first 6 qubits index the location in the code (we have 64 positions, numbered 0 to 63), and the last qubit holds the content of the code (we use "0" for "-" and "1" for "M"). Thus, numbering the positions of the code from left to right, we can create a quantum state for each of the codes as below:
\begin{eqnarray}
|YEAST \rangle &=& \frac{1}{8} \left( |000000\rangle |0\rangle + |000001\rangle |0\rangle + |000010\rangle |0\rangle + |000011\rangle |0\rangle + \ldots \right) \\
|PROTOZOAN \rangle &=& \frac{1}{8} \left( |000000\rangle |0\rangle + |000001\rangle |0\rangle + |000010\rangle |1\rangle + |000011\rangle |1\rangle + \ldots \right) \\
|BACTERIAL \rangle &=& \frac{1}{8} \left( |000000\rangle |0\rangle + |000001\rangle |0\rangle + |000010\rangle |0\rangle + |000011\rangle |1\rangle + \ldots \right)
\end{eqnarray}
The first four characters of Yeast's code are all "-", and therefore in the state above the second register of each of the corresponding terms is "0". And so on.
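As a purely classical sanity check (a sketch added here for illustration; the helper name and variable names are not part of the original notebook), we can build the same 128-entry amplitude vectors with NumPy and compute the squared overlaps directly. The Protozoan/Bacterial value should come out larger than the Protozoan/Yeast one.
import numpy as np
def amplitude_vector(code, n=7):
    # same layout as encode_bitstring: 6 index qubits + 1 content qubit
    vec = np.zeros(2 ** n)
    amp = np.sqrt(1.0 / 2 ** (n - 1))
    for i, b in enumerate(code):
        vec[2 * i + (1 if b == "M" else 0)] = amp
    return vec
y, p, b = (amplitude_vector(c) for c in (YEAST, PROTOZOAN, BACTERIAL))
print("|<PROTOZOAN|YEAST>|^2     =", np.dot(p, y) ** 2)
print("|<PROTOZOAN|BACTERIAL>|^2 =", np.dot(p, b) ** 2)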
Creating quantum superposition for genetic codes
Below is the python function to create a quantum superposition for a given genetic code as above.
End of explanation
n = math.ceil(math.log2(len(YEAST))) + 1 #number of qubits
qr = QuantumRegister(n)
cr = ClassicalRegister(n)
qc_yeast = encode_bitstring(YEAST, qr, cr)
qc_protozoan = encode_bitstring(PROTOZOAN, qr, cr)
qc_bacterial = encode_bitstring(BACTERIAL, qr, cr)
circs = {"YEAST": qc_yeast, "PROTOZOAN": qc_protozoan, "BACTERIAL": qc_bacterial}
Explanation: We can now create the quantum circuits that prepare the quantum states for the Yeast, Protozoan, and Bacterial codes.
End of explanation
inverse_qc_yeast = encode_bitstring(YEAST, qr, cr, inverse=True)
inverse_qc_protozoan = encode_bitstring(PROTOZOAN, qr, cr, inverse=True)
inverse_qc_bacterial = encode_bitstring(BACTERIAL, qr, cr, inverse=True)
inverse_circs = {"YEAST": inverse_qc_yeast, "PROTOZOAN": inverse_qc_protozoan, "BACTERIAL": inverse_qc_bacterial}
Explanation: Inverting a quantum circuit
We can easily invert a quantum circuit with the inverse() function. These inverted circuits are what we need to compute the closeness of the quantum states.
End of explanation
print("Available backends:", available_backends())
key = "PROTOZOAN" #the name of the code used as key to find similar ones
# use local simulator
backend = "local_qasm_simulator"
shots = 1000
combined_circs = {}
count = {}
most_similar, most_similar_score = "", -1.0
for other_key in inverse_circs:
if other_key == key:
continue
combined_circs[other_key] = circs[key] + inverse_circs[other_key] #combined circuits to look for similar codes
job = execute(combined_circs[other_key], backend=backend,shots=shots)
st = job.result().get_counts(combined_circs[other_key])
if "0"*n in st:
sim_score = st["0"*n]/shots
else:
sim_score = 0.0
print("Similarity score of",key,"and",other_key,"is",sim_score)
if most_similar_score < sim_score:
most_similar, most_similar_score = other_key, sim_score
print("[ANSWER]", key,"is most similar to", most_similar)
Explanation: Comparing bitstrings
We can now compare how close the start of the Protozoan genetic code is to Yeast's and to Bacterial's by performing the test.
End of explanation |
14,586 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Sebastian Raschka
back to the matplotlib-gallery at https://github.com/rasbt/matplotlib-gallery
Step1: <font size="1.5em">More info about the %watermark extension</font>
Step2: <br>
<br>
Matplotlib Formatting II: gridlines
Step3: <br>
<br>
Default grid
[back to top]
Step4: Or alternatively
Step5: <br>
<br>
Vertical and horizontal grids
[back to top]
Vertical grid
[back to top]
Step6: Horizontal grid
[back to top]
Step7: <br>
<br>
Controlling the gridline style
[back to top]
Changing the tick frequency
[back to top]
Step8: Changing the tick color and linestyle
[back to top] | Python Code:
%load_ext watermark
%watermark -u -v -d -p matplotlib,numpy
Explanation: Sebastian Raschka
back to the matplotlib-gallery at https://github.com/rasbt/matplotlib-gallery
End of explanation
%matplotlib inline
Explanation: <font size="1.5em">More info about the %watermark extension</font>
End of explanation
import numpy as np
import random
from matplotlib import pyplot as plt
data = np.random.normal(0, 20, 1000)
# fixed bin size
bins = np.arange(-100, 100, 5) # fixed bin size
plt.xlim([min(data)-5, max(data)+5])
plt.hist(data, bins=bins, alpha=0.5)
plt.show()
Explanation: <br>
<br>
Matplotlib Formatting II: gridlines
Sections
Generating some sample data
Default grid
Vertical and horizontal grids
Vertical grid
Horizontal grid
Controlling the gridline style
Changing the tick frequency
Changing the tick color and linestyle
<br>
<br>
Generating some sample data
[back to top]
End of explanation
plt.hist(data, bins=bins, alpha=0.5)
plt.grid()
plt.show()
Explanation: <br>
<br>
Default grid
[back to top]
End of explanation
plt.hist(data, bins=bins, alpha=0.5)
ax = plt.gca()
ax.grid(True)
plt.show()
Explanation: Or alternatively:
End of explanation
plt.hist(data, bins=bins, alpha=0.5)
ax = plt.gca()
ax.xaxis.grid(True)
plt.show()
Explanation: <br>
<br>
Vertical and horizontal grids
[back to top]
Vertical grid
[back to top]
End of explanation
plt.hist(data, bins=bins, alpha=0.5)
ax = plt.gca()
ax.yaxis.grid(True)
plt.show()
Explanation: Horizontal grid
[back to top]
End of explanation
import numpy as np
# major ticks every 10
major_ticks = np.arange(-100, 101, 10)
ax = plt.gca()
ax.yaxis.grid()
ax.set_yticks(major_ticks)
plt.hist(data, bins=bins, alpha=0.5)
plt.show()
Explanation: <br>
<br>
Controlling the gridline style
[back to top]
Changing the tick frequency
[back to top]
End of explanation
from matplotlib import rcParams
rcParams['grid.linestyle'] = '-'
rcParams['grid.color'] = 'blue'
rcParams['grid.linewidth'] = 0.2
plt.grid()
plt.hist(data, bins=bins, alpha=0.5)
plt.show()
Explanation: Changing the tick color and linestyle
[back to top]
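If changing the global rcParams feels too heavy-handed, the same styling can be applied to a single Axes by passing Line2D keyword arguments to grid() — a small variation on the cell above (the colour and linestyle values are just placeholders; data and bins come from the earlier cells):
plt.hist(data, bins=bins, alpha=0.5)
ax = plt.gca()
ax.grid(True, color='blue', linestyle='-', linewidth=0.2)  # affects only this Axes
plt.show()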
End of explanation |
14,587 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<img src='http://pycircle.org/static/pycircle_big.png' style="margin-left:auto; margin-right:auto; height:70%; width:70%">
Step1: Built-in functions
Step2: Tuples (krotka)
Step3: How does a tuple differ from a list?
Sets (zbiory)
Step4: Simple math
Step5: A bit of functional programming
map, filter, reduce
lambda expressions $\lambda$
Step6: More information about the built-in functions at https://docs.python.org/2/library/functions.html
Step7: Functions
Step8: LEGB
<img src="http://sandeeps.in/_images/python_legb.png" style="margin-left:auto; margin-right:auto;">
Step9: Functions are objects too!
Step10: Function factories
Step11: Exercises 2
1. Write a function that creates a file with the square roots of the numbers in the range $[0, 1]$, each on a separate line
2. Write a function that reads the square roots from the file from the previous exercise, computes their sum and appends it to the file
3. Write a function that behaves like ''.join() using reduce
help([1, 2, 3])
dir([1, 2, 3])
sum??
Explanation: <img src='http://pycircle.org/static/pycircle_big.png' style="margin-left:auto; margin-right:auto; height:70%; width:70%">
Introduction, part 2
End of explanation
all([1==1, True, 10, -1]), all([1==5, True, 10, -1])
any([False, True]), any([False, False])
bin(12), oct(12), hex(12), int('12'), float(12)
ord('A'), chr(65)
raw_input(u"Podaj liczbę: ")
zip([1,2,3], [2, 3, 4])
sorted([8, 3, 12, 9, 3]), reversed(range(10)), list(reversed(range(10)))
len([3, 2, 1]), len([[1, 2], [3, 4, 5]])
list(), dict(), set(), tuple()
Explanation: Built-in functions
End of explanation
A = (1, 2, 3)
B = [1, 2, 3]
A == B
Explanation: Tuples (krotka)
End of explanation
A = set()
A.add(2)
A.add(3)
A.add(4)
A
A.add(3)
A
B = set((4, 5, 6))
A.difference(B)
A.symmetric_difference(B)
A.intersection(B)
A.union(B)
Explanation: How does a tuple differ from a list?
Sets (zbiory)
End of explanation
pow(2, 10), divmod(10, 3), sum([1, 2, 3])
round(0.5), round(0.2), round(0.9)
min([1, 2, 3]), max([1, 2, 3])
abs(10), abs(-10)
24 % 5, 24 % 2
Explanation: Simple math
End of explanation
f = lambda x: x+1
f(3)
f = lambda a, b: a+b**3
f(2, 3)
map(lambda x: x+10, [0, 2, 5, 234])
[x+10 for x in [0, 2]]
map(chr, [80, 121, 67, 105, 114, 99, 108, 101])
[chr(x) for x in [80, 121, 67, 105, 114, 99, 108, 101]]
filter(lambda x: x > 0, [-1, 0, 4, -3, 2])
[x for x in [-1, 0, 4, -3, 2] if x > 0]
reduce(lambda a, b: a - b, [2, 3, 4])
2 - 3 - 4
Explanation: A bit of functional programming
map, filter, reduce
lambda expressions $\lambda$
End of explanation
%ls -l
fp = open("pycircle.txt", "w")
%ls -l
fp.write("Hello world\n")
fp.close()
%cat pycircle.txt
with open("pycircle.txt") as fp:
print fp.read(),
Explanation: More information about the built-in functions can be found at https://docs.python.org/2/library/functions.html
Exercises 1
1. Write code that builds a list of the numbers in the range $[0, 100]$ that are divisible by 3 but not by 9
2. Write code that returns the unique elements of a given list
3. Write code that finds the maximum value of a dictionary
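One possible set of solutions (a sketch, not the only way to do it; the names are mine):
result_1 = [x for x in range(0, 101) if x % 3 == 0 and x % 9 != 0]
def unique(seq):
    return list(set(seq))
def max_value(d):
    return max(d.values())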
Files
End of explanation
def fun1(a):
a.append(9)
return a
def fun2(a=[]):
a.append(9)
return a
lista1 = [1, 2, 3]
lista2 = [3, 4, 5]
fun1(lista1), fun2(lista2)
def fun2(a=[]):
a.append(9)
return a
fun2()
fun2()
fun2()
Explanation: Functions
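The repeated calls to fun2() above keep growing the same list, because default argument values are evaluated once, when the function is defined. A common way to avoid this (a sketch, not part of the original material):
def fun2_fixed(a=None):
    if a is None:  # a fresh list is created on every call
        a = []
    a.append(9)
    return a
fun2_fixed()  # [9]
fun2_fixed()  # still [9], not [9, 9]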
End of explanation
def show_local():
x = 23
print("Local: %s" % x)
show_local()
def show_enclosing(a):
def enclosing():
print("Enclosing: %s" % a)
enclosing()
show_enclosing(5)
x = 43
def show_global():
print("Global %s" % x)
show_global()
def show_built():
print("Built-in: %s" % abs)
show_built()
x = 43
def what_x():
print(x)
x = 4
what_x()
x = 43
def encl_x():
x = 23
def enclosing():
print("Enclosing: %s" % x)
enclosing()
encl_x()
x = 43
def what_about_globals():
global x
x = 37
print("In function %s" % x)
what_about_globals()
print("After function %s" % x)
Explanation: LEGB
<img src="http://sandeeps.in/_images/python_legb.png" style="margin-left:auto; margin-right:auto;">
End of explanation
def f(x):
f.l += x
print "x: ", x
print "f.l: ", f.l
f.l = 10
f(2)
f(14)
Explanation: Functions are objects too!
End of explanation
def powerer(power):
def nested(number):
return number ** power
return nested
f = powerer(3)
f(2), f(10)
def licznik(start):
def nested(label):
print(label, nested.state)
nested.state += 1
nested.state = start
return nested
f = licznik(0)
f('a')
f('b')
f('c')
Explanation: Function factories
End of explanation
' '.join(['a', 'b', 'c'])
Explanation: Exercises 2
1. Write a function that creates a file with the square roots of the numbers in the range $[0, 1]$, each on a separate line
2. Write a function that reads the square roots from the file from the previous exercise, computes their sum and appends it to the file
3. Write a function that behaves like ''.join() using reduce
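A possible set of solutions (a sketch; the function names, the choice of 100 evenly spaced values, and the assumption that the file contains only numbers are mine, not the original author's):
import math
def write_roots(path, n=100):
    with open(path, "w") as fp:
        for i in range(n):
            fp.write("%f\n" % math.sqrt(i / float(n - 1)))
def append_sum(path):
    with open(path) as fp:
        total = sum(float(line) for line in fp if line.strip())
    with open(path, "a") as fp:
        fp.write("%f\n" % total)
def my_join(sep, seq):
    # behaves like sep.join(seq)
    return reduce(lambda acc, s: acc + sep + s, seq) if seq else ""
Note that reduce is a built-in in Python 2, which is what these materials use; in Python 3 it lives in functools.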
End of explanation |
14,588 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Data Preprocess
Zhiang Chen, March 2017
This notebook builds the training, validation and test datasets. First, it reads the 24 pickle files. These 24 pickle files contain data from three different heights of the desk: files [1-8] are from height_1, files [9-16] are from height_2, and files [17-24] are from height_3. Two pickle files are randomly chosen from each group of 8 to compose the validation and test datasets; three of these six are randomly chosen as the validation dataset, and the remaining three form the test dataset.
1. Import the necessary packages
Step1: 2. Read pickle files
Step2: 3. Group dataset
Step3: 4. Label the data
Step4: 5. Convert one-hot code
Step5: 6. Save data
Step6: 7. Pick some data for batch normalization inference | Python Code:
from six.moves import cPickle as pickle
import matplotlib.pyplot as plt
import os
from random import sample, shuffle
import numpy as np
Explanation: Data Preprocess
Zhiang Chen, March 2017
This notebook builds the training, validation and test datasets. First, it reads the 24 pickle files. These 24 pickle files contain data from three different heights of the desk: files [1-8] are from height_1, files [9-16] are from height_2, and files [17-24] are from height_3. Two pickle files are randomly chosen from each group of 8 to compose the validation and test datasets; three of these six are randomly chosen as the validation dataset, and the remaining three form the test dataset.
1. Import the necessary packages
End of explanation
files = os.listdir('pickle')
dataset = dict()
for file_name in files:
with open('pickle/'+file_name, 'rb') as f:
save = pickle.load(f)
dataset.setdefault(file_name, save['image'])
del save
Explanation: 2. Read pickle files
End of explanation
v_t = sample(xrange(1,9),2) + sample(xrange(9,17),2) + sample(xrange(17,25),2)  # two files from each height group (1-8, 9-16, 17-24)
shuffle(v_t)
valid = v_t[:3]
test = v_t[3:]
train = list(set(range(1,25)) - set(v_t))
def get_names(ls):
return ['p'+str(x) for x in ls]
train = get_names(train)
valid = get_names(valid)
test = get_names(test)
print('train',train)
print('valid',valid)
print('test',test)
def add_dic(x,y):
return dict(x.items() + y.items())
def get_data(name_list):
data = [dataset.get(name,False) for name in name_list]
return reduce(add_dic,data)
# the dictionary is {name:numpy}; for example, one of the names is '30-8-1-gball-288.png'
train_dataset = get_data(train)
valid_dataset = get_data(valid)
test_dataset = get_data(test)
Explanation: 3. Group dataset
End of explanation
non_orientations = ['empty','cup','tball','pball','gball']
image_size = 50
def label_data(data):
objects = list()
orientations = list()
values = list()
for name, value in data.iteritems():
obj = name.split('.')[0].split('-')[-2] # object name
ori = name.split('.')[0].split('-')[-1] # orientation
objects.append(obj)
if obj in non_orientations:
orientations.append(0)
elif obj == 'gstick':
if name.split('.')[0].split('-')[2] in ['1','3']:
orientations.append(0)
else:
orientations.append(int(ori))
else:
orientations.append(int(ori))
values.append(value.reshape(image_size,image_size,1).astype(np.float32))
return objects, orientations, values
train_objects, train_orientations, train_values = label_data(train_dataset)
valid_objects, valid_orientations, valid_values = label_data(valid_dataset)
test_objects, test_orientations, test_values = label_data(test_dataset)
Explanation: 4. Label the data
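To see what label_data pulls out of a file name, take the example name mentioned in the comment above (a quick illustration only):
name = '30-8-1-gball-288.png'
parts = name.split('.')[0].split('-')
print(parts[-2])  # 'gball' -> object class
print(parts[-1])  # '288'   -> orientation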
End of explanation
object2value = {'empty':0,'duck':1,'cup':2,'sponge':3,'tball':4,'pball':5,'gball':6,'gstick':7,'nerf':8,'calc':9,'stapler':10}
value2object = dict((value,name) for name,value in object2value.items())
orientations = [18*x for x in range(20)]
def convert_objects(objects):
obj_values = np.asarray([object2value[obj] for obj in objects])
return (np.arange(len(object2value)) == obj_values[:,None]).astype(np.float32)
def convert_orientations(orientations):
ori_values = np.asarray(orientations)/18%10
return (np.arange(10) == ori_values[:,None]).astype(np.float32)
train_objects_ = convert_objects(train_objects)
valid_objects_ = convert_objects(valid_objects)
test_objects_ = convert_objects(test_objects)
train_orientations_ = convert_orientations(train_orientations)
valid_orientations_ = convert_orientations(valid_orientations)
test_orientations_ = convert_orientations(test_orientations)
train_values_ = np.asarray(train_values).astype(np.float32)
valid_values_ = np.asarray(valid_values).astype(np.float32)
test_values_ = np.asarray(test_values).astype(np.float32)
Explanation: 5. Convert one-hot code
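The conversion above relies on NumPy broadcasting: comparing a row of class indices against a column vector of labels produces a boolean matrix, which is then cast to float. A minimal illustration with made-up values:
import numpy as np
labels = np.array([2, 0, 1])
one_hot = (np.arange(3) == labels[:, None]).astype(np.float32)
# rows become [0, 0, 1], [1, 0, 0], [0, 1, 0]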
End of explanation
data_file = 'depth_data'
with open(data_file,'wb') as f:
save={
'train_orientations':train_orientations_,
'valid_orientations':valid_orientations_,
'test_orientations':test_orientations_,
'train_objects':train_objects_,
'valid_objects':valid_objects_,
'test_objects':test_objects_,
'train_values':train_values_,
'valid_values':valid_values_,
'test_values':test_values_,
'object2value':object2value,
'value2object':value2object
}
pickle.dump(save,f)
f.close()
statinfo = os.stat(data_file)
file_size = float(statinfo.st_size)/1000
print('Compressed data size: %0.1fkB' % file_size)
Explanation: 6. Save data
End of explanation
def randomize(dataset, classes, angles):
permutation = np.random.permutation(classes.shape[0])
shuffled_dataset = dataset[permutation,:,:]
shuffled_classes = classes[permutation]
shuffled_angles = angles[permutation]
return shuffled_dataset, shuffled_classes, shuffled_angles
train_dataset, train_classes, train_angles = randomize(train_values_, train_objects_, train_orientations_)
small_data = train_dataset[0:100,:,:,:]
with open('small_data','wb') as f:
save={
'small_data':small_data,
}
pickle.dump(save,f,pickle.HIGHEST_PROTOCOL)
f.close()
##test
image = train_dataset[11,:,:,:].reshape(-1,image_size, image_size,1)
image = np.append(image,small_data,axis=0)
print(image.shape)
Explanation: 7. Pick some data for batch normalization inference
End of explanation |
14,589 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
ES-DOC CMIP6 Model Properties - Land
MIP Era
Step1: Document Authors
Set document authors
Step2: Document Contributors
Specify document contributors
Step3: Document Publication
Specify document publication status
Step4: Document Table of Contents
1. Key Properties
2. Key Properties --> Conservation Properties
3. Key Properties --> Timestepping Framework
4. Key Properties --> Software Properties
5. Grid
6. Grid --> Horizontal
7. Grid --> Vertical
8. Soil
9. Soil --> Soil Map
10. Soil --> Snow Free Albedo
11. Soil --> Hydrology
12. Soil --> Hydrology --> Freezing
13. Soil --> Hydrology --> Drainage
14. Soil --> Heat Treatment
15. Snow
16. Snow --> Snow Albedo
17. Vegetation
18. Energy Balance
19. Carbon Cycle
20. Carbon Cycle --> Vegetation
21. Carbon Cycle --> Vegetation --> Photosynthesis
22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
23. Carbon Cycle --> Vegetation --> Allocation
24. Carbon Cycle --> Vegetation --> Phenology
25. Carbon Cycle --> Vegetation --> Mortality
26. Carbon Cycle --> Litter
27. Carbon Cycle --> Soil
28. Carbon Cycle --> Permafrost Carbon
29. Nitrogen Cycle
30. River Routing
31. River Routing --> Oceanic Discharge
32. Lakes
33. Lakes --> Method
34. Lakes --> Wetlands
1. Key Properties
Land surface key properties
1.1. Model Overview
Is Required
Step5: 1.2. Model Name
Is Required
Step6: 1.3. Description
Is Required
Step7: 1.4. Land Atmosphere Flux Exchanges
Is Required
Step8: 1.5. Atmospheric Coupling Treatment
Is Required
Step9: 1.6. Land Cover
Is Required
Step10: 1.7. Land Cover Change
Is Required
Step11: 1.8. Tiling
Is Required
Step12: 2. Key Properties --> Conservation Properties
TODO
2.1. Energy
Is Required
Step13: 2.2. Water
Is Required
Step14: 2.3. Carbon
Is Required
Step15: 3. Key Properties --> Timestepping Framework
TODO
3.1. Timestep Dependent On Atmosphere
Is Required
Step16: 3.2. Time Step
Is Required
Step17: 3.3. Timestepping Method
Is Required
Step18: 4. Key Properties --> Software Properties
Software properties of land surface code
4.1. Repository
Is Required
Step19: 4.2. Code Version
Is Required
Step20: 4.3. Code Languages
Is Required
Step21: 5. Grid
Land surface grid
5.1. Overview
Is Required
Step22: 6. Grid --> Horizontal
The horizontal grid in the land surface
6.1. Description
Is Required
Step23: 6.2. Matches Atmosphere Grid
Is Required
Step24: 7. Grid --> Vertical
The vertical grid in the soil
7.1. Description
Is Required
Step25: 7.2. Total Depth
Is Required
Step26: 8. Soil
Land surface soil
8.1. Overview
Is Required
Step27: 8.2. Heat Water Coupling
Is Required
Step28: 8.3. Number Of Soil layers
Is Required
Step29: 8.4. Prognostic Variables
Is Required
Step30: 9. Soil --> Soil Map
Key properties of the land surface soil map
9.1. Description
Is Required
Step31: 9.2. Structure
Is Required
Step32: 9.3. Texture
Is Required
Step33: 9.4. Organic Matter
Is Required
Step34: 9.5. Albedo
Is Required
Step35: 9.6. Water Table
Is Required
Step36: 9.7. Continuously Varying Soil Depth
Is Required
Step37: 9.8. Soil Depth
Is Required
Step38: 10. Soil --> Snow Free Albedo
TODO
10.1. Prognostic
Is Required
Step39: 10.2. Functions
Is Required
Step40: 10.3. Direct Diffuse
Is Required
Step41: 10.4. Number Of Wavelength Bands
Is Required
Step42: 11. Soil --> Hydrology
Key properties of the land surface soil hydrology
11.1. Description
Is Required
Step43: 11.2. Time Step
Is Required
Step44: 11.3. Tiling
Is Required
Step45: 11.4. Vertical Discretisation
Is Required
Step46: 11.5. Number Of Ground Water Layers
Is Required
Step47: 11.6. Lateral Connectivity
Is Required
Step48: 11.7. Method
Is Required
Step49: 12. Soil --> Hydrology --> Freezing
TODO
12.1. Number Of Ground Ice Layers
Is Required
Step50: 12.2. Ice Storage Method
Is Required
Step51: 12.3. Permafrost
Is Required
Step52: 13. Soil --> Hydrology --> Drainage
TODO
13.1. Description
Is Required
Step53: 13.2. Types
Is Required
Step54: 14. Soil --> Heat Treatment
TODO
14.1. Description
Is Required
Step55: 14.2. Time Step
Is Required
Step56: 14.3. Tiling
Is Required
Step57: 14.4. Vertical Discretisation
Is Required
Step58: 14.5. Heat Storage
Is Required
Step59: 14.6. Processes
Is Required
Step60: 15. Snow
Land surface snow
15.1. Overview
Is Required
Step61: 15.2. Tiling
Is Required
Step62: 15.3. Number Of Snow Layers
Is Required
Step63: 15.4. Density
Is Required
Step64: 15.5. Water Equivalent
Is Required
Step65: 15.6. Heat Content
Is Required
Step66: 15.7. Temperature
Is Required
Step67: 15.8. Liquid Water Content
Is Required
Step68: 15.9. Snow Cover Fractions
Is Required
Step69: 15.10. Processes
Is Required
Step70: 15.11. Prognostic Variables
Is Required
Step71: 16. Snow --> Snow Albedo
TODO
16.1. Type
Is Required
Step72: 16.2. Functions
Is Required
Step73: 17. Vegetation
Land surface vegetation
17.1. Overview
Is Required
Step74: 17.2. Time Step
Is Required
Step75: 17.3. Dynamic Vegetation
Is Required
Step76: 17.4. Tiling
Is Required
Step77: 17.5. Vegetation Representation
Is Required
Step78: 17.6. Vegetation Types
Is Required
Step79: 17.7. Biome Types
Is Required
Step80: 17.8. Vegetation Time Variation
Is Required
Step81: 17.9. Vegetation Map
Is Required
Step82: 17.10. Interception
Is Required
Step83: 17.11. Phenology
Is Required
Step84: 17.12. Phenology Description
Is Required
Step85: 17.13. Leaf Area Index
Is Required
Step86: 17.14. Leaf Area Index Description
Is Required
Step87: 17.15. Biomass
Is Required
Step88: 17.16. Biomass Description
Is Required
Step89: 17.17. Biogeography
Is Required
Step90: 17.18. Biogeography Description
Is Required
Step91: 17.19. Stomatal Resistance
Is Required
Step92: 17.20. Stomatal Resistance Description
Is Required
Step93: 17.21. Prognostic Variables
Is Required
Step94: 18. Energy Balance
Land surface energy balance
18.1. Overview
Is Required
Step95: 18.2. Tiling
Is Required
Step96: 18.3. Number Of Surface Temperatures
Is Required
Step97: 18.4. Evaporation
Is Required
Step98: 18.5. Processes
Is Required
Step99: 19. Carbon Cycle
Land surface carbon cycle
19.1. Overview
Is Required
Step100: 19.2. Tiling
Is Required
Step101: 19.3. Time Step
Is Required
Step102: 19.4. Anthropogenic Carbon
Is Required
Step103: 19.5. Prognostic Variables
Is Required
Step104: 20. Carbon Cycle --> Vegetation
TODO
20.1. Number Of Carbon Pools
Is Required
Step105: 20.2. Carbon Pools
Is Required
Step106: 20.3. Forest Stand Dynamics
Is Required
Step107: 21. Carbon Cycle --> Vegetation --> Photosynthesis
TODO
21.1. Method
Is Required
Step108: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
TODO
22.1. Maintainance Respiration
Is Required
Step109: 22.2. Growth Respiration
Is Required
Step110: 23. Carbon Cycle --> Vegetation --> Allocation
TODO
23.1. Method
Is Required
Step111: 23.2. Allocation Bins
Is Required
Step112: 23.3. Allocation Fractions
Is Required
Step113: 24. Carbon Cycle --> Vegetation --> Phenology
TODO
24.1. Method
Is Required
Step114: 25. Carbon Cycle --> Vegetation --> Mortality
TODO
25.1. Method
Is Required
Step115: 26. Carbon Cycle --> Litter
TODO
26.1. Number Of Carbon Pools
Is Required
Step116: 26.2. Carbon Pools
Is Required
Step117: 26.3. Decomposition
Is Required
Step118: 26.4. Method
Is Required
Step119: 27. Carbon Cycle --> Soil
TODO
27.1. Number Of Carbon Pools
Is Required
Step120: 27.2. Carbon Pools
Is Required
Step121: 27.3. Decomposition
Is Required
Step122: 27.4. Method
Is Required
Step123: 28. Carbon Cycle --> Permafrost Carbon
TODO
28.1. Is Permafrost Included
Is Required
Step124: 28.2. Emitted Greenhouse Gases
Is Required
Step125: 28.3. Decomposition
Is Required
Step126: 28.4. Impact On Soil Properties
Is Required
Step127: 29. Nitrogen Cycle
Land surface nitrogen cycle
29.1. Overview
Is Required
Step128: 29.2. Tiling
Is Required
Step129: 29.3. Time Step
Is Required
Step130: 29.4. Prognostic Variables
Is Required
Step131: 30. River Routing
Land surface river routing
30.1. Overview
Is Required
Step132: 30.2. Tiling
Is Required
Step133: 30.3. Time Step
Is Required
Step134: 30.4. Grid Inherited From Land Surface
Is Required
Step135: 30.5. Grid Description
Is Required
Step136: 30.6. Number Of Reservoirs
Is Required
Step137: 30.7. Water Re Evaporation
Is Required
Step138: 30.8. Coupled To Atmosphere
Is Required
Step139: 30.9. Coupled To Land
Is Required
Step140: 30.10. Quantities Exchanged With Atmosphere
Is Required
Step141: 30.11. Basin Flow Direction Map
Is Required
Step142: 30.12. Flooding
Is Required
Step143: 30.13. Prognostic Variables
Is Required
Step144: 31. River Routing --> Oceanic Discharge
TODO
31.1. Discharge Type
Is Required
Step145: 31.2. Quantities Transported
Is Required
Step146: 32. Lakes
Land surface lakes
32.1. Overview
Is Required
Step147: 32.2. Coupling With Rivers
Is Required
Step148: 32.3. Time Step
Is Required
Step149: 32.4. Quantities Exchanged With Rivers
Is Required
Step150: 32.5. Vertical Grid
Is Required
Step151: 32.6. Prognostic Variables
Is Required
Step152: 33. Lakes --> Method
TODO
33.1. Ice Treatment
Is Required
Step153: 33.2. Albedo
Is Required
Step154: 33.3. Dynamics
Is Required
Step155: 33.4. Dynamic Lake Extent
Is Required
Step156: 33.5. Endorheic Basins
Is Required
Step157: 34. Lakes --> Wetlands
TODO
34.1. Description
Is Required | Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cccr-iitm', 'iitm-esm', 'land')
Explanation: ES-DOC CMIP6 Model Properties - Land
MIP Era: CMIP6
Institute: CCCR-IITM
Source ID: IITM-ESM
Topic: Land
Sub-Topics: Soil, Snow, Vegetation, Energy Balance, Carbon Cycle, Nitrogen Cycle, River Routing, Lakes.
Properties: 154 (96 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:53:48
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
Explanation: Document Authors
Set document authors
End of explanation
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
Explanation: Document Contributors
Specify document contributors
End of explanation
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
Explanation: Document Publication
Specify document publication status
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Conservation Properties
3. Key Properties --> Timestepping Framework
4. Key Properties --> Software Properties
5. Grid
6. Grid --> Horizontal
7. Grid --> Vertical
8. Soil
9. Soil --> Soil Map
10. Soil --> Snow Free Albedo
11. Soil --> Hydrology
12. Soil --> Hydrology --> Freezing
13. Soil --> Hydrology --> Drainage
14. Soil --> Heat Treatment
15. Snow
16. Snow --> Snow Albedo
17. Vegetation
18. Energy Balance
19. Carbon Cycle
20. Carbon Cycle --> Vegetation
21. Carbon Cycle --> Vegetation --> Photosynthesis
22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
23. Carbon Cycle --> Vegetation --> Allocation
24. Carbon Cycle --> Vegetation --> Phenology
25. Carbon Cycle --> Vegetation --> Mortality
26. Carbon Cycle --> Litter
27. Carbon Cycle --> Soil
28. Carbon Cycle --> Permafrost Carbon
29. Nitrogen Cycle
30. River Routing
31. River Routing --> Oceanic Discharge
32. Lakes
33. Lakes --> Method
34. Lakes --> Wetlands
1. Key Properties
Land surface key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of land surface model.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of land surface model code (e.g. MOSES2.2)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 1.3. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of the processes modelled (e.g. dynamic vegetation, prognostic albedo, etc.)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 1.4. Land Atmosphere Flux Exchanges
Is Required: FALSE Type: ENUM Cardinality: 0.N
Fluxes exchanged with the atmosphere.
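For example, a group whose land scheme exchanges water and energy with the atmosphere would fill in the cell above following its template comment, along the lines of the sketch below (illustrative values only, not IITM-ESM's actual configuration; the template comment suggests one DOC.set_value call per selected value for multi-valued properties):
DOC.set_value("water")
DOC.set_value("energy")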
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 1.5. Atmospheric Coupling Treatment
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of land surface coupling with the Atmosphere model component, which may be different for different quantities (e.g. dust: semi-implicit, water vapour: explicit)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 1.6. Land Cover
Is Required: TRUE Type: ENUM Cardinality: 1.N
Types of land cover defined in the land surface model
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 1.7. Land Cover Change
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how land cover change is managed (e.g. the use of net or gross transitions)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 1.8. Tiling
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general tiling procedure used in the land surface (if any). Include treatment of physiography, land/sea, (dynamic) vegetation coverage and orography/roughness
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 2. Key Properties --> Conservation Properties
TODO
2.1. Energy
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how energy is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 2.2. Water
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how water is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 2.3. Carbon
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how carbon is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 3. Key Properties --> Timestepping Framework
TODO
3.1. Timestep Dependent On Atmosphere
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is a time step dependent on the frequency of atmosphere coupling?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 3.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Overall timestep of land surface model (i.e. time between calls)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 3.3. Timestepping Method
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of time stepping method and associated time step(s)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 4. Key Properties --> Software Properties
Software properties of land surface code
4.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 4.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 4.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 5. Grid
Land surface grid
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the grid in the land surface
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 6. Grid --> Horizontal
The horizontal grid in the land surface
6.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the horizontal grid (not including any tiling)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 6.2. Matches Atmosphere Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the horizontal grid match the atmosphere?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 7. Grid --> Vertical
The vertical grid in the soil
7.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the vertical grid in the soil (not including any tiling)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 7.2. Total Depth
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The total depth of the soil (in metres)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 8. Soil
Land surface soil
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of soil in the land surface
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 8.2. Heat Water Coupling
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the coupling between heat and water in the soil
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 8.3. Number Of Soil layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of soil layers
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 8.4. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the soil scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 9. Soil --> Soil Map
Key properties of the land surface soil map
9.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of soil map
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 9.2. Structure
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil structure map
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 9.3. Texture
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil texture map
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 9.4. Organic Matter
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil organic matter map
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 9.5. Albedo
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil albedo map
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 9.6. Water Table
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil water table map, if any
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 9.7. Continuously Varying Soil Depth
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the soil properties vary continuously with depth?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 9.8. Soil Depth
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil depth map
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 10. Soil --> Snow Free Albedo
TODO
10.1. Prognostic
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is snow free albedo prognostic?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 10.2. Functions
Is Required: FALSE Type: ENUM Cardinality: 0.N
If prognostic, describe the dependencies on snow free albedo calculations
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 10.3. Direct Diffuse
Is Required: FALSE Type: ENUM Cardinality: 0.1
If prognostic, describe the distinction between direct and diffuse albedo
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 10.4. Number Of Wavelength Bands
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If prognostic, enter the number of wavelength bands used
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 11. Soil --> Hydrology
Key properties of the land surface soil hydrology
11.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of the soil hydrological model
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 11.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of river soil hydrology in seconds
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 11.3. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil hydrology tiling, if any.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 11.4. Vertical Discretisation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the typical vertical discretisation
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 11.5. Number Of Ground Water Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of soil layers that may contain water
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 11.6. Lateral Connectivity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe the lateral connectivity between tiles
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 11.7. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
The hydrological dynamics scheme in the land surface model
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 12. Soil --> Hydrology --> Freezing
TODO
12.1. Number Of Ground Ice Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
How many soil layers may contain ground ice
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 12.2. Ice Storage Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method of ice storage
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 12.3. Permafrost
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of permafrost, if any, within the land surface scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 13. Soil --> Hydrology --> Drainage
TODO
13.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General describe how drainage is included in the land surface scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 13.2. Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
Different types of runoff represented by the land surface model
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 14. Soil --> Heat Treatment
TODO
14.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of how heat treatment properties are defined
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 14.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of soil heat scheme in seconds
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 14.3. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil heat treatment tiling, if any.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 14.4. Vertical Discretisation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the typical vertical discretisation
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 14.5. Heat Storage
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the method of heat storage
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 14.6. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe processes included in the treatment of soil heat
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 15. Snow
Land surface snow
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of snow in the land surface
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 15.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow tiling, if any.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 15.3. Number Of Snow Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of snow levels used in the land surface scheme/model
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 15.4. Density
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow density
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 15.5. Water Equivalent
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of the snow water equivalent
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 15.6. Heat Content
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of the heat content of snow
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 15.7. Temperature
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow temperature
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 15.8. Liquid Water Content
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow liquid water
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 15.9. Snow Cover Fractions
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify cover fractions used in the surface snow scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 15.10. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Snow related processes in the land surface scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 15.11. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the snow scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 16. Snow --> Snow Albedo
TODO
16.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe the treatment of snow-covered land albedo
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 16.2. Functions
Is Required: FALSE Type: ENUM Cardinality: 0.N
*If prognostic, *
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 17. Vegetation
Land surface vegetation
17.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of vegetation in the land surface
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 17.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of vegetation scheme in seconds
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 17.3. Dynamic Vegetation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there dynamic evolution of vegetation?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 17.4. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the vegetation tiling, if any.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 17.5. Vegetation Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Vegetation classification used
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 17.6. Vegetation Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
List of vegetation types in the classification, if any
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 17.7. Biome Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
List of biome types in the classification, if any
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 17.8. Vegetation Time Variation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How the vegetation fractions in each tile are varying with time
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 17.9. Vegetation Map
Is Required: FALSE Type: STRING Cardinality: 0.1
If vegetation fractions are not dynamically updated, describe the vegetation map used (common name and reference, if possible)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 17.10. Interception
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is vegetation interception of rainwater represented?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 17.11. Phenology
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation phenology
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 17.12. Phenology Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation phenology
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 17.13. Leaf Area Index
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation leaf area index
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 17.14. Leaf Area Index Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of leaf area index
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 17.15. Biomass
Is Required: TRUE Type: ENUM Cardinality: 1.1
*Treatment of vegetation biomass *
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 17.16. Biomass Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation biomass
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 17.17. Biogeography
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation biogeography
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 17.18. Biogeography Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation biogeography
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 17.19. Stomatal Resistance
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify what the vegetation stomatal resistance depends on
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 17.20. Stomatal Resistance Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation stomatal resistance
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 17.21. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the vegetation scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 18. Energy Balance
Land surface energy balance
18.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of energy balance in land surface
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 18.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the energy balance tiling, if any.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 18.3. Number Of Surface Temperatures
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The maximum number of distinct surface temperatures in a grid cell (for example, each subgrid tile may have its own temperature)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 18.4. Evaporation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify the formulation method for land surface evaporation, from soil and vegetation
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 18.5. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe which processes are included in the energy balance scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 19. Carbon Cycle
Land surface carbon cycle
19.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of carbon cycle in land surface
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 19.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the carbon cycle tiling, if any.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 19.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of carbon cycle in seconds
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 19.4. Anthropogenic Carbon
Is Required: FALSE Type: ENUM Cardinality: 0.N
Describe the treatment of the anthropogenic carbon pool
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 19.5. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the carbon scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 20. Carbon Cycle --> Vegetation
TODO
20.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 20.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 20.3. Forest Stand Dynamics
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the treatment of forest stand dynamics
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 21. Carbon Cycle --> Vegetation --> Photosynthesis
TODO
21.1. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for photosynthesis (e.g. type of photosynthesis, distinction between C3 and C4 grasses, nitrogen dependence, etc.)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
TODO
22.1. Maintenance Respiration
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for maintenance respiration
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 22.2. Growth Respiration
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for growth respiration
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 23. Carbon Cycle --> Vegetation --> Allocation
TODO
23.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the allocation scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 23.2. Allocation Bins
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify distinct carbon bins used in allocation
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 23.3. Allocation Fractions
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how the fractions of allocation are calculated
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 24. Carbon Cycle --> Vegetation --> Phenology
TODO
24.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the phenology scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 25. Carbon Cycle --> Vegetation --> Mortality
TODO
25.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the mortality scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 26. Carbon Cycle --> Litter
TODO
26.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 26.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 26.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 26.4. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
List the general method used
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 27. Carbon Cycle --> Soil
TODO
27.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 27.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 27.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 27.4. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
List the general method used
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 28. Carbon Cycle --> Permafrost Carbon
TODO
28.1. Is Permafrost Included
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is permafrost included?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 28.2. Emitted Greenhouse Gases
Is Required: FALSE Type: STRING Cardinality: 0.1
List the GHGs emitted
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 28.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 28.4. Impact On Soil Properties
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the impact of permafrost on soil properties
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 29. Nitrogen Cycle
Land surface nitrogen cycle
29.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the nitrogen cycle in the land surface
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 29.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the nitrogen cycle tiling, if any.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 29.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of nitrogen cycle in seconds
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 29.4. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the nitrogen scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 30. River Routing
Land surface river routing
30.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of river routing in the land surface
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 30.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the river routing tiling, if any.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 30.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of river routing scheme in seconds
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 30.4. Grid Inherited From Land Surface
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the grid inherited from land surface?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 30.5. Grid Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of grid, if not inherited from land surface
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 30.6. Number Of Reservoirs
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of reservoirs
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 30.7. Water Re Evaporation
Is Required: TRUE Type: ENUM Cardinality: 1.N
TODO
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 30.8. Coupled To Atmosphere
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Is river routing coupled to the atmosphere model component?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 30.9. Coupled To Land
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the coupling between land and rivers
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 30.10. Quantities Exchanged With Atmosphere
Is Required: FALSE Type: ENUM Cardinality: 0.N
If coupled to the atmosphere, which quantities are exchanged between river routing and the atmosphere model components?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 30.11. Basin Flow Direction Map
Is Required: TRUE Type: ENUM Cardinality: 1.1
What type of basin flow direction map is being used?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 30.12. Flooding
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the representation of flooding, if any
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 30.13. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the river routing
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 31. River Routing --> Oceanic Discharge
TODO
31.1. Discharge Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify how rivers are discharged to the ocean
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 31.2. Quantities Transported
Is Required: TRUE Type: ENUM Cardinality: 1.N
Quantities that are exchanged from river-routing to the ocean model component
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 32. Lakes
Land surface lakes
32.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of lakes in the land surface
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 32.2. Coupling With Rivers
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are lakes coupled to the river routing model component?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 32.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of lake scheme in seconds
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 32.4. Quantities Exchanged With Rivers
Is Required: FALSE Type: ENUM Cardinality: 0.N
If coupling with rivers, which quantities are exchanged between the lakes and rivers
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 32.5. Vertical Grid
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the vertical grid of lakes
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 32.6. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the lake scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 33. Lakes --> Method
TODO
33.1. Ice Treatment
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is lake ice included?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 33.2. Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe the treatment of lake albedo
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 33.3. Dynamics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which dynamics of lakes are treated? horizontal, vertical, etc.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 33.4. Dynamic Lake Extent
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is a dynamic lake extent scheme included?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 33.5. Endorheic Basins
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Basins not flowing to ocean included?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 34. Lakes --> Wetlands
TODO
34.1. Description
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the treatment of wetlands, if any
End of explanation |
14,590 | Given the following text problem statement, write Python code to implement the functionality described below in problem statement
Problem:
I want to process a gray image in the form of np.array. | Problem:
import numpy as np
im = np.array([[0,0,0,0,0,0],
[0,0,1,1,1,0],
[0,1,1,0,1,0],
[0,0,0,1,1,0],
[0,0,0,0,0,0]])
mask = im == 0
rows = np.flatnonzero((~mask).sum(axis=1))
cols = np.flatnonzero((~mask).sum(axis=0))
if rows.shape[0] == 0:
result = np.array([])
else:
result = im[rows.min():rows.max()+1, cols.min():cols.max()+1] |
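As a quick sanity check (illustrative only), the sample image above crops to a 3x4 array once the all-zero border rows and columns are removed:
print(result)
# [[0 1 1 1]
#  [1 1 0 1]
#  [0 0 1 1]]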
14,591 | Given the following text problem statement, write Python code to implement the functionality described below in problem statement
Problem:
I have a DataFrame and I would like to transform it to count views that belong to certain bins. | Problem:
import pandas as pd
df = pd.DataFrame({'username': ['john', 'john', 'john', 'john', 'jane', 'jane', 'jane', 'jane'],
'post_id': [1, 2, 3, 4, 7, 8, 9, 10],
'views': [3, 23, 44, 82, 5, 25,46, 56]})
bins = [1, 10, 25, 50, 100]
def g(df, bins):
groups = df.groupby(['username', pd.cut(df.views, bins)])
return groups.size().unstack()
result = g(df.copy(), bins.copy())
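For reference (illustrative only), each user in the sample data has exactly one view count per bin, so the grouped table comes out as:
print(result)
# views     (1, 10]  (10, 25]  (25, 50]  (50, 100]
# username
# jane            1         1         1          1
# john            1         1         1          1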
14,592 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Deep learning goes to the movies
Kaggle tutorial Part 1
Step1: Read the data
Step2: Clean the data
Step3: Compute the feature vectors (word vectors)
Step4: Cross validation Score of RandomForestClassifier
RandomForestClassifier
In machine learning, a random forest is a classifier made up of many decision trees; its output class is the mode of the classes output by the individual trees. Leo Breiman and Adele Cutler developed the random forest algorithm, and "Random Forests" is their trademark. The term derives from the random decision forests proposed in 1995 by Tin Kam Ho of Bell Labs, and the method combines Breiman's "bootstrap aggregating" idea with Ho's "random subspace method" to build an ensemble of decision trees.
Each tree is built according to the following algorithm:
1. Let N be the number of training examples and M the number of variables.
2. A number m is given that decides how many variables are considered when splitting at a node; m should be smaller than M.
3. Sample N times with replacement from the N training cases to form a training set (bootstrap sampling), and use the remaining cases to estimate the tree's prediction error.
4. At each node, randomly select m of the variables and compute the best split based on these m variables.
5. Grow each tree to full size without pruning (pruning might otherwise be applied after building an ordinary tree classifier).
Step5: Use all train data to train a forest model
Step6: Predict the testset | Python Code:
import os
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import RandomForestClassifier
from KaggleWord2VecUtility import KaggleWord2VecUtility # in the same folder
import pandas as pd
import numpy as np
Explanation: Deep learning goes to the movies
Kaggle tutorial Part 1: Natural Language Processing.
Author: Angela Chapman
Date: 8/6/2014
Bag of Words Meets Bags of Popcorn
The labeled data set consists of 50,000 IMDB movie reviews, specially selected for sentiment analysis. The sentiment of reviews is binary, meaning the IMDB rating < 5 results in a sentiment score of 0, and rating >=7 have a sentiment score of 1. No individual movie has more than 30 reviews. The 25,000 review labeled training set does not include any of the same movies as the 25,000 review test set. In addition, there are another 50,000 IMDB reviews provided without any rating labels.
End of explanation
traindata_path = "/Users/chengjun/bigdata/kaggle_popcorn_data/labeledTrainData.tsv"
testdata_path = "/Users/chengjun/bigdata/kaggle_popcorn_data/testData.tsv"
train = pd.read_csv(traindata_path, header=0, delimiter="\t", quoting=3)
test = pd.read_csv(testdata_path, header=0, delimiter="\t", quoting=3 )
print 'The first review is:'
print train["review"][0]
train[:3]
test[:3]
Explanation: Read the data
End of explanation
import nltk
nltk.download()
# 'Download text data sets. If you already have NLTK datasets downloaded, just close the Python download window...'
# Download text data sets, including stop words
# Initialize an empty list to hold the clean reviews
clean_train_reviews = []
# Loop over each review; create an index i that goes from 0 to the length
# of the movie review list
print "Cleaning and parsing the training set movie reviews...\n"
for i in xrange( 0, len(train["review"])):
clean_train_reviews.append(" ".join(KaggleWord2VecUtility.review_to_wordlist(train["review"][i], True)))
clean_train_reviews[0]
train['review'][0]
Explanation: Clean the data
End of explanation
# ****** Create a bag of words from the training set
# Initialize the "CountVectorizer" object, which is scikit-learn's
# bag of words tool.
vectorizer = CountVectorizer(analyzer = "word", \
tokenizer = None, \
preprocessor = None, \
stop_words = None, \
max_features = 5000)
# fit_transform() does two functions: First, it fits the model
# and learns the vocabulary; second, it transforms our training data
# into feature vectors. The input to fit_transform should be a list of strings.
train_data_features = vectorizer.fit_transform(clean_train_reviews)
# Numpy arrays are easy to work with, so convert the result to an array
train_data_features = train_data_features.toarray()
type(train_data_features)
len(train_data_features)
train_data_features[1][100:105]
Explanation: Compute the feature vectors (word vectors)
End of explanation
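To peek at the learned vocabulary (an optional sanity check; get_feature_names() is the accessor in the scikit-learn versions this tutorial was written for):
# Inspect the bag-of-words vocabulary and the total count of each word (optional)
vocab = vectorizer.get_feature_names()
dist = np.sum(train_data_features, axis=0)
print vocab[:10]
print dist[:10]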
from sklearn.cross_validation import cross_val_score
forest_val = RandomForestClassifier(n_estimators = 100)
scores = cross_val_score(forest_val, train_data_features, train["sentiment"], cv = 3)
scores.mean()
scores
Explanation: Cross validation Score of RandomForestClassifier
RandomForestClassifier
In machine learning, a random forest is a classifier made up of many decision trees; its output class is the mode of the classes output by the individual trees. Leo Breiman and Adele Cutler developed the random forest algorithm, and "Random Forests" is their trademark. The term derives from the random decision forests proposed in 1995 by Tin Kam Ho of Bell Labs, and the method combines Breiman's "bootstrap aggregating" idea with Ho's "random subspace method" to build an ensemble of decision trees.
Each tree is built according to the following algorithm:
1. Let N be the number of training examples and M the number of variables.
2. A number m is given that decides how many variables are considered when splitting at a node; m should be smaller than M.
3. Sample N times with replacement from the N training cases to form a training set (bootstrap sampling), and use the remaining cases to estimate the tree's prediction error.
4. At each node, randomly select m of the variables and compute the best split based on these m variables.
5. Grow each tree to full size without pruning (pruning might otherwise be applied after building an ordinary tree classifier).
End of explanation
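A small sketch (not part of the original tutorial) of how the algorithm's parameters map onto scikit-learn's arguments: n_estimators is the number of trees grown, and max_features plays the role of the m variables tried at each split:
# Illustrative only: 100 trees, with m = sqrt(M) features considered at each split
forest_sketch = RandomForestClassifier(n_estimators = 100, max_features = "sqrt")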
# ******* Train a random forest using the bag of words
# Initialize a Random Forest classifier with 100 trees
forest = RandomForestClassifier(n_estimators = 100)
# Fit the forest to the training set, using the bag of words as
# features and the sentiment labels as the response variable
# This may take a few minutes to run
forest = forest.fit( train_data_features, train["sentiment"] )
Explanation: Use all train data to train a forest model
End of explanation
# Create an empty list and append the clean reviews one by one
clean_test_reviews = []
for i in xrange(0,len(test["review"])):
clean_test_reviews.append(" ".join(KaggleWord2VecUtility.review_to_wordlist(test["review"][i], True)))
len(clean_test_reviews)
clean_test_reviews[0]
test['review'][0]
# Get a bag of words for the test set, and convert to a numpy array
test_data_features = vectorizer.transform(clean_test_reviews)
test_data_features = test_data_features.toarray()
test_data_features[3]
# Use the random forest to make sentiment label predictions
result = forest.predict(test_data_features)
# Copy the results to a pandas dataframe with an "id" column and a "sentiment" column
output = pd.DataFrame( data={"id":test["id"], "sentiment":result} )
# Use pandas to write the comma-separated output file
output.to_csv('/Users/chengjun/github/cjc2016/data/Bag_of_Words_model.csv', index=False, quoting=3)
Explanation: Predict the testset
End of explanation |
14,593 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Title
Step1: Load Iris Dataset
Step2: Use Cross-Validation To Find The Best Value Of C | Python Code:
# Load libraries
from sklearn import linear_model, datasets
Explanation: Title: Fast C Hyperparameter Tuning
Slug: fast_c_hyperparameter_tuning
Summary: How to do fast C hyperparameter tuning for logistic regression in scikit-learn for machine learning in Python.
Date: 2017-09-18 12:00
Category: Machine Learning
Tags: Logistic Regression
Authors: Chris Albon
Sometimes the characteristics of a learning algorithm allow us to search for the best hyperparameters significantly faster than either brute force or randomized model search methods.
scikit-learn's LogisticRegressionCV method includes a parameter Cs. If supplied a list, Cs gives the candidate hyperparameter values to select from. If supplied an integer, a list of that many candidate values is drawn from a logarithmic scale between 0.0001 and 10000 (a range of reasonable values for C).
Preliminaries
End of explanation
# Load data
iris = datasets.load_iris()
X = iris.data
y = iris.target
Explanation: Load Iris Dataset
End of explanation
# Create cross-validated logistic regression
clf = linear_model.LogisticRegressionCV(Cs=100)
# Train model
clf.fit(X, y)
Explanation: Use Cross-Validation To Find The Best Value Of C
End of explanation |
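After fitting, the selected regularization strength can be inspected; this quick follow-up is not part of the original recipe:
# Best C found by cross-validation (LogisticRegressionCV stores one value per class)
clf.C_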
14,594 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Step1: Language Translation
In this project, you’re going to take a peek into the realm of neural network machine translation. You’ll be training a sequence to sequence model on a dataset of English and French sentences that can translate new sentences from English to French.
Get the Data
Since translating the whole language of English to French will take lots of time to train, we have provided you with a small portion of the English corpus.
Step3: Explore the Data
Play around with view_sentence_range to view different parts of the data.
Step6: Implement Preprocessing Function
Text to Word Ids
As you did with other RNNs, you must turn the text into a number so the computer can understand it. In the function text_to_ids(), you'll turn source_text and target_text from words to ids. However, you need to add the <EOS> word id at the end of each sentence from target_text. This will help the neural network predict when the sentence should end.
You can get the <EOS> word id by doing
Step8: Preprocess all the data and save it
Running the code cell below will preprocess all the data and save it to file.
Step10: Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
Step12: Check the Version of TensorFlow and Access to GPU
This will check to make sure you have the correct version of TensorFlow and access to a GPU
Step15: Build the Neural Network
You'll build the components necessary to build a Sequence-to-Sequence model by implementing the following functions below
Step18: Process Decoding Input
Implement process_decoding_input using TensorFlow to remove the last word id from each batch in target_data and concat the GO ID to the beginning of each batch.
Step21: Encoding
Implement encoding_layer() to create an Encoder RNN layer using tf.nn.dynamic_rnn().
Step24: Decoding - Training
Create training logits using tf.contrib.seq2seq.simple_decoder_fn_train() and tf.contrib.seq2seq.dynamic_rnn_decoder(). Apply the output_fn to the tf.contrib.seq2seq.dynamic_rnn_decoder() outputs.
Step27: Decoding - Inference
Create inference logits using tf.contrib.seq2seq.simple_decoder_fn_inference() and tf.contrib.seq2seq.dynamic_rnn_decoder().
Step30: Build the Decoding Layer
Implement decoding_layer() to create a Decoder RNN layer.
Create RNN cell for decoding using rnn_size and num_layers.
Create the output function using lambda to transform its input, logits, to class logits.
Use your decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope, output_fn, keep_prob) function to get the training logits.
Use your decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, maximum_length, vocab_size, decoding_scope, output_fn, keep_prob) function to get the inference logits.
Note
Step33: Build the Neural Network
Apply the functions you implemented above to
Step34: Neural Network Training
Hyperparameters
Tune the following parameters
Step36: Build the Graph
Build the graph using the neural network you implemented.
Step39: Train
Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forums to see if anyone is having the same problem.
Step41: Save Parameters
Save the batch_size and save_path parameters for inference.
Step43: Checkpoint
Step46: Sentence to Sequence
To feed a sentence into the model for translation, you first need to preprocess it. Implement the function sentence_to_seq() to preprocess new sentences.
Convert the sentence to lowercase
Convert words into ids using vocab_to_int
Convert words not in the vocabulary, to the <UNK> word id.
Step48: Translate
This will translate translate_sentence from English to French. | Python Code:
DON'T MODIFY ANYTHING IN THIS CELL
import helper
import problem_unittests as tests
source_path = 'data/small_vocab_en'
target_path = 'data/small_vocab_fr'
source_text = helper.load_data(source_path)
target_text = helper.load_data(target_path)
Explanation: Language Translation
In this project, you’re going to take a peek into the realm of neural network machine translation. You’ll be training a sequence to sequence model on a dataset of English and French sentences that can translate new sentences from English to French.
Get the Data
Since translating the whole language of English to French will take lots of time to train, we have provided you with a small portion of the English corpus.
End of explanation
view_sentence_range = (0, 10)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))
sentences = source_text.split('\n')
word_counts = [len(sentence.split()) for sentence in sentences]
print('Number of sentences: {}'.format(len(sentences)))
print('Average number of words in a sentence: {}'.format(np.average(word_counts)))
print()
print('English sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
print()
print('French sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
Explanation: Explore the Data
Play around with view_sentence_range to view different parts of the data.
End of explanation
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
Convert source and target text to proper word ids
:param source_text: String that contains all the source text.
:param target_text: String that contains all the target text.
:param source_vocab_to_int: Dictionary to go from the source words to an id
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: A tuple of lists (source_id_text, target_id_text)
source_id_text = [[source_vocab_to_int[y] for y in x] for x in
[sentence.split() for sentence in source_text.split('\n')]]
target_id_text = [[target_vocab_to_int[y] for y in x] for x in
[sentence.split() for sentence in target_text.split('\n')]]
for l in target_id_text:
l.append(target_vocab_to_int['<EOS>'])
return source_id_text, target_id_text
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_text_to_ids(text_to_ids)
Explanation: Implement Preprocessing Function
Text to Word Ids
As you did with other RNNs, you must turn the text into a number so the computer can understand it. In the function text_to_ids(), you'll turn source_text and target_text from words to ids. However, you need to add the <EOS> word id at the end of each sentence from target_text. This will help the neural network predict when the sentence should end.
You can get the <EOS> word id by doing:
python
target_vocab_to_int['<EOS>']
You can get other word ids using source_vocab_to_int and target_vocab_to_int.
End of explanation
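As a quick illustration of the expected behaviour (using tiny made-up vocabularies, not the real ones built by the helper):
# Hypothetical toy vocabularies, purely to show what text_to_ids returns
toy_source_vocab_to_int = {'hello': 0, 'world': 1}
toy_target_vocab_to_int = {'<EOS>': 2, 'bonjour': 3, 'monde': 4}
toy_source_ids, toy_target_ids = text_to_ids('hello world', 'bonjour monde', toy_source_vocab_to_int, toy_target_vocab_to_int)
# toy_source_ids == [[0, 1]] and toy_target_ids == [[3, 4, 2]] -- the <EOS> id is appended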
DON'T MODIFY ANYTHING IN THIS CELL
helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
Explanation: Preprocess all the data and save it
Running the code cell below will preprocess all the data and save it to file.
End of explanation
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
import helper
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
Explanation: Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
End of explanation
DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) in [LooseVersion('1.0.0'), LooseVersion('1.0.1')], 'This project requires TensorFlow version 1.0 You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
Explanation: Check the Version of TensorFlow and Access to GPU
This will check to make sure you have the correct version of TensorFlow and access to a GPU
End of explanation
def model_inputs():
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate, keep probability)
inputs = tf.placeholder(tf.int32, [None, None], name='input')
targets = tf.placeholder(tf.int32, [None, None], name='targets')
learning_rate = tf.placeholder(tf.float32, name='learning_rate')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
return inputs, targets, learning_rate, keep_prob
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_model_inputs(model_inputs)
Explanation: Build the Neural Network
You'll build the components necessary to build a Sequence-to-Sequence model by implementing the following functions below:
- model_inputs
- process_decoding_input
- encoding_layer
- decoding_layer_train
- decoding_layer_infer
- decoding_layer
- seq2seq_model
Input
Implement the model_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders:
Input text placeholder named "input" using the TF Placeholder name parameter with rank 2.
Targets placeholder with rank 2.
Learning rate placeholder with rank 0.
Keep probability placeholder named "keep_prob" using the TF Placeholder name parameter with rank 0.
Return the placeholders in the following tuple (Input, Targets, Learning Rate, Keep Probability)
End of explanation
def process_decoding_input(target_data, target_vocab_to_int, batch_size):
Preprocess target data for decoding
:param target_data: Target Placeholder
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param batch_size: Batch Size
:return: Preprocessed target data
ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
decoding_input = tf.concat([tf.fill([batch_size, 1], target_vocab_to_int['<GO>']), ending], 1)
return decoding_input
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_process_decoding_input(process_decoding_input)
Explanation: Process Decoding Input
Implement process_decoding_input using TensorFlow to remove the last word id from each batch in target_data and concat the GO ID to the beginning of each batch.
End of explanation
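Conceptually, the transformation looks like this (plain Python rather than TensorFlow, with hypothetical <GO> and <EOS> ids of 1 and 3):
# Each target row drops its final id and gains the <GO> id at the front
toy_batch = [[4, 5, 6, 3], [7, 8, 9, 3]]               # 3 = <EOS>
toy_processed = [[1] + row[:-1] for row in toy_batch]  # 1 = <GO>
# toy_processed == [[1, 4, 5, 6], [1, 7, 8, 9]]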
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob):
Create encoding layer
:param rnn_inputs: Inputs for the RNN
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param keep_prob: Dropout keep probability
:return: RNN state
enc_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size)] * num_layers)
enc_cell = tf.contrib.rnn.DropoutWrapper(enc_cell, output_keep_prob=keep_prob)
_, enc_state = tf.nn.dynamic_rnn(enc_cell, rnn_inputs, dtype=tf.float32)
return enc_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_encoding_layer(encoding_layer)
Explanation: Encoding
Implement encoding_layer() to create an Encoder RNN layer using tf.nn.dynamic_rnn().
End of explanation
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope,
output_fn, keep_prob):
Create a decoding layer for training
:param encoder_state: Encoder State
:param dec_cell: Decoder RNN Cell
:param dec_embed_input: Decoder embedded input
:param sequence_length: Sequence Length
:param decoding_scope: TensorFlow Variable Scope for decoding
:param output_fn: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: Train Logits
decoder = tf.contrib.seq2seq.simple_decoder_fn_train(encoder_state)
prediction, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(dec_cell, decoder, dec_embed_input,
sequence_length, scope=decoding_scope)
logits = output_fn(prediction)
return logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer_train(decoding_layer_train)
Explanation: Decoding - Training
Create training logits using tf.contrib.seq2seq.simple_decoder_fn_train() and tf.contrib.seq2seq.dynamic_rnn_decoder(). Apply the output_fn to the tf.contrib.seq2seq.dynamic_rnn_decoder() outputs.
End of explanation
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id,
maximum_length, vocab_size, decoding_scope, output_fn, keep_prob):
Create a decoding layer for inference
:param encoder_state: Encoder state
:param dec_cell: Decoder RNN Cell
:param dec_embeddings: Decoder embeddings
:param start_of_sequence_id: GO ID
:param end_of_sequence_id: EOS Id
:param maximum_length: The maximum allowed time steps to decode
:param vocab_size: Size of vocabulary
:param decoding_scope: TensorFlow Variable Scope for decoding
:param output_fn: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: Inference Logits
decoder = tf.contrib.seq2seq.simple_decoder_fn_inference(output_fn, encoder_state, dec_embeddings,
start_of_sequence_id, end_of_sequence_id,
maximum_length, vocab_size)
logits, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(dec_cell, decoder, scope=decoding_scope)
return logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer_infer(decoding_layer_infer)
Explanation: Decoding - Inference
Create inference logits using tf.contrib.seq2seq.simple_decoder_fn_inference() and tf.contrib.seq2seq.dynamic_rnn_decoder().
End of explanation
def decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size,
num_layers, target_vocab_to_int, keep_prob):
Create decoding layer
:param dec_embed_input: Decoder embedded input
:param dec_embeddings: Decoder embeddings
:param encoder_state: The encoded state
:param vocab_size: Size of vocabulary
:param sequence_length: Sequence Length
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param keep_prob: Dropout keep probability
:return: Tuple of (Training Logits, Inference Logits)
with tf.variable_scope("decoding") as decoding_scope:
dec_cell = tf.contrib.rnn.BasicLSTMCell(rnn_size)
dec_cell = tf.contrib.rnn.DropoutWrapper(dec_cell, output_keep_prob=keep_prob)
dec_cell = tf.contrib.rnn.MultiRNNCell([dec_cell] * num_layers)
_, dec_state = tf.nn.dynamic_rnn(dec_cell, dec_embed_input, dtype=tf.float32)
output_fn = lambda x: tf.contrib.layers.fully_connected(x, vocab_size, None, scope=decoding_scope)
t_logits = decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope,
output_fn, keep_prob)
with tf.variable_scope("decoding", reuse=True) as decoding_scope:
i_logits = decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, target_vocab_to_int['<GO>'],
target_vocab_to_int['<EOS>'], sequence_length, vocab_size,
decoding_scope, output_fn, keep_prob)
return t_logits, i_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer(decoding_layer)
Explanation: Build the Decoding Layer
Implement decoding_layer() to create a Decoder RNN layer.
Create RNN cell for decoding using rnn_size and num_layers.
Create the output function using lambda to transform its input, logits, to class logits.
Use your decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope, output_fn, keep_prob) function to get the training logits.
Use your decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, maximum_length, vocab_size, decoding_scope, output_fn, keep_prob) function to get the inference logits.
Note: You'll need to use tf.variable_scope to share variables between training and inference.
End of explanation
def seq2seq_model(input_data, target_data, keep_prob, batch_size, sequence_length, source_vocab_size, target_vocab_size,
enc_embedding_size, dec_embedding_size, rnn_size, num_layers, target_vocab_to_int):
Build the Sequence-to-Sequence part of the neural network
:param input_data: Input placeholder
:param target_data: Target placeholder
:param keep_prob: Dropout keep probability placeholder
:param batch_size: Batch Size
:param sequence_length: Sequence Length
:param source_vocab_size: Source vocabulary size
:param target_vocab_size: Target vocabulary size
:param enc_embedding_size: Encoder embedding size
:param dec_embedding_size: Decoder embedding size
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: Tuple of (Training Logits, Inference Logits)
rnn_inputs = tf.contrib.layers.embed_sequence(input_data, vocab_size=source_vocab_size,
embed_dim=enc_embedding_size)
encoder_state = encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob)
dec_input = process_decoding_input(target_data, target_vocab_to_int, batch_size)
dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, dec_embedding_size]))
dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input)
t_logits, i_logits = decoding_layer(dec_embed_input, dec_embeddings, encoder_state, target_vocab_size,
sequence_length, rnn_size, num_layers, target_vocab_to_int, keep_prob)
return t_logits, i_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_seq2seq_model(seq2seq_model)
Explanation: Build the Neural Network
Apply the functions you implemented above to:
Apply embedding to the input data for the encoder.
Encode the input using your encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob).
Process target data using your process_decoding_input(target_data, target_vocab_to_int, batch_size) function.
Apply embedding to the target data for the decoder.
Decode the encoded input using your decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size, num_layers, target_vocab_to_int, keep_prob).
End of explanation
# Number of Epochs
epochs = 4
# Batch Size
batch_size = 128
# RNN Size
rnn_size = 384
# Number of Layers
num_layers = 2
# Embedding Size
encoding_embedding_size = 128
decoding_embedding_size = 128
# Learning Rate
learning_rate = 0.001
# Dropout Keep Probability
keep_probability = 0.6
Explanation: Neural Network Training
Hyperparameters
Tune the following parameters:
Set epochs to the number of epochs.
Set batch_size to the batch size.
Set rnn_size to the size of the RNNs.
Set num_layers to the number of layers.
Set encoding_embedding_size to the size of the embedding for the encoder.
Set decoding_embedding_size to the size of the embedding for the decoder.
Set learning_rate to the learning rate.
Set keep_probability to the Dropout keep probability
End of explanation
DON'T MODIFY ANYTHING IN THIS CELL
save_path = 'checkpoints/dev'
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
max_source_sentence_length = max([len(sentence) for sentence in source_int_text])
train_graph = tf.Graph()
with train_graph.as_default():
input_data, targets, lr, keep_prob = model_inputs()
sequence_length = tf.placeholder_with_default(max_source_sentence_length, None, name='sequence_length')
input_shape = tf.shape(input_data)
train_logits, inference_logits = seq2seq_model(
tf.reverse(input_data, [-1]), targets, keep_prob, batch_size, sequence_length, len(source_vocab_to_int), len(target_vocab_to_int),
encoding_embedding_size, decoding_embedding_size, rnn_size, num_layers, target_vocab_to_int)
tf.identity(inference_logits, 'logits')
with tf.name_scope("optimization"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(
train_logits,
targets,
tf.ones([input_shape[0], sequence_length]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
Explanation: Build the Graph
Build the graph using the neural network you implemented.
End of explanation
DON'T MODIFY ANYTHING IN THIS CELL
import time
def get_accuracy(target, logits):
Calculate accuracy
max_seq = max(target.shape[1], logits.shape[1])
if max_seq - target.shape[1]:
target = np.pad(
target,
[(0,0),(0,max_seq - target.shape[1])],
'constant')
if max_seq - logits.shape[1]:
logits = np.pad(
logits,
[(0,0),(0,max_seq - logits.shape[1]), (0,0)],
'constant')
return np.mean(np.equal(target, np.argmax(logits, 2)))
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = helper.pad_sentence_batch(source_int_text[:batch_size])
valid_target = helper.pad_sentence_batch(target_int_text[:batch_size])
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
for batch_i, (source_batch, target_batch) in enumerate(
helper.batch_data(train_source, train_target, batch_size)):
start_time = time.time()
_, loss = sess.run(
[train_op, cost],
{input_data: source_batch,
targets: target_batch,
lr: learning_rate,
sequence_length: target_batch.shape[1],
keep_prob: keep_probability})
batch_train_logits = sess.run(
inference_logits,
{input_data: source_batch, keep_prob: 1.0})
batch_valid_logits = sess.run(
inference_logits,
{input_data: valid_source, keep_prob: 1.0})
train_acc = get_accuracy(target_batch, batch_train_logits)
valid_acc = get_accuracy(np.array(valid_target), batch_valid_logits)
end_time = time.time()
print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.3f}, Validation Accuracy: {:>6.3f}, Loss: {:>6.3f}'
.format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_path)
print('Model Trained and Saved')
Explanation: Train
Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forums to see if anyone is having the same problem.
End of explanation
DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params(save_path)
Explanation: Save Parameters
Save the batch_size and save_path parameters for inference.
End of explanation
DON'T MODIFY ANYTHING IN THIS CELL
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()
load_path = helper.load_params()
Explanation: Checkpoint
End of explanation
def sentence_to_seq(sentence, vocab_to_int):
Convert a sentence to a sequence of ids
:param sentence: String
:param vocab_to_int: Dictionary to go from the words to an id
:return: List of word ids
return [vocab_to_int[word] if word in vocab_to_int else vocab_to_int['<UNK>']
for word in sentence.lower().split()]
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_sentence_to_seq(sentence_to_seq)
Explanation: Sentence to Sequence
To feed a sentence into the model for translation, you first need to preprocess it. Implement the function sentence_to_seq() to preprocess new sentences.
Convert the sentence to lowercase
Convert words into ids using vocab_to_int
Convert words not in the vocabulary, to the <UNK> word id.
End of explanation
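A quick illustrative call with a made-up three-word vocabulary shows both the lowercasing and the <UNK> fallback:
# Illustrative only -- a tiny made-up vocabulary, not the project's real vocab_to_int.
toy_vocab = {'hello': 7, 'world': 8, '<UNK>': 2}
print(sentence_to_seq('Hello there world', toy_vocab))   # -> [7, 2, 8]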
translate_sentence = 'he saw a old yellow truck .'
DON'T MODIFY ANYTHING IN THIS CELL
translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_path + '.meta')
loader.restore(sess, load_path)
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('logits:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
translate_logits = sess.run(logits, {input_data: [translate_sentence], keep_prob: 1.0})[0]
print('Input')
print(' Word Ids: {}'.format([i for i in translate_sentence]))
print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))
print('\nPrediction')
print(' Word Ids: {}'.format([i for i in np.argmax(translate_logits, 1)]))
print(' French Words: {}'.format([target_int_to_vocab[i] for i in np.argmax(translate_logits, 1)]))
Explanation: Translate
This will translate translate_sentence from English to French.
End of explanation |
14,595 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<font color="blue">
Vitali C.
Data Scientist<br>
Step1: Column names
Number of times pregnant
Plasma glucose concentration a 2 hours in an oral glucose tolerance test
Diastolic blood pressure (mm Hg)
Triceps skin fold thickness (mm)
2-Hour serum insulin (mu U/ml)
Body mass index (weight in kg/(height in m)^2)
Diabetes pedigree function
Age (years)
Class variable (0 or 1)
Step2: Tuning hyperparameters with GridSearch | Python Code:
# Importing libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('bmh')
%matplotlib inline
# To learn more about the data set https://archive.ics.uci.edu/ml/datasets/pima+indians+diabetes
data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data'
df = pd.read_csv(data_url, header=None)  # the raw .data file has no header row
df.head()
Explanation: <font color="blue">
Vitali C.
Data Scientist<br>
End of explanation
columns = ["#pregnancies", "glucose_conc", "blood_pressure",
"skin_thickness", "serum_insulin", "bmi", "dpf", "age", "class"]
df.columns = columns
df.head()
df.shape
X = df.iloc[:, :-1]
y = df.iloc[:, -1]
# Split data into a training and testing datasets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=42)
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
# Logistic Regression
lr = LogisticRegression()
lr.fit(X_train, y_train)
lr_train_score = lr.score(X_train, y_train)
lr_test_score = lr.score(X_test, y_test)
print("Accuracy of training score is", train_score)
print("Accuracy of testing score is", test_score )
# Random Forest
rf = RandomForestClassifier()
rf.fit(X_train, y_train)
rf_train_score = rf.score(X_train, y_train)
rf_test_score = rf.score(X_test, y_test)
print("Accuracy of training score is", rf_train_score)
print("Accuracy of testing score is", rf_test_score)
# Naive Bayes Classifier
nb = MultinomialNB()
nb.fit(X_train, y_train)
nb_train_score = nb.score(X_train, y_train)
nb_test_score = nb.score(X_test, y_test)
print("Accuracy of training score is", nb_train_score)
print("Accuracy of testing score is", nb_test_score)
# Support Vector Machines
svm = SVC()
svm.fit(X_train, y_train)
svm_train_score = svm.score(X_train, y_train)
svm_test_score = svm.score(X_test, y_test)
print("Accuracy of training score is", svm_train_score)
print("Accuracy of testing score is", svm_test_score)
# Plotting the results
classifiers = ["Logistic_Reg", "Random_Forest", "Naive_Bayes", "Support_Vector"]
y_axis = range(len(classifiers))
scores = [lr_test_score, rf_test_score, nb_test_score, svm_test_score]
plt.bar(y_axis, scores, align='center', alpha=0.5)
plt.xticks(y_axis, classifiers)
plt.ylabel('Testing score')
plt.title('Comparison of ML classifiers')
Explanation: Column names
Number of times pregnant
Plasma glucose concentration a 2 hours in an oral glucose tolerance test
Diastolic blood pressure (mm Hg)
Triceps skin fold thickness (mm)
2-Hour serum insulin (mu U/ml)
Body mass index (weight in kg/(height in m)^2)
Diabetes pedigree function
Age (years)
Class variable (0 or 1)
End of explanation
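Before tuning, it can help to check the class balance, since raw accuracy is easier to interpret against the majority-class baseline. A small optional check (not in the original notebook):
# Optional: fraction of each outcome class in the full dataset.
print(df['class'].value_counts(normalize=True))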
from sklearn.grid_search import GridSearchCV
grid_values = {
'n_estimators': (5, 10, 20, 50),
'max_depth': (50, 150, 250),
'min_samples_split': [2, 3],
'min_samples_leaf': (1, 2, 3)
}
grid_search = GridSearchCV(rf, param_grid=grid_values, verbose=1, n_jobs=-1, cv=3)
grid_search.fit(X_train, y_train)
print ('Best score: %0.3f' % grid_search.best_score_)
print ('Best parameters set:')
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(grid_values.keys()):
print ('\t%s: %r' % (param_name, best_parameters[param_name]))
Explanation: Tuning hyperparameters with GridSearch
End of explanation |
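A natural follow-up (not shown in the original notebook) is to score the tuned model on the held-out test set; GridSearchCV refits the best estimator on the full training data by default:
# Evaluate the tuned Random Forest on the untouched test split.
best_rf = grid_search.best_estimator_
print("Tuned Random Forest test accuracy: %0.3f" % best_rf.score(X_test, y_test))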
14,596 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
GEM-PRO - Genes & Sequences
This notebook gives an example of how to run the GEM-PRO pipeline with a dictionary of gene IDs and their protein sequences.
<div class="alert alert-info">
**Input
Step1: Logging
Set the logging level in logger.setLevel(logging.<LEVEL_HERE>) to specify how verbose you want the pipeline to be. Debug is most verbose.
CRITICAL
Only really important messages shown
ERROR
Major errors
WARNING
Warnings that don't affect running of the pipeline
INFO (default)
Info such as the number of structures mapped per gene
DEBUG
Really detailed information that will print out a lot of stuff
<div class="alert alert-warning">
**Warning
Step2: Initialization of the project
Set these three things
Step3: Mapping sequence --> structure
Since the sequences have been provided, we just need to BLAST them to the PDB.
<p><div class="alert alert-info">**Note
Step4: Downloading and ranking structures
Methods
<div class="alert alert-warning">
**Warning
Step5: Creating homology models
For those proteins with no representative structure, we can create homology models for them. ssbio contains some built in functions for easily running I-TASSER locally or on machines with SLURM (ie. on NERSC) or Torque job scheduling.
You can load in I-TASSER models once they complete using the get_itasser_models later.
<p><div class="alert alert-info">**Info
Step6: Saving your GEM-PRO
<p><div class="alert alert-warning">**Warning | Python Code:
import sys
import logging
# Import the GEM-PRO class
from ssbio.pipeline.gempro import GEMPRO
# Printing multiple outputs per cell
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
Explanation: GEM-PRO - Genes & Sequences
This notebook gives an example of how to run the GEM-PRO pipeline with a dictionary of gene IDs and their protein sequences.
<div class="alert alert-info">
**Input:**
Dictionary of gene IDs and protein sequences
</div>
<div class="alert alert-info">
**Output:**
GEM-PRO model
</div>
Imports
End of explanation
# Create logger
logger = logging.getLogger()
logger.setLevel(logging.INFO) # SET YOUR LOGGING LEVEL HERE #
# Other logger stuff for Jupyter notebooks
handler = logging.StreamHandler(sys.stderr)
formatter = logging.Formatter('[%(asctime)s] [%(name)s] %(levelname)s: %(message)s', datefmt="%Y-%m-%d %H:%M")
handler.setFormatter(formatter)
logger.handlers = [handler]
Explanation: Logging
Set the logging level in logger.setLevel(logging.<LEVEL_HERE>) to specify how verbose you want the pipeline to be. Debug is most verbose.
CRITICAL
Only really important messages shown
ERROR
Major errors
WARNING
Warnings that don't affect running of the pipeline
INFO (default)
Info such as the number of structures mapped per gene
DEBUG
Really detailed information that will print out a lot of stuff
<div class="alert alert-warning">
**Warning:**
`DEBUG` mode prints out a large amount of information, especially if you have a lot of genes. This may stall your notebook!
</div>
End of explanation
# SET FOLDERS AND DATA HERE
import tempfile
ROOT_DIR = tempfile.gettempdir()
PROJECT = 'genes_and_sequences_GP'
GENES_AND_SEQUENCES = {'b0870': 'MIDLRSDTVTRPSRAMLEAMMAAPVGDDVYGDDPTVNALQDYAAELSGKEAAIFLPTGTQANLVALLSHCERGEEYIVGQAAHNYLFEAGGAAVLGSIQPQPIDAAADGTLPLDKVAMKIKPDDIHFARTKLLSLENTHNGKVLPREYLKEAWEFTRERNLALHVDGARIFNAVVAYGCELKEITQYCDSFTICLSKGLGTPVGSLLVGNRDYIKRAIRWRKMTGGGMRQSGILAAAGIYALKNNVARLQEDHDNAAWMAEQLREAGADVMRQDTNMLFVRVGEENAAALGEYMKARNVLINASPIVRLVTHLDVSREQLAEVAAHWRAFLAR',
'b3041': 'MNQTLLSSFGTPFERVENALAALREGRGVMVLDDEDRENEGDMIFPAETMTVEQMALTIRHGSGIVCLCITEDRRKQLDLPMMVENNTSAYGTGFTVTIEAAEGVTTGVSAADRITTVRAAIADGAKPSDLNRPGHVFPLRAQAGGVLTRGGHTEATIDLMTLAGFKPAGVLCELTNDDGTMARAPECIEFANKHNMALVTIEDLVAYRQAHERKAS'}
PDB_FILE_TYPE = 'mmtf'
# Create the GEM-PRO project
my_gempro = GEMPRO(gem_name=PROJECT, root_dir=ROOT_DIR, genes_and_sequences=GENES_AND_SEQUENCES, pdb_file_type=PDB_FILE_TYPE)
Explanation: Initialization of the project
Set these three things:
ROOT_DIR
The directory where a folder named after your PROJECT will be created
PROJECT
Your project name
LIST_OF_GENES
Your list of gene IDs
A directory will be created in ROOT_DIR with your PROJECT name. The folders are organized like so:
```
ROOT_DIR
└── PROJECT
├── data # General storage for pipeline outputs
├── model # SBML and GEM-PRO models are stored here
├── genes # Per gene information
│ ├── <gene_id1> # Specific gene directory
│ │ └── protein
│ │ ├── sequences # Protein sequence files, alignments, etc.
│ │ └── structures # Protein structure files, calculations, etc.
│ └── <gene_id2>
│ └── protein
│ ├── sequences
│ └── structures
├── reactions # Per reaction information
│ └── <reaction_id1> # Specific reaction directory
│ └── complex
│ └── structures # Protein complex files
└── metabolites # Per metabolite information
└── <metabolite_id1> # Specific metabolite directory
└── chemical
└── structures # Metabolite 2D and 3D structure files
```
<div class="alert alert-info">**Note:** Methods for protein complexes and metabolites are still in development.</div>
End of explanation
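As an optional sanity check (my own addition, relying on the model_dir attribute that the saving step later in this notebook also uses), one can confirm the project tree was created:
# Optional sanity check; my_gempro.model_dir is the same attribute used later when saving.
import os.path as op
print(op.isdir(my_gempro.model_dir))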
# Mapping using BLAST
my_gempro.blast_seqs_to_pdb(all_genes=True, seq_ident_cutoff=.9, evalue=0.00001)
my_gempro.df_pdb_blast.head(2)
Explanation: Mapping sequence --> structure
Since the sequences have been provided, we just need to BLAST them to the PDB.
<p><div class="alert alert-info">**Note:** These methods do not download any 3D structure files.</div></p>
Methods
End of explanation
# Download all mapped PDBs and gather the metadata
my_gempro.pdb_downloader_and_metadata()
my_gempro.df_pdb_metadata.head(2)
# Set representative structures
my_gempro.set_representative_structure()
my_gempro.df_representative_structures.head()
# Looking at the information saved within a gene
my_gempro.genes.get_by_id('b0870').protein.representative_structure
my_gempro.genes.get_by_id('b0870').protein.representative_structure.get_dict()
Explanation: Downloading and ranking structures
Methods
<div class="alert alert-warning">
**Warning:**
Downloading all PDBs takes a while, since they are also parsed for metadata. You can skip this step and just set representative structures below if you want to minimize the number of PDBs downloaded.
</div>
End of explanation
# Prep I-TASSER model folders
my_gempro.prep_itasser_modeling('~/software/I-TASSER4.4', '~/software/ITLIB/', runtype='local', all_genes=False)
Explanation: Creating homology models
For those proteins with no representative structure, we can create homology models for them. ssbio contains some built in functions for easily running I-TASSER locally or on machines with SLURM (ie. on NERSC) or Torque job scheduling.
You can load in I-TASSER models once they complete using the get_itasser_models later.
<p><div class="alert alert-info">**Info:** Homology modeling can take a long time - about 24-72 hours per protein (highly dependent on the sequence length, as well as if there are available templates).</div></p>
Methods
End of explanation
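Once the I-TASSER runs finish, the text above points to get_itasser_models for loading them back in. The exact arguments are an assumption on my part, so the call is left commented out:
# Hypothetical follow-up -- argument names are assumptions; check the ssbio docs for the real signature.
# my_gempro.get_itasser_models(itasser_model_dir='~/itasser_runs/')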
import os.path as op
my_gempro.save_json(op.join(my_gempro.model_dir, '{}.json'.format(my_gempro.id)), compression=False)
Explanation: Saving your GEM-PRO
<p><div class="alert alert-warning">**Warning:** Saving is still experimental. For a full GEM-PRO with sequences & structures, depending on the number of genes, saving can take >5 minutes.</div></p>
End of explanation |
14,597 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Neural Network demo
Start a Mosquitto container first. For example
Step1: List of neurons
Step2: Start client
Step3: Utility functions
Step4: Reset neurons
Step5: Probe neurons by blinking LEDs
Step6: Setup connections / weights / thresholds
Step7: Stop the demo | Python Code:
import os
import sys
import time
sys.path.append(os.path.abspath(os.path.join(os.path.pardir, os.path.sep.join(['..', 'codes']), 'client')))
sys.path.append(os.path.abspath(os.path.join(os.path.pardir, os.path.sep.join(['..', 'codes']), 'node')))
sys.path.append(os.path.abspath(os.path.join(os.path.pardir, os.path.sep.join(['..', 'codes']), 'shared')))
sys.path.append(os.path.abspath(os.path.join(os.path.pardir, os.path.sep.join(['..', 'codes']), 'micropython')))
import client
from collections import OrderedDict
import pandas as pd
from pandas import DataFrame
from time import sleep
REFRACTORY_PERIOD = 0.1 # 0.1 seconds
Explanation: Neural Network demo
Start a Mosquitto container first. For example:
- Use codes\_demo\1_start_broker.sh to start a Mosquitto container on Raspberry Pi.
- Config files are in mqtt_config\mqtt.
- set allow_anonymous true in mqtt_config\mqtt\config\mosquitto.conf to allow anonymous client.
Getting Started
What this notebook does:
- Using:
- a client on PC
- 6 ESP8266 modules (NodeMCU and D1 mini) as remote nodes
- List connected nodes
- Rename remote nodes
- Setup neural network configuration (connections, weights, thresholds)
- Fire up neurons and get logs.
End of explanation
neurons = ['n_Alpha', 'n_Beta', 'n_Lambda']
# neurons = ['n_Alpha']
neurons
Explanation: List of neurons
End of explanation
the_client = client.Client()
the_client.start()
while not the_client.status['Is connected']:
time.sleep(1)
print('Node not ready yet.')
Explanation: Start client
End of explanation
# # Ask Hub for a list of connected nodes
# def list_nodes():
# the_client.node.worker.roll_call()
# time.sleep(2)
# remote_nodes = sorted(the_client.node.worker.contacts.keys())
# print('\n[____________ Connected nodes ____________]\n')
# print('\nConnected nodes:\n{}\n'.format(remote_nodes))
# return remote_nodes
def reset_node(node):
message = {'message_type': 'exec',
'to_exec': 'import machine;machine.reset()'}
the_client.request(node, message)
def fire(node):
message = {'message_type': 'function',
'function': 'fire'}
the_client.request(node, message)
def addConnection(node, neuron):
message = {'message_type': 'function',
'function': 'addConnection',
'kwargs': {'neuron_id': neuron}}
the_client.request(node, message)
def set_connections(node, connections):
message = {'message_type': 'function',
'function': 'setConnections',
'kwargs': {'connections': connections}}
the_client.request(node, message)
def get_connections(node):
message = {'message_type': 'function',
'function': 'getConnections',
'need_result': True}
_, result = the_client.request(node, message)
return result.get()
def setWeight(node, neuron, weight):
message = {'message_type': 'function',
'function': 'setWeight',
'kwargs': {'neuron_id': neuron,
'weight': weight,}}
the_client.request(node, message)
def setThreshold(node, threshold):
message = {'message_type': 'function',
'function': 'setThreshold',
'kwargs': {'threshold': threshold}}
the_client.request(node, message)
def getConfig(node):
message = {'message_type': 'function',
'function': 'getConfig',
'need_result': True}
_, result = the_client.request(node, message)
return result.get()
def getLog(node):
message = {'message_type': 'function',
'function': 'getLog',
'need_result': True}
_, result = the_client.request(node, message)
return result.get()
def emptyLog(node):
message = {'message_type': 'function',
'function': 'emptyLog'}
the_client.request(node, message)
def emptyLogs():
for neuron in neurons:
emptyLog(neuron)
def mergeLogs():
logs = []
for neuron in neurons:
if neuron != the_client.node.worker.name: # exclude client self
currentLog = getLog(neuron)
if currentLog:
logs += currentLog
df = DataFrame(list(logs), columns = ['time', 'neuron', 'message'])
df.set_index('time', inplace = True)
df.sort_index(inplace = True)
return df
def printConfig(neuron):
print('{0:_^78}\n {1}\n'.format(neuron + " config:", getConfig(neuron)))
Explanation: Utility functions
End of explanation
# reset_node('Hub');
Explanation: Reset neurons
End of explanation
messages = {}
messages['blink_led'] = {'message_type': 'command',
'command': 'blink led',
'kwargs': {'times': 3, 'on_seconds': 0.1, 'off_seconds': 0.1}}
the_client.request('Hub', messages['blink_led']);
Explanation: Probe neurons by blinking LEDs
End of explanation
addConnection('n_Alpha', 'n_Lambda');
addConnection('n_Beta', 'n_Lambda');
setWeight('n_Lambda', 'n_Alpha', 1);
setWeight('n_Lambda', 'n_Beta', 1);
setThreshold('n_Lambda', 2.8); # input enough to trigger Lambda
fire('n_Alpha');
fire('n_Beta');
setThreshold('n_Lambda', 2.8); # input not enough to trigger Lambda
fire('n_Alpha');
fire('n_Beta');
setThreshold('n_Lambda', 1.8); # input not enough to trigger Lambda
fire('n_Alpha');
setThreshold('n_Lambda', 1.8); # input enough to trigger Lambda
fire('n_Alpha');
fire('n_Beta');
# setThreshold('n_Lambda', 1.8);
# emptyLogs()
# sleep(REFRACTORY_PERIOD)
# fire('n_Alpha')
# fire('n_Beta')
# sleep(2)
# mergeLogs()
# for neuron in reversed(neurons): printConfig(neuron)
Explanation: Setup connections / weights / thresholds
End of explanation
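The commented-out block above hints at a fuller experiment; spelled out with the helper functions defined earlier, it looks like this:
# Full round-trip using the helpers from the Utility functions section.
for neuron in reversed(neurons):
    printConfig(neuron)            # inspect each neuron's connections, weights and threshold
emptyLogs()                        # clear old firing logs
sleep(REFRACTORY_PERIOD)
fire('n_Alpha')
fire('n_Beta')
sleep(2)
mergeLogs()                        # returns a time-ordered DataFrame of firing events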
# Stopping
the_client.stop()
the_client = None
print ('\n[________________ Demo stopped ________________]\n')
Explanation: Stop the demo
End of explanation |
14,598 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Case study
Given a user’s past reviews on Yelp (available from yelp-challenge dataset),
When the user writes a review for a business she hasn't reviewed before,
How likely will it be a Five-Star review?
Load data
Visualize the data
Featurize the data
Join tables to populate the features
Model the data
Step1: Useful Functions to Explore the DataFrames
review_df.head()
Step2: Example 2
Step3: Step 3
Step4: 3.2 Join DataFrames to Populate the Features
After generating the features within each dataframe, we join them together into one big dataframe.
Each join is a many-to-one join.
After the joins, each row represents one review, including features from the review itself, the user who’s written the review, and the business the review is for.
Step5: Step 4
Step6: 4.2 Split Training and Testing Set
Training set
Step7: 4.3 What Model to Use? Some Examples
Decision Tree
Step8: 4.5 Cross Validation
Holding out a portion of the training data for model validation, and do this for n_folds.
- Ensure that the model does not overfit the training data.
- Select optimal model parameters.
Step9: Step 5
Step10: 5.2 Evaluation via Confusion Matrix
Step11: 5.3 Make Prediction with the Model
Process
Step12: biz1
Step13: Make predictions for user[1,2,3]'s review on biz2 | Python Code:
import pandas as pd
PATH = '/scratch/xun/docs/yelp_dataset_challenge_round10/'
biz_df = pd.read_csv(PATH + 'business.csv')
user_df = pd.read_csv(PATH + 'user.csv')
review_df = pd.read_csv(PATH + 'review.csv')
review_df = review_df.set_index('review_id')
user_df = user_df.set_index('user_id')
biz_df = biz_df.set_index('business_id')
Explanation: Case study
Given a user’s past reviews on Yelp (available from yelp-challenge dataset),
When the user writes a review for a business she hasn't reviewed before,
How likely will it be a Five-Star review?
Load data
Visualize the data
Featurize the data
Join tables to populate the features
Model the data: Logistic regression
Evaluate the model
Make prediction with the model
Data source
Yelp Dataset Round 10
We took the full set of business, user, and review data in json format, pre-processed the data (converting from json to csv format) using this script.
Step 1: Load the Data
We store data in Pandas DataFrames.
We load three types of data from the dataset, each stored in a separate df.
set_index(): tells pandas to index the review df by the column named review_id.
End of explanation
import seaborn as sns
%matplotlib inline
# Set context to "talk" for figure aesthetics
sns.set_context(context="talk")
# set plot figure size to larger
sns.set(palette='Set2', rc={"figure.figsize": (15, 8)}, style="ticks")
ax = sns.countplot(x='stars', data=review_df)
# Removing spines
sns.despine()
Explanation: Useful Functions to Explore the DataFrames
review_df.head(): Print top rows in the data frame.
review_df.describe(): Generate various summary statistics, mean, max, count, etc.
Step 2: Explore & Visualize the Data
Example 1: Plot Review Star Rating Distribution
First ask yourself this question:
Among all the reviews available in the dataset, what percentage of them are 5 star?
One common misconception about Yelp is that folks only come to Yelp to complain about their negative experiences with a business, so most of the reviews "should" be low-rated.
However, this is a misconception, as the graph below, plotted from the Yelp Open Dataset, shows.
End of explanation
review_df['datetime'] = pd.to_datetime(review_df['date'])
review_df['year'] = review_df['datetime'].dt.year
ax = sns.countplot(x='year', data=review_df, hue='stars')
sns.despine()
Explanation: Example 2: Plot Review Star Rating Distribution by year
From the graph below:
- healthy growth over the years in the number of reviews people wrote every year.
- in later years, there seems to be a shift towards 5-star reviews.
End of explanation
def calculate_date_delta(df, from_column, to_column):
datetime = pd.to_datetime(df[from_column])
time_delta = datetime.max() - datetime
df[to_column] = time_delta.apply(lambda x: x.days)
df.drop(from_column, axis=1, inplace=True)
def to_length(df, from_column, to_column):
df[to_column] = df[from_column].apply(lambda x: len(x))
df.drop(from_column, axis=1, inplace=True)
def drop_columns(df, columns):
for column in columns:
df.drop(column, axis=1, inplace=True)
def to_boolean(df, columns):
for column in columns:
to_column = column+'_bool'
df[to_column] = df[column].apply(lambda x: bool(x))
df.drop(column, axis=1, inplace=True)
FILL_WITH = 0.0
def to_category(df, columns):
for column in columns:
df[column] = df[column].astype('category')
# add FILL_WITH category for fillna() to work w/o error
if (FILL_WITH not in df[column].cat.categories):
df[column] = df[column].cat.add_categories([FILL_WITH])
#print 'categories for ', column, ' include ', df[column].cat.categories
def category_rename_to_int(df, columns):
for column in columns:
df[column].cat.remove_unused_categories()
size = len(df[column].cat.categories)
#print 'column ', column, ' has ', size, ' columns, include ', df[column].cat.categories
df[column] = df[column].cat.rename_categories(range(1, size+1))
#print 'becomes ', df[column].cat.categories
calculate_date_delta(df=review_df, from_column='date', to_column='date_delta')
to_length(df=review_df, from_column='text', to_column='text_len')
drop_columns(df=review_df, columns=['year', 'datetime'])
review_df.fillna(value=0.0, inplace=True)
calculate_date_delta(df=user_df, from_column='yelping_since', to_column='date_delta')
to_length(df=user_df, from_column='friends', to_column='friends_count')
to_length(df=user_df, from_column='elite', to_column='elite_count')
drop_columns(df=user_df, columns=['name'])
user_df.fillna(value=0.0, inplace=True)
drop_columns(
df=biz_df,
columns=[
'postal_code',
'name',
'city',
'address',
'state',
'categories',
'longitude',
'latitude',
'neighborhood',
'hours.Monday',
'hours.Tuesday',
'hours.Wednesday',
'hours.Thursday',
'hours.Friday',
'hours.Saturday',
'hours.Sunday',
]
)
# print out all the unique values, help clean up data to be float type
#for column in biz_df.columns:
#print column, biz_df[column].unique()
to_cat_columns = [
'attributes.Ambience.casual',
'attributes.AgesAllowed',
'attributes.Alcohol',
'attributes.NoiseLevel',
'attributes.RestaurantsAttire',
'attributes.RestaurantsPriceRange2',
'attributes.BYOBCorkage',
'attributes.WiFi',
'attributes.Smoking',
]
to_category(
df=biz_df,
columns=to_cat_columns,
)
biz_df.fillna(value=FILL_WITH, inplace=True)
category_rename_to_int(
df=biz_df,
columns=to_cat_columns,
)
Explanation: Step 3: Generate the Features
3.1 Examples Feature Extractions & Conversions
Convert date string to date delta
For example, business_age
Convert strings to categorical features
For example, noise_level: {'quiet', 'loud', 'very loud'}.
Drop unused features
For example, business_name
End of explanation
# The `user_df` DataFrame is already indexed by the join key (`user_id`). Make sure it's on the right side of join.
review_join_user = review_df.join(user_df, on='user_id', lsuffix='_review', rsuffix='_user')
review_join_user_join_biz = review_join_user.join(biz_df, on='business_id', rsuffix='_biz')
drop_columns(df=review_join_user_join_biz, columns=['user_id', 'business_id'])
Explanation: 3.2 Join DataFrames to Populate the Features
After generating the features within each dataframe, we join them together into one big dataframe.
Each join is a many-to-one join.
After the joins, each row represents one review, including features from the review itself, the user who’s written the review, and the business the review is for.
End of explanation
# Target y is whether a review is five-star (True / False)
y = review_join_user_join_biz.stars.apply(lambda x: x == 5)
# Exclude the `stars` columns from the feature matrix, since it is the target
X = review_join_user_join_biz
review_join_user_join_biz.drop('stars', axis=1, inplace=True)
# get the feature names - this will be useful for the model visualization and feature analysis
features = X.columns.values
Explanation: Step 4: Train a Model
The process of training an ML model involves providing an ML algorithm (that is, the learning algorithm) with training data to learn from. The term ML model refers to the model artifact that is created by the training process.
4.1 Arrange Data into a Feature Matrix and a Target Array
Feature matrix X:
All features gathered from business, user, and review dataframes.
Two-dimensional array of features with shape = (n_samples, n_features)
Target array y:
What we'd like to predict: Whether the review is Five-star or not.
Array of labels with shape = (n_samples)
End of explanation
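A quick shape check confirms the layout described above and shows how often the target is True (five-star):
# Sanity check on the feature matrix and target array.
print 'X shape:', X.shape
print 'y shape:', y.shape
print 'Fraction of five-star reviews:', y.mean()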
from sklearn.cross_validation import train_test_split
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y)
print 'training data shape', X_train.shape
print 'test data shape', X_test.shape
print 'converted label data shape', y_train.shape
print 'features', features
Explanation: 4.2 Split Training and Testing Set
Training set: used for a machine learning algorithm to train from.
Testing set: used to estimate / evaluate how well the model has been trained.
Split them so that we don’t evaluate on the same dataset we train from
End of explanation
from sklearn import preprocessing
scaler = preprocessing.StandardScaler().fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
Explanation: 4.3 What Model to Use? Some Examples
Decision Tree: predicts the value of a target variable by learning simple decision rules inferred from the data features
Random Forest: combines de-correlated trees, where each tree is built from a bootstrap sample and node splits are calculated from random feature subsets
Ensemble Model: combine predictions of several models in order to improve the accuracy (decrease bias) and robustness (decrease variance) over a single model
The model we used here: Logistic Regression
Logistic regression estimates the probability of a binary response based on one or more features.
The probabilities describing the possible outcomes of a single trial are modeled using a logistic function.
Here we estimate the probability of a review being five-star.
4.4 Normalize the Features
Standardize features by removing the mean and scaling to unit variance.
- Logistic Regression requires all features normalized.
End of explanation
from sklearn.cross_validation import cross_val_score
import numpy as np
# Function used to print cross-validation scores
def training_score(est, X, y, cv):
acc = cross_val_score(est, X, y, cv = cv, scoring='accuracy')
roc = cross_val_score(est, X, y, cv = cv, scoring='roc_auc')
print '5-fold Train CV | Accuracy:', round(np.mean(acc), 3),'+/-', \
round(np.std(acc), 3),'| ROC AUC:', round(np.mean(roc), 3), '+/-', round(np.std(roc), 3)
from sklearn import linear_model
# Build model using default parameter values
lrc = linear_model.LogisticRegression()
from sklearn.cross_validation import StratifiedKFold
# cross-validation
cv = StratifiedKFold(y_train, n_folds=5, shuffle=True)
Explanation: 4.5 Cross Validation
Holding out a portion of the training data for model validation, repeated over n_folds folds.
- Ensure that the model does not overfit the training data.
- Select optimal model parameters.
End of explanation
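The same cross-validation helper can be reused to compare against one of the alternative models listed in 4.3; a hedged sketch of such a comparison (not part of the original analysis):
# Optional comparison run; reuses the training_score helper and the CV splitter defined above.
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(n_estimators=100)
training_score(est=rfc, X=X_train_scaled, y=y_train, cv=cv)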
# print cross-validation scores
training_score(est=lrc, X=X_train_scaled, y=y_train, cv=cv)
Explanation: Step 5: Evaluate the Model
5.1 Metrics
Accuracy: percentage of labels correctly predicted. The higher the better.
ROC AUC: the ROC curve is a graphical plot that illustrates the diagnostic ability of a binary classifier as its discrimination threshold is varied; "AUC" is the Area Under that Curve. The higher the better.
End of explanation
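To complement the cross-validated numbers, the accuracy metric can also be computed once on the held-out test split (a simple extra check, not in the original notebook):
# Held-out accuracy as a complement to the CV scores.
print 'Held-out test accuracy:', lrc.fit(X_train_scaled, y_train).score(X_test_scaled, y_test)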
# Compute confusion matrix
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Run classifier
lrc_fit = lrc.fit(X_train_scaled, y_train)
y_pred = lrc_fit.predict(X_test_scaled)
cm = confusion_matrix(y_test, y_pred)
# Normalize the confusion matrix by row (i.e by the number of samples in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure(figsize=(8, 8))
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
Explanation: 5.2 Evaluation via Confusion Matrix
End of explanation
def predict_given_user_biz(user, biz, review_df):
a_user = user.copy()
a_biz = biz.copy()
# The review hasn't actually be written yet.
# XXX(xun): We fake the feature using global mean/avg values to create a "general" review.
# There are many other/better ways to fake the review to be more representative
# (e.g. draw from the real distribution, or avg of the user / the biz).
a_review = pd.DataFrame(
{
'funny': review_df['funny'].mean(),
'user_id': a_user.index,
'business_id': a_biz.index,
'useful': review_df['useful'].mean(),
'cool': review_df['cool'].mean(),
'date_delta': 0.0, # the review is written the most recently
'text_len': review_df['text_len'].mean(),
}
)
a_review_join_user = a_review.join(a_user, on='user_id', lsuffix='_review', rsuffix='_user')
a_X_test = a_review_join_user.join(a_biz, on='business_id', rsuffix='_biz')
drop_columns(df=a_X_test, columns=['user_id', 'business_id'])
a_X_test.fillna(value=FILL_WITH, inplace=True)
a_X_test_scaled = scaler.transform(a_X_test)
a_y_pred = lrc_fit.predict(a_X_test_scaled)
a_y_pred_proba = lrc_fit.predict_proba(a_X_test_scaled)
print a_y_pred[0], ', with probability [False, True] == ', a_y_pred_proba[0]
user1 = user_df[user_df.index == 'kEtR1ZVL3Xr-tEX7lg16dQ']
#print user1.review_count
print user1.average_stars
user2 = user_df[user_df.index == 'Hj20fg3vyzKnJwnLn_rMqw']
#print user2.review_count
print user2.average_stars
user3 = user_df[user_df.index == 'om5ZiponkpRqUNa3pVPiRg']
#print user2.review_count
print user3.average_stars
biz1 = biz_df[biz_df.index == 'u-SJ5QUwrNquL9VnXwl8cg']
#print biz1.review_count
print biz1.stars
biz2 = biz_df[biz_df.index == 'MtUUc5w3HuDMcmL89RcG6Q']
#print biz2.review_count
print biz2.stars
Explanation: 5.3 Make Prediction with the Model
Process:
- Randomly pick a few anonymous users from the dataset.
- Randomly pick a few businesses from the dataset.
- Predict whether the user will give the business a review with five-star rating.
End of explanation
predict_given_user_biz(user=user1, biz=biz1, review_df=review_df)
predict_given_user_biz(user=user2, biz=biz1, review_df=review_df)
predict_given_user_biz(user=user3, biz=biz1, review_df=review_df)
Explanation: biz1: https://www.yelp.com/biz/postino-arcadia-phoenix
<img src="postino-arcadia-phoenix.png" alt="postino-arcadia-phoenix" width="800">
biz2 https://www.yelp.com/biz/port-authority-of-allegheny-county-pittsburgh
<img src="port-authority-of-allegheny-county-pittsburgh.png" alt="port-authority-of-allegheny-county-pittsburgh" width="800">
Make predictions for user[1,2,3]'s review on biz1
End of explanation
predict_given_user_biz(user=user1, biz=biz2, review_df=review_df)
predict_given_user_biz(user=user2, biz=biz2, review_df=review_df)
predict_given_user_biz(user=user3, biz=biz2, review_df=review_df)
Explanation: Make predictions for user[1,2,3]'s review on biz2
End of explanation |
14,599 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
FD_1D_stability 1-D FD stability calculation
GNU General Public License v3.0
Author
Step1: Input Parameter
Step2: Calculate Taylor coefficient
Calculate the Taylor coefficient in an arbitrary order for the
second-order derivative.
Usage
Step3: Calculate spatial derivation impact
Assumption
Step4: Temporal 2 order (Leapfrog)
Step5: Temporal 3 order (Adams-Bashforth method)
Step6: Temporal 4 order (Adams-Bashforth method) | Python Code:
import numpy as np
Explanation: FD_1D_stability 1-D FD stability calculation
GNU General Public License v3.0
Author: Florian Wittkamp
Calculate stability limit of FD-simulations
Stability limit is calculated in terms of the CFL-number, which is
defined as: CFL = v_max * DT / DX
You get the maximum DT by DT = CFL * DX / v_max
Theory:
Fei, X., & Xiaohong, T. (2006).
Stability and numerical dispersion analysis of a fourth-order accurate FDTD method. Antennas and Propagation,
IEEE Transactions on, 54(9), 2525-2530.
Initialisation
End of explanation
spatial_order=4
Explanation: Input Parameter
End of explanation
def coeff(order):
## Check some conditions
if int(order)%2!=0:
print("Error: coeff")
print("Order has to be an integer multiple of 2!")
return
if order==2:
print("Error: coeff")
print("Order has to be at least 4!")
return
## Calculation
c=np.transpose(np.hstack((1, np.zeros(int(order/2)-1))))
M=np.zeros((int(order/2),int(order/2)))
# Condition 1: \sum^{N/2}_{k=1} b_k(2k-1)=1
for n in range(1,int(order/2+1)):
M[0,n-1]=(2*n-1)
# Condition 2: \sum^{N/2}_{k=1} b_k(2k-1)^(2j-1)=0; j=2,3...N/2
for j in range(2,int(order/2+1)):
for n in range(1,int(order/2+1)):
M[j-1,n-1]=(2*n-1)**(2*j-1)
coeff=np.transpose(np.dot(np.linalg.inv(M),c))
return(coeff)
Explanation: Calculate Taylor coefficient
Calculate the Taylor coefficient in an arbitrary order for the
second-order derivative.
Usage:
Let's say you want to calculate the 4th-order accurate second-order
FD stencil. Then you have to set order=4 and the result would be used
as follows:
p_x = 1/DH * ( coeff(1) * (p(x+1)-p(x)) + coeff(2) * (p(x+2)-p(x-1)) )
where p_x is the derivative.
End of explanation
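A quick check of the helper: for a 4th-order stencil the classic staggered-grid Taylor coefficients are 9/8 and -1/24, which the function reproduces:
# Sanity check: expected output is approximately [ 1.125 -0.04166667 ].
print(coeff(4))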
sum_fd_stencil=np.sum(np.abs(coeff(spatial_order)))
theta=np.arange(0,2*np.pi,0.01)
print("You choose a spatial order of ",spatial_order)
Explanation: Calculate spatial derivation impact
Assumption: Spatial sampling is at the Nyquist condition
End of explanation
f=-1j*(((np.exp(-1j*theta))**(2.5)-(np.exp(-1j*theta))**1.5))
f2=(2*sum_fd_stencil)
f_ges=f/f2
print("2. Temporal Order has the stability limit CFL=",np.max(f_ges.real))
Explanation: Temporal 2 order (Leapfrog)
End of explanation
f=-1j*(((np.exp(-1j*theta))**2.5-(np.exp(-1j*theta))**1.5)*1);
f2=(2*sum_fd_stencil*(25.0/24.0*(np.exp(-1j*theta))**2
-1./12.*(np.exp(-1j*theta))**1+1./24.))
f_ges=f/f2
print("3. Temporal Order (ABS-method) has the stability limit CFL=",np.max(f_ges.real))
Explanation: Temporal 3 order (Adams-Bashforth method)
End of explanation
f=-1j*(((np.exp(-1j*theta))**3.5-(np.exp(-1j*theta))**2.5))
f2=(2*sum_fd_stencil*(13./12.*(np.exp(-1j*theta))**3
-5./24.*(np.exp(-1j*theta))**2+1./6.*(np.exp(-1j*theta))**1-1./24.))
f_ges=f/f2
print("4. Temporal Order (ABS-method) has the stability limit CFL=",np.max(f_ges.real))
Explanation: Temporal 4 order (Adams-Bashforth method)
End of explanation |
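As a closing usage sketch (the velocity and grid spacing below are arbitrary assumed values, not from this notebook), a CFL limit from above converts into a maximum stable time step like this:
# Hypothetical usage: v_max and DX are assumed example values.
v_max = 3500.0                   # assumed maximum velocity in m/s
DX = 10.0                        # assumed spatial grid spacing in m
CFL_limit = np.max(f_ges.real)   # stability limit of the last scheme evaluated above
DT_max = CFL_limit * DX / v_max
print("Maximum stable DT =", DT_max, "s")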